/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016-2017 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.1.4-130"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		130

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

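/*
 * Wait for the controller to become unblocked.  Returns the number of
 * milliseconds left of the caller's timeout, or the original value when
 * NO_TIMEOUT was passed in or the controller was never blocked.
 */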
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

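/*
 * Build a RAID path request IU for the given BMIC/CISS command and map the
 * caller's data buffer for DMA.  On success, *dir holds the DMA direction
 * that the caller must later pass to pqi_pci_unmap().
 */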
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

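/*
 * Grab a free request from the pre-allocated I/O request pool.  The pool is
 * scanned round-robin starting at next_io_request_slot; a slot is claimed by
 * being the first to bump its refcount from 0 to 1, so no lock is needed and
 * the race on next_io_request_slot is harmless.
 */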
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct pqi_raid_path_request request;
	struct bmic_flush_cache *flush_cache;
	enum dma_data_direction dir;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
		sizeof(*flush_cache), 0, &dir);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
out:
	kfree(flush_cache);

	return rc;
}

static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

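/*
 * Report the host's current local time to the controller as BCD bytes:
 * hour, minute, second, reserved, month, day, century, year-in-century.
 */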
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

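/*
 * Issue a CISS report LUNs command twice: first with a minimal buffer to
 * learn the list length, then with a buffer sized for the full list.  If the
 * list grows between the two calls, reallocate and retry.
 */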
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

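/*
 * Sanity-check the RAID map returned by the controller before it is used
 * for RAID bypass: size bounds, entry count, and layout map counts that are
 * consistent with the volume's RAID level.
 */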
1007static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1008 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1009{
1010 char *err_msg;
1011 u32 raid_map_size;
1012 u32 r5or6_blocks_per_row;
1013 unsigned int num_phys_disks;
1014 unsigned int num_raid_map_entries;
1015
1016 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1017
1018 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1019 err_msg = "RAID map too small";
1020 goto bad_raid_map;
1021 }
1022
1023 if (raid_map_size > sizeof(*raid_map)) {
1024 err_msg = "RAID map too large";
1025 goto bad_raid_map;
1026 }
1027
1028 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1029 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1030 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1031 num_raid_map_entries = num_phys_disks *
1032 get_unaligned_le16(&raid_map->row_cnt);
1033
1034 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
1035 err_msg = "invalid number of map entries in RAID map";
1036 goto bad_raid_map;
1037 }
1038
1039 if (device->raid_level == SA_RAID_1) {
1040 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1041 err_msg = "invalid RAID-1 map";
1042 goto bad_raid_map;
1043 }
1044 } else if (device->raid_level == SA_RAID_ADM) {
1045 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1046 err_msg = "invalid RAID-1(ADM) map";
1047 goto bad_raid_map;
1048 }
1049 } else if ((device->raid_level == SA_RAID_5 ||
1050 device->raid_level == SA_RAID_6) &&
1051 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1052 /* RAID 50/60 */
1053 r5or6_blocks_per_row =
1054 get_unaligned_le16(&raid_map->strip_size) *
1055 get_unaligned_le16(&raid_map->data_disks_per_row);
1056 if (r5or6_blocks_per_row == 0) {
1057 err_msg = "invalid RAID-5 or RAID-6 map";
1058 goto bad_raid_map;
1059 }
1060 }
1061
1062 return 0;
1063
1064bad_raid_map:
Kevin Barnettd87d5472017-05-03 18:54:00 -05001065 dev_warn(&ctrl_info->pci_dev->dev,
Kevin Barnett38a73382017-09-27 16:30:05 -05001066 "logical device %08x%08x %s\n",
1067 *((u32 *)&device->scsi3addr),
1068 *((u32 *)&device->scsi3addr[4]), err_msg);
Kevin Barnett6c223762016-06-27 16:41:00 -05001069
1070 return -EINVAL;
1071}
1072
1073static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1074 struct pqi_scsi_dev *device)
1075{
1076 int rc;
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02001077 enum dma_data_direction dir;
Kevin Barnett6c223762016-06-27 16:41:00 -05001078 struct pqi_raid_path_request request;
1079 struct raid_map *raid_map;
1080
1081 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1082 if (!raid_map)
1083 return -ENOMEM;
1084
1085 rc = pqi_build_raid_path_request(ctrl_info, &request,
1086 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02001087 sizeof(*raid_map), 0, &dir);
Kevin Barnett6c223762016-06-27 16:41:00 -05001088 if (rc)
1089 goto error;
1090
1091 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1092 NULL, NO_TIMEOUT);
1093
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02001094 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
Kevin Barnett6c223762016-06-27 16:41:00 -05001095
1096 if (rc)
1097 goto error;
1098
1099 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1100 if (rc)
1101 goto error;
1102
1103 device->raid_map = raid_map;
1104
1105 return 0;
1106
1107error:
1108 kfree(raid_map);
1109
1110 return rc;
1111}
1112
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001113static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05001114 struct pqi_scsi_dev *device)
1115{
1116 int rc;
1117 u8 *buffer;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001118 u8 bypass_status;
Kevin Barnett6c223762016-06-27 16:41:00 -05001119
1120 buffer = kmalloc(64, GFP_KERNEL);
1121 if (!buffer)
1122 return;
1123
1124 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001125 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
Kevin Barnett6c223762016-06-27 16:41:00 -05001126 if (rc)
1127 goto out;
1128
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001129#define RAID_BYPASS_STATUS 4
1130#define RAID_BYPASS_CONFIGURED 0x1
1131#define RAID_BYPASS_ENABLED 0x2
Kevin Barnett6c223762016-06-27 16:41:00 -05001132
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001133 bypass_status = buffer[RAID_BYPASS_STATUS];
1134 device->raid_bypass_configured =
1135 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1136 if (device->raid_bypass_configured &&
1137 (bypass_status & RAID_BYPASS_ENABLED) &&
1138 pqi_get_raid_map(ctrl_info, device) == 0)
1139 device->raid_bypass_enabled = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05001140
1141out:
1142 kfree(buffer);
1143}
1144
1145/*
1146 * Use vendor-specific VPD to determine online/offline status of a volume.
1147 */
1148
1149static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1150 struct pqi_scsi_dev *device)
1151{
1152 int rc;
1153 size_t page_length;
1154 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1155 bool volume_offline = true;
1156 u32 volume_flags;
1157 struct ciss_vpd_logical_volume_status *vpd;
1158
1159 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1160 if (!vpd)
1161 goto no_buffer;
1162
1163 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1164 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1165 if (rc)
1166 goto out;
1167
1168 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1169 volume_status) + vpd->page_length;
1170 if (page_length < sizeof(*vpd))
1171 goto out;
1172
1173 volume_status = vpd->volume_status;
1174 volume_flags = get_unaligned_be32(&vpd->flags);
1175 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1176
1177out:
1178 kfree(vpd);
1179no_buffer:
1180 device->volume_status = volume_status;
1181 device->volume_offline = volume_offline;
1182}
1183
Kevin Barnett26b390a2018-06-18 13:22:48 -05001184#define PQI_INQUIRY_PAGE0_RETRIES 3
1185
Kevin Barnett6c223762016-06-27 16:41:00 -05001186static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1187 struct pqi_scsi_dev *device)
1188{
1189 int rc;
1190 u8 *buffer;
Kevin Barnett26b390a2018-06-18 13:22:48 -05001191 unsigned int retries;
Kevin Barnett6c223762016-06-27 16:41:00 -05001192
1193 buffer = kmalloc(64, GFP_KERNEL);
1194 if (!buffer)
1195 return -ENOMEM;
1196
1197 /* Send an inquiry to the device to see what it is. */
Kevin Barnett26b390a2018-06-18 13:22:48 -05001198 for (retries = 0;;) {
1199 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
1200 buffer, 64);
1201 if (rc == 0)
1202 break;
1203 if (pqi_is_logical_device(device) ||
1204 rc != PQI_CMD_STATUS_ABORTED ||
1205 ++retries > PQI_INQUIRY_PAGE0_RETRIES)
1206 goto out;
1207 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001208
1209 scsi_sanitize_inquiry_string(&buffer[8], 8);
1210 scsi_sanitize_inquiry_string(&buffer[16], 16);
1211
1212 device->devtype = buffer[0] & 0x1f;
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05001213 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1214 memcpy(device->model, &buffer[16], sizeof(device->model));
Kevin Barnett6c223762016-06-27 16:41:00 -05001215
1216 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
Kevin Barnettbd10cf02017-05-03 18:54:12 -05001217 if (device->is_external_raid_device) {
1218 device->raid_level = SA_RAID_UNKNOWN;
1219 device->volume_status = CISS_LV_OK;
1220 device->volume_offline = false;
1221 } else {
1222 pqi_get_raid_level(ctrl_info, device);
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001223 pqi_get_raid_bypass_status(ctrl_info, device);
Kevin Barnettbd10cf02017-05-03 18:54:12 -05001224 pqi_get_volume_status(ctrl_info, device);
1225 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001226 }
1227
1228out:
1229 kfree(buffer);
1230
1231 return rc;
1232}
1233
1234static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1235 struct pqi_scsi_dev *device,
1236 struct bmic_identify_physical_device *id_phys)
1237{
1238 int rc;
1239
1240 memset(id_phys, 0, sizeof(*id_phys));
1241
1242 rc = pqi_identify_physical_device(ctrl_info, device,
1243 id_phys, sizeof(*id_phys));
1244 if (rc) {
1245 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1246 return;
1247 }
1248
1249 device->queue_depth =
1250 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1251 device->device_type = id_phys->device_type;
1252 device->active_path_index = id_phys->active_path_number;
1253 device->path_map = id_phys->redundant_path_present_map;
1254 memcpy(&device->box,
1255 &id_phys->alternate_paths_phys_box_on_port,
1256 sizeof(device->box));
1257 memcpy(&device->phys_connector,
1258 &id_phys->alternate_paths_phys_connector,
1259 sizeof(device->phys_connector));
1260 device->bay = id_phys->phys_bay_in_box;
1261}
1262
1263static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1264 struct pqi_scsi_dev *device)
1265{
1266 char *status;
1267 static const char unknown_state_str[] =
1268 "Volume is in an unknown state (%u)";
1269 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1270
1271 switch (device->volume_status) {
1272 case CISS_LV_OK:
1273 status = "Volume online";
1274 break;
1275 case CISS_LV_FAILED:
1276 status = "Volume failed";
1277 break;
1278 case CISS_LV_NOT_CONFIGURED:
1279 status = "Volume not configured";
1280 break;
1281 case CISS_LV_DEGRADED:
1282 status = "Volume degraded";
1283 break;
1284 case CISS_LV_READY_FOR_RECOVERY:
1285 status = "Volume ready for recovery operation";
1286 break;
1287 case CISS_LV_UNDERGOING_RECOVERY:
1288 status = "Volume undergoing recovery";
1289 break;
1290 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1291 status = "Wrong physical drive was replaced";
1292 break;
1293 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1294 status = "A physical drive not properly connected";
1295 break;
1296 case CISS_LV_HARDWARE_OVERHEATING:
1297 status = "Hardware is overheating";
1298 break;
1299 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1300 status = "Hardware has overheated";
1301 break;
1302 case CISS_LV_UNDERGOING_EXPANSION:
1303 status = "Volume undergoing expansion";
1304 break;
1305 case CISS_LV_NOT_AVAILABLE:
1306 status = "Volume waiting for transforming volume";
1307 break;
1308 case CISS_LV_QUEUED_FOR_EXPANSION:
1309 status = "Volume queued for expansion";
1310 break;
1311 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1312 status = "Volume disabled due to SCSI ID conflict";
1313 break;
1314 case CISS_LV_EJECTED:
1315 status = "Volume has been ejected";
1316 break;
1317 case CISS_LV_UNDERGOING_ERASE:
1318 status = "Volume undergoing background erase";
1319 break;
1320 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1321 status = "Volume ready for predictive spare rebuild";
1322 break;
1323 case CISS_LV_UNDERGOING_RPI:
1324 status = "Volume undergoing rapid parity initialization";
1325 break;
1326 case CISS_LV_PENDING_RPI:
1327 status = "Volume queued for rapid parity initialization";
1328 break;
1329 case CISS_LV_ENCRYPTED_NO_KEY:
1330 status = "Encrypted volume inaccessible - key not present";
1331 break;
1332 case CISS_LV_UNDERGOING_ENCRYPTION:
1333 status = "Volume undergoing encryption process";
1334 break;
1335 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1336 status = "Volume undergoing encryption re-keying process";
1337 break;
1338 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
Kevin Barnettd87d5472017-05-03 18:54:00 -05001339 status = "Volume encrypted but encryption is disabled";
Kevin Barnett6c223762016-06-27 16:41:00 -05001340 break;
1341 case CISS_LV_PENDING_ENCRYPTION:
1342 status = "Volume pending migration to encrypted state";
1343 break;
1344 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1345 status = "Volume pending encryption rekeying";
1346 break;
1347 case CISS_LV_NOT_SUPPORTED:
1348 status = "Volume not supported on this controller";
1349 break;
1350 case CISS_LV_STATUS_UNAVAILABLE:
1351 status = "Volume status not available";
1352 break;
1353 default:
1354 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1355 unknown_state_str, device->volume_status);
1356 status = unknown_state_buffer;
1357 break;
1358 }
1359
1360 dev_info(&ctrl_info->pci_dev->dev,
1361 "scsi %d:%d:%d:%d %s\n",
1362 ctrl_info->scsi_host->host_no,
1363 device->bus, device->target, device->lun, status);
1364}
1365
Kevin Barnett6c223762016-06-27 16:41:00 -05001366static void pqi_rescan_worker(struct work_struct *work)
1367{
1368 struct pqi_ctrl_info *ctrl_info;
1369
1370 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1371 rescan_work);
1372
1373 pqi_scan_scsi_devices(ctrl_info);
1374}
1375
1376static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1377 struct pqi_scsi_dev *device)
1378{
1379 int rc;
1380
1381 if (pqi_is_logical_device(device))
1382 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1383 device->target, device->lun);
1384 else
1385 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1386
1387 return rc;
1388}
1389
1390static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1391 struct pqi_scsi_dev *device)
1392{
1393 if (pqi_is_logical_device(device))
1394 scsi_remove_device(device->sdev);
1395 else
1396 pqi_remove_sas_device(device);
1397}
1398
1399/* Assumes the SCSI device list lock is held. */
1400
1401static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1402 int bus, int target, int lun)
1403{
1404 struct pqi_scsi_dev *device;
1405
1406 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1407 scsi_device_list_entry)
1408 if (device->bus == bus && device->target == target &&
1409 device->lun == lun)
1410 return device;
1411
1412 return NULL;
1413}
1414
1415static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1416 struct pqi_scsi_dev *dev2)
1417{
1418 if (dev1->is_physical_device != dev2->is_physical_device)
1419 return false;
1420
1421 if (dev1->is_physical_device)
1422 return dev1->wwid == dev2->wwid;
1423
1424 return memcmp(dev1->volume_id, dev2->volume_id,
1425 sizeof(dev1->volume_id)) == 0;
1426}
1427
1428enum pqi_find_result {
1429 DEVICE_NOT_FOUND,
1430 DEVICE_CHANGED,
1431 DEVICE_SAME,
1432};
1433
1434static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1435 struct pqi_scsi_dev *device_to_find,
1436 struct pqi_scsi_dev **matching_device)
1437{
1438 struct pqi_scsi_dev *device;
1439
1440 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1441 scsi_device_list_entry) {
1442 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1443 device->scsi3addr)) {
1444 *matching_device = device;
1445 if (pqi_device_equal(device_to_find, device)) {
1446 if (device_to_find->volume_offline)
1447 return DEVICE_CHANGED;
1448 return DEVICE_SAME;
1449 }
1450 return DEVICE_CHANGED;
1451 }
1452 }
1453
1454 return DEVICE_NOT_FOUND;
1455}
1456
Kevin Barnett6de783f2017-05-03 18:55:19 -05001457#define PQI_DEV_INFO_BUFFER_LENGTH 128
1458
Kevin Barnett6c223762016-06-27 16:41:00 -05001459static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1460 char *action, struct pqi_scsi_dev *device)
1461{
Kevin Barnett6de783f2017-05-03 18:55:19 -05001462 ssize_t count;
1463 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1464
1465 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1466 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1467
1468 if (device->target_lun_valid)
1469 count += snprintf(buffer + count,
1470 PQI_DEV_INFO_BUFFER_LENGTH - count,
1471 "%d:%d",
1472 device->target,
1473 device->lun);
1474 else
1475 count += snprintf(buffer + count,
1476 PQI_DEV_INFO_BUFFER_LENGTH - count,
1477 "-:-");
1478
1479 if (pqi_is_logical_device(device))
1480 count += snprintf(buffer + count,
1481 PQI_DEV_INFO_BUFFER_LENGTH - count,
1482 " %08x%08x",
1483 *((u32 *)&device->scsi3addr),
1484 *((u32 *)&device->scsi3addr[4]));
1485 else
1486 count += snprintf(buffer + count,
1487 PQI_DEV_INFO_BUFFER_LENGTH - count,
1488 " %016llx", device->sas_address);
1489
1490 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1491 " %s %.8s %.16s ",
Kevin Barnett6c223762016-06-27 16:41:00 -05001492 scsi_device_type(device->devtype),
1493 device->vendor,
Kevin Barnett6de783f2017-05-03 18:55:19 -05001494 device->model);
1495
1496 if (pqi_is_logical_device(device)) {
1497 if (device->devtype == TYPE_DISK)
1498 count += snprintf(buffer + count,
1499 PQI_DEV_INFO_BUFFER_LENGTH - count,
1500 "SSDSmartPathCap%c En%c %-12s",
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001501 device->raid_bypass_configured ? '+' : '-',
1502 device->raid_bypass_enabled ? '+' : '-',
Kevin Barnett6de783f2017-05-03 18:55:19 -05001503 pqi_raid_level_to_string(device->raid_level));
1504 } else {
1505 count += snprintf(buffer + count,
1506 PQI_DEV_INFO_BUFFER_LENGTH - count,
1507 "AIO%c", device->aio_enabled ? '+' : '-');
1508 if (device->devtype == TYPE_DISK ||
1509 device->devtype == TYPE_ZBC)
1510 count += snprintf(buffer + count,
1511 PQI_DEV_INFO_BUFFER_LENGTH - count,
1512 " qd=%-6d", device->queue_depth);
1513 }
1514
1515 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
Kevin Barnett6c223762016-06-27 16:41:00 -05001516}
1517
1518/* Assumes the SCSI device list lock is held. */
1519
1520static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1521 struct pqi_scsi_dev *new_device)
1522{
1523 existing_device->devtype = new_device->devtype;
1524 existing_device->device_type = new_device->device_type;
1525 existing_device->bus = new_device->bus;
1526 if (new_device->target_lun_valid) {
1527 existing_device->target = new_device->target;
1528 existing_device->lun = new_device->lun;
1529 existing_device->target_lun_valid = true;
1530 }
1531
1532 /* By definition, the scsi3addr and wwid fields are already the same. */
1533
1534 existing_device->is_physical_device = new_device->is_physical_device;
Kevin Barnettbd10cf02017-05-03 18:54:12 -05001535 existing_device->is_external_raid_device =
1536 new_device->is_external_raid_device;
Kevin Barnett6c223762016-06-27 16:41:00 -05001537 existing_device->aio_enabled = new_device->aio_enabled;
1538 memcpy(existing_device->vendor, new_device->vendor,
1539 sizeof(existing_device->vendor));
1540 memcpy(existing_device->model, new_device->model,
1541 sizeof(existing_device->model));
1542 existing_device->sas_address = new_device->sas_address;
1543 existing_device->raid_level = new_device->raid_level;
1544 existing_device->queue_depth = new_device->queue_depth;
1545 existing_device->aio_handle = new_device->aio_handle;
1546 existing_device->volume_status = new_device->volume_status;
1547 existing_device->active_path_index = new_device->active_path_index;
1548 existing_device->path_map = new_device->path_map;
1549 existing_device->bay = new_device->bay;
1550 memcpy(existing_device->box, new_device->box,
1551 sizeof(existing_device->box));
1552 memcpy(existing_device->phys_connector, new_device->phys_connector,
1553 sizeof(existing_device->phys_connector));
Kevin Barnett6c223762016-06-27 16:41:00 -05001554 existing_device->offload_to_mirror = 0;
1555 kfree(existing_device->raid_map);
1556 existing_device->raid_map = new_device->raid_map;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05001557 existing_device->raid_bypass_configured =
1558 new_device->raid_bypass_configured;
1559 existing_device->raid_bypass_enabled =
1560 new_device->raid_bypass_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05001561
1562 /* To prevent this from being freed later. */
1563 new_device->raid_map = NULL;
1564}
1565
1566static inline void pqi_free_device(struct pqi_scsi_dev *device)
1567{
1568 if (device) {
1569 kfree(device->raid_map);
1570 kfree(device);
1571 }
1572}
1573
1574/*
1575 * Called when exposing a new device to the OS fails in order to re-adjust
1576 * our internal SCSI device list to match the SCSI ML's view.
1577 */
1578
1579static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1580 struct pqi_scsi_dev *device)
1581{
1582 unsigned long flags;
1583
1584 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1585 list_del(&device->scsi_device_list_entry);
1586 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1587
1588 /* Allow the device structure to be freed later. */
1589 device->keep_device = false;
1590}
1591
1592static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1593 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1594{
1595 int rc;
1596 unsigned int i;
1597 unsigned long flags;
1598 enum pqi_find_result find_result;
1599 struct pqi_scsi_dev *device;
1600 struct pqi_scsi_dev *next;
1601 struct pqi_scsi_dev *matching_device;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001602 LIST_HEAD(add_list);
1603 LIST_HEAD(delete_list);
Kevin Barnett6c223762016-06-27 16:41:00 -05001604
1605 /*
1606 * The idea here is to do as little work as possible while holding the
1607 * spinlock. That's why we go to great pains to defer anything other
1608 * than updating the internal device list until after we release the
1609 * spinlock.
1610 */
1611
1612 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1613
1614 /* Assume that all devices in the existing list have gone away. */
1615 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1616 scsi_device_list_entry)
1617 device->device_gone = true;
1618
1619 for (i = 0; i < num_new_devices; i++) {
1620 device = new_device_list[i];
1621
1622 find_result = pqi_scsi_find_entry(ctrl_info, device,
1623 &matching_device);
1624
1625 switch (find_result) {
1626 case DEVICE_SAME:
1627 /*
1628 * The newly found device is already in the existing
1629 * device list.
1630 */
1631 device->new_device = false;
1632 matching_device->device_gone = false;
1633 pqi_scsi_update_device(matching_device, device);
1634 break;
1635 case DEVICE_NOT_FOUND:
1636 /*
1637 * The newly found device is NOT in the existing device
1638 * list.
1639 */
1640 device->new_device = true;
1641 break;
1642 case DEVICE_CHANGED:
1643 /*
1644 * The original device has gone away and we need to add
1645 * the new device.
1646 */
1647 device->new_device = true;
1648 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05001649 }
1650 }
1651
1652 /* Process all devices that have gone away. */
1653 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1654 scsi_device_list_entry) {
1655 if (device->device_gone) {
1656 list_del(&device->scsi_device_list_entry);
1657 list_add_tail(&device->delete_list_entry, &delete_list);
1658 }
1659 }
1660
1661 /* Process all new devices. */
1662 for (i = 0; i < num_new_devices; i++) {
1663 device = new_device_list[i];
1664 if (!device->new_device)
1665 continue;
1666 if (device->volume_offline)
1667 continue;
1668 list_add_tail(&device->scsi_device_list_entry,
1669 &ctrl_info->scsi_device_list);
1670 list_add_tail(&device->add_list_entry, &add_list);
1671 /* To prevent this device structure from being freed later. */
1672 device->keep_device = true;
1673 }
1674
Kevin Barnett6c223762016-06-27 16:41:00 -05001675 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1676
1677 /* Remove all devices that have gone away. */
1678 list_for_each_entry_safe(device, next, &delete_list,
1679 delete_list_entry) {
Kevin Barnett6c223762016-06-27 16:41:00 -05001680 if (device->volume_offline) {
1681 pqi_dev_info(ctrl_info, "offline", device);
1682 pqi_show_volume_status(ctrl_info, device);
1683 } else {
1684 pqi_dev_info(ctrl_info, "removed", device);
1685 }
Kevin Barnett6de783f2017-05-03 18:55:19 -05001686 if (device->sdev)
1687 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001688 list_del(&device->delete_list_entry);
1689 pqi_free_device(device);
1690 }
1691
1692 /*
1693 * Notify the SCSI ML if the queue depth of any existing device has
1694 * changed.
1695 */
1696 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1697 scsi_device_list_entry) {
1698 if (device->sdev && device->queue_depth !=
1699 device->advertised_queue_depth) {
1700 device->advertised_queue_depth = device->queue_depth;
1701 scsi_change_queue_depth(device->sdev,
1702 device->advertised_queue_depth);
1703 }
1704 }
1705
1706 /* Expose any new devices. */
1707 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
Kevin Barnett94086f52017-05-03 18:54:31 -05001708 if (!device->sdev) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05001709 pqi_dev_info(ctrl_info, "added", device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001710 rc = pqi_add_device(ctrl_info, device);
1711 if (rc) {
1712 dev_warn(&ctrl_info->pci_dev->dev,
1713 "scsi %d:%d:%d:%d addition failed, device not added\n",
1714 ctrl_info->scsi_host->host_no,
1715 device->bus, device->target,
1716 device->lun);
1717 pqi_fixup_botched_add(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001718 }
1719 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001720 }
1721}
1722
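/*
 * Filter out device types that this driver does not expose to the SCSI
 * midlayer; RAID-type LUNs are only accepted for the HBA itself.
 */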
1723static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1724{
1725 bool is_supported = false;
1726
1727 switch (device->devtype) {
1728 case TYPE_DISK:
1729 case TYPE_ZBC:
1730 case TYPE_TAPE:
1731 case TYPE_MEDIUM_CHANGER:
1732 case TYPE_ENCLOSURE:
1733 is_supported = true;
1734 break;
1735 case TYPE_RAID:
1736 /*
1737 * Only support the HBA controller itself as a RAID
1738 * controller. If it's a RAID controller other than
Kevin Barnett376fb882017-05-03 18:54:43 -05001739 * the HBA itself (an external RAID controller, for
1740 * example), we don't support it.
Kevin Barnett6c223762016-06-27 16:41:00 -05001741 */
1742 if (pqi_is_hba_lunid(device->scsi3addr))
1743 is_supported = true;
1744 break;
1745 }
1746
1747 return is_supported;
1748}
1749
Kevin Barnett94086f52017-05-03 18:54:31 -05001750static inline bool pqi_skip_device(u8 *scsi3addr)
Kevin Barnett6c223762016-06-27 16:41:00 -05001751{
Kevin Barnett94086f52017-05-03 18:54:31 -05001752 /* Ignore all masked devices. */
1753 if (MASKED_DEVICE(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05001754 return true;
Kevin Barnett6c223762016-06-27 16:41:00 -05001755
1756 return false;
1757}
1758
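/*
 * Rediscover all devices: fetch the physical and logical LUN lists from
 * the controller, allocate a pqi_scsi_dev for each entry, fill in
 * identity/AIO/RAID-map details, and hand the surviving entries to
 * pqi_update_device_list().  Devices that are not kept are freed on exit.
 */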
Kevin Barnett6c223762016-06-27 16:41:00 -05001759static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1760{
1761 int i;
1762 int rc;
Kevin Barnett8a994a02017-05-03 18:55:37 -05001763 LIST_HEAD(new_device_list_head);
Kevin Barnett6c223762016-06-27 16:41:00 -05001764 struct report_phys_lun_extended *physdev_list = NULL;
1765 struct report_log_lun_extended *logdev_list = NULL;
1766 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1767 struct report_log_lun_extended_entry *log_lun_ext_entry;
1768 struct bmic_identify_physical_device *id_phys = NULL;
1769 u32 num_physicals;
1770 u32 num_logicals;
1771 struct pqi_scsi_dev **new_device_list = NULL;
1772 struct pqi_scsi_dev *device;
1773 struct pqi_scsi_dev *next;
1774 unsigned int num_new_devices;
1775 unsigned int num_valid_devices;
1776 bool is_physical_device;
1777 u8 *scsi3addr;
1778 static char *out_of_memory_msg =
Kevin Barnett6de783f2017-05-03 18:55:19 -05001779 "failed to allocate memory, device discovery stopped";
Kevin Barnett6c223762016-06-27 16:41:00 -05001780
Kevin Barnett6c223762016-06-27 16:41:00 -05001781 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1782 if (rc)
1783 goto out;
1784
1785 if (physdev_list)
1786 num_physicals =
1787 get_unaligned_be32(&physdev_list->header.list_length)
1788 / sizeof(physdev_list->lun_entries[0]);
1789 else
1790 num_physicals = 0;
1791
1792 if (logdev_list)
1793 num_logicals =
1794 get_unaligned_be32(&logdev_list->header.list_length)
1795 / sizeof(logdev_list->lun_entries[0]);
1796 else
1797 num_logicals = 0;
1798
1799 if (num_physicals) {
1800 /*
1801 * We need this buffer for calls to pqi_get_physical_disk_info()
1802 * below. We allocate it here instead of inside
1803 * pqi_get_physical_disk_info() because it's a fairly large
1804 * buffer.
1805 */
1806 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1807 if (!id_phys) {
1808 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1809 out_of_memory_msg);
1810 rc = -ENOMEM;
1811 goto out;
1812 }
1813 }
1814
1815 num_new_devices = num_physicals + num_logicals;
1816
Kees Cook6da2ec52018-06-12 13:55:00 -07001817 new_device_list = kmalloc_array(num_new_devices,
1818 sizeof(*new_device_list),
1819 GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05001820 if (!new_device_list) {
1821 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1822 rc = -ENOMEM;
1823 goto out;
1824 }
1825
1826 for (i = 0; i < num_new_devices; i++) {
1827 device = kzalloc(sizeof(*device), GFP_KERNEL);
1828 if (!device) {
1829 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1830 out_of_memory_msg);
1831 rc = -ENOMEM;
1832 goto out;
1833 }
1834 list_add_tail(&device->new_device_list_entry,
1835 &new_device_list_head);
1836 }
1837
1838 device = NULL;
1839 num_valid_devices = 0;
1840
1841 for (i = 0; i < num_new_devices; i++) {
1842
1843 if (i < num_physicals) {
1844 is_physical_device = true;
1845 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1846 log_lun_ext_entry = NULL;
1847 scsi3addr = phys_lun_ext_entry->lunid;
1848 } else {
1849 is_physical_device = false;
1850 phys_lun_ext_entry = NULL;
1851 log_lun_ext_entry =
1852 &logdev_list->lun_entries[i - num_physicals];
1853 scsi3addr = log_lun_ext_entry->lunid;
1854 }
1855
Kevin Barnett94086f52017-05-03 18:54:31 -05001856 if (is_physical_device && pqi_skip_device(scsi3addr))
Kevin Barnett6c223762016-06-27 16:41:00 -05001857 continue;
1858
1859 if (device)
1860 device = list_next_entry(device, new_device_list_entry);
1861 else
1862 device = list_first_entry(&new_device_list_head,
1863 struct pqi_scsi_dev, new_device_list_entry);
1864
1865 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1866 device->is_physical_device = is_physical_device;
Kevin Barnettbd10cf02017-05-03 18:54:12 -05001867 if (!is_physical_device)
1868 device->is_external_raid_device =
1869 pqi_is_external_raid_addr(scsi3addr);
Kevin Barnett6c223762016-06-27 16:41:00 -05001870
1871 /* Gather information about the device. */
1872 rc = pqi_get_device_info(ctrl_info, device);
1873 if (rc == -ENOMEM) {
1874 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1875 out_of_memory_msg);
1876 goto out;
1877 }
1878 if (rc) {
Kevin Barnett6de783f2017-05-03 18:55:19 -05001879 if (device->is_physical_device)
1880 dev_warn(&ctrl_info->pci_dev->dev,
1881 "obtaining device info failed, skipping physical device %016llx\n",
1882 get_unaligned_be64(
1883 &phys_lun_ext_entry->wwid));
1884 else
1885 dev_warn(&ctrl_info->pci_dev->dev,
1886 "obtaining device info failed, skipping logical device %08x%08x\n",
1887 *((u32 *)&device->scsi3addr),
1888 *((u32 *)&device->scsi3addr[4]));
Kevin Barnett6c223762016-06-27 16:41:00 -05001889 rc = 0;
1890 continue;
1891 }
1892
1893 if (!pqi_is_supported_device(device))
1894 continue;
1895
1896 pqi_assign_bus_target_lun(device);
1897
Kevin Barnett6c223762016-06-27 16:41:00 -05001898 if (device->is_physical_device) {
1899 device->wwid = phys_lun_ext_entry->wwid;
1900 if ((phys_lun_ext_entry->device_flags &
1901 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1902 phys_lun_ext_entry->aio_handle)
1903 device->aio_enabled = true;
1904 } else {
1905 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1906 sizeof(device->volume_id));
1907 }
1908
1909 switch (device->devtype) {
1910 case TYPE_DISK:
1911 case TYPE_ZBC:
1912 case TYPE_ENCLOSURE:
1913 if (device->is_physical_device) {
1914 device->sas_address =
1915 get_unaligned_be64(&device->wwid);
1916 if (device->devtype == TYPE_DISK ||
1917 device->devtype == TYPE_ZBC) {
1918 device->aio_handle =
1919 phys_lun_ext_entry->aio_handle;
1920 pqi_get_physical_disk_info(ctrl_info,
1921 device, id_phys);
1922 }
1923 }
1924 break;
1925 }
1926
1927 new_device_list[num_valid_devices++] = device;
1928 }
1929
1930 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1931
1932out:
1933 list_for_each_entry_safe(device, next, &new_device_list_head,
1934 new_device_list_entry) {
1935 if (device->keep_device)
1936 continue;
1937 list_del(&device->new_device_list_entry);
1938 pqi_free_device(device);
1939 }
1940
1941 kfree(new_device_list);
1942 kfree(physdev_list);
1943 kfree(logdev_list);
1944 kfree(id_phys);
1945
1946 return rc;
1947}
1948
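/*
 * Tear down the entire internal device list.  Each device is unlinked
 * while holding the spinlock, then removed from the SCSI midlayer and
 * freed with the lock dropped.
 */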
1949static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1950{
1951 unsigned long flags;
1952 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05001953
Kevin Barnetta37ef742017-05-03 18:52:22 -05001954 while (1) {
1955 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05001956
Kevin Barnetta37ef742017-05-03 18:52:22 -05001957 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1958 struct pqi_scsi_dev, scsi_device_list_entry);
1959 if (device)
1960 list_del(&device->scsi_device_list_entry);
1961
1962 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1963 flags);
1964
1965 if (!device)
1966 break;
1967
Kevin Barnett6c223762016-06-27 16:41:00 -05001968 if (device->sdev)
1969 pqi_remove_device(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05001970 pqi_free_device(device);
1971 }
Kevin Barnett6c223762016-06-27 16:41:00 -05001972}
1973
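/*
 * Serialized rescan entry point: scan_mutex keeps concurrent scans from
 * racing, and a delayed rescan is scheduled if the update fails.
 */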
1974static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1975{
1976 int rc;
1977
1978 if (pqi_ctrl_offline(ctrl_info))
1979 return -ENXIO;
1980
1981 mutex_lock(&ctrl_info->scan_mutex);
1982
1983 rc = pqi_update_scsi_devices(ctrl_info);
1984 if (rc)
Kevin Barnett5f310422017-05-03 18:54:55 -05001985 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05001986
1987 mutex_unlock(&ctrl_info->scan_mutex);
1988
1989 return rc;
1990}
1991
1992static void pqi_scan_start(struct Scsi_Host *shost)
1993{
1994 pqi_scan_scsi_devices(shost_to_hba(shost));
1995}
1996
1997/* Returns TRUE if scan is finished. */
1998
1999static int pqi_scan_finished(struct Scsi_Host *shost,
2000 unsigned long elapsed_time)
2001{
2002 struct pqi_ctrl_info *ctrl_info;
2003
2004 ctrl_info = shost_priv(shost);
2005
2006 return !mutex_is_locked(&ctrl_info->scan_mutex);
2007}
2008
Kevin Barnett061ef062017-05-03 18:53:05 -05002009static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2010{
2011 mutex_lock(&ctrl_info->scan_mutex);
2012 mutex_unlock(&ctrl_info->scan_mutex);
2013}
2014
2015static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2016{
2017 mutex_lock(&ctrl_info->lun_reset_mutex);
2018 mutex_unlock(&ctrl_info->lun_reset_mutex);
2019}
2020
Kevin Barnett6c223762016-06-27 16:41:00 -05002021static inline void pqi_set_encryption_info(
2022 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2023 u64 first_block)
2024{
2025 u32 volume_blk_size;
2026
2027 /*
2028 * Set the encryption tweak values based on logical block address.
2029 * If the block size is 512, the tweak value is equal to the LBA.
2030	 * For other block sizes, the tweak value is (LBA * block size) / 512.
2031 */
2032 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2033 if (volume_blk_size != 512)
2034 first_block = (first_block * volume_blk_size) / 512;
2035
2036 encryption_info->data_encryption_key_index =
2037 get_unaligned_le16(&raid_map->data_encryption_key_index);
2038 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2039 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2040}
2041
2042/*
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002043 * Attempt to perform RAID bypass mapping for a logical volume I/O.
Kevin Barnett6c223762016-06-27 16:41:00 -05002044 */
2045
2046#define PQI_RAID_BYPASS_INELIGIBLE 1
2047
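/*
 * Decode the LBA and block count from the CDB, locate the corresponding
 * row/column in the cached RAID map, select the target physical disk
 * (handling RAID 1/ADM mirror selection and RAID 50/60 groups), then
 * build a READ/WRITE 10/16 CDB for that disk and submit it on the AIO
 * path.  Requests that cannot be mapped return
 * PQI_RAID_BYPASS_INELIGIBLE and are processed via the normal RAID path.
 */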
2048static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2049 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2050 struct pqi_queue_group *queue_group)
2051{
2052 struct raid_map *raid_map;
2053 bool is_write = false;
2054 u32 map_index;
2055 u64 first_block;
2056 u64 last_block;
2057 u32 block_cnt;
2058 u32 blocks_per_row;
2059 u64 first_row;
2060 u64 last_row;
2061 u32 first_row_offset;
2062 u32 last_row_offset;
2063 u32 first_column;
2064 u32 last_column;
2065 u64 r0_first_row;
2066 u64 r0_last_row;
2067 u32 r5or6_blocks_per_row;
2068 u64 r5or6_first_row;
2069 u64 r5or6_last_row;
2070 u32 r5or6_first_row_offset;
2071 u32 r5or6_last_row_offset;
2072 u32 r5or6_first_column;
2073 u32 r5or6_last_column;
2074 u16 data_disks_per_row;
2075 u32 total_disks_per_row;
2076 u16 layout_map_count;
2077 u32 stripesize;
2078 u16 strip_size;
2079 u32 first_group;
2080 u32 last_group;
2081 u32 current_group;
2082 u32 map_row;
2083 u32 aio_handle;
2084 u64 disk_block;
2085 u32 disk_block_cnt;
2086 u8 cdb[16];
2087 u8 cdb_length;
2088 int offload_to_mirror;
2089 struct pqi_encryption_info *encryption_info_ptr;
2090 struct pqi_encryption_info encryption_info;
2091#if BITS_PER_LONG == 32
2092 u64 tmpdiv;
2093#endif
2094
2095 /* Check for valid opcode, get LBA and block count. */
2096 switch (scmd->cmnd[0]) {
2097 case WRITE_6:
2098 is_write = true;
2099 /* fall through */
2100 case READ_6:
kevin Barnette018ef52016-09-16 15:01:51 -05002101 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2102 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002103 block_cnt = (u32)scmd->cmnd[4];
2104 if (block_cnt == 0)
2105 block_cnt = 256;
2106 break;
2107 case WRITE_10:
2108 is_write = true;
2109 /* fall through */
2110 case READ_10:
2111 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2112 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2113 break;
2114 case WRITE_12:
2115 is_write = true;
2116 /* fall through */
2117 case READ_12:
2118 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2119 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2120 break;
2121 case WRITE_16:
2122 is_write = true;
2123 /* fall through */
2124 case READ_16:
2125 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2126 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2127 break;
2128 default:
2129 /* Process via normal I/O path. */
2130 return PQI_RAID_BYPASS_INELIGIBLE;
2131 }
2132
2133 /* Check for write to non-RAID-0. */
2134 if (is_write && device->raid_level != SA_RAID_0)
2135 return PQI_RAID_BYPASS_INELIGIBLE;
2136
2137 if (unlikely(block_cnt == 0))
2138 return PQI_RAID_BYPASS_INELIGIBLE;
2139
2140 last_block = first_block + block_cnt - 1;
2141 raid_map = device->raid_map;
2142
2143 /* Check for invalid block or wraparound. */
2144 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2145 last_block < first_block)
2146 return PQI_RAID_BYPASS_INELIGIBLE;
2147
2148 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2149 strip_size = get_unaligned_le16(&raid_map->strip_size);
2150 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2151
2152 /* Calculate stripe information for the request. */
2153 blocks_per_row = data_disks_per_row * strip_size;
2154#if BITS_PER_LONG == 32
2155 tmpdiv = first_block;
2156 do_div(tmpdiv, blocks_per_row);
2157 first_row = tmpdiv;
2158 tmpdiv = last_block;
2159 do_div(tmpdiv, blocks_per_row);
2160 last_row = tmpdiv;
2161 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2162 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2163 tmpdiv = first_row_offset;
2164 do_div(tmpdiv, strip_size);
2165 first_column = tmpdiv;
2166 tmpdiv = last_row_offset;
2167 do_div(tmpdiv, strip_size);
2168 last_column = tmpdiv;
2169#else
2170 first_row = first_block / blocks_per_row;
2171 last_row = last_block / blocks_per_row;
2172 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2173 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2174 first_column = first_row_offset / strip_size;
2175 last_column = last_row_offset / strip_size;
2176#endif
2177
2178 /* If this isn't a single row/column then give to the controller. */
2179 if (first_row != last_row || first_column != last_column)
2180 return PQI_RAID_BYPASS_INELIGIBLE;
2181
2182 /* Proceeding with driver mapping. */
2183 total_disks_per_row = data_disks_per_row +
2184 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2185 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2186 get_unaligned_le16(&raid_map->row_cnt);
2187 map_index = (map_row * total_disks_per_row) + first_column;
2188
2189 /* RAID 1 */
2190 if (device->raid_level == SA_RAID_1) {
2191 if (device->offload_to_mirror)
2192 map_index += data_disks_per_row;
2193 device->offload_to_mirror = !device->offload_to_mirror;
2194 } else if (device->raid_level == SA_RAID_ADM) {
2195 /* RAID ADM */
2196 /*
2197 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2198 * divisible by 3.
2199 */
2200 offload_to_mirror = device->offload_to_mirror;
2201 if (offload_to_mirror == 0) {
2202			/* Use the physical disk in the first mirrored group. */
2203 map_index %= data_disks_per_row;
2204 } else {
2205 do {
2206 /*
2207 * Determine mirror group that map_index
2208 * indicates.
2209 */
2210 current_group = map_index / data_disks_per_row;
2211
2212 if (offload_to_mirror != current_group) {
2213 if (current_group <
2214 layout_map_count - 1) {
2215 /*
2216 * Select raid index from
2217 * next group.
2218 */
2219 map_index += data_disks_per_row;
2220 current_group++;
2221 } else {
2222 /*
2223 * Select raid index from first
2224 * group.
2225 */
2226 map_index %= data_disks_per_row;
2227 current_group = 0;
2228 }
2229 }
2230 } while (offload_to_mirror != current_group);
2231 }
2232
2233 /* Set mirror group to use next time. */
2234 offload_to_mirror =
2235 (offload_to_mirror >= layout_map_count - 1) ?
2236 0 : offload_to_mirror + 1;
2237 WARN_ON(offload_to_mirror >= layout_map_count);
2238 device->offload_to_mirror = offload_to_mirror;
2239 /*
2240 * Avoid direct use of device->offload_to_mirror within this
2241 * function since multiple threads might simultaneously
2242		 * increment it beyond the range of device->layout_map_count - 1.
2243 */
2244 } else if ((device->raid_level == SA_RAID_5 ||
2245 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2246 /* RAID 50/60 */
2247 /* Verify first and last block are in same RAID group */
2248 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2249 stripesize = r5or6_blocks_per_row * layout_map_count;
2250#if BITS_PER_LONG == 32
2251 tmpdiv = first_block;
2252 first_group = do_div(tmpdiv, stripesize);
2253 tmpdiv = first_group;
2254 do_div(tmpdiv, r5or6_blocks_per_row);
2255 first_group = tmpdiv;
2256 tmpdiv = last_block;
2257 last_group = do_div(tmpdiv, stripesize);
2258 tmpdiv = last_group;
2259 do_div(tmpdiv, r5or6_blocks_per_row);
2260 last_group = tmpdiv;
2261#else
2262 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2263 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2264#endif
2265 if (first_group != last_group)
2266 return PQI_RAID_BYPASS_INELIGIBLE;
2267
2268 /* Verify request is in a single row of RAID 5/6 */
2269#if BITS_PER_LONG == 32
2270 tmpdiv = first_block;
2271 do_div(tmpdiv, stripesize);
2272 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2273 tmpdiv = last_block;
2274 do_div(tmpdiv, stripesize);
2275 r5or6_last_row = r0_last_row = tmpdiv;
2276#else
2277 first_row = r5or6_first_row = r0_first_row =
2278 first_block / stripesize;
2279 r5or6_last_row = r0_last_row = last_block / stripesize;
2280#endif
2281 if (r5or6_first_row != r5or6_last_row)
2282 return PQI_RAID_BYPASS_INELIGIBLE;
2283
2284 /* Verify request is in a single column */
2285#if BITS_PER_LONG == 32
2286 tmpdiv = first_block;
2287 first_row_offset = do_div(tmpdiv, stripesize);
2288 tmpdiv = first_row_offset;
2289 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2290 r5or6_first_row_offset = first_row_offset;
2291 tmpdiv = last_block;
2292 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2293 tmpdiv = r5or6_last_row_offset;
2294 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2295 tmpdiv = r5or6_first_row_offset;
2296 do_div(tmpdiv, strip_size);
2297 first_column = r5or6_first_column = tmpdiv;
2298 tmpdiv = r5or6_last_row_offset;
2299 do_div(tmpdiv, strip_size);
2300 r5or6_last_column = tmpdiv;
2301#else
2302 first_row_offset = r5or6_first_row_offset =
2303 (u32)((first_block % stripesize) %
2304 r5or6_blocks_per_row);
2305
2306 r5or6_last_row_offset =
2307 (u32)((last_block % stripesize) %
2308 r5or6_blocks_per_row);
2309
2310 first_column = r5or6_first_row_offset / strip_size;
2311 r5or6_first_column = first_column;
2312 r5or6_last_column = r5or6_last_row_offset / strip_size;
2313#endif
2314 if (r5or6_first_column != r5or6_last_column)
2315 return PQI_RAID_BYPASS_INELIGIBLE;
2316
2317 /* Request is eligible */
2318 map_row =
2319 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2320 get_unaligned_le16(&raid_map->row_cnt);
2321
2322 map_index = (first_group *
2323 (get_unaligned_le16(&raid_map->row_cnt) *
2324 total_disks_per_row)) +
2325 (map_row * total_disks_per_row) + first_column;
2326 }
2327
2328 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2329 return PQI_RAID_BYPASS_INELIGIBLE;
2330
2331 aio_handle = raid_map->disk_data[map_index].aio_handle;
2332 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2333 first_row * strip_size +
2334 (first_row_offset - first_column * strip_size);
2335 disk_block_cnt = block_cnt;
2336
2337 /* Handle differing logical/physical block sizes. */
2338 if (raid_map->phys_blk_shift) {
2339 disk_block <<= raid_map->phys_blk_shift;
2340 disk_block_cnt <<= raid_map->phys_blk_shift;
2341 }
2342
2343 if (unlikely(disk_block_cnt > 0xffff))
2344 return PQI_RAID_BYPASS_INELIGIBLE;
2345
2346 /* Build the new CDB for the physical disk I/O. */
2347 if (disk_block > 0xffffffff) {
2348 cdb[0] = is_write ? WRITE_16 : READ_16;
2349 cdb[1] = 0;
2350 put_unaligned_be64(disk_block, &cdb[2]);
2351 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2352 cdb[14] = 0;
2353 cdb[15] = 0;
2354 cdb_length = 16;
2355 } else {
2356 cdb[0] = is_write ? WRITE_10 : READ_10;
2357 cdb[1] = 0;
2358 put_unaligned_be32((u32)disk_block, &cdb[2]);
2359 cdb[6] = 0;
2360 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2361 cdb[9] = 0;
2362 cdb_length = 10;
2363 }
2364
2365 if (get_unaligned_le16(&raid_map->flags) &
2366 RAID_MAP_ENCRYPTION_ENABLED) {
2367 pqi_set_encryption_info(&encryption_info, raid_map,
2368 first_block);
2369 encryption_info_ptr = &encryption_info;
2370 } else {
2371 encryption_info_ptr = NULL;
2372 }
2373
2374 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05002375 cdb, cdb_length, queue_group, encryption_info_ptr, true);
Kevin Barnett6c223762016-06-27 16:41:00 -05002376}
2377
2378#define PQI_STATUS_IDLE 0x0
2379
2380#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2381#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2382
2383#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2384#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2385#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2386#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2387#define PQI_DEVICE_STATE_ERROR 0x4
2388
2389#define PQI_MODE_READY_TIMEOUT_SECS 30
2390#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2391
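/*
 * Poll the PQI device registers until the controller reports PQI mode:
 * first the PQI signature, then the IDLE function/status code, then the
 * "all registers ready" device state.  All three stages share the same
 * PQI_MODE_READY_TIMEOUT_SECS deadline.
 */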
2392static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2393{
2394 struct pqi_device_registers __iomem *pqi_registers;
2395 unsigned long timeout;
2396 u64 signature;
2397 u8 status;
2398
2399 pqi_registers = ctrl_info->pqi_registers;
2400 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2401
2402 while (1) {
2403 signature = readq(&pqi_registers->signature);
2404 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2405 sizeof(signature)) == 0)
2406 break;
2407 if (time_after(jiffies, timeout)) {
2408 dev_err(&ctrl_info->pci_dev->dev,
2409 "timed out waiting for PQI signature\n");
2410 return -ETIMEDOUT;
2411 }
2412 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2413 }
2414
2415 while (1) {
2416 status = readb(&pqi_registers->function_and_status_code);
2417 if (status == PQI_STATUS_IDLE)
2418 break;
2419 if (time_after(jiffies, timeout)) {
2420 dev_err(&ctrl_info->pci_dev->dev,
2421 "timed out waiting for PQI IDLE\n");
2422 return -ETIMEDOUT;
2423 }
2424 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2425 }
2426
2427 while (1) {
2428 if (readl(&pqi_registers->device_status) ==
2429 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2430 break;
2431 if (time_after(jiffies, timeout)) {
2432 dev_err(&ctrl_info->pci_dev->dev,
2433 "timed out waiting for PQI all registers ready\n");
2434 return -ETIMEDOUT;
2435 }
2436 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2437 }
2438
2439 return 0;
2440}
2441
2442static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2443{
2444 struct pqi_scsi_dev *device;
2445
2446 device = io_request->scmd->device->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05002447 device->raid_bypass_enabled = false;
Kevin Barnett376fb882017-05-03 18:54:43 -05002448 device->aio_enabled = false;
Kevin Barnett6c223762016-06-27 16:41:00 -05002449}
2450
Kevin Barnettd87d5472017-05-03 18:54:00 -05002451static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
Kevin Barnett6c223762016-06-27 16:41:00 -05002452{
2453 struct pqi_ctrl_info *ctrl_info;
Kevin Barnette58081a2016-08-31 14:54:29 -05002454 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002455
Kevin Barnett03b288cf2017-05-03 18:54:49 -05002456 device = sdev->hostdata;
2457 if (device->device_offline)
2458 return;
2459
2460 device->device_offline = true;
2461 scsi_device_set_state(sdev, SDEV_OFFLINE);
2462 ctrl_info = shost_to_hba(sdev->host);
2463 pqi_schedule_rescan_worker(ctrl_info);
2464 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2465 path, ctrl_info->scsi_host->host_no, device->bus,
2466 device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05002467}
2468
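/*
 * Translate a RAID-path error descriptor into midlayer status: map
 * data_out_result to a host byte, copy back any sense data, and take
 * the device offline on a 3E/01 hardware-error sense code.
 */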
2469static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2470{
2471 u8 scsi_status;
2472 u8 host_byte;
2473 struct scsi_cmnd *scmd;
2474 struct pqi_raid_error_info *error_info;
2475 size_t sense_data_length;
2476 int residual_count;
2477 int xfer_count;
2478 struct scsi_sense_hdr sshdr;
2479
2480 scmd = io_request->scmd;
2481 if (!scmd)
2482 return;
2483
2484 error_info = io_request->error_info;
2485 scsi_status = error_info->status;
2486 host_byte = DID_OK;
2487
Kevin Barnettf5b63202017-05-03 18:55:07 -05002488 switch (error_info->data_out_result) {
2489 case PQI_DATA_IN_OUT_GOOD:
2490 break;
2491 case PQI_DATA_IN_OUT_UNDERFLOW:
Kevin Barnett6c223762016-06-27 16:41:00 -05002492 xfer_count =
2493 get_unaligned_le32(&error_info->data_out_transferred);
2494 residual_count = scsi_bufflen(scmd) - xfer_count;
2495 scsi_set_resid(scmd, residual_count);
2496 if (xfer_count < scmd->underflow)
2497 host_byte = DID_SOFT_ERROR;
Kevin Barnettf5b63202017-05-03 18:55:07 -05002498 break;
2499 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2500 case PQI_DATA_IN_OUT_ABORTED:
2501 host_byte = DID_ABORT;
2502 break;
2503 case PQI_DATA_IN_OUT_TIMEOUT:
2504 host_byte = DID_TIME_OUT;
2505 break;
2506 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2507 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2508 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2509 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2510 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2511 case PQI_DATA_IN_OUT_ERROR:
2512 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2513 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2514 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2515 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2516 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2517 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2518 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2519 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2520 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2521 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2522 default:
2523 host_byte = DID_ERROR;
2524 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002525 }
2526
2527 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2528 if (sense_data_length == 0)
2529 sense_data_length =
2530 get_unaligned_le16(&error_info->response_data_length);
2531 if (sense_data_length) {
2532 if (sense_data_length > sizeof(error_info->data))
2533 sense_data_length = sizeof(error_info->data);
2534
2535 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2536 scsi_normalize_sense(error_info->data,
2537 sense_data_length, &sshdr) &&
2538 sshdr.sense_key == HARDWARE_ERROR &&
2539 sshdr.asc == 0x3e &&
2540 sshdr.ascq == 0x1) {
Kevin Barnettd87d5472017-05-03 18:54:00 -05002541 pqi_take_device_offline(scmd->device, "RAID");
Kevin Barnett6c223762016-06-27 16:41:00 -05002542 host_byte = DID_NO_CONNECT;
2543 }
2544
2545 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2546 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2547 memcpy(scmd->sense_buffer, error_info->data,
2548 sense_data_length);
2549 }
2550
2551 scmd->result = scsi_status;
2552 set_host_byte(scmd, host_byte);
2553}
2554
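/*
 * Translate an AIO-path error descriptor into midlayer status.  A
 * disabled AIO path flags the request for retry (-EAGAIN); a missing or
 * invalid device takes the device offline unless the request was a RAID
 * bypass attempt.
 */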
2555static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2556{
2557 u8 scsi_status;
2558 u8 host_byte;
2559 struct scsi_cmnd *scmd;
2560 struct pqi_aio_error_info *error_info;
2561 size_t sense_data_length;
2562 int residual_count;
2563 int xfer_count;
2564 bool device_offline;
2565
2566 scmd = io_request->scmd;
2567 error_info = io_request->error_info;
2568 host_byte = DID_OK;
2569 sense_data_length = 0;
2570 device_offline = false;
2571
2572 switch (error_info->service_response) {
2573 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2574 scsi_status = error_info->status;
2575 break;
2576 case PQI_AIO_SERV_RESPONSE_FAILURE:
2577 switch (error_info->status) {
2578 case PQI_AIO_STATUS_IO_ABORTED:
2579 scsi_status = SAM_STAT_TASK_ABORTED;
2580 break;
2581 case PQI_AIO_STATUS_UNDERRUN:
2582 scsi_status = SAM_STAT_GOOD;
2583 residual_count = get_unaligned_le32(
2584 &error_info->residual_count);
2585 scsi_set_resid(scmd, residual_count);
2586 xfer_count = scsi_bufflen(scmd) - residual_count;
2587 if (xfer_count < scmd->underflow)
2588 host_byte = DID_SOFT_ERROR;
2589 break;
2590 case PQI_AIO_STATUS_OVERRUN:
2591 scsi_status = SAM_STAT_GOOD;
2592 break;
2593 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2594 pqi_aio_path_disabled(io_request);
2595 scsi_status = SAM_STAT_GOOD;
2596 io_request->status = -EAGAIN;
2597 break;
2598 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2599 case PQI_AIO_STATUS_INVALID_DEVICE:
Kevin Barnett376fb882017-05-03 18:54:43 -05002600 if (!io_request->raid_bypass) {
2601 device_offline = true;
2602 pqi_take_device_offline(scmd->device, "AIO");
2603 host_byte = DID_NO_CONNECT;
2604 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002605 scsi_status = SAM_STAT_CHECK_CONDITION;
2606 break;
2607 case PQI_AIO_STATUS_IO_ERROR:
2608 default:
2609 scsi_status = SAM_STAT_CHECK_CONDITION;
2610 break;
2611 }
2612 break;
2613 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2614 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2615 scsi_status = SAM_STAT_GOOD;
2616 break;
2617 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2618 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2619 default:
2620 scsi_status = SAM_STAT_CHECK_CONDITION;
2621 break;
2622 }
2623
2624 if (error_info->data_present) {
2625 sense_data_length =
2626 get_unaligned_le16(&error_info->data_length);
2627 if (sense_data_length) {
2628 if (sense_data_length > sizeof(error_info->data))
2629 sense_data_length = sizeof(error_info->data);
2630 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2631 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2632 memcpy(scmd->sense_buffer, error_info->data,
2633 sense_data_length);
2634 }
2635 }
2636
2637 if (device_offline && sense_data_length == 0)
2638 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2639 0x3e, 0x1);
2640
2641 scmd->result = scsi_status;
2642 set_host_byte(scmd, host_byte);
2643}
2644
2645static void pqi_process_io_error(unsigned int iu_type,
2646 struct pqi_io_request *io_request)
2647{
2648 switch (iu_type) {
2649 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2650 pqi_process_raid_io_error(io_request);
2651 break;
2652 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2653 pqi_process_aio_io_error(io_request);
2654 break;
2655 }
2656}
2657
2658static int pqi_interpret_task_management_response(
2659 struct pqi_task_management_response *response)
2660{
2661 int rc;
2662
2663 switch (response->response_code) {
Kevin Barnettb17f0482016-08-31 14:54:17 -05002664 case SOP_TMF_COMPLETE:
2665 case SOP_TMF_FUNCTION_SUCCEEDED:
Kevin Barnett6c223762016-06-27 16:41:00 -05002666 rc = 0;
2667 break;
2668 default:
2669 rc = -EIO;
2670 break;
2671 }
2672
2673 return rc;
2674}
2675
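/*
 * Drain one queue group's outbound queue: look up the originating
 * io_request by request_id, record status or error info based on the
 * response IU type, invoke the completion callback, and finally publish
 * the new consumer index back to the controller.
 */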
2676static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2677 struct pqi_queue_group *queue_group)
2678{
2679 unsigned int num_responses;
2680 pqi_index_t oq_pi;
2681 pqi_index_t oq_ci;
2682 struct pqi_io_request *io_request;
2683 struct pqi_io_response *response;
2684 u16 request_id;
2685
2686 num_responses = 0;
2687 oq_ci = queue_group->oq_ci_copy;
2688
2689 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002690 oq_pi = readl(queue_group->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05002691 if (oq_pi == oq_ci)
2692 break;
2693
2694 num_responses++;
2695 response = queue_group->oq_element_array +
2696 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2697
2698 request_id = get_unaligned_le16(&response->request_id);
2699 WARN_ON(request_id >= ctrl_info->max_io_slots);
2700
2701 io_request = &ctrl_info->io_request_pool[request_id];
2702 WARN_ON(atomic_read(&io_request->refcount) == 0);
2703
2704 switch (response->header.iu_type) {
2705 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2706 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2707 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2708 break;
Kevin Barnettb212c252018-12-07 16:28:10 -06002709 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2710 io_request->status =
2711 get_unaligned_le16(
2712 &((struct pqi_vendor_general_response *)
2713 response)->status);
2714 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05002715 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2716 io_request->status =
2717 pqi_interpret_task_management_response(
2718 (void *)response);
2719 break;
2720 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2721 pqi_aio_path_disabled(io_request);
2722 io_request->status = -EAGAIN;
2723 break;
2724 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2725 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2726 io_request->error_info = ctrl_info->error_buffer +
2727 (get_unaligned_le16(&response->error_index) *
2728 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2729 pqi_process_io_error(response->header.iu_type,
2730 io_request);
2731 break;
2732 default:
2733 dev_err(&ctrl_info->pci_dev->dev,
2734 "unexpected IU type: 0x%x\n",
2735 response->header.iu_type);
Kevin Barnett6c223762016-06-27 16:41:00 -05002736 break;
2737 }
2738
2739 io_request->io_complete_callback(io_request,
2740 io_request->context);
2741
2742 /*
2743 * Note that the I/O request structure CANNOT BE TOUCHED after
2744 * returning from the I/O completion callback!
2745 */
2746
2747 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2748 }
2749
2750 if (num_responses) {
2751 queue_group->oq_ci_copy = oq_ci;
2752 writel(oq_ci, queue_group->oq_ci);
2753 }
2754
2755 return num_responses;
2756}
2757
2758static inline unsigned int pqi_num_elements_free(unsigned int pi,
Kevin Barnettdf7a1fc2016-08-31 14:54:59 -05002759 unsigned int ci, unsigned int elements_in_queue)
Kevin Barnett6c223762016-06-27 16:41:00 -05002760{
2761 unsigned int num_elements_used;
2762
2763 if (pi >= ci)
2764 num_elements_used = pi - ci;
2765 else
2766 num_elements_used = elements_in_queue - ci + pi;
2767
2768 return elements_in_queue - num_elements_used - 1;
2769}
2770
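/*
 * Post an event acknowledgement IU on the RAID path of the default queue
 * group.  If the inbound queue is full, keep retrying (dropping the
 * submit lock between attempts) until space frees up or the controller
 * goes offline.
 */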
Kevin Barnett98f87662017-05-03 18:53:11 -05002771static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05002772 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2773{
2774 pqi_index_t iq_pi;
2775 pqi_index_t iq_ci;
2776 unsigned long flags;
2777 void *next_element;
Kevin Barnett6c223762016-06-27 16:41:00 -05002778 struct pqi_queue_group *queue_group;
2779
2780 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2781 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2782
Kevin Barnett6c223762016-06-27 16:41:00 -05002783 while (1) {
2784 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2785
2786 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002787 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002788
2789 if (pqi_num_elements_free(iq_pi, iq_ci,
2790 ctrl_info->num_elements_per_iq))
2791 break;
2792
2793 spin_unlock_irqrestore(
2794 &queue_group->submit_lock[RAID_PATH], flags);
2795
Kevin Barnett98f87662017-05-03 18:53:11 -05002796 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05002797 return;
Kevin Barnett6c223762016-06-27 16:41:00 -05002798 }
2799
2800 next_element = queue_group->iq_element_array[RAID_PATH] +
2801 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2802
2803 memcpy(next_element, iu, iu_length);
2804
2805 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
Kevin Barnett6c223762016-06-27 16:41:00 -05002806 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2807
2808 /*
2809 * This write notifies the controller that an IU is available to be
2810 * processed.
2811 */
2812 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2813
2814 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002815}
2816
2817static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2818 struct pqi_event *event)
2819{
2820 struct pqi_event_acknowledge_request request;
2821
2822 memset(&request, 0, sizeof(request));
2823
2824 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2825 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2826 &request.header.iu_length);
2827 request.event_type = event->event_type;
2828 request.event_id = event->event_id;
2829 request.additional_event_id = event->additional_event_id;
2830
Kevin Barnett98f87662017-05-03 18:53:11 -05002831 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
Kevin Barnett6c223762016-06-27 16:41:00 -05002832}
2833
2834static void pqi_event_worker(struct work_struct *work)
2835{
2836 unsigned int i;
2837 struct pqi_ctrl_info *ctrl_info;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002838 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002839
2840 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2841
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002842 pqi_ctrl_busy(ctrl_info);
2843 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
Kevin Barnett5f310422017-05-03 18:54:55 -05002844 if (pqi_ctrl_offline(ctrl_info))
2845 goto out;
2846
2847 pqi_schedule_rescan_worker_delayed(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002848
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002849 event = ctrl_info->events;
Kevin Barnett6c223762016-06-27 16:41:00 -05002850 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002851 if (event->pending) {
2852 event->pending = false;
2853 pqi_acknowledge_event(ctrl_info, event);
Kevin Barnett6c223762016-06-27 16:41:00 -05002854 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002855 event++;
Kevin Barnett6c223762016-06-27 16:41:00 -05002856 }
2857
Kevin Barnett5f310422017-05-03 18:54:55 -05002858out:
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002859 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002860}
2861
Kevin Barnett98f87662017-05-03 18:53:11 -05002862#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
Kevin Barnett6c223762016-06-27 16:41:00 -05002863
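/*
 * Runs every PQI_HEARTBEAT_TIMER_INTERVAL.  If neither the interrupt
 * count nor the firmware heartbeat counter has advanced since the last
 * tick, the controller is assumed dead and taken offline; otherwise the
 * timer is re-armed.
 */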
Kees Cook74a0f572017-10-11 16:27:10 -07002864static void pqi_heartbeat_timer_handler(struct timer_list *t)
Kevin Barnett6c223762016-06-27 16:41:00 -05002865{
2866 int num_interrupts;
Kevin Barnett98f87662017-05-03 18:53:11 -05002867 u32 heartbeat_count;
Kees Cook74a0f572017-10-11 16:27:10 -07002868 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
2869 heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05002870
Kevin Barnett98f87662017-05-03 18:53:11 -05002871 pqi_check_ctrl_health(ctrl_info);
2872 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett061ef062017-05-03 18:53:05 -05002873 return;
2874
Kevin Barnett6c223762016-06-27 16:41:00 -05002875 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05002876 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002877
2878 if (num_interrupts == ctrl_info->previous_num_interrupts) {
Kevin Barnett98f87662017-05-03 18:53:11 -05002879 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2880 dev_err(&ctrl_info->pci_dev->dev,
2881 "no heartbeat detected - last heartbeat count: %u\n",
2882 heartbeat_count);
Kevin Barnett6c223762016-06-27 16:41:00 -05002883 pqi_take_ctrl_offline(ctrl_info);
2884 return;
2885 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002886 } else {
Kevin Barnett98f87662017-05-03 18:53:11 -05002887 ctrl_info->previous_num_interrupts = num_interrupts;
Kevin Barnett6c223762016-06-27 16:41:00 -05002888 }
2889
Kevin Barnett98f87662017-05-03 18:53:11 -05002890 ctrl_info->previous_heartbeat_count = heartbeat_count;
Kevin Barnett6c223762016-06-27 16:41:00 -05002891 mod_timer(&ctrl_info->heartbeat_timer,
2892 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2893}
2894
2895static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2896{
Kevin Barnett98f87662017-05-03 18:53:11 -05002897 if (!ctrl_info->heartbeat_counter)
2898 return;
2899
Kevin Barnett6c223762016-06-27 16:41:00 -05002900 ctrl_info->previous_num_interrupts =
2901 atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05002902 ctrl_info->previous_heartbeat_count =
2903 pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002904
Kevin Barnett6c223762016-06-27 16:41:00 -05002905 ctrl_info->heartbeat_timer.expires =
2906 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
Kevin Barnett061ef062017-05-03 18:53:05 -05002907 add_timer(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05002908}
2909
2910static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2911{
Kevin Barnett98f87662017-05-03 18:53:11 -05002912 del_timer_sync(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05002913}
2914
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002915static inline int pqi_event_type_to_event_index(unsigned int event_type)
Kevin Barnett6c223762016-06-27 16:41:00 -05002916{
2917 int index;
2918
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002919 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2920 if (event_type == pqi_supported_event_types[index])
2921 return index;
Kevin Barnett6c223762016-06-27 16:41:00 -05002922
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002923 return -1;
2924}
2925
2926static inline bool pqi_is_supported_event(unsigned int event_type)
2927{
2928 return pqi_event_type_to_event_index(event_type) != -1;
Kevin Barnett6c223762016-06-27 16:41:00 -05002929}
2930
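/*
 * Drain the event queue: for each supported event type that requests
 * acknowledgement, latch the event details in ctrl_info->events and
 * schedule the event worker to acknowledge and handle them.
 */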
2931static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2932{
2933 unsigned int num_events;
2934 pqi_index_t oq_pi;
2935 pqi_index_t oq_ci;
2936 struct pqi_event_queue *event_queue;
2937 struct pqi_event_response *response;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002938 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002939 int event_index;
2940
2941 event_queue = &ctrl_info->event_queue;
2942 num_events = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05002943 oq_ci = event_queue->oq_ci_copy;
2944
2945 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05002946 oq_pi = readl(event_queue->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05002947 if (oq_pi == oq_ci)
2948 break;
2949
2950 num_events++;
2951 response = event_queue->oq_element_array +
2952 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2953
2954 event_index =
2955 pqi_event_type_to_event_index(response->event_type);
2956
2957 if (event_index >= 0) {
2958 if (response->request_acknowlege) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002959 event = &ctrl_info->events[event_index];
2960 event->pending = true;
2961 event->event_type = response->event_type;
2962 event->event_id = response->event_id;
2963 event->additional_event_id =
Kevin Barnett6c223762016-06-27 16:41:00 -05002964 response->additional_event_id;
Kevin Barnett6c223762016-06-27 16:41:00 -05002965 }
2966 }
2967
2968 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2969 }
2970
2971 if (num_events) {
2972 event_queue->oq_ci_copy = oq_ci;
2973 writel(oq_ci, event_queue->oq_ci);
Kevin Barnett98f87662017-05-03 18:53:11 -05002974 schedule_work(&ctrl_info->event_work);
Kevin Barnett6c223762016-06-27 16:41:00 -05002975 }
2976
2977 return num_events;
2978}
2979
Kevin Barnett061ef062017-05-03 18:53:05 -05002980#define PQI_LEGACY_INTX_MASK 0x1
2981
2982static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2983 bool enable_intx)
2984{
2985 u32 intx_mask;
2986 struct pqi_device_registers __iomem *pqi_registers;
2987 volatile void __iomem *register_addr;
2988
2989 pqi_registers = ctrl_info->pqi_registers;
2990
2991 if (enable_intx)
2992 register_addr = &pqi_registers->legacy_intx_mask_clear;
2993 else
2994 register_addr = &pqi_registers->legacy_intx_mask_set;
2995
2996 intx_mask = readl(register_addr);
2997 intx_mask |= PQI_LEGACY_INTX_MASK;
2998 writel(intx_mask, register_addr);
2999}
3000
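/*
 * Switch between MSI-X, legacy INTx, and no-interrupt operation.  The
 * transitions toggle the PQI legacy INTx mask registers and tell the
 * firmware, via the SIS interface, which interrupt mode to generate.
 */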
3001static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3002 enum pqi_irq_mode new_mode)
3003{
3004 switch (ctrl_info->irq_mode) {
3005 case IRQ_MODE_MSIX:
3006 switch (new_mode) {
3007 case IRQ_MODE_MSIX:
3008 break;
3009 case IRQ_MODE_INTX:
3010 pqi_configure_legacy_intx(ctrl_info, true);
Kevin Barnett061ef062017-05-03 18:53:05 -05003011 sis_enable_intx(ctrl_info);
3012 break;
3013 case IRQ_MODE_NONE:
Kevin Barnett061ef062017-05-03 18:53:05 -05003014 break;
3015 }
3016 break;
3017 case IRQ_MODE_INTX:
3018 switch (new_mode) {
3019 case IRQ_MODE_MSIX:
3020 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003021 sis_enable_msix(ctrl_info);
3022 break;
3023 case IRQ_MODE_INTX:
3024 break;
3025 case IRQ_MODE_NONE:
3026 pqi_configure_legacy_intx(ctrl_info, false);
Kevin Barnett061ef062017-05-03 18:53:05 -05003027 break;
3028 }
3029 break;
3030 case IRQ_MODE_NONE:
3031 switch (new_mode) {
3032 case IRQ_MODE_MSIX:
3033 sis_enable_msix(ctrl_info);
3034 break;
3035 case IRQ_MODE_INTX:
3036 pqi_configure_legacy_intx(ctrl_info, true);
3037 sis_enable_intx(ctrl_info);
3038 break;
3039 case IRQ_MODE_NONE:
3040 break;
3041 }
3042 break;
3043 }
3044
3045 ctrl_info->irq_mode = new_mode;
3046}
3047
3048#define PQI_LEGACY_INTX_PENDING 0x1
3049
3050static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3051{
3052 bool valid_irq;
3053 u32 intx_status;
3054
3055 switch (ctrl_info->irq_mode) {
3056 case IRQ_MODE_MSIX:
3057 valid_irq = true;
3058 break;
3059 case IRQ_MODE_INTX:
3060 intx_status =
3061 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3062 if (intx_status & PQI_LEGACY_INTX_PENDING)
3063 valid_irq = true;
3064 else
3065 valid_irq = false;
3066 break;
3067 case IRQ_MODE_NONE:
3068 default:
3069 valid_irq = false;
3070 break;
3071 }
3072
3073 return valid_irq;
3074}
3075
Kevin Barnett6c223762016-06-27 16:41:00 -05003076static irqreturn_t pqi_irq_handler(int irq, void *data)
3077{
3078 struct pqi_ctrl_info *ctrl_info;
3079 struct pqi_queue_group *queue_group;
3080 unsigned int num_responses_handled;
3081
3082 queue_group = data;
3083 ctrl_info = queue_group->ctrl_info;
3084
Kevin Barnett061ef062017-05-03 18:53:05 -05003085 if (!pqi_is_valid_irq(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05003086 return IRQ_NONE;
3087
3088 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3089
3090 if (irq == ctrl_info->event_irq)
3091 num_responses_handled += pqi_process_event_intr(ctrl_info);
3092
3093 if (num_responses_handled)
3094 atomic_inc(&ctrl_info->num_interrupts);
3095
3096 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3097 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3098
3099 return IRQ_HANDLED;
3100}
3101
3102static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3103{
Kevin Barnettd91d7822017-05-03 18:53:30 -05003104 struct pci_dev *pci_dev = ctrl_info->pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05003105 int i;
3106 int rc;
3107
Kevin Barnettd91d7822017-05-03 18:53:30 -05003108 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05003109
3110 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003111 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
Christoph Hellwig52198222016-11-01 08:12:49 -06003112 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003113 if (rc) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003114 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05003115 "irq %u init failed with error %d\n",
Kevin Barnettd91d7822017-05-03 18:53:30 -05003116 pci_irq_vector(pci_dev, i), rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05003117 return rc;
3118 }
3119 ctrl_info->num_msix_vectors_initialized++;
3120 }
3121
3122 return 0;
3123}
3124
Kevin Barnett98bf0612017-05-03 18:52:28 -05003125static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3126{
3127 int i;
3128
3129 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3130 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3131 &ctrl_info->queue_groups[i]);
3132
3133 ctrl_info->num_msix_vectors_initialized = 0;
3134}
3135
Kevin Barnett6c223762016-06-27 16:41:00 -05003136static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3137{
Kevin Barnett98bf0612017-05-03 18:52:28 -05003138 int num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003139
Kevin Barnett98bf0612017-05-03 18:52:28 -05003140 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
Christoph Hellwig52198222016-11-01 08:12:49 -06003141 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3142 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
Kevin Barnett98bf0612017-05-03 18:52:28 -05003143 if (num_vectors_enabled < 0) {
Kevin Barnett6c223762016-06-27 16:41:00 -05003144 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett98bf0612017-05-03 18:52:28 -05003145 "MSI-X init failed with error %d\n",
3146 num_vectors_enabled);
3147 return num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003148 }
3149
Kevin Barnett98bf0612017-05-03 18:52:28 -05003150 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
Kevin Barnett061ef062017-05-03 18:53:05 -05003151 ctrl_info->irq_mode = IRQ_MODE_MSIX;
Kevin Barnett6c223762016-06-27 16:41:00 -05003152 return 0;
3153}
3154
Kevin Barnett98bf0612017-05-03 18:52:28 -05003155static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3156{
3157 if (ctrl_info->num_msix_vectors_enabled) {
3158 pci_free_irq_vectors(ctrl_info->pci_dev);
3159 ctrl_info->num_msix_vectors_enabled = 0;
3160 }
3161}
3162
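/*
 * Size and allocate a single coherent DMA buffer that holds every
 * inbound and outbound element array, the event queue, and all queue
 * index words, then carve it up with the required PQI alignments and
 * record the bus address of each piece.
 */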
Kevin Barnett6c223762016-06-27 16:41:00 -05003163static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3164{
3165 unsigned int i;
3166 size_t alloc_length;
3167 size_t element_array_length_per_iq;
3168 size_t element_array_length_per_oq;
3169 void *element_array;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003170 void __iomem *next_queue_index;
Kevin Barnett6c223762016-06-27 16:41:00 -05003171 void *aligned_pointer;
3172 unsigned int num_inbound_queues;
3173 unsigned int num_outbound_queues;
3174 unsigned int num_queue_indexes;
3175 struct pqi_queue_group *queue_group;
3176
3177 element_array_length_per_iq =
3178 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3179 ctrl_info->num_elements_per_iq;
3180 element_array_length_per_oq =
3181 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3182 ctrl_info->num_elements_per_oq;
3183 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3184 num_outbound_queues = ctrl_info->num_queue_groups;
3185 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3186
3187 aligned_pointer = NULL;
3188
3189 for (i = 0; i < num_inbound_queues; i++) {
3190 aligned_pointer = PTR_ALIGN(aligned_pointer,
3191 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3192 aligned_pointer += element_array_length_per_iq;
3193 }
3194
3195 for (i = 0; i < num_outbound_queues; i++) {
3196 aligned_pointer = PTR_ALIGN(aligned_pointer,
3197 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3198 aligned_pointer += element_array_length_per_oq;
3199 }
3200
3201 aligned_pointer = PTR_ALIGN(aligned_pointer,
3202 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3203 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3204 PQI_EVENT_OQ_ELEMENT_LENGTH;
3205
3206 for (i = 0; i < num_queue_indexes; i++) {
3207 aligned_pointer = PTR_ALIGN(aligned_pointer,
3208 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3209 aligned_pointer += sizeof(pqi_index_t);
3210 }
3211
3212 alloc_length = (size_t)aligned_pointer +
3213 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3214
Kevin Barnette1d213b2017-05-03 18:53:18 -05003215 alloc_length += PQI_EXTRA_SGL_MEMORY;
3216
Kevin Barnett6c223762016-06-27 16:41:00 -05003217 ctrl_info->queue_memory_base =
3218 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3219 alloc_length,
3220 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3221
Kevin Barnettd87d5472017-05-03 18:54:00 -05003222 if (!ctrl_info->queue_memory_base)
Kevin Barnett6c223762016-06-27 16:41:00 -05003223 return -ENOMEM;
Kevin Barnett6c223762016-06-27 16:41:00 -05003224
3225 ctrl_info->queue_memory_length = alloc_length;
3226
3227 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3228 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3229
3230 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3231 queue_group = &ctrl_info->queue_groups[i];
3232 queue_group->iq_element_array[RAID_PATH] = element_array;
3233 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3234 ctrl_info->queue_memory_base_dma_handle +
3235 (element_array - ctrl_info->queue_memory_base);
3236 element_array += element_array_length_per_iq;
3237 element_array = PTR_ALIGN(element_array,
3238 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3239 queue_group->iq_element_array[AIO_PATH] = element_array;
3240 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3241 ctrl_info->queue_memory_base_dma_handle +
3242 (element_array - ctrl_info->queue_memory_base);
3243 element_array += element_array_length_per_iq;
3244 element_array = PTR_ALIGN(element_array,
3245 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3246 }
3247
3248 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3249 queue_group = &ctrl_info->queue_groups[i];
3250 queue_group->oq_element_array = element_array;
3251 queue_group->oq_element_array_bus_addr =
3252 ctrl_info->queue_memory_base_dma_handle +
3253 (element_array - ctrl_info->queue_memory_base);
3254 element_array += element_array_length_per_oq;
3255 element_array = PTR_ALIGN(element_array,
3256 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3257 }
3258
3259 ctrl_info->event_queue.oq_element_array = element_array;
3260 ctrl_info->event_queue.oq_element_array_bus_addr =
3261 ctrl_info->queue_memory_base_dma_handle +
3262 (element_array - ctrl_info->queue_memory_base);
3263 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3264 PQI_EVENT_OQ_ELEMENT_LENGTH;
3265
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003266 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
Kevin Barnett6c223762016-06-27 16:41:00 -05003267 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3268
3269 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3270 queue_group = &ctrl_info->queue_groups[i];
3271 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3272 queue_group->iq_ci_bus_addr[RAID_PATH] =
3273 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003274 (next_queue_index -
3275 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003276 next_queue_index += sizeof(pqi_index_t);
3277 next_queue_index = PTR_ALIGN(next_queue_index,
3278 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3279 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3280 queue_group->iq_ci_bus_addr[AIO_PATH] =
3281 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003282 (next_queue_index -
3283 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003284 next_queue_index += sizeof(pqi_index_t);
3285 next_queue_index = PTR_ALIGN(next_queue_index,
3286 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3287 queue_group->oq_pi = next_queue_index;
3288 queue_group->oq_pi_bus_addr =
3289 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003290 (next_queue_index -
3291 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003292 next_queue_index += sizeof(pqi_index_t);
3293 next_queue_index = PTR_ALIGN(next_queue_index,
3294 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3295 }
3296
3297 ctrl_info->event_queue.oq_pi = next_queue_index;
3298 ctrl_info->event_queue.oq_pi_bus_addr =
3299 ctrl_info->queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003300 (next_queue_index -
3301 (void __iomem *)ctrl_info->queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003302
3303 return 0;
3304}
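/*
 * Layout note (illustrative, not part of the driver): every element array
 * and queue index above is carved out of the single dma_zalloc_coherent()
 * block, so a bus address is always the base DMA handle plus the same
 * offset the CPU pointer has from the start of the block.  Assuming a
 * hypothetical carved-out pointer "chunk", the pattern is:
 *
 *	void *chunk = PTR_ALIGN(ctrl_info->queue_memory_base,
 *		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
 *	dma_addr_t chunk_bus_addr =
 *		ctrl_info->queue_memory_base_dma_handle +
 *		(chunk - ctrl_info->queue_memory_base);
 *
 * The sizing pass at the top of the function walks the same PTR_ALIGN()
 * sequence starting from a NULL pointer purely to compute the worst-case
 * length, which is why alloc_length ends up as the final aligned_pointer
 * value plus one alignment's worth of slack plus PQI_EXTRA_SGL_MEMORY.
 */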
3305
3306static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3307{
3308 unsigned int i;
3309 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3310 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3311
3312 /*
3313 * Initialize the backpointers to the controller structure in
3314 * each operational queue group structure.
3315 */
3316 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3317 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3318
3319 /*
3320 * Assign IDs to all operational queues. Note that the IDs
3321 * assigned to operational IQs are independent of the IDs
3322 * assigned to operational OQs.
3323 */
3324 ctrl_info->event_queue.oq_id = next_oq_id++;
3325 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3326 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3327 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3328 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3329 }
3330
3331 /*
3332 * Assign MSI-X table entry indexes to all queues. Note that the
3333 * interrupt for the event queue is shared with the first queue group.
3334 */
3335 ctrl_info->event_queue.int_msg_num = 0;
3336 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3337 ctrl_info->queue_groups[i].int_msg_num = i;
3338
3339 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3340 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3341 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3342 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3343 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3344 }
3345}
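/*
 * Worked example (assumes a hypothetical three queue groups and that
 * PQI_MIN_OPERATIONAL_QUEUE_ID is 1): the loops above produce
 *
 *	event queue:    oq_id = 1,                  int_msg_num = 0
 *	queue group 0:  iq_id = {1, 2}, oq_id = 2,  int_msg_num = 0
 *	queue group 1:  iq_id = {3, 4}, oq_id = 3,  int_msg_num = 1
 *	queue group 2:  iq_id = {5, 6}, oq_id = 4,  int_msg_num = 2
 *
 * IQ and OQ IDs come from independent counters, so overlapping numbers
 * between the two namespaces are expected; the event queue deliberately
 * shares MSI-X vector 0 with the first queue group.
 */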
3346
3347static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3348{
3349 size_t alloc_length;
3350 struct pqi_admin_queues_aligned *admin_queues_aligned;
3351 struct pqi_admin_queues *admin_queues;
3352
3353 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3354 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3355
3356 ctrl_info->admin_queue_memory_base =
3357 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3358 alloc_length,
3359 &ctrl_info->admin_queue_memory_base_dma_handle,
3360 GFP_KERNEL);
3361
3362 if (!ctrl_info->admin_queue_memory_base)
3363 return -ENOMEM;
3364
3365 ctrl_info->admin_queue_memory_length = alloc_length;
3366
3367 admin_queues = &ctrl_info->admin_queues;
3368 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3369 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3370 admin_queues->iq_element_array =
3371 &admin_queues_aligned->iq_element_array;
3372 admin_queues->oq_element_array =
3373 &admin_queues_aligned->oq_element_array;
3374 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003375 admin_queues->oq_pi =
3376 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
Kevin Barnett6c223762016-06-27 16:41:00 -05003377
3378 admin_queues->iq_element_array_bus_addr =
3379 ctrl_info->admin_queue_memory_base_dma_handle +
3380 (admin_queues->iq_element_array -
3381 ctrl_info->admin_queue_memory_base);
3382 admin_queues->oq_element_array_bus_addr =
3383 ctrl_info->admin_queue_memory_base_dma_handle +
3384 (admin_queues->oq_element_array -
3385 ctrl_info->admin_queue_memory_base);
3386 admin_queues->iq_ci_bus_addr =
3387 ctrl_info->admin_queue_memory_base_dma_handle +
3388 ((void *)admin_queues->iq_ci -
3389 ctrl_info->admin_queue_memory_base);
3390 admin_queues->oq_pi_bus_addr =
3391 ctrl_info->admin_queue_memory_base_dma_handle +
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003392 ((void __iomem *)admin_queues->oq_pi -
3393 (void __iomem *)ctrl_info->admin_queue_memory_base);
Kevin Barnett6c223762016-06-27 16:41:00 -05003394
3395 return 0;
3396}
3397
3398#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3399#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3400
3401static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3402{
3403 struct pqi_device_registers __iomem *pqi_registers;
3404 struct pqi_admin_queues *admin_queues;
3405 unsigned long timeout;
3406 u8 status;
3407 u32 reg;
3408
3409 pqi_registers = ctrl_info->pqi_registers;
3410 admin_queues = &ctrl_info->admin_queues;
3411
3412 writeq((u64)admin_queues->iq_element_array_bus_addr,
3413 &pqi_registers->admin_iq_element_array_addr);
3414 writeq((u64)admin_queues->oq_element_array_bus_addr,
3415 &pqi_registers->admin_oq_element_array_addr);
3416 writeq((u64)admin_queues->iq_ci_bus_addr,
3417 &pqi_registers->admin_iq_ci_addr);
3418 writeq((u64)admin_queues->oq_pi_bus_addr,
3419 &pqi_registers->admin_oq_pi_addr);
3420
	/*
	 * Pack the admin queue parameters: IQ element count in bits 0-7,
	 * OQ element count in bits 8-15, and the MSI-X message number
	 * starting at bit 16.
	 */
	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
		(admin_queues->int_msg_num << 16);
3424 writel(reg, &pqi_registers->admin_iq_num_elements);
3425 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3426 &pqi_registers->function_and_status_code);
3427
3428 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3429 while (1) {
3430 status = readb(&pqi_registers->function_and_status_code);
3431 if (status == PQI_STATUS_IDLE)
3432 break;
3433 if (time_after(jiffies, timeout))
3434 return -ETIMEDOUT;
3435 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3436 }
3437
3438 /*
3439 * The offset registers are not initialized to the correct
3440 * offsets until *after* the create admin queue pair command
3441 * completes successfully.
3442 */
3443 admin_queues->iq_pi = ctrl_info->iomem_base +
3444 PQI_DEVICE_REGISTERS_OFFSET +
3445 readq(&pqi_registers->admin_iq_pi_offset);
3446 admin_queues->oq_ci = ctrl_info->iomem_base +
3447 PQI_DEVICE_REGISTERS_OFFSET +
3448 readq(&pqi_registers->admin_oq_ci_offset);
3449
3450 return 0;
3451}
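/*
 * The wait loop above is the driver's standard bounded-poll idiom: capture
 * an absolute jiffies deadline up front, re-read the status register, and
 * sleep between polls.  A minimal sketch of the same idiom, assuming a
 * hypothetical read_hw_status() accessor and EXPECTED_VALUE constant:
 *
 *	unsigned long timeout = msecs_to_jiffies(1000) + jiffies;
 *
 *	while (read_hw_status() != EXPECTED_VALUE) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		msleep(1);
 *	}
 *
 * With PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES equal to HZ and a 1 ms poll
 * interval, the firmware gets roughly one second to report PQI_STATUS_IDLE.
 */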
3452
3453static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3454 struct pqi_general_admin_request *request)
3455{
3456 struct pqi_admin_queues *admin_queues;
3457 void *next_element;
3458 pqi_index_t iq_pi;
3459
3460 admin_queues = &ctrl_info->admin_queues;
3461 iq_pi = admin_queues->iq_pi_copy;
3462
3463 next_element = admin_queues->iq_element_array +
3464 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3465
3466 memcpy(next_element, request, sizeof(*request));
3467
3468 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3469 admin_queues->iq_pi_copy = iq_pi;
3470
3471 /*
3472 * This write notifies the controller that an IU is available to be
3473 * processed.
3474 */
3475 writel(iq_pi, admin_queues->iq_pi);
3476}
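/*
 * Note (illustrative): iq_pi_copy is the host-side shadow of the admin IQ
 * producer index, so advancing the ring never requires reading a device
 * register; only the final doorbell writel() touches hardware.  For a
 * hypothetical 8-element ring the modulo above simply wraps 7 -> 0.
 */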
3477
Kevin Barnett13bede62017-05-03 18:55:13 -05003478#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3479
Kevin Barnett6c223762016-06-27 16:41:00 -05003480static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3481 struct pqi_general_admin_response *response)
3482{
3483 struct pqi_admin_queues *admin_queues;
3484 pqi_index_t oq_pi;
3485 pqi_index_t oq_ci;
3486 unsigned long timeout;
3487
3488 admin_queues = &ctrl_info->admin_queues;
3489 oq_ci = admin_queues->oq_ci_copy;
3490
Kevin Barnett13bede62017-05-03 18:55:13 -05003491 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
Kevin Barnett6c223762016-06-27 16:41:00 -05003492
3493 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003494 oq_pi = readl(admin_queues->oq_pi);
Kevin Barnett6c223762016-06-27 16:41:00 -05003495 if (oq_pi != oq_ci)
3496 break;
3497 if (time_after(jiffies, timeout)) {
3498 dev_err(&ctrl_info->pci_dev->dev,
3499 "timed out waiting for admin response\n");
3500 return -ETIMEDOUT;
3501 }
Kevin Barnett13bede62017-05-03 18:55:13 -05003502 if (!sis_is_firmware_running(ctrl_info))
3503 return -ENXIO;
Kevin Barnett6c223762016-06-27 16:41:00 -05003504 usleep_range(1000, 2000);
3505 }
3506
3507 memcpy(response, admin_queues->oq_element_array +
3508 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3509
3510 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3511 admin_queues->oq_ci_copy = oq_ci;
3512 writel(oq_ci, admin_queues->oq_ci);
3513
3514 return 0;
3515}
3516
3517static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3518 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3519 struct pqi_io_request *io_request)
3520{
3521 struct pqi_io_request *next;
3522 void *next_element;
3523 pqi_index_t iq_pi;
3524 pqi_index_t iq_ci;
3525 size_t iu_length;
3526 unsigned long flags;
3527 unsigned int num_elements_needed;
3528 unsigned int num_elements_to_end_of_queue;
3529 size_t copy_count;
3530 struct pqi_iu_header *request;
3531
3532 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3533
Kevin Barnett376fb882017-05-03 18:54:43 -05003534 if (io_request) {
3535 io_request->queue_group = queue_group;
Kevin Barnett6c223762016-06-27 16:41:00 -05003536 list_add_tail(&io_request->request_list_entry,
3537 &queue_group->request_list[path]);
Kevin Barnett376fb882017-05-03 18:54:43 -05003538 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003539
3540 iq_pi = queue_group->iq_pi_copy[path];
3541
3542 list_for_each_entry_safe(io_request, next,
3543 &queue_group->request_list[path], request_list_entry) {
3544
3545 request = io_request->iu;
3546
3547 iu_length = get_unaligned_le16(&request->iu_length) +
3548 PQI_REQUEST_HEADER_LENGTH;
3549 num_elements_needed =
3550 DIV_ROUND_UP(iu_length,
3551 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3552
Kevin Barnettdac12fb2018-06-18 13:23:00 -05003553 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003554
3555 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3556 ctrl_info->num_elements_per_iq))
3557 break;
3558
3559 put_unaligned_le16(queue_group->oq_id,
3560 &request->response_queue_id);
3561
3562 next_element = queue_group->iq_element_array[path] +
3563 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3564
3565 num_elements_to_end_of_queue =
3566 ctrl_info->num_elements_per_iq - iq_pi;
3567
3568 if (num_elements_needed <= num_elements_to_end_of_queue) {
3569 memcpy(next_element, request, iu_length);
3570 } else {
3571 copy_count = num_elements_to_end_of_queue *
3572 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3573 memcpy(next_element, request, copy_count);
3574 memcpy(queue_group->iq_element_array[path],
3575 (u8 *)request + copy_count,
3576 iu_length - copy_count);
3577 }
3578
3579 iq_pi = (iq_pi + num_elements_needed) %
3580 ctrl_info->num_elements_per_iq;
3581
3582 list_del(&io_request->request_list_entry);
3583 }
3584
3585 if (iq_pi != queue_group->iq_pi_copy[path]) {
3586 queue_group->iq_pi_copy[path] = iq_pi;
3587 /*
3588 * This write notifies the controller that one or more IUs are
3589 * available to be processed.
3590 */
3591 writel(iq_pi, queue_group->iq_pi[path]);
3592 }
3593
3594 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3595}
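/*
 * Wrap-around example (hypothetical numbers): with 32 elements per IQ,
 * iq_pi == 30 and an IU spanning 4 elements, num_elements_to_end_of_queue
 * is 2, so the copy is split: the first 2 elements' worth of the IU lands
 * at the tail of the element array and the remainder at element 0, after
 * which the producer index becomes (30 + 4) % 32 == 2.
 *
 * The admission check reads the consumer index that the controller posts
 * back into host memory (iq_ci); for a classic ring of N elements the
 * number of free elements is
 *
 *	free = N - 1 - ((pi - ci) mod N)
 *
 * and any request that does not fit stays on the submit list until enough
 * elements drain.
 */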
3596
Kevin Barnett1f37e992017-05-03 18:53:24 -05003597#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3598
3599static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3600 struct completion *wait)
3601{
3602 int rc;
Kevin Barnett1f37e992017-05-03 18:53:24 -05003603
3604 while (1) {
3605 if (wait_for_completion_io_timeout(wait,
3606 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3607 rc = 0;
3608 break;
3609 }
3610
3611 pqi_check_ctrl_health(ctrl_info);
3612 if (pqi_ctrl_offline(ctrl_info)) {
3613 rc = -ENXIO;
3614 break;
3615 }
Kevin Barnett1f37e992017-05-03 18:53:24 -05003616 }
3617
3618 return rc;
3619}
3620
Kevin Barnett6c223762016-06-27 16:41:00 -05003621static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3622 void *context)
3623{
3624 struct completion *waiting = context;
3625
3626 complete(waiting);
3627}
3628
Kevin Barnett26b390a2018-06-18 13:22:48 -05003629static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
3630 *error_info)
3631{
3632 int rc = -EIO;
3633
3634 switch (error_info->data_out_result) {
3635 case PQI_DATA_IN_OUT_GOOD:
3636 if (error_info->status == SAM_STAT_GOOD)
3637 rc = 0;
3638 break;
3639 case PQI_DATA_IN_OUT_UNDERFLOW:
3640 if (error_info->status == SAM_STAT_GOOD ||
3641 error_info->status == SAM_STAT_CHECK_CONDITION)
3642 rc = 0;
3643 break;
3644 case PQI_DATA_IN_OUT_ABORTED:
3645 rc = PQI_CMD_STATUS_ABORTED;
3646 break;
3647 }
3648
3649 return rc;
3650}
3651
Kevin Barnett6c223762016-06-27 16:41:00 -05003652static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3653 struct pqi_iu_header *request, unsigned int flags,
3654 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3655{
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003656 int rc = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05003657 struct pqi_io_request *io_request;
3658 unsigned long start_jiffies;
3659 unsigned long msecs_blocked;
3660 size_t iu_length;
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003661 DECLARE_COMPLETION_ONSTACK(wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05003662
	/*
	 * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
	 * mutually exclusive: an interruptible request is never bounded by
	 * a timeout here.
	 */
3667
3668 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3669 if (down_interruptible(&ctrl_info->sync_request_sem))
3670 return -ERESTARTSYS;
3671 } else {
3672 if (timeout_msecs == NO_TIMEOUT) {
3673 down(&ctrl_info->sync_request_sem);
3674 } else {
3675 start_jiffies = jiffies;
3676 if (down_timeout(&ctrl_info->sync_request_sem,
3677 msecs_to_jiffies(timeout_msecs)))
3678 return -ETIMEDOUT;
3679 msecs_blocked =
3680 jiffies_to_msecs(jiffies - start_jiffies);
3681 if (msecs_blocked >= timeout_msecs)
3682 return -ETIMEDOUT;
3683 timeout_msecs -= msecs_blocked;
3684 }
3685 }
3686
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003687 pqi_ctrl_busy(ctrl_info);
3688 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3689 if (timeout_msecs == 0) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003690 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003691 rc = -ETIMEDOUT;
3692 goto out;
3693 }
3694
Kevin Barnett376fb882017-05-03 18:54:43 -05003695 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003696 pqi_ctrl_unbusy(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05003697 rc = -ENXIO;
3698 goto out;
3699 }
3700
Kevin Barnett6c223762016-06-27 16:41:00 -05003701 io_request = pqi_alloc_io_request(ctrl_info);
3702
3703 put_unaligned_le16(io_request->index,
3704 &(((struct pqi_raid_path_request *)request)->request_id));
3705
3706 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3707 ((struct pqi_raid_path_request *)request)->error_index =
3708 ((struct pqi_raid_path_request *)request)->request_id;
3709
3710 iu_length = get_unaligned_le16(&request->iu_length) +
3711 PQI_REQUEST_HEADER_LENGTH;
3712 memcpy(io_request->iu, request, iu_length);
3713
Kevin Barnett957c5ab2018-06-18 13:22:42 -05003714 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3715 io_request->context = &wait;
3716
3717 pqi_start_io(ctrl_info,
3718 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3719 io_request);
3720
3721 pqi_ctrl_unbusy(ctrl_info);
3722
3723 if (timeout_msecs == NO_TIMEOUT) {
3724 pqi_wait_for_completion_io(ctrl_info, &wait);
3725 } else {
3726 if (!wait_for_completion_io_timeout(&wait,
3727 msecs_to_jiffies(timeout_msecs))) {
3728 dev_warn(&ctrl_info->pci_dev->dev,
3729 "command timed out\n");
3730 rc = -ETIMEDOUT;
3731 }
3732 }
Kevin Barnett6c223762016-06-27 16:41:00 -05003733
3734 if (error_info) {
3735 if (io_request->error_info)
3736 memcpy(error_info, io_request->error_info,
3737 sizeof(*error_info));
3738 else
3739 memset(error_info, 0, sizeof(*error_info));
3740 } else if (rc == 0 && io_request->error_info) {
Kevin Barnett26b390a2018-06-18 13:22:48 -05003741 rc = pqi_process_raid_io_error_synchronous(
3742 io_request->error_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05003743 }
3744
3745 pqi_free_io_request(io_request);
3746
Kevin Barnett7561a7e2017-05-03 18:52:58 -05003747out:
Kevin Barnett6c223762016-06-27 16:41:00 -05003748 up(&ctrl_info->sync_request_sem);
3749
3750 return rc;
3751}
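/*
 * Usage sketch (hypothetical caller, details elided): synchronous
 * submitters fill in a RAID-path IU and let this helper handle request-id
 * assignment, completion waiting and error-info translation:
 *
 *	struct pqi_raid_path_request request;
 *	int rc;
 *
 *	memset(&request, 0, sizeof(request));
 *	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
 *	... set CDB, buffer length and SG descriptors ...
 *	rc = pqi_submit_raid_request_synchronous(ctrl_info,
 *		&request.header, 0, NULL, NO_TIMEOUT);
 *
 * The sync_request_sem semaphore taken at the top bounds how many
 * synchronous requests are outstanding at once.
 */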
3752
3753static int pqi_validate_admin_response(
3754 struct pqi_general_admin_response *response, u8 expected_function_code)
3755{
3756 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3757 return -EINVAL;
3758
3759 if (get_unaligned_le16(&response->header.iu_length) !=
3760 PQI_GENERAL_ADMIN_IU_LENGTH)
3761 return -EINVAL;
3762
3763 if (response->function_code != expected_function_code)
3764 return -EINVAL;
3765
3766 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3767 return -EINVAL;
3768
3769 return 0;
3770}
3771
3772static int pqi_submit_admin_request_synchronous(
3773 struct pqi_ctrl_info *ctrl_info,
3774 struct pqi_general_admin_request *request,
3775 struct pqi_general_admin_response *response)
3776{
3777 int rc;
3778
3779 pqi_submit_admin_request(ctrl_info, request);
3780
3781 rc = pqi_poll_for_admin_response(ctrl_info, response);
3782
3783 if (rc == 0)
3784 rc = pqi_validate_admin_response(response,
3785 request->function_code);
3786
3787 return rc;
3788}
3789
3790static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3791{
3792 int rc;
3793 struct pqi_general_admin_request request;
3794 struct pqi_general_admin_response response;
3795 struct pqi_device_capability *capability;
3796 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3797
3798 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3799 if (!capability)
3800 return -ENOMEM;
3801
3802 memset(&request, 0, sizeof(request));
3803
3804 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3805 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3806 &request.header.iu_length);
3807 request.function_code =
3808 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3809 put_unaligned_le32(sizeof(*capability),
3810 &request.data.report_device_capability.buffer_length);
3811
3812 rc = pqi_map_single(ctrl_info->pci_dev,
3813 &request.data.report_device_capability.sg_descriptor,
3814 capability, sizeof(*capability),
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02003815 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05003816 if (rc)
3817 goto out;
3818
3819 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3820 &response);
3821
3822 pqi_pci_unmap(ctrl_info->pci_dev,
3823 &request.data.report_device_capability.sg_descriptor, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02003824 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05003825
3826 if (rc)
3827 goto out;
3828
3829 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3830 rc = -EIO;
3831 goto out;
3832 }
3833
3834 ctrl_info->max_inbound_queues =
3835 get_unaligned_le16(&capability->max_inbound_queues);
3836 ctrl_info->max_elements_per_iq =
3837 get_unaligned_le16(&capability->max_elements_per_iq);
3838 ctrl_info->max_iq_element_length =
3839 get_unaligned_le16(&capability->max_iq_element_length)
3840 * 16;
3841 ctrl_info->max_outbound_queues =
3842 get_unaligned_le16(&capability->max_outbound_queues);
3843 ctrl_info->max_elements_per_oq =
3844 get_unaligned_le16(&capability->max_elements_per_oq);
3845 ctrl_info->max_oq_element_length =
3846 get_unaligned_le16(&capability->max_oq_element_length)
3847 * 16;
3848
3849 sop_iu_layer_descriptor =
3850 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3851
3852 ctrl_info->max_inbound_iu_length_per_firmware =
3853 get_unaligned_le16(
3854 &sop_iu_layer_descriptor->max_inbound_iu_length);
3855 ctrl_info->inbound_spanning_supported =
3856 sop_iu_layer_descriptor->inbound_spanning_supported;
3857 ctrl_info->outbound_spanning_supported =
3858 sop_iu_layer_descriptor->outbound_spanning_supported;
3859
3860out:
3861 kfree(capability);
3862
3863 return rc;
3864}
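/*
 * Unit note: the capability page reports queue element lengths in 16-byte
 * units, hence the "* 16" conversions above; a reported value of 8, for
 * example, means 128-byte elements.  The converted values are checked
 * against the driver's minimums in pqi_validate_device_capability() below.
 */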
3865
3866static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3867{
3868 if (ctrl_info->max_iq_element_length <
3869 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3870 dev_err(&ctrl_info->pci_dev->dev,
3871 "max. inbound queue element length of %d is less than the required length of %d\n",
3872 ctrl_info->max_iq_element_length,
3873 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3874 return -EINVAL;
3875 }
3876
3877 if (ctrl_info->max_oq_element_length <
3878 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3879 dev_err(&ctrl_info->pci_dev->dev,
3880 "max. outbound queue element length of %d is less than the required length of %d\n",
3881 ctrl_info->max_oq_element_length,
3882 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3883 return -EINVAL;
3884 }
3885
3886 if (ctrl_info->max_inbound_iu_length_per_firmware <
3887 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3888 dev_err(&ctrl_info->pci_dev->dev,
3889 "max. inbound IU length of %u is less than the min. required length of %d\n",
3890 ctrl_info->max_inbound_iu_length_per_firmware,
3891 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3892 return -EINVAL;
3893 }
3894
Kevin Barnett77668f42016-08-31 14:54:23 -05003895 if (!ctrl_info->inbound_spanning_supported) {
3896 dev_err(&ctrl_info->pci_dev->dev,
3897 "the controller does not support inbound spanning\n");
3898 return -EINVAL;
3899 }
3900
3901 if (ctrl_info->outbound_spanning_supported) {
3902 dev_err(&ctrl_info->pci_dev->dev,
3903 "the controller supports outbound spanning but this driver does not\n");
3904 return -EINVAL;
3905 }
3906
Kevin Barnett6c223762016-06-27 16:41:00 -05003907 return 0;
3908}
3909
Kevin Barnett6c223762016-06-27 16:41:00 -05003910static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3911{
3912 int rc;
3913 struct pqi_event_queue *event_queue;
3914 struct pqi_general_admin_request request;
3915 struct pqi_general_admin_response response;
3916
3917 event_queue = &ctrl_info->event_queue;
3918
	/*
	 * Create the OQ (Outbound Queue - device to host queue) that is
	 * dedicated to event notifications.
	 */
3923 memset(&request, 0, sizeof(request));
3924 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3925 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3926 &request.header.iu_length);
3927 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3928 put_unaligned_le16(event_queue->oq_id,
3929 &request.data.create_operational_oq.queue_id);
3930 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3931 &request.data.create_operational_oq.element_array_addr);
3932 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3933 &request.data.create_operational_oq.pi_addr);
3934 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3935 &request.data.create_operational_oq.num_elements);
3936 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3937 &request.data.create_operational_oq.element_length);
3938 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3939 put_unaligned_le16(event_queue->int_msg_num,
3940 &request.data.create_operational_oq.int_msg_num);
3941
3942 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3943 &response);
3944 if (rc)
3945 return rc;
3946
3947 event_queue->oq_ci = ctrl_info->iomem_base +
3948 PQI_DEVICE_REGISTERS_OFFSET +
3949 get_unaligned_le64(
3950 &response.data.create_operational_oq.oq_ci_offset);
3951
3952 return 0;
3953}
3954
Kevin Barnett061ef062017-05-03 18:53:05 -05003955static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3956 unsigned int group_number)
Kevin Barnett6c223762016-06-27 16:41:00 -05003957{
Kevin Barnett6c223762016-06-27 16:41:00 -05003958 int rc;
3959 struct pqi_queue_group *queue_group;
3960 struct pqi_general_admin_request request;
3961 struct pqi_general_admin_response response;
3962
Kevin Barnett061ef062017-05-03 18:53:05 -05003963 queue_group = &ctrl_info->queue_groups[group_number];
Kevin Barnett6c223762016-06-27 16:41:00 -05003964
3965 /*
3966 * Create IQ (Inbound Queue - host to device queue) for
3967 * RAID path.
3968 */
3969 memset(&request, 0, sizeof(request));
3970 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3971 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3972 &request.header.iu_length);
3973 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3974 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3975 &request.data.create_operational_iq.queue_id);
3976 put_unaligned_le64(
3977 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3978 &request.data.create_operational_iq.element_array_addr);
3979 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3980 &request.data.create_operational_iq.ci_addr);
3981 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3982 &request.data.create_operational_iq.num_elements);
3983 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3984 &request.data.create_operational_iq.element_length);
3985 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3986
3987 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3988 &response);
3989 if (rc) {
3990 dev_err(&ctrl_info->pci_dev->dev,
3991 "error creating inbound RAID queue\n");
3992 return rc;
3993 }
3994
3995 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3996 PQI_DEVICE_REGISTERS_OFFSET +
3997 get_unaligned_le64(
3998 &response.data.create_operational_iq.iq_pi_offset);
3999
4000 /*
4001 * Create IQ (Inbound Queue - host to device queue) for
4002 * Advanced I/O (AIO) path.
4003 */
4004 memset(&request, 0, sizeof(request));
4005 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4006 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4007 &request.header.iu_length);
4008 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4009 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4010 &request.data.create_operational_iq.queue_id);
4011 put_unaligned_le64((u64)queue_group->
4012 iq_element_array_bus_addr[AIO_PATH],
4013 &request.data.create_operational_iq.element_array_addr);
4014 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4015 &request.data.create_operational_iq.ci_addr);
4016 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4017 &request.data.create_operational_iq.num_elements);
4018 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4019 &request.data.create_operational_iq.element_length);
4020 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4021
4022 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4023 &response);
4024 if (rc) {
4025 dev_err(&ctrl_info->pci_dev->dev,
4026 "error creating inbound AIO queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004027 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004028 }
4029
4030 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4031 PQI_DEVICE_REGISTERS_OFFSET +
4032 get_unaligned_le64(
4033 &response.data.create_operational_iq.iq_pi_offset);
4034
4035 /*
4036 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4037 * assumed to be for RAID path I/O unless we change the queue's
4038 * property.
4039 */
4040 memset(&request, 0, sizeof(request));
4041 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4042 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4043 &request.header.iu_length);
4044 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4045 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4046 &request.data.change_operational_iq_properties.queue_id);
4047 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4048 &request.data.change_operational_iq_properties.vendor_specific);
4049
4050 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4051 &response);
4052 if (rc) {
4053 dev_err(&ctrl_info->pci_dev->dev,
4054 "error changing queue property\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004055 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004056 }
4057
4058 /*
4059 * Create OQ (Outbound Queue - device to host queue).
4060 */
4061 memset(&request, 0, sizeof(request));
4062 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4063 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4064 &request.header.iu_length);
4065 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4066 put_unaligned_le16(queue_group->oq_id,
4067 &request.data.create_operational_oq.queue_id);
4068 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4069 &request.data.create_operational_oq.element_array_addr);
4070 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4071 &request.data.create_operational_oq.pi_addr);
4072 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4073 &request.data.create_operational_oq.num_elements);
4074 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4075 &request.data.create_operational_oq.element_length);
4076 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4077 put_unaligned_le16(queue_group->int_msg_num,
4078 &request.data.create_operational_oq.int_msg_num);
4079
4080 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4081 &response);
4082 if (rc) {
4083 dev_err(&ctrl_info->pci_dev->dev,
4084 "error creating outbound queue\n");
Kevin Barnett339faa82018-03-21 13:32:31 -05004085 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05004086 }
4087
4088 queue_group->oq_ci = ctrl_info->iomem_base +
4089 PQI_DEVICE_REGISTERS_OFFSET +
4090 get_unaligned_le64(
4091 &response.data.create_operational_oq.oq_ci_offset);
4092
Kevin Barnett6c223762016-06-27 16:41:00 -05004093 return 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004094}
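/*
 * Summary (illustrative) of what each queue group owns once this function
 * succeeds:
 *
 *	iq_id[RAID_PATH] / iq_pi[RAID_PATH] - inbound queue + PI doorbell
 *	iq_id[AIO_PATH]  / iq_pi[AIO_PATH]  - inbound queue, re-flagged as
 *					      an AIO queue via the
 *					      CHANGE_IQ_PROPERTY request
 *	oq_id            / oq_ci            - outbound queue + CI doorbell
 *
 * The PI/CI doorbell pointers are derived from the offsets the firmware
 * returns in each create response, relative to
 * iomem_base + PQI_DEVICE_REGISTERS_OFFSET.
 */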
4095
4096static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4097{
4098 int rc;
4099 unsigned int i;
4100
4101 rc = pqi_create_event_queue(ctrl_info);
4102 if (rc) {
4103 dev_err(&ctrl_info->pci_dev->dev,
4104 "error creating event queue\n");
4105 return rc;
4106 }
4107
4108 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
Kevin Barnett061ef062017-05-03 18:53:05 -05004109 rc = pqi_create_queue_group(ctrl_info, i);
Kevin Barnett6c223762016-06-27 16:41:00 -05004110 if (rc) {
4111 dev_err(&ctrl_info->pci_dev->dev,
4112 "error creating queue group number %u/%u\n",
4113 i, ctrl_info->num_queue_groups);
4114 return rc;
4115 }
4116 }
4117
4118 return 0;
4119}
4120
4121#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4122 (offsetof(struct pqi_event_config, descriptors) + \
4123 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4124
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004125static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4126 bool enable_events)
Kevin Barnett6c223762016-06-27 16:41:00 -05004127{
4128 int rc;
4129 unsigned int i;
4130 struct pqi_event_config *event_config;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004131 struct pqi_event_descriptor *event_descriptor;
Kevin Barnett6c223762016-06-27 16:41:00 -05004132 struct pqi_general_management_request request;
4133
4134 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4135 GFP_KERNEL);
4136 if (!event_config)
4137 return -ENOMEM;
4138
4139 memset(&request, 0, sizeof(request));
4140
4141 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4142 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4143 data.report_event_configuration.sg_descriptors[1]) -
4144 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4145 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4146 &request.data.report_event_configuration.buffer_length);
4147
4148 rc = pqi_map_single(ctrl_info->pci_dev,
4149 request.data.report_event_configuration.sg_descriptors,
4150 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004151 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004152 if (rc)
4153 goto out;
4154
4155 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4156 0, NULL, NO_TIMEOUT);
4157
4158 pqi_pci_unmap(ctrl_info->pci_dev,
4159 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004160 DMA_FROM_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004161
4162 if (rc)
4163 goto out;
4164
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004165 for (i = 0; i < event_config->num_event_descriptors; i++) {
4166 event_descriptor = &event_config->descriptors[i];
4167 if (enable_events &&
4168 pqi_is_supported_event(event_descriptor->event_type))
4169 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4170 &event_descriptor->oq_id);
4171 else
4172 put_unaligned_le16(0, &event_descriptor->oq_id);
4173 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004174
4175 memset(&request, 0, sizeof(request));
4176
4177 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4178 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4179 data.report_event_configuration.sg_descriptors[1]) -
4180 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4181 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4182 &request.data.report_event_configuration.buffer_length);
4183
4184 rc = pqi_map_single(ctrl_info->pci_dev,
4185 request.data.report_event_configuration.sg_descriptors,
4186 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004187 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004188 if (rc)
4189 goto out;
4190
4191 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4192 NULL, NO_TIMEOUT);
4193
4194 pqi_pci_unmap(ctrl_info->pci_dev,
4195 request.data.report_event_configuration.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02004196 DMA_TO_DEVICE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004197
4198out:
4199 kfree(event_config);
4200
4201 return rc;
4202}
4203
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05004204static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4205{
4206 return pqi_configure_events(ctrl_info, true);
4207}
4208
4209static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4210{
4211 return pqi_configure_events(ctrl_info, false);
4212}
4213
Kevin Barnett6c223762016-06-27 16:41:00 -05004214static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4215{
4216 unsigned int i;
4217 struct device *dev;
4218 size_t sg_chain_buffer_length;
4219 struct pqi_io_request *io_request;
4220
4221 if (!ctrl_info->io_request_pool)
4222 return;
4223
4224 dev = &ctrl_info->pci_dev->dev;
4225 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4226 io_request = ctrl_info->io_request_pool;
4227
4228 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4229 kfree(io_request->iu);
4230 if (!io_request->sg_chain_buffer)
4231 break;
4232 dma_free_coherent(dev, sg_chain_buffer_length,
4233 io_request->sg_chain_buffer,
4234 io_request->sg_chain_buffer_dma_handle);
4235 io_request++;
4236 }
4237
4238 kfree(ctrl_info->io_request_pool);
4239 ctrl_info->io_request_pool = NULL;
4240}
4241
4242static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4243{
4244 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4245 ctrl_info->error_buffer_length,
4246 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4247
4248 if (!ctrl_info->error_buffer)
4249 return -ENOMEM;
4250
4251 return 0;
4252}
4253
4254static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4255{
4256 unsigned int i;
4257 void *sg_chain_buffer;
4258 size_t sg_chain_buffer_length;
4259 dma_addr_t sg_chain_buffer_dma_handle;
4260 struct device *dev;
4261 struct pqi_io_request *io_request;
4262
Kees Cook6396bb22018-06-12 14:03:40 -07004263 ctrl_info->io_request_pool =
4264 kcalloc(ctrl_info->max_io_slots,
4265 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
Kevin Barnett6c223762016-06-27 16:41:00 -05004266
4267 if (!ctrl_info->io_request_pool) {
4268 dev_err(&ctrl_info->pci_dev->dev,
4269 "failed to allocate I/O request pool\n");
4270 goto error;
4271 }
4272
4273 dev = &ctrl_info->pci_dev->dev;
4274 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4275 io_request = ctrl_info->io_request_pool;
4276
4277 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4278 io_request->iu =
4279 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4280
4281 if (!io_request->iu) {
4282 dev_err(&ctrl_info->pci_dev->dev,
4283 "failed to allocate IU buffers\n");
4284 goto error;
4285 }
4286
4287 sg_chain_buffer = dma_alloc_coherent(dev,
4288 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4289 GFP_KERNEL);
4290
4291 if (!sg_chain_buffer) {
4292 dev_err(&ctrl_info->pci_dev->dev,
4293 "failed to allocate PQI scatter-gather chain buffers\n");
4294 goto error;
4295 }
4296
4297 io_request->index = i;
4298 io_request->sg_chain_buffer = sg_chain_buffer;
4299 io_request->sg_chain_buffer_dma_handle =
4300 sg_chain_buffer_dma_handle;
4301 io_request++;
4302 }
4303
4304 return 0;
4305
4306error:
4307 pqi_free_all_io_requests(ctrl_info);
4308
4309 return -ENOMEM;
4310}
4311
4312/*
4313 * Calculate required resources that are sized based on max. outstanding
4314 * requests and max. transfer size.
4315 */
4316
4317static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4318{
4319 u32 max_transfer_size;
4320 u32 max_sg_entries;
4321
4322 ctrl_info->scsi_ml_can_queue =
4323 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4324 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4325
4326 ctrl_info->error_buffer_length =
4327 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4328
Kevin Barnettd727a772017-05-03 18:54:25 -05004329 if (reset_devices)
4330 max_transfer_size = min(ctrl_info->max_transfer_size,
4331 PQI_MAX_TRANSFER_SIZE_KDUMP);
4332 else
4333 max_transfer_size = min(ctrl_info->max_transfer_size,
4334 PQI_MAX_TRANSFER_SIZE);
Kevin Barnett6c223762016-06-27 16:41:00 -05004335
4336 max_sg_entries = max_transfer_size / PAGE_SIZE;
4337
4338 /* +1 to cover when the buffer is not page-aligned. */
4339 max_sg_entries++;
4340
4341 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4342
4343 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4344
4345 ctrl_info->sg_chain_buffer_length =
Kevin Barnette1d213b2017-05-03 18:53:18 -05004346 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4347 PQI_EXTRA_SGL_MEMORY;
Kevin Barnett6c223762016-06-27 16:41:00 -05004348 ctrl_info->sg_tablesize = max_sg_entries;
4349 ctrl_info->max_sectors = max_transfer_size / 512;
4350}
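/*
 * Worked example (hypothetical limits, 4 KiB pages): if the controller
 * reports a 1 MiB max_transfer_size and PQI_MAX_TRANSFER_SIZE is at least
 * that large, the math above gives
 *
 *	max_sg_entries    = 1 MiB / 4 KiB + 1 = 257  (+1 for a buffer that
 *						      is not page-aligned)
 *	max_sg_entries    = min(ctrl_info->max_sg_entries, 257)
 *	max_transfer_size = (max_sg_entries - 1) * 4 KiB
 *	max_sectors       = max_transfer_size / 512
 *
 * i.e. the advertised transfer size is rounded back down so a worst-case
 * unaligned buffer always fits in the scatter-gather table.
 */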
4351
4352static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4353{
Kevin Barnett6c223762016-06-27 16:41:00 -05004354 int num_queue_groups;
4355 u16 num_elements_per_iq;
4356 u16 num_elements_per_oq;
4357
Kevin Barnettd727a772017-05-03 18:54:25 -05004358 if (reset_devices) {
4359 num_queue_groups = 1;
4360 } else {
4361 int num_cpus;
4362 int max_queue_groups;
Kevin Barnett6c223762016-06-27 16:41:00 -05004363
Kevin Barnettd727a772017-05-03 18:54:25 -05004364 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4365 ctrl_info->max_outbound_queues - 1);
4366 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4367
4368 num_cpus = num_online_cpus();
4369 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4370 num_queue_groups = min(num_queue_groups, max_queue_groups);
4371 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004372
4373 ctrl_info->num_queue_groups = num_queue_groups;
Kevin Barnett061ef062017-05-03 18:53:05 -05004374 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
Kevin Barnett6c223762016-06-27 16:41:00 -05004375
Kevin Barnett77668f42016-08-31 14:54:23 -05004376 /*
4377 * Make sure that the max. inbound IU length is an even multiple
4378 * of our inbound element length.
4379 */
4380 ctrl_info->max_inbound_iu_length =
4381 (ctrl_info->max_inbound_iu_length_per_firmware /
4382 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4383 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
Kevin Barnett6c223762016-06-27 16:41:00 -05004384
4385 num_elements_per_iq =
4386 (ctrl_info->max_inbound_iu_length /
4387 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4388
4389 /* Add one because one element in each queue is unusable. */
4390 num_elements_per_iq++;
4391
4392 num_elements_per_iq = min(num_elements_per_iq,
4393 ctrl_info->max_elements_per_iq);
4394
4395 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4396 num_elements_per_oq = min(num_elements_per_oq,
4397 ctrl_info->max_elements_per_oq);
4398
4399 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4400 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4401
4402 ctrl_info->max_sg_per_iu =
4403 ((ctrl_info->max_inbound_iu_length -
4404 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4405 sizeof(struct pqi_sg_descriptor)) +
4406 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4407}
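/*
 * Worked example (hypothetical firmware limits, assuming 128-byte IQ
 * elements): with a firmware max inbound IU length of 1104 bytes,
 *
 *	max_inbound_iu_length = (1104 / 128) * 128 = 1024  (rounded down to
 *							    a whole element
 *							    count)
 *	num_elements_per_iq   = 1024 / 128 + 1     = 9     (+1 because one
 *							    element is
 *							    unusable)
 *	num_elements_per_oq   = (9 - 1) * 2 + 1    = 17
 *
 * with both element counts then clamped to the controller-reported
 * maximums.
 */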
4408
4409static inline void pqi_set_sg_descriptor(
4410 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4411{
4412 u64 address = (u64)sg_dma_address(sg);
4413 unsigned int length = sg_dma_len(sg);
4414
4415 put_unaligned_le64(address, &sg_descriptor->address);
4416 put_unaligned_le32(length, &sg_descriptor->length);
4417 put_unaligned_le32(0, &sg_descriptor->flags);
4418}
4419
4420static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4421 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4422 struct pqi_io_request *io_request)
4423{
4424 int i;
4425 u16 iu_length;
4426 int sg_count;
4427 bool chained;
4428 unsigned int num_sg_in_iu;
4429 unsigned int max_sg_per_iu;
4430 struct scatterlist *sg;
4431 struct pqi_sg_descriptor *sg_descriptor;
4432
4433 sg_count = scsi_dma_map(scmd);
4434 if (sg_count < 0)
4435 return sg_count;
4436
4437 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4438 PQI_REQUEST_HEADER_LENGTH;
4439
4440 if (sg_count == 0)
4441 goto out;
4442
4443 sg = scsi_sglist(scmd);
4444 sg_descriptor = request->sg_descriptors;
4445 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4446 chained = false;
4447 num_sg_in_iu = 0;
4448 i = 0;
4449
4450 while (1) {
4451 pqi_set_sg_descriptor(sg_descriptor, sg);
4452 if (!chained)
4453 num_sg_in_iu++;
4454 i++;
4455 if (i == sg_count)
4456 break;
4457 sg_descriptor++;
4458 if (i == max_sg_per_iu) {
4459 put_unaligned_le64(
4460 (u64)io_request->sg_chain_buffer_dma_handle,
4461 &sg_descriptor->address);
4462 put_unaligned_le32((sg_count - num_sg_in_iu)
4463 * sizeof(*sg_descriptor),
4464 &sg_descriptor->length);
4465 put_unaligned_le32(CISS_SG_CHAIN,
4466 &sg_descriptor->flags);
4467 chained = true;
4468 num_sg_in_iu++;
4469 sg_descriptor = io_request->sg_chain_buffer;
4470 }
4471 sg = sg_next(sg);
4472 }
4473
4474 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4475 request->partial = chained;
4476 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4477
4478out:
4479 put_unaligned_le16(iu_length, &request->header.iu_length);
4480
4481 return 0;
4482}
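/*
 * Chaining example (hypothetical numbers): assume ctrl_info->max_sg_per_iu
 * is 17 and the command maps to a 40-entry scatter list.  The loop above
 * writes 16 data descriptors into the request, then converts the 17th
 * embedded slot into a CISS_SG_CHAIN descriptor that points at
 * io_request->sg_chain_buffer with a length of
 * 24 * sizeof(struct pqi_sg_descriptor), and writes the remaining 24
 * descriptors there; CISS_SG_LAST is set on the final chain-buffer entry
 * and request->partial is set.  num_sg_in_iu counts only the descriptors
 * embedded in the IU (16 data + 1 chain), which is what the iu_length
 * adjustment uses.
 */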
4483
4484static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4485 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4486 struct pqi_io_request *io_request)
4487{
4488 int i;
4489 u16 iu_length;
4490 int sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004491 bool chained;
4492 unsigned int num_sg_in_iu;
4493 unsigned int max_sg_per_iu;
Kevin Barnett6c223762016-06-27 16:41:00 -05004494 struct scatterlist *sg;
4495 struct pqi_sg_descriptor *sg_descriptor;
4496
4497 sg_count = scsi_dma_map(scmd);
4498 if (sg_count < 0)
4499 return sg_count;
Kevin Barnetta60eec02016-08-31 14:54:11 -05004500
4501 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4502 PQI_REQUEST_HEADER_LENGTH;
4503 num_sg_in_iu = 0;
4504
Kevin Barnett6c223762016-06-27 16:41:00 -05004505 if (sg_count == 0)
4506 goto out;
4507
Kevin Barnetta60eec02016-08-31 14:54:11 -05004508 sg = scsi_sglist(scmd);
4509 sg_descriptor = request->sg_descriptors;
4510 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4511 chained = false;
4512 i = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05004513
Kevin Barnetta60eec02016-08-31 14:54:11 -05004514 while (1) {
4515 pqi_set_sg_descriptor(sg_descriptor, sg);
4516 if (!chained)
4517 num_sg_in_iu++;
4518 i++;
4519 if (i == sg_count)
4520 break;
4521 sg_descriptor++;
4522 if (i == max_sg_per_iu) {
4523 put_unaligned_le64(
4524 (u64)io_request->sg_chain_buffer_dma_handle,
4525 &sg_descriptor->address);
4526 put_unaligned_le32((sg_count - num_sg_in_iu)
4527 * sizeof(*sg_descriptor),
4528 &sg_descriptor->length);
4529 put_unaligned_le32(CISS_SG_CHAIN,
4530 &sg_descriptor->flags);
4531 chained = true;
4532 num_sg_in_iu++;
4533 sg_descriptor = io_request->sg_chain_buffer;
Kevin Barnett6c223762016-06-27 16:41:00 -05004534 }
Kevin Barnetta60eec02016-08-31 14:54:11 -05004535 sg = sg_next(sg);
Kevin Barnett6c223762016-06-27 16:41:00 -05004536 }
4537
Kevin Barnetta60eec02016-08-31 14:54:11 -05004538 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4539 request->partial = chained;
Kevin Barnett6c223762016-06-27 16:41:00 -05004540 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
Kevin Barnetta60eec02016-08-31 14:54:11 -05004541
4542out:
Kevin Barnett6c223762016-06-27 16:41:00 -05004543 put_unaligned_le16(iu_length, &request->header.iu_length);
4544 request->num_sg_descriptors = num_sg_in_iu;
4545
4546 return 0;
4547}
4548
4549static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4550 void *context)
4551{
4552 struct scsi_cmnd *scmd;
4553
4554 scmd = io_request->scmd;
4555 pqi_free_io_request(io_request);
4556 scsi_dma_unmap(scmd);
4557 pqi_scsi_done(scmd);
4558}
4559
Kevin Barnett376fb882017-05-03 18:54:43 -05004560static int pqi_raid_submit_scsi_cmd_with_io_request(
4561 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05004562 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4563 struct pqi_queue_group *queue_group)
4564{
4565 int rc;
4566 size_t cdb_length;
Kevin Barnett6c223762016-06-27 16:41:00 -05004567 struct pqi_raid_path_request *request;
4568
Kevin Barnett6c223762016-06-27 16:41:00 -05004569 io_request->io_complete_callback = pqi_raid_io_complete;
4570 io_request->scmd = scmd;
4571
Kevin Barnett6c223762016-06-27 16:41:00 -05004572 request = io_request->iu;
4573 memset(request, 0,
4574 offsetof(struct pqi_raid_path_request, sg_descriptors));
4575
4576 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4577 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4578 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4579 put_unaligned_le16(io_request->index, &request->request_id);
4580 request->error_index = request->request_id;
4581 memcpy(request->lun_number, device->scsi3addr,
4582 sizeof(request->lun_number));
4583
4584 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4585 memcpy(request->cdb, scmd->cmnd, cdb_length);
4586
4587 switch (cdb_length) {
4588 case 6:
4589 case 10:
4590 case 12:
4591 case 16:
4592 /* No bytes in the Additional CDB bytes field */
4593 request->additional_cdb_bytes_usage =
4594 SOP_ADDITIONAL_CDB_BYTES_0;
4595 break;
4596 case 20:
4597 /* 4 bytes in the Additional cdb field */
4598 request->additional_cdb_bytes_usage =
4599 SOP_ADDITIONAL_CDB_BYTES_4;
4600 break;
4601 case 24:
4602 /* 8 bytes in the Additional cdb field */
4603 request->additional_cdb_bytes_usage =
4604 SOP_ADDITIONAL_CDB_BYTES_8;
4605 break;
4606 case 28:
4607 /* 12 bytes in the Additional cdb field */
4608 request->additional_cdb_bytes_usage =
4609 SOP_ADDITIONAL_CDB_BYTES_12;
4610 break;
4611 case 32:
4612 default:
4613 /* 16 bytes in the Additional cdb field */
4614 request->additional_cdb_bytes_usage =
4615 SOP_ADDITIONAL_CDB_BYTES_16;
4616 break;
4617 }
4618
	/*
	 * The SOP direction flags are named from the controller's point of
	 * view: a host write (DMA_TO_DEVICE) means the controller reads
	 * from host memory, and vice versa.
	 */
	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
4626 case DMA_NONE:
4627 request->data_direction = SOP_NO_DIRECTION_FLAG;
4628 break;
4629 case DMA_BIDIRECTIONAL:
4630 request->data_direction = SOP_BIDIRECTIONAL;
4631 break;
4632 default:
4633 dev_err(&ctrl_info->pci_dev->dev,
4634 "unknown data direction: %d\n",
4635 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05004636 break;
4637 }
4638
4639 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4640 if (rc) {
4641 pqi_free_io_request(io_request);
4642 return SCSI_MLQUEUE_HOST_BUSY;
4643 }
4644
4645 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4646
4647 return 0;
4648}
4649
Kevin Barnett376fb882017-05-03 18:54:43 -05004650static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4651 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4652 struct pqi_queue_group *queue_group)
4653{
4654 struct pqi_io_request *io_request;
4655
4656 io_request = pqi_alloc_io_request(ctrl_info);
4657
4658 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4659 device, scmd, queue_group);
4660}
4661
4662static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
4663{
4664 if (!pqi_ctrl_blocked(ctrl_info))
4665 schedule_work(&ctrl_info->raid_bypass_retry_work);
4666}
4667
4668static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
4669{
4670 struct scsi_cmnd *scmd;
Kevin Barnett03b288cf2017-05-03 18:54:49 -05004671 struct pqi_scsi_dev *device;
Kevin Barnett376fb882017-05-03 18:54:43 -05004672 struct pqi_ctrl_info *ctrl_info;
4673
4674 if (!io_request->raid_bypass)
4675 return false;
4676
4677 scmd = io_request->scmd;
4678 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
4679 return false;
4680 if (host_byte(scmd->result) == DID_NO_CONNECT)
4681 return false;
4682
Kevin Barnett03b288cf2017-05-03 18:54:49 -05004683 device = scmd->device->hostdata;
4684 if (pqi_device_offline(device))
4685 return false;
4686
Kevin Barnett376fb882017-05-03 18:54:43 -05004687 ctrl_info = shost_to_hba(scmd->device->host);
4688 if (pqi_ctrl_offline(ctrl_info))
4689 return false;
4690
4691 return true;
4692}
4693
4694static inline void pqi_add_to_raid_bypass_retry_list(
4695 struct pqi_ctrl_info *ctrl_info,
4696 struct pqi_io_request *io_request, bool at_head)
4697{
4698 unsigned long flags;
4699
4700 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4701 if (at_head)
4702 list_add(&io_request->request_list_entry,
4703 &ctrl_info->raid_bypass_retry_list);
4704 else
4705 list_add_tail(&io_request->request_list_entry,
4706 &ctrl_info->raid_bypass_retry_list);
4707 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4708}
4709
4710static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
4711 void *context)
4712{
4713 struct scsi_cmnd *scmd;
4714
4715 scmd = io_request->scmd;
4716 pqi_free_io_request(io_request);
4717 pqi_scsi_done(scmd);
4718}
4719
4720static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
4721{
4722 struct scsi_cmnd *scmd;
4723 struct pqi_ctrl_info *ctrl_info;
4724
4725 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
4726 scmd = io_request->scmd;
4727 scmd->result = 0;
4728 ctrl_info = shost_to_hba(scmd->device->host);
4729
4730 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
4731 pqi_schedule_bypass_retry(ctrl_info);
4732}
4733
4734static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
4735{
4736 struct scsi_cmnd *scmd;
4737 struct pqi_scsi_dev *device;
4738 struct pqi_ctrl_info *ctrl_info;
4739 struct pqi_queue_group *queue_group;
4740
4741 scmd = io_request->scmd;
4742 device = scmd->device->hostdata;
4743 if (pqi_device_in_reset(device)) {
4744 pqi_free_io_request(io_request);
4745 set_host_byte(scmd, DID_RESET);
4746 pqi_scsi_done(scmd);
4747 return 0;
4748 }
4749
4750 ctrl_info = shost_to_hba(scmd->device->host);
4751 queue_group = io_request->queue_group;
4752
4753 pqi_reinit_io_request(io_request);
4754
4755 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4756 device, scmd, queue_group);
4757}
4758
4759static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
4760 struct pqi_ctrl_info *ctrl_info)
4761{
4762 unsigned long flags;
4763 struct pqi_io_request *io_request;
4764
4765 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4766 io_request = list_first_entry_or_null(
4767 &ctrl_info->raid_bypass_retry_list,
4768 struct pqi_io_request, request_list_entry);
4769 if (io_request)
4770 list_del(&io_request->request_list_entry);
4771 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4772
4773 return io_request;
4774}
4775
4776static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
4777{
4778 int rc;
4779 struct pqi_io_request *io_request;
4780
4781 pqi_ctrl_busy(ctrl_info);
4782
4783 while (1) {
4784 if (pqi_ctrl_blocked(ctrl_info))
4785 break;
4786 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
4787 if (!io_request)
4788 break;
4789 rc = pqi_retry_raid_bypass(io_request);
4790 if (rc) {
4791 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
4792 true);
4793 pqi_schedule_bypass_retry(ctrl_info);
4794 break;
4795 }
4796 }
4797
4798 pqi_ctrl_unbusy(ctrl_info);
4799}
4800
4801static void pqi_raid_bypass_retry_worker(struct work_struct *work)
4802{
4803 struct pqi_ctrl_info *ctrl_info;
4804
4805 ctrl_info = container_of(work, struct pqi_ctrl_info,
4806 raid_bypass_retry_work);
4807 pqi_retry_raid_bypass_requests(ctrl_info);
4808}
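/*
 * Retry flow summary (illustrative): when a RAID bypass (AIO) request
 * fails in a way pqi_raid_bypass_retry_needed() considers retryable, its
 * io_request is parked on raid_bypass_retry_list and re-issued through the
 * normal RAID path:
 *
 *	pqi_aio_io_complete()
 *	    -> pqi_queue_raid_bypass_retry()      (add to tail + schedule)
 *	        -> pqi_raid_bypass_retry_worker()
 *	            -> pqi_retry_raid_bypass()    (reinit + resubmit via
 *						   the RAID path)
 *
 * A failed resubmit pushes the request back onto the head of the list and
 * reschedules the worker, so ordering is preserved while the controller
 * remains blocked.
 */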
4809
Kevin Barnett5f310422017-05-03 18:54:55 -05004810static void pqi_clear_all_queued_raid_bypass_retries(
4811 struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05004812{
4813 unsigned long flags;
Kevin Barnett376fb882017-05-03 18:54:43 -05004814
4815 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
Kevin Barnett5f310422017-05-03 18:54:55 -05004816 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
Kevin Barnett376fb882017-05-03 18:54:43 -05004817 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4818}
4819
Kevin Barnett6c223762016-06-27 16:41:00 -05004820static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4821 void *context)
4822{
4823 struct scsi_cmnd *scmd;
4824
4825 scmd = io_request->scmd;
4826 scsi_dma_unmap(scmd);
4827 if (io_request->status == -EAGAIN)
4828 set_host_byte(scmd, DID_IMM_RETRY);
Kevin Barnett376fb882017-05-03 18:54:43 -05004829 else if (pqi_raid_bypass_retry_needed(io_request)) {
4830 pqi_queue_raid_bypass_retry(io_request);
4831 return;
4832 }
Kevin Barnett6c223762016-06-27 16:41:00 -05004833 pqi_free_io_request(io_request);
4834 pqi_scsi_done(scmd);
4835}
4836
4837static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4838 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4839 struct pqi_queue_group *queue_group)
4840{
4841 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
Kevin Barnett376fb882017-05-03 18:54:43 -05004842 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
Kevin Barnett6c223762016-06-27 16:41:00 -05004843}
4844
4845static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4846 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4847 unsigned int cdb_length, struct pqi_queue_group *queue_group,
Kevin Barnett376fb882017-05-03 18:54:43 -05004848 struct pqi_encryption_info *encryption_info, bool raid_bypass)
Kevin Barnett6c223762016-06-27 16:41:00 -05004849{
4850 int rc;
4851 struct pqi_io_request *io_request;
4852 struct pqi_aio_path_request *request;
4853
4854 io_request = pqi_alloc_io_request(ctrl_info);
4855 io_request->io_complete_callback = pqi_aio_io_complete;
4856 io_request->scmd = scmd;
Kevin Barnett376fb882017-05-03 18:54:43 -05004857 io_request->raid_bypass = raid_bypass;
Kevin Barnett6c223762016-06-27 16:41:00 -05004858
4859 request = io_request->iu;
4860 memset(request, 0,
4861 offsetof(struct pqi_raid_path_request, sg_descriptors));
4862
4863 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4864 put_unaligned_le32(aio_handle, &request->nexus_id);
4865 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4866 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4867 put_unaligned_le16(io_request->index, &request->request_id);
4868 request->error_index = request->request_id;
4869 if (cdb_length > sizeof(request->cdb))
4870 cdb_length = sizeof(request->cdb);
4871 request->cdb_length = cdb_length;
4872 memcpy(request->cdb, cdb, cdb_length);
4873
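	/*
	 * Note: the SOP data-direction flags appear to be expressed from the
	 * controller's point of view, so a host-to-device transfer
	 * (DMA_TO_DEVICE) is posted as a controller read of host memory
	 * (SOP_READ_FLAG), and DMA_FROM_DEVICE maps to SOP_WRITE_FLAG.
	 */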
4874 switch (scmd->sc_data_direction) {
4875 case DMA_TO_DEVICE:
4876 request->data_direction = SOP_READ_FLAG;
4877 break;
4878 case DMA_FROM_DEVICE:
4879 request->data_direction = SOP_WRITE_FLAG;
4880 break;
4881 case DMA_NONE:
4882 request->data_direction = SOP_NO_DIRECTION_FLAG;
4883 break;
4884 case DMA_BIDIRECTIONAL:
4885 request->data_direction = SOP_BIDIRECTIONAL;
4886 break;
4887 default:
4888 dev_err(&ctrl_info->pci_dev->dev,
4889 "unknown data direction: %d\n",
4890 scmd->sc_data_direction);
Kevin Barnett6c223762016-06-27 16:41:00 -05004891 break;
4892 }
4893
4894 if (encryption_info) {
4895 request->encryption_enable = true;
4896 put_unaligned_le16(encryption_info->data_encryption_key_index,
4897 &request->data_encryption_key_index);
4898 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4899 &request->encrypt_tweak_lower);
4900 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4901 &request->encrypt_tweak_upper);
4902 }
4903
4904 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4905 if (rc) {
4906 pqi_free_io_request(io_request);
4907 return SCSI_MLQUEUE_HOST_BUSY;
4908 }
4909
4910 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4911
4912 return 0;
4913}
4914
Kevin Barnett061ef062017-05-03 18:53:05 -05004915static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4916 struct scsi_cmnd *scmd)
4917{
4918 u16 hw_queue;
4919
4920 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4921 if (hw_queue > ctrl_info->max_hw_queue_index)
4922 hw_queue = 0;
4923
4924 return hw_queue;
4925}
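
/*
 * Illustrative standalone sketch (not driver code): blk_mq_unique_tag() is
 * assumed here to pack the hardware-queue index into the upper 16 bits of the
 * unique tag and the per-queue tag into the lower 16 bits, and the helper
 * above clamps an out-of-range queue index to 0.  A userspace sketch of that
 * decode, with invented names:
 */
#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS	16	/* assumed split, mirroring BLK_MQ_UNIQUE_TAG_BITS */

static uint16_t decode_hw_queue(uint32_t unique_tag, uint16_t max_hw_queue_index)
{
	uint16_t hw_queue = unique_tag >> UNIQUE_TAG_BITS;

	/* Fall back to queue 0 rather than indexing past the queue array. */
	if (hw_queue > max_hw_queue_index)
		hw_queue = 0;

	return hw_queue;
}

int main(void)
{
	uint32_t unique_tag = (3u << UNIQUE_TAG_BITS) | 0x2au;	/* hwq 3, tag 0x2a */

	printf("hw queue = %u\n", (unsigned int)decode_hw_queue(unique_tag, 7));
	return 0;
}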
4926
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004927/*
4928 * This function is called just before a completed SCSI request is handed
4929 * back to the SCSI midlayer (SML).
4930 */
4931
4932void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4933{
4934 struct pqi_scsi_dev *device;
4935
4936 device = scmd->device->hostdata;
4937 atomic_dec(&device->scsi_cmds_outstanding);
4938}
4939
Kevin Barnett6c223762016-06-27 16:41:00 -05004940static int pqi_scsi_queue_command(struct Scsi_Host *shost,
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05004941 struct scsi_cmnd *scmd)
Kevin Barnett6c223762016-06-27 16:41:00 -05004942{
4943 int rc;
4944 struct pqi_ctrl_info *ctrl_info;
4945 struct pqi_scsi_dev *device;
Kevin Barnett061ef062017-05-03 18:53:05 -05004946 u16 hw_queue;
Kevin Barnett6c223762016-06-27 16:41:00 -05004947 struct pqi_queue_group *queue_group;
4948 bool raid_bypassed;
4949
4950 device = scmd->device->hostdata;
Kevin Barnett6c223762016-06-27 16:41:00 -05004951 ctrl_info = shost_to_hba(shost);
4952
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004953 atomic_inc(&device->scsi_cmds_outstanding);
4954
Kevin Barnett6c223762016-06-27 16:41:00 -05004955 if (pqi_ctrl_offline(ctrl_info)) {
4956 set_host_byte(scmd, DID_NO_CONNECT);
4957 pqi_scsi_done(scmd);
4958 return 0;
4959 }
4960
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004961 pqi_ctrl_busy(ctrl_info);
4962 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4963 rc = SCSI_MLQUEUE_HOST_BUSY;
4964 goto out;
4965 }
4966
Kevin Barnett7d81d2b2016-08-31 14:55:11 -05004967 /*
4968 * This is necessary because the SML doesn't zero out this field during
4969 * error recovery.
4970 */
4971 scmd->result = 0;
4972
Kevin Barnett061ef062017-05-03 18:53:05 -05004973 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4974 queue_group = &ctrl_info->queue_groups[hw_queue];
Kevin Barnett6c223762016-06-27 16:41:00 -05004975
4976 if (pqi_is_logical_device(device)) {
4977 raid_bypassed = false;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05004978 if (device->raid_bypass_enabled &&
Christoph Hellwig57292b52017-01-31 16:57:29 +01004979 !blk_rq_is_passthrough(scmd->request)) {
Kevin Barnett6c223762016-06-27 16:41:00 -05004980 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4981 scmd, queue_group);
Kevin Barnett376fb882017-05-03 18:54:43 -05004982 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
4983 raid_bypassed = true;
Kevin Barnett6c223762016-06-27 16:41:00 -05004984 }
4985 if (!raid_bypassed)
4986 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4987 queue_group);
4988 } else {
4989 if (device->aio_enabled)
4990 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4991 queue_group);
4992 else
4993 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4994 queue_group);
4995 }
4996
Kevin Barnett7561a7e2017-05-03 18:52:58 -05004997out:
4998 pqi_ctrl_unbusy(ctrl_info);
4999 if (rc)
5000 atomic_dec(&device->scsi_cmds_outstanding);
5001
Kevin Barnett6c223762016-06-27 16:41:00 -05005002 return rc;
5003}
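
/*
 * Illustrative standalone sketch (not driver code): queuecommand increments
 * the device's outstanding-command count up front, pqi_prep_for_scsi_done()
 * drops it when the command completes, and the error path above drops it
 * again only when the command is bounced back with a non-zero return (because
 * it will never reach the completion path).  A minimal C11 model of that
 * contract, names invented:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cmds_outstanding;

/* Pretend submission reports "busy" once more than two commands are in flight. */
static bool submit(void)
{
	return atomic_load(&cmds_outstanding) <= 2;
}

static int queue_command(void)
{
	int rc = 0;

	atomic_fetch_add(&cmds_outstanding, 1);
	if (!submit()) {
		rc = -1;	/* analogous to SCSI_MLQUEUE_HOST_BUSY */
		atomic_fetch_sub(&cmds_outstanding, 1);	/* undo: never issued */
	}
	return rc;
}

/* Completion path: the only other place the counter is dropped. */
static void command_done(void)
{
	atomic_fetch_sub(&cmds_outstanding, 1);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("queue_command -> %d (outstanding %d)\n",
		       queue_command(), atomic_load(&cmds_outstanding));
	command_done();
	printf("after one completion: %d\n", atomic_load(&cmds_outstanding));
	return 0;
}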
5004
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005005static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5006 struct pqi_queue_group *queue_group)
5007{
5008 unsigned int path;
5009 unsigned long flags;
5010 bool list_is_empty;
5011
5012 for (path = 0; path < 2; path++) {
5013 while (1) {
5014 spin_lock_irqsave(
5015 &queue_group->submit_lock[path], flags);
5016 list_is_empty =
5017 list_empty(&queue_group->request_list[path]);
5018 spin_unlock_irqrestore(
5019 &queue_group->submit_lock[path], flags);
5020 if (list_is_empty)
5021 break;
5022 pqi_check_ctrl_health(ctrl_info);
5023 if (pqi_ctrl_offline(ctrl_info))
5024 return -ENXIO;
5025 usleep_range(1000, 2000);
5026 }
5027 }
5028
5029 return 0;
5030}
5031
5032static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5033{
5034 int rc;
5035 unsigned int i;
5036 unsigned int path;
5037 struct pqi_queue_group *queue_group;
5038 pqi_index_t iq_pi;
5039 pqi_index_t iq_ci;
5040
5041 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5042 queue_group = &ctrl_info->queue_groups[i];
5043
5044 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5045 if (rc)
5046 return rc;
5047
5048 for (path = 0; path < 2; path++) {
5049 iq_pi = queue_group->iq_pi_copy[path];
5050
5051 while (1) {
Kevin Barnettdac12fb2018-06-18 13:23:00 -05005052 iq_ci = readl(queue_group->iq_ci[path]);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005053 if (iq_ci == iq_pi)
5054 break;
5055 pqi_check_ctrl_health(ctrl_info);
5056 if (pqi_ctrl_offline(ctrl_info))
5057 return -ENXIO;
5058 usleep_range(1000, 2000);
5059 }
5060 }
5061 }
5062
5063 return 0;
5064}
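
/*
 * Illustrative standalone sketch (not driver code): an inbound queue is
 * considered drained once the driver's cached producer index (iq_pi_copy)
 * equals the consumer index the controller publishes (iq_ci); the loop above
 * simply polls for that equality.  A tiny userspace model, names invented:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint16_t pi;	/* producer index advanced by the submitter */
	uint16_t ci;	/* consumer index advanced by the consumer */
};

static bool ring_is_drained(const struct ring *ring)
{
	/* Everything produced so far has been consumed. */
	return ring->ci == ring->pi;
}

int main(void)
{
	struct ring ring = { .pi = 5, .ci = 3 };

	while (!ring_is_drained(&ring)) {
		ring.ci++;	/* the hardware would advance this as it consumes */
		printf("ci now %u\n", (unsigned int)ring.ci);
	}
	printf("drained at pi=ci=%u\n", (unsigned int)ring.pi);
	return 0;
}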
5065
5066static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5067 struct pqi_scsi_dev *device)
5068{
5069 unsigned int i;
5070 unsigned int path;
5071 struct pqi_queue_group *queue_group;
5072 unsigned long flags;
5073 struct pqi_io_request *io_request;
5074 struct pqi_io_request *next;
5075 struct scsi_cmnd *scmd;
5076 struct pqi_scsi_dev *scsi_device;
5077
5078 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5079 queue_group = &ctrl_info->queue_groups[i];
5080
5081 for (path = 0; path < 2; path++) {
5082 spin_lock_irqsave(
5083 &queue_group->submit_lock[path], flags);
5084
5085 list_for_each_entry_safe(io_request, next,
5086 &queue_group->request_list[path],
5087 request_list_entry) {
5088 scmd = io_request->scmd;
5089 if (!scmd)
5090 continue;
5091
5092 scsi_device = scmd->device->hostdata;
5093 if (scsi_device != device)
5094 continue;
5095
5096 list_del(&io_request->request_list_entry);
5097 set_host_byte(scmd, DID_RESET);
5098 pqi_scsi_done(scmd);
5099 }
5100
5101 spin_unlock_irqrestore(
5102 &queue_group->submit_lock[path], flags);
5103 }
5104 }
5105}
5106
Kevin Barnett061ef062017-05-03 18:53:05 -05005107static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5108 struct pqi_scsi_dev *device)
5109{
5110 while (atomic_read(&device->scsi_cmds_outstanding)) {
5111 pqi_check_ctrl_health(ctrl_info);
5112 if (pqi_ctrl_offline(ctrl_info))
5113 return -ENXIO;
5114 usleep_range(1000, 2000);
5115 }
5116
5117 return 0;
5118}
5119
5120static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5121{
5122 bool io_pending;
5123 unsigned long flags;
5124 struct pqi_scsi_dev *device;
5125
5126 while (1) {
5127 io_pending = false;
5128
5129 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5130 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5131 scsi_device_list_entry) {
5132 if (atomic_read(&device->scsi_cmds_outstanding)) {
5133 io_pending = true;
5134 break;
5135 }
5136 }
5137 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5138 flags);
5139
5140 if (!io_pending)
5141 break;
5142
5143 pqi_check_ctrl_health(ctrl_info);
5144 if (pqi_ctrl_offline(ctrl_info))
5145 return -ENXIO;
5146
5147 usleep_range(1000, 2000);
5148 }
5149
5150 return 0;
5151}
5152
Kevin Barnett14bb2152016-08-31 14:54:35 -05005153static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
Kevin Barnett6c223762016-06-27 16:41:00 -05005154 void *context)
5155{
5156 struct completion *waiting = context;
5157
5158 complete(waiting);
5159}
5160
Kevin Barnett14bb2152016-08-31 14:54:35 -05005161#define PQI_LUN_RESET_TIMEOUT_SECS 10
5162
5163static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5164 struct pqi_scsi_dev *device, struct completion *wait)
5165{
5166 int rc;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005167
5168 while (1) {
5169 if (wait_for_completion_io_timeout(wait,
5170 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5171 rc = 0;
5172 break;
5173 }
5174
5175 pqi_check_ctrl_health(ctrl_info);
5176 if (pqi_ctrl_offline(ctrl_info)) {
Kevin Barnett4e8415e2017-05-03 18:54:18 -05005177 rc = -ENXIO;
Kevin Barnett14bb2152016-08-31 14:54:35 -05005178 break;
5179 }
Kevin Barnett14bb2152016-08-31 14:54:35 -05005180 }
5181
5182 return rc;
5183}
5184
5185static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05005186 struct pqi_scsi_dev *device)
5187{
5188 int rc;
5189 struct pqi_io_request *io_request;
5190 DECLARE_COMPLETION_ONSTACK(wait);
5191 struct pqi_task_management_request *request;
5192
Kevin Barnett6c223762016-06-27 16:41:00 -05005193 io_request = pqi_alloc_io_request(ctrl_info);
Kevin Barnett14bb2152016-08-31 14:54:35 -05005194 io_request->io_complete_callback = pqi_lun_reset_complete;
Kevin Barnett6c223762016-06-27 16:41:00 -05005195 io_request->context = &wait;
5196
5197 request = io_request->iu;
5198 memset(request, 0, sizeof(*request));
5199
5200 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5201 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5202 &request->header.iu_length);
5203 put_unaligned_le16(io_request->index, &request->request_id);
5204 memcpy(request->lun_number, device->scsi3addr,
5205 sizeof(request->lun_number));
5206 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5207
5208 pqi_start_io(ctrl_info,
5209 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5210 io_request);
5211
Kevin Barnett14bb2152016-08-31 14:54:35 -05005212 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5213 if (rc == 0)
Kevin Barnett6c223762016-06-27 16:41:00 -05005214 rc = io_request->status;
Kevin Barnett6c223762016-06-27 16:41:00 -05005215
5216 pqi_free_io_request(io_request);
Kevin Barnett6c223762016-06-27 16:41:00 -05005217
5218 return rc;
5219}
5220
5221/* Performs a reset at the LUN level. */
5222
5223static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5224 struct pqi_scsi_dev *device)
5225{
5226 int rc;
5227
Kevin Barnett14bb2152016-08-31 14:54:35 -05005228 rc = pqi_lun_reset(ctrl_info, device);
Kevin Barnett061ef062017-05-03 18:53:05 -05005229 if (rc == 0)
5230 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
Kevin Barnett6c223762016-06-27 16:41:00 -05005231
Kevin Barnett14bb2152016-08-31 14:54:35 -05005232 return rc == 0 ? SUCCESS : FAILED;
Kevin Barnett6c223762016-06-27 16:41:00 -05005233}
5234
5235static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5236{
5237 int rc;
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005238 struct Scsi_Host *shost;
Kevin Barnett6c223762016-06-27 16:41:00 -05005239 struct pqi_ctrl_info *ctrl_info;
5240 struct pqi_scsi_dev *device;
5241
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005242 shost = scmd->device->host;
5243 ctrl_info = shost_to_hba(shost);
Kevin Barnett6c223762016-06-27 16:41:00 -05005244 device = scmd->device->hostdata;
5245
5246 dev_err(&ctrl_info->pci_dev->dev,
5247 "resetting scsi %d:%d:%d:%d\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005248 shost->host_no, device->bus, device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05005249
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005250 pqi_check_ctrl_health(ctrl_info);
5251 if (pqi_ctrl_offline(ctrl_info)) {
5252 rc = FAILED;
5253 goto out;
5254 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005255
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005256 mutex_lock(&ctrl_info->lun_reset_mutex);
5257
5258 pqi_ctrl_block_requests(ctrl_info);
5259 pqi_ctrl_wait_until_quiesced(ctrl_info);
5260 pqi_fail_io_queued_for_device(ctrl_info, device);
5261 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5262 pqi_device_reset_start(device);
5263 pqi_ctrl_unblock_requests(ctrl_info);
5264
5265 if (rc)
5266 rc = FAILED;
5267 else
5268 rc = pqi_device_reset(ctrl_info, device);
5269
5270 pqi_device_reset_done(device);
5271
5272 mutex_unlock(&ctrl_info->lun_reset_mutex);
5273
5274out:
Kevin Barnett6c223762016-06-27 16:41:00 -05005275 dev_err(&ctrl_info->pci_dev->dev,
5276 "reset of scsi %d:%d:%d:%d: %s\n",
Kevin Barnett7561a7e2017-05-03 18:52:58 -05005277 shost->host_no, device->bus, device->target, device->lun,
Kevin Barnett6c223762016-06-27 16:41:00 -05005278 rc == SUCCESS ? "SUCCESS" : "FAILED");
5279
5280 return rc;
5281}
5282
5283static int pqi_slave_alloc(struct scsi_device *sdev)
5284{
5285 struct pqi_scsi_dev *device;
5286 unsigned long flags;
5287 struct pqi_ctrl_info *ctrl_info;
5288 struct scsi_target *starget;
5289 struct sas_rphy *rphy;
5290
5291 ctrl_info = shost_to_hba(sdev->host);
5292
5293 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5294
5295 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5296 starget = scsi_target(sdev);
5297 rphy = target_to_rphy(starget);
5298 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5299 if (device) {
5300 device->target = sdev_id(sdev);
5301 device->lun = sdev->lun;
5302 device->target_lun_valid = true;
5303 }
5304 } else {
5305 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5306 sdev_id(sdev), sdev->lun);
5307 }
5308
Kevin Barnett94086f52017-05-03 18:54:31 -05005309 if (device) {
Kevin Barnett6c223762016-06-27 16:41:00 -05005310 sdev->hostdata = device;
5311 device->sdev = sdev;
5312 if (device->queue_depth) {
5313 device->advertised_queue_depth = device->queue_depth;
5314 scsi_change_queue_depth(sdev,
5315 device->advertised_queue_depth);
5316 }
5317 }
5318
5319 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5320
5321 return 0;
5322}
5323
Christoph Hellwig52198222016-11-01 08:12:49 -06005324static int pqi_map_queues(struct Scsi_Host *shost)
5325{
5326 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5327
Keith Buschf23f5bec2018-03-27 09:39:06 -06005328 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
Christoph Hellwig52198222016-11-01 08:12:49 -06005329}
5330
Kevin Barnett6c223762016-06-27 16:41:00 -05005331static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5332 void __user *arg)
5333{
5334 struct pci_dev *pci_dev;
5335 u32 subsystem_vendor;
5336 u32 subsystem_device;
5337 cciss_pci_info_struct pciinfo;
5338
5339 if (!arg)
5340 return -EINVAL;
5341
5342 pci_dev = ctrl_info->pci_dev;
5343
5344 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5345 pciinfo.bus = pci_dev->bus->number;
5346 pciinfo.dev_fn = pci_dev->devfn;
5347 subsystem_vendor = pci_dev->subsystem_vendor;
5348 subsystem_device = pci_dev->subsystem_device;
5349 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5350 subsystem_vendor;
5351
5352 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5353 return -EFAULT;
5354
5355 return 0;
5356}
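
/*
 * Illustrative standalone sketch (not driver code): the board_id returned by
 * CCISS_GETPCIINFO packs the PCI subsystem device ID into the upper 16 bits
 * and the subsystem vendor ID into the lower 16 bits.  A quick check of that
 * packing with made-up IDs:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_board_id(uint32_t subsystem_device, uint32_t subsystem_vendor)
{
	return ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
}

int main(void)
{
	/* Hypothetical IDs, for illustration only. */
	uint32_t board_id = pack_board_id(0x1100, 0x9005);

	printf("board_id = 0x%08x\n", (unsigned int)board_id);	/* 0x11009005 */
	return 0;
}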
5357
5358static int pqi_getdrivver_ioctl(void __user *arg)
5359{
5360 u32 version;
5361
5362 if (!arg)
5363 return -EINVAL;
5364
5365 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5366 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5367
5368 if (copy_to_user(arg, &version, sizeof(version)))
5369 return -EFAULT;
5370
5371 return 0;
5372}
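
/*
 * Illustrative standalone sketch (not driver code): CCISS_GETDRIVVER reports
 * the driver version packed into one 32-bit word: major in bits 31-28, minor
 * in bits 27-24, release in bits 23-16, and revision in bits 15-0.  A quick
 * demonstration using made-up version numbers:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_driver_version(uint32_t major, uint32_t minor,
	uint32_t release, uint32_t revision)
{
	return (major << 28) | (minor << 24) | (release << 16) | revision;
}

int main(void)
{
	/* Hypothetical version 1.2.3-45 packs to 0x1203002d. */
	printf("0x%08x\n", (unsigned int)pack_driver_version(1, 2, 3, 45));
	return 0;
}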
5373
5374struct ciss_error_info {
5375 u8 scsi_status;
5376 int command_status;
5377 size_t sense_data_length;
5378};
5379
5380static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5381 struct ciss_error_info *ciss_error_info)
5382{
5383 int ciss_cmd_status;
5384 size_t sense_data_length;
5385
5386 switch (pqi_error_info->data_out_result) {
5387 case PQI_DATA_IN_OUT_GOOD:
5388 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5389 break;
5390 case PQI_DATA_IN_OUT_UNDERFLOW:
5391 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5392 break;
5393 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5394 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5395 break;
5396 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5397 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5398 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5399 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5400 case PQI_DATA_IN_OUT_ERROR:
5401 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5402 break;
5403 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5404 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5405 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5406 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5407 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5408 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5409 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5410 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5411 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5412 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5413 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5414 break;
5415 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5416 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5417 break;
5418 case PQI_DATA_IN_OUT_ABORTED:
5419 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5420 break;
5421 case PQI_DATA_IN_OUT_TIMEOUT:
5422 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5423 break;
5424 default:
5425 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5426 break;
5427 }
5428
5429 sense_data_length =
5430 get_unaligned_le16(&pqi_error_info->sense_data_length);
5431 if (sense_data_length == 0)
5432 sense_data_length =
5433 get_unaligned_le16(&pqi_error_info->response_data_length);
5435 if (sense_data_length > sizeof(pqi_error_info->data))
5436 sense_data_length = sizeof(pqi_error_info->data);
5437
5438 ciss_error_info->scsi_status = pqi_error_info->status;
5439 ciss_error_info->command_status = ciss_cmd_status;
5440 ciss_error_info->sense_data_length = sense_data_length;
5441}
5442
5443static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5444{
5445 int rc;
5446 char *kernel_buffer = NULL;
5447 u16 iu_length;
5448 size_t sense_data_length;
5449 IOCTL_Command_struct iocommand;
5450 struct pqi_raid_path_request request;
5451 struct pqi_raid_error_info pqi_error_info;
5452 struct ciss_error_info ciss_error_info;
5453
5454 if (pqi_ctrl_offline(ctrl_info))
5455 return -ENXIO;
5456 if (!arg)
5457 return -EINVAL;
5458 if (!capable(CAP_SYS_RAWIO))
5459 return -EPERM;
5460 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5461 return -EFAULT;
5462 if (iocommand.buf_size < 1 &&
5463 iocommand.Request.Type.Direction != XFER_NONE)
5464 return -EINVAL;
5465 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5466 return -EINVAL;
5467 if (iocommand.Request.Type.Type != TYPE_CMD)
5468 return -EINVAL;
5469
5470 switch (iocommand.Request.Type.Direction) {
5471 case XFER_NONE:
5472 case XFER_WRITE:
5473 case XFER_READ:
Kevin Barnett41555d52017-08-10 13:46:51 -05005474 case XFER_READ | XFER_WRITE:
Kevin Barnett6c223762016-06-27 16:41:00 -05005475 break;
5476 default:
5477 return -EINVAL;
5478 }
5479
5480 if (iocommand.buf_size > 0) {
5481 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5482 if (!kernel_buffer)
5483 return -ENOMEM;
5484 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5485 if (copy_from_user(kernel_buffer, iocommand.buf,
5486 iocommand.buf_size)) {
5487 rc = -EFAULT;
5488 goto out;
5489 }
5490 } else {
5491 memset(kernel_buffer, 0, iocommand.buf_size);
5492 }
5493 }
5494
5495 memset(&request, 0, sizeof(request));
5496
5497 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5498 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5499 PQI_REQUEST_HEADER_LENGTH;
5500 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5501 sizeof(request.lun_number));
5502 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5503 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5504
5505 switch (iocommand.Request.Type.Direction) {
5506 case XFER_NONE:
5507 request.data_direction = SOP_NO_DIRECTION_FLAG;
5508 break;
5509 case XFER_WRITE:
5510 request.data_direction = SOP_WRITE_FLAG;
5511 break;
5512 case XFER_READ:
5513 request.data_direction = SOP_READ_FLAG;
5514 break;
Kevin Barnett41555d52017-08-10 13:46:51 -05005515 case XFER_READ | XFER_WRITE:
5516 request.data_direction = SOP_BIDIRECTIONAL;
5517 break;
Kevin Barnett6c223762016-06-27 16:41:00 -05005518 }
5519
5520 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5521
5522 if (iocommand.buf_size > 0) {
5523 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5524
5525 rc = pqi_map_single(ctrl_info->pci_dev,
5526 &request.sg_descriptors[0], kernel_buffer,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02005527 iocommand.buf_size, DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005528 if (rc)
5529 goto out;
5530
5531 iu_length += sizeof(request.sg_descriptors[0]);
5532 }
5533
5534 put_unaligned_le16(iu_length, &request.header.iu_length);
5535
5536 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5537 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5538
5539 if (iocommand.buf_size > 0)
5540 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
Christoph Hellwig6917a9c2018-10-11 09:47:59 +02005541 DMA_BIDIRECTIONAL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005542
5543 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5544
5545 if (rc == 0) {
5546 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5547 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5548 iocommand.error_info.CommandStatus =
5549 ciss_error_info.command_status;
5550 sense_data_length = ciss_error_info.sense_data_length;
5551 if (sense_data_length) {
5552 if (sense_data_length >
5553 sizeof(iocommand.error_info.SenseInfo))
5554 sense_data_length =
5555 sizeof(iocommand.error_info.SenseInfo);
5556 memcpy(iocommand.error_info.SenseInfo,
5557 pqi_error_info.data, sense_data_length);
5558 iocommand.error_info.SenseLen = sense_data_length;
5559 }
5560 }
5561
5562 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5563 rc = -EFAULT;
5564 goto out;
5565 }
5566
5567 if (rc == 0 && iocommand.buf_size > 0 &&
5568 (iocommand.Request.Type.Direction & XFER_READ)) {
5569 if (copy_to_user(iocommand.buf, kernel_buffer,
5570 iocommand.buf_size)) {
5571 rc = -EFAULT;
5572 }
5573 }
5574
5575out:
5576 kfree(kernel_buffer);
5577
5578 return rc;
5579}
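
/*
 * Illustrative standalone sketch (not driver code): one plausible way for
 * user space to exercise the CCISS_PASSTHRU path above is to fill in an
 * IOCTL_Command_struct from the cciss passthrough header and issue the ioctl
 * against a SCSI device node owned by this driver.  The device path, the
 * all-zero LUN address (conventionally the controller itself), and the
 * standard INQUIRY CDB are assumptions for illustration; the caller needs
 * CAP_SYS_RAWIO, and error handling is minimal.
 */
#include <fcntl.h>
#include <linux/cciss_ioctl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned char inq[96];
	IOCTL_Command_struct ioc;
	int fd;

	fd = open("/dev/sda", O_RDWR);	/* assumed smartpqi-owned device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ioc, 0, sizeof(ioc));	/* zeroed LUN_info addresses the controller */
	memset(inq, 0, sizeof(inq));

	ioc.Request.CDBLen = 6;
	ioc.Request.Type.Type = TYPE_CMD;
	ioc.Request.Type.Attribute = ATTR_SIMPLE;
	ioc.Request.Type.Direction = XFER_READ;
	ioc.Request.CDB[0] = 0x12;		/* INQUIRY */
	ioc.Request.CDB[4] = sizeof(inq);	/* allocation length */
	ioc.buf_size = sizeof(inq);
	ioc.buf = inq;

	if (ioctl(fd, CCISS_PASSTHRU, &ioc) < 0)
		perror("CCISS_PASSTHRU");
	else
		printf("vendor: %.8s product: %.16s\n", &inq[8], &inq[16]);

	close(fd);
	return 0;
}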
5580
5581static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5582{
5583 int rc;
5584 struct pqi_ctrl_info *ctrl_info;
5585
5586 ctrl_info = shost_to_hba(sdev->host);
5587
5588 switch (cmd) {
5589 case CCISS_DEREGDISK:
5590 case CCISS_REGNEWDISK:
5591 case CCISS_REGNEWD:
5592 rc = pqi_scan_scsi_devices(ctrl_info);
5593 break;
5594 case CCISS_GETPCIINFO:
5595 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5596 break;
5597 case CCISS_GETDRIVVER:
5598 rc = pqi_getdrivver_ioctl(arg);
5599 break;
5600 case CCISS_PASSTHRU:
5601 rc = pqi_passthru_ioctl(ctrl_info, arg);
5602 break;
5603 default:
5604 rc = -EINVAL;
5605 break;
5606 }
5607
5608 return rc;
5609}
5610
5611static ssize_t pqi_version_show(struct device *dev,
5612 struct device_attribute *attr, char *buffer)
5613{
5614 ssize_t count = 0;
5615 struct Scsi_Host *shost;
5616 struct pqi_ctrl_info *ctrl_info;
5617
5618 shost = class_to_shost(dev);
5619 ctrl_info = shost_to_hba(shost);
5620
5621 count += snprintf(buffer + count, PAGE_SIZE - count,
5622 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5623
5624 count += snprintf(buffer + count, PAGE_SIZE - count,
5625 "firmware: %s\n", ctrl_info->firmware_version);
5626
5627 return count;
5628}
5629
5630static ssize_t pqi_host_rescan_store(struct device *dev,
5631 struct device_attribute *attr, const char *buffer, size_t count)
5632{
5633 struct Scsi_Host *shost = class_to_shost(dev);
5634
5635 pqi_scan_start(shost);
5636
5637 return count;
5638}
5639
Kevin Barnett3c509762017-05-03 18:54:37 -05005640static ssize_t pqi_lockup_action_show(struct device *dev,
5641 struct device_attribute *attr, char *buffer)
5642{
5643 int count = 0;
5644 unsigned int i;
5645
5646 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5647 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5648 count += snprintf(buffer + count, PAGE_SIZE - count,
5649 "[%s] ", pqi_lockup_actions[i].name);
5650 else
5651 count += snprintf(buffer + count, PAGE_SIZE - count,
5652 "%s ", pqi_lockup_actions[i].name);
5653 }
5654
5655 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5656
5657 return count;
5658}
5659
5660static ssize_t pqi_lockup_action_store(struct device *dev,
5661 struct device_attribute *attr, const char *buffer, size_t count)
5662{
5663 unsigned int i;
5664 char *action_name;
5665 char action_name_buffer[32];
5666
5667 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5668 action_name = strstrip(action_name_buffer);
5669
5670 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5671 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
5672 pqi_lockup_action = pqi_lockup_actions[i].action;
5673 return count;
5674 }
5675 }
5676
5677 return -EINVAL;
5678}
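
/*
 * Usage note: reading the lockup_action attribute lists every supported
 * action with the currently selected one in square brackets; writing one of
 * the listed names (surrounding whitespace is stripped) selects that action,
 * and anything else is rejected with -EINVAL.
 */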
5679
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05005680static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5681static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
Kevin Barnett3c509762017-05-03 18:54:37 -05005682static DEVICE_ATTR(lockup_action, 0644,
5683 pqi_lockup_action_show, pqi_lockup_action_store);
Kevin Barnett6c223762016-06-27 16:41:00 -05005684
5685static struct device_attribute *pqi_shost_attrs[] = {
5686 &dev_attr_version,
5687 &dev_attr_rescan,
Kevin Barnett3c509762017-05-03 18:54:37 -05005688 &dev_attr_lockup_action,
Kevin Barnett6c223762016-06-27 16:41:00 -05005689 NULL
5690};
5691
5692static ssize_t pqi_sas_address_show(struct device *dev,
5693 struct device_attribute *attr, char *buffer)
5694{
5695 struct pqi_ctrl_info *ctrl_info;
5696 struct scsi_device *sdev;
5697 struct pqi_scsi_dev *device;
5698 unsigned long flags;
5699 u64 sas_address;
5700
5701 sdev = to_scsi_device(dev);
5702 ctrl_info = shost_to_hba(sdev->host);
5703
5704 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5705
5706 device = sdev->hostdata;
5707 if (pqi_is_logical_device(device)) {
5708 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5709 flags);
5710 return -ENODEV;
5711 }
5712 sas_address = device->sas_address;
5713
5714 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5715
5716 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5717}
5718
5719static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5720 struct device_attribute *attr, char *buffer)
5721{
5722 struct pqi_ctrl_info *ctrl_info;
5723 struct scsi_device *sdev;
5724 struct pqi_scsi_dev *device;
5725 unsigned long flags;
5726
5727 sdev = to_scsi_device(dev);
5728 ctrl_info = shost_to_hba(sdev->host);
5729
5730 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5731
5732 device = sdev->hostdata;
Kevin Barnett588a63fe2017-05-03 18:55:25 -05005733 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
Kevin Barnett6c223762016-06-27 16:41:00 -05005734 buffer[1] = '\n';
5735 buffer[2] = '\0';
5736
5737 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5738
5739 return 2;
5740}
5741
Kevin Barnetta9f93392017-05-03 18:55:31 -05005742static ssize_t pqi_raid_level_show(struct device *dev,
5743 struct device_attribute *attr, char *buffer)
5744{
5745 struct pqi_ctrl_info *ctrl_info;
5746 struct scsi_device *sdev;
5747 struct pqi_scsi_dev *device;
5748 unsigned long flags;
5749 char *raid_level;
5750
5751 sdev = to_scsi_device(dev);
5752 ctrl_info = shost_to_hba(sdev->host);
5753
5754 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5755
5756 device = sdev->hostdata;
5757
5758 if (pqi_is_logical_device(device))
5759 raid_level = pqi_raid_level_to_string(device->raid_level);
5760 else
5761 raid_level = "N/A";
5762
5763 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5764
5765 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
5766}
5767
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05005768static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5769static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
Kevin Barnett6c223762016-06-27 16:41:00 -05005770 pqi_ssd_smart_path_enabled_show, NULL);
Kevin Barnetta9f93392017-05-03 18:55:31 -05005771static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
Kevin Barnett6c223762016-06-27 16:41:00 -05005772
5773static struct device_attribute *pqi_sdev_attrs[] = {
5774 &dev_attr_sas_address,
5775 &dev_attr_ssd_smart_path_enabled,
Kevin Barnetta9f93392017-05-03 18:55:31 -05005776 &dev_attr_raid_level,
Kevin Barnett6c223762016-06-27 16:41:00 -05005777 NULL
5778};
5779
5780static struct scsi_host_template pqi_driver_template = {
5781 .module = THIS_MODULE,
5782 .name = DRIVER_NAME_SHORT,
5783 .proc_name = DRIVER_NAME_SHORT,
5784 .queuecommand = pqi_scsi_queue_command,
5785 .scan_start = pqi_scan_start,
5786 .scan_finished = pqi_scan_finished,
5787 .this_id = -1,
Kevin Barnett6c223762016-06-27 16:41:00 -05005788 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5789 .ioctl = pqi_ioctl,
5790 .slave_alloc = pqi_slave_alloc,
Christoph Hellwig52198222016-11-01 08:12:49 -06005791 .map_queues = pqi_map_queues,
Kevin Barnett6c223762016-06-27 16:41:00 -05005792 .sdev_attrs = pqi_sdev_attrs,
5793 .shost_attrs = pqi_shost_attrs,
5794};
5795
5796static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5797{
5798 int rc;
5799 struct Scsi_Host *shost;
5800
5801 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5802 if (!shost) {
5803 dev_err(&ctrl_info->pci_dev->dev,
5804 "scsi_host_alloc failed for controller %u\n",
5805 ctrl_info->ctrl_id);
5806 return -ENOMEM;
5807 }
5808
5809 shost->io_port = 0;
5810 shost->n_io_port = 0;
5811 shost->this_id = -1;
5812 shost->max_channel = PQI_MAX_BUS;
5813 shost->max_cmd_len = MAX_COMMAND_SIZE;
5814 shost->max_lun = ~0;
5815 shost->max_id = ~0;
5816 shost->max_sectors = ctrl_info->max_sectors;
5817 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5818 shost->cmd_per_lun = shost->can_queue;
5819 shost->sg_tablesize = ctrl_info->sg_tablesize;
5820 shost->transportt = pqi_sas_transport_template;
Christoph Hellwig52198222016-11-01 08:12:49 -06005821 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05005822 shost->unique_id = shost->irq;
5823 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5824 shost->hostdata[0] = (unsigned long)ctrl_info;
5825
5826 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5827 if (rc) {
5828 dev_err(&ctrl_info->pci_dev->dev,
5829 "scsi_add_host failed for controller %u\n",
5830 ctrl_info->ctrl_id);
5831 goto free_host;
5832 }
5833
5834 rc = pqi_add_sas_host(shost, ctrl_info);
5835 if (rc) {
5836 dev_err(&ctrl_info->pci_dev->dev,
5837 "add SAS host failed for controller %u\n",
5838 ctrl_info->ctrl_id);
5839 goto remove_host;
5840 }
5841
5842 ctrl_info->scsi_host = shost;
5843
5844 return 0;
5845
5846remove_host:
5847 scsi_remove_host(shost);
5848free_host:
5849 scsi_host_put(shost);
5850
5851 return rc;
5852}
5853
5854static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5855{
5856 struct Scsi_Host *shost;
5857
5858 pqi_delete_sas_host(ctrl_info);
5859
5860 shost = ctrl_info->scsi_host;
5861 if (!shost)
5862 return;
5863
5864 scsi_remove_host(shost);
5865 scsi_host_put(shost);
5866}
5867
Kevin Barnett336b6812017-08-10 13:46:39 -05005868static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
5869{
5870 int rc = 0;
5871 struct pqi_device_registers __iomem *pqi_registers;
5872 unsigned long timeout;
5873 unsigned int timeout_msecs;
5874 union pqi_reset_register reset_reg;
Kevin Barnett6c223762016-06-27 16:41:00 -05005875
Kevin Barnett336b6812017-08-10 13:46:39 -05005876 pqi_registers = ctrl_info->pqi_registers;
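	/*
	 * max_reset_timeout is evidently reported in 100-millisecond units,
	 * hence the conversion to milliseconds before computing the jiffies
	 * deadline below.
	 */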
5877 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
5878 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
5879
5880 while (1) {
5881 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
5882 reset_reg.all_bits = readl(&pqi_registers->device_reset);
5883 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
5884 break;
5885 pqi_check_ctrl_health(ctrl_info);
5886 if (pqi_ctrl_offline(ctrl_info)) {
5887 rc = -ENXIO;
5888 break;
5889 }
5890 if (time_after(jiffies, timeout)) {
5891 rc = -ETIMEDOUT;
5892 break;
5893 }
5894 }
5895
5896 return rc;
5897}
Kevin Barnett6c223762016-06-27 16:41:00 -05005898
5899static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5900{
5901 int rc;
Kevin Barnett336b6812017-08-10 13:46:39 -05005902 union pqi_reset_register reset_reg;
Kevin Barnett6c223762016-06-27 16:41:00 -05005903
Kevin Barnett336b6812017-08-10 13:46:39 -05005904 if (ctrl_info->pqi_reset_quiesce_supported) {
5905 rc = sis_pqi_reset_quiesce(ctrl_info);
5906 if (rc) {
5907 dev_err(&ctrl_info->pci_dev->dev,
5908 "PQI reset failed during quiesce with error %d\n",
5909 rc);
5910 return rc;
5911 }
5912 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005913
Kevin Barnett336b6812017-08-10 13:46:39 -05005914 reset_reg.all_bits = 0;
5915 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
5916 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
Kevin Barnett6c223762016-06-27 16:41:00 -05005917
Kevin Barnett336b6812017-08-10 13:46:39 -05005918 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
5919
5920 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05005921 if (rc)
5922 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett336b6812017-08-10 13:46:39 -05005923 "PQI reset failed with error %d\n", rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05005924
5925 return rc;
5926}
5927
5928static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5929{
5930 int rc;
5931 struct bmic_identify_controller *identify;
5932
5933 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5934 if (!identify)
5935 return -ENOMEM;
5936
5937 rc = pqi_identify_controller(ctrl_info, identify);
5938 if (rc)
5939 goto out;
5940
5941 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5942 sizeof(identify->firmware_version));
5943 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5944 snprintf(ctrl_info->firmware_version +
5945 strlen(ctrl_info->firmware_version),
5946 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
5947 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5948
5949out:
5950 kfree(identify);
5951
5952 return rc;
5953}
5954
Kevin Barnettb212c252018-12-07 16:28:10 -06005955struct pqi_config_table_section_info {
5956 struct pqi_ctrl_info *ctrl_info;
5957 void *section;
5958 u32 section_offset;
5959 void __iomem *section_iomem_addr;
5960};
5961
5962static inline bool pqi_is_firmware_feature_supported(
5963 struct pqi_config_table_firmware_features *firmware_features,
5964 unsigned int bit_position)
5965{
5966 unsigned int byte_index;
5967
5968 byte_index = bit_position / BITS_PER_BYTE;
5969
5970 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
5971 return false;
5972
5973 return firmware_features->features_supported[byte_index] &
5974 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
5975}
5976
5977static inline bool pqi_is_firmware_feature_enabled(
5978 struct pqi_config_table_firmware_features *firmware_features,
5979 void __iomem *firmware_features_iomem_addr,
5980 unsigned int bit_position)
5981{
5982 unsigned int byte_index;
5983 u8 __iomem *features_enabled_iomem_addr;
5984
5985 byte_index = (bit_position / BITS_PER_BYTE) +
5986 (le16_to_cpu(firmware_features->num_elements) * 2);
5987
5988 features_enabled_iomem_addr = firmware_features_iomem_addr +
5989 offsetof(struct pqi_config_table_firmware_features,
5990 features_supported) + byte_index;
5991
5992 return *((__force u8 *)features_enabled_iomem_addr) &
5993 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
5994}
5995
5996static inline void pqi_request_firmware_feature(
5997 struct pqi_config_table_firmware_features *firmware_features,
5998 unsigned int bit_position)
5999{
6000 unsigned int byte_index;
6001
6002 byte_index = (bit_position / BITS_PER_BYTE) +
6003 le16_to_cpu(firmware_features->num_elements);
6004
6005 firmware_features->features_supported[byte_index] |=
6006 (1 << (bit_position % BITS_PER_BYTE));
6007}
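
/*
 * Illustrative standalone sketch (not driver code): the firmware-features
 * section is treated above as three consecutive byte arrays of num_elements
 * bytes each - features supported (read-only), features requested by the
 * host, and features enabled by the firmware.  Each helper computes a byte
 * index into the relevant array and then tests or sets a single bit.  A
 * userspace model of that layout, names invented:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_ELEMENTS	2	/* bytes per array, i.e. up to 16 feature bits */

struct feature_table {
	/* [0] supported, [1] requested, [2] enabled - laid out back to back */
	uint8_t bytes[3 * NUM_ELEMENTS];
};

static bool feature_supported(const struct feature_table *t, unsigned int bit)
{
	return t->bytes[bit / 8] & (1u << (bit % 8));
}

static void feature_request(struct feature_table *t, unsigned int bit)
{
	t->bytes[NUM_ELEMENTS + bit / 8] |= 1u << (bit % 8);
}

static bool feature_enabled(const struct feature_table *t, unsigned int bit)
{
	return t->bytes[2 * NUM_ELEMENTS + bit / 8] & (1u << (bit % 8));
}

int main(void)
{
	struct feature_table t;

	memset(&t, 0, sizeof(t));
	t.bytes[1] = 0x02;			/* firmware says bit 9 is supported */
	if (feature_supported(&t, 9))
		feature_request(&t, 9);		/* host asks for it */
	t.bytes[2 * NUM_ELEMENTS + 1] = 0x02;	/* firmware then reports it enabled */
	printf("bit 9 enabled: %d\n", feature_enabled(&t, 9));
	return 0;
}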
6008
6009static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6010 u16 first_section, u16 last_section)
6011{
6012 struct pqi_vendor_general_request request;
6013
6014 memset(&request, 0, sizeof(request));
6015
6016 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6017 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6018 &request.header.iu_length);
6019 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6020 &request.function_code);
6021 put_unaligned_le16(first_section,
6022 &request.data.config_table_update.first_section);
6023 put_unaligned_le16(last_section,
6024 &request.data.config_table_update.last_section);
6025
6026 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6027 0, NULL, NO_TIMEOUT);
6028}
6029
6030static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6031 struct pqi_config_table_firmware_features *firmware_features,
6032 void __iomem *firmware_features_iomem_addr)
6033{
6034 void *features_requested;
6035 void __iomem *features_requested_iomem_addr;
6036
6037 features_requested = firmware_features->features_supported +
6038 le16_to_cpu(firmware_features->num_elements);
6039
6040 features_requested_iomem_addr = firmware_features_iomem_addr +
6041 (features_requested - (void *)firmware_features);
6042
6043 memcpy_toio(features_requested_iomem_addr, features_requested,
6044 le16_to_cpu(firmware_features->num_elements));
6045
6046 return pqi_config_table_update(ctrl_info,
6047 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6048 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6049}
6050
6051struct pqi_firmware_feature {
6052 char *feature_name;
6053 unsigned int feature_bit;
6054 bool supported;
6055 bool enabled;
6056 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6057 struct pqi_firmware_feature *firmware_feature);
6058};
6059
6060static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6061 struct pqi_firmware_feature *firmware_feature)
6062{
6063 if (!firmware_feature->supported) {
6064 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6065 firmware_feature->feature_name);
6066 return;
6067 }
6068
6069 if (firmware_feature->enabled) {
6070 dev_info(&ctrl_info->pci_dev->dev,
6071 "%s enabled\n", firmware_feature->feature_name);
6072 return;
6073 }
6074
6075 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6076 firmware_feature->feature_name);
6077}
6078
6079static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6080 struct pqi_firmware_feature *firmware_feature)
6081{
6082 if (firmware_feature->feature_status)
6083 firmware_feature->feature_status(ctrl_info, firmware_feature);
6084}
6085
6086static DEFINE_MUTEX(pqi_firmware_features_mutex);
6087
6088static struct pqi_firmware_feature pqi_firmware_features[] = {
6089 {
6090 .feature_name = "Online Firmware Activation",
6091 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6092 .feature_status = pqi_firmware_feature_status,
6093 },
6094 {
6095 .feature_name = "Serial Management Protocol",
6096 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6097 .feature_status = pqi_firmware_feature_status,
6098 },
6099};
6100
6101static void pqi_process_firmware_features(
6102 struct pqi_config_table_section_info *section_info)
6103{
6104 int rc;
6105 struct pqi_ctrl_info *ctrl_info;
6106 struct pqi_config_table_firmware_features *firmware_features;
6107 void __iomem *firmware_features_iomem_addr;
6108 unsigned int i;
6109 unsigned int num_features_supported;
6110
6111 ctrl_info = section_info->ctrl_info;
6112 firmware_features = section_info->section;
6113 firmware_features_iomem_addr = section_info->section_iomem_addr;
6114
6115 for (i = 0, num_features_supported = 0;
6116 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6117 if (pqi_is_firmware_feature_supported(firmware_features,
6118 pqi_firmware_features[i].feature_bit)) {
6119 pqi_firmware_features[i].supported = true;
6120 num_features_supported++;
6121 } else {
6122 pqi_firmware_feature_update(ctrl_info,
6123 &pqi_firmware_features[i]);
6124 }
6125 }
6126
6127 if (num_features_supported == 0)
6128 return;
6129
6130 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6131 if (!pqi_firmware_features[i].supported)
6132 continue;
6133 pqi_request_firmware_feature(firmware_features,
6134 pqi_firmware_features[i].feature_bit);
6135 }
6136
6137 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6138 firmware_features_iomem_addr);
6139 if (rc) {
6140 dev_err(&ctrl_info->pci_dev->dev,
6141 "failed to enable firmware features in PQI configuration table\n");
6142 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6143 if (!pqi_firmware_features[i].supported)
6144 continue;
6145 pqi_firmware_feature_update(ctrl_info,
6146 &pqi_firmware_features[i]);
6147 }
6148 return;
6149 }
6150
6151 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6152 if (!pqi_firmware_features[i].supported)
6153 continue;
6154 if (pqi_is_firmware_feature_enabled(firmware_features,
6155 firmware_features_iomem_addr,
6156 pqi_firmware_features[i].feature_bit))
6157 pqi_firmware_features[i].enabled = true;
6158 pqi_firmware_feature_update(ctrl_info,
6159 &pqi_firmware_features[i]);
6160 }
6161}
6162
6163static void pqi_init_firmware_features(void)
6164{
6165 unsigned int i;
6166
6167 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6168 pqi_firmware_features[i].supported = false;
6169 pqi_firmware_features[i].enabled = false;
6170 }
6171}
6172
6173static void pqi_process_firmware_features_section(
6174 struct pqi_config_table_section_info *section_info)
6175{
6176 mutex_lock(&pqi_firmware_features_mutex);
6177 pqi_init_firmware_features();
6178 pqi_process_firmware_features(section_info);
6179 mutex_unlock(&pqi_firmware_features_mutex);
6180}
6181
Kevin Barnett98f87662017-05-03 18:53:11 -05006182static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
6183{
6184 u32 table_length;
6185 u32 section_offset;
6186 void __iomem *table_iomem_addr;
6187 struct pqi_config_table *config_table;
6188 struct pqi_config_table_section_header *section;
Kevin Barnettb212c252018-12-07 16:28:10 -06006189 struct pqi_config_table_section_info section_info;
Kevin Barnett98f87662017-05-03 18:53:11 -05006190
6191 table_length = ctrl_info->config_table_length;
Kevin Barnettb212c252018-12-07 16:28:10 -06006192 if (table_length == 0)
6193 return 0;
Kevin Barnett98f87662017-05-03 18:53:11 -05006194
6195 config_table = kmalloc(table_length, GFP_KERNEL);
6196 if (!config_table) {
6197 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006198 "failed to allocate memory for PQI configuration table\n");
Kevin Barnett98f87662017-05-03 18:53:11 -05006199 return -ENOMEM;
6200 }
6201
6202 /*
6203 * Copy the config table contents from I/O memory space into the
6204 * temporary buffer.
6205 */
6206 table_iomem_addr = ctrl_info->iomem_base +
6207 ctrl_info->config_table_offset;
6208 memcpy_fromio(config_table, table_iomem_addr, table_length);
6209
Kevin Barnettb212c252018-12-07 16:28:10 -06006210 section_info.ctrl_info = ctrl_info;
Kevin Barnett98f87662017-05-03 18:53:11 -05006211 section_offset =
6212 get_unaligned_le32(&config_table->first_section_offset);
6213
6214 while (section_offset) {
6215 section = (void *)config_table + section_offset;
6216
Kevin Barnettb212c252018-12-07 16:28:10 -06006217 section_info.section = section;
6218 section_info.section_offset = section_offset;
6219 section_info.section_iomem_addr =
6220 table_iomem_addr + section_offset;
6221
Kevin Barnett98f87662017-05-03 18:53:11 -05006222 switch (get_unaligned_le16(&section->section_id)) {
Kevin Barnettb212c252018-12-07 16:28:10 -06006223 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
6224 pqi_process_firmware_features_section(&section_info);
6225 break;
Kevin Barnett98f87662017-05-03 18:53:11 -05006226 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
Kevin Barnett5a259e32017-05-03 18:55:43 -05006227 if (pqi_disable_heartbeat)
6228 dev_warn(&ctrl_info->pci_dev->dev,
6229 "heartbeat disabled by module parameter\n");
6230 else
6231 ctrl_info->heartbeat_counter =
6232 table_iomem_addr +
6233 section_offset +
6234 offsetof(
6235 struct pqi_config_table_heartbeat,
6236 heartbeat_counter);
Kevin Barnett98f87662017-05-03 18:53:11 -05006237 break;
6238 }
6239
6240 section_offset =
6241 get_unaligned_le16(&section->next_section_offset);
6242 }
6243
6244 kfree(config_table);
6245
6246 return 0;
6247}
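
/*
 * Illustrative standalone sketch (not driver code): the PQI configuration
 * table is a blob whose sections are chained by offsets relative to the start
 * of the table - first_section_offset points at the first section header, and
 * each header's next_section_offset points at the next, with zero terminating
 * the chain.  A compact userspace walk over a fabricated table, names and
 * layout invented for illustration:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct table_header {
	uint32_t first_section_offset;
};

struct section_header {
	uint16_t section_id;
	uint16_t next_section_offset;
};

int main(void)
{
	uint8_t blob[64];
	struct table_header th = { .first_section_offset = 8 };
	struct section_header s1 = { .section_id = 1, .next_section_offset = 24 };
	struct section_header s2 = { .section_id = 4, .next_section_offset = 0 };
	uint32_t offset;

	memset(blob, 0, sizeof(blob));
	memcpy(blob, &th, sizeof(th));
	memcpy(blob + 8, &s1, sizeof(s1));
	memcpy(blob + 24, &s2, sizeof(s2));

	memcpy(&offset, blob, sizeof(offset));	/* first_section_offset */
	while (offset) {
		struct section_header section;

		memcpy(&section, blob + offset, sizeof(section));
		printf("section id %u at offset %u\n",
		       (unsigned int)section.section_id, (unsigned int)offset);
		offset = section.next_section_offset;
	}
	return 0;
}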
6248
Kevin Barnett162d7752017-05-03 18:52:46 -05006249/* Switches the controller from PQI mode back into SIS mode. */
6250
6251static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
6252{
6253 int rc;
6254
Kevin Barnett061ef062017-05-03 18:53:05 -05006255 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
Kevin Barnett162d7752017-05-03 18:52:46 -05006256 rc = pqi_reset(ctrl_info);
6257 if (rc)
6258 return rc;
Kevin Barnett4f078e22017-08-10 13:46:57 -05006259 rc = sis_reenable_sis_mode(ctrl_info);
6260 if (rc) {
6261 dev_err(&ctrl_info->pci_dev->dev,
6262 "re-enabling SIS mode failed with error %d\n", rc);
6263 return rc;
6264 }
Kevin Barnett162d7752017-05-03 18:52:46 -05006265 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6266
6267 return 0;
6268}
6269
6270/*
6271 * If the controller isn't already in SIS mode, this function forces it into
6272 * SIS mode.
6273 */
6274
6275static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
Kevin Barnettff6abb72016-08-31 14:54:41 -05006276{
6277 if (!sis_is_firmware_running(ctrl_info))
6278 return -ENXIO;
6279
Kevin Barnett162d7752017-05-03 18:52:46 -05006280 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
6281 return 0;
6282
6283 if (sis_is_kernel_up(ctrl_info)) {
6284 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6285 return 0;
Kevin Barnettff6abb72016-08-31 14:54:41 -05006286 }
6287
Kevin Barnett162d7752017-05-03 18:52:46 -05006288 return pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnettff6abb72016-08-31 14:54:41 -05006289}
6290
Kevin Barnett6c223762016-06-27 16:41:00 -05006291static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6292{
6293 int rc;
6294
Kevin Barnett162d7752017-05-03 18:52:46 -05006295 rc = pqi_force_sis_mode(ctrl_info);
6296 if (rc)
6297 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05006298
6299 /*
6300 * Wait until the controller is ready to start accepting SIS
6301 * commands.
6302 */
6303 rc = sis_wait_for_ctrl_ready(ctrl_info);
Kevin Barnett8845fdf2017-05-03 18:53:36 -05006304 if (rc)
Kevin Barnett6c223762016-06-27 16:41:00 -05006305 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05006306
6307 /*
6308 * Get the controller properties. This allows us to determine
6309 * whether or not it supports PQI mode.
6310 */
6311 rc = sis_get_ctrl_properties(ctrl_info);
6312 if (rc) {
6313 dev_err(&ctrl_info->pci_dev->dev,
6314 "error obtaining controller properties\n");
6315 return rc;
6316 }
6317
6318 rc = sis_get_pqi_capabilities(ctrl_info);
6319 if (rc) {
6320 dev_err(&ctrl_info->pci_dev->dev,
6321 "error obtaining controller capabilities\n");
6322 return rc;
6323 }
6324
Kevin Barnettd727a772017-05-03 18:54:25 -05006325 if (reset_devices) {
6326 if (ctrl_info->max_outstanding_requests >
6327 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6328 ctrl_info->max_outstanding_requests =
6329 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6330 } else {
6331 if (ctrl_info->max_outstanding_requests >
6332 PQI_MAX_OUTSTANDING_REQUESTS)
6333 ctrl_info->max_outstanding_requests =
6334 PQI_MAX_OUTSTANDING_REQUESTS;
6335 }
Kevin Barnett6c223762016-06-27 16:41:00 -05006336
6337 pqi_calculate_io_resources(ctrl_info);
6338
6339 rc = pqi_alloc_error_buffer(ctrl_info);
6340 if (rc) {
6341 dev_err(&ctrl_info->pci_dev->dev,
6342 "failed to allocate PQI error buffer\n");
6343 return rc;
6344 }
6345
6346 /*
6347 * If the function we are about to call succeeds, the
6348 * controller will transition from legacy SIS mode
6349 * into PQI mode.
6350 */
6351 rc = sis_init_base_struct_addr(ctrl_info);
6352 if (rc) {
6353 dev_err(&ctrl_info->pci_dev->dev,
6354 "error initializing PQI mode\n");
6355 return rc;
6356 }
6357
6358 /* Wait for the controller to complete the SIS -> PQI transition. */
6359 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6360 if (rc) {
6361 dev_err(&ctrl_info->pci_dev->dev,
6362 "transition to PQI mode failed\n");
6363 return rc;
6364 }
6365
6366 /* From here on, we are running in PQI mode. */
6367 ctrl_info->pqi_mode_enabled = true;
Kevin Barnettff6abb72016-08-31 14:54:41 -05006368 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
Kevin Barnett6c223762016-06-27 16:41:00 -05006369
6370 rc = pqi_alloc_admin_queues(ctrl_info);
6371 if (rc) {
6372 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006373 "failed to allocate admin queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006374 return rc;
6375 }
6376
6377 rc = pqi_create_admin_queues(ctrl_info);
6378 if (rc) {
6379 dev_err(&ctrl_info->pci_dev->dev,
6380 "error creating admin queues\n");
6381 return rc;
6382 }
6383
6384 rc = pqi_report_device_capability(ctrl_info);
6385 if (rc) {
6386 dev_err(&ctrl_info->pci_dev->dev,
6387 "obtaining device capability failed\n");
6388 return rc;
6389 }
6390
6391 rc = pqi_validate_device_capability(ctrl_info);
6392 if (rc)
6393 return rc;
6394
6395 pqi_calculate_queue_resources(ctrl_info);
6396
6397 rc = pqi_enable_msix_interrupts(ctrl_info);
6398 if (rc)
6399 return rc;
6400
6401 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6402 ctrl_info->max_msix_vectors =
6403 ctrl_info->num_msix_vectors_enabled;
6404 pqi_calculate_queue_resources(ctrl_info);
6405 }
6406
6407 rc = pqi_alloc_io_resources(ctrl_info);
6408 if (rc)
6409 return rc;
6410
6411 rc = pqi_alloc_operational_queues(ctrl_info);
Kevin Barnettd87d5472017-05-03 18:54:00 -05006412 if (rc) {
6413 dev_err(&ctrl_info->pci_dev->dev,
6414 "failed to allocate operational queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006415 return rc;
Kevin Barnettd87d5472017-05-03 18:54:00 -05006416 }
Kevin Barnett6c223762016-06-27 16:41:00 -05006417
6418 pqi_init_operational_queues(ctrl_info);
6419
6420 rc = pqi_request_irqs(ctrl_info);
6421 if (rc)
6422 return rc;
6423
Kevin Barnett6c223762016-06-27 16:41:00 -05006424 rc = pqi_create_queues(ctrl_info);
6425 if (rc)
6426 return rc;
6427
Kevin Barnett061ef062017-05-03 18:53:05 -05006428 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6429
6430 ctrl_info->controller_online = true;
Kevin Barnettb212c252018-12-07 16:28:10 -06006431
6432 rc = pqi_process_config_table(ctrl_info);
6433 if (rc)
6434 return rc;
6435
Kevin Barnett061ef062017-05-03 18:53:05 -05006436 pqi_start_heartbeat_timer(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006437
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05006438 rc = pqi_enable_events(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006439 if (rc) {
6440 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05006441 "error enabling events\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05006442 return rc;
6443 }
6444
Kevin Barnett6c223762016-06-27 16:41:00 -05006445 /* Register with the SCSI subsystem. */
6446 rc = pqi_register_scsi(ctrl_info);
6447 if (rc)
6448 return rc;
6449
6450 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6451 if (rc) {
6452 dev_err(&ctrl_info->pci_dev->dev,
6453 "error obtaining firmware version\n");
6454 return rc;
6455 }
6456
6457 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6458 if (rc) {
6459 dev_err(&ctrl_info->pci_dev->dev,
6460 "error updating host wellness\n");
6461 return rc;
6462 }
6463
6464 pqi_schedule_update_time_worker(ctrl_info);
6465
6466 pqi_scan_scsi_devices(ctrl_info);
6467
6468 return 0;
6469}
6470
Kevin Barnett061ef062017-05-03 18:53:05 -05006471static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
6472{
6473 unsigned int i;
6474 struct pqi_admin_queues *admin_queues;
6475 struct pqi_event_queue *event_queue;
6476
6477 admin_queues = &ctrl_info->admin_queues;
6478 admin_queues->iq_pi_copy = 0;
6479 admin_queues->oq_ci_copy = 0;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006480 writel(0, admin_queues->oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006481
6482 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6483 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6484 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6485 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6486
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006487 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
6488 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
6489 writel(0, ctrl_info->queue_groups[i].oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006490 }
6491
6492 event_queue = &ctrl_info->event_queue;
Kevin Barnettdac12fb2018-06-18 13:23:00 -05006493 writel(0, event_queue->oq_pi);
Kevin Barnett061ef062017-05-03 18:53:05 -05006494 event_queue->oq_ci_copy = 0;
6495}
6496
6497static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6498{
6499 int rc;
6500
6501 rc = pqi_force_sis_mode(ctrl_info);
6502 if (rc)
6503 return rc;
6504
6505 /*
6506 * Wait until the controller is ready to start accepting SIS
6507 * commands.
6508 */
6509 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6510 if (rc)
6511 return rc;
6512
6513 /*
6514 * If the function we are about to call succeeds, the
6515 * controller will transition from legacy SIS mode
6516 * into PQI mode.
6517 */
6518 rc = sis_init_base_struct_addr(ctrl_info);
6519 if (rc) {
6520 dev_err(&ctrl_info->pci_dev->dev,
6521 "error initializing PQI mode\n");
6522 return rc;
6523 }
6524
6525 /* Wait for the controller to complete the SIS -> PQI transition. */
6526 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6527 if (rc) {
6528 dev_err(&ctrl_info->pci_dev->dev,
6529 "transition to PQI mode failed\n");
6530 return rc;
6531 }
6532
6533 /* From here on, we are running in PQI mode. */
6534 ctrl_info->pqi_mode_enabled = true;
6535 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6536
6537 pqi_reinit_queues(ctrl_info);
6538
6539 rc = pqi_create_admin_queues(ctrl_info);
6540 if (rc) {
6541 dev_err(&ctrl_info->pci_dev->dev,
6542 "error creating admin queues\n");
6543 return rc;
6544 }
6545
6546 rc = pqi_create_queues(ctrl_info);
6547 if (rc)
6548 return rc;
6549
6550 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6551
6552 ctrl_info->controller_online = true;
6553 pqi_start_heartbeat_timer(ctrl_info);
6554 pqi_ctrl_unblock_requests(ctrl_info);
6555
6556 rc = pqi_enable_events(ctrl_info);
6557 if (rc) {
6558 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006559 "error enabling events\n");
Kevin Barnett061ef062017-05-03 18:53:05 -05006560 return rc;
6561 }
6562
6563 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6564 if (rc) {
6565 dev_err(&ctrl_info->pci_dev->dev,
6566 "error updating host wellness\n");
6567 return rc;
6568 }
6569
6570 pqi_schedule_update_time_worker(ctrl_info);
6571
6572 pqi_scan_scsi_devices(ctrl_info);
6573
6574 return 0;
6575}
6576
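/*
 * Program the Completion Timeout Value field of the PCIe Device Control 2
 * register.  The encoded value selects one of the spec-defined timeout
 * ranges; pqi_pci_init() below passes 0x6, i.e. the 65 ms - 210 ms range.
 */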
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006577static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6578 u16 timeout)
6579{
6580 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6581 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6582}
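
/*
 * Illustrative sketch only (not part of the driver): how the Completion
 * Timeout Value field could be read back to confirm the setting.  The
 * helper name is hypothetical; pcie_capability_read_word() is the
 * standard kernel accessor for the PCIe capability registers.
 */
static inline u16 pqi_example_get_pcie_completion_timeout(
	struct pci_dev *pci_dev)
{
	u16 devctl2 = 0;

	/* Read Device Control 2 and mask off the Completion Timeout Value. */
	pcie_capability_read_word(pci_dev, PCI_EXP_DEVCTL2, &devctl2);

	return devctl2 & PCI_EXP_DEVCTL2_COMP_TIMEOUT;
}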
6583
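/*
 * PCI-level bring-up: enable the device, choose a 64-bit or 32-bit DMA
 * mask based on sizeof(dma_addr_t), claim the BAR resources, map BAR 0
 * (the controller register space), raise the PCIe completion timeout,
 * and enable bus mastering.
 */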
Kevin Barnett6c223762016-06-27 16:41:00 -05006584static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6585{
6586 int rc;
6587 u64 mask;
6588
6589 rc = pci_enable_device(ctrl_info->pci_dev);
6590 if (rc) {
6591 dev_err(&ctrl_info->pci_dev->dev,
6592 "failed to enable PCI device\n");
6593 return rc;
6594 }
6595
6596 if (sizeof(dma_addr_t) > 4)
6597 mask = DMA_BIT_MASK(64);
6598 else
6599 mask = DMA_BIT_MASK(32);
6600
6601 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6602 if (rc) {
6603 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6604 goto disable_device;
6605 }
6606
6607 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6608 if (rc) {
6609 dev_err(&ctrl_info->pci_dev->dev,
6610 "failed to obtain PCI resources\n");
6611 goto disable_device;
6612 }
6613
6614 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6615 ctrl_info->pci_dev, 0),
6616 sizeof(struct pqi_ctrl_registers));
6617 if (!ctrl_info->iomem_base) {
6618 dev_err(&ctrl_info->pci_dev->dev,
6619 "failed to map memory for controller registers\n");
6620 rc = -ENOMEM;
6621 goto release_regions;
6622 }
6623
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006624#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6625
6626 /* Increase the PCIe completion timeout. */
6627 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6628 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6629 if (rc) {
6630 dev_err(&ctrl_info->pci_dev->dev,
6631 "failed to set PCIe completion timeout\n");
6632 goto release_regions;
6633 }
6634
Kevin Barnett6c223762016-06-27 16:41:00 -05006635 /* Enable bus mastering. */
6636 pci_set_master(ctrl_info->pci_dev);
6637
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006638 ctrl_info->registers = ctrl_info->iomem_base;
6639 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6640
Kevin Barnett6c223762016-06-27 16:41:00 -05006641 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6642
6643 return 0;
6644
6645release_regions:
6646 pci_release_regions(ctrl_info->pci_dev);
6647disable_device:
6648 pci_disable_device(ctrl_info->pci_dev);
6649
6650 return rc;
6651}
6652
6653static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6654{
6655 iounmap(ctrl_info->iomem_base);
6656 pci_release_regions(ctrl_info->pci_dev);
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006657 if (pci_is_enabled(ctrl_info->pci_dev))
6658 pci_disable_device(ctrl_info->pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006659 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6660}
6661
6662static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6663{
6664 struct pqi_ctrl_info *ctrl_info;
6665
6666 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6667 GFP_KERNEL, numa_node);
6668 if (!ctrl_info)
6669 return NULL;
6670
6671 mutex_init(&ctrl_info->scan_mutex);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05006672 mutex_init(&ctrl_info->lun_reset_mutex);
Kevin Barnett6c223762016-06-27 16:41:00 -05006673
6674 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6675 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6676
6677 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6678 atomic_set(&ctrl_info->num_interrupts, 0);
6679
6680 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6681 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6682
Kees Cook74a0f572017-10-11 16:27:10 -07006683 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
Kevin Barnett5f310422017-05-03 18:54:55 -05006684 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
Kevin Barnett98f87662017-05-03 18:53:11 -05006685
Kevin Barnett6c223762016-06-27 16:41:00 -05006686 sema_init(&ctrl_info->sync_request_sem,
6687 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05006688 init_waitqueue_head(&ctrl_info->block_requests_wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05006689
Kevin Barnett376fb882017-05-03 18:54:43 -05006690 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
6691 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
6692 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
6693 pqi_raid_bypass_retry_worker);
6694
Kevin Barnett6c223762016-06-27 16:41:00 -05006695 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
Kevin Barnett061ef062017-05-03 18:53:05 -05006696 ctrl_info->irq_mode = IRQ_MODE_NONE;
Kevin Barnett6c223762016-06-27 16:41:00 -05006697 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6698
6699 return ctrl_info;
6700}
6701
6702static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6703{
6704 kfree(ctrl_info);
6705}
6706
6707static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6708{
Kevin Barnett98bf0612017-05-03 18:52:28 -05006709 pqi_free_irqs(ctrl_info);
6710 pqi_disable_msix_interrupts(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006711}
6712
6713static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6714{
6715 pqi_stop_heartbeat_timer(ctrl_info);
6716 pqi_free_interrupts(ctrl_info);
6717 if (ctrl_info->queue_memory_base)
6718 dma_free_coherent(&ctrl_info->pci_dev->dev,
6719 ctrl_info->queue_memory_length,
6720 ctrl_info->queue_memory_base,
6721 ctrl_info->queue_memory_base_dma_handle);
6722 if (ctrl_info->admin_queue_memory_base)
6723 dma_free_coherent(&ctrl_info->pci_dev->dev,
6724 ctrl_info->admin_queue_memory_length,
6725 ctrl_info->admin_queue_memory_base,
6726 ctrl_info->admin_queue_memory_base_dma_handle);
6727 pqi_free_all_io_requests(ctrl_info);
6728 if (ctrl_info->error_buffer)
6729 dma_free_coherent(&ctrl_info->pci_dev->dev,
6730 ctrl_info->error_buffer_length,
6731 ctrl_info->error_buffer,
6732 ctrl_info->error_buffer_dma_handle);
6733 if (ctrl_info->iomem_base)
6734 pqi_cleanup_pci_init(ctrl_info);
6735 pqi_free_ctrl_info(ctrl_info);
6736}
6737
6738static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6739{
Kevin Barnett061ef062017-05-03 18:53:05 -05006740 pqi_cancel_rescan_worker(ctrl_info);
6741 pqi_cancel_update_time_worker(ctrl_info);
Kevin Barnette57a1f92016-08-31 14:54:47 -05006742 pqi_remove_all_scsi_devices(ctrl_info);
6743 pqi_unregister_scsi(ctrl_info);
Kevin Barnett162d7752017-05-03 18:52:46 -05006744 if (ctrl_info->pqi_mode_enabled)
6745 pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006746 pqi_free_ctrl_resources(ctrl_info);
6747}
6748
Kevin Barnett3c509762017-05-03 18:54:37 -05006749static void pqi_perform_lockup_action(void)
6750{
6751 switch (pqi_lockup_action) {
6752 case PANIC:
6753 panic("FATAL: Smart Family Controller lockup detected");
6754 break;
6755 case REBOOT:
6756 emergency_restart();
6757 break;
6758 case NONE:
6759 default:
6760 break;
6761 }
6762}
6763
Kevin Barnett5f310422017-05-03 18:54:55 -05006764static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
6765 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
6766 .status = SAM_STAT_CHECK_CONDITION,
6767};
6768
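/*
 * Walk the entire I/O request pool and complete every slot that still
 * holds a reference: SCSI commands are failed with DID_NO_CONNECT,
 * internal requests with -ENXIO and the canned hardware-error /
 * CHECK CONDITION info above.
 */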
6769static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05006770{
6771 unsigned int i;
Kevin Barnett376fb882017-05-03 18:54:43 -05006772 struct pqi_io_request *io_request;
Kevin Barnett376fb882017-05-03 18:54:43 -05006773 struct scsi_cmnd *scmd;
6774
Kevin Barnett5f310422017-05-03 18:54:55 -05006775 for (i = 0; i < ctrl_info->max_io_slots; i++) {
6776 io_request = &ctrl_info->io_request_pool[i];
6777 if (atomic_read(&io_request->refcount) == 0)
6778 continue;
Kevin Barnett376fb882017-05-03 18:54:43 -05006779
Kevin Barnett5f310422017-05-03 18:54:55 -05006780 scmd = io_request->scmd;
6781 if (scmd) {
6782 set_host_byte(scmd, DID_NO_CONNECT);
6783 } else {
6784 io_request->status = -ENXIO;
6785 io_request->error_info =
6786 &pqi_ctrl_offline_raid_error_info;
Kevin Barnett376fb882017-05-03 18:54:43 -05006787 }
Kevin Barnett5f310422017-05-03 18:54:55 -05006788
6789 io_request->io_complete_callback(io_request,
6790 io_request->context);
Kevin Barnett376fb882017-05-03 18:54:43 -05006791 }
6792}
6793
Kevin Barnett5f310422017-05-03 18:54:55 -05006794static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
Kevin Barnett376fb882017-05-03 18:54:43 -05006795{
Kevin Barnett5f310422017-05-03 18:54:55 -05006796 pqi_perform_lockup_action();
6797 pqi_stop_heartbeat_timer(ctrl_info);
6798 pqi_free_interrupts(ctrl_info);
6799 pqi_cancel_rescan_worker(ctrl_info);
6800 pqi_cancel_update_time_worker(ctrl_info);
6801 pqi_ctrl_wait_until_quiesced(ctrl_info);
6802 pqi_fail_all_outstanding_requests(ctrl_info);
6803 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
6804 pqi_ctrl_unblock_requests(ctrl_info);
6805}
6806
6807static void pqi_ctrl_offline_worker(struct work_struct *work)
6808{
6809 struct pqi_ctrl_info *ctrl_info;
6810
6811 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
6812 pqi_take_ctrl_offline_deferred(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05006813}
6814
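/*
 * Offlining is split into two stages: this function runs in whatever
 * context detected the failure, so it only marks the controller offline,
 * blocks new requests, shuts the controller down through SIS (unless
 * pqi_disable_ctrl_shutdown is set) and disables the PCI device.  The
 * heavier cleanup (performing the lockup action, failing outstanding I/O,
 * and stopping the worker threads) is deferred to
 * pqi_ctrl_offline_worker() via schedule_work().
 */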
6815static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
6816{
Kevin Barnett5f310422017-05-03 18:54:55 -05006817 if (!ctrl_info->controller_online)
6818 return;
6819
Kevin Barnett376fb882017-05-03 18:54:43 -05006820 ctrl_info->controller_online = false;
Kevin Barnett5f310422017-05-03 18:54:55 -05006821 ctrl_info->pqi_mode_enabled = false;
6822 pqi_ctrl_block_requests(ctrl_info);
Kevin Barnett5a259e32017-05-03 18:55:43 -05006823 if (!pqi_disable_ctrl_shutdown)
6824 sis_shutdown_ctrl(ctrl_info);
Kevin Barnett376fb882017-05-03 18:54:43 -05006825 pci_disable_device(ctrl_info->pci_dev);
6826 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
Kevin Barnett5f310422017-05-03 18:54:55 -05006827 schedule_work(&ctrl_info->ctrl_offline_work);
Kevin Barnett376fb882017-05-03 18:54:43 -05006828}
6829
Kevin Barnettd91d7822017-05-03 18:53:30 -05006830static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006831 const struct pci_device_id *id)
6832{
6833 char *ctrl_description;
6834
Kevin Barnett37b36842017-05-03 18:55:01 -05006835 if (id->driver_data)
Kevin Barnett6c223762016-06-27 16:41:00 -05006836 ctrl_description = (char *)id->driver_data;
Kevin Barnett37b36842017-05-03 18:55:01 -05006837 else
6838 ctrl_description = "Microsemi Smart Family Controller";
Kevin Barnett6c223762016-06-27 16:41:00 -05006839
Kevin Barnettd91d7822017-05-03 18:53:30 -05006840 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
Kevin Barnett6c223762016-06-27 16:41:00 -05006841}
6842
Kevin Barnettd91d7822017-05-03 18:53:30 -05006843static int pqi_pci_probe(struct pci_dev *pci_dev,
6844 const struct pci_device_id *id)
Kevin Barnett6c223762016-06-27 16:41:00 -05006845{
6846 int rc;
6847 int node;
6848 struct pqi_ctrl_info *ctrl_info;
6849
Kevin Barnettd91d7822017-05-03 18:53:30 -05006850 pqi_print_ctrl_info(pci_dev, id);
Kevin Barnett6c223762016-06-27 16:41:00 -05006851
6852 if (pqi_disable_device_id_wildcards &&
6853 id->subvendor == PCI_ANY_ID &&
6854 id->subdevice == PCI_ANY_ID) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05006855 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006856 "controller not probed because device ID wildcards are disabled\n");
6857 return -ENODEV;
6858 }
6859
6860 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
Kevin Barnettd91d7822017-05-03 18:53:30 -05006861 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006862 "controller device ID matched using wildcards\n");
6863
Kevin Barnettd91d7822017-05-03 18:53:30 -05006864 node = dev_to_node(&pci_dev->dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006865 if (node == NUMA_NO_NODE)
Kevin Barnettd91d7822017-05-03 18:53:30 -05006866 set_dev_node(&pci_dev->dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05006867
6868 ctrl_info = pqi_alloc_ctrl_info(node);
6869 if (!ctrl_info) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05006870 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006871 "failed to allocate controller info block\n");
6872 return -ENOMEM;
6873 }
6874
Kevin Barnettd91d7822017-05-03 18:53:30 -05006875 ctrl_info->pci_dev = pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05006876
6877 rc = pqi_pci_init(ctrl_info);
6878 if (rc)
6879 goto error;
6880
6881 rc = pqi_ctrl_init(ctrl_info);
6882 if (rc)
6883 goto error;
6884
6885 return 0;
6886
6887error:
6888 pqi_remove_ctrl(ctrl_info);
6889
6890 return rc;
6891}
6892
Kevin Barnettd91d7822017-05-03 18:53:30 -05006893static void pqi_pci_remove(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05006894{
6895 struct pqi_ctrl_info *ctrl_info;
6896
Kevin Barnettd91d7822017-05-03 18:53:30 -05006897 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006898 if (!ctrl_info)
6899 return;
6900
6901 pqi_remove_ctrl(ctrl_info);
6902}
6903
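/*
 * Shutdown hook: flush the controller cache (with a SHUTDOWN event type)
 * and reset the controller; a failed flush is only reported as a warning.
 */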
Kevin Barnettd91d7822017-05-03 18:53:30 -05006904static void pqi_shutdown(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05006905{
6906 int rc;
6907 struct pqi_ctrl_info *ctrl_info;
6908
Kevin Barnettd91d7822017-05-03 18:53:30 -05006909 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006910 if (!ctrl_info)
6911 goto error;
6912
6913 /*
6914 * Write all data in the controller's battery-backed cache to
6915 * storage.
6916 */
Kevin Barnett58322fe2017-08-10 13:46:45 -05006917 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
Kevin Barnettb6d47812017-08-10 13:47:03 -05006918 pqi_reset(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006919 if (rc == 0)
6920 return;
6921
6922error:
Kevin Barnettd91d7822017-05-03 18:53:30 -05006923 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006924 "unable to flush controller cache\n");
6925}
6926
Kevin Barnett3c509762017-05-03 18:54:37 -05006927static void pqi_process_lockup_action_param(void)
6928{
6929 unsigned int i;
6930
6931 if (!pqi_lockup_action_param)
6932 return;
6933
6934 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6935 if (strcmp(pqi_lockup_action_param,
6936 pqi_lockup_actions[i].name) == 0) {
6937 pqi_lockup_action = pqi_lockup_actions[i].action;
6938 return;
6939 }
6940 }
6941
6942 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
6943 DRIVER_NAME_SHORT, pqi_lockup_action_param);
6944}
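
/*
 * Usage note (assuming the lockup-action setting is exposed as the
 * "lockup_action" module parameter, matching the setting names reported
 * above):
 *
 *   modprobe smartpqi lockup_action=reboot
 *
 * selects emergency_restart() as the response to a controller lockup;
 * "panic" and "none" are the other accepted values.
 */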
6945
6946static void pqi_process_module_params(void)
6947{
6948 pqi_process_lockup_action_param();
6949}
6950
Arnd Bergmann5c146682017-05-18 10:32:18 +02006951static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
Kevin Barnett061ef062017-05-03 18:53:05 -05006952{
6953 struct pqi_ctrl_info *ctrl_info;
6954
6955 ctrl_info = pci_get_drvdata(pci_dev);
6956
6957 pqi_disable_events(ctrl_info);
6958 pqi_cancel_update_time_worker(ctrl_info);
6959 pqi_cancel_rescan_worker(ctrl_info);
6960 pqi_wait_until_scan_finished(ctrl_info);
6961 pqi_wait_until_lun_reset_finished(ctrl_info);
Kevin Barnett58322fe2017-08-10 13:46:45 -05006962 pqi_flush_cache(ctrl_info, SUSPEND);
Kevin Barnett061ef062017-05-03 18:53:05 -05006963 pqi_ctrl_block_requests(ctrl_info);
6964 pqi_ctrl_wait_until_quiesced(ctrl_info);
6965 pqi_wait_until_inbound_queues_empty(ctrl_info);
6966 pqi_ctrl_wait_for_pending_io(ctrl_info);
6967 pqi_stop_heartbeat_timer(ctrl_info);
6968
6969 if (state.event == PM_EVENT_FREEZE)
6970 return 0;
6971
6972 pci_save_state(pci_dev);
6973 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6974
6975 ctrl_info->controller_online = false;
6976 ctrl_info->pqi_mode_enabled = false;
6977
6978 return 0;
6979}
6980
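/*
 * Resume handler.  If the PCI core reports the device in a state other
 * than D0, the driver falls back to a single shared legacy-INTx vector on
 * queue group 0, restarts the heartbeat timer and unblocks requests
 * without touching the PQI queues; otherwise it restores D0 and the saved
 * config space and performs a full re-initialization via
 * pqi_ctrl_init_resume().
 */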
Arnd Bergmann5c146682017-05-18 10:32:18 +02006981static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
Kevin Barnett061ef062017-05-03 18:53:05 -05006982{
6983 int rc;
6984 struct pqi_ctrl_info *ctrl_info;
6985
6986 ctrl_info = pci_get_drvdata(pci_dev);
6987
6988 if (pci_dev->current_state != PCI_D0) {
6989 ctrl_info->max_hw_queue_index = 0;
6990 pqi_free_interrupts(ctrl_info);
6991 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6992 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6993 IRQF_SHARED, DRIVER_NAME_SHORT,
6994 &ctrl_info->queue_groups[0]);
6995 if (rc) {
6996 dev_err(&ctrl_info->pci_dev->dev,
6997 "irq %u init failed with error %d\n",
6998 pci_dev->irq, rc);
6999 return rc;
7000 }
7001 pqi_start_heartbeat_timer(ctrl_info);
7002 pqi_ctrl_unblock_requests(ctrl_info);
7003 return 0;
7004 }
7005
7006 pci_set_power_state(pci_dev, PCI_D0);
7007 pci_restore_state(pci_dev);
7008
7009 return pqi_ctrl_init_resume(ctrl_info);
7010}
7011
Kevin Barnett6c223762016-06-27 16:41:00 -05007012/* Define the PCI IDs for the controllers that we support. */
7013static const struct pci_device_id pqi_pci_id_table[] = {
7014 {
7015 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007016 0x105b, 0x1211)
7017 },
7018 {
7019 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7020 0x105b, 0x1321)
7021 },
7022 {
7023 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007024 0x152d, 0x8a22)
7025 },
7026 {
7027 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7028 0x152d, 0x8a23)
7029 },
7030 {
7031 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7032 0x152d, 0x8a24)
7033 },
7034 {
7035 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7036 0x152d, 0x8a36)
7037 },
7038 {
7039 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7040 0x152d, 0x8a37)
7041 },
7042 {
7043 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007044 0x193d, 0x8460)
7045 },
7046 {
7047 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7048 0x193d, 0x8461)
7049 },
7050 {
7051 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7052 0x193d, 0xf460)
7053 },
7054 {
7055 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7056 0x193d, 0xf461)
7057 },
7058 {
7059 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7060 0x1bd4, 0x0045)
7061 },
7062 {
7063 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7064 0x1bd4, 0x0046)
7065 },
7066 {
7067 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7068 0x1bd4, 0x0047)
7069 },
7070 {
7071 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7072 0x1bd4, 0x0048)
7073 },
7074 {
7075 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett9f8d05f2018-06-18 13:22:54 -05007076 0x1bd4, 0x004a)
7077 },
7078 {
7079 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7080 0x1bd4, 0x004b)
7081 },
7082 {
7083 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7084 0x1bd4, 0x004c)
7085 },
7086 {
7087 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007088 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
7089 },
7090 {
7091 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007092 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
Kevin Barnett6c223762016-06-27 16:41:00 -05007093 },
7094 {
7095 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7096 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
7097 },
7098 {
7099 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7100 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
7101 },
7102 {
7103 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7104 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
7105 },
7106 {
7107 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7108 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
7109 },
7110 {
7111 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7112 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
7113 },
7114 {
7115 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7116 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
7117 },
7118 {
7119 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007120 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
7121 },
7122 {
7123 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007124 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
7125 },
7126 {
7127 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007128 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
7129 },
7130 {
7131 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7132 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
7133 },
7134 {
7135 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7136 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
7137 },
7138 {
7139 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7140 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
7141 },
7142 {
7143 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7144 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
7145 },
7146 {
7147 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7148 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
7149 },
7150 {
7151 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7152 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
7153 },
7154 {
7155 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007156 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
7157 },
7158 {
7159 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7160 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
7161 },
7162 {
7163 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007164 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
7165 },
7166 {
7167 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007168 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
7169 },
7170 {
7171 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7172 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
7173 },
7174 {
7175 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7176 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
7177 },
7178 {
7179 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7180 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
7181 },
7182 {
7183 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7184 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
7185 },
7186 {
7187 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettb0f94082018-03-05 09:01:00 -06007188 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
7189 },
7190 {
7191 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007192 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
7193 },
7194 {
7195 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7196 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
7197 },
7198 {
7199 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnettbd809e82017-09-27 16:29:59 -05007200 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
7201 },
7202 {
7203 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7204 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
7205 },
7206 {
7207 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007208 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
7209 },
7210 {
7211 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett9f8d05f2018-06-18 13:22:54 -05007212 PCI_VENDOR_ID_ADVANTECH, 0x8312)
7213 },
7214 {
7215 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007216 PCI_VENDOR_ID_DELL, 0x1fe0)
7217 },
7218 {
7219 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007220 PCI_VENDOR_ID_HP, 0x0600)
7221 },
7222 {
7223 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7224 PCI_VENDOR_ID_HP, 0x0601)
7225 },
7226 {
7227 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7228 PCI_VENDOR_ID_HP, 0x0602)
7229 },
7230 {
7231 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7232 PCI_VENDOR_ID_HP, 0x0603)
7233 },
7234 {
7235 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett55790062017-08-10 13:47:09 -05007236 PCI_VENDOR_ID_HP, 0x0609)
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007237 },
7238 {
7239 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7240 PCI_VENDOR_ID_HP, 0x0650)
7241 },
7242 {
7243 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7244 PCI_VENDOR_ID_HP, 0x0651)
7245 },
7246 {
7247 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7248 PCI_VENDOR_ID_HP, 0x0652)
7249 },
7250 {
7251 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7252 PCI_VENDOR_ID_HP, 0x0653)
7253 },
7254 {
7255 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7256 PCI_VENDOR_ID_HP, 0x0654)
7257 },
7258 {
7259 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7260 PCI_VENDOR_ID_HP, 0x0655)
7261 },
7262 {
7263 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05007264 PCI_VENDOR_ID_HP, 0x0700)
7265 },
7266 {
7267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7268 PCI_VENDOR_ID_HP, 0x0701)
7269 },
7270 {
7271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007272 PCI_VENDOR_ID_HP, 0x1001)
7273 },
7274 {
7275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7276 PCI_VENDOR_ID_HP, 0x1100)
7277 },
7278 {
7279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7280 PCI_VENDOR_ID_HP, 0x1101)
7281 },
7282 {
7283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05007284 PCI_ANY_ID, PCI_ANY_ID)
7285 },
7286 { 0 }
7287};
7288
7289MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
7290
7291static struct pci_driver pqi_pci_driver = {
7292 .name = DRIVER_NAME_SHORT,
7293 .id_table = pqi_pci_id_table,
7294 .probe = pqi_pci_probe,
7295 .remove = pqi_pci_remove,
7296 .shutdown = pqi_shutdown,
Kevin Barnett061ef062017-05-03 18:53:05 -05007297#if defined(CONFIG_PM)
7298 .suspend = pqi_suspend,
7299 .resume = pqi_resume,
7300#endif
Kevin Barnett6c223762016-06-27 16:41:00 -05007301};
7302
7303static int __init pqi_init(void)
7304{
7305 int rc;
7306
7307 pr_info(DRIVER_NAME "\n");
7308
7309 pqi_sas_transport_template =
7310 sas_attach_transport(&pqi_sas_transport_functions);
7311 if (!pqi_sas_transport_template)
7312 return -ENODEV;
7313
Kevin Barnett3c509762017-05-03 18:54:37 -05007314 pqi_process_module_params();
7315
Kevin Barnett6c223762016-06-27 16:41:00 -05007316 rc = pci_register_driver(&pqi_pci_driver);
7317 if (rc)
7318 sas_release_transport(pqi_sas_transport_template);
7319
7320 return rc;
7321}
7322
7323static void __exit pqi_cleanup(void)
7324{
7325 pci_unregister_driver(&pqi_pci_driver);
7326 sas_release_transport(pqi_sas_transport_template);
7327}
7328
7329module_init(pqi_init);
7330module_exit(pqi_cleanup);
7331
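/*
 * Never called at run time; it exists so that the BUILD_BUG_ON() checks
 * below are evaluated at compile time, pinning the driver's structure
 * layouts to the offsets the controller firmware expects.
 */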
7332static void __attribute__((unused)) verify_structures(void)
7333{
7334 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7335 sis_host_to_ctrl_doorbell) != 0x20);
7336 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7337 sis_interrupt_mask) != 0x34);
7338 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7339 sis_ctrl_to_host_doorbell) != 0x9c);
7340 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7341 sis_ctrl_to_host_doorbell_clear) != 0xa0);
7342 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnettff6abb72016-08-31 14:54:41 -05007343 sis_driver_scratch) != 0xb0);
7344 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnett6c223762016-06-27 16:41:00 -05007345 sis_firmware_status) != 0xbc);
7346 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7347 sis_mailbox) != 0x1000);
7348 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7349 pqi_registers) != 0x4000);
7350
7351 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7352 iu_type) != 0x0);
7353 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7354 iu_length) != 0x2);
7355 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7356 response_queue_id) != 0x4);
7357 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7358 work_area) != 0x6);
7359 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
7360
7361 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7362 status) != 0x0);
7363 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7364 service_response) != 0x1);
7365 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7366 data_present) != 0x2);
7367 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7368 reserved) != 0x3);
7369 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7370 residual_count) != 0x4);
7371 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7372 data_length) != 0x8);
7373 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7374 reserved1) != 0xa);
7375 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7376 data) != 0xc);
7377 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
7378
7379 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7380 data_in_result) != 0x0);
7381 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7382 data_out_result) != 0x1);
7383 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7384 reserved) != 0x2);
7385 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7386 status) != 0x5);
7387 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7388 status_qualifier) != 0x6);
7389 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7390 sense_data_length) != 0x8);
7391 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7392 response_data_length) != 0xa);
7393 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7394 data_in_transferred) != 0xc);
7395 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7396 data_out_transferred) != 0x10);
7397 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7398 data) != 0x14);
7399 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
7400
7401 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7402 signature) != 0x0);
7403 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7404 function_and_status_code) != 0x8);
7405 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7406 max_admin_iq_elements) != 0x10);
7407 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7408 max_admin_oq_elements) != 0x11);
7409 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7410 admin_iq_element_length) != 0x12);
7411 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7412 admin_oq_element_length) != 0x13);
7413 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7414 max_reset_timeout) != 0x14);
7415 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7416 legacy_intx_status) != 0x18);
7417 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7418 legacy_intx_mask_set) != 0x1c);
7419 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7420 legacy_intx_mask_clear) != 0x20);
7421 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7422 device_status) != 0x40);
7423 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7424 admin_iq_pi_offset) != 0x48);
7425 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7426 admin_oq_ci_offset) != 0x50);
7427 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7428 admin_iq_element_array_addr) != 0x58);
7429 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7430 admin_oq_element_array_addr) != 0x60);
7431 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7432 admin_iq_ci_addr) != 0x68);
7433 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7434 admin_oq_pi_addr) != 0x70);
7435 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7436 admin_iq_num_elements) != 0x78);
7437 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7438 admin_oq_num_elements) != 0x79);
7439 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7440 admin_queue_int_msg_num) != 0x7a);
7441 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7442 device_error) != 0x80);
7443 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7444 error_details) != 0x88);
7445 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7446 device_reset) != 0x90);
7447 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7448 power_action) != 0x94);
7449 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
7450
7451 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7452 header.iu_type) != 0);
7453 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7454 header.iu_length) != 2);
7455 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7456 header.work_area) != 6);
7457 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7458 request_id) != 8);
7459 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7460 function_code) != 10);
7461 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7462 data.report_device_capability.buffer_length) != 44);
7463 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7464 data.report_device_capability.sg_descriptor) != 48);
7465 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7466 data.create_operational_iq.queue_id) != 12);
7467 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7468 data.create_operational_iq.element_array_addr) != 16);
7469 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7470 data.create_operational_iq.ci_addr) != 24);
7471 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7472 data.create_operational_iq.num_elements) != 32);
7473 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7474 data.create_operational_iq.element_length) != 34);
7475 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7476 data.create_operational_iq.queue_protocol) != 36);
7477 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7478 data.create_operational_oq.queue_id) != 12);
7479 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7480 data.create_operational_oq.element_array_addr) != 16);
7481 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7482 data.create_operational_oq.pi_addr) != 24);
7483 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7484 data.create_operational_oq.num_elements) != 32);
7485 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7486 data.create_operational_oq.element_length) != 34);
7487 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7488 data.create_operational_oq.queue_protocol) != 36);
7489 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7490 data.create_operational_oq.int_msg_num) != 40);
7491 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7492 data.create_operational_oq.coalescing_count) != 42);
7493 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7494 data.create_operational_oq.min_coalescing_time) != 44);
7495 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7496 data.create_operational_oq.max_coalescing_time) != 48);
7497 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7498 data.delete_operational_queue.queue_id) != 12);
7499 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
7500 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7501 data.create_operational_iq) != 64 - 11);
7502 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7503 data.create_operational_oq) != 64 - 11);
7504 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7505 data.delete_operational_queue) != 64 - 11);
7506
7507 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7508 header.iu_type) != 0);
7509 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7510 header.iu_length) != 2);
7511 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7512 header.work_area) != 6);
7513 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7514 request_id) != 8);
7515 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7516 function_code) != 10);
7517 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7518 status) != 11);
7519 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7520 data.create_operational_iq.status_descriptor) != 12);
7521 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7522 data.create_operational_iq.iq_pi_offset) != 16);
7523 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7524 data.create_operational_oq.status_descriptor) != 12);
7525 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7526 data.create_operational_oq.oq_ci_offset) != 16);
7527 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
7528
7529 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7530 header.iu_type) != 0);
7531 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7532 header.iu_length) != 2);
7533 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7534 header.response_queue_id) != 4);
7535 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7536 header.work_area) != 6);
7537 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7538 request_id) != 8);
7539 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7540 nexus_id) != 10);
7541 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7542 buffer_length) != 12);
7543 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7544 lun_number) != 16);
7545 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7546 protocol_specific) != 24);
7547 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7548 error_index) != 27);
7549 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7550 cdb) != 32);
7551 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7552 sg_descriptors) != 64);
7553 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
7554 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7555
7556 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7557 header.iu_type) != 0);
7558 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7559 header.iu_length) != 2);
7560 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7561 header.response_queue_id) != 4);
7562 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7563 header.work_area) != 6);
7564 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7565 request_id) != 8);
7566 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7567 nexus_id) != 12);
7568 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7569 buffer_length) != 16);
7570 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7571 data_encryption_key_index) != 22);
7572 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7573 encrypt_tweak_lower) != 24);
7574 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7575 encrypt_tweak_upper) != 28);
7576 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7577 cdb) != 32);
7578 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7579 error_index) != 48);
7580 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7581 num_sg_descriptors) != 50);
7582 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7583 cdb_length) != 51);
7584 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7585 lun_number) != 52);
7586 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7587 sg_descriptors) != 64);
7588 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
7589 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7590
7591 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7592 header.iu_type) != 0);
7593 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7594 header.iu_length) != 2);
7595 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7596 request_id) != 8);
7597 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7598 error_index) != 10);
7599
7600 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7601 header.iu_type) != 0);
7602 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7603 header.iu_length) != 2);
7604 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7605 header.response_queue_id) != 4);
7606 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7607 request_id) != 8);
7608 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7609 data.report_event_configuration.buffer_length) != 12);
7610 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7611 data.report_event_configuration.sg_descriptors) != 16);
7612 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7613 data.set_event_configuration.global_event_oq_id) != 10);
7614 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7615 data.set_event_configuration.buffer_length) != 12);
7616 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7617 data.set_event_configuration.sg_descriptors) != 16);
7618
7619 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7620 max_inbound_iu_length) != 6);
7621 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7622 max_outbound_iu_length) != 14);
7623 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
7624
7625 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7626 data_length) != 0);
7627 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7628 iq_arbitration_priority_support_bitmask) != 8);
7629 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7630 maximum_aw_a) != 9);
7631 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7632 maximum_aw_b) != 10);
7633 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7634 maximum_aw_c) != 11);
7635 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7636 max_inbound_queues) != 16);
7637 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7638 max_elements_per_iq) != 18);
7639 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7640 max_iq_element_length) != 24);
7641 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7642 min_iq_element_length) != 26);
7643 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7644 max_outbound_queues) != 30);
7645 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7646 max_elements_per_oq) != 32);
7647 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7648 intr_coalescing_time_granularity) != 34);
7649 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7650 max_oq_element_length) != 36);
7651 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7652 min_oq_element_length) != 38);
7653 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7654 iu_layer_descriptors) != 64);
7655 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
7656
7657 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7658 event_type) != 0);
7659 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7660 oq_id) != 2);
7661 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
7662
7663 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7664 num_event_descriptors) != 2);
7665 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7666 descriptors) != 4);
7667
Kevin Barnett061ef062017-05-03 18:53:05 -05007668 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
7669 ARRAY_SIZE(pqi_supported_event_types));
7670
Kevin Barnett6c223762016-06-27 16:41:00 -05007671 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7672 header.iu_type) != 0);
7673 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7674 header.iu_length) != 2);
7675 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7676 event_type) != 8);
7677 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7678 event_id) != 10);
7679 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7680 additional_event_id) != 12);
7681 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7682 data) != 16);
7683 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
7684
7685 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7686 header.iu_type) != 0);
7687 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7688 header.iu_length) != 2);
7689 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7690 event_type) != 8);
7691 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7692 event_id) != 10);
7693 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7694 additional_event_id) != 12);
7695 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
7696
7697 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7698 header.iu_type) != 0);
7699 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7700 header.iu_length) != 2);
7701 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7702 request_id) != 8);
7703 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7704 nexus_id) != 10);
7705 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7706 lun_number) != 16);
7707 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7708 protocol_specific) != 24);
7709 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7710 outbound_queue_id_to_manage) != 26);
7711 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7712 request_id_to_manage) != 28);
7713 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7714 task_management_function) != 30);
7715 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
7716
7717 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7718 header.iu_type) != 0);
7719 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7720 header.iu_length) != 2);
7721 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7722 request_id) != 8);
7723 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7724 nexus_id) != 10);
7725 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7726 additional_response_info) != 12);
7727 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7728 response_code) != 15);
7729 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
7730
7731 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7732 configured_logical_drive_count) != 0);
7733 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7734 configuration_signature) != 1);
7735 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7736 firmware_version) != 5);
7737 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7738 extended_logical_unit_count) != 154);
7739 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7740 firmware_build_number) != 190);
7741 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7742 controller_mode) != 292);
7743
Kevin Barnett1be42f42017-05-03 18:53:42 -05007744 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7745 phys_bay_in_box) != 115);
7746 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7747 device_type) != 120);
7748 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7749 redundant_path_present_map) != 1736);
7750 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7751 active_path_number) != 1738);
7752 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7753 alternate_paths_phys_connector) != 1739);
7754 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7755 alternate_paths_phys_box_on_port) != 1755);
7756 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7757 current_queue_depth_limit) != 1796);
7758 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
7759
Kevin Barnett6c223762016-06-27 16:41:00 -05007760 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
7761 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
7762 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
7763 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7764 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
7765 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7766 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
7767 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
7768 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7769 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
7770 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
7771 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7772
7773 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
Kevin Barnettd727a772017-05-03 18:54:25 -05007774 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
7775 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
Kevin Barnett6c223762016-06-27 16:41:00 -05007776}