1/*
2 * Copyright (C) 2017 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * Broadcom SBA RAID Driver
11 *
12 * The Broadcom stream buffer accelerator (SBA) provides offloading
13 * capabilities for RAID operations. The SBA offload engine is accessible
14 * via a Broadcom SoC specific ring manager. Two or more offload engines
15 * can share the same Broadcom SoC specific ring manager. Because of this,
16 * the ring manager driver is implemented as a mailbox controller driver
17 * and the offload engine drivers are implemented as mailbox clients.
18 *
19 * Typically, a Broadcom SoC specific ring manager implements a large
20 * number of hardware rings over one or more SBA hardware devices. By
21 * design, the internal buffer size of an SBA hardware device is limited,
22 * but all offload operations supported by SBA can be broken down into
23 * multiple small-sized requests and executed in parallel on multiple SBA
24 * hardware devices to achieve high throughput.
25 *
26 * The Broadcom SBA RAID driver does not require any register programming;
27 * it only submits requests to the SBA hardware device via mailbox channels.
28 * This driver implements a DMA device with one DMA channel using a set
29 * of mailbox channels provided by the Broadcom SoC specific ring manager
30 * driver. To exploit parallelism (as described above), all DMA requests
31 * coming to the SBA RAID DMA channel are broken down into smaller requests
32 * and submitted to multiple mailbox channels in round-robin fashion.
33 * To have more SBA DMA channels, we can create more SBA device nodes
34 * in the Broadcom SoC specific DTS based on the number of hardware rings
35 * supported by the Broadcom SoC ring manager.
36 */
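/*
 * Illustrative sketch only (not part of this driver): how a generic
 * dmaengine client might drive the XOR capability exposed here. The
 * channel lookup, the pre-mapped DMA addresses (dst, srcs[]) and the
 * my_done_callback() name are assumptions for the example; only standard
 * dmaengine APIs are used.
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_XOR);
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dst, srcs, src_cnt,
 *						len, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_done_callback;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * Internally each such request is split into hw_buf_size sized pieces
 * and the pieces are round-robined across the mailbox channels.
 */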
37
38#include <linux/bitops.h>
39#include <linux/dma-mapping.h>
40#include <linux/dmaengine.h>
41#include <linux/list.h>
42#include <linux/mailbox_client.h>
43#include <linux/mailbox/brcm-message.h>
44#include <linux/module.h>
45#include <linux/of_device.h>
46#include <linux/slab.h>
47#include <linux/raid/pq.h>
48
49#include "dmaengine.h"
50
51/* ====== Driver macros and defines ===== */
52
53#define SBA_TYPE_SHIFT 48
54#define SBA_TYPE_MASK GENMASK(1, 0)
55#define SBA_TYPE_A 0x0
56#define SBA_TYPE_B 0x2
57#define SBA_TYPE_C 0x3
58#define SBA_USER_DEF_SHIFT 32
59#define SBA_USER_DEF_MASK GENMASK(15, 0)
60#define SBA_R_MDATA_SHIFT 24
61#define SBA_R_MDATA_MASK GENMASK(7, 0)
62#define SBA_C_MDATA_MS_SHIFT 18
63#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
64#define SBA_INT_SHIFT 17
65#define SBA_INT_MASK BIT(0)
66#define SBA_RESP_SHIFT 16
67#define SBA_RESP_MASK BIT(0)
68#define SBA_C_MDATA_SHIFT 8
69#define SBA_C_MDATA_MASK GENMASK(7, 0)
70#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
71#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
72#define SBA_C_MDATA_DNUM_SHIFT 5
73#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
74#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
75#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
76#define SBA_CMD_SHIFT 0
77#define SBA_CMD_MASK GENMASK(3, 0)
78#define SBA_CMD_ZERO_BUFFER 0x4
79#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
80#define SBA_CMD_LOAD_BUFFER 0x9
81#define SBA_CMD_XOR 0xa
82#define SBA_CMD_GALOIS_XOR 0xb
83#define SBA_CMD_WRITE_BUFFER 0xc
84#define SBA_CMD_GALOIS 0xe
85
86/* Driver helper macros */
87#define to_sba_request(tx) \
88 container_of(tx, struct sba_request, tx)
89#define to_sba_device(dchan) \
90 container_of(dchan, struct sba_device, dma_chan)
91
92/* ===== Driver data structures ===== */
93
94enum sba_request_flags {
95 SBA_REQUEST_STATE_FREE = 0x001,
96 SBA_REQUEST_STATE_ALLOCED = 0x002,
97 SBA_REQUEST_STATE_PENDING = 0x004,
98 SBA_REQUEST_STATE_ACTIVE = 0x008,
99 SBA_REQUEST_STATE_RECEIVED = 0x010,
100 SBA_REQUEST_STATE_COMPLETED = 0x020,
101 SBA_REQUEST_STATE_ABORTED = 0x040,
102 SBA_REQUEST_STATE_MASK = 0x0ff,
103 SBA_REQUEST_FENCE = 0x100,
104};
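/*
 * Request life cycle (tracked in the low bits of sba_request.flags and
 * mirrored by the per-device request lists). A request normally moves
 * through:
 *
 *   FREE -> ALLOCED -> PENDING -> ACTIVE -> RECEIVED -> COMPLETED -> FREE
 *                                    |
 *                                    +-> ABORTED (on channel cleanup)
 *
 * SBA_REQUEST_FENCE is independent of the state bits: while a fenced
 * request is on the active list, _sba_active_request() refuses to make
 * any further pending requests active.
 */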
105
106struct sba_request {
107 /* Global state */
108 struct list_head node;
109 struct sba_device *sba;
110 u32 flags;
111 /* Chained requests management */
112 struct sba_request *first;
113 struct list_head next;
114 unsigned int next_count;
115 atomic_t next_pending_count;
116 /* BRCM message data */
117 void *resp;
118 dma_addr_t resp_dma;
119 struct brcm_sba_command *cmds;
120 struct brcm_message msg;
121 struct dma_async_tx_descriptor tx;
122};
123
124enum sba_version {
125 SBA_VER_1 = 0,
126 SBA_VER_2
127};
128
129struct sba_device {
130 /* Underlying device */
131 struct device *dev;
132 /* DT configuration parameters */
133 enum sba_version ver;
134 /* Derived configuration parameters */
135 u32 max_req;
136 u32 hw_buf_size;
137 u32 hw_resp_size;
138 u32 max_pq_coefs;
139 u32 max_pq_srcs;
140 u32 max_cmd_per_req;
141 u32 max_xor_srcs;
142 u32 max_resp_pool_size;
143 u32 max_cmds_pool_size;
144 /* Mailbox client and mailbox channels */
145 struct mbox_client client;
146 int mchans_count;
147 atomic_t mchans_current;
148 struct mbox_chan **mchans;
149 struct device *mbox_dev;
150 /* DMA device and DMA channel */
151 struct dma_device dma_dev;
152 struct dma_chan dma_chan;
153 /* DMA channel resources */
154 void *resp_base;
155 dma_addr_t resp_dma_base;
156 void *cmds_base;
157 dma_addr_t cmds_dma_base;
158 spinlock_t reqs_lock;
159 struct sba_request *reqs;
160 bool reqs_fence;
161 struct list_head reqs_alloc_list;
162 struct list_head reqs_pending_list;
163 struct list_head reqs_active_list;
164 struct list_head reqs_received_list;
165 struct list_head reqs_completed_list;
166 struct list_head reqs_aborted_list;
167 struct list_head reqs_free_list;
168 int reqs_free_count;
169};
170
171/* ====== Command helper routines ===== */
172
173static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
174{
175 cmd &= ~((u64)mask << shift);
176 cmd |= ((u64)(val & mask) << shift);
177 return cmd;
178}
179
180static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
181{
182 return b0 & SBA_C_MDATA_BNUMx_MASK;
183}
184
185static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
186{
187 return b0 & SBA_C_MDATA_BNUMx_MASK;
188}
189
190static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
191{
192 return (b0 & SBA_C_MDATA_BNUMx_MASK) |
193 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
194}
195
196static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
197{
198 return (b0 & SBA_C_MDATA_BNUMx_MASK) |
199 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
200 ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
201}
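/*
 * Example of how the above helpers compose a 64-bit SBA command (this is
 * exactly what sba_fillup_memcpy_msg() does for its first command): a
 * Type-B LOAD_BUFFER of msg_len bytes into buf0 is built as
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(sba_cmd_load_c_mdata(0)),
 *			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
 *
 * i.e. each sba_cmd_enc() call ORs one masked field into the command word.
 */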
202
203/* ====== General helper routines ===== */
204
205static struct sba_request *sba_alloc_request(struct sba_device *sba)
206{
207 unsigned long flags;
208 struct sba_request *req = NULL;
209
210 spin_lock_irqsave(&sba->reqs_lock, flags);
211 req = list_first_entry_or_null(&sba->reqs_free_list,
212 struct sba_request, node);
213 if (req) {
214 list_move_tail(&req->node, &sba->reqs_alloc_list);
215 sba->reqs_free_count--;
216 }
217 spin_unlock_irqrestore(&sba->reqs_lock, flags);
218 if (!req)
219 return NULL;
220
221 req->flags = SBA_REQUEST_STATE_ALLOCED;
222 req->first = req;
223 INIT_LIST_HEAD(&req->next);
224 req->next_count = 1;
225 atomic_set(&req->next_pending_count, 1);
226
227 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
228
229 return req;
230}
231
232/* Note: Must be called with sba->reqs_lock held */
233static void _sba_pending_request(struct sba_device *sba,
234 struct sba_request *req)
235{
236 lockdep_assert_held(&sba->reqs_lock);
237 req->flags &= ~SBA_REQUEST_STATE_MASK;
238 req->flags |= SBA_REQUEST_STATE_PENDING;
239 list_move_tail(&req->node, &sba->reqs_pending_list);
240 if (list_empty(&sba->reqs_active_list))
241 sba->reqs_fence = false;
242}
243
244/* Note: Must be called with sba->reqs_lock held */
245static bool _sba_active_request(struct sba_device *sba,
246 struct sba_request *req)
247{
248 lockdep_assert_held(&sba->reqs_lock);
249 if (list_empty(&sba->reqs_active_list))
250 sba->reqs_fence = false;
251 if (sba->reqs_fence)
252 return false;
253 req->flags &= ~SBA_REQUEST_STATE_MASK;
254 req->flags |= SBA_REQUEST_STATE_ACTIVE;
255 list_move_tail(&req->node, &sba->reqs_active_list);
256 if (req->flags & SBA_REQUEST_FENCE)
257 sba->reqs_fence = true;
258 return true;
259}
260
261/* Note: Must be called with sba->reqs_lock held */
262static void _sba_abort_request(struct sba_device *sba,
263 struct sba_request *req)
264{
265 lockdep_assert_held(&sba->reqs_lock);
266 req->flags &= ~SBA_REQUEST_STATE_MASK;
267 req->flags |= SBA_REQUEST_STATE_ABORTED;
268 list_move_tail(&req->node, &sba->reqs_aborted_list);
269 if (list_empty(&sba->reqs_active_list))
270 sba->reqs_fence = false;
271}
272
273/* Note: Must be called with sba->reqs_lock held */
274static void _sba_free_request(struct sba_device *sba,
275 struct sba_request *req)
276{
277 lockdep_assert_held(&sba->reqs_lock);
278 req->flags &= ~SBA_REQUEST_STATE_MASK;
279 req->flags |= SBA_REQUEST_STATE_FREE;
280 list_move_tail(&req->node, &sba->reqs_free_list);
281 if (list_empty(&sba->reqs_active_list))
282 sba->reqs_fence = false;
283 sba->reqs_free_count++;
284}
285
286static void sba_received_request(struct sba_request *req)
287{
288 unsigned long flags;
289 struct sba_device *sba = req->sba;
290
291 spin_lock_irqsave(&sba->reqs_lock, flags);
292 req->flags &= ~SBA_REQUEST_STATE_MASK;
293 req->flags |= SBA_REQUEST_STATE_RECEIVED;
294 list_move_tail(&req->node, &sba->reqs_received_list);
295 spin_unlock_irqrestore(&sba->reqs_lock, flags);
296}
297
298static void sba_complete_chained_requests(struct sba_request *req)
299{
300 unsigned long flags;
301 struct sba_request *nreq;
302 struct sba_device *sba = req->sba;
303
304 spin_lock_irqsave(&sba->reqs_lock, flags);
305
306 req->flags &= ~SBA_REQUEST_STATE_MASK;
307 req->flags |= SBA_REQUEST_STATE_COMPLETED;
308 list_move_tail(&req->node, &sba->reqs_completed_list);
309 list_for_each_entry(nreq, &req->next, next) {
310 nreq->flags &= ~SBA_REQUEST_STATE_MASK;
311 nreq->flags |= SBA_REQUEST_STATE_COMPLETED;
312 list_move_tail(&nreq->node, &sba->reqs_completed_list);
313 }
314 if (list_empty(&sba->reqs_active_list))
315 sba->reqs_fence = false;
316
317 spin_unlock_irqrestore(&sba->reqs_lock, flags);
318}
319
320static void sba_free_chained_requests(struct sba_request *req)
321{
322 unsigned long flags;
323 struct sba_request *nreq;
324 struct sba_device *sba = req->sba;
325
326 spin_lock_irqsave(&sba->reqs_lock, flags);
327
328 _sba_free_request(sba, req);
329 list_for_each_entry(nreq, &req->next, next)
330 _sba_free_request(sba, nreq);
331
332 spin_unlock_irqrestore(&sba->reqs_lock, flags);
333}
334
335static void sba_chain_request(struct sba_request *first,
336 struct sba_request *req)
337{
338 unsigned long flags;
339 struct sba_device *sba = req->sba;
340
341 spin_lock_irqsave(&sba->reqs_lock, flags);
342
343 list_add_tail(&req->next, &first->next);
344 req->first = first;
345 first->next_count++;
346 atomic_set(&first->next_pending_count, first->next_count);
347
348 spin_unlock_irqrestore(&sba->reqs_lock, flags);
349}
350
351static void sba_cleanup_nonpending_requests(struct sba_device *sba)
352{
353 unsigned long flags;
354 struct sba_request *req, *req1;
355
356 spin_lock_irqsave(&sba->reqs_lock, flags);
357
358 /* Free up all allocated requests */
359 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
360 _sba_free_request(sba, req);
361
362 /* Free up all received requests */
363 list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
364 _sba_free_request(sba, req);
365
366 /* Free up all completed requests */
367 list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
368 _sba_free_request(sba, req);
369
370 /* Set all active requests as aborted */
371 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
372 _sba_abort_request(sba, req);
373
374 /*
375 * Note: We expect that aborted requests will eventually be
376 * freed by sba_receive_message().
377 */
378
379 spin_unlock_irqrestore(&sba->reqs_lock, flags);
380}
381
382static void sba_cleanup_pending_requests(struct sba_device *sba)
383{
384 unsigned long flags;
385 struct sba_request *req, *req1;
386
387 spin_lock_irqsave(&sba->reqs_lock, flags);
388
389 /* Free up all pending requests */
390 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
391 _sba_free_request(sba, req);
392
393 spin_unlock_irqrestore(&sba->reqs_lock, flags);
394}
395
396/* ====== DMAENGINE callbacks ===== */
397
398static void sba_free_chan_resources(struct dma_chan *dchan)
399{
400 /*
401 * Channel resources are pre-allocated, so we just free up
402 * whatever we can so that we can re-use the pre-allocated
403 * channel resources next time.
404 */
405 sba_cleanup_nonpending_requests(to_sba_device(dchan));
406}
407
408static int sba_device_terminate_all(struct dma_chan *dchan)
409{
410 /* Cleanup all pending requests */
411 sba_cleanup_pending_requests(to_sba_device(dchan));
412
413 return 0;
414}
415
416static int sba_send_mbox_request(struct sba_device *sba,
417 struct sba_request *req)
418{
419 int mchans_idx, ret = 0;
420
421 /* Select mailbox channel in round-robin fashion */
422 mchans_idx = atomic_inc_return(&sba->mchans_current);
423 mchans_idx = mchans_idx % sba->mchans_count;
424
425 /* Send message for the request */
426 req->msg.error = 0;
427 ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
428 if (ret < 0) {
429 dev_err(sba->dev, "send message failed with error %d", ret);
430 return ret;
431 }
432 ret = req->msg.error;
433 if (ret < 0) {
434 dev_err(sba->dev, "message error %d", ret);
435 return ret;
436 }
437
438 return 0;
439}
440
441static void sba_issue_pending(struct dma_chan *dchan)
442{
443 int ret;
444 unsigned long flags;
445 struct sba_request *req, *req1;
446 struct sba_device *sba = to_sba_device(dchan);
447
448 spin_lock_irqsave(&sba->reqs_lock, flags);
449
450 /* Process all pending requests */
451 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
452 /* Try to make request active */
453 if (!_sba_active_request(sba, req))
454 break;
455
456 /* Send request to mailbox channel */
457 spin_unlock_irqrestore(&sba->reqs_lock, flags);
458 ret = sba_send_mbox_request(sba, req);
459 spin_lock_irqsave(&sba->reqs_lock, flags);
460
461 /* If something went wrong then keep request pending */
462 if (ret < 0) {
463 _sba_pending_request(sba, req);
464 break;
465 }
466 }
467
468 spin_unlock_irqrestore(&sba->reqs_lock, flags);
469}
470
471static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
472{
473 unsigned long flags;
474 dma_cookie_t cookie;
475 struct sba_device *sba;
476 struct sba_request *req, *nreq;
477
478 if (unlikely(!tx))
479 return -EINVAL;
480
481 sba = to_sba_device(tx->chan);
482 req = to_sba_request(tx);
483
484 /* Assign cookie and mark all chained requests pending */
485 spin_lock_irqsave(&sba->reqs_lock, flags);
486 cookie = dma_cookie_assign(tx);
487 _sba_pending_request(sba, req);
488 list_for_each_entry(nreq, &req->next, next)
489 _sba_pending_request(sba, nreq);
490 spin_unlock_irqrestore(&sba->reqs_lock, flags);
491
492 return cookie;
493}
494
495static enum dma_status sba_tx_status(struct dma_chan *dchan,
496 dma_cookie_t cookie,
497 struct dma_tx_state *txstate)
498{
499 int mchan_idx;
500 enum dma_status ret;
501 struct sba_device *sba = to_sba_device(dchan);
502
503 ret = dma_cookie_status(dchan, cookie, txstate);
504 if (ret == DMA_COMPLETE)
505 return ret;
506
507 for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
508 mbox_client_peek_data(sba->mchans[mchan_idx]);
509
510 return dma_cookie_status(dchan, cookie, txstate);
511}
512
513static void sba_fillup_interrupt_msg(struct sba_request *req,
514 struct brcm_sba_command *cmds,
515 struct brcm_message *msg)
516{
517 u64 cmd;
518 u32 c_mdata;
519 struct brcm_sba_command *cmdsp = cmds;
520
521 /* Type-B command to load dummy data into buf0 */
522 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
523 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
524 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
525 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
526 c_mdata = sba_cmd_load_c_mdata(0);
527 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
528 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
529 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
530 SBA_CMD_SHIFT, SBA_CMD_MASK);
531 cmdsp->cmd = cmd;
532 *cmdsp->cmd_dma = cpu_to_le64(cmd);
533 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
534 cmdsp->data = req->resp_dma;
535 cmdsp->data_len = req->sba->hw_resp_size;
536 cmdsp++;
537
538 /* Type-A command to write buf0 to dummy location */
539 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
540 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
541 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
542 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
543 cmd = sba_cmd_enc(cmd, 0x1,
544 SBA_RESP_SHIFT, SBA_RESP_MASK);
545 c_mdata = sba_cmd_write_c_mdata(0);
546 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
547 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
548 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
549 SBA_CMD_SHIFT, SBA_CMD_MASK);
550 cmdsp->cmd = cmd;
551 *cmdsp->cmd_dma = cpu_to_le64(cmd);
552 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
553 if (req->sba->hw_resp_size) {
554 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
555 cmdsp->resp = req->resp_dma;
556 cmdsp->resp_len = req->sba->hw_resp_size;
557 }
558 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
559 cmdsp->data = req->resp_dma;
560 cmdsp->data_len = req->sba->hw_resp_size;
561 cmdsp++;
562
563 /* Fillup brcm_message */
564 msg->type = BRCM_MESSAGE_SBA;
565 msg->sba.cmds = cmds;
566 msg->sba.cmds_count = cmdsp - cmds;
567 msg->ctx = req;
568 msg->error = 0;
569}
570
571static struct dma_async_tx_descriptor *
572sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
573{
574 struct sba_request *req = NULL;
575 struct sba_device *sba = to_sba_device(dchan);
576
577 /* Alloc new request */
578 req = sba_alloc_request(sba);
579 if (!req)
580 return NULL;
581
582 /*
583 * Force fence so that no requests are submitted
584 * until DMA callback for this request is invoked.
585 */
586 req->flags |= SBA_REQUEST_FENCE;
587
588 /* Fillup request message */
589 sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
590
591 /* Init async_tx descriptor */
592 req->tx.flags = flags;
593 req->tx.cookie = -EBUSY;
594
595 return &req->tx;
596}
597
598static void sba_fillup_memcpy_msg(struct sba_request *req,
599 struct brcm_sba_command *cmds,
600 struct brcm_message *msg,
601 dma_addr_t msg_offset, size_t msg_len,
602 dma_addr_t dst, dma_addr_t src)
603{
604 u64 cmd;
605 u32 c_mdata;
606 struct brcm_sba_command *cmdsp = cmds;
607
608 /* Type-B command to load data into buf0 */
609 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
610 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
611 cmd = sba_cmd_enc(cmd, msg_len,
612 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
613 c_mdata = sba_cmd_load_c_mdata(0);
614 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
615 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
616 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
617 SBA_CMD_SHIFT, SBA_CMD_MASK);
618 cmdsp->cmd = cmd;
619 *cmdsp->cmd_dma = cpu_to_le64(cmd);
620 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
621 cmdsp->data = src + msg_offset;
622 cmdsp->data_len = msg_len;
623 cmdsp++;
624
625 /* Type-A command to write buf0 */
626 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
627 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
628 cmd = sba_cmd_enc(cmd, msg_len,
629 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
630 cmd = sba_cmd_enc(cmd, 0x1,
631 SBA_RESP_SHIFT, SBA_RESP_MASK);
632 c_mdata = sba_cmd_write_c_mdata(0);
633 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
634 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
635 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
636 SBA_CMD_SHIFT, SBA_CMD_MASK);
637 cmdsp->cmd = cmd;
638 *cmdsp->cmd_dma = cpu_to_le64(cmd);
639 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
640 if (req->sba->hw_resp_size) {
641 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
642 cmdsp->resp = req->resp_dma;
643 cmdsp->resp_len = req->sba->hw_resp_size;
644 }
645 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
646 cmdsp->data = dst + msg_offset;
647 cmdsp->data_len = msg_len;
648 cmdsp++;
649
650 /* Fillup brcm_message */
651 msg->type = BRCM_MESSAGE_SBA;
652 msg->sba.cmds = cmds;
653 msg->sba.cmds_count = cmdsp - cmds;
654 msg->ctx = req;
655 msg->error = 0;
656}
657
658static struct sba_request *
659sba_prep_dma_memcpy_req(struct sba_device *sba,
660 dma_addr_t off, dma_addr_t dst, dma_addr_t src,
661 size_t len, unsigned long flags)
662{
663 struct sba_request *req = NULL;
664
665 /* Alloc new request */
666 req = sba_alloc_request(sba);
667 if (!req)
668 return NULL;
669 if (flags & DMA_PREP_FENCE)
670 req->flags |= SBA_REQUEST_FENCE;
671
672 /* Fillup request message */
673 sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
674 off, len, dst, src);
675
676 /* Init async_tx descriptor */
677 req->tx.flags = flags;
678 req->tx.cookie = -EBUSY;
679
680 return req;
681}
682
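/*
 * Example: with hw_buf_size == 4096, a 10000 byte memcpy below becomes a
 * chain of three sba_request's covering 4096 + 4096 + 1808 bytes. The
 * chain completes (and the client callback runs) only after the mailbox
 * has returned every member request, see sba_receive_message().
 */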
683static struct dma_async_tx_descriptor *
684sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
685 size_t len, unsigned long flags)
686{
687 size_t req_len;
688 dma_addr_t off = 0;
689 struct sba_device *sba = to_sba_device(dchan);
690 struct sba_request *first = NULL, *req;
691
692 /* Create chained requests where each request is up to hw_buf_size */
693 while (len) {
694 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
695
696 req = sba_prep_dma_memcpy_req(sba, off, dst, src,
697 req_len, flags);
698 if (!req) {
699 if (first)
700 sba_free_chained_requests(first);
701 return NULL;
702 }
703
704 if (first)
705 sba_chain_request(first, req);
706 else
707 first = req;
708
709 off += req_len;
710 len -= req_len;
711 }
712
713 return (first) ? &first->tx : NULL;
714}
715
716static void sba_fillup_xor_msg(struct sba_request *req,
717 struct brcm_sba_command *cmds,
718 struct brcm_message *msg,
719 dma_addr_t msg_offset, size_t msg_len,
720 dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
721{
722 u64 cmd;
723 u32 c_mdata;
724 unsigned int i;
725 struct brcm_sba_command *cmdsp = cmds;
726
727 /* Type-B command to load data into buf0 */
728 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
729 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
730 cmd = sba_cmd_enc(cmd, msg_len,
731 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
732 c_mdata = sba_cmd_load_c_mdata(0);
733 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
734 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
735 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
736 SBA_CMD_SHIFT, SBA_CMD_MASK);
737 cmdsp->cmd = cmd;
738 *cmdsp->cmd_dma = cpu_to_le64(cmd);
739 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
740 cmdsp->data = src[0] + msg_offset;
741 cmdsp->data_len = msg_len;
742 cmdsp++;
743
744 /* Type-B commands to xor data with buf0 and put it back in buf0 */
745 for (i = 1; i < src_cnt; i++) {
746 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
747 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
748 cmd = sba_cmd_enc(cmd, msg_len,
749 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
750 c_mdata = sba_cmd_xor_c_mdata(0, 0);
751 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
752 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
753 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
754 SBA_CMD_SHIFT, SBA_CMD_MASK);
755 cmdsp->cmd = cmd;
756 *cmdsp->cmd_dma = cpu_to_le64(cmd);
757 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
758 cmdsp->data = src[i] + msg_offset;
759 cmdsp->data_len = msg_len;
760 cmdsp++;
761 }
762
763 /* Type-A command to write buf0 */
764 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
765 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
766 cmd = sba_cmd_enc(cmd, msg_len,
767 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
768 cmd = sba_cmd_enc(cmd, 0x1,
769 SBA_RESP_SHIFT, SBA_RESP_MASK);
770 c_mdata = sba_cmd_write_c_mdata(0);
771 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
772 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
773 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
774 SBA_CMD_SHIFT, SBA_CMD_MASK);
775 cmdsp->cmd = cmd;
776 *cmdsp->cmd_dma = cpu_to_le64(cmd);
777 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
778 if (req->sba->hw_resp_size) {
779 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
780 cmdsp->resp = req->resp_dma;
781 cmdsp->resp_len = req->sba->hw_resp_size;
782 }
783 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
784 cmdsp->data = dst + msg_offset;
785 cmdsp->data_len = msg_len;
786 cmdsp++;
787
788 /* Fillup brcm_message */
789 msg->type = BRCM_MESSAGE_SBA;
790 msg->sba.cmds = cmds;
791 msg->sba.cmds_count = cmdsp - cmds;
792 msg->ctx = req;
793 msg->error = 0;
794}
795
796static struct sba_request *
797sba_prep_dma_xor_req(struct sba_device *sba,
798 dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
799 u32 src_cnt, size_t len, unsigned long flags)
800{
801 struct sba_request *req = NULL;
802
803 /* Alloc new request */
804 req = sba_alloc_request(sba);
805 if (!req)
806 return NULL;
807 if (flags & DMA_PREP_FENCE)
808 req->flags |= SBA_REQUEST_FENCE;
809
810 /* Fillup request message */
811 sba_fillup_xor_msg(req, req->cmds, &req->msg,
812 off, len, dst, src, src_cnt);
813
814 /* Init async_tx descriptor */
815 req->tx.flags = flags;
816 req->tx.cookie = -EBUSY;
817
818 return req;
819}
820
821static struct dma_async_tx_descriptor *
822sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
823 u32 src_cnt, size_t len, unsigned long flags)
824{
825 size_t req_len;
826 dma_addr_t off = 0;
827 struct sba_device *sba = to_sba_device(dchan);
828 struct sba_request *first = NULL, *req;
829
830 /* Sanity checks */
831 if (unlikely(src_cnt > sba->max_xor_srcs))
832 return NULL;
833
834 /* Create chained requests where each request is up to hw_buf_size */
835 while (len) {
836 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
837
838 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
839 req_len, flags);
840 if (!req) {
841 if (first)
842 sba_free_chained_requests(first);
843 return NULL;
844 }
845
846 if (first)
847 sba_chain_request(first, req);
848 else
849 first = req;
850
851 off += req_len;
852 len -= req_len;
853 }
854
855 return (first) ? &first->tx : NULL;
856}
857
858static void sba_fillup_pq_msg(struct sba_request *req,
859 bool pq_continue,
860 struct brcm_sba_command *cmds,
861 struct brcm_message *msg,
862 dma_addr_t msg_offset, size_t msg_len,
863 dma_addr_t *dst_p, dma_addr_t *dst_q,
864 const u8 *scf, dma_addr_t *src, u32 src_cnt)
865{
866 u64 cmd;
867 u32 c_mdata;
868 unsigned int i;
869 struct brcm_sba_command *cmdsp = cmds;
870
871 if (pq_continue) {
872 /* Type-B command to load old P into buf0 */
873 if (dst_p) {
874 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
875 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
876 cmd = sba_cmd_enc(cmd, msg_len,
877 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
878 c_mdata = sba_cmd_load_c_mdata(0);
879 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
880 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
881 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
882 SBA_CMD_SHIFT, SBA_CMD_MASK);
883 cmdsp->cmd = cmd;
884 *cmdsp->cmd_dma = cpu_to_le64(cmd);
885 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
886 cmdsp->data = *dst_p + msg_offset;
887 cmdsp->data_len = msg_len;
888 cmdsp++;
889 }
890
891 /* Type-B command to load old Q into buf1 */
892 if (dst_q) {
893 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
894 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
895 cmd = sba_cmd_enc(cmd, msg_len,
896 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
897 c_mdata = sba_cmd_load_c_mdata(1);
898 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
899 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
900 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
901 SBA_CMD_SHIFT, SBA_CMD_MASK);
902 cmdsp->cmd = cmd;
903 *cmdsp->cmd_dma = cpu_to_le64(cmd);
904 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
905 cmdsp->data = *dst_q + msg_offset;
906 cmdsp->data_len = msg_len;
907 cmdsp++;
908 }
909 } else {
910 /* Type-A command to zero all buffers */
911 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
912 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
913 cmd = sba_cmd_enc(cmd, msg_len,
914 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
915 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
916 SBA_CMD_SHIFT, SBA_CMD_MASK);
917 cmdsp->cmd = cmd;
918 *cmdsp->cmd_dma = cpu_to_le64(cmd);
919 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
920 cmdsp++;
921 }
922
923 /* Type-B commands to generate P onto buf0 and Q onto buf1 */
924 for (i = 0; i < src_cnt; i++) {
925 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
926 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
927 cmd = sba_cmd_enc(cmd, msg_len,
928 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
929 c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
930 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
931 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
932 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
933 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
934 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
935 SBA_CMD_SHIFT, SBA_CMD_MASK);
936 cmdsp->cmd = cmd;
937 *cmdsp->cmd_dma = cpu_to_le64(cmd);
938 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
939 cmdsp->data = src[i] + msg_offset;
940 cmdsp->data_len = msg_len;
941 cmdsp++;
942 }
943
944 /* Type-A command to write buf0 */
945 if (dst_p) {
946 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
947 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
948 cmd = sba_cmd_enc(cmd, msg_len,
949 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
950 cmd = sba_cmd_enc(cmd, 0x1,
951 SBA_RESP_SHIFT, SBA_RESP_MASK);
952 c_mdata = sba_cmd_write_c_mdata(0);
953 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
954 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
955 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
956 SBA_CMD_SHIFT, SBA_CMD_MASK);
957 cmdsp->cmd = cmd;
958 *cmdsp->cmd_dma = cpu_to_le64(cmd);
959 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
960 if (req->sba->hw_resp_size) {
961 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
962 cmdsp->resp = req->resp_dma;
963 cmdsp->resp_len = req->sba->hw_resp_size;
964 }
965 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
966 cmdsp->data = *dst_p + msg_offset;
967 cmdsp->data_len = msg_len;
968 cmdsp++;
969 }
970
971 /* Type-A command to write buf1 */
972 if (dst_q) {
973 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
974 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
975 cmd = sba_cmd_enc(cmd, msg_len,
976 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
977 cmd = sba_cmd_enc(cmd, 0x1,
978 SBA_RESP_SHIFT, SBA_RESP_MASK);
979 c_mdata = sba_cmd_write_c_mdata(1);
980 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
981 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
982 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
983 SBA_CMD_SHIFT, SBA_CMD_MASK);
984 cmdsp->cmd = cmd;
985 *cmdsp->cmd_dma = cpu_to_le64(cmd);
986 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
987 if (req->sba->hw_resp_size) {
988 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
989 cmdsp->resp = req->resp_dma;
990 cmdsp->resp_len = req->sba->hw_resp_size;
991 }
992 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
993 cmdsp->data = *dst_q + msg_offset;
994 cmdsp->data_len = msg_len;
995 cmdsp++;
996 }
997
998 /* Fillup brcm_message */
999 msg->type = BRCM_MESSAGE_SBA;
1000 msg->sba.cmds = cmds;
1001 msg->sba.cmds_count = cmdsp - cmds;
1002 msg->ctx = req;
1003 msg->error = 0;
1004}
1005
1006static struct sba_request *
1007sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
1008 dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
1009 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1010{
1011 struct sba_request *req = NULL;
1012
1013 /* Alloc new request */
1014 req = sba_alloc_request(sba);
1015 if (!req)
1016 return NULL;
1017 if (flags & DMA_PREP_FENCE)
1018 req->flags |= SBA_REQUEST_FENCE;
1019
1020 /* Fillup request messages */
1021 sba_fillup_pq_msg(req, dmaf_continue(flags),
1022 req->cmds, &req->msg,
1023 off, len, dst_p, dst_q, scf, src, src_cnt);
1024
1025 /* Init async_tx descriptor */
1026 req->tx.flags = flags;
1027 req->tx.cookie = -EBUSY;
1028
1029 return req;
1030}
1031
1032static void sba_fillup_pq_single_msg(struct sba_request *req,
1033 bool pq_continue,
1034 struct brcm_sba_command *cmds,
1035 struct brcm_message *msg,
1036 dma_addr_t msg_offset, size_t msg_len,
1037 dma_addr_t *dst_p, dma_addr_t *dst_q,
1038 dma_addr_t src, u8 scf)
1039{
1040 u64 cmd;
1041 u32 c_mdata;
1042 u8 pos, dpos = raid6_gflog[scf];
1043 struct brcm_sba_command *cmdsp = cmds;
1044
1045 if (!dst_p)
1046 goto skip_p;
1047
1048 if (pq_continue) {
1049 /* Type-B command to load old P into buf0 */
1050 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1051 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1052 cmd = sba_cmd_enc(cmd, msg_len,
1053 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1054 c_mdata = sba_cmd_load_c_mdata(0);
1055 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1056 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1057 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1058 SBA_CMD_SHIFT, SBA_CMD_MASK);
1059 cmdsp->cmd = cmd;
1060 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1061 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1062 cmdsp->data = *dst_p + msg_offset;
1063 cmdsp->data_len = msg_len;
1064 cmdsp++;
1065
1066 /*
1067 * Type-B commands to xor data with buf0 and put it
1068 * back in buf0
1069 */
1070 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1071 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1072 cmd = sba_cmd_enc(cmd, msg_len,
1073 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1074 c_mdata = sba_cmd_xor_c_mdata(0, 0);
1075 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1076 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1077 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1078 SBA_CMD_SHIFT, SBA_CMD_MASK);
1079 cmdsp->cmd = cmd;
1080 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1081 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1082 cmdsp->data = src + msg_offset;
1083 cmdsp->data_len = msg_len;
1084 cmdsp++;
1085 } else {
1086 /* Type-B command to load old P into buf0 */
1087 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1088 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1089 cmd = sba_cmd_enc(cmd, msg_len,
1090 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1091 c_mdata = sba_cmd_load_c_mdata(0);
1092 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1093 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1094 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1095 SBA_CMD_SHIFT, SBA_CMD_MASK);
1096 cmdsp->cmd = cmd;
1097 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1098 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1099 cmdsp->data = src + msg_offset;
1100 cmdsp->data_len = msg_len;
1101 cmdsp++;
1102 }
1103
1104 /* Type-A command to write buf0 */
1105 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1106 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1107 cmd = sba_cmd_enc(cmd, msg_len,
1108 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1109 cmd = sba_cmd_enc(cmd, 0x1,
1110 SBA_RESP_SHIFT, SBA_RESP_MASK);
1111 c_mdata = sba_cmd_write_c_mdata(0);
1112 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1113 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1114 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1115 SBA_CMD_SHIFT, SBA_CMD_MASK);
1116 cmdsp->cmd = cmd;
1117 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1118 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1119 if (req->sba->hw_resp_size) {
1120 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
1121 cmdsp->resp = req->resp_dma;
1122 cmdsp->resp_len = req->sba->hw_resp_size;
1123 }
1124 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1125 cmdsp->data = *dst_p + msg_offset;
1126 cmdsp->data_len = msg_len;
1127 cmdsp++;
1128
1129skip_p:
1130 if (!dst_q)
1131 goto skip_q;
1132
1133 /* Type-A command to zero all buffers */
1134 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1135 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1136 cmd = sba_cmd_enc(cmd, msg_len,
1137 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1138 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
1139 SBA_CMD_SHIFT, SBA_CMD_MASK);
1140 cmdsp->cmd = cmd;
1141 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1142 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1143 cmdsp++;
1144
1145 if (dpos == 255)
1146 goto skip_q_computation;
1147 pos = (dpos < req->sba->max_pq_coefs) ?
1148 dpos : (req->sba->max_pq_coefs - 1);
1149
1150 /*
1151 * Type-B command to generate initial Q from data
1152 * and store output into buf0
1153 */
1154 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1155 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1156 cmd = sba_cmd_enc(cmd, msg_len,
1157 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1158 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
1159 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1160 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1161 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1162 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1163 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1164 SBA_CMD_SHIFT, SBA_CMD_MASK);
1165 cmdsp->cmd = cmd;
1166 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1167 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1168 cmdsp->data = src + msg_offset;
1169 cmdsp->data_len = msg_len;
1170 cmdsp++;
1171
1172 dpos -= pos;
1173
1174 /* Multiple Type-A commands to generate final Q */
1175 while (dpos) {
1176 pos = (dpos < req->sba->max_pq_coefs) ?
1177 dpos : (req->sba->max_pq_coefs - 1);
1178
1179 /*
1180 * Type-A command to generate Q with buf0 and
1181 * buf1 and store the result in buf0
1182 */
1183 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1184 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1185 cmd = sba_cmd_enc(cmd, msg_len,
1186 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1187 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
1188 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1189 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1190 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1191 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1192 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1193 SBA_CMD_SHIFT, SBA_CMD_MASK);
1194 cmdsp->cmd = cmd;
1195 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1196 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1197 cmdsp++;
1198
1199 dpos -= pos;
1200 }
1201
1202skip_q_computation:
1203 if (pq_continue) {
1204 /*
1205 * Type-B command to XOR previous output with
1206 * buf0 and write it into buf0
1207 */
1208 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1209 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1210 cmd = sba_cmd_enc(cmd, msg_len,
1211 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1212 c_mdata = sba_cmd_xor_c_mdata(0, 0);
1213 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1214 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1215 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1216 SBA_CMD_SHIFT, SBA_CMD_MASK);
1217 cmdsp->cmd = cmd;
1218 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1219 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1220 cmdsp->data = *dst_q + msg_offset;
1221 cmdsp->data_len = msg_len;
1222 cmdsp++;
1223 }
1224
1225 /* Type-A command to write buf0 */
1226 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1227 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1228 cmd = sba_cmd_enc(cmd, msg_len,
1229 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1230 cmd = sba_cmd_enc(cmd, 0x1,
1231 SBA_RESP_SHIFT, SBA_RESP_MASK);
1232 c_mdata = sba_cmd_write_c_mdata(0);
1233 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1234 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1235 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1236 SBA_CMD_SHIFT, SBA_CMD_MASK);
1237 cmdsp->cmd = cmd;
1238 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1239 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1240 if (req->sba->hw_resp_size) {
1241 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
1242 cmdsp->resp = req->resp_dma;
1243 cmdsp->resp_len = req->sba->hw_resp_size;
1244 }
1245 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1246 cmdsp->data = *dst_q + msg_offset;
1247 cmdsp->data_len = msg_len;
1248 cmdsp++;
1249
1250skip_q:
1251 /* Fillup brcm_message */
1252 msg->type = BRCM_MESSAGE_SBA;
1253 msg->sba.cmds = cmds;
1254 msg->sba.cmds_count = cmdsp - cmds;
1255 msg->ctx = req;
1256 msg->error = 0;
1257}
1258
1259static struct sba_request *
1260sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
1261 dma_addr_t *dst_p, dma_addr_t *dst_q,
1262 dma_addr_t src, u8 scf, size_t len,
1263 unsigned long flags)
1264{
1265 struct sba_request *req = NULL;
1266
1267 /* Alloc new request */
1268 req = sba_alloc_request(sba);
1269 if (!req)
1270 return NULL;
1271 if (flags & DMA_PREP_FENCE)
1272 req->flags |= SBA_REQUEST_FENCE;
1273
1274 /* Fillup request messages */
1275 sba_fillup_pq_single_msg(req, dmaf_continue(flags),
1276 req->cmds, &req->msg, off, len,
1277 dst_p, dst_q, src, scf);
1278
1279 /* Init async_tx descriptor */
1280 req->tx.flags = flags;
1281 req->tx.cookie = -EBUSY;
1282
1283 return req;
1284}
1285
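/*
 * Prepare a P+Q request. If the GF(2^8) log of every coefficient fits
 * within max_pq_coefs, one multi-source request is built per hw_buf_size
 * slice; otherwise the "slow" path rebuilds each slice as a series of
 * fenced single-source requests so that a large Galois multiplication can
 * be decomposed into SBA_CMD_GALOIS steps of at most max_pq_coefs - 1.
 */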
1286static struct dma_async_tx_descriptor *
1287sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
1288 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1289{
1290 u32 i, dst_q_index;
1291 size_t req_len;
1292 bool slow = false;
1293 dma_addr_t off = 0;
1294 dma_addr_t *dst_p = NULL, *dst_q = NULL;
1295 struct sba_device *sba = to_sba_device(dchan);
1296 struct sba_request *first = NULL, *req;
1297
1298 /* Sanity checks */
1299 if (unlikely(src_cnt > sba->max_pq_srcs))
1300 return NULL;
1301 for (i = 0; i < src_cnt; i++)
1302 if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
1303 slow = true;
1304
1305 /* Figure-out P and Q destination addresses */
1306 if (!(flags & DMA_PREP_PQ_DISABLE_P))
1307 dst_p = &dst[0];
1308 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
1309 dst_q = &dst[1];
1310
1311 /* Create chained requests where each request is up to hw_buf_size */
1312 while (len) {
1313 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
1314
1315 if (slow) {
1316 dst_q_index = src_cnt;
1317
1318 if (dst_q) {
1319 for (i = 0; i < src_cnt; i++) {
1320 if (*dst_q == src[i]) {
1321 dst_q_index = i;
1322 break;
1323 }
1324 }
1325 }
1326
1327 if (dst_q_index < src_cnt) {
1328 i = dst_q_index;
1329 req = sba_prep_dma_pq_single_req(sba,
1330 off, dst_p, dst_q, src[i], scf[i],
1331 req_len, flags | DMA_PREP_FENCE);
1332 if (!req)
1333 goto fail;
1334
1335 if (first)
1336 sba_chain_request(first, req);
1337 else
1338 first = req;
1339
1340 flags |= DMA_PREP_CONTINUE;
1341 }
1342
1343 for (i = 0; i < src_cnt; i++) {
1344 if (dst_q_index == i)
1345 continue;
1346
1347 req = sba_prep_dma_pq_single_req(sba,
1348 off, dst_p, dst_q, src[i], scf[i],
1349 req_len, flags | DMA_PREP_FENCE);
1350 if (!req)
1351 goto fail;
1352
1353 if (first)
1354 sba_chain_request(first, req);
1355 else
1356 first = req;
1357
1358 flags |= DMA_PREP_CONTINUE;
1359 }
1360 } else {
1361 req = sba_prep_dma_pq_req(sba, off,
1362 dst_p, dst_q, src, src_cnt,
1363 scf, req_len, flags);
1364 if (!req)
1365 goto fail;
1366
1367 if (first)
1368 sba_chain_request(first, req);
1369 else
1370 first = req;
1371 }
1372
1373 off += req_len;
1374 len -= req_len;
1375 }
1376
1377 return (first) ? &first->tx : NULL;
1378
1379fail:
1380 if (first)
1381 sba_free_chained_requests(first);
1382 return NULL;
1383}
1384
1385/* ====== Mailbox callbacks ===== */
1386
1387static void sba_dma_tx_actions(struct sba_request *req)
1388{
1389 struct dma_async_tx_descriptor *tx = &req->tx;
1390
1391 WARN_ON(tx->cookie < 0);
1392
1393 if (tx->cookie > 0) {
1394 dma_cookie_complete(tx);
1395
1396 /*
1397 * Call the callback (must not sleep or submit new
1398 * operations to this channel)
1399 */
1400 if (tx->callback)
1401 tx->callback(tx->callback_param);
1402
1403 dma_descriptor_unmap(tx);
1404 }
1405
1406 /* Run dependent operations */
1407 dma_run_dependencies(tx);
1408
1409 /* If waiting for 'ack' then move to completed list */
1410 if (!async_tx_test_ack(&req->tx))
1411 sba_complete_chained_requests(req);
1412 else
1413 sba_free_chained_requests(req);
1414}
1415
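/*
 * Completion flow: every mailbox reply lands here. Once the last member
 * of a chain has been received (next_pending_count reaches zero), the
 * cookie is completed and the client callback runs via
 * sba_dma_tx_actions(); afterwards completed requests waiting for an
 * 'ack' are retired and any pending requests are pushed out again
 * through sba_issue_pending().
 */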
1416static void sba_receive_message(struct mbox_client *cl, void *msg)
1417{
1418 unsigned long flags;
1419 struct brcm_message *m = msg;
1420 struct sba_request *req = m->ctx, *req1;
1421 struct sba_device *sba = req->sba;
1422
1423 /* Print an error if the received message carries one */
1424 if (m->error < 0)
1425 dev_err(sba->dev, "%s got message with error %d",
1426 dma_chan_name(&sba->dma_chan), m->error);
1427
1428 /* Mark request as received */
1429 sba_received_request(req);
1430
1431 /* Wait for all chained requests to be completed */
1432 if (atomic_dec_return(&req->first->next_pending_count))
1433 goto done;
1434
1435 /* Point to first request */
1436 req = req->first;
1437
1438 /* Update request */
1439 if (req->flags & SBA_REQUEST_STATE_RECEIVED)
1440 sba_dma_tx_actions(req);
1441 else
1442 sba_free_chained_requests(req);
1443
1444 spin_lock_irqsave(&sba->reqs_lock, flags);
1445
1446 /* Re-check all completed requests waiting for 'ack' */
1447 list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
1448 spin_unlock_irqrestore(&sba->reqs_lock, flags);
1449 sba_dma_tx_actions(req);
1450 spin_lock_irqsave(&sba->reqs_lock, flags);
1451 }
1452
1453 spin_unlock_irqrestore(&sba->reqs_lock, flags);
1454
1455done:
1456 /* Try to submit pending request */
1457 sba_issue_pending(&sba->dma_chan);
1458}
1459
1460/* ====== Platform driver routines ===== */
1461
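/*
 * Pre-allocate everything the DMA channel will ever need: a response pool
 * of max_req * hw_resp_size bytes, a command pool of
 * max_req * max_cmd_per_req * sizeof(u64) bytes, and max_req sba_request
 * structures which are carved out of those pools and parked on the free
 * list.
 */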
1462static int sba_prealloc_channel_resources(struct sba_device *sba)
1463{
1464 int i, j, p, ret = 0;
1465 struct sba_request *req = NULL;
1466
1467 sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
1468 sba->max_resp_pool_size,
1469 &sba->resp_dma_base, GFP_KERNEL);
1470 if (!sba->resp_base)
1471 return -ENOMEM;
1472
1473 sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
1474 sba->max_cmds_pool_size,
1475 &sba->cmds_dma_base, GFP_KERNEL);
1476 if (!sba->cmds_base) {
1477 ret = -ENOMEM;
1478 goto fail_free_resp_pool;
1479 }
1480
1481 spin_lock_init(&sba->reqs_lock);
1482 sba->reqs_fence = false;
1483 INIT_LIST_HEAD(&sba->reqs_alloc_list);
1484 INIT_LIST_HEAD(&sba->reqs_pending_list);
1485 INIT_LIST_HEAD(&sba->reqs_active_list);
1486 INIT_LIST_HEAD(&sba->reqs_received_list);
1487 INIT_LIST_HEAD(&sba->reqs_completed_list);
1488 INIT_LIST_HEAD(&sba->reqs_aborted_list);
1489 INIT_LIST_HEAD(&sba->reqs_free_list);
1490
1491 sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
1492 sizeof(*req), GFP_KERNEL);
1493 if (!sba->reqs) {
1494 ret = -ENOMEM;
1495 goto fail_free_cmds_pool;
1496 }
1497
1498 for (i = 0, p = 0; i < sba->max_req; i++) {
1499 req = &sba->reqs[i];
1500 INIT_LIST_HEAD(&req->node);
1501 req->sba = sba;
1502 req->flags = SBA_REQUEST_STATE_FREE;
1503 INIT_LIST_HEAD(&req->next);
1504 req->next_count = 1;
1505 atomic_set(&req->next_pending_count, 0);
1506 req->resp = sba->resp_base + p;
1507 req->resp_dma = sba->resp_dma_base + p;
1508 p += sba->hw_resp_size;
1509 req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
1510 sizeof(*req->cmds), GFP_KERNEL);
1511 if (!req->cmds) {
1512 ret = -ENOMEM;
1513 goto fail_free_cmds_pool;
1514 }
1515 for (j = 0; j < sba->max_cmd_per_req; j++) {
1516 req->cmds[j].cmd = 0;
1517 req->cmds[j].cmd_dma = sba->cmds_base +
1518 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1519 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1520 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1521 req->cmds[j].flags = 0;
1522 }
1523 memset(&req->msg, 0, sizeof(req->msg));
1524 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1525 req->tx.tx_submit = sba_tx_submit;
1526 req->tx.phys = req->resp_dma;
1527 list_add_tail(&req->node, &sba->reqs_free_list);
1528 }
1529
1530 sba->reqs_free_count = sba->max_req;
1531
1532 return 0;
1533
1534fail_free_cmds_pool:
1535 dma_free_coherent(sba->dma_dev.dev,
1536 sba->max_cmds_pool_size,
1537 sba->cmds_base, sba->cmds_dma_base);
1538fail_free_resp_pool:
1539 dma_free_coherent(sba->dma_dev.dev,
1540 sba->max_resp_pool_size,
1541 sba->resp_base, sba->resp_dma_base);
1542 return ret;
1543}
1544
1545static void sba_freeup_channel_resources(struct sba_device *sba)
1546{
1547 dmaengine_terminate_all(&sba->dma_chan);
1548 dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
1549 sba->cmds_base, sba->cmds_dma_base);
1550 dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
1551 sba->resp_base, sba->resp_dma_base);
1552 sba->resp_base = NULL;
1553 sba->resp_dma_base = 0;
1554}
1555
1556static int sba_async_register(struct sba_device *sba)
1557{
1558 int ret;
1559 struct dma_device *dma_dev = &sba->dma_dev;
1560
1561 /* Initialize DMA channel cookie */
1562 sba->dma_chan.device = dma_dev;
1563 dma_cookie_init(&sba->dma_chan);
1564
1565 /* Initialize DMA device capability mask */
1566 dma_cap_zero(dma_dev->cap_mask);
1567 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
1568 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1569 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1570 dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1571
1572 /*
1573 * Set mailbox channel device as the base device of
1574 * our dma_device because the actual memory accesses
1575 * will be done by mailbox controller
1576 */
1577 dma_dev->dev = sba->mbox_dev;
1578
1579 /* Set base prep routines */
1580 dma_dev->device_free_chan_resources = sba_free_chan_resources;
1581 dma_dev->device_terminate_all = sba_device_terminate_all;
1582 dma_dev->device_issue_pending = sba_issue_pending;
1583 dma_dev->device_tx_status = sba_tx_status;
1584
1585 /* Set interrupt routine */
1586 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1587 dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
1588
1589 /* Set memcpy routine */
1590 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1591 dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
1592
1593 /* Set xor routine and capability */
1594 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1595 dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
1596 dma_dev->max_xor = sba->max_xor_srcs;
1597 }
1598
1599 /* Set pq routine and capability */
1600 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1601 dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
1602 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
1603 }
1604
1605 /* Initialize DMA device channel list */
1606 INIT_LIST_HEAD(&dma_dev->channels);
1607 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
1608
1609 /* Register with Linux async DMA framework */
1610 ret = dma_async_device_register(dma_dev);
1611 if (ret) {
1612 dev_err(sba->dev, "async device register error %d", ret);
1613 return ret;
1614 }
1615
1616 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
1617 dma_chan_name(&sba->dma_chan),
1618 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
1619 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
1620 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1621 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
1622
1623 return 0;
1624}
1625
1626static int sba_probe(struct platform_device *pdev)
1627{
1628 int i, ret = 0, mchans_count;
1629 struct sba_device *sba;
1630 struct platform_device *mbox_pdev;
1631 struct of_phandle_args args;
1632
1633 /* Allocate main SBA struct */
1634 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
1635 if (!sba)
1636 return -ENOMEM;
1637
1638 sba->dev = &pdev->dev;
1639 platform_set_drvdata(pdev, sba);
1640
1641 /* Determine SBA version from DT compatible string */
1642 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
1643 sba->ver = SBA_VER_1;
1644 else if (of_device_is_compatible(sba->dev->of_node,
1645 "brcm,iproc-sba-v2"))
1646 sba->ver = SBA_VER_2;
1647 else
1648 return -ENODEV;
1649
1650 /* Derived Configuration parameters */
1651 switch (sba->ver) {
1652 case SBA_VER_1:
1653 sba->max_req = 1024;
1654 sba->hw_buf_size = 4096;
1655 sba->hw_resp_size = 8;
1656 sba->max_pq_coefs = 6;
1657 sba->max_pq_srcs = 6;
1658 break;
1659 case SBA_VER_2:
1660 sba->max_req = 1024;
1661 sba->hw_buf_size = 4096;
1662 sba->hw_resp_size = 8;
1663 sba->max_pq_coefs = 30;
1664 /*
1665 * We cannot support max_pq_srcs == max_pq_coefs because
1666 * we are limited by the number of SBA commands that we can
1667 * fit in one message for the underlying ring manager HW.
1668 */
1669 sba->max_pq_srcs = 12;
1670 break;
1671 default:
1672 return -EINVAL;
1673 }
1674 sba->max_cmd_per_req = sba->max_pq_srcs + 3;
1675 sba->max_xor_srcs = sba->max_cmd_per_req - 1;
1676 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
1677 sba->max_cmds_pool_size = sba->max_req *
1678 sba->max_cmd_per_req * sizeof(u64);
1679
1680 /* Setup mailbox client */
1681 sba->client.dev = &pdev->dev;
1682 sba->client.rx_callback = sba_receive_message;
1683 sba->client.tx_block = false;
1684 sba->client.knows_txdone = false;
1685 sba->client.tx_tout = 0;
1686
1687 /* Number of channels equals number of mailbox channels */
1688 ret = of_count_phandle_with_args(pdev->dev.of_node,
1689 "mboxes", "#mbox-cells");
1690 if (ret <= 0)
1691 return -ENODEV;
1692 mchans_count = ret;
1693 sba->mchans_count = 0;
1694 atomic_set(&sba->mchans_current, 0);
1695
1696 /* Allocate mailbox channel array */
1697 sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
1698 sizeof(*sba->mchans), GFP_KERNEL);
1699 if (!sba->mchans)
1700 return -ENOMEM;
1701
1702 /* Request mailbox channels */
1703 for (i = 0; i < mchans_count; i++) {
1704 sba->mchans[i] = mbox_request_channel(&sba->client, i);
1705 if (IS_ERR(sba->mchans[i])) {
1706 ret = PTR_ERR(sba->mchans[i]);
1707 goto fail_free_mchans;
1708 }
1709 sba->mchans_count++;
1710 }
1711
1712 /* Find out the underlying mailbox device */
1713 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1714 "mboxes", "#mbox-cells", 0, &args);
1715 if (ret)
1716 goto fail_free_mchans;
1717 mbox_pdev = of_find_device_by_node(args.np);
1718 of_node_put(args.np);
1719 if (!mbox_pdev) {
1720 ret = -ENODEV;
1721 goto fail_free_mchans;
1722 }
1723 sba->mbox_dev = &mbox_pdev->dev;
1724
1725 /* All mailbox channels should belong to the same ring manager device */
1726 for (i = 1; i < mchans_count; i++) {
1727 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1728 "mboxes", "#mbox-cells", i, &args);
1729 if (ret)
1730 goto fail_free_mchans;
1731 mbox_pdev = of_find_device_by_node(args.np);
1732 of_node_put(args.np);
1733 if (sba->mbox_dev != &mbox_pdev->dev) {
1734 ret = -EINVAL;
1735 goto fail_free_mchans;
1736 }
1737 }
1738
1739 /* Register DMA device with linux async framework */
1740 ret = sba_async_register(sba);
1741 if (ret)
1742 goto fail_free_mchans;
1743
1744 /* Prealloc channel resource */
1745 ret = sba_prealloc_channel_resources(sba);
1746 if (ret)
1747 goto fail_async_dev_unreg;
1748
1749 /* Print device info */
1750 dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
1751 dma_chan_name(&sba->dma_chan), sba->ver+1,
1752 sba->mchans_count);
1753
1754 return 0;
1755
1756fail_async_dev_unreg:
1757 dma_async_device_unregister(&sba->dma_dev);
1758fail_free_mchans:
1759 for (i = 0; i < sba->mchans_count; i++)
1760 mbox_free_channel(sba->mchans[i]);
1761 return ret;
1762}
1763
1764static int sba_remove(struct platform_device *pdev)
1765{
1766 int i;
1767 struct sba_device *sba = platform_get_drvdata(pdev);
1768
1769 sba_freeup_channel_resources(sba);
1770
1771 dma_async_device_unregister(&sba->dma_dev);
1772
1773 for (i = 0; i < sba->mchans_count; i++)
1774 mbox_free_channel(sba->mchans[i]);
1775
1776 return 0;
1777}
1778
1779static const struct of_device_id sba_of_match[] = {
1780 { .compatible = "brcm,iproc-sba", },
1781 { .compatible = "brcm,iproc-sba-v2", },
1782 {},
1783};
1784MODULE_DEVICE_TABLE(of, sba_of_match);
1785
1786static struct platform_driver sba_driver = {
1787 .probe = sba_probe,
1788 .remove = sba_remove,
1789 .driver = {
1790 .name = "bcm-sba-raid",
1791 .of_match_table = sba_of_match,
1792 },
1793};
1794module_platform_driver(sba_driver);
1795
1796MODULE_DESCRIPTION("Broadcom SBA RAID driver");
1797MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1798MODULE_LICENSE("GPL v2");