blob: 4ace9bcd603a45ee165a90af729052fdc0d8e8c1 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Srikanth Jampala14fa93c2017-05-30 17:28:01 +05302#include <linux/cpumask.h>
3#include <linux/dma-mapping.h>
4#include <linux/dmapool.h>
5#include <linux/delay.h>
6#include <linux/gfp.h>
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/pci_regs.h>
10#include <linux/vmalloc.h>
11#include <linux/pci.h>
12
13#include "nitrox_dev.h"
14#include "nitrox_common.h"
15#include "nitrox_req.h"
16#include "nitrox_csr.h"
17
18#define CRYPTO_CTX_SIZE 256
19
/* packet input ring alignments */
21#define PKTIN_Q_ALIGN_BYTES 16
Srikanth Jampala14fa93c2017-05-30 17:28:01 +053022
/**
 * nitrox_cmdq_init - allocate and initialize one command queue
 * @cmdq: command queue to set up; the caller must have filled in
 *        @cmdq->ndev and @cmdq->instr_size already
 * @align_bytes: required alignment of the ring base address
 *
 * Return: 0 on success, -ENOMEM if the coherent DMA allocation fails.
 */
static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
	struct nitrox_device *ndev = cmdq->ndev;

	/* over-allocate by align_bytes so the base can be aligned by hand */
	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
						&cmdq->unalign_dma,
						GFP_KERNEL);
	if (!cmdq->unalign_base)
		return -ENOMEM;

	/*
	 * Align the DMA address, then shift the CPU virtual address by the
	 * same byte offset so base/dma keep pointing at the same memory.
	 */
	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->cmd_qlock);
	spin_lock_init(&cmdq->resp_qlock);
	spin_lock_init(&cmdq->backlog_qlock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}
50
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053051static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
52{
53 cmdq->write_idx = 0;
54 atomic_set(&cmdq->pending_count, 0);
55 atomic_set(&cmdq->backlog_count, 0);
56}
57
/**
 * nitrox_cmdq_cleanup - release a command queue set up by nitrox_cmdq_init()
 * @cmdq: command queue to tear down; a no-op if it was never allocated
 */
static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	if (!cmdq->unalign_base)
		return;

	/* make sure no backlog flush work touches the ring after we free it */
	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->unalign_base, cmdq->unalign_dma);
	nitrox_cmdq_reset(cmdq);

	/* clear all bookkeeping so a second cleanup call is harmless */
	cmdq->dbell_csr_addr = NULL;
	cmdq->compl_cnt_csr_addr = NULL;
	cmdq->unalign_base = NULL;
	cmdq->base = NULL;
	cmdq->unalign_dma = 0;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}
80
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053081static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
Srikanth Jampala14fa93c2017-05-30 17:28:01 +053082{
83 int i;
84
85 for (i = 0; i < ndev->nr_queues; i++) {
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053086 struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
Srikanth Jampala14fa93c2017-05-30 17:28:01 +053087
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053088 nitrox_cmdq_cleanup(cmdq);
Srikanth Jampala14fa93c2017-05-30 17:28:01 +053089 }
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053090 kfree(ndev->pkt_inq);
91 ndev->pkt_inq = NULL;
Srikanth Jampala14fa93c2017-05-30 17:28:01 +053092}
93
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053094static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
Srikanth Jampala14fa93c2017-05-30 17:28:01 +053095{
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053096 int i, err;
Srikanth Jampala14fa93c2017-05-30 17:28:01 +053097
Srikanth Jampalae7892dd2018-09-29 13:49:09 +053098 ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
99 sizeof(struct nitrox_cmdq),
100 GFP_KERNEL, ndev->node);
101 if (!ndev->pkt_inq)
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530102 return -ENOMEM;
103
104 for (i = 0; i < ndev->nr_queues; i++) {
105 struct nitrox_cmdq *cmdq;
106 u64 offset;
107
Srikanth Jampalae7892dd2018-09-29 13:49:09 +0530108 cmdq = &ndev->pkt_inq[i];
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530109 cmdq->ndev = ndev;
110 cmdq->qno = i;
111 cmdq->instr_size = sizeof(struct nps_pkt_instr);
112
Srikanth Jampalae7892dd2018-09-29 13:49:09 +0530113 /* packet input ring doorbell address */
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530114 offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530115 cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
Srikanth Jampala5155e112018-09-29 13:49:10 +0530116 /* packet solicit port completion count address */
117 offset = NPS_PKT_SLC_CNTSX(i);
118 cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530119
Srikanth Jampalae7892dd2018-09-29 13:49:09 +0530120 err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530121 if (err)
Srikanth Jampalae7892dd2018-09-29 13:49:09 +0530122 goto pktq_fail;
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530123 }
124 return 0;
125
Srikanth Jampalae7892dd2018-09-29 13:49:09 +0530126pktq_fail:
127 nitrox_free_pktin_queues(ndev);
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530128 return err;
129}
130
131static int create_crypto_dma_pool(struct nitrox_device *ndev)
132{
133 size_t size;
134
135 /* Crypto context pool, 16 byte aligned */
136 size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
Srikanth Jampala718f6082018-09-10 13:54:52 +0530137 ndev->ctx_pool = dma_pool_create("nitrox-context",
Srikanth Jampala14fa93c2017-05-30 17:28:01 +0530138 DEV(ndev), size, 16, 0);
139 if (!ndev->ctx_pool)
140 return -ENOMEM;
141
142 return 0;
143}
144
145static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
146{
147 if (!ndev->ctx_pool)
148 return;
149
150 dma_pool_destroy(ndev->ctx_pool);
151 ndev->ctx_pool = NULL;
152}
153
/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX Device
 *
 * Allocates one element from the device's context DMA pool plus a
 * host-side struct crypto_ctx_hdr recording how to return it.
 *
 * Return: pointer to a struct crypto_ctx_hdr on success, NULL on
 *         allocation failure. The caller releases it with
 *         crypto_free_context().
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	struct crypto_ctx_hdr *chdr;
	void *vaddr;
	dma_addr_t dma;

	chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
	if (!chdr)
		return NULL;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		/* don't leak the header if the pool allocation fails */
		kfree(chdr);
		return NULL;
	}

	/* fill meta data */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	chdr->pool = ndev->ctx_pool;
	chdr->dma = dma;
	chdr->vaddr = vaddr;

	return chdr;
}
187
188/**
189 * crypto_free_context - Free crypto context to pool
190 * @ctx: context to free
191 */
192void crypto_free_context(void *ctx)
193{
Wenwen Wang71721222018-10-18 19:50:43 -0500194 struct crypto_ctx_hdr *ctxp;
Srikanth Jampalaf2663872017-05-30 17:28:03 +0530195
196 if (!ctx)
197 return;
198
Wenwen Wang71721222018-10-18 19:50:43 -0500199 ctxp = ctx;
200 dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
201 kfree(ctxp);
Srikanth Jampalaf2663872017-05-30 17:28:03 +0530202}
203
/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates crypto context pools and command queues etc.
 *
 * Return: 0 on success, or a negative error code on error.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_alloc_pktin_queues(ndev);
	if (err) {
		/* unwind the pool created above */
		destroy_crypto_dma_pool(ndev);
		return err;
	}

	return 0;
}
227
/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 *
 * Releases everything allocated by nitrox_common_sw_init(), in the
 * reverse order of creation.
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_free_pktin_queues(ndev);
	destroy_crypto_dma_pool(ndev);
}