/*
 * Cryptographic API.
 *
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

#define MD5_DIGEST_SIZE			16

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + (x * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define DEFAULT_AUTOSUSPEND_DELAY	1000

/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
#define FLAGS_SGS_COPIED	9
#define FLAGS_SGS_ALLOCED	10
/* context flags */
#define FLAGS_FINUP		16
#define FLAGS_SG		17

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22

#define OP_UPDATE	1
#define OP_FINAL	2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			PAGE_SIZE
#define OMAP_SHA_DMA_THRESHOLD	256

struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;
	dma_addr_t		dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl[2];
	struct scatterlist	sgl_tmp;
	unsigned int		offset;	/* offset in current sg */
	int			sg_len;
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
	struct omap_sham_dev	*dd;

	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};

#define OMAP_SHAM_QUEUE_LENGTH	10

struct omap_sham_algs_info {
	struct ahash_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_sham_pdata {
	struct omap_sham_algs_info	*algs_info;
	unsigned int	algs_info_size;
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;
	int			err;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;
	u8			xmit_buf[BUFLEN];

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	const struct omap_sham_pdata	*pdata;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

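/*
 * Copy the inner digest between the request context and the hardware
 * IDIGEST registers: out != 0 saves the registers into ctx->digest,
 * out == 0 restores a previously saved digest into the hardware.
 */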
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}

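/*
 * Convert the raw digest words in ctx->digest to the byte order the
 * caller expects and store them in req->result. Only OMAP2-style SHA1
 * reads back big-endian; all other modes are little-endian.
 */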
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	if (!hash)
		return;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpu(in[i]);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpu(in[i]);
}

static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	int err;

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}

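/*
 * Program the OMAP2-generation module for one transfer: restore the
 * digest count, set the interrupt/DMA enable mask and write the CTRL
 * word (algorithm select, first/last block markers, byte length).
 */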
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}

static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}

static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
			      u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}

static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
			     (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}

static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}

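/*
 * PIO transmit path: push the buffer into the DIN registers one hash
 * block at a time, polling for input-ready between blocks. Returns
 * -EINPROGRESS on success since completion is signalled by interrupt.
 */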
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++)
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
		len32 -= min(len32, bs32);
	}

	return -EINPROGRESS;
}

static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}

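/*
 * DMA transmit path: configure the slave channel to write into the DIN
 * registers and submit either a single-entry scatterlist or a
 * contiguous buffer, with the length rounded up to a block multiple.
 */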
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
			      size_t length, int final, int is_sg)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int len32, ret, dma_min = get_block_size(ctx);

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
						ctx->digcnt, length, final);

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	len32 = DIV_ROUND_UP(length, dma_min) * dma_min;

	if (is_sg) {
		/*
		 * The SG entry passed in may not have the 'length' member
		 * set correctly so use a local SG entry (sgl_tmp) with the
		 * proper value for 'length' instead.  If this is not done,
		 * the dmaengine may try to DMA the incorrect amount of data.
		 */
		sg_init_table(&ctx->sgl_tmp, 1);
		sg_assign_page(&ctx->sgl_tmp, sg_page(ctx->sg));
		ctx->sgl_tmp.offset = ctx->sg->offset;
		sg_dma_len(&ctx->sgl_tmp) = len32;
		sg_dma_address(&ctx->sgl_tmp) = sg_dma_address(ctx->sg);

		tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl_tmp, 1,
					     DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg/single() failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}

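/*
 * Build a new scatterlist that prepends any buffered bytes (staged in
 * dd->xmit_buf) and clips the caller's list to exactly new_len bytes;
 * FLAGS_SGS_ALLOCED marks the kmalloc'ed list for later cleanup.
 */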
static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
				   struct scatterlist *sg, int bs, int new_len)
{
	int n = sg_nents(sg);
	struct scatterlist *tmp;
	int offset = ctx->offset;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg)
		return -ENOMEM;

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && new_len) {
		int len = sg->length - offset;

		if (offset) {
			offset -= sg->length;
			if (offset < 0)
				offset = 0;
		}

		if (new_len < len)
			len = new_len;

		if (len > 0) {
			new_len -= len;
			sg_set_page(tmp, sg_page(sg), len, sg->offset);
			if (new_len <= 0)
				sg_mark_end(tmp);
			tmp = sg_next(tmp);
			ctx->sg_len++;
		}

		sg = sg_next(sg);
	}

	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);

	ctx->bufcnt = 0;

	return 0;
}

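/*
 * Fallback for unaligned input: linearize the buffered bytes plus the
 * scatterlist data into freshly allocated pages and point the walk
 * state at a single-entry scatterlist over that copy.
 */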
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
			      struct scatterlist *sg, int bs, int new_len)
{
	int pages;
	void *buf;
	int len;

	len = new_len + ctx->bufcnt;

	pages = get_order(ctx->total);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
				 ctx->total - ctx->bufcnt, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->offset = 0;

	return 0;
}

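/*
 * Check whether the incoming scatterlist satisfies the hardware's
 * word-alignment and block-size constraints; fall back to a full copy
 * for unaligned data, or to a rebuilt list when only the list layout
 * (offset or tail) is unusable.
 */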
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;

	if (!sg || !sg->length || !nbytes)
		return 0;

	new_len = nbytes;

	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = new_len / bs * bs;

	while (nbytes > 0 && sg_tmp) {
		n++;

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		if (nbytes < 0) {
			list_ok = false;
			break;
		}
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->sg_len = n;
	rctx->sg = sg;

	return 0;
}

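/*
 * Split the request into a block-aligned portion to transmit now and a
 * remainder kept in the context buffer for the next update/final,
 * stitching previously buffered bytes in front via dd->xmit_buf.
 */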
static int omap_sham_prepare_request(struct ahash_request *req, bool update)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	int xmit_len, hash_later;

	if (!req)
		return 0;

	bs = get_block_size(rctx);

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	rctx->total = nbytes + rctx->bufcnt;

	if (!rctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > nbytes)
			len = nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		nbytes -= len;
		rctx->offset = len;
	}

	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	xmit_len = rctx->total;

	if (!IS_ALIGNED(xmit_len, bs)) {
		if (final)
			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
		else
			xmit_len = xmit_len / bs * bs;
	}

	hash_later = rctx->total - xmit_len;
	if (hash_later < 0)
		hash_later = 0;

	if (rctx->bufcnt && nbytes) {
		/* have data from previous operation and current */
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);

		sg_chain(rctx->sgl, 2, req->src);

		rctx->sg = rctx->sgl;

		rctx->sg_len++;
	} else if (rctx->bufcnt) {
		/* have buffered data only */
		sg_init_table(rctx->sgl, 1);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);

		rctx->sg = rctx->sgl;

		rctx->sg_len = 1;
	}

	if (hash_later) {
		if (req->nbytes) {
			scatterwalk_map_and_copy(rctx->buffer, req->src,
						 req->nbytes - hash_later,
						 hash_later, 0);
		} else {
			memcpy(rctx->buffer, rctx->buffer + xmit_len,
			       hash_later);
		}
		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	if (!final)
		rctx->total = xmit_len;

	return 0;
}

static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
				const u8 *data, size_t length)
{
	size_t count = min(length, ctx->buflen - ctx->bufcnt);

	count = min(count, ctx->total);
	if (count <= 0)
		return 0;
	memcpy(ctx->buffer + ctx->bufcnt, data, count);
	ctx->bufcnt += count;

	return count;
}

static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;
	const u8 *vaddr;

	while (ctx->sg) {
		vaddr = kmap_atomic(sg_page(ctx->sg));
		vaddr += ctx->sg->offset;

		count = omap_sham_append_buffer(ctx,
				vaddr + ctx->offset,
				ctx->sg->length - ctx->offset);

		kunmap_atomic((void *)vaddr);

		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
				  struct omap_sham_reqctx *ctx,
				  size_t length, int final)
{
	int ret;

	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
		return -EINVAL;
	}

	ctx->flags &= ~BIT(FLAGS_SG);

	ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
	if (ret != -EINPROGRESS)
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);

	return ret;
}

static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
					ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

/* Start address alignment */
#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
#define SG_SA(sg, bs)	(IS_ALIGNED(sg->length, bs))

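/*
 * Fast-path DMA update: stream block-aligned scatterlist entries
 * straight to the hardware, deferring to omap_sham_update_dma_slow()
 * whenever data is buffered, misaligned or shorter than one block.
 */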
static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	int ret, bs;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return omap_sham_update_dma_slow(dd);

	/*
	 * Don't use the sg interface when the transfer size is less
	 * than the number of elements in a DMA frame.  Otherwise,
	 * the dmaengine infrastructure will calculate that it needs
	 * to transfer 0 frames which ultimately fails.
	 */
	if (ctx->total < get_block_size(ctx))
		return omap_sham_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;
	bs = get_block_size(ctx);

	if (!SG_AA(sg))
		return omap_sham_update_dma_slow(dd);

	if (!sg_is_last(sg) && !SG_SA(sg, bs))
		/* size is not BLOCK_SIZE aligned */
		return omap_sham_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
			/* not last sg must be BLOCK_SIZE aligned */
			tail = length & (bs - 1);
			/* without finup() we need one block to close hash */
			if (!tail)
				tail = bs;
			length -= tail;
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= BIT(FLAGS_SG);

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
	if (ret != -EINPROGRESS)
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);

	return ret;
}

static int omap_sham_update_cpu(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt, final;

	if (!ctx->total)
		return 0;

	omap_sham_append_sg(ctx);

	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;

	dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
		ctx->bufcnt, ctx->digcnt, final);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		bufcnt = ctx->bufcnt;
		ctx->bufcnt = 0;
		return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
	}

	return 0;
}

static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}

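/*
 * ahash ->init(): bind the transform to a device, select the mode bits
 * from the digest size and, for HMAC on hardware without key
 * processing, preload the ipad into the buffer.
 */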
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;
	int bs = 0;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}

static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_update_cpu(dd);
	else
		err = omap_sham_update_dma_start(dd);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
		    !test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}

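/*
 * Common completion: read back the digest on success, clear the busy
 * state, release the runtime PM reference and invoke the crypto
 * completion callback.
 */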
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		dd->pdata->copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

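/*
 * Dequeue and run requests while the device is idle; on synchronous
 * completion, loop back (retry label) to pick up the next queued
 * request immediately.
 */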
Dmitry Kasatkina5d87232010-11-19 16:04:25 +02001271static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1272 struct ahash_request *req)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001273{
Dmitry Kasatkin6c39d112010-12-29 21:52:04 +11001274 struct crypto_async_request *async_req, *backlog;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001275 struct omap_sham_reqctx *ctx;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001276 unsigned long flags;
Dmitry Kasatkina5d87232010-11-19 16:04:25 +02001277 int err = 0, ret = 0;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001278
Tero Kristo4e7813a2016-08-04 13:28:36 +03001279retry:
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001280 spin_lock_irqsave(&dd->lock, flags);
Dmitry Kasatkina5d87232010-11-19 16:04:25 +02001281 if (req)
1282 ret = ahash_enqueue_request(&dd->queue, req);
Dmitry Kasatkina929cbe2011-06-02 21:10:06 +03001283 if (test_bit(FLAGS_BUSY, &dd->flags)) {
Dmitry Kasatkina5d87232010-11-19 16:04:25 +02001284 spin_unlock_irqrestore(&dd->lock, flags);
1285 return ret;
1286 }
Dmitry Kasatkin6c39d112010-12-29 21:52:04 +11001287 backlog = crypto_get_backlog(&dd->queue);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001288 async_req = crypto_dequeue_request(&dd->queue);
Dmitry Kasatkin6c39d112010-12-29 21:52:04 +11001289 if (async_req)
Dmitry Kasatkina929cbe2011-06-02 21:10:06 +03001290 set_bit(FLAGS_BUSY, &dd->flags);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001291 spin_unlock_irqrestore(&dd->lock, flags);
1292
1293 if (!async_req)
Dmitry Kasatkina5d87232010-11-19 16:04:25 +02001294 return ret;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001295
1296 if (backlog)
1297 backlog->complete(backlog, -EINPROGRESS);
1298
1299 req = ahash_request_cast(async_req);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001300 dd->req = req;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001301 ctx = ahash_request_ctx(req);
1302
Tero Kristof19de1b2016-09-19 18:22:15 +03001303 err = omap_sham_prepare_request(NULL, ctx->op == OP_UPDATE);
1304 if (err)
1305 goto err1;
1306
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001307 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1308 ctx->op, req->nbytes);
1309
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001310 err = omap_sham_hw_init(dd);
1311 if (err)
1312 goto err1;
1313
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001314 if (ctx->digcnt)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001315 /* request has changed - restore hash */
Mark A. Greer0d373d62012-12-21 10:04:08 -07001316 dd->pdata->copy_hash(req, 0);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001317
1318 if (ctx->op == OP_UPDATE) {
1319 err = omap_sham_update_req(dd);
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001320 if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001321 /* no final() after finup() */
1322 err = omap_sham_final_req(dd);
1323 } else if (ctx->op == OP_FINAL) {
1324 err = omap_sham_final_req(dd);
1325 }
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001326err1:
Tero Kristo4e7813a2016-08-04 13:28:36 +03001327 dev_dbg(dd->dev, "exit, err: %d\n", err);
1328
1329 if (err != -EINPROGRESS) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001330 /* done_task will not finish it, so do it here */
1331 omap_sham_finish_req(req, err);
Tero Kristo4e7813a2016-08-04 13:28:36 +03001332 req = NULL;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001333
Tero Kristo4e7813a2016-08-04 13:28:36 +03001334 /*
1335 * Execute next request immediately if there is anything
1336 * in queue.
1337 */
1338 goto retry;
1339 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001340
Dmitry Kasatkina5d87232010-11-19 16:04:25 +02001341 return ret;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001342}
1343
1344static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1345{
1346 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1347 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1348 struct omap_sham_dev *dd = tctx->dd;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001349
1350 ctx->op = op;
1351
Dmitry Kasatkina5d87232010-11-19 16:04:25 +02001352 return omap_sham_handle_queue(dd, req);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001353}
1354
1355static int omap_sham_update(struct ahash_request *req)
1356{
1357 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
Lokesh Vutlab8411cc2013-08-20 20:32:34 +05301358 struct omap_sham_dev *dd = ctx->dd;
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301359 int bs = get_block_size(ctx);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001360
1361 if (!req->nbytes)
1362 return 0;
1363
1364 ctx->total = req->nbytes;
1365 ctx->sg = req->src;
1366 ctx->offset = 0;
1367
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001368 if (ctx->flags & BIT(FLAGS_FINUP)) {
Bin Liu85e06872016-06-22 16:23:37 +03001369 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 240) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001370 /*
1371 * OMAP HW accel works only with buffers >= 9
1372 * will switch to bypass in final()
1373 * final has the same request and data
1374 */
1375 omap_sham_append_sg(ctx);
1376 return 0;
Lokesh Vutlab8411cc2013-08-20 20:32:34 +05301377 } else if ((ctx->bufcnt + ctx->total <= bs) ||
1378 dd->polling_mode) {
Dmitry Kasatkin887c8832010-11-19 16:04:29 +02001379 /*
Lokesh Vutlab8411cc2013-08-20 20:32:34 +05301380 * faster to use CPU for short transfers or
1381 * use cpu when dma is not present.
1382 */
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001383 ctx->flags |= BIT(FLAGS_CPU);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001384 }
Dmitry Kasatkin887c8832010-11-19 16:04:29 +02001385 } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001386 omap_sham_append_sg(ctx);
1387 return 0;
1388 }
1389
Lokesh Vutlaacef7b02013-12-18 19:03:33 +05301390 if (dd->polling_mode)
1391 ctx->flags |= BIT(FLAGS_CPU);
1392
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001393 return omap_sham_enqueue(req, OP_UPDATE);
1394}
1395
Behan Webster7bc53c32014-04-04 18:18:00 -03001396static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001397 const u8 *data, unsigned int len, u8 *out)
1398{
Behan Webster7bc53c32014-04-04 18:18:00 -03001399 SHASH_DESC_ON_STACK(shash, tfm);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001400
Behan Webster7bc53c32014-04-04 18:18:00 -03001401 shash->tfm = tfm;
1402 shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001403
Behan Webster7bc53c32014-04-04 18:18:00 -03001404 return crypto_shash_digest(shash, data, len, out);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001405}
1406
1407static int omap_sham_final_shash(struct ahash_request *req)
1408{
1409 struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1410 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
Tero Kristocb8d5c82016-08-04 13:28:40 +03001411 int offset = 0;
1412
1413 /*
1414 * If we are running HMAC on limited hardware support, skip
1415 * the ipad in the beginning of the buffer if we are going for
1416 * software fallback algorithm.
1417 */
1418 if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1419 !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1420 offset = get_block_size(ctx);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001421
1422 return omap_sham_shash_digest(tctx->fallback, req->base.flags,
Tero Kristocb8d5c82016-08-04 13:28:40 +03001423 ctx->buffer + offset,
1424 ctx->bufcnt - offset, req->result);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001425}
1426
1427static int omap_sham_final(struct ahash_request *req)
1428{
1429 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001430
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001431 ctx->flags |= BIT(FLAGS_FINUP);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001432
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001433 if (ctx->flags & BIT(FLAGS_ERROR))
Dmitry Kasatkinbf362752011-04-20 13:34:58 +03001434 return 0; /* uncompleted hash is not needed */
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001435
Bin Liu85e06872016-06-22 16:23:37 +03001436 /*
1437 * OMAP HW accel works only with buffers >= 9.
1438 * HMAC is always >= 9 because ipad == block size.
Tero Kristo2c5bd1e2016-09-19 18:22:16 +03001439 * If buffersize is less than DMA_THRESHOLD, we use fallback
1440 * SW encoding, as using DMA + HW in this case doesn't provide
1441 * any benefit.
Bin Liu85e06872016-06-22 16:23:37 +03001442 */
Tero Kristo2c5bd1e2016-09-19 18:22:16 +03001443 if (!ctx->digcnt && ctx->bufcnt < OMAP_SHA_DMA_THRESHOLD)
Dmitry Kasatkinbf362752011-04-20 13:34:58 +03001444 return omap_sham_final_shash(req);
1445 else if (ctx->bufcnt)
1446 return omap_sham_enqueue(req, OP_FINAL);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001447
Dmitry Kasatkinbf362752011-04-20 13:34:58 +03001448 /* copy ready hash (+ finalize hmac) */
1449 return omap_sham_finish(req);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001450}
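
/*
 * final() outcome summary, derived from the checks above:
 *
 *	FLAGS_ERROR set                        -> return 0, result unused
 *	digcnt == 0 && bufcnt < DMA_THRESHOLD  -> software fallback digest
 *	bufcnt != 0                            -> queue OP_FINAL to the HW
 *	otherwise                              -> omap_sham_finish() copies
 *	                                          the already-computed hash
 */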
1451
1452static int omap_sham_finup(struct ahash_request *req)
1453{
1454 struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1455 int err1, err2;
1456
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001457 ctx->flags |= BIT(FLAGS_FINUP);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001458
1459 err1 = omap_sham_update(req);
Markku Kylanpaa455e3382011-04-20 13:34:55 +03001460 if (err1 == -EINPROGRESS || err1 == -EBUSY)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001461 return err1;
1462 /*
1463	 * final() must always be called to clean up resources,
1464	 * even if update() failed, except when it returned -EINPROGRESS
1465 */
1466 err2 = omap_sham_final(req);
1467
1468 return err1 ?: err2;
1469}
1470
1471static int omap_sham_digest(struct ahash_request *req)
1472{
1473 return omap_sham_init(req) ?: omap_sham_finup(req);
1474}
1475
1476static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1477 unsigned int keylen)
1478{
1479 struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1480 struct omap_sham_hmac_ctx *bctx = tctx->base;
1481 int bs = crypto_shash_blocksize(bctx->shash);
1482 int ds = crypto_shash_digestsize(bctx->shash);
Mark A. Greer0d373d62012-12-21 10:04:08 -07001483 struct omap_sham_dev *dd = NULL, *tmp;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001484 int err, i;
Mark A. Greer0d373d62012-12-21 10:04:08 -07001485
1486 spin_lock_bh(&sham.lock);
1487 if (!tctx->dd) {
1488 list_for_each_entry(tmp, &sham.dev_list, list) {
1489 dd = tmp;
1490 break;
1491 }
1492 tctx->dd = dd;
1493 } else {
1494 dd = tctx->dd;
1495 }
1496 spin_unlock_bh(&sham.lock);
1497
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001498 err = crypto_shash_setkey(tctx->fallback, key, keylen);
1499 if (err)
1500 return err;
1501
1502 if (keylen > bs) {
1503 err = omap_sham_shash_digest(bctx->shash,
1504 crypto_shash_get_flags(bctx->shash),
1505 key, keylen, bctx->ipad);
1506 if (err)
1507 return err;
1508 keylen = ds;
1509 } else {
1510 memcpy(bctx->ipad, key, keylen);
1511 }
1512
1513 memset(bctx->ipad + keylen, 0, bs - keylen);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001514
Mark A. Greer0d373d62012-12-21 10:04:08 -07001515 if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1516 memcpy(bctx->opad, bctx->ipad, bs);
1517
1518 for (i = 0; i < bs; i++) {
1519 bctx->ipad[i] ^= 0x36;
1520 bctx->opad[i] ^= 0x5c;
1521 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001522 }
1523
1524 return err;
1525}
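
/*
 * The pads built above follow the standard HMAC construction
 * (RFC 2104): K' is the key zero-padded to the block size bs (or
 * hashed down first when keylen > bs), and
 *
 *	ipad = K' ^ 0x36...36,  opad = K' ^ 0x5c...5c
 *	HMAC(K, m) = H(opad || H(ipad || m))
 *
 * bs is 64 bytes for MD5/SHA-1/SHA-224/SHA-256 and 128 bytes for
 * SHA-384/SHA-512.  On FLAGS_AUTO_XOR hardware only the padded key is
 * kept in bctx->ipad; the XOR steps are left to the accelerator.
 */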
1526
1527static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1528{
1529 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1530 const char *alg_name = crypto_tfm_alg_name(tfm);
1531
1532	/* Allocate a fallback and abort if it fails. */
1533 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1534 CRYPTO_ALG_NEED_FALLBACK);
1535 if (IS_ERR(tctx->fallback)) {
1536		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
1537		       alg_name);
1538 return PTR_ERR(tctx->fallback);
1539 }
1540
1541 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001542 sizeof(struct omap_sham_reqctx) + BUFLEN);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001543
1544 if (alg_base) {
1545 struct omap_sham_hmac_ctx *bctx = tctx->base;
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001546 tctx->flags |= BIT(FLAGS_HMAC);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001547 bctx->shash = crypto_alloc_shash(alg_base, 0,
1548 CRYPTO_ALG_NEED_FALLBACK);
1549 if (IS_ERR(bctx->shash)) {
1550			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
1551			       alg_base);
1552 crypto_free_shash(tctx->fallback);
1553 return PTR_ERR(bctx->shash);
1554 }
1555
1556 }
1557
1558 return 0;
1559}
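
/*
 * Note on the two shash allocations above: tctx->fallback is the same
 * algorithm in software, used when the input is too short to benefit
 * from the accelerator (see omap_sham_final()); bctx->shash, allocated
 * for the HMAC variants only, is the bare base hash used for key
 * preprocessing in omap_sham_setkey().  The request size is enlarged
 * by BUFLEN so every request carries its own staging buffer.
 */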
1560
1561static int omap_sham_cra_init(struct crypto_tfm *tfm)
1562{
1563 return omap_sham_cra_init_alg(tfm, NULL);
1564}
1565
1566static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1567{
1568 return omap_sham_cra_init_alg(tfm, "sha1");
1569}
1570
Mark A. Greerd20fb182012-12-21 10:04:09 -07001571static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1572{
1573 return omap_sham_cra_init_alg(tfm, "sha224");
1574}
1575
1576static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1577{
1578 return omap_sham_cra_init_alg(tfm, "sha256");
1579}
1580
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001581static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1582{
1583 return omap_sham_cra_init_alg(tfm, "md5");
1584}
1585
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301586static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1587{
1588 return omap_sham_cra_init_alg(tfm, "sha384");
1589}
1590
1591static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1592{
1593 return omap_sham_cra_init_alg(tfm, "sha512");
1594}
1595
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001596static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1597{
1598 struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1599
1600 crypto_free_shash(tctx->fallback);
1601 tctx->fallback = NULL;
1602
Dmitry Kasatkinea1fd222011-06-02 21:10:05 +03001603 if (tctx->flags & BIT(FLAGS_HMAC)) {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001604 struct omap_sham_hmac_ctx *bctx = tctx->base;
1605 crypto_free_shash(bctx->shash);
1606 }
1607}
1608
Tero Kristo99a7fff2016-09-19 18:22:12 +03001609static int omap_sham_export(struct ahash_request *req, void *out)
1610{
1611 return -ENOTSUPP;
1612}
1613
1614static int omap_sham_import(struct ahash_request *req, const void *in)
1615{
1616 return -ENOTSUPP;
1617}
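
/*
 * Saving and restoring a partial hash state is not implemented for
 * this hardware, so export()/import() report -ENOTSUPP.  A statesize
 * is still assigned at probe time (see omap_sham_probe()) because the
 * ahash API expects one at registration.
 */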
1618
Mark A. Greerd20fb182012-12-21 10:04:09 -07001619static struct ahash_alg algs_sha1_md5[] = {
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001620{
1621 .init = omap_sham_init,
1622 .update = omap_sham_update,
1623 .final = omap_sham_final,
1624 .finup = omap_sham_finup,
1625 .digest = omap_sham_digest,
1626 .halg.digestsize = SHA1_DIGEST_SIZE,
1627 .halg.base = {
1628 .cra_name = "sha1",
1629 .cra_driver_name = "omap-sha1",
Bin Liueb354782016-06-30 14:04:11 -05001630 .cra_priority = 400,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001631 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01001632 CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001633 CRYPTO_ALG_ASYNC |
1634 CRYPTO_ALG_NEED_FALLBACK,
1635 .cra_blocksize = SHA1_BLOCK_SIZE,
1636 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001637 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001638 .cra_module = THIS_MODULE,
1639 .cra_init = omap_sham_cra_init,
1640 .cra_exit = omap_sham_cra_exit,
1641 }
1642},
1643{
1644 .init = omap_sham_init,
1645 .update = omap_sham_update,
1646 .final = omap_sham_final,
1647 .finup = omap_sham_finup,
1648 .digest = omap_sham_digest,
1649 .halg.digestsize = MD5_DIGEST_SIZE,
1650 .halg.base = {
1651 .cra_name = "md5",
1652 .cra_driver_name = "omap-md5",
Bin Liueb354782016-06-30 14:04:11 -05001653 .cra_priority = 400,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001654 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01001655 CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001656 CRYPTO_ALG_ASYNC |
1657 CRYPTO_ALG_NEED_FALLBACK,
1658 .cra_blocksize = SHA1_BLOCK_SIZE,
1659 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001660 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001661 .cra_module = THIS_MODULE,
1662 .cra_init = omap_sham_cra_init,
1663 .cra_exit = omap_sham_cra_exit,
1664 }
1665},
1666{
1667 .init = omap_sham_init,
1668 .update = omap_sham_update,
1669 .final = omap_sham_final,
1670 .finup = omap_sham_finup,
1671 .digest = omap_sham_digest,
1672 .setkey = omap_sham_setkey,
1673 .halg.digestsize = SHA1_DIGEST_SIZE,
1674 .halg.base = {
1675 .cra_name = "hmac(sha1)",
1676 .cra_driver_name = "omap-hmac-sha1",
Bin Liueb354782016-06-30 14:04:11 -05001677 .cra_priority = 400,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001678 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01001679 CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001680 CRYPTO_ALG_ASYNC |
1681 CRYPTO_ALG_NEED_FALLBACK,
1682 .cra_blocksize = SHA1_BLOCK_SIZE,
1683 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1684 sizeof(struct omap_sham_hmac_ctx),
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001685 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001686 .cra_module = THIS_MODULE,
1687 .cra_init = omap_sham_cra_sha1_init,
1688 .cra_exit = omap_sham_cra_exit,
1689 }
1690},
1691{
1692 .init = omap_sham_init,
1693 .update = omap_sham_update,
1694 .final = omap_sham_final,
1695 .finup = omap_sham_finup,
1696 .digest = omap_sham_digest,
1697 .setkey = omap_sham_setkey,
1698 .halg.digestsize = MD5_DIGEST_SIZE,
1699 .halg.base = {
1700 .cra_name = "hmac(md5)",
1701 .cra_driver_name = "omap-hmac-md5",
Bin Liueb354782016-06-30 14:04:11 -05001702 .cra_priority = 400,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001703 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
Nikos Mavrogiannopoulosd912bb72011-11-01 13:39:56 +01001704 CRYPTO_ALG_KERN_DRIVER_ONLY |
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001705 CRYPTO_ALG_ASYNC |
1706 CRYPTO_ALG_NEED_FALLBACK,
1707 .cra_blocksize = SHA1_BLOCK_SIZE,
1708 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1709 sizeof(struct omap_sham_hmac_ctx),
Dmitry Kasatkin798eed5d2010-11-19 16:04:26 +02001710 .cra_alignmask = OMAP_ALIGN_MASK,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001711 .cra_module = THIS_MODULE,
1712 .cra_init = omap_sham_cra_md5_init,
1713 .cra_exit = omap_sham_cra_exit,
1714 }
1715}
1716};
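
/*
 * cra_priority 400 ranks these implementations above the generic
 * software versions, which register with a lower priority, so lookups
 * by name ("sha1", "hmac(md5)", ...) resolve to the accelerator when
 * it is present.
 */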
1717
Mark A. Greerd20fb182012-12-21 10:04:09 -07001718/* OMAP4 has some algs in addition to what OMAP2 has */
1719static struct ahash_alg algs_sha224_sha256[] = {
1720{
1721 .init = omap_sham_init,
1722 .update = omap_sham_update,
1723 .final = omap_sham_final,
1724 .finup = omap_sham_finup,
1725 .digest = omap_sham_digest,
1726 .halg.digestsize = SHA224_DIGEST_SIZE,
1727 .halg.base = {
1728 .cra_name = "sha224",
1729 .cra_driver_name = "omap-sha224",
Bin Liueb354782016-06-30 14:04:11 -05001730 .cra_priority = 400,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001731 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1732 CRYPTO_ALG_ASYNC |
1733 CRYPTO_ALG_NEED_FALLBACK,
1734 .cra_blocksize = SHA224_BLOCK_SIZE,
1735 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001736 .cra_alignmask = OMAP_ALIGN_MASK,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001737 .cra_module = THIS_MODULE,
1738 .cra_init = omap_sham_cra_init,
1739 .cra_exit = omap_sham_cra_exit,
1740 }
1741},
1742{
1743 .init = omap_sham_init,
1744 .update = omap_sham_update,
1745 .final = omap_sham_final,
1746 .finup = omap_sham_finup,
1747 .digest = omap_sham_digest,
1748 .halg.digestsize = SHA256_DIGEST_SIZE,
1749 .halg.base = {
1750 .cra_name = "sha256",
1751 .cra_driver_name = "omap-sha256",
Bin Liueb354782016-06-30 14:04:11 -05001752 .cra_priority = 400,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001753 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1754 CRYPTO_ALG_ASYNC |
1755 CRYPTO_ALG_NEED_FALLBACK,
1756 .cra_blocksize = SHA256_BLOCK_SIZE,
1757 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001758 .cra_alignmask = OMAP_ALIGN_MASK,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001759 .cra_module = THIS_MODULE,
1760 .cra_init = omap_sham_cra_init,
1761 .cra_exit = omap_sham_cra_exit,
1762 }
1763},
1764{
1765 .init = omap_sham_init,
1766 .update = omap_sham_update,
1767 .final = omap_sham_final,
1768 .finup = omap_sham_finup,
1769 .digest = omap_sham_digest,
1770 .setkey = omap_sham_setkey,
1771 .halg.digestsize = SHA224_DIGEST_SIZE,
1772 .halg.base = {
1773 .cra_name = "hmac(sha224)",
1774 .cra_driver_name = "omap-hmac-sha224",
Bin Liueb354782016-06-30 14:04:11 -05001775 .cra_priority = 400,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001776 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1777 CRYPTO_ALG_ASYNC |
1778 CRYPTO_ALG_NEED_FALLBACK,
1779 .cra_blocksize = SHA224_BLOCK_SIZE,
1780 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1781 sizeof(struct omap_sham_hmac_ctx),
1782 .cra_alignmask = OMAP_ALIGN_MASK,
1783 .cra_module = THIS_MODULE,
1784 .cra_init = omap_sham_cra_sha224_init,
1785 .cra_exit = omap_sham_cra_exit,
1786 }
1787},
1788{
1789 .init = omap_sham_init,
1790 .update = omap_sham_update,
1791 .final = omap_sham_final,
1792 .finup = omap_sham_finup,
1793 .digest = omap_sham_digest,
1794 .setkey = omap_sham_setkey,
1795 .halg.digestsize = SHA256_DIGEST_SIZE,
1796 .halg.base = {
1797 .cra_name = "hmac(sha256)",
1798 .cra_driver_name = "omap-hmac-sha256",
Bin Liueb354782016-06-30 14:04:11 -05001799 .cra_priority = 400,
Mark A. Greerd20fb182012-12-21 10:04:09 -07001800 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1801 CRYPTO_ALG_ASYNC |
1802 CRYPTO_ALG_NEED_FALLBACK,
1803 .cra_blocksize = SHA256_BLOCK_SIZE,
1804 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1805 sizeof(struct omap_sham_hmac_ctx),
1806 .cra_alignmask = OMAP_ALIGN_MASK,
1807 .cra_module = THIS_MODULE,
1808 .cra_init = omap_sham_cra_sha256_init,
1809 .cra_exit = omap_sham_cra_exit,
1810 }
1811},
1812};
1813
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301814static struct ahash_alg algs_sha384_sha512[] = {
1815{
1816 .init = omap_sham_init,
1817 .update = omap_sham_update,
1818 .final = omap_sham_final,
1819 .finup = omap_sham_finup,
1820 .digest = omap_sham_digest,
1821 .halg.digestsize = SHA384_DIGEST_SIZE,
1822 .halg.base = {
1823 .cra_name = "sha384",
1824 .cra_driver_name = "omap-sha384",
Bin Liueb354782016-06-30 14:04:11 -05001825 .cra_priority = 400,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301826 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1827 CRYPTO_ALG_ASYNC |
1828 CRYPTO_ALG_NEED_FALLBACK,
1829 .cra_blocksize = SHA384_BLOCK_SIZE,
1830 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001831 .cra_alignmask = OMAP_ALIGN_MASK,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301832 .cra_module = THIS_MODULE,
1833 .cra_init = omap_sham_cra_init,
1834 .cra_exit = omap_sham_cra_exit,
1835 }
1836},
1837{
1838 .init = omap_sham_init,
1839 .update = omap_sham_update,
1840 .final = omap_sham_final,
1841 .finup = omap_sham_finup,
1842 .digest = omap_sham_digest,
1843 .halg.digestsize = SHA512_DIGEST_SIZE,
1844 .halg.base = {
1845 .cra_name = "sha512",
1846 .cra_driver_name = "omap-sha512",
Bin Liueb354782016-06-30 14:04:11 -05001847 .cra_priority = 400,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301848 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1849 CRYPTO_ALG_ASYNC |
1850 CRYPTO_ALG_NEED_FALLBACK,
1851 .cra_blocksize = SHA512_BLOCK_SIZE,
1852 .cra_ctxsize = sizeof(struct omap_sham_ctx),
Tero Kristo744e6862016-09-19 18:22:13 +03001853 .cra_alignmask = OMAP_ALIGN_MASK,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301854 .cra_module = THIS_MODULE,
1855 .cra_init = omap_sham_cra_init,
1856 .cra_exit = omap_sham_cra_exit,
1857 }
1858},
1859{
1860 .init = omap_sham_init,
1861 .update = omap_sham_update,
1862 .final = omap_sham_final,
1863 .finup = omap_sham_finup,
1864 .digest = omap_sham_digest,
1865 .setkey = omap_sham_setkey,
1866 .halg.digestsize = SHA384_DIGEST_SIZE,
1867 .halg.base = {
1868 .cra_name = "hmac(sha384)",
1869 .cra_driver_name = "omap-hmac-sha384",
Bin Liueb354782016-06-30 14:04:11 -05001870 .cra_priority = 400,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301871 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1872 CRYPTO_ALG_ASYNC |
1873 CRYPTO_ALG_NEED_FALLBACK,
1874 .cra_blocksize = SHA384_BLOCK_SIZE,
1875 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1876 sizeof(struct omap_sham_hmac_ctx),
1877 .cra_alignmask = OMAP_ALIGN_MASK,
1878 .cra_module = THIS_MODULE,
1879 .cra_init = omap_sham_cra_sha384_init,
1880 .cra_exit = omap_sham_cra_exit,
1881 }
1882},
1883{
1884 .init = omap_sham_init,
1885 .update = omap_sham_update,
1886 .final = omap_sham_final,
1887 .finup = omap_sham_finup,
1888 .digest = omap_sham_digest,
1889 .setkey = omap_sham_setkey,
1890 .halg.digestsize = SHA512_DIGEST_SIZE,
1891 .halg.base = {
1892 .cra_name = "hmac(sha512)",
1893 .cra_driver_name = "omap-hmac-sha512",
Bin Liueb354782016-06-30 14:04:11 -05001894 .cra_priority = 400,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05301895 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1896 CRYPTO_ALG_ASYNC |
1897 CRYPTO_ALG_NEED_FALLBACK,
1898 .cra_blocksize = SHA512_BLOCK_SIZE,
1899 .cra_ctxsize = sizeof(struct omap_sham_ctx) +
1900 sizeof(struct omap_sham_hmac_ctx),
1901 .cra_alignmask = OMAP_ALIGN_MASK,
1902 .cra_module = THIS_MODULE,
1903 .cra_init = omap_sham_cra_sha512_init,
1904 .cra_exit = omap_sham_cra_exit,
1905 }
1906},
1907};
1908
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001909static void omap_sham_done_task(unsigned long data)
1910{
1911 struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001912 int err = 0;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001913
Dmitry Kasatkin6cb3ffe2011-06-02 21:10:09 +03001914 if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1915 omap_sham_handle_queue(dd, NULL);
1916 return;
1917 }
1918
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001919 if (test_bit(FLAGS_CPU, &dd->flags)) {
Lokesh Vutlab8411cc2013-08-20 20:32:34 +05301920 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1921 /* hash or semi-hash ready */
1922 err = omap_sham_update_cpu(dd);
1923 if (err != -EINPROGRESS)
1924 goto finish;
1925 }
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001926 } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1927 if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1928 omap_sham_update_dma_stop(dd);
1929 if (dd->err) {
1930 err = dd->err;
1931 goto finish;
1932 }
1933 }
1934 if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1935 /* hash or semi-hash ready */
1936 clear_bit(FLAGS_DMA_READY, &dd->flags);
Dmitry Kasatkin887c8832010-11-19 16:04:29 +02001937 err = omap_sham_update_dma_start(dd);
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001938 if (err != -EINPROGRESS)
1939 goto finish;
1940 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001941 }
1942
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001943 return;
Dmitry Kasatkin3e133c82010-11-19 16:04:24 +02001944
Dmitry Kasatkin6c63db82011-06-02 21:10:10 +03001945finish:
1946 dev_dbg(dd->dev, "update done: err: %d\n", err);
1947	/* finish current request */
1948 omap_sham_finish_req(dd->req, err);
Tero Kristo4e7813a2016-08-04 13:28:36 +03001949
1950 /* If we are not busy, process next req */
1951 if (!test_bit(FLAGS_BUSY, &dd->flags))
1952 omap_sham_handle_queue(dd, NULL);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001953}
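
/*
 * done_task flow, roughly: the tasklet runs after an interrupt or a
 * DMA completion.  On the CPU path a consumed OUTPUT_READY bit lets
 * omap_sham_update_cpu() feed the next chunk; on the DMA path a
 * finished transfer is torn down (update_dma_stop) before the next one
 * is started (update_dma_start).  Any result other than -EINPROGRESS
 * finalizes the request and, if the engine is idle, kicks the queue.
 */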
1954
Mark A. Greer0d373d62012-12-21 10:04:08 -07001955static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1956{
1957 if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1958 dev_warn(dd->dev, "Interrupt when no active requests.\n");
1959 } else {
1960 set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1961 tasklet_schedule(&dd->done_task);
1962 }
1963
1964 return IRQ_HANDLED;
1965}
1966
1967static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001968{
1969 struct omap_sham_dev *dd = dev_id;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001970
Dmitry Kasatkined3ea9a82011-06-02 21:10:07 +03001971 if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001972 /* final -> allow device to go to power-saving mode */
1973 omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1974
1975 omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1976 SHA_REG_CTRL_OUTPUT_READY);
1977 omap_sham_read(dd, SHA_REG_CTRL);
1978
Mark A. Greer0d373d62012-12-21 10:04:08 -07001979 return omap_sham_irq_common(dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001980}
1981
Mark A. Greer0d373d62012-12-21 10:04:08 -07001982static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001983{
Mark A. Greer0d373d62012-12-21 10:04:08 -07001984 struct omap_sham_dev *dd = dev_id;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001985
Mark A. Greer0d373d62012-12-21 10:04:08 -07001986 omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
Dmitry Kasatkin3e133c82010-11-19 16:04:24 +02001987
Mark A. Greer0d373d62012-12-21 10:04:08 -07001988 return omap_sham_irq_common(dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08001989}
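
/*
 * Interrupt handling is split: the SoC-specific handlers above only
 * acknowledge the hardware (OMAP2 clears OUTPUT_READY via SHA_REG_CTRL,
 * OMAP4+ masks SHA_REG_MASK_IT_EN), while omap_sham_irq_common() defers
 * the real work to the done_task tasklet.
 */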
1990
Mark A. Greerd20fb182012-12-21 10:04:09 -07001991static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1992 {
1993 .algs_list = algs_sha1_md5,
1994 .size = ARRAY_SIZE(algs_sha1_md5),
1995 },
1996};
1997
Mark A. Greer0d373d62012-12-21 10:04:08 -07001998static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
Mark A. Greerd20fb182012-12-21 10:04:09 -07001999 .algs_info = omap_sham_algs_info_omap2,
2000 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
Mark A. Greer0d373d62012-12-21 10:04:08 -07002001 .flags = BIT(FLAGS_BE32_SHA1),
2002 .digest_size = SHA1_DIGEST_SIZE,
2003 .copy_hash = omap_sham_copy_hash_omap2,
2004 .write_ctrl = omap_sham_write_ctrl_omap2,
2005 .trigger = omap_sham_trigger_omap2,
2006 .poll_irq = omap_sham_poll_irq_omap2,
2007 .intr_hdlr = omap_sham_irq_omap2,
2008 .idigest_ofs = 0x00,
2009 .din_ofs = 0x1c,
2010 .digcnt_ofs = 0x14,
2011 .rev_ofs = 0x5c,
2012 .mask_ofs = 0x60,
2013 .sysstatus_ofs = 0x64,
2014 .major_mask = 0xf0,
2015 .major_shift = 4,
2016 .minor_mask = 0x0f,
2017 .minor_shift = 0,
2018};
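
/*
 * The per-SoC pdata blocks parameterize the register accessor macros at
 * the top of this file.  Worked example: with OMAP2's idigest_ofs of
 * 0x00 and the 4-byte register stride, digest word n lives at n * 4;
 * with OMAP4's idigest_ofs of 0x020 (below), the same word lives at
 * 0x020 + n * 4.
 */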
2019
Mark A. Greer03feec92012-12-21 10:04:06 -07002020#ifdef CONFIG_OF
Mark A. Greerd20fb182012-12-21 10:04:09 -07002021static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
2022 {
2023 .algs_list = algs_sha1_md5,
2024 .size = ARRAY_SIZE(algs_sha1_md5),
2025 },
2026 {
2027 .algs_list = algs_sha224_sha256,
2028 .size = ARRAY_SIZE(algs_sha224_sha256),
2029 },
2030};
2031
Mark A. Greer0d373d62012-12-21 10:04:08 -07002032static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
Mark A. Greerd20fb182012-12-21 10:04:09 -07002033 .algs_info = omap_sham_algs_info_omap4,
2034 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
Mark A. Greer0d373d62012-12-21 10:04:08 -07002035 .flags = BIT(FLAGS_AUTO_XOR),
2036 .digest_size = SHA256_DIGEST_SIZE,
2037 .copy_hash = omap_sham_copy_hash_omap4,
2038 .write_ctrl = omap_sham_write_ctrl_omap4,
2039 .trigger = omap_sham_trigger_omap4,
2040 .poll_irq = omap_sham_poll_irq_omap4,
2041 .intr_hdlr = omap_sham_irq_omap4,
2042 .idigest_ofs = 0x020,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05302043 .odigest_ofs = 0x0,
Mark A. Greer0d373d62012-12-21 10:04:08 -07002044 .din_ofs = 0x080,
2045 .digcnt_ofs = 0x040,
2046 .rev_ofs = 0x100,
2047 .mask_ofs = 0x110,
2048 .sysstatus_ofs = 0x114,
Lokesh Vutlaeaef7e32013-07-26 12:29:14 +05302049 .mode_ofs = 0x44,
2050 .length_ofs = 0x48,
Mark A. Greer0d373d62012-12-21 10:04:08 -07002051 .major_mask = 0x0700,
2052 .major_shift = 8,
2053 .minor_mask = 0x003f,
2054 .minor_shift = 0,
2055};
2056
Lokesh Vutla7d7c7042013-07-26 12:29:15 +05302057static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
2058 {
2059 .algs_list = algs_sha1_md5,
2060 .size = ARRAY_SIZE(algs_sha1_md5),
2061 },
2062 {
2063 .algs_list = algs_sha224_sha256,
2064 .size = ARRAY_SIZE(algs_sha224_sha256),
2065 },
2066 {
2067 .algs_list = algs_sha384_sha512,
2068 .size = ARRAY_SIZE(algs_sha384_sha512),
2069 },
2070};
2071
2072static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
2073 .algs_info = omap_sham_algs_info_omap5,
2074 .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
2075 .flags = BIT(FLAGS_AUTO_XOR),
2076 .digest_size = SHA512_DIGEST_SIZE,
2077 .copy_hash = omap_sham_copy_hash_omap4,
2078 .write_ctrl = omap_sham_write_ctrl_omap4,
2079 .trigger = omap_sham_trigger_omap4,
2080 .poll_irq = omap_sham_poll_irq_omap4,
2081 .intr_hdlr = omap_sham_irq_omap4,
2082 .idigest_ofs = 0x240,
2083 .odigest_ofs = 0x200,
2084 .din_ofs = 0x080,
2085 .digcnt_ofs = 0x280,
2086 .rev_ofs = 0x100,
2087 .mask_ofs = 0x110,
2088 .sysstatus_ofs = 0x114,
2089 .mode_ofs = 0x284,
2090 .length_ofs = 0x288,
2091 .major_mask = 0x0700,
2092 .major_shift = 8,
2093 .minor_mask = 0x003f,
2094 .minor_shift = 0,
2095};
2096
Mark A. Greer03feec92012-12-21 10:04:06 -07002097static const struct of_device_id omap_sham_of_match[] = {
2098 {
2099 .compatible = "ti,omap2-sham",
Mark A. Greer0d373d62012-12-21 10:04:08 -07002100 .data = &omap_sham_pdata_omap2,
2101 },
2102 {
Pali Roháreddca852015-02-26 14:49:53 +01002103 .compatible = "ti,omap3-sham",
2104 .data = &omap_sham_pdata_omap2,
2105 },
2106 {
Mark A. Greer0d373d62012-12-21 10:04:08 -07002107 .compatible = "ti,omap4-sham",
2108 .data = &omap_sham_pdata_omap4,
Mark A. Greer03feec92012-12-21 10:04:06 -07002109 },
Lokesh Vutla7d7c7042013-07-26 12:29:15 +05302110 {
2111 .compatible = "ti,omap5-sham",
2112 .data = &omap_sham_pdata_omap5,
2113 },
Mark A. Greer03feec92012-12-21 10:04:06 -07002114 {},
2115};
2116MODULE_DEVICE_TABLE(of, omap_sham_of_match);
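
/*
 * Illustrative device-tree node for this table (all values are made-up
 * examples, not taken from a real dtsi):
 *
 *	sham: sham@4b100000 {
 *		compatible = "ti,omap4-sham";
 *		reg = <0x4b100000 0x300>;
 *		interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
 *		dmas = <&sdma 119>;
 *		dma-names = "rx";
 *	};
 *
 * omap_sham_get_res_of() below translates the reg and interrupt
 * properties; the "rx" dma-name matches the dma_request_chan() call in
 * omap_sham_probe().
 */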
2117
2118static int omap_sham_get_res_of(struct omap_sham_dev *dd,
2119 struct device *dev, struct resource *res)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002120{
Mark A. Greer03feec92012-12-21 10:04:06 -07002121 struct device_node *node = dev->of_node;
2122 const struct of_device_id *match;
2123 int err = 0;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002124
Mark A. Greer03feec92012-12-21 10:04:06 -07002125 match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
2126 if (!match) {
2127 dev_err(dev, "no compatible OF match\n");
2128 err = -EINVAL;
2129 goto err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002130 }
Samu Onkalo584db6a2010-09-03 19:20:19 +08002131
Mark A. Greer03feec92012-12-21 10:04:06 -07002132 err = of_address_to_resource(node, 0, res);
2133 if (err < 0) {
2134 dev_err(dev, "can't translate OF node address\n");
2135 err = -EINVAL;
2136 goto err;
2137 }
2138
Thierry Redingf7578492013-09-18 15:24:44 +02002139 dd->irq = irq_of_parse_and_map(node, 0);
Mark A. Greer03feec92012-12-21 10:04:06 -07002140 if (!dd->irq) {
2141 dev_err(dev, "can't translate OF irq value\n");
2142 err = -EINVAL;
2143 goto err;
2144 }
2145
Mark A. Greer0d373d62012-12-21 10:04:08 -07002146 dd->pdata = match->data;
Mark A. Greer03feec92012-12-21 10:04:06 -07002147
2148err:
2149 return err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002150}
Mark A. Greer03feec92012-12-21 10:04:06 -07002151#else
Mark A. Greerc3c3b322013-01-15 13:53:02 -07002152static const struct of_device_id omap_sham_of_match[] = {
2153 {},
2154};
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002155
Mark A. Greerc3c3b322013-01-15 13:53:02 -07002156static int omap_sham_get_res_of(struct omap_sham_dev *dd,
Mark A. Greer03feec92012-12-21 10:04:06 -07002157 struct device *dev, struct resource *res)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002158{
Mark A. Greer03feec92012-12-21 10:04:06 -07002159 return -EINVAL;
2160}
2161#endif
2162
2163static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
2164 struct platform_device *pdev, struct resource *res)
2165{
2166 struct device *dev = &pdev->dev;
2167 struct resource *r;
2168 int err = 0;
2169
2170 /* Get the base address */
2171 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2172 if (!r) {
2173 dev_err(dev, "no MEM resource info\n");
2174 err = -ENODEV;
2175 goto err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002176 }
Mark A. Greer03feec92012-12-21 10:04:06 -07002177 memcpy(res, r, sizeof(*res));
2178
2179 /* Get the IRQ */
2180 dd->irq = platform_get_irq(pdev, 0);
2181 if (dd->irq < 0) {
2182 dev_err(dev, "no IRQ resource info\n");
2183 err = dd->irq;
2184 goto err;
2185 }
2186
Mark A. Greer0d373d62012-12-21 10:04:08 -07002187 /* Only OMAP2/3 can be non-DT */
2188 dd->pdata = &omap_sham_pdata_omap2;
2189
Mark A. Greer03feec92012-12-21 10:04:06 -07002190err:
2191 return err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002192}
2193
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08002194static int omap_sham_probe(struct platform_device *pdev)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002195{
2196 struct omap_sham_dev *dd;
2197 struct device *dev = &pdev->dev;
Mark A. Greer03feec92012-12-21 10:04:06 -07002198 struct resource res;
Mark A. Greerdfd061d2012-12-21 10:04:04 -07002199 dma_cap_mask_t mask;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002200 int err, i, j;
Mark A. Greer0d373d62012-12-21 10:04:08 -07002201 u32 rev;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002202
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302203 dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002204 if (dd == NULL) {
2205 dev_err(dev, "unable to alloc data struct.\n");
2206 err = -ENOMEM;
2207 goto data_err;
2208 }
2209 dd->dev = dev;
2210 platform_set_drvdata(pdev, dd);
2211
2212 INIT_LIST_HEAD(&dd->list);
2213 spin_lock_init(&dd->lock);
2214 tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002215 crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2216
Mark A. Greer03feec92012-12-21 10:04:06 -07002217 err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2218 omap_sham_get_res_pdev(dd, pdev, &res);
2219 if (err)
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302220 goto data_err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002221
Laurent Navet30862282013-05-02 14:00:38 +02002222 dd->io_base = devm_ioremap_resource(dev, &res);
2223 if (IS_ERR(dd->io_base)) {
2224 err = PTR_ERR(dd->io_base);
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302225 goto data_err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002226 }
Mark A. Greer03feec92012-12-21 10:04:06 -07002227 dd->phys_base = res.start;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002228
Lokesh Vutla0de9c382013-07-26 12:29:16 +05302229 err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2230 IRQF_TRIGGER_NONE, dev_name(dev), dd);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002231 if (err) {
Lokesh Vutla0de9c382013-07-26 12:29:16 +05302232 dev_err(dev, "unable to request irq %d, err = %d\n",
2233 dd->irq, err);
Lokesh Vutla7a7e4b72013-07-26 12:29:17 +05302234 goto data_err;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002235 }
2236
Mark A. Greerdfd061d2012-12-21 10:04:04 -07002237 dma_cap_zero(mask);
2238 dma_cap_set(DMA_SLAVE, mask);
2239
Peter Ujfalusidbe24622016-04-29 16:03:41 +03002240 dd->dma_lch = dma_request_chan(dev, "rx");
2241 if (IS_ERR(dd->dma_lch)) {
2242 err = PTR_ERR(dd->dma_lch);
2243 if (err == -EPROBE_DEFER)
2244 goto data_err;
2245
Lokesh Vutlab8411cc2013-08-20 20:32:34 +05302246 dd->polling_mode = 1;
2247 dev_dbg(dev, "using polling mode instead of dma\n");
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002248 }
2249
Mark A. Greer0d373d62012-12-21 10:04:08 -07002250 dd->flags |= dd->pdata->flags;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002251
Tero Kristoe93f7672016-06-22 16:23:34 +03002252 pm_runtime_use_autosuspend(dev);
2253 pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2254
Mark A. Greerb359f032012-12-21 10:04:02 -07002255 pm_runtime_enable(dev);
Vutla, Lokeshb0a3d892015-03-31 09:52:24 +05302256 pm_runtime_irq_safe(dev);
Pali Rohár604c3102015-03-08 11:01:01 +01002257
2258 err = pm_runtime_get_sync(dev);
2259 if (err < 0) {
2260 dev_err(dev, "failed to get sync: %d\n", err);
2261 goto err_pm;
2262 }
2263
Mark A. Greer0d373d62012-12-21 10:04:08 -07002264 rev = omap_sham_read(dd, SHA_REG_REV(dd));
2265	pm_runtime_put_sync(dev);
Mark A. Greerb359f032012-12-21 10:04:02 -07002266
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002267 dev_info(dev, "hw accel on OMAP rev %u.%u\n",
Mark A. Greer0d373d62012-12-21 10:04:08 -07002268 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2269 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002270
2271 spin_lock(&sham.lock);
2272 list_add_tail(&dd->list, &sham.dev_list);
2273 spin_unlock(&sham.lock);
2274
Mark A. Greerd20fb182012-12-21 10:04:09 -07002275 for (i = 0; i < dd->pdata->algs_info_size; i++) {
2276 for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
Tero Kristo99a7fff2016-09-19 18:22:12 +03002277 struct ahash_alg *alg;
2278
2279 alg = &dd->pdata->algs_info[i].algs_list[j];
2280 alg->export = omap_sham_export;
2281 alg->import = omap_sham_import;
2282 alg->halg.statesize = sizeof(struct omap_sham_reqctx);
2283 err = crypto_register_ahash(alg);
Mark A. Greerd20fb182012-12-21 10:04:09 -07002284 if (err)
2285 goto err_algs;
2286
2287 dd->pdata->algs_info[i].registered++;
2288 }
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002289 }
2290
2291 return 0;
2292
2293err_algs:
Mark A. Greerd20fb182012-12-21 10:04:09 -07002294 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2295 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2296 crypto_unregister_ahash(
2297 &dd->pdata->algs_info[i].algs_list[j]);
Pali Rohár604c3102015-03-08 11:01:01 +01002298err_pm:
Mark A. Greerb359f032012-12-21 10:04:02 -07002299 pm_runtime_disable(dev);
Dan Carpenterd462e322016-05-18 13:39:05 +03002300 if (!dd->polling_mode)
Mark A. Greerf13ab862013-11-12 13:12:27 -07002301 dma_release_channel(dd->dma_lch);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002302data_err:
2303 dev_err(dev, "initialization failed.\n");
2304
2305 return err;
2306}
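
/*
 * Probe order recap: resources (DT or legacy) -> MMIO mapping -> IRQ ->
 * DMA channel ("rx", falling back to polling mode unless the error is
 * -EPROBE_DEFER) -> runtime PM with autosuspend -> revision readout ->
 * ahash registration.  The error labels unwind in reverse order; note
 * that a failed pm_runtime_get_sync() still ends in pm_runtime_disable()
 * via err_pm.
 */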
2307
Greg Kroah-Hartman49cfe4d2012-12-21 13:14:09 -08002308static int omap_sham_remove(struct platform_device *pdev)
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002309{
2310	struct omap_sham_dev *dd;
Mark A. Greerd20fb182012-12-21 10:04:09 -07002311 int i, j;
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002312
2313 dd = platform_get_drvdata(pdev);
2314 if (!dd)
2315 return -ENODEV;
2316 spin_lock(&sham.lock);
2317 list_del(&dd->list);
2318 spin_unlock(&sham.lock);
Mark A. Greerd20fb182012-12-21 10:04:09 -07002319 for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2320 for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2321 crypto_unregister_ahash(
2322 &dd->pdata->algs_info[i].algs_list[j]);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002323 tasklet_kill(&dd->done_task);
Mark A. Greerb359f032012-12-21 10:04:02 -07002324 pm_runtime_disable(&pdev->dev);
Mark A. Greerf13ab862013-11-12 13:12:27 -07002325
Peter Ujfalusidbe24622016-04-29 16:03:41 +03002326 if (!dd->polling_mode)
Mark A. Greerf13ab862013-11-12 13:12:27 -07002327 dma_release_channel(dd->dma_lch);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002328
2329 return 0;
2330}
2331
Mark A. Greer3b3f4402012-12-21 10:04:03 -07002332#ifdef CONFIG_PM_SLEEP
2333static int omap_sham_suspend(struct device *dev)
2334{
2335 pm_runtime_put_sync(dev);
2336 return 0;
2337}
2338
2339static int omap_sham_resume(struct device *dev)
2340{
Pali Rohár604c3102015-03-08 11:01:01 +01002341 int err = pm_runtime_get_sync(dev);
2342 if (err < 0) {
2343 dev_err(dev, "failed to get sync: %d\n", err);
2344 return err;
2345 }
Mark A. Greer3b3f4402012-12-21 10:04:03 -07002346 return 0;
2347}
2348#endif
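
/*
 * System sleep piggybacks on runtime PM: suspend releases a runtime PM
 * reference, resume re-acquires it and propagates any failure; clock
 * and context handling is left to the runtime PM callbacks.
 */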
2349
Jingoo Hanae12fe22014-02-27 20:33:32 +09002350static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
Mark A. Greer3b3f4402012-12-21 10:04:03 -07002351
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002352static struct platform_driver omap_sham_driver = {
2353 .probe = omap_sham_probe,
2354 .remove = omap_sham_remove,
2355 .driver = {
2356 .name = "omap-sham",
Mark A. Greer3b3f4402012-12-21 10:04:03 -07002357 .pm = &omap_sham_pm_ops,
Mark A. Greer03feec92012-12-21 10:04:06 -07002358 .of_match_table = omap_sham_of_match,
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002359 },
2360};
2361
Sachin Kamat02613702013-03-04 15:09:43 +05302362module_platform_driver(omap_sham_driver);
Dmitry Kasatkin8628e7c2010-05-03 11:10:59 +08002363
2364MODULE_DESCRIPTION("OMAP SHA1/MD5/SHA2 hw acceleration support.");
2365MODULE_LICENSE("GPL v2");
2366MODULE_AUTHOR("Dmitry Kasatkin");
Joni Lapilainen718249d2013-10-26 23:00:41 +02002367MODULE_ALIAS("platform:omap-sham");