// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,	/* walk over physical pages; writes deferred */
	SKCIPHER_WALK_SLOW = 1 << 1,	/* chunk bounced through an aligned buffer */
	SKCIPHER_WALK_COPY = 1 << 2,	/* data staged through walk->page */
	SKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
	SKCIPHER_WALK_SLEEP = 1 << 4,	/* allocations may sleep */
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

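/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): with start
 * at page offset 0xff8 and len == 16, the last byte would land at offset
 * 0x7 of the following page, so end_page points at that page's start and
 * max() moves the spot there, giving a chunk that lies entirely within
 * one page.  If start + len - 1 stays on start's own page, end_page falls
 * below start and max() leaves start unchanged.
 */
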
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

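/*
 * Calling convention note (a summary of the logic above, not new
 * behaviour): a driver passes the number of bytes it did NOT process as
 * err, so a step that consumed everything calls skcipher_walk_done(walk,
 * 0) and one that left 3 tail bytes calls skcipher_walk_done(walk, 3).
 * A negative err aborts the walk and is returned unchanged.
 */
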
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/*
		 * Calculate the minimum alignment of p->buffer: x ^ (x - 1)
		 * keeps the lowest set bit of x and everything below it, so
		 * shifting right once yields (lowest power of two dividing
		 * sizeof(*p)) - 1, i.e. the natural alignment of the
		 * flexible buffer[] member at the end of the struct.
		 */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

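/*
 * Path selection summary (descriptive note): per step the walk picks
 *  - the slow path (skcipher_next_slow) when fewer than bsize contiguous
 *    bytes are available in either scatterlist, bouncing one chunk
 *    through an aligned temporary buffer;
 *  - the copy path (skcipher_next_copy) when the data is contiguous but
 *    misaligned, staging up to a page through walk->page;
 *  - the fast path (skcipher_next_fast) otherwise, mapping src/dst in
 *    place (with SKCIPHER_WALK_DIFF when they differ).
 */
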
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

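/*
 * Sketch of the loop a synchronous driver typically runs on top of
 * skcipher_walk_virt() (illustrative only; process() is a hypothetical
 * stand-in for the driver's cipher routine, returning the number of
 * bytes it handled in this step):
 *
 *	struct skcipher_walk walk;
 *	unsigned int n;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		n = process(walk.dst.virt.addr, walk.src.virt.addr,
 *			    walk.nbytes, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */
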
void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

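/*
 * Pairing note (summarising the intended usage, not new behaviour): an
 * async walk started with skcipher_walk_async() operates on physical
 * pages and queues any bounce-buffer writebacks on walk->buffers; the
 * caller must finish with skcipher_walk_complete() once the operation
 * is done, so queued writes are flushed (or discarded on error) and the
 * buffers are freed.
 */
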
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

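/*
 * Example of the decrypt bound (illustrative): for gcm(aes) with a
 * 16-byte authentication tag and req->cryptlen == 160, only the first
 * 144 bytes are ciphertext, so the walk covers 144 bytes and the
 * trailing tag is left for the AEAD implementation to verify.
 */
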
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ablkcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (tfm->keysize)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	skcipher_set_needkey(skcipher);

	return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = tfm->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = tfm->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

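/*
 * End-to-end caller sketch for the two entry points above (illustrative
 * only; error handling mostly elided, key/src_sg/dst_sg/iv supplied by
 * the caller):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      NULL, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */
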
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

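/*
 * Why the sync wrapper exists (illustrative sketch): since the request
 * size is bounded by MAX_SYNC_SKCIPHER_REQSIZE, callers can keep the
 * request on the stack rather than allocating it:
 *
 *	struct crypto_sync_skcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, sg, sg, len, iv);
 *		err = crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 */
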
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	int err;

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(cipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_spawn(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
 *		    returned here.  It must be dropped with crypto_mod_put().
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
			       struct crypto_alg **cipher_alg_ret)
{
	struct crypto_attr_type *algt;
	struct crypto_alg *cipher_alg;
	struct skcipher_instance *inst;
	struct crypto_spawn *spawn;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return ERR_PTR(-EINVAL);

	mask = CRYPTO_ALG_TYPE_MASK |
	       crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK);

	cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
	if (IS_ERR(cipher_alg))
		return ERR_CAST(cipher_alg);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst) {
		err = -ENOMEM;
		goto err_put_cipher_alg;
	}
	spawn = skcipher_instance_ctx(inst);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	err = crypto_init_spawn(spawn, cipher_alg,
				skcipher_crypto_instance(inst),
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto err_free_inst;
	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	*cipher_alg_ret = cipher_alg;
	return inst;

err_free_inst:
	kfree(inst);
err_put_cipher_alg:
	crypto_mod_put(cipher_alg);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

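/*
 * Sketch of a template ->create() built on the helper above, modelled
 * on a simple mode such as ecb (names are illustrative):
 *
 *	static int crypto_ecb_create(struct crypto_template *tmpl,
 *				     struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		struct crypto_alg *alg;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb, &alg);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.ivsize = 0;
 *		inst->alg.encrypt = crypto_ecb_encrypt;
 *		inst->alg.decrypt = crypto_ecb_decrypt;
 *
 *		crypto_mod_put(alg);
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */
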
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");