// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests with a crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from the engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * If hardware throws any other error code,
		 * requeue only backlog requests.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    ((ret != -ENOSPC) &&
		     !(async_req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If retry mechanism is supported,
		 * unprepare current request and
		 * enqueue it back into crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return;
}
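
/*
 * Example (illustrative sketch, compiled out): crypto_pump_requests() reaches
 * driver code only through struct crypto_engine_ctx, which crypto_tfm_ctx()
 * is expected to return, so a driver places that struct at the start of its
 * transform context. The callback signatures follow struct crypto_engine_op
 * in <crypto/engine.h>; every my_* name below is a hypothetical placeholder.
 */
#if 0
struct my_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must be the first member */
	/* ... driver-specific key material, DMA state, etc. ... */
};

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/*
	 * Program the hardware with req and return. Completion is reported
	 * asynchronously, typically from the IRQ handler, by calling
	 * crypto_finalize_skcipher_request(); the return value here only
	 * says whether the request was accepted.
	 */
	return my_hw_submit(engine, req);	/* hypothetical helper */
}

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.do_one_request = my_do_one_request;
	ctx->enginectx.op.prepare_request = NULL;	/* both hooks optional */
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
#endif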

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to add to the engine queue
 * @need_pump: whether to kick the request pump if the engine is not busy
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to add to the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to add to the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to add to the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to add to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to add to the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
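
/*
 * Example (illustrative sketch, compiled out): an skcipher .encrypt hook
 * usually does no work itself and simply hands the request to the engine;
 * my_tfm_ctx and its engine pointer are hypothetical. The transfer typically
 * returns -EINPROGRESS once queued, and the outcome is delivered later
 * through req->base.complete().
 */
#if 0
static int my_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* ctx->engine is assumed to have been stashed at init time */
	return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
}
#endif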

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
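
/*
 * Example (illustrative sketch, compiled out): the completion side, e.g. an
 * IRQ handler, reports the outcome of the request previously accepted by
 * ->do_one_request(); the finalize call also re-queues the pump work so the
 * next queued request gets processed. my_device, dev->engine and dev->req
 * are hypothetical.
 */
#if 0
static irqreturn_t my_crypto_irq(int irq, void *data)
{
	struct my_device *dev = data;

	/* ... read status registers, copy out results, unmap DMA ... */

	crypto_finalize_skcipher_request(dev->engine, dev->req, 0);

	return IRQ_HANDLED;
}
#endif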

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need
	 * to wait for a while so that the queued requests can be pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to one hardware engine
 * @retry_support: whether the hardware has support for the retry mechanism
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       bool rt, int qlen)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
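
/*
 * Example (illustrative sketch, compiled out): a typical probe path pairs
 * crypto_engine_alloc_init() with crypto_engine_start() before registering
 * any algorithms. my_probe and my_device are hypothetical; the engine
 * structure itself is devm-allocated, so only the kworker needs explicit
 * teardown later via crypto_engine_exit().
 */
#if 0
static int my_probe(struct platform_device *pdev)
{
	struct my_device *dev;

	/* ... allocate dev, map registers, request IRQs, etc. ... */

	dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!dev->engine)
		return -ENOMEM;

	return crypto_engine_start(dev->engine);
}
#endif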

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
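
/*
 * Example (illustrative sketch, compiled out): the matching remove path.
 * crypto_engine_exit() first drains the queue via crypto_engine_stop() and
 * then destroys the kworker; my_remove and my_device are hypothetical.
 */
#if 0
static int my_remove(struct platform_device *pdev)
{
	struct my_device *dev = platform_get_drvdata(pdev);

	return crypto_engine_exit(dev->engine);
}
#endif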

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");