// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received. With multiple queues available, the CCP can process more
 * than one cmd at a time. Therefore we must maintain a cmd list to
 * ensure the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

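/* Note: this structure is not referenced anywhere in this file; it
 * appears to be a leftover from the driver's original queuing
 * implementation.
 */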
struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};

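/* A cmd submission is considered successful if the CCP accepted the
 * cmd outright (0), is processing it (-EINPROGRESS) or has placed it
 * on its backlog (-EBUSY); any other value indicates failure.
 */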
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

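/* Remove a completed cmd from the request queue and return the next
 * queued cmd (if any) that targets the same tfm, along with the next
 * backlogged cmd that should now be started.  The queue lock is taken
 * and released internally.
 */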
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue, so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 * Because cmds can be executed from any point in the cmd list,
	 * special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

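/* Completion callback invoked by the CCP driver when a cmd finishes.
 * A transient -EINPROGRESS notification is forwarded to a previously
 * backlogged request; on final completion the queue is updated, the
 * crypto API callbacks are run and the next held cmd for the same tfm
 * (if any) is submitted to the CCP.
 */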
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

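/* Add a cmd to the request queue and, if no other cmd for the same
 * tfm is already queued, submit it to the CCP.  Returns -EINPROGRESS
 * or -EBUSY on success, following the async crypto API conventions;
 * the wrapper is freed on failure.
 */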
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

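/* Copy the entries of sg_add into the first unused slots of the
 * pre-allocated scatterlist in table, returning the last entry
 * written, or NULL (with a warning) if sg_add does not fit.
 */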
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}

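/* Register each algorithm family that has not been disabled via its
 * module parameter; every helper adds its algorithms to the matching
 * list head so they can be unregistered later.
 */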
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&cipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&cipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&cipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}

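/* Unregister and free everything that ccp_register_algs() placed on
 * the hash, cipher, aead and akcipher lists.
 */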
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}

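/* Module init: bail out if no CCP device is present, then set up the
 * shared request queue and register the algorithms, unwinding any
 * partial registration on failure.
 */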
static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret)
		return ret;

	spin_lock_init(&req_queue_lock);
	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

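/* Module exit: tear down all algorithms registered at init time. */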
static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);