/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>

#include "cesa.h"

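/*
 * Per-transform context: the common CESA context plus the AES key
 * schedules expanded in software by crypto_aes_expand_key() at setkey
 * time.
 */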
struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

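/*
 * Process one chunk of a standard (CPU-driven) request: copy up to
 * CESA_SA_SRAM_PAYLOAD_SIZE bytes of input into the engine SRAM, set the
 * operation length to what was actually copied and kick the engine. The
 * full operation context (including key/IV material) only needs to reach
 * the SRAM once; later chunks rewrite just the descriptor.
 */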
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

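/*
 * Copy the data processed by the engine out of the SRAM and into the
 * destination scatterlist. Returns -EINPROGRESS while input remains to
 * be processed, 0 once the whole request is done.
 */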
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

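/*
 * Per-request completion handler: finish the current chunk and, once the
 * request is complete, copy the updated IV from the SRAM back into the
 * request so callers can chain operations.
 */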
static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	int ret;

	ret = mv_cesa_ablkcipher_std_process(ablkreq, status);
	if (ret)
		return ret;

	memcpy(ablkreq->info, engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
	       crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));

	return 0;
}

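/* Launch (or resume) processing of a request on its engine. */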
static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_std_step(ablkreq);
}

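/*
 * Reset the progress counters, let mv_cesa_adjust_op() fix up the
 * operation for the selected engine and copy the initial operation
 * context into the engine SRAM.
 */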
static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->size = 0;
	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy(engine->sram, &sreq->op, sizeof(sreq->op));
}

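/*
 * Bind the request to the engine it has been scheduled on, then set up
 * the SRAM for the first chunk.
 */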
static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->req.base.engine = engine;

	mv_cesa_ablkcipher_std_prepare(ablkreq);
}

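/*
 * Standard (SRAM-only) requests hold no extra resources, hence the empty
 * cleanup below.
 */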
static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
}

static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.prepare = mv_cesa_ablkcipher_prepare,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
};

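/*
 * Hook up the request operations and tell the crypto core how much
 * per-request private data to reserve.
 */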
static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}

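/*
 * Expand the key with the generic software implementation, then build the
 * decryption key material the engine expects. After the loop below,
 * key_dec holds the last key_length / 4 words of the encryption schedule:
 * crypto_aes_expand_key() already put the last round key in key_dec[0..3],
 * and the remaining (key_length - 16) / 4 words are taken from key_enc
 * (e.g. for AES-256: remaining = 4, offset = 52, so key_dec[4..7] =
 * key_enc[52..55]). Presumably the hardware, which expands keys itself,
 * rewinds its key schedule from this tail when decrypting.
 */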
static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

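/*
 * Initialize the standard (CESA_STD_REQ) part of the request from the
 * operation template; the context still has to be copied to the SRAM, so
 * skip_ctx starts out false.
 */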
static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;

	sreq->base.type = CESA_STD_REQ;
	sreq->op = *op_templ;
	sreq->skip_ctx = false;

	return 0;
}

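/*
 * Validate and prepare a cipher request: the input length must be a whole
 * number of blocks, and the scatterlist entry counts are cached for the
 * SRAM copy helpers.
 */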
static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	return mv_cesa_ablkcipher_std_req_init(req, tmpl);
}

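/*
 * Common AES path: pick the encryption or decryption key schedule based
 * on the requested direction, copy it into the operation template, encode
 * the key length in the config word and queue the request.
 */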
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret, i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	return mv_cesa_queue_req(&req->base);
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

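/*
 * CBC reuses the common AES path: switch the chaining mode in the config
 * word and seed the operation context with the caller-supplied IV.
 */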
static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};