// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_skcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct skcipher_alg algo;
};

/*
 * The algs_lock protects the per-algorithm active_devs counters below
 * and crypto algorithm registration/unregistration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);

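/*
 * Completion callback for symmetric-cipher requests on a data virtqueue:
 * translate the device status into an errno and finalize the request.
 */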
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->skcipher_req;
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
						    ablk_req, error);
	}
}

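/* Sum the byte length of all entries in a scatterlist. */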
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

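/*
 * Map the key length to a virtio crypto algorithm; only AES-CBC with
 * 128/192/256-bit keys is supported here.
 */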
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

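/*
 * Create one cipher session on the device (encrypt or decrypt direction)
 * via the control virtqueue and store the returned session id in the
 * transform context.
 */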
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid doing DMA from the stack; use a dynamically allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	spin_lock(&vcrypto->ctrl_lock);
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kfree_sensitive(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));
		kfree_sensitive(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kfree_sensitive(cipher_key);
	return 0;
}

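/*
 * Destroy a previously created session (encrypt or decrypt direction)
 * on the device via the control virtqueue.
 */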
static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status,
		       destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}

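/*
 * Validate the key and create both the encryption and the decryption
 * session; on failure the already created encryption session is torn
 * down again.
 */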
static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;
}

/* Note: kernel crypto API implementation */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				virtcrypto_get_dev_node(node,
					VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

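/*
 * Build the virtio request for one skcipher operation: allocate the
 * request header and a private IV buffer, chain outhdr + IV + source
 * data as device-readable and destination data + status as
 * device-writable scatterlists, and queue them on the data virtqueue.
 */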
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
		 src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
			   dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
		 req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		     sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid doing DMA from the stack; use a dynamically allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
			  dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
	/*
	 * For decryption, stash the last ciphertext block into req->iv now:
	 * it becomes the chaining IV and req->src may be overwritten when
	 * the operation is done in place.
	 */
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}

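/*
 * skcipher encrypt/decrypt entry points: validate the request length,
 * fill in the per-request context and hand the request to the crypto
 * engine of the (default) first data virtqueue.
 */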
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

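/*
 * Transform init/exit: set the per-request context size and the crypto
 * engine callbacks on init; tear down both sessions and drop the device
 * reference on exit.
 */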
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

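/* Crypto engine do_one_request callback: submit the request to the device. */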
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

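/*
 * Called from the data virtqueue callback once the device has completed
 * a request: update the chaining IV for encryption, free the private IV
 * buffer, release the request resources and complete the request on the
 * crypto engine.
 */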
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					 req, err);
}

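/* Algorithms exposed to the kernel crypto API by this driver */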
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
} };

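/*
 * Register/unregister the algorithms above with the kernel crypto API.
 * A per-algorithm count of active devices ensures each algorithm is
 * registered once, when the first supporting device appears, and
 * unregistered when the last one goes away.
 */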
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}