// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests and the retry
	 * mechanism is not supported, make sure we are completing the
	 * current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If the hardware doesn't support the retry mechanism, keep track
	 * of the request we are processing now. We'll need it on
	 * completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have a request that needs processing */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If retry mechanism is supported,
		 * unprepare current request and
		 * enqueue it back into crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if the hardware can enqueue
	 * multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

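/*
 * Illustrative sketch (not part of the original file): crypto_pump_requests()
 * above finds the driver callbacks through crypto_tfm_ctx(), so drivers are
 * expected to embed struct crypto_engine_ctx as the first member of their
 * transform context and fill in the ops at init time. The names "my_tfm_ctx",
 * "my_do_one_request" and "my_init_tfm" below are hypothetical.
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must come first
 *		// driver-specific state (keys, DMA descriptors, ...)
 *	};
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *
 *		// program the hardware here; return 0 on success, or
 *		// -ENOSPC to have the request requeued when the engine
 *		// was created with retry_support
 *		return 0;
 *	}
 *
 *	static int my_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.do_one_request = my_do_one_request;
 *		ctx->enginectx.op.prepare_request = NULL;
 *		ctx->enginectx.op.unprepare_request = NULL;
 *		return 0;
 *	}
 */
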
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, queue the pump work so the request gets processed
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

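/*
 * Illustrative sketch (not part of the original file): a driver's request
 * handler typically does no work itself and only hands the request over to
 * the engine; the engine pump then calls the driver's do_one_request() from
 * its kworker. "my_dev", "my_dev_from_request" and "my_skcipher_encrypt" are
 * hypothetical names.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_request(req);	// hypothetical lookup
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
 *	}
 *
 * The return value comes from crypto_enqueue_request(): -EINPROGRESS when
 * the request was queued, or -EBUSY when it had to be backlogged.
 */
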
/**
 * crypto_finalize_aead_request - finalize one aead_request if the
 * request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if the
 * request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if the
 * request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if the
 * request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

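/*
 * Illustrative sketch (not part of the original file): when the hardware
 * signals completion (usually in the driver's interrupt handler or a
 * tasklet), the driver reports the result through the matching finalize
 * helper, which completes the request and kicks the pump for the next one.
 * "my_dev" and its "cur_req" field are hypothetical.
 *
 *	static void my_complete_request(struct my_dev *dd, int err)
 *	{
 *		crypto_finalize_skcipher_request(dd->engine, dd->cur_req, err);
 *	}
 */
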
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait a
	 * while so the queued requests can be pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if the hardware has support
	 * for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
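
/*
 * Illustrative sketch (not part of the original file): typical engine
 * lifecycle in a hypothetical driver, using the helpers defined above.
 *
 *	// probe()
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret) {
 *		crypto_engine_exit(dd->engine);
 *		return ret;
 *	}
 *
 *	// remove()
 *	crypto_engine_exit(dd->engine);
 */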

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");