Line data Source code
1 : /*
2 : * hw_random/core.c: HWRNG core API
3 : *
4 : * Copyright 2006 Michael Buesch <m@bues.ch>
5 : * Copyright 2005 (c) MontaVista Software, Inc.
6 : *
7 : * Please read Documentation/admin-guide/hw_random.rst for details on use.
8 : *
9 : * This software may be used and distributed according to the terms
10 : * of the GNU General Public License, incorporated herein by reference.
11 : */
12 :
13 : #include <linux/delay.h>
14 : #include <linux/device.h>
15 : #include <linux/err.h>
16 : #include <linux/fs.h>
17 : #include <linux/hw_random.h>
18 : #include <linux/kernel.h>
19 : #include <linux/kthread.h>
20 : #include <linux/sched/signal.h>
21 : #include <linux/miscdevice.h>
22 : #include <linux/module.h>
23 : #include <linux/random.h>
24 : #include <linux/sched.h>
25 : #include <linux/slab.h>
26 : #include <linux/uaccess.h>
27 :
28 : #define RNG_MODULE_NAME "hw_random"
29 :
30 : static struct hwrng *current_rng;
31 : /* the current rng has been explicitly chosen by user via sysfs */
32 : static int cur_rng_set_by_user;
33 : static struct task_struct *hwrng_fill;
34 : /* list of registered rngs, sorted descending by quality */
35 : static LIST_HEAD(rng_list);
36 : /* Protects rng_list and current_rng */
37 : static DEFINE_MUTEX(rng_mutex);
38 : /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
39 : static DEFINE_MUTEX(reading_mutex);
40 : static int data_avail;
41 : static u8 *rng_buffer, *rng_fillbuf;
42 : static unsigned short current_quality;
43 : static unsigned short default_quality; /* = 0; default to "off" */
44 :
45 : module_param(current_quality, ushort, 0644);
46 : MODULE_PARM_DESC(current_quality,
47 : "current hwrng entropy estimation per 1024 bits of input");
48 : module_param(default_quality, ushort, 0644);
49 : MODULE_PARM_DESC(default_quality,
50 : "default entropy content of hwrng per 1024 bits of input");
51 :
52 : static void drop_current_rng(void);
53 : static int hwrng_init(struct hwrng *rng);
54 : static void start_khwrngd(void);
55 :
56 : static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
57 : int wait);
58 :
59 2 : static size_t rng_buffer_size(void)
60 : {
61 2 : return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
62 : }
63 :
64 0 : static void add_early_randomness(struct hwrng *rng)
65 : {
66 0 : int bytes_read;
67 0 : size_t size = min_t(size_t, 16, rng_buffer_size());
68 :
69 0 : mutex_lock(&reading_mutex);
70 0 : bytes_read = rng_get_data(rng, rng_buffer, size, 0);
71 0 : mutex_unlock(&reading_mutex);
72 0 : if (bytes_read > 0)
73 0 : add_device_randomness(rng_buffer, bytes_read);
74 0 : }
75 :
76 0 : static inline void cleanup_rng(struct kref *kref)
77 : {
78 0 : struct hwrng *rng = container_of(kref, struct hwrng, ref);
79 :
80 0 : if (rng->cleanup)
81 0 : rng->cleanup(rng);
82 :
83 0 : complete(&rng->cleanup_done);
84 0 : }
85 :
86 0 : static int set_current_rng(struct hwrng *rng)
87 : {
88 0 : int err;
89 :
90 0 : BUG_ON(!mutex_is_locked(&rng_mutex));
91 :
92 0 : err = hwrng_init(rng);
93 0 : if (err)
94 : return err;
95 :
96 0 : drop_current_rng();
97 0 : current_rng = rng;
98 :
99 0 : return 0;
100 : }
101 :
102 0 : static void drop_current_rng(void)
103 : {
104 0 : BUG_ON(!mutex_is_locked(&rng_mutex));
105 0 : if (!current_rng)
106 : return;
107 :
108 : /* decrease last reference for triggering the cleanup */
109 0 : kref_put(&current_rng->ref, cleanup_rng);
110 0 : current_rng = NULL;
111 : }
112 :
113 : /* Returns ERR_PTR(), NULL or refcounted hwrng */
114 0 : static struct hwrng *get_current_rng_nolock(void)
115 : {
116 0 : if (current_rng)
117 0 : kref_get(&current_rng->ref);
118 :
119 0 : return current_rng;
120 : }
121 :
122 0 : static struct hwrng *get_current_rng(void)
123 : {
124 0 : struct hwrng *rng;
125 :
126 0 : if (mutex_lock_interruptible(&rng_mutex))
127 0 : return ERR_PTR(-ERESTARTSYS);
128 :
129 0 : rng = get_current_rng_nolock();
130 :
131 0 : mutex_unlock(&rng_mutex);
132 0 : return rng;
133 : }
134 :
135 0 : static void put_rng(struct hwrng *rng)
136 : {
137 : /*
138 : * Hold rng_mutex here so we serialize in case they set_current_rng
139 : * on rng again immediately.
140 : */
141 0 : mutex_lock(&rng_mutex);
142 0 : if (rng)
143 0 : kref_put(&rng->ref, cleanup_rng);
144 0 : mutex_unlock(&rng_mutex);
145 0 : }
146 :
147 0 : static int hwrng_init(struct hwrng *rng)
148 : {
149 0 : if (kref_get_unless_zero(&rng->ref))
150 0 : goto skip_init;
151 :
152 0 : if (rng->init) {
153 0 : int ret;
154 :
155 0 : ret = rng->init(rng);
156 0 : if (ret)
157 : return ret;
158 : }
159 :
160 0 : kref_init(&rng->ref);
161 0 : reinit_completion(&rng->cleanup_done);
162 :
163 0 : skip_init:
164 0 : current_quality = rng->quality ? : default_quality;
165 0 : if (current_quality > 1024)
166 0 : current_quality = 1024;
167 :
168 0 : if (current_quality == 0 && hwrng_fill)
169 0 : kthread_stop(hwrng_fill);
170 0 : if (current_quality > 0 && !hwrng_fill)
171 0 : start_khwrngd();
172 :
173 : return 0;
174 : }
175 :
176 0 : static int rng_dev_open(struct inode *inode, struct file *filp)
177 : {
178 : /* enforce read-only access to this chrdev */
179 0 : if ((filp->f_mode & FMODE_READ) == 0)
180 : return -EINVAL;
181 0 : if (filp->f_mode & FMODE_WRITE)
182 0 : return -EINVAL;
183 : return 0;
184 : }
185 :
186 0 : static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
187 : int wait) {
188 0 : int present;
189 :
190 0 : BUG_ON(!mutex_is_locked(&reading_mutex));
191 0 : if (rng->read)
192 0 : return rng->read(rng, (void *)buffer, size, wait);
193 :
194 0 : if (rng->data_present)
195 0 : present = rng->data_present(rng, wait);
196 : else
197 : present = 1;
198 :
199 0 : if (present)
200 0 : return rng->data_read(rng, (u32 *)buffer);
201 :
202 : return 0;
203 : }
204 :
205 0 : static ssize_t rng_dev_read(struct file *filp, char __user *buf,
206 : size_t size, loff_t *offp)
207 : {
208 0 : ssize_t ret = 0;
209 0 : int err = 0;
210 0 : int bytes_read, len;
211 0 : struct hwrng *rng;
212 :
213 0 : while (size) {
214 0 : rng = get_current_rng();
215 0 : if (IS_ERR(rng)) {
216 0 : err = PTR_ERR(rng);
217 0 : goto out;
218 : }
219 0 : if (!rng) {
220 0 : err = -ENODEV;
221 0 : goto out;
222 : }
223 :
224 0 : if (mutex_lock_interruptible(&reading_mutex)) {
225 0 : err = -ERESTARTSYS;
226 0 : goto out_put;
227 : }
228 0 : if (!data_avail) {
229 0 : bytes_read = rng_get_data(rng, rng_buffer,
230 : rng_buffer_size(),
231 0 : !(filp->f_flags & O_NONBLOCK));
232 0 : if (bytes_read < 0) {
233 0 : err = bytes_read;
234 0 : goto out_unlock_reading;
235 : }
236 0 : data_avail = bytes_read;
237 : }
238 :
239 0 : if (!data_avail) {
240 0 : if (filp->f_flags & O_NONBLOCK) {
241 0 : err = -EAGAIN;
242 0 : goto out_unlock_reading;
243 : }
244 : } else {
245 0 : len = data_avail;
246 0 : if (len > size)
247 0 : len = size;
248 :
249 0 : data_avail -= len;
250 :
251 0 : if (copy_to_user(buf + ret, rng_buffer + data_avail,
252 : len)) {
253 0 : err = -EFAULT;
254 0 : goto out_unlock_reading;
255 : }
256 :
257 0 : size -= len;
258 0 : ret += len;
259 : }
260 :
261 0 : mutex_unlock(&reading_mutex);
262 0 : put_rng(rng);
263 :
264 0 : if (need_resched())
265 0 : schedule_timeout_interruptible(1);
266 :
267 0 : if (signal_pending(current)) {
268 0 : err = -ERESTARTSYS;
269 0 : goto out;
270 : }
271 : }
272 0 : out:
273 0 : return ret ? : err;
274 :
275 0 : out_unlock_reading:
276 0 : mutex_unlock(&reading_mutex);
277 0 : out_put:
278 0 : put_rng(rng);
279 0 : goto out;
280 : }
281 :
282 : static const struct file_operations rng_chrdev_ops = {
283 : .owner = THIS_MODULE,
284 : .open = rng_dev_open,
285 : .read = rng_dev_read,
286 : .llseek = noop_llseek,
287 : };
288 :
289 : static const struct attribute_group *rng_dev_groups[];
290 :
291 : static struct miscdevice rng_miscdev = {
292 : .minor = HWRNG_MINOR,
293 : .name = RNG_MODULE_NAME,
294 : .nodename = "hwrng",
295 : .fops = &rng_chrdev_ops,
296 : .groups = rng_dev_groups,
297 : };
298 :
299 0 : static int enable_best_rng(void)
300 : {
301 0 : int ret = -ENODEV;
302 :
303 0 : BUG_ON(!mutex_is_locked(&rng_mutex));
304 :
305 : /* rng_list is sorted by quality, use the best (=first) one */
306 0 : if (!list_empty(&rng_list)) {
307 0 : struct hwrng *new_rng;
308 :
309 0 : new_rng = list_entry(rng_list.next, struct hwrng, list);
310 0 : ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
311 0 : if (!ret)
312 0 : cur_rng_set_by_user = 0;
313 : } else {
314 0 : drop_current_rng();
315 0 : cur_rng_set_by_user = 0;
316 0 : ret = 0;
317 : }
318 :
319 0 : return ret;
320 : }
321 :
322 0 : static ssize_t hwrng_attr_current_store(struct device *dev,
323 : struct device_attribute *attr,
324 : const char *buf, size_t len)
325 : {
326 0 : int err = -ENODEV;
327 0 : struct hwrng *rng, *old_rng, *new_rng;
328 :
329 0 : err = mutex_lock_interruptible(&rng_mutex);
330 0 : if (err)
331 : return -ERESTARTSYS;
332 :
333 0 : old_rng = current_rng;
334 0 : if (sysfs_streq(buf, "")) {
335 0 : err = enable_best_rng();
336 : } else {
337 0 : list_for_each_entry(rng, &rng_list, list) {
338 0 : if (sysfs_streq(rng->name, buf)) {
339 0 : cur_rng_set_by_user = 1;
340 0 : err = set_current_rng(rng);
341 0 : break;
342 : }
343 : }
344 : }
345 0 : new_rng = get_current_rng_nolock();
346 0 : mutex_unlock(&rng_mutex);
347 :
348 0 : if (new_rng) {
349 0 : if (new_rng != old_rng)
350 0 : add_early_randomness(new_rng);
351 0 : put_rng(new_rng);
352 : }
353 :
354 0 : return err ? : len;
355 : }
356 :
357 0 : static ssize_t hwrng_attr_current_show(struct device *dev,
358 : struct device_attribute *attr,
359 : char *buf)
360 : {
361 0 : ssize_t ret;
362 0 : struct hwrng *rng;
363 :
364 0 : rng = get_current_rng();
365 0 : if (IS_ERR(rng))
366 0 : return PTR_ERR(rng);
367 :
368 0 : ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
369 0 : put_rng(rng);
370 :
371 0 : return ret;
372 : }
373 :
374 0 : static ssize_t hwrng_attr_available_show(struct device *dev,
375 : struct device_attribute *attr,
376 : char *buf)
377 : {
378 0 : int err;
379 0 : struct hwrng *rng;
380 :
381 0 : err = mutex_lock_interruptible(&rng_mutex);
382 0 : if (err)
383 : return -ERESTARTSYS;
384 0 : buf[0] = '\0';
385 0 : list_for_each_entry(rng, &rng_list, list) {
386 0 : strlcat(buf, rng->name, PAGE_SIZE);
387 0 : strlcat(buf, " ", PAGE_SIZE);
388 : }
389 0 : strlcat(buf, "\n", PAGE_SIZE);
390 0 : mutex_unlock(&rng_mutex);
391 :
392 0 : return strlen(buf);
393 : }
394 :
395 0 : static ssize_t hwrng_attr_selected_show(struct device *dev,
396 : struct device_attribute *attr,
397 : char *buf)
398 : {
399 0 : return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
400 : }
401 :
402 : static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
403 : hwrng_attr_current_show,
404 : hwrng_attr_current_store);
405 : static DEVICE_ATTR(rng_available, S_IRUGO,
406 : hwrng_attr_available_show,
407 : NULL);
408 : static DEVICE_ATTR(rng_selected, S_IRUGO,
409 : hwrng_attr_selected_show,
410 : NULL);
411 :
412 : static struct attribute *rng_dev_attrs[] = {
413 : &dev_attr_rng_current.attr,
414 : &dev_attr_rng_available.attr,
415 : &dev_attr_rng_selected.attr,
416 : NULL
417 : };
418 :
419 : ATTRIBUTE_GROUPS(rng_dev);
420 :
421 0 : static void __exit unregister_miscdev(void)
422 : {
423 0 : misc_deregister(&rng_miscdev);
424 0 : }
425 :
426 1 : static int __init register_miscdev(void)
427 : {
428 1 : return misc_register(&rng_miscdev);
429 : }
430 :
431 0 : static int hwrng_fillfn(void *unused)
432 : {
433 0 : long rc;
434 :
435 0 : while (!kthread_should_stop()) {
436 0 : struct hwrng *rng;
437 :
438 0 : rng = get_current_rng();
439 0 : if (IS_ERR(rng) || !rng)
440 : break;
441 0 : mutex_lock(&reading_mutex);
442 0 : rc = rng_get_data(rng, rng_fillbuf,
443 : rng_buffer_size(), 1);
444 0 : mutex_unlock(&reading_mutex);
445 0 : put_rng(rng);
446 0 : if (rc <= 0) {
447 0 : pr_warn("hwrng: no data available\n");
448 0 : msleep_interruptible(10000);
449 0 : continue;
450 : }
451 : /* Outside lock, sure, but y'know: randomness. */
452 0 : add_hwgenerator_randomness((void *)rng_fillbuf, rc,
453 0 : rc * current_quality * 8 >> 10);
454 : }
455 0 : hwrng_fill = NULL;
456 0 : return 0;
457 : }
458 :
459 0 : static void start_khwrngd(void)
460 : {
461 0 : hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
462 0 : if (IS_ERR(hwrng_fill)) {
463 0 : pr_err("hwrng_fill thread creation failed\n");
464 0 : hwrng_fill = NULL;
465 : }
466 0 : }
467 :
468 0 : int hwrng_register(struct hwrng *rng)
469 : {
470 0 : int err = -EINVAL;
471 0 : struct hwrng *tmp;
472 0 : struct list_head *rng_list_ptr;
473 0 : bool is_new_current = false;
474 :
475 0 : if (!rng->name || (!rng->data_read && !rng->read))
476 0 : goto out;
477 :
478 0 : mutex_lock(&rng_mutex);
479 :
480 : /* Must not register two RNGs with the same name. */
481 0 : err = -EEXIST;
482 0 : list_for_each_entry(tmp, &rng_list, list) {
483 0 : if (strcmp(tmp->name, rng->name) == 0)
484 0 : goto out_unlock;
485 : }
486 :
487 0 : init_completion(&rng->cleanup_done);
488 0 : complete(&rng->cleanup_done);
489 :
490 : /* rng_list is sorted by decreasing quality */
491 0 : list_for_each(rng_list_ptr, &rng_list) {
492 0 : tmp = list_entry(rng_list_ptr, struct hwrng, list);
493 0 : if (tmp->quality < rng->quality)
494 : break;
495 : }
496 0 : list_add_tail(&rng->list, rng_list_ptr);
497 :
498 0 : if (!current_rng ||
499 0 : (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
500 : /*
501 : * Set new rng as current as the new rng source
502 : * provides better entropy quality and was not
503 : * chosen by userspace.
504 : */
505 0 : err = set_current_rng(rng);
506 0 : if (err)
507 0 : goto out_unlock;
508 : /* to use current_rng in add_early_randomness() we need
509 : * to take a ref
510 : */
511 0 : is_new_current = true;
512 0 : kref_get(&rng->ref);
513 : }
514 0 : mutex_unlock(&rng_mutex);
515 0 : if (is_new_current || !rng->init) {
516 : /*
517 : * Use a new device's input to add some randomness to
518 : * the system. If this rng device isn't going to be
519 : * used right away, its init function hasn't been
520 : * called yet by set_current_rng(); so only use the
521 : * randomness from devices that don't need an init callback
522 : */
523 0 : add_early_randomness(rng);
524 : }
525 0 : if (is_new_current)
526 0 : put_rng(rng);
527 : return 0;
528 0 : out_unlock:
529 0 : mutex_unlock(&rng_mutex);
530 : out:
531 : return err;
532 : }
533 : EXPORT_SYMBOL_GPL(hwrng_register);
534 :
535 0 : void hwrng_unregister(struct hwrng *rng)
536 : {
537 0 : struct hwrng *old_rng, *new_rng;
538 0 : int err;
539 :
540 0 : mutex_lock(&rng_mutex);
541 :
542 0 : old_rng = current_rng;
543 0 : list_del(&rng->list);
544 0 : if (current_rng == rng) {
545 0 : err = enable_best_rng();
546 0 : if (err) {
547 0 : drop_current_rng();
548 0 : cur_rng_set_by_user = 0;
549 : }
550 : }
551 :
552 0 : new_rng = get_current_rng_nolock();
553 0 : if (list_empty(&rng_list)) {
554 0 : mutex_unlock(&rng_mutex);
555 0 : if (hwrng_fill)
556 0 : kthread_stop(hwrng_fill);
557 : } else
558 0 : mutex_unlock(&rng_mutex);
559 :
560 0 : if (new_rng) {
561 0 : if (old_rng != new_rng)
562 0 : add_early_randomness(new_rng);
563 0 : put_rng(new_rng);
564 : }
565 :
566 0 : wait_for_completion(&rng->cleanup_done);
567 0 : }
568 : EXPORT_SYMBOL_GPL(hwrng_unregister);
569 :
570 0 : static void devm_hwrng_release(struct device *dev, void *res)
571 : {
572 0 : hwrng_unregister(*(struct hwrng **)res);
573 0 : }
574 :
575 0 : static int devm_hwrng_match(struct device *dev, void *res, void *data)
576 : {
577 0 : struct hwrng **r = res;
578 :
579 0 : if (WARN_ON(!r || !*r))
580 : return 0;
581 :
582 0 : return *r == data;
583 : }
584 :
585 0 : int devm_hwrng_register(struct device *dev, struct hwrng *rng)
586 : {
587 0 : struct hwrng **ptr;
588 0 : int error;
589 :
590 0 : ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
591 0 : if (!ptr)
592 : return -ENOMEM;
593 :
594 0 : error = hwrng_register(rng);
595 0 : if (error) {
596 0 : devres_free(ptr);
597 0 : return error;
598 : }
599 :
600 0 : *ptr = rng;
601 0 : devres_add(dev, ptr);
602 0 : return 0;
603 : }
604 : EXPORT_SYMBOL_GPL(devm_hwrng_register);
605 :
606 0 : void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
607 : {
608 0 : devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
609 0 : }
610 : EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
611 :
612 1 : static int __init hwrng_modinit(void)
613 : {
614 1 : int ret;
615 :
616 : /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
617 1 : rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
618 1 : if (!rng_buffer)
619 : return -ENOMEM;
620 :
621 1 : rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
622 1 : if (!rng_fillbuf) {
623 0 : kfree(rng_buffer);
624 0 : return -ENOMEM;
625 : }
626 :
627 1 : ret = register_miscdev();
628 1 : if (ret) {
629 0 : kfree(rng_fillbuf);
630 0 : kfree(rng_buffer);
631 : }
632 :
633 : return ret;
634 : }
635 :
636 0 : static void __exit hwrng_modexit(void)
637 : {
638 0 : mutex_lock(&rng_mutex);
639 0 : BUG_ON(current_rng);
640 0 : kfree(rng_buffer);
641 0 : kfree(rng_fillbuf);
642 0 : mutex_unlock(&rng_mutex);
643 :
644 0 : unregister_miscdev();
645 0 : }
646 :
647 : module_init(hwrng_modinit);
648 : module_exit(hwrng_modexit);
649 :
650 : MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
651 : MODULE_LICENSE("GPL");
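
For reference, a minimal sketch of a provider driver for the registration API implemented above (hwrng_register()/hwrng_unregister() and the struct hwrng callbacks this core invokes). The "example-rng" name, the module boilerplate, and the get_random_bytes() stand-in for real hardware access are illustrative assumptions, not part of this file; a real driver would read from its device in the .read callback and report an honestly measured quality.

/*
 * Hypothetical client of the hw_random core above, for illustration only.
 */
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/random.h>

/* .read callback: fill buf with up to max bytes; a real driver reads hardware here. */
static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	get_random_bytes(buf, max);	/* illustrative stand-in for device access */
	return max;
}

static struct hwrng example_rng = {
	.name    = "example-rng",
	.read    = example_rng_read,
	.quality = 1024,	/* entropy per 1024 bits; hwrng_init() caps values at 1024 */
};

static int __init example_rng_init(void)
{
	/* Sorted into rng_list by quality; may become current_rng if best. */
	return hwrng_register(&example_rng);
}

static void __exit example_rng_exit(void)
{
	hwrng_unregister(&example_rng);
}

module_init(example_rng_init);
module_exit(example_rng_exit);
MODULE_LICENSE("GPL");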