Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Functions related to mapping data to requests
4 : */
5 : #include <linux/kernel.h>
6 : #include <linux/sched/task_stack.h>
7 : #include <linux/module.h>
8 : #include <linux/bio.h>
9 : #include <linux/blkdev.h>
10 : #include <linux/uio.h>
11 :
12 : #include "blk.h"
13 :
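/*
 * Private state hung off bio->bi_private by the copy (bounce-buffer) path
 * below: a deep copy of the caller's iov_iter, including the iovecs
 * themselves, plus flags recording whether the pages were allocated here
 * (and so must be freed at completion) and whether this is a "null mapped"
 * request whose data must never be copied back to user space.
 */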
14 : struct bio_map_data {
15 : bool is_our_pages : 1;
16 : bool is_null_mapped : 1;
17 : struct iov_iter iter;
18 : struct iovec iov[];
19 : };
20 :
21 0 : static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
22 : gfp_t gfp_mask)
23 : {
24 0 : struct bio_map_data *bmd;
25 :
26 0 : if (data->nr_segs > UIO_MAXIOV)
27 : return NULL;
28 :
29 0 : bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
30 0 : if (!bmd)
31 : return NULL;
32 0 : memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
33 0 : bmd->iter = *data;
34 0 : bmd->iter.iov = bmd->iov;
35 0 : return bmd;
36 : }
37 :
38 : /**
39 : * bio_copy_from_iter - copy all pages from iov_iter to bio
40 : * @bio: The &struct bio which describes the I/O as destination
41 : * @iter: iov_iter as source
42 : *
43 : * Copy all pages from iov_iter to bio.
44 : * Returns 0 on success, or error on failure.
45 : */
46 0 : static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
47 : {
48 0 : struct bio_vec *bvec;
49 0 : struct bvec_iter_all iter_all;
50 :
51 0 : bio_for_each_segment_all(bvec, bio, iter_all) {
52 0 : ssize_t ret;
53 :
54 0 : ret = copy_page_from_iter(bvec->bv_page,
55 0 : bvec->bv_offset,
56 0 : bvec->bv_len,
57 : iter);
58 :
59 0 : if (!iov_iter_count(iter))
60 : break;
61 :
62 0 : if (ret < bvec->bv_len)
63 : return -EFAULT;
64 : }
65 :
66 : return 0;
67 : }
68 :
69 : /**
70 : * bio_copy_to_iter - copy all pages from bio to iov_iter
71 : * @bio: The &struct bio which describes the I/O as source
72 : * @iter: iov_iter as destination
73 : *
74 : * Copy all pages from bio to iov_iter.
75 : * Returns 0 on success, or error on failure.
76 : */
77 0 : static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
78 : {
79 0 : struct bio_vec *bvec;
80 0 : struct bvec_iter_all iter_all;
81 :
82 0 : bio_for_each_segment_all(bvec, bio, iter_all) {
83 0 : ssize_t ret;
84 :
85 0 : ret = copy_page_to_iter(bvec->bv_page,
86 0 : bvec->bv_offset,
87 0 : bvec->bv_len,
88 : &iter);
89 :
90 0 : if (!iov_iter_count(&iter))
91 : break;
92 :
93 0 : if (ret < bvec->bv_len)
94 : return -EFAULT;
95 : }
96 :
97 : return 0;
98 : }
99 :
100 : /**
101 : * bio_uncopy_user - finish previously mapped bio
102 : * @bio: bio being terminated
103 : *
104 : * Free pages allocated from bio_copy_user_iov() and write back data
105 : * to user space in case of a read.
106 : */
107 0 : static int bio_uncopy_user(struct bio *bio)
108 : {
109 0 : struct bio_map_data *bmd = bio->bi_private;
110 0 : int ret = 0;
111 :
112 0 : if (!bmd->is_null_mapped) {
113 : /*
114 : * If we're in a workqueue, the request is orphaned, so
115 : * don't copy into a random user address space; just free
116 : * the pages and return -EINTR so user space doesn't expect any data.
117 : */
118 0 : if (!current->mm)
119 : ret = -EINTR;
120 0 : else if (bio_data_dir(bio) == READ)
121 0 : ret = bio_copy_to_iter(bio, bmd->iter);
122 0 : if (bmd->is_our_pages)
123 0 : bio_free_pages(bio);
124 : }
125 0 : kfree(bmd);
126 0 : bio_put(bio);
127 0 : return ret;
128 : }
129 :
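/*
 * Bounce-buffered mapping: allocate pages (or take them from @map_data),
 * add them to a newly allocated bio, copy the user data in for writes, and
 * attach the bio_map_data that bio_uncopy_user() needs in order to copy
 * data back and free everything at completion time.
 */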
130 0 : static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
131 : struct iov_iter *iter, gfp_t gfp_mask)
132 : {
133 0 : struct bio_map_data *bmd;
134 0 : struct page *page;
135 0 : struct bio *bio, *bounce_bio;
136 0 : int i = 0, ret;
137 0 : int nr_pages;
138 0 : unsigned int len = iter->count;
139 0 : unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
140 :
141 0 : bmd = bio_alloc_map_data(iter, gfp_mask);
142 0 : if (!bmd)
143 : return -ENOMEM;
144 :
145 : /*
146 : * We need to do a deep copy of the iov_iter including the iovecs.
147 : * The caller-provided iov might point to an on-stack or otherwise
148 : * short-lived one.
149 : */
150 0 : bmd->is_our_pages = !map_data;
151 0 : bmd->is_null_mapped = (map_data && map_data->null_mapped);
152 :
153 0 : nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
154 :
155 0 : ret = -ENOMEM;
156 0 : bio = bio_kmalloc(gfp_mask, nr_pages);
157 0 : if (!bio)
158 0 : goto out_bmd;
159 0 : bio->bi_opf |= req_op(rq);
160 :
161 0 : if (map_data) {
162 0 : nr_pages = 1 << map_data->page_order;
163 0 : i = map_data->offset / PAGE_SIZE;
164 : }
165 0 : while (len) {
166 0 : unsigned int bytes = PAGE_SIZE;
167 :
168 0 : bytes -= offset;
169 :
170 0 : if (bytes > len)
171 : bytes = len;
172 :
173 0 : if (map_data) {
174 0 : if (i == map_data->nr_entries * nr_pages) {
175 0 : ret = -ENOMEM;
176 0 : goto cleanup;
177 : }
178 :
179 0 : page = map_data->pages[i / nr_pages];
180 0 : page += (i % nr_pages);
181 :
182 0 : i++;
183 : } else {
184 0 : page = alloc_page(rq->q->bounce_gfp | gfp_mask);
185 0 : if (!page) {
186 0 : ret = -ENOMEM;
187 0 : goto cleanup;
188 : }
189 : }
190 :
191 0 : if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
192 0 : if (!map_data)
193 0 : __free_page(page);
194 : break;
195 : }
196 :
197 0 : len -= bytes;
198 0 : offset = 0;
199 : }
200 :
201 0 : if (map_data)
202 0 : map_data->offset += bio->bi_iter.bi_size;
203 :
204 : /*
205 : * success
206 : */
207 0 : if ((iov_iter_rw(iter) == WRITE &&
208 0 : (!map_data || !map_data->null_mapped)) ||
209 0 : (map_data && map_data->from_user)) {
210 0 : ret = bio_copy_from_iter(bio, iter);
211 0 : if (ret)
212 0 : goto cleanup;
213 : } else {
214 0 : if (bmd->is_our_pages)
215 0 : zero_fill_bio(bio);
216 0 : iov_iter_advance(iter, bio->bi_iter.bi_size);
217 : }
218 :
219 0 : bio->bi_private = bmd;
220 :
221 0 : bounce_bio = bio;
222 0 : ret = blk_rq_append_bio(rq, &bounce_bio);
223 0 : if (ret)
224 0 : goto cleanup;
225 :
226 : /*
227 : * We link the bounce buffer in and could have to traverse it later, so
228 : * we have to get a ref to prevent it from being freed
229 : */
230 0 : bio_get(bounce_bio);
231 0 : return 0;
232 0 : cleanup:
233 0 : if (!map_data)
234 0 : bio_free_pages(bio);
235 0 : bio_put(bio);
236 0 : out_bmd:
237 0 : kfree(bmd);
238 0 : return ret;
239 : }
240 :
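/*
 * Zero-copy mapping: pin the user pages with iov_iter_get_pages_alloc() and
 * add them directly to the bio, honouring the queue's DMA alignment and
 * max_hw_sectors limits. The pages are released again by bio_unmap_user(),
 * which also dirties them for reads.
 */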
241 0 : static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
242 : gfp_t gfp_mask)
243 : {
244 0 : unsigned int max_sectors = queue_max_hw_sectors(rq->q);
245 0 : struct bio *bio, *bounce_bio;
246 0 : int ret;
247 0 : int j;
248 :
249 0 : if (!iov_iter_count(iter))
250 : return -EINVAL;
251 :
252 0 : bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
253 0 : if (!bio)
254 : return -ENOMEM;
255 0 : bio->bi_opf |= req_op(rq);
256 :
257 0 : while (iov_iter_count(iter)) {
258 0 : struct page **pages;
259 0 : ssize_t bytes;
260 0 : size_t offs, added = 0;
261 0 : int npages;
262 :
263 0 : bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
264 0 : if (unlikely(bytes <= 0)) {
265 0 : ret = bytes ? bytes : -EFAULT;
266 0 : goto out_unmap;
267 : }
268 :
269 0 : npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
270 :
271 0 : if (unlikely(offs & queue_dma_alignment(rq->q))) {
272 0 : ret = -EINVAL;
273 0 : j = 0;
274 : } else {
275 0 : for (j = 0; j < npages; j++) {
276 0 : struct page *page = pages[j];
277 0 : unsigned int n = PAGE_SIZE - offs;
278 0 : bool same_page = false;
279 :
280 0 : if (n > bytes)
281 0 : n = bytes;
282 :
283 0 : if (!bio_add_hw_page(rq->q, bio, page, n, offs,
284 : max_sectors, &same_page)) {
285 0 : if (same_page)
286 0 : put_page(page);
287 0 : break;
288 : }
289 :
290 0 : added += n;
291 0 : bytes -= n;
292 0 : offs = 0;
293 : }
294 0 : iov_iter_advance(iter, added);
295 : }
296 : /*
297 : * release the pages we didn't map into the bio, if any
298 : */
299 0 : while (j < npages)
300 0 : put_page(pages[j++]);
301 0 : kvfree(pages);
302 : /* couldn't stuff something into bio? */
303 0 : if (bytes)
304 : break;
305 : }
306 :
307 : /*
308 : * Subtle: if we end up needing to bounce a bio, it would normally
309 : * disappear when its bi_end_io is run. However, we need the original
310 : * bio for the unmap, so grab an extra reference to it
311 : */
312 0 : bio_get(bio);
313 :
314 0 : bounce_bio = bio;
315 0 : ret = blk_rq_append_bio(rq, &bounce_bio);
316 0 : if (ret)
317 0 : goto out_put_orig;
318 :
319 : /*
320 : * We link the bounce buffer in and could have to traverse it
321 : * later, so we have to get a ref to prevent it from being freed
322 : */
323 0 : bio_get(bounce_bio);
324 0 : return 0;
325 :
326 0 : out_put_orig:
327 0 : bio_put(bio);
328 0 : out_unmap:
329 0 : bio_release_pages(bio, false);
330 0 : bio_put(bio);
331 0 : return ret;
332 : }
333 :
334 : /**
335 : * bio_unmap_user - unmap a bio
336 : * @bio: the bio being unmapped
337 : *
338 : * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
339 : * process context.
340 : *
341 : * bio_unmap_user() may sleep.
342 : */
343 0 : static void bio_unmap_user(struct bio *bio)
344 : {
345 0 : bio_release_pages(bio, bio_data_dir(bio) == READ);
346 0 : bio_put(bio);
347 0 : bio_put(bio);
348 0 : }
349 :
350 0 : static void bio_invalidate_vmalloc_pages(struct bio *bio)
351 : {
352 : #ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
353 : if (bio->bi_private && !op_is_write(bio_op(bio))) {
354 : unsigned long i, len = 0;
355 :
356 : for (i = 0; i < bio->bi_vcnt; i++)
357 : len += bio->bi_io_vec[i].bv_len;
358 : invalidate_kernel_vmap_range(bio->bi_private, len);
359 : }
360 : #endif
361 0 : }
362 :
363 0 : static void bio_map_kern_endio(struct bio *bio)
364 : {
365 0 : bio_invalidate_vmalloc_pages(bio);
366 0 : bio_put(bio);
367 0 : }
368 :
369 : /**
370 : * bio_map_kern - map kernel address into bio
371 : * @q: the struct request_queue for the bio
372 : * @data: pointer to buffer to map
373 : * @len: length in bytes
374 : * @gfp_mask: allocation flags for bio allocation
375 : *
376 : * Map the kernel address into a bio suitable for I/O to a block
377 : * device. Returns an error pointer in case of error.
378 : */
379 0 : static struct bio *bio_map_kern(struct request_queue *q, void *data,
380 : unsigned int len, gfp_t gfp_mask)
381 : {
382 0 : unsigned long kaddr = (unsigned long)data;
383 0 : unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
384 0 : unsigned long start = kaddr >> PAGE_SHIFT;
385 0 : const int nr_pages = end - start;
386 0 : bool is_vmalloc = is_vmalloc_addr(data);
387 0 : struct page *page;
388 0 : int offset, i;
389 0 : struct bio *bio;
390 :
391 0 : bio = bio_kmalloc(gfp_mask, nr_pages);
392 0 : if (!bio)
393 0 : return ERR_PTR(-ENOMEM);
394 :
395 0 : if (is_vmalloc) {
396 0 : flush_kernel_vmap_range(data, len);
397 0 : bio->bi_private = data;
398 : }
399 :
400 0 : offset = offset_in_page(kaddr);
401 0 : for (i = 0; i < nr_pages; i++) {
402 0 : unsigned int bytes = PAGE_SIZE - offset;
403 :
404 0 : if (len <= 0)
405 : break;
406 :
407 0 : if (bytes > len)
408 : bytes = len;
409 :
410 0 : if (!is_vmalloc)
411 0 : page = virt_to_page(data);
412 : else
413 0 : page = vmalloc_to_page(data);
414 0 : if (bio_add_pc_page(q, bio, page, bytes,
415 : offset) < bytes) {
416 : /* we don't support partial mappings */
417 0 : bio_put(bio);
418 0 : return ERR_PTR(-EINVAL);
419 : }
420 :
421 0 : data += bytes;
422 0 : len -= bytes;
423 0 : offset = 0;
424 : }
425 :
426 0 : bio->bi_end_io = bio_map_kern_endio;
427 0 : return bio;
428 : }
429 :
430 2 : static void bio_copy_kern_endio(struct bio *bio)
431 : {
432 0 : bio_free_pages(bio);
433 2 : bio_put(bio);
434 0 : }
435 :
436 2 : static void bio_copy_kern_endio_read(struct bio *bio)
437 : {
438 2 : char *p = bio->bi_private;
439 2 : struct bio_vec *bvec;
440 2 : struct bvec_iter_all iter_all;
441 :
442 4 : bio_for_each_segment_all(bvec, bio, iter_all) {
443 2 : memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
444 2 : p += bvec->bv_len;
445 : }
446 :
447 2 : bio_copy_kern_endio(bio);
448 2 : }
449 :
450 : /**
451 : * bio_copy_kern - copy kernel address into bio
452 : * @q: the struct request_queue for the bio
453 : * @data: pointer to buffer to copy
454 : * @len: length in bytes
455 : * @gfp_mask: allocation flags for bio and page allocation
456 : * @reading: data direction is READ
457 : *
458 : * Copy the kernel address into a bio suitable for I/O to a block
459 : * device. Returns an error pointer in case of error.
460 : */
461 2 : static struct bio *bio_copy_kern(struct request_queue *q, void *data,
462 : unsigned int len, gfp_t gfp_mask, int reading)
463 : {
464 2 : unsigned long kaddr = (unsigned long)data;
465 2 : unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
466 2 : unsigned long start = kaddr >> PAGE_SHIFT;
467 2 : struct bio *bio;
468 2 : void *p = data;
469 2 : int nr_pages = 0;
470 :
471 : /*
472 : * Overflow, abort
473 : */
474 2 : if (end < start)
475 2 : return ERR_PTR(-EINVAL);
476 :
477 2 : nr_pages = end - start;
478 2 : bio = bio_kmalloc(gfp_mask, nr_pages);
479 2 : if (!bio)
480 2 : return ERR_PTR(-ENOMEM);
481 :
482 4 : while (len) {
483 2 : struct page *page;
484 2 : unsigned int bytes = PAGE_SIZE;
485 :
486 2 : if (bytes > len)
487 2 : bytes = len;
488 :
489 2 : page = alloc_page(q->bounce_gfp | gfp_mask);
490 2 : if (!page)
491 0 : goto cleanup;
492 :
493 2 : if (!reading)
494 0 : memcpy(page_address(page), p, bytes);
495 :
496 2 : if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
497 : break;
498 :
499 2 : len -= bytes;
500 2 : p += bytes;
501 : }
502 :
503 2 : if (reading) {
504 2 : bio->bi_end_io = bio_copy_kern_endio_read;
505 2 : bio->bi_private = data;
506 : } else {
507 0 : bio->bi_end_io = bio_copy_kern_endio;
508 : }
509 :
510 : return bio;
511 :
512 0 : cleanup:
513 0 : bio_free_pages(bio);
514 0 : bio_put(bio);
515 0 : return ERR_PTR(-ENOMEM);
516 : }
517 :
518 : /*
519 : * Append a bio to a passthrough request. Only works if the bio can be merged
520 : * into the request based on the driver constraints.
521 : */
522 2 : int blk_rq_append_bio(struct request *rq, struct bio **bio)
523 : {
524 2 : struct bio *orig_bio = *bio;
525 2 : struct bvec_iter iter;
526 2 : struct bio_vec bv;
527 2 : unsigned int nr_segs = 0;
528 :
529 2 : blk_queue_bounce(rq->q, bio);
530 :
531 4 : bio_for_each_bvec(bv, *bio, iter)
532 2 : nr_segs++;
533 :
534 2 : if (!rq->bio) {
535 2 : blk_rq_bio_prep(rq, *bio, nr_segs);
536 : } else {
537 0 : if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
538 0 : if (orig_bio != *bio) {
539 0 : bio_put(*bio);
540 0 : *bio = orig_bio;
541 : }
542 0 : return -EINVAL;
543 : }
544 :
545 0 : rq->biotail->bi_next = *bio;
546 0 : rq->biotail = *bio;
547 0 : rq->__data_len += (*bio)->bi_iter.bi_size;
548 0 : bio_crypt_free_ctx(*bio);
549 : }
550 :
551 : return 0;
552 : }
553 : EXPORT_SYMBOL(blk_rq_append_bio);
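/*
 * Note the calling convention: blk_queue_bounce() may substitute a bounce
 * clone for the caller's bio, so callers pass a pointer to their bio pointer
 * and keep a reference of their own, as bio_copy_user_iov() above does:
 *
 *	bounce_bio = bio;
 *	ret = blk_rq_append_bio(rq, &bounce_bio);
 *	if (ret)
 *		goto cleanup;
 *	bio_get(bounce_bio);
 */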
554 :
555 : /**
556 : * blk_rq_map_user_iov - map user data to a request, for passthrough requests
557 : * @q: request queue where request should be inserted
558 : * @rq: request to map data to
559 : * @map_data: pointer to the rq_map_data holding pages (if necessary)
560 : * @iter: iovec iterator
561 : * @gfp_mask: memory allocation flags
562 : *
563 : * Description:
564 : * Data will be mapped directly for zero copy I/O, if possible. Otherwise
565 : * a kernel bounce buffer is used.
566 : *
567 : * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
568 : * still in process context.
569 : *
570 : * Note: The mapped bio may need to be bounced through blk_queue_bounce()
571 : * before being submitted to the device, as pages mapped may be out of
572 : * reach. It's the caller's responsibility to make sure this happens. The
573 : * original bio must be passed back in to blk_rq_unmap_user() for proper
574 : * unmapping.
575 : */
576 0 : int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
577 : struct rq_map_data *map_data,
578 : const struct iov_iter *iter, gfp_t gfp_mask)
579 : {
580 0 : bool copy = false;
581 0 : unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
582 0 : struct bio *bio = NULL;
583 0 : struct iov_iter i;
584 0 : int ret = -EINVAL;
585 :
586 0 : if (!iter_is_iovec(iter))
587 0 : goto fail;
588 :
589 0 : if (map_data)
590 : copy = true;
591 0 : else if (iov_iter_alignment(iter) & align)
592 : copy = true;
593 0 : else if (queue_virt_boundary(q))
594 0 : copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
595 :
596 0 : i = *iter;
597 0 : do {
598 0 : if (copy)
599 0 : ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
600 : else
601 0 : ret = bio_map_user_iov(rq, &i, gfp_mask);
602 0 : if (ret)
603 0 : goto unmap_rq;
604 0 : if (!bio)
605 0 : bio = rq->bio;
606 0 : } while (iov_iter_count(&i));
607 :
608 : return 0;
609 :
610 0 : unmap_rq:
611 0 : blk_rq_unmap_user(bio);
612 0 : fail:
613 0 : rq->bio = NULL;
614 0 : return ret;
615 : }
616 : EXPORT_SYMBOL(blk_rq_map_user_iov);
617 :
618 0 : int blk_rq_map_user(struct request_queue *q, struct request *rq,
619 : struct rq_map_data *map_data, void __user *ubuf,
620 : unsigned long len, gfp_t gfp_mask)
621 : {
622 0 : struct iovec iov;
623 0 : struct iov_iter i;
624 0 : int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
625 :
626 0 : if (unlikely(ret < 0))
627 : return ret;
628 :
629 0 : return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
630 : }
631 : EXPORT_SYMBOL(blk_rq_map_user);
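/*
 * Illustrative sketch only (not from this file): a typical passthrough
 * caller maps a user buffer, remembers the original bio, submits the
 * request, and unmaps afterwards. The function and variable names are made
 * up, the submission step is elided because blk_execute_rq()'s signature
 * differs between kernel versions, and error handling is abbreviated;
 * everything used here is declared in <linux/blkdev.h>.
 *
 *	static int example_read_to_user(struct request_queue *q,
 *					void __user *ubuf, unsigned long len)
 *	{
 *		struct request *rq;
 *		struct bio *orig_bio;
 *		int ret;
 *
 *		rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *		if (IS_ERR(rq))
 *			return PTR_ERR(rq);
 *
 *		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *		if (ret)
 *			goto out_put;
 *		orig_bio = rq->bio;	(save it: completion may change rq->bio)
 *
 *		(submit rq here and wait for it to complete)
 *
 *		ret = blk_rq_unmap_user(orig_bio);
 *	out_put:
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *
 * The same lifecycle applies to blk_rq_map_user_iov(); only the source of
 * the user memory (an iov_iter instead of a single buffer) differs.
 */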
632 :
633 : /**
634 : * blk_rq_unmap_user - unmap a request with user data
635 : * @bio: start of bio list
636 : *
637 : * Description:
638 : * Unmap a request previously mapped by blk_rq_map_user(). The caller must
639 : * supply the original rq->bio from the blk_rq_map_user() return, since
640 : * the I/O completion may have changed rq->bio.
641 : */
642 0 : int blk_rq_unmap_user(struct bio *bio)
643 : {
644 0 : struct bio *mapped_bio;
645 0 : int ret = 0, ret2;
646 :
647 0 : while (bio) {
648 0 : mapped_bio = bio;
649 0 : if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
650 0 : mapped_bio = bio->bi_private;
651 :
652 0 : if (bio->bi_private) {
653 0 : ret2 = bio_uncopy_user(mapped_bio);
654 0 : if (ret2 && !ret)
655 0 : ret = ret2;
656 : } else {
657 0 : bio_unmap_user(mapped_bio);
658 : }
659 :
660 0 : mapped_bio = bio;
661 0 : bio = bio->bi_next;
662 0 : bio_put(mapped_bio);
663 : }
664 :
665 0 : return ret;
666 : }
667 : EXPORT_SYMBOL(blk_rq_unmap_user);
668 :
669 : /**
670 : * blk_rq_map_kern - map kernel data to a request, for passthrough requests
671 : * @q: request queue where request should be inserted
672 : * @rq: request to fill
673 : * @kbuf: the kernel buffer
674 : * @len: length of user data
675 : * @gfp_mask: memory allocation flags
676 : *
677 : * Description:
678 : * Data will be mapped directly if possible. Otherwise a bounce
679 : * buffer is used. Can be called multiple times to append multiple
680 : * buffers.
681 : */
682 2 : int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
683 : unsigned int len, gfp_t gfp_mask)
684 : {
685 2 : int reading = rq_data_dir(rq) == READ;
686 2 : unsigned long addr = (unsigned long) kbuf;
687 2 : struct bio *bio, *orig_bio;
688 2 : int ret;
689 :
690 2 : if (len > (queue_max_hw_sectors(q) << 9))
691 : return -EINVAL;
692 2 : if (!len || !kbuf)
693 : return -EINVAL;
694 :
695 2 : if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
696 2 : bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
697 : else
698 0 : bio = bio_map_kern(q, kbuf, len, gfp_mask);
699 :
700 2 : if (IS_ERR(bio))
701 0 : return PTR_ERR(bio);
702 :
703 2 : bio->bi_opf &= ~REQ_OP_MASK;
704 2 : bio->bi_opf |= req_op(rq);
705 :
706 2 : orig_bio = bio;
707 2 : ret = blk_rq_append_bio(rq, &bio);
708 2 : if (unlikely(ret)) {
709 : /* request is too big */
710 0 : bio_put(orig_bio);
711 0 : return ret;
712 : }
713 :
714 : return 0;
715 : }
716 : EXPORT_SYMBOL(blk_rq_map_kern);
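/*
 * Illustrative sketch only (not from this file): mapping a kernel buffer for
 * a passthrough command, the pattern used by SCSI's scsi_execute() and
 * similar callers. Names are made up and the submission step is elided, as
 * above; no explicit unmap is needed because the bio is completed and freed
 * together with the request.
 *
 *	static int example_kern_io(struct request_queue *q, void *buf,
 *				   unsigned int buflen)
 *	{
 *		struct request *rq;
 *		int ret = 0;
 *
 *		rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *		if (IS_ERR(rq))
 *			return PTR_ERR(rq);
 *
 *		if (buflen) {
 *			ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_NOIO);
 *			if (ret)
 *				goto out_put;
 *		}
 *
 *		(fill in the driver-specific command, submit rq, wait)
 *
 *	out_put:
 *		blk_put_request(rq);
 *		return ret;
 *	}
 */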
|