LCOV - code coverage report
Current view: top level - block - blk-merge.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit   Total   Coverage
Lines:           230     436     52.8 %
Functions:        23      39     59.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Functions related to segment and merge handling
       4             :  */
       5             : #include <linux/kernel.h>
       6             : #include <linux/module.h>
       7             : #include <linux/bio.h>
       8             : #include <linux/blkdev.h>
       9             : #include <linux/scatterlist.h>
      10             : 
      11             : #include <trace/events/block.h>
      12             : 
      13             : #include "blk.h"
      14             : #include "blk-rq-qos.h"
      15             : 
      16        5486 : static inline bool bio_will_gap(struct request_queue *q,
      17             :                 struct request *prev_rq, struct bio *prev, struct bio *next)
      18             : {
      19        5486 :         struct bio_vec pb, nb;
      20             : 
      21        5486 :         if (!bio_has_data(prev) || !queue_virt_boundary(q))
      22             :                 return false;
      23             : 
      24             :         /*
      25             :          * Don't merge if the 1st bio starts with non-zero offset, otherwise it
      26             :          * is quite difficult to respect the sg gap limit.  We work hard to
      27             :          * merge a huge number of small single bios in case of mkfs.
      28             :          */
      29           0 :         if (prev_rq)
      30           0 :                 bio_get_first_bvec(prev_rq->bio, &pb);
      31             :         else
      32           0 :                 bio_get_first_bvec(prev, &pb);
      33           0 :         if (pb.bv_offset & queue_virt_boundary(q))
      34             :                 return true;
      35             : 
      36             :         /*
      37             :          * We don't need to worry about the situation that the merged segment
      38             :          * ends in unaligned virt boundary:
      39             :          *
      40             :          * - if 'pb' ends aligned, the merged segment ends aligned
      41             :  * - if 'pb' ends unaligned, the next bio must consist of
      42             :  *   a single bvec ('nb'), otherwise 'nb' cannot be
      43             :  *   merged with 'pb'
      44             :          */
      45           0 :         bio_get_last_bvec(prev, &pb);
      46           0 :         bio_get_first_bvec(next, &nb);
      47           0 :         if (biovec_phys_mergeable(q, &pb, &nb))
      48             :                 return false;
      49           0 :         return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
      50             : }
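
The gap test above reduces to mask arithmetic: two bvecs may share a segment only if the first one ends on the queue's virt boundary and the second one starts on it. Below is a minimal userspace model of that rule, mirroring the check done by __bvec_gap_to_prev() as used here; the struct, helper name and values are illustrative only, not kernel API.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for a bio_vec: only offset and length matter here. */
	struct vec { unsigned int offset; unsigned int len; };

	/*
	 * A gap exists (so the bvecs must not share a segment) unless the
	 * previous vector ends on the virt boundary and the next one starts
	 * on it.  'boundary_mask' plays the role of queue_virt_boundary(q),
	 * e.g. 4095 for a device that cannot cross 4 KiB virtual boundaries
	 * inside one segment.
	 */
	static bool gap_to_prev(unsigned long boundary_mask,
				const struct vec *prev, unsigned int next_offset)
	{
		return ((prev->offset + prev->len) & boundary_mask) ||
		       (next_offset & boundary_mask);
	}

	int main(void)
	{
		struct vec prev = { .offset = 0, .len = 4096 };

		/* ends aligned, next starts aligned: no gap, merge allowed */
		printf("%d\n", gap_to_prev(4095, &prev, 0));	/* prints 0 */
		/* next starts 512 bytes into the boundary window: gap, no merge */
		printf("%d\n", gap_to_prev(4095, &prev, 512));	/* prints 1 */
		return 0;
	}
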
      51             : 
      52        5387 : static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
      53             : {
      54        5387 :         return bio_will_gap(req->q, req, req->biotail, bio);
      55             : }
      56             : 
      57          99 : static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
      58             : {
      59          99 :         return bio_will_gap(req->q, NULL, bio, req->bio);
      60             : }
      61             : 
      62           0 : static struct bio *blk_bio_discard_split(struct request_queue *q,
      63             :                                          struct bio *bio,
      64             :                                          struct bio_set *bs,
      65             :                                          unsigned *nsegs)
      66             : {
      67           0 :         unsigned int max_discard_sectors, granularity;
      68           0 :         int alignment;
      69           0 :         sector_t tmp;
      70           0 :         unsigned split_sectors;
      71             : 
      72           0 :         *nsegs = 1;
      73             : 
      74             :         /* Zero-sector (unknown) and one-sector granularities are the same.  */
      75           0 :         granularity = max(q->limits.discard_granularity >> 9, 1U);
      76             : 
      77           0 :         max_discard_sectors = min(q->limits.max_discard_sectors,
      78             :                         bio_allowed_max_sectors(q));
      79           0 :         max_discard_sectors -= max_discard_sectors % granularity;
      80             : 
      81           0 :         if (unlikely(!max_discard_sectors)) {
      82             :                 /* XXX: warn */
      83             :                 return NULL;
      84             :         }
      85             : 
      86           0 :         if (bio_sectors(bio) <= max_discard_sectors)
      87             :                 return NULL;
      88             : 
      89           0 :         split_sectors = max_discard_sectors;
      90             : 
      91             :         /*
      92             :          * If the next starting sector would be misaligned, stop the discard at
      93             :          * the previous aligned sector.
      94             :          */
      95           0 :         alignment = (q->limits.discard_alignment >> 9) % granularity;
      96             : 
      97           0 :         tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
      98           0 :         tmp = sector_div(tmp, granularity);
      99             : 
     100           0 :         if (split_sectors > tmp)
     101           0 :                 split_sectors -= tmp;
     102             : 
     103           0 :         return bio_split(bio, split_sectors, GFP_NOIO, bs);
     104             : }
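
The discard split above is pure sector arithmetic: cap the bio at max_discard_sectors rounded down to the discard granularity, then trim so the remainder starts on an aligned sector. A standalone worked example of that calculation is sketched below; it is plain userspace C, sector_div() is modelled with the % operator, and the numbers are invented for illustration.

	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* How many sectors of the bio go into the first (split-off) discard. */
	static unsigned int discard_split_sectors(sector_t bi_sector,
						  unsigned int bio_sectors,
						  unsigned int max_discard_sectors,
						  unsigned int granularity,      /* in sectors */
						  unsigned int discard_alignment) /* in sectors */
	{
		unsigned int split_sectors, alignment;
		sector_t tmp;

		max_discard_sectors -= max_discard_sectors % granularity;
		if (!max_discard_sectors || bio_sectors <= max_discard_sectors)
			return bio_sectors;		/* no split needed */

		split_sectors = max_discard_sectors;

		/* stop at the previous aligned sector so the remainder starts aligned */
		alignment = discard_alignment % granularity;
		tmp = bi_sector + split_sectors - alignment;
		tmp = tmp % granularity;		/* sector_div() in the kernel */
		if (split_sectors > tmp)
			split_sectors -= tmp;

		return split_sectors;
	}

	int main(void)
	{
		/*
		 * 8 MiB discard starting at sector 10, 4 MiB max per discard,
		 * 1 MiB granularity (2048 sectors), no extra alignment offset.
		 * Prints 8182: the split ends on sector 8192, a granule boundary.
		 */
		printf("%u\n", discard_split_sectors(10, 16384, 8192, 2048, 0));
		return 0;
	}
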
     105             : 
     106           0 : static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
     107             :                 struct bio *bio, struct bio_set *bs, unsigned *nsegs)
     108             : {
     109           0 :         *nsegs = 0;
     110             : 
     111           0 :         if (!q->limits.max_write_zeroes_sectors)
     112             :                 return NULL;
     113             : 
     114           0 :         if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
     115             :                 return NULL;
     116             : 
     117           0 :         return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
     118             : }
     119             : 
     120           0 : static struct bio *blk_bio_write_same_split(struct request_queue *q,
     121             :                                             struct bio *bio,
     122             :                                             struct bio_set *bs,
     123             :                                             unsigned *nsegs)
     124             : {
     125           0 :         *nsegs = 1;
     126             : 
     127           0 :         if (!q->limits.max_write_same_sectors)
     128             :                 return NULL;
     129             : 
     130           0 :         if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
     131             :                 return NULL;
     132             : 
     133           0 :         return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
     134             : }
     135             : 
     136             : /*
     137             :  * Return the maximum number of sectors from the start of a bio that may be
     138             :  * submitted as a single request to a block device. If enough sectors remain,
     139             :  * align the end to the physical block size. Otherwise align the end to the
     140             :  * logical block size. This approach minimizes the number of non-aligned
     141             :  * requests that are submitted to a block device if the start of a bio is not
     142             :  * aligned to a physical block boundary.
     143             :  */
     144        1464 : static inline unsigned get_max_io_size(struct request_queue *q,
     145             :                                        struct bio *bio)
     146             : {
     147        1464 :         unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
     148        1464 :         unsigned max_sectors = sectors;
     149        1464 :         unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
     150        1464 :         unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
     151        1464 :         unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
     152             : 
     153        1464 :         max_sectors += start_offset;
     154        1464 :         max_sectors &= ~(pbs - 1);
     155        1464 :         if (max_sectors > start_offset)
     156        1464 :                 return max_sectors - start_offset;
     157             : 
     158           0 :         return sectors & ~(lbs - 1);
     159             : }
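
The rounding in get_max_io_size() is easiest to see with concrete numbers: the end of the allowed range is pushed down to a physical-block boundary when possible, and only falls back to logical-block alignment when that would leave nothing. A hedged userspace sketch of the same arithmetic follows; all sizes are in 512-byte sectors and the example values are invented.

	#include <stdio.h>

	/*
	 * sectors: queue limit for this starting sector
	 * pbs/lbs: physical and logical block size, both in sectors, powers of two
	 * start:   starting sector of the bio
	 */
	static unsigned int max_io_size(unsigned int sectors, unsigned int pbs,
					unsigned int lbs, unsigned long long start)
	{
		unsigned int start_offset = start & (pbs - 1);
		unsigned int max_sectors = sectors + start_offset;

		max_sectors &= ~(pbs - 1);	/* round the end down to a physical block */
		if (max_sectors > start_offset)
			return max_sectors - start_offset;

		return sectors & ~(lbs - 1);	/* fall back to logical block alignment */
	}

	int main(void)
	{
		/* 4 KiB physical blocks (8 sectors), 512 B logical blocks (1 sector) */
		printf("%u\n", max_io_size(256, 8, 1, 0));	/* aligned start: 256 */
		printf("%u\n", max_io_size(256, 8, 1, 3));	/* unaligned start: 253,
								   end lands on a 4 KiB boundary */
		return 0;
	}
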
     160             : 
     161        2660 : static inline unsigned get_max_segment_size(const struct request_queue *q,
     162             :                                             struct page *start_page,
     163             :                                             unsigned long offset)
     164             : {
     165        2660 :         unsigned long mask = queue_segment_boundary(q);
     166             : 
     167        2660 :         offset = mask & (page_to_phys(start_page) + offset);
     168             : 
     169             :         /*
      170             :          * An overflow may be triggered if the page's physical address is zero
      171             :          * on a 32-bit arch; use the queue's max segment size when that happens.
     172             :          */
     173        2660 :         return min_not_zero(mask - offset + 1,
     174             :                         (unsigned long)queue_max_segment_size(q));
     175             : }
     176             : 
     177             : /**
     178             :  * bvec_split_segs - verify whether or not a bvec should be split in the middle
     179             :  * @q:        [in] request queue associated with the bio associated with @bv
     180             :  * @bv:       [in] bvec to examine
     181             :  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
     182             :  *            by the number of segments from @bv that may be appended to that
     183             :  *            bio without exceeding @max_segs
     184             :  * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
     185             :  *            by the number of sectors from @bv that may be appended to that
     186             :  *            bio without exceeding @max_sectors
     187             :  * @max_segs: [in] upper bound for *@nsegs
     188             :  * @max_sectors: [in] upper bound for *@sectors
     189             :  *
     190             :  * When splitting a bio, it can happen that a bvec is encountered that is too
     191             :  * big to fit in a single segment and hence that it has to be split in the
     192             :  * middle. This function verifies whether or not that should happen. The value
     193             :  * %true is returned if and only if appending the entire @bv to a bio with
     194             :  * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
     195             :  * the block driver.
     196             :  */
     197        1330 : static bool bvec_split_segs(const struct request_queue *q,
     198             :                             const struct bio_vec *bv, unsigned *nsegs,
     199             :                             unsigned *sectors, unsigned max_segs,
     200             :                             unsigned max_sectors)
     201             : {
     202        1330 :         unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
     203        1330 :         unsigned len = min(bv->bv_len, max_len);
     204        1330 :         unsigned total_len = 0;
     205        1330 :         unsigned seg_size = 0;
     206             : 
     207        2660 :         while (len && *nsegs < max_segs) {
     208        1330 :                 seg_size = get_max_segment_size(q, bv->bv_page,
     209        1330 :                                                 bv->bv_offset + total_len);
     210        1330 :                 seg_size = min(seg_size, len);
     211             : 
     212        1330 :                 (*nsegs)++;
     213        1330 :                 total_len += seg_size;
     214        1330 :                 len -= seg_size;
     215             : 
     216        1330 :                 if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
     217             :                         break;
     218             :         }
     219             : 
     220        1330 :         *sectors += total_len >> 9;
     221             : 
     222             :         /* tell the caller to split the bvec if it is too big to fit */
     223        1330 :         return len > 0 || bv->bv_len > max_len;
     224             : }
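
bvec_split_segs() walks one large bvec and carves it into pieces that respect both the segment boundary mask and the maximum segment size, with each piece capped by get_max_segment_size(). The standalone model below reproduces the core of that loop with made-up queue limits to show how a multi-page bvec turns into several segments; it is an illustration, not kernel code, and it omits the max_segs/max_sectors bookkeeping and the virt-boundary early exit.

	#include <stdio.h>

	struct limits {
		unsigned long seg_boundary_mask;	/* queue_segment_boundary() */
		unsigned int  max_segment_size;		/* queue_max_segment_size() */
	};

	/* Largest segment that may start at physical address 'phys'. */
	static unsigned int max_seg_size(const struct limits *lim,
					 unsigned long long phys)
	{
		unsigned long off = lim->seg_boundary_mask & phys;
		unsigned long len = lim->seg_boundary_mask - off + 1;

		return len < lim->max_segment_size ?
			(unsigned int)len : lim->max_segment_size;
	}

	/* Count how many segments a contiguous bvec of 'len' bytes at 'phys' needs. */
	static unsigned int count_segs(const struct limits *lim,
				       unsigned long long phys, unsigned int len)
	{
		unsigned int nsegs = 0;

		while (len) {
			unsigned int seg = max_seg_size(lim, phys);

			if (seg > len)
				seg = len;
			nsegs++;
			phys += seg;
			len -= seg;
		}
		return nsegs;
	}

	int main(void)
	{
		/* 64 KiB max segment size, segments must not cross 64 KiB boundaries */
		struct limits lim = { .seg_boundary_mask = 0xffff,
				      .max_segment_size = 0x10000 };

		/* A 256 KiB bvec starting 4 KiB into a boundary window: 5 segments. */
		printf("%u\n", count_segs(&lim, 0x1000, 256 * 1024));
		return 0;
	}
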
     225             : 
     226             : /**
     227             :  * blk_bio_segment_split - split a bio in two bios
     228             :  * @q:    [in] request queue pointer
     229             :  * @bio:  [in] bio to be split
     230             :  * @bs:   [in] bio set to allocate the clone from
     231             :  * @segs: [out] number of segments in the bio with the first half of the sectors
     232             :  *
     233             :  * Clone @bio, update the bi_iter of the clone to represent the first sectors
     234             :  * of @bio and update @bio->bi_iter to represent the remaining sectors. The
     235             :  * following is guaranteed for the cloned bio:
     236             :  * - That it has at most get_max_io_size(@q, @bio) sectors.
     237             :  * - That it has at most queue_max_segments(@q) segments.
     238             :  *
     239             :  * Except for discard requests the cloned bio will point at the bi_io_vec of
     240             :  * the original bio. It is the responsibility of the caller to ensure that the
     241             :  * original bio is not freed before the cloned bio. The caller is also
     242             :  * responsible for ensuring that @bs is only destroyed after processing of the
     243             :  * split bio has finished.
     244             :  */
     245        1464 : static struct bio *blk_bio_segment_split(struct request_queue *q,
     246             :                                          struct bio *bio,
     247             :                                          struct bio_set *bs,
     248             :                                          unsigned *segs)
     249             : {
     250        1464 :         struct bio_vec bv, bvprv, *bvprvp = NULL;
     251        1464 :         struct bvec_iter iter;
     252        1464 :         unsigned nsegs = 0, sectors = 0;
     253        1464 :         const unsigned max_sectors = get_max_io_size(q, bio);
     254        1464 :         const unsigned max_segs = queue_max_segments(q);
     255             : 
     256        6779 :         bio_for_each_bvec(bv, bio, iter) {
     257             :                 /*
     258             :                  * If the queue doesn't support SG gaps and adding this
     259             :                  * offset would create a gap, disallow it.
     260             :                  */
     261        5315 :                 if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
     262           0 :                         goto split;
     263             : 
     264        5315 :                 if (nsegs < max_segs &&
     265        5315 :                     sectors + (bv.bv_len >> 9) <= max_sectors &&
     266        5315 :                     bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
     267        3985 :                         nsegs++;
     268        3985 :                         sectors += bv.bv_len >> 9;
     269        1330 :                 } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
     270             :                                          max_sectors)) {
     271           0 :                         goto split;
     272             :                 }
     273             : 
     274        5315 :                 bvprv = bv;
     275        5315 :                 bvprvp = &bvprv;
     276             :         }
     277             : 
     278        1464 :         *segs = nsegs;
     279        1464 :         return NULL;
     280           0 : split:
     281           0 :         *segs = nsegs;
     282             : 
     283             :         /*
      284             :          * Bio splitting may cause subtle trouble such as a hang when doing
      285             :          * sync iopoll in the direct I/O path. Since the performance gain of
      286             :          * iopoll for big I/O can be trivial, disable iopoll when a split is needed.
     287             :          */
     288           0 :         bio->bi_opf &= ~REQ_HIPRI;
     289             : 
     290           0 :         return bio_split(bio, sectors, GFP_NOIO, bs);
     291             : }
     292             : 
     293             : /**
     294             :  * __blk_queue_split - split a bio and submit the second half
     295             :  * @bio:     [in, out] bio to be split
     296             :  * @nr_segs: [out] number of segments in the first bio
     297             :  *
     298             :  * Split a bio into two bios, chain the two bios, submit the second half and
     299             :  * store a pointer to the first half in *@bio. If the second bio is still too
     300             :  * big it will be split by a recursive call to this function. Since this
     301             :  * function may allocate a new bio from q->bio_split, it is the responsibility
     302             :  * of the caller to ensure that q->bio_split is only released after processing
     303             :  * of the split bio has finished.
     304             :  */
     305        8736 : void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
     306             : {
     307        8736 :         struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
     308        8736 :         struct bio *split = NULL;
     309             : 
     310        8736 :         switch (bio_op(*bio)) {
     311           0 :         case REQ_OP_DISCARD:
     312             :         case REQ_OP_SECURE_ERASE:
     313           0 :                 split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
     314           0 :                 break;
     315           0 :         case REQ_OP_WRITE_ZEROES:
     316           0 :                 split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
     317             :                                 nr_segs);
     318           0 :                 break;
     319           0 :         case REQ_OP_WRITE_SAME:
     320           0 :                 split = blk_bio_write_same_split(q, *bio, &q->bio_split,
     321             :                                 nr_segs);
     322           0 :                 break;
     323        8736 :         default:
     324             :                 /*
      325             :                  * All drivers must accept single-segment bios that are <=
     326             :                  * PAGE_SIZE.  This is a quick and dirty check that relies on
     327             :                  * the fact that bi_io_vec[0] is always valid if a bio has data.
     328             :                  * The check might lead to occasional false negatives when bios
     329             :                  * are cloned, but compared to the performance impact of cloned
     330             :                  * bios themselves the loop below doesn't matter anyway.
     331             :                  */
     332        8736 :                 if (!q->limits.chunk_sectors &&
     333        8736 :                     (*bio)->bi_vcnt == 1 &&
     334        8014 :                     ((*bio)->bi_io_vec[0].bv_len +
     335        8014 :                      (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
     336        7273 :                         *nr_segs = 1;
     337        7273 :                         break;
     338             :                 }
     339        1463 :                 split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
     340        1463 :                 break;
     341             :         }
     342             : 
     343        8737 :         if (split) {
      344             :                 /* there is no chance to merge the split bio */
     345           0 :                 split->bi_opf |= REQ_NOMERGE;
     346             : 
     347           0 :                 bio_chain(split, *bio);
     348           0 :                 trace_block_split(split, (*bio)->bi_iter.bi_sector);
     349           0 :                 submit_bio_noacct(*bio);
     350           0 :                 *bio = split;
     351             :         }
     352        8737 : }
     353             : 
     354             : /**
     355             :  * blk_queue_split - split a bio and submit the second half
     356             :  * @bio: [in, out] bio to be split
     357             :  *
      358             :  * Split a bio into two bios, chain the two bios, submit the second half and
     359             :  * store a pointer to the first half in *@bio. Since this function may allocate
     360             :  * a new bio from q->bio_split, it is the responsibility of the caller to ensure
     361             :  * that q->bio_split is only released after processing of the split bio has
     362             :  * finished.
     363             :  */
     364           0 : void blk_queue_split(struct bio **bio)
     365             : {
     366           0 :         unsigned int nr_segs;
     367             : 
     368           0 :         __blk_queue_split(bio, &nr_segs);
     369           0 : }
     370             : EXPORT_SYMBOL(blk_queue_split);
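
blk_queue_split() is exported for bio-based drivers: they call it at the top of their ->submit_bio() handler so any oversized bio is cut down to the queue limits before the driver looks at it (the second half is chained and resubmitted internally, as described above). A hedged sketch of such a call site follows; the driver name and the rest of the handler are hypothetical, only the split call reflects this file.

	#include <linux/module.h>
	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Hypothetical bio-based driver: only the split call is the point here. */
	static blk_qc_t mydrv_submit_bio(struct bio *bio)
	{
		/*
		 * May clone the front of the bio, submit the remainder via
		 * submit_bio_noacct() and re-point 'bio' at the first half.
		 */
		blk_queue_split(&bio);

		/* ... queue 'bio' to the hardware, then complete it ... */
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	static const struct block_device_operations mydrv_fops = {
		.owner		= THIS_MODULE,
		.submit_bio	= mydrv_submit_bio,
	};
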
     371             : 
     372           0 : unsigned int blk_recalc_rq_segments(struct request *rq)
     373             : {
     374           0 :         unsigned int nr_phys_segs = 0;
     375           0 :         unsigned int nr_sectors = 0;
     376           0 :         struct req_iterator iter;
     377           0 :         struct bio_vec bv;
     378             : 
     379           0 :         if (!rq->bio)
     380             :                 return 0;
     381             : 
     382           0 :         switch (bio_op(rq->bio)) {
     383             :         case REQ_OP_DISCARD:
     384             :         case REQ_OP_SECURE_ERASE:
     385             :         case REQ_OP_WRITE_ZEROES:
     386             :                 return 0;
     387           0 :         case REQ_OP_WRITE_SAME:
     388           0 :                 return 1;
     389             :         }
     390             : 
     391           0 :         rq_for_each_bvec(bv, rq, iter)
     392           0 :                 bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
     393             :                                 UINT_MAX, UINT_MAX);
     394           0 :         return nr_phys_segs;
     395             : }
     396             : 
     397       10720 : static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
     398             :                 struct scatterlist *sglist)
     399             : {
     400       10720 :         if (!*sg)
     401             :                 return sglist;
     402             : 
     403             :         /*
     404             :          * If the driver previously mapped a shorter list, we could see a
     405             :          * termination bit prematurely unless it fully inits the sg table
     406             :          * on each mapping. We KNOW that there must be more entries here
     407             :          * or the driver would be buggy, so force clear the termination bit
     408             :          * to avoid doing a full sg_init_table() in drivers for each command.
     409             :          */
     410        7538 :         sg_unmark_end(*sg);
     411        7538 :         return sg_next(*sg);
     412             : }
     413             : 
     414        1330 : static unsigned blk_bvec_map_sg(struct request_queue *q,
     415             :                 struct bio_vec *bvec, struct scatterlist *sglist,
     416             :                 struct scatterlist **sg)
     417             : {
     418        1330 :         unsigned nbytes = bvec->bv_len;
     419        1330 :         unsigned nsegs = 0, total = 0;
     420             : 
     421        2660 :         while (nbytes > 0) {
     422        1330 :                 unsigned offset = bvec->bv_offset + total;
     423        1330 :                 unsigned len = min(get_max_segment_size(q, bvec->bv_page,
     424             :                                         offset), nbytes);
     425        1330 :                 struct page *page = bvec->bv_page;
     426             : 
     427             :                 /*
     428             :                  * Unfortunately a fair number of drivers barf on scatterlists
     429             :                  * that have an offset larger than PAGE_SIZE, despite other
     430             :                  * subsystems dealing with that invariant just fine.  For now
     431             :                  * stick to the legacy format where we never present those from
     432             :                  * the block layer, but the code below should be removed once
     433             :                  * these offenders (mostly MMC/SD drivers) are fixed.
     434             :                  */
     435        1330 :                 page += (offset >> PAGE_SHIFT);
     436        1330 :                 offset &= ~PAGE_MASK;
     437             : 
     438        1330 :                 *sg = blk_next_sg(sg, sglist);
     439        1330 :                 sg_set_page(*sg, page, len, offset);
     440             : 
     441        1330 :                 total += len;
     442        1330 :                 nbytes -= len;
     443        1330 :                 nsegs++;
     444             :         }
     445             : 
     446        1330 :         return nsegs;
     447             : }
     448             : 
     449        9390 : static inline int __blk_bvec_map_sg(struct bio_vec bv,
     450             :                 struct scatterlist *sglist, struct scatterlist **sg)
     451             : {
     452        9390 :         *sg = blk_next_sg(sg, sglist);
     453        9390 :         sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
     454        9390 :         return 1;
     455             : }
     456             : 
     457             : /* only try to merge bvecs into one sg if they are from two bios */
     458             : static inline bool
     459        5485 : __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
     460             :                            struct bio_vec *bvprv, struct scatterlist **sg)
     461             : {
     462             : 
     463        5485 :         int nbytes = bvec->bv_len;
     464             : 
     465        5485 :         if (!*sg)
     466             :                 return false;
     467             : 
     468        5485 :         if ((*sg)->length + nbytes > queue_max_segment_size(q))
     469             :                 return false;
     470             : 
     471        5485 :         if (!biovec_phys_mergeable(q, bvprv, bvec))
     472             :                 return false;
     473             : 
     474        1870 :         (*sg)->length += nbytes;
     475             : 
     476        1870 :         return true;
     477             : }
     478             : 
     479        3181 : static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
     480             :                              struct scatterlist *sglist,
     481             :                              struct scatterlist **sg)
     482             : {
     483        3181 :         struct bio_vec bvec, bvprv = { NULL };
     484        3181 :         struct bvec_iter iter;
     485        3181 :         int nsegs = 0;
     486        3181 :         bool new_bio = false;
     487             : 
     488       11848 :         for_each_bio(bio) {
     489       21257 :                 bio_for_each_bvec(bvec, bio, iter) {
     490             :                         /*
     491             :                          * Only try to merge bvecs from two bios given we
     492             :                          * have done bio internal merge when adding pages
     493             :                          * to bio
     494             :                          */
     495       18075 :                         if (new_bio &&
     496        5485 :                             __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
     497        1870 :                                 goto next_bvec;
     498             : 
     499       10720 :                         if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
     500        9390 :                                 nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
     501             :                         else
     502        1330 :                                 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
     503       12590 :  next_bvec:
     504       12590 :                         new_bio = false;
     505             :                 }
     506        8667 :                 if (likely(bio->bi_iter.bi_size)) {
     507        8667 :                         bvprv = bvec;
     508        8667 :                         new_bio = true;
     509             :                 }
     510             :         }
     511             : 
     512        3181 :         return nsegs;
     513             : }
     514             : 
     515             : /*
     516             :  * map a request to scatterlist, return number of sg entries setup. Caller
     517             :  * must make sure sg can hold rq->nr_phys_segments entries
     518             :  */
     519        3372 : int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
     520             :                 struct scatterlist *sglist, struct scatterlist **last_sg)
     521             : {
     522        3372 :         int nsegs = 0;
     523             : 
     524        3372 :         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
     525           0 :                 nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
     526        3372 :         else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
     527           0 :                 nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
     528        3372 :         else if (rq->bio)
     529        3181 :                 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
     530             : 
     531        3372 :         if (*last_sg)
     532        3181 :                 sg_mark_end(*last_sg);
     533             : 
     534             :         /*
      535             :          * Something must have gone wrong if the computed number of
      536             :          * segments is bigger than the request's number of physical segments
     537             :          */
     538        6744 :         WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
     539             : 
     540        3372 :         return nsegs;
     541             : }
     542             : EXPORT_SYMBOL(__blk_rq_map_sg);
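
__blk_rq_map_sg() is normally reached through the blk_rq_map_sg() wrapper: a blk-mq driver builds a scatterlist from the request in its ->queue_rq() path and then DMA-maps it. The sketch below shows that typical sequence under stated assumptions; the table size, command structure, device pointer and function name are hypothetical.

	#include <linux/errno.h>
	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical per-command context with a preallocated scatterlist. */
	#define MYDRV_MAX_SEGS	128

	struct mydrv_cmd {
		struct scatterlist sgl[MYDRV_MAX_SEGS];
	};

	static int mydrv_map_request(struct device *dma_dev, struct request *rq,
				     struct mydrv_cmd *cmd)
	{
		int nsegs, mapped;

		sg_init_table(cmd->sgl, MYDRV_MAX_SEGS);

		/* Fill the scatterlist; never more than rq->nr_phys_segments entries. */
		nsegs = blk_rq_map_sg(rq->q, rq, cmd->sgl);
		if (!nsegs)
			return -EIO;

		mapped = dma_map_sg(dma_dev, cmd->sgl, nsegs, rq_dma_dir(rq));
		if (!mapped)
			return -ENOMEM;

		/* ... program the hardware with 'mapped' DMA segments ... */
		return 0;
	}
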
     543             : 
     544        5486 : static inline unsigned int blk_rq_get_max_segments(struct request *rq)
     545             : {
     546        5486 :         if (req_op(rq) == REQ_OP_DISCARD)
     547           0 :                 return queue_max_discard_segments(rq->q);
     548        5486 :         return queue_max_segments(rq->q);
     549             : }
     550             : 
     551        5486 : static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
     552             :                 unsigned int nr_phys_segs)
     553             : {
     554       10972 :         if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
     555           1 :                 goto no_merge;
     556             : 
     557        5485 :         if (blk_integrity_merge_bio(req->q, req, bio) == false)
     558             :                 goto no_merge;
     559             : 
     560             :         /*
      561             :          * This will form the start of a new hw segment.  Bump the
      562             :          * physical segment counter.
     563             :          */
     564        5485 :         req->nr_phys_segments += nr_phys_segs;
     565        5485 :         return 1;
     566             : 
     567           1 : no_merge:
     568           1 :         req_set_nomerge(req->q, req);
     569             :         return 0;
     570             : }
     571             : 
     572        5387 : int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
     573             : {
     574        5387 :         if (req_gap_back_merge(req, bio))
     575             :                 return 0;
     576        5387 :         if (blk_integrity_rq(req) &&
     577             :             integrity_req_gap_back_merge(req, bio))
     578             :                 return 0;
     579        5387 :         if (!bio_crypt_ctx_back_mergeable(req, bio))
     580             :                 return 0;
     581        5387 :         if (blk_rq_sectors(req) + bio_sectors(bio) >
     582        5387 :             blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
     583           0 :                 req_set_nomerge(req->q, req);
     584           0 :                 return 0;
     585             :         }
     586             : 
     587        5387 :         return ll_new_hw_segment(req, bio, nr_segs);
     588             : }
     589             : 
     590          99 : static int ll_front_merge_fn(struct request *req, struct bio *bio,
     591             :                 unsigned int nr_segs)
     592             : {
     593          99 :         if (req_gap_front_merge(req, bio))
     594             :                 return 0;
     595          99 :         if (blk_integrity_rq(req) &&
     596             :             integrity_req_gap_front_merge(req, bio))
     597             :                 return 0;
     598          99 :         if (!bio_crypt_ctx_front_mergeable(req, bio))
     599             :                 return 0;
     600          99 :         if (blk_rq_sectors(req) + bio_sectors(bio) >
     601          99 :             blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
     602           0 :                 req_set_nomerge(req->q, req);
     603           0 :                 return 0;
     604             :         }
     605             : 
     606          99 :         return ll_new_hw_segment(req, bio, nr_segs);
     607             : }
     608             : 
     609           0 : static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
     610             :                 struct request *next)
     611             : {
     612           0 :         unsigned short segments = blk_rq_nr_discard_segments(req);
     613             : 
     614           0 :         if (segments >= queue_max_discard_segments(q))
     615           0 :                 goto no_merge;
     616           0 :         if (blk_rq_sectors(req) + bio_sectors(next->bio) >
     617           0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req)))
     618           0 :                 goto no_merge;
     619             : 
     620           0 :         req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
     621           0 :         return true;
     622           0 : no_merge:
     623           0 :         req_set_nomerge(q, req);
     624             :         return false;
     625             : }
     626             : 
     627           0 : static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
     628             :                                 struct request *next)
     629             : {
     630           0 :         int total_phys_segments;
     631             : 
     632           0 :         if (req_gap_back_merge(req, next->bio))
     633             :                 return 0;
     634             : 
     635             :         /*
     636             :          * Will it become too large?
     637             :          */
     638           0 :         if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
     639           0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req)))
     640             :                 return 0;
     641             : 
     642           0 :         total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
     643           0 :         if (total_phys_segments > blk_rq_get_max_segments(req))
     644             :                 return 0;
     645             : 
     646           0 :         if (blk_integrity_merge_rq(q, req, next) == false)
     647             :                 return 0;
     648             : 
     649           0 :         if (!bio_crypt_ctx_merge_rq(req, next))
     650             :                 return 0;
     651             : 
     652             :         /* Merge is OK... */
     653           0 :         req->nr_phys_segments = total_phys_segments;
     654           0 :         return 1;
     655             : }
     656             : 
     657             : /**
     658             :  * blk_rq_set_mixed_merge - mark a request as mixed merge
     659             :  * @rq: request to mark as mixed merge
     660             :  *
     661             :  * Description:
     662             :  *     @rq is about to be mixed merged.  Make sure the attributes
     663             :  *     which can be mixed are set in each bio and mark @rq as mixed
     664             :  *     merged.
     665             :  */
     666        3101 : void blk_rq_set_mixed_merge(struct request *rq)
     667             : {
     668        3101 :         unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
     669        3101 :         struct bio *bio;
     670             : 
     671        3101 :         if (rq->rq_flags & RQF_MIXED_MERGE)
     672             :                 return;
     673             : 
     674             :         /*
     675             :          * @rq will no longer represent mixable attributes for all the
     676             :          * contained bios.  It will just track those of the first one.
      677             :          * Distribute the attributes to each bio.
     678             :          */
     679         376 :         for (bio = rq->bio; bio; bio = bio->bi_next) {
     680         376 :                 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
     681             :                              (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
     682         188 :                 bio->bi_opf |= ff;
     683             :         }
     684         188 :         rq->rq_flags |= RQF_MIXED_MERGE;
     685             : }
     686             : 
     687           0 : static void blk_account_io_merge_request(struct request *req)
     688             : {
     689           0 :         if (blk_do_io_stat(req)) {
     690           0 :                 part_stat_lock();
     691           0 :                 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
     692           0 :                 part_stat_unlock();
     693             :         }
     694           0 : }
     695             : 
     696             : /*
     697             :  * Two cases of handling DISCARD merge:
      698             :  * If max_discard_segments > 1, the driver treats every bio
      699             :  * as a range and sends them to the controller together. The
      700             :  * ranges need not be contiguous.
      701             :  * Otherwise, the bios/requests are handled the same as others,
      702             :  * which must be contiguous.
     703             :  */
     704        9293 : static inline bool blk_discard_mergable(struct request *req)
     705             : {
     706           0 :         if (req_op(req) == REQ_OP_DISCARD &&
     707           0 :             queue_max_discard_segments(req->q) > 1)
     708             :                 return true;
     709             :         return false;
     710             : }
     711             : 
     712           0 : static enum elv_merge blk_try_req_merge(struct request *req,
     713             :                                         struct request *next)
     714             : {
     715           0 :         if (blk_discard_mergable(req))
     716             :                 return ELEVATOR_DISCARD_MERGE;
     717           0 :         else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
     718           0 :                 return ELEVATOR_BACK_MERGE;
     719             : 
     720             :         return ELEVATOR_NO_MERGE;
     721             : }
     722             : 
     723             : /*
     724             :  * For non-mq, this has to be called with the request spinlock acquired.
     725             :  * For mq with scheduling, the appropriate queue wide lock should be held.
     726             :  */
     727           0 : static struct request *attempt_merge(struct request_queue *q,
     728             :                                      struct request *req, struct request *next)
     729             : {
     730           0 :         if (!rq_mergeable(req) || !rq_mergeable(next))
     731             :                 return NULL;
     732             : 
     733           0 :         if (req_op(req) != req_op(next))
     734             :                 return NULL;
     735             : 
     736           0 :         if (rq_data_dir(req) != rq_data_dir(next)
     737           0 :             || req->rq_disk != next->rq_disk)
     738             :                 return NULL;
     739             : 
     740           0 :         if (req_op(req) == REQ_OP_WRITE_SAME &&
     741           0 :             !blk_write_same_mergeable(req->bio, next->bio))
     742             :                 return NULL;
     743             : 
     744             :         /*
     745             :          * Don't allow merge of different write hints, or for a hint with
     746             :          * non-hint IO.
     747             :          */
     748           0 :         if (req->write_hint != next->write_hint)
     749             :                 return NULL;
     750             : 
     751           0 :         if (req->ioprio != next->ioprio)
     752             :                 return NULL;
     753             : 
     754             :         /*
     755             :          * If we are allowed to merge, then append bio list
     756             :          * from next to rq and release next. merge_requests_fn
     757             :          * will have updated segment counts, update sector
     758             :          * counts here. Handle DISCARDs separately, as they
     759             :          * have separate settings.
     760             :          */
     761             : 
     762           0 :         switch (blk_try_req_merge(req, next)) {
     763           0 :         case ELEVATOR_DISCARD_MERGE:
     764           0 :                 if (!req_attempt_discard_merge(q, req, next))
     765             :                         return NULL;
     766             :                 break;
     767           0 :         case ELEVATOR_BACK_MERGE:
     768           0 :                 if (!ll_merge_requests_fn(q, req, next))
     769             :                         return NULL;
     770             :                 break;
     771             :         default:
     772             :                 return NULL;
     773             :         }
     774             : 
     775             :         /*
     776             :          * If failfast settings disagree or any of the two is already
     777             :          * a mixed merge, mark both as mixed before proceeding.  This
     778             :          * makes sure that all involved bios have mixable attributes
     779             :          * set properly.
     780             :          */
     781           0 :         if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
     782           0 :             (req->cmd_flags & REQ_FAILFAST_MASK) !=
     783           0 :             (next->cmd_flags & REQ_FAILFAST_MASK)) {
     784           0 :                 blk_rq_set_mixed_merge(req);
     785           0 :                 blk_rq_set_mixed_merge(next);
     786             :         }
     787             : 
     788             :         /*
     789             :          * At this point we have either done a back merge or front merge. We
     790             :          * need the smaller start_time_ns of the merged requests to be the
     791             :          * current request for accounting purposes.
     792             :          */
     793           0 :         if (next->start_time_ns < req->start_time_ns)
     794           0 :                 req->start_time_ns = next->start_time_ns;
     795             : 
     796           0 :         req->biotail->bi_next = next->bio;
     797           0 :         req->biotail = next->biotail;
     798             : 
     799           0 :         req->__data_len += blk_rq_bytes(next);
     800             : 
     801           0 :         if (!blk_discard_mergable(req))
     802           0 :                 elv_merge_requests(q, req, next);
     803             : 
     804             :         /*
     805             :          * 'next' is going away, so update stats accordingly
     806             :          */
     807           0 :         blk_account_io_merge_request(next);
     808             : 
     809           0 :         trace_block_rq_merge(next);
     810             : 
     811             :         /*
     812             :          * ownership of bio passed from next to req, return 'next' for
     813             :          * the caller to free
     814             :          */
     815           0 :         next->bio = NULL;
     816           0 :         return next;
     817             : }
     818             : 
     819           0 : static struct request *attempt_back_merge(struct request_queue *q,
     820             :                 struct request *rq)
     821             : {
     822           0 :         struct request *next = elv_latter_request(q, rq);
     823             : 
     824           0 :         if (next)
     825           0 :                 return attempt_merge(q, rq, next);
     826             : 
     827             :         return NULL;
     828             : }
     829             : 
     830           0 : static struct request *attempt_front_merge(struct request_queue *q,
     831             :                 struct request *rq)
     832             : {
     833           0 :         struct request *prev = elv_former_request(q, rq);
     834             : 
     835           0 :         if (prev)
     836           0 :                 return attempt_merge(q, prev, rq);
     837             : 
     838             :         return NULL;
     839             : }
     840             : 
     841           0 : int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
     842             :                           struct request *next)
     843             : {
     844           0 :         struct request *free;
     845             : 
     846           0 :         free = attempt_merge(q, rq, next);
     847           0 :         if (free) {
     848           0 :                 blk_put_request(free);
     849           0 :                 return 1;
     850             :         }
     851             : 
     852             :         return 0;
     853             : }
     854             : 
     855        9293 : bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
     856             : {
     857        9293 :         if (!rq_mergeable(rq) || !bio_mergeable(bio))
     858             :                 return false;
     859             : 
     860        9293 :         if (req_op(rq) != bio_op(bio))
     861             :                 return false;
     862             : 
     863             :         /* different data direction or already started, don't merge */
     864        9293 :         if (bio_data_dir(bio) != rq_data_dir(rq))
     865             :                 return false;
     866             : 
     867             :         /* must be same device */
     868        9293 :         if (rq->rq_disk != bio->bi_bdev->bd_disk)
     869             :                 return false;
     870             : 
      871             :         /* only merge an integrity-protected bio into a likewise protected rq */
     872        9293 :         if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
     873             :                 return false;
     874             : 
     875             :         /* Only merge if the crypt contexts are compatible */
     876        9293 :         if (!bio_crypt_rq_ctx_compatible(rq, bio))
     877             :                 return false;
     878             : 
     879             :         /* must be using the same buffer */
     880        9293 :         if (req_op(rq) == REQ_OP_WRITE_SAME &&
     881           0 :             !blk_write_same_mergeable(rq->bio, bio))
     882             :                 return false;
     883             : 
     884             :         /*
     885             :          * Don't allow merge of different write hints, or for a hint with
     886             :          * non-hint IO.
     887             :          */
     888        9293 :         if (rq->write_hint != bio->bi_write_hint)
     889             :                 return false;
     890             : 
     891        9293 :         if (rq->ioprio != bio_prio(bio))
     892           0 :                 return false;
     893             : 
     894             :         return true;
     895             : }
     896             : 
     897        9293 : enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
     898             : {
     899        9293 :         if (blk_discard_mergable(rq))
     900             :                 return ELEVATOR_DISCARD_MERGE;
     901        9293 :         else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
     902             :                 return ELEVATOR_BACK_MERGE;
     903        3906 :         else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
     904          99 :                 return ELEVATOR_FRONT_MERGE;
     905             :         return ELEVATOR_NO_MERGE;
     906             : }
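
Leaving the discard case aside, the elevator hint computed by blk_try_merge() is position arithmetic on 512-byte sectors: a back merge when the bio starts exactly where the request ends, a front merge when the bio ends exactly where the request starts. A small userspace model of that decision (enum names shortened, numbers invented):

	#include <stdio.h>

	typedef unsigned long long sector_t;

	enum merge { NO_MERGE, BACK_MERGE, FRONT_MERGE };

	static enum merge try_merge(sector_t rq_pos, unsigned int rq_sectors,
				    sector_t bio_pos, unsigned int bio_sectors)
	{
		if (rq_pos + rq_sectors == bio_pos)
			return BACK_MERGE;	/* bio continues right after the request */
		if (rq_pos - bio_sectors == bio_pos)
			return FRONT_MERGE;	/* bio ends exactly where the request starts */
		return NO_MERGE;
	}

	int main(void)
	{
		printf("%d\n", try_merge(100, 8, 108, 8));	/* BACK_MERGE (1) */
		printf("%d\n", try_merge(100, 8, 92, 8));	/* FRONT_MERGE (2) */
		printf("%d\n", try_merge(100, 8, 200, 8));	/* NO_MERGE (0) */
		return 0;
	}
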
     907             : 
     908        5485 : static void blk_account_io_merge_bio(struct request *req)
     909             : {
     910       10970 :         if (!blk_do_io_stat(req))
     911             :                 return;
     912             : 
     913        5485 :         part_stat_lock();
     914       16455 :         part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
     915        5485 :         part_stat_unlock();
     916             : }
     917             : 
     918             : enum bio_merge_status {
     919             :         BIO_MERGE_OK,
     920             :         BIO_MERGE_NONE,
     921             :         BIO_MERGE_FAILED,
     922             : };
     923             : 
     924        5387 : static enum bio_merge_status bio_attempt_back_merge(struct request *req,
     925             :                 struct bio *bio, unsigned int nr_segs)
     926             : {
     927        5387 :         const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
     928             : 
     929        5387 :         if (!ll_back_merge_fn(req, bio, nr_segs))
     930             :                 return BIO_MERGE_FAILED;
     931             : 
     932        5386 :         trace_block_bio_backmerge(bio);
     933        5386 :         rq_qos_merge(req->q, req, bio);
     934             : 
     935        5386 :         if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
     936        3002 :                 blk_rq_set_mixed_merge(req);
     937             : 
     938        5386 :         req->biotail->bi_next = bio;
     939        5386 :         req->biotail = bio;
     940        5386 :         req->__data_len += bio->bi_iter.bi_size;
     941             : 
     942        5386 :         bio_crypt_free_ctx(bio);
     943             : 
     944        5386 :         blk_account_io_merge_bio(req);
     945        5386 :         return BIO_MERGE_OK;
     946             : }
     947             : 
     948          99 : static enum bio_merge_status bio_attempt_front_merge(struct request *req,
     949             :                 struct bio *bio, unsigned int nr_segs)
     950             : {
     951          99 :         const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
     952             : 
     953          99 :         if (!ll_front_merge_fn(req, bio, nr_segs))
     954             :                 return BIO_MERGE_FAILED;
     955             : 
     956          99 :         trace_block_bio_frontmerge(bio);
     957          99 :         rq_qos_merge(req->q, req, bio);
     958             : 
     959          99 :         if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
     960          99 :                 blk_rq_set_mixed_merge(req);
     961             : 
     962          99 :         bio->bi_next = req->bio;
     963          99 :         req->bio = bio;
     964             : 
     965          99 :         req->__sector = bio->bi_iter.bi_sector;
     966          99 :         req->__data_len += bio->bi_iter.bi_size;
     967             : 
     968          99 :         bio_crypt_do_front_merge(req, bio);
     969             : 
     970          99 :         blk_account_io_merge_bio(req);
     971          99 :         return BIO_MERGE_OK;
     972             : }
     973             : 
     974           0 : static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
     975             :                 struct request *req, struct bio *bio)
     976             : {
     977           0 :         unsigned short segments = blk_rq_nr_discard_segments(req);
     978             : 
     979           0 :         if (segments >= queue_max_discard_segments(q))
     980           0 :                 goto no_merge;
     981           0 :         if (blk_rq_sectors(req) + bio_sectors(bio) >
     982           0 :             blk_rq_get_max_sectors(req, blk_rq_pos(req)))
     983           0 :                 goto no_merge;
     984             : 
     985           0 :         rq_qos_merge(q, req, bio);
     986             : 
     987           0 :         req->biotail->bi_next = bio;
     988           0 :         req->biotail = bio;
     989           0 :         req->__data_len += bio->bi_iter.bi_size;
     990           0 :         req->nr_phys_segments = segments + 1;
     991             : 
     992           0 :         blk_account_io_merge_bio(req);
     993           0 :         return BIO_MERGE_OK;
     994           0 : no_merge:
     995           0 :         req_set_nomerge(q, req);
     996             :         return BIO_MERGE_FAILED;
     997             : }
     998             : 
     999        9293 : static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
    1000             :                                                    struct request *rq,
    1001             :                                                    struct bio *bio,
    1002             :                                                    unsigned int nr_segs,
    1003             :                                                    bool sched_allow_merge)
    1004             : {
    1005        9293 :         if (!blk_rq_merge_ok(rq, bio))
    1006             :                 return BIO_MERGE_NONE;
    1007             : 
    1008        9293 :         switch (blk_try_merge(rq, bio)) {
    1009        5387 :         case ELEVATOR_BACK_MERGE:
    1010        5387 :                 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
    1011        5387 :                         return bio_attempt_back_merge(rq, bio, nr_segs);
    1012             :                 break;
    1013          99 :         case ELEVATOR_FRONT_MERGE:
    1014          99 :                 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
    1015          99 :                         return bio_attempt_front_merge(rq, bio, nr_segs);
    1016             :                 break;
    1017           0 :         case ELEVATOR_DISCARD_MERGE:
    1018           0 :                 return bio_attempt_discard_merge(q, rq, bio);
    1019             :         default:
    1020             :                 return BIO_MERGE_NONE;
    1021             :         }
    1022             : 
    1023             :         return BIO_MERGE_FAILED;
    1024             : }
    1025             : 
    1026             : /**
    1027             :  * blk_attempt_plug_merge - try to merge with %current's plugged list
    1028             :  * @q: request_queue new bio is being queued at
    1029             :  * @bio: new bio being queued
    1030             :  * @nr_segs: number of segments in @bio
    1031             :  * @same_queue_rq: pointer to &struct request that gets filled in when
    1032             :  * another request associated with @q is found on the plug list
    1033             :  * (optional, may be %NULL)
    1034             :  *
    1035             :  * Determine whether @bio being queued on @q can be merged with a request
    1036             :  * on %current's plugged list.  Returns %true if the merge was successful,
    1037             :  * otherwise %false.
    1038             :  *
    1039             :  * Plugging coalesces IOs from the same issuer for the same purpose without
    1040             :  * going through @q->queue_lock.  As such it's more of an issuing mechanism
    1041             :  * than scheduling, and the request, while it may have elvpriv data, is not
    1042             :  * added to the elevator at this point.  In addition, we don't have
    1043             :  * reliable access to the elevator outside queue lock.  Only check basic
    1044             :  * merging parameters without querying the elevator.
    1045             :  *
    1046             :  * Caller must ensure !blk_queue_nomerges(q) beforehand.
    1047             :  */
    1048        8605 : bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
    1049             :                 unsigned int nr_segs, struct request **same_queue_rq)
    1050             : {
    1051        8605 :         struct blk_plug *plug;
    1052        8605 :         struct request *rq;
    1053        8605 :         struct list_head *plug_list;
    1054             : 
    1055        8605 :         plug = blk_mq_plug(q, bio);
    1056        8605 :         if (!plug)
    1057             :                 return false;
    1058             : 
    1059        7999 :         plug_list = &plug->mq_list;
    1060             : 
    1061       11807 :         list_for_each_entry_reverse(rq, plug_list, queuelist) {
    1062        9293 :                 if (rq->q == q && same_queue_rq) {
    1063             :                         /*
    1064             :                          * Only the blk-mq multiple hardware queues case checks
    1065             :                          * the rq in the same queue; there should be only one
    1066             :                          * such rq in a queue.
    1067             :                          */
    1068        9293 :                         *same_queue_rq = rq;
    1069             :                 }
    1070             : 
    1071        9293 :                 if (rq->q != q)
    1072           0 :                         continue;
    1073             : 
    1074        9293 :                 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
    1075             :                     BIO_MERGE_OK)
    1076             :                         return true;
    1077             :         }
    1078             : 
    1079             :         return false;
    1080             : }
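
A minimal caller sketch (not part of blk-merge.c): it illustrates how a bio
submission path is expected to honour the "caller must ensure
!blk_queue_nomerges(q)" contract documented above before trying a plug merge.
Only blk_queue_nomerges() and blk_attempt_plug_merge() are real interfaces
here; the wrapper name is hypothetical.

static bool try_plug_merge_first(struct request_queue *q, struct bio *bio,
                                 unsigned int nr_segs)
{
        struct request *same_queue_rq = NULL;

        /* The caller, not the helper, filters queues with merging disabled. */
        if (blk_queue_nomerges(q))
                return false;

        /* True only if @bio was folded into a request on the plug list. */
        return blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq);
}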
    1081             : 
    1082             : /*
    1083             :  * Iterate list of requests and see if we can merge this bio with any
    1084             :  * of them.
    1085             :  */
    1086           0 : bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
    1087             :                         struct bio *bio, unsigned int nr_segs)
    1088             : {
    1089           0 :         struct request *rq;
    1090           0 :         int checked = 8;
    1091             : 
    1092           0 :         list_for_each_entry_reverse(rq, list, queuelist) {
    1093           0 :                 if (!checked--)
    1094             :                         break;
    1095             : 
    1096           0 :                 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
    1097           0 :                 case BIO_MERGE_NONE:
    1098           0 :                         continue;
    1099             :                 case BIO_MERGE_OK:
    1100             :                         return true;
    1101           0 :                 case BIO_MERGE_FAILED:
    1102           0 :                         return false;
    1103             :                 }
    1104             : 
    1105             :         }
    1106             : 
    1107             :         return false;
    1108             : }
    1109             : EXPORT_SYMBOL_GPL(blk_bio_list_merge);
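
Because blk_bio_list_merge() is exported for I/O schedulers, a hedged usage
sketch may help: it mirrors the shape of an elevator bio-merge path that keeps
a scheduler-private request list. The example_sched_data type, its lock and
rq_list fields, and the function name are assumptions; blk_bio_list_merge()
and the spinlock API are the only real interfaces used.

static bool example_sched_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
                                    unsigned int nr_segs)
{
        struct example_sched_data *sd = hctx->queue->elevator->elevator_data;
        bool merged;

        /* Serialise against the scheduler's own insert and dispatch paths. */
        spin_lock(&sd->lock);
        merged = blk_bio_list_merge(hctx->queue, &sd->rq_list, bio, nr_segs);
        spin_unlock(&sd->lock);

        return merged;
}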
    1110             : 
    1111           0 : bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
    1112             :                 unsigned int nr_segs, struct request **merged_request)
    1113             : {
    1114           0 :         struct request *rq;
    1115             : 
    1116           0 :         switch (elv_merge(q, &rq, bio)) {
    1117           0 :         case ELEVATOR_BACK_MERGE:
    1118           0 :                 if (!blk_mq_sched_allow_merge(q, rq, bio))
    1119             :                         return false;
    1120           0 :                 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
    1121             :                         return false;
    1122           0 :                 *merged_request = attempt_back_merge(q, rq);
    1123           0 :                 if (!*merged_request)
    1124           0 :                         elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
    1125             :                 return true;
    1126           0 :         case ELEVATOR_FRONT_MERGE:
    1127           0 :                 if (!blk_mq_sched_allow_merge(q, rq, bio))
    1128             :                         return false;
    1129           0 :                 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
    1130             :                         return false;
    1131           0 :                 *merged_request = attempt_front_merge(q, rq);
    1132           0 :                 if (!*merged_request)
    1133           0 :                         elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
    1134             :                 return true;
    1135           0 :         case ELEVATOR_DISCARD_MERGE:
    1136           0 :                 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
    1137             :         default:
    1138             :                 return false;
    1139             :         }
    1140             : }
    1141             : EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
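
blk_mq_sched_try_merge() may collapse two requests into one and hand the
leftover back through @merged_request; a hedged sketch of how a scheduler is
expected to consume it follows. The example_sched_data type, its lock and the
function name are assumptions; blk_mq_sched_try_merge() and
blk_mq_free_request() are the real interfaces.

static bool example_sched_try_merge(struct request_queue *q, struct bio *bio,
                                    unsigned int nr_segs)
{
        struct example_sched_data *sd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        /* Hold the scheduler lock across the merge attempt. */
        spin_lock(&sd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&sd->lock);

        /* If two requests were collapsed into one, free the leftover. */
        if (free)
                blk_mq_free_request(free);

        return ret;
}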

Generated by: LCOV version 1.14