LCOV - code coverage report
Current view: top level - block - blk-mq.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
                 Hit    Total    Coverage
Lines:            35       74      47.3 %
Functions:         2        7      28.6 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef INT_BLK_MQ_H
       3             : #define INT_BLK_MQ_H
       4             : 
       5             : #include "blk-stat.h"
       6             : #include "blk-mq-tag.h"
       7             : 
       8             : struct blk_mq_tag_set;
       9             : 
      10             : struct blk_mq_ctxs {
      11             :         struct kobject kobj;
      12             :         struct blk_mq_ctx __percpu      *queue_ctx;
      13             : };
      14             : 
      15             : /**
      16             :  * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
      17             :  */
      18             : struct blk_mq_ctx {
      19             :         struct {
      20             :                 spinlock_t              lock;
      21             :                 struct list_head        rq_lists[HCTX_MAX_TYPES];
      22             :         } ____cacheline_aligned_in_smp;
      23             : 
      24             :         unsigned int            cpu;
      25             :         unsigned short          index_hw[HCTX_MAX_TYPES];
      26             :         struct blk_mq_hw_ctx    *hctxs[HCTX_MAX_TYPES];
      27             : 
      28             :         /* incremented at dispatch time */
      29             :         unsigned long           rq_dispatched[2];
      30             :         unsigned long           rq_merged;
      31             : 
      32             :         /* incremented at completion time */
      33             :         unsigned long           ____cacheline_aligned_in_smp rq_completed[2];
      34             : 
      35             :         struct request_queue    *queue;
      36             :         struct blk_mq_ctxs      *ctxs;
      37             :         struct kobject          kobj;
      38             : } ____cacheline_aligned_in_smp;
      39             : 
      40             : void blk_mq_exit_queue(struct request_queue *q);
      41             : int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
      42             : void blk_mq_wake_waiters(struct request_queue *q);
      43             : bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
      44             :                              unsigned int);
      45             : void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
      46             :                                 bool kick_requeue_list);
      47             : void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
      48             : struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
      49             :                                         struct blk_mq_ctx *start);
      50             : 
      51             : /*
      52             :  * Internal helpers for allocating/freeing the request map
      53             :  */
      54             : void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
      55             :                      unsigned int hctx_idx);
      56             : void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
      57             : struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
      58             :                                         unsigned int hctx_idx,
      59             :                                         unsigned int nr_tags,
      60             :                                         unsigned int reserved_tags,
      61             :                                         unsigned int flags);
      62             : int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
      63             :                      unsigned int hctx_idx, unsigned int depth);
      64             : 
      65             : /*
      66             :  * Internal helpers for request insertion into sw queues
      67             :  */
      68             : void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
      69             :                                 bool at_head);
      70             : void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
      71             :                                   bool run_queue);
      72             : void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
      73             :                                 struct list_head *list);
      74             : 
      75             : /* Used by blk_insert_cloned_request() to issue request directly */
      76             : blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
      77             : void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
      78             :                                     struct list_head *list);
      79             : 
      80             : /*
      81             :  * CPU -> queue mappings
      82             :  */
      83             : extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
      84             : 
      85             : /*
      86             :  * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
      87             :  * @q: request queue
      88             :  * @type: the hctx type index
      89             :  * @cpu: CPU
      90             :  */
      91         144 : static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
      92             :                                                           enum hctx_type type,
      93             :                                                           unsigned int cpu)
      94             : {
      95         144 :         return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
      96             : }
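/*
 * Illustrative sketch, not part of blk-mq.h: blk_mq_map_queue_type() is a
 * two-level lookup. The per-type CPU map (q->tag_set->map[type].mq_map[cpu])
 * yields a hardware queue index, which then indexes q->queue_hw_ctx[]. A
 * hypothetical caller wanting the poll queue serving the current CPU could
 * write:
 *
 *        struct blk_mq_hw_ctx *hctx =
 *                blk_mq_map_queue_type(q, HCTX_TYPE_POLL,
 *                                      raw_smp_processor_id());
 */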
      97             : 
      98             : /*
      99             :  * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
     100             :  * @q: request queue
     101             :  * @flags: request command flags
     102             :  * @ctx: software queue cpu ctx
     103             :  */
     104        6757 : static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
     105             :                                                      unsigned int flags,
     106             :                                                      struct blk_mq_ctx *ctx)
     107             : {
     108        6757 :         enum hctx_type type = HCTX_TYPE_DEFAULT;
     109             : 
     110             :         /*
      111             :          * The caller ensures that polling is enabled if REQ_HIPRI is set.
     112             :          */
     113        6757 :         if (flags & REQ_HIPRI)
     114             :                 type = HCTX_TYPE_POLL;
     115        6757 :         else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
     116        5122 :                 type = HCTX_TYPE_READ;
     117             :         
     118        6757 :         return ctx->hctxs[type];
     119             : }
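/*
 * Illustrative sketch, not part of blk-mq.h: the type selection above,
 * spelled out for a few command-flag combinations:
 *
 *        blk_mq_map_queue(q, REQ_OP_READ, ctx)             -> ctx->hctxs[HCTX_TYPE_READ]
 *        blk_mq_map_queue(q, REQ_OP_READ | REQ_HIPRI, ctx) -> ctx->hctxs[HCTX_TYPE_POLL]
 *        blk_mq_map_queue(q, REQ_OP_WRITE, ctx)            -> ctx->hctxs[HCTX_TYPE_DEFAULT]
 */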
     120             : 
     121             : /*
     122             :  * sysfs helpers
     123             :  */
     124             : extern void blk_mq_sysfs_init(struct request_queue *q);
     125             : extern void blk_mq_sysfs_deinit(struct request_queue *q);
     126             : extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
     127             : extern int blk_mq_sysfs_register(struct request_queue *q);
     128             : extern void blk_mq_sysfs_unregister(struct request_queue *q);
     129             : extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
     130             : 
     131             : void blk_mq_release(struct request_queue *q);
     132             : 
     133        6374 : static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
     134             :                                            unsigned int cpu)
     135             : {
     136        3120 :         return per_cpu_ptr(q->queue_ctx, cpu);
     137             : }
     138             : 
     139             : /*
      140             :  * This assumes per-CPU software queues. They could be per-node
     141             :  * as well, for instance. For now this is hardcoded as-is. Note that we don't
     142             :  * care about preemption, since we know the ctx's are persistent. This does
     143             :  * mean that we can't rely on ctx always matching the currently running CPU.
     144             :  */
     145        6374 : static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
     146             : {
     147        6374 :         return __blk_mq_get_ctx(q, raw_smp_processor_id());
     148             : }
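/*
 * Illustrative sketch, not part of blk-mq.h: a typical caller pairs
 * blk_mq_get_ctx() with blk_mq_map_queue() to go from "whatever CPU we are
 * on right now" to the software queue and then to the hardware queue that
 * would service a request with the given command flags. The helper name is
 * hypothetical.
 */
static inline struct blk_mq_hw_ctx *example_current_hctx(struct request_queue *q,
                                                         unsigned int cmd_flags)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

        return blk_mq_map_queue(q, cmd_flags, ctx);
}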
     149             : 
     150             : struct blk_mq_alloc_data {
     151             :         /* input parameter */
     152             :         struct request_queue *q;
     153             :         blk_mq_req_flags_t flags;
     154             :         unsigned int shallow_depth;
     155             :         unsigned int cmd_flags;
     156             : 
     157             :         /* input & output parameter */
     158             :         struct blk_mq_ctx *ctx;
     159             :         struct blk_mq_hw_ctx *hctx;
     160             : };
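/*
 * Illustrative sketch, not part of blk-mq.h: the "input parameter" members
 * above are provided by the caller, while ctx and hctx are resolved later on
 * the allocation path. The flag value and the bio variable are only
 * assumptions for the example.
 *
 *        struct blk_mq_alloc_data data = {
 *                .q         = q,
 *                .flags     = BLK_MQ_REQ_NOWAIT,
 *                .cmd_flags = bio->bi_opf,
 *        };
 */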
     161             : 
     162           9 : static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
     163             : {
     164           9 :         return flags & BLK_MQ_F_TAG_HCTX_SHARED;
     165             : }
     166             : 
     167        6508 : static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
     168             : {
     169        6508 :         if (data->q->elevator)
     170           1 :                 return data->hctx->sched_tags;
     171             : 
     172        6507 :         return data->hctx->tags;
     173             : }
     174             : 
     175        6577 : static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
     176             : {
     177        6577 :         return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
     178             : }
     179             : 
     180          16 : static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
     181             : {
     182          16 :         return hctx->nr_ctx && hctx->tags;
     183             : }
     184             : 
     185             : unsigned int blk_mq_in_flight(struct request_queue *q,
     186             :                 struct block_device *part);
     187             : void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
     188             :                 unsigned int inflight[2]);
     189             : 
     190           0 : static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
     191             : {
     192           0 :         if (q->mq_ops->put_budget)
     193           0 :                 q->mq_ops->put_budget(q);
     194             : }
     195             : 
     196        3373 : static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
     197             : {
     198        3373 :         if (q->mq_ops->get_budget)
     199           0 :                 return q->mq_ops->get_budget(q);
     200             :         return true;
     201             : }
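/*
 * Illustrative sketch, not part of blk-mq.h: the two budget helpers above
 * are meant to be paired. A dispatcher acquires budget before handing a
 * request to the driver and gives it back if the request is not issued;
 * queues whose driver has no ->get_budget() callback always get budget.
 * The function name and the driver_busy flag are hypothetical.
 */
static inline bool example_dispatch_one(struct request_queue *q, bool driver_busy)
{
        if (!blk_mq_get_dispatch_budget(q))
                return false;   /* no budget: device already saturated */

        if (driver_busy) {
                /* the request was not issued, so hand the budget back */
                blk_mq_put_dispatch_budget(q);
                return false;
        }

        /* request issued; the driver now owns the budget */
        return true;
}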
     202             : 
     203           0 : static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
     204             : {
     205           0 :         if (blk_mq_is_sbitmap_shared(hctx->flags))
     206           0 :                 atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
     207             :         else
     208           0 :                 atomic_inc(&hctx->nr_active);
     209           0 : }
     210             : 
     211           0 : static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
     212             : {
     213           0 :         if (blk_mq_is_sbitmap_shared(hctx->flags))
     214           0 :                 atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
     215             :         else
     216           0 :                 atomic_dec(&hctx->nr_active);
     217           0 : }
     218             : 
     219           0 : static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
     220             : {
     221           0 :         if (blk_mq_is_sbitmap_shared(hctx->flags))
     222           0 :                 return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
     223           0 :         return atomic_read(&hctx->nr_active);
     224             : }
     225           0 : static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
     226             :                                            struct request *rq)
     227             : {
     228           0 :         blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
     229           0 :         rq->tag = BLK_MQ_NO_TAG;
     230             : 
     231           0 :         if (rq->rq_flags & RQF_MQ_INFLIGHT) {
     232           0 :                 rq->rq_flags &= ~RQF_MQ_INFLIGHT;
     233           0 :                 __blk_mq_dec_active_requests(hctx);
     234             :         }
     235           0 : }
     236             : 
     237           0 : static inline void blk_mq_put_driver_tag(struct request *rq)
     238             : {
     239           0 :         if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
     240             :                 return;
     241             : 
     242           0 :         __blk_mq_put_driver_tag(rq->mq_hctx, rq);
     243             : }
     244             : 
     245           1 : static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
     246             : {
     247           1 :         int cpu;
     248             : 
     249           5 :         for_each_possible_cpu(cpu)
     250           4 :                 qmap->mq_map[cpu] = 0;
     251           1 : }
     252             : 
     253             : /*
     254             :  * blk_mq_plug() - Get caller context plug
     255             :  * @q: request queue
     256             :  * @bio : the bio being submitted by the caller context
     257             :  *
     258             :  * Plugging, by design, may delay the insertion of BIOs into the elevator in
     259             :  * order to increase BIO merging opportunities. This however can cause BIO
     260             :  * insertion order to change from the order in which submit_bio() is being
     261             :  * executed in the case of multiple contexts concurrently issuing BIOs to a
     262             :  * device, even if these context are synchronized to tightly control BIO issuing
      263             :  * device, even if these contexts are synchronized to tightly control BIO issuing
     264             :  * change can cause write BIO failures with zoned block devices as these
     265             :  * require sequential write patterns to zones. Prevent this from happening by
     266             :  * ignoring the plug state of a BIO issuing context if the target request queue
     267             :  * is for a zoned block device and the BIO to plug is a write operation.
     268             :  *
     269             :  * Return current->plug if the bio can be plugged and NULL otherwise
     270             :  */
     271       20594 : static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
     272             :                                            struct bio *bio)
     273             : {
     274             :         /*
     275             :          * For regular block devices or read operations, use the context plug
     276             :          * which may be NULL if blk_start_plug() was not executed.
     277             :          */
     278       20594 :         if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
     279       20594 :                 return current->plug;
     280             : 
     281             :         /* Zoned block device write operation case: do not plug the BIO */
     282             :         return NULL;
     283             : }
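/*
 * Illustrative sketch, not part of blk-mq.h: the decision above for a few
 * bio/queue combinations, assuming the submitter called blk_start_plug():
 *
 *        read bio,  regular device -> current->plug
 *        write bio, regular device -> current->plug
 *        read bio,  zoned device   -> current->plug
 *        write bio, zoned device   -> NULL (keep zone writes sequential)
 */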
     284             : 
     285             : /*
     286             :  * For shared tag users, we track the number of currently active users
     287             :  * and attempt to provide a fair share of the tag depth for each of them.
     288             :  */
     289        3253 : static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
     290             :                                   struct sbitmap_queue *bt)
     291             : {
     292        3253 :         unsigned int depth, users;
     293             : 
     294        3253 :         if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
     295             :                 return true;
     296             : 
     297             :         /*
     298             :          * Don't try dividing an ant
     299             :          */
     300           0 :         if (bt->sb.depth == 1)
     301             :                 return true;
     302             : 
     303           0 :         if (blk_mq_is_sbitmap_shared(hctx->flags)) {
     304           0 :                 struct request_queue *q = hctx->queue;
     305           0 :                 struct blk_mq_tag_set *set = q->tag_set;
     306             : 
     307           0 :                 if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
     308             :                         return true;
     309           0 :                 users = atomic_read(&set->active_queues_shared_sbitmap);
     310             :         } else {
     311           0 :                 if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
     312             :                         return true;
     313           0 :                 users = atomic_read(&hctx->tags->active_queues);
     314             :         }
     315             : 
     316           0 :         if (!users)
     317             :                 return true;
     318             : 
     319             :         /*
     320             :          * Allow at least some tags
     321             :          */
     322           0 :         depth = max((bt->sb.depth + users - 1) / users, 4U);
     323           0 :         return __blk_mq_active_requests(hctx) < depth;
     324             : }
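/*
 * Illustrative worked example, not part of blk-mq.h, for the fair-share
 * calculation above: with bt->sb.depth == 128 and users == 4,
 * depth = max((128 + 4 - 1) / 4, 4U) = max(32, 4) = 32, so a queue may keep
 * at most 32 requests in flight. With 64 active users the rounded-up share
 * would only be 2, but the 4U floor still allows each queue 4 tags.
 */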
     325             : 
     326             : 
     327             : #endif

Generated by: LCOV version 1.14