LCOV - code coverage report
Current view: top level - block - blk.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit   Total   Coverage
Lines:            29      56     51.8 %
Functions:         1       2     50.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef BLK_INTERNAL_H
       3             : #define BLK_INTERNAL_H
       4             : 
       5             : #include <linux/idr.h>
       6             : #include <linux/blk-mq.h>
       7             : #include <linux/part_stat.h>
       8             : #include <linux/blk-crypto.h>
       9             : #include <xen/xen.h>
      10             : #include "blk-crypto-internal.h"
      11             : #include "blk-mq.h"
      12             : #include "blk-mq-sched.h"
      13             : 
      14             : /* Max future timer expiry for timeouts */
      15             : #define BLK_MAX_TIMEOUT         (5 * HZ)
      16             : 
      17             : extern struct dentry *blk_debugfs_root;
      18             : 
      19             : struct blk_flush_queue {
      20             :         unsigned int            flush_pending_idx:1;
      21             :         unsigned int            flush_running_idx:1;
      22             :         blk_status_t            rq_status;
      23             :         unsigned long           flush_pending_since;
      24             :         struct list_head        flush_queue[2];
      25             :         struct list_head        flush_data_in_flight;
      26             :         struct request          *flush_rq;
      27             : 
      28             :         spinlock_t              mq_flush_lock;
      29             : };
      30             : 
      31             : extern struct kmem_cache *blk_requestq_cachep;
      32             : extern struct kobj_type blk_queue_ktype;
      33             : extern struct ida blk_queue_ida;
      34             : 
      35             : static inline struct blk_flush_queue *
      36         383 : blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
      37             : {
      38         383 :         return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
      39             : }
      40             : 
      41           9 : static inline void __blk_get_queue(struct request_queue *q)
      42             : {
      43           9 :         kobject_get(&q->kobj);
      44             : }
      45             : 
      46             : static inline bool
      47           0 : is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
      48             : {
      49           0 :         return hctx->fq->flush_rq == req;
      50             : }
      51             : 
      52             : struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
      53             :                                               gfp_t flags);
      54             : void blk_free_flush_queue(struct blk_flush_queue *q);
      55             : 
      56             : void blk_freeze_queue(struct request_queue *q);
      57             : 
      58             : #define BIO_INLINE_VECS 4
      59             : struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
      60             :                 gfp_t gfp_mask);
      61             : void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
      62             : 
      63        5485 : static inline bool biovec_phys_mergeable(struct request_queue *q,
      64             :                 struct bio_vec *vec1, struct bio_vec *vec2)
      65             : {
      66        5485 :         unsigned long mask = queue_segment_boundary(q);
      67        5485 :         phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
      68        5485 :         phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
      69             : 
      70        5485 :         if (addr1 + vec1->bv_len != addr2)
      71             :                 return false;
      72        1870 :         if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
      73             :                 return false;
      74        1870 :         if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
      75           0 :                 return false;
      76             :         return true;
      77             : }
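
[Editorial sketch, not part of blk.h or its coverage data: a minimal userspace rendering of the contiguity and segment-boundary checks above, with hypothetical raw addresses in place of page_to_phys() and the Xen check omitted. ORing an address with the boundary mask erases the offset bits, so two addresses share a boundary window exactly when the ORed values match.]

/* Build with: cc -o merge merge.c && ./merge */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool phys_mergeable(uint64_t addr1, uint32_t len1,
                           uint64_t addr2, uint32_t len2,
                           uint64_t boundary_mask)
{
        if (addr1 + len1 != addr2)      /* ranges must be contiguous */
                return false;
        /* both ends must fall inside one segment-boundary window */
        return (addr1 | boundary_mask) ==
               ((addr2 + len2 - 1) | boundary_mask);
}

int main(void)
{
        uint64_t mask = 0xffff;         /* hypothetical 64 KiB boundary */

        /* contiguous, same 64 KiB window: mergeable (prints 1) */
        printf("%d\n", phys_mergeable(0x10000, 512, 0x10200, 512, mask));
        /* contiguous, but the pair straddles a window edge (prints 0) */
        printf("%d\n", phys_mergeable(0xfe00, 512, 0x10000, 512, mask));
        return 0;
}
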
      78             : 
      79           0 : static inline bool __bvec_gap_to_prev(struct request_queue *q,
      80             :                 struct bio_vec *bprv, unsigned int offset)
      81             : {
      82           0 :         return (offset & queue_virt_boundary(q)) ||
      83           0 :                 ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
      84             : }
      85             : 
      86             : /*
      87             :  * Check if adding a bio_vec after bprv with offset would create a gap in
      88             :  * the SG list. Most drivers don't care about this, but some do.
      89             :  */
      90        3923 : static inline bool bvec_gap_to_prev(struct request_queue *q,
      91             :                 struct bio_vec *bprv, unsigned int offset)
      92             : {
      93        3923 :         if (!queue_virt_boundary(q))
      94             :                 return false;
      95           0 :         return __bvec_gap_to_prev(q, bprv, offset);
      96             : }
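
[Editorial sketch, not part of blk.h or its coverage data: the same virt-boundary gap test in standalone form, assuming a hypothetical 4 KiB boundary mask. A gap exists unless the next offset starts on the boundary and the previous vector ends on it.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool gap_to_prev(uint32_t prv_offset, uint32_t prv_len,
                        uint32_t next_offset, uint32_t virt_boundary)
{
        if (!virt_boundary)             /* no boundary: never a gap */
                return false;
        return (next_offset & virt_boundary) ||
               ((prv_offset + prv_len) & virt_boundary);
}

int main(void)
{
        uint32_t mask = 4096 - 1;       /* hypothetical virt boundary */

        /* previous vector fills its page, next starts aligned: no gap (0) */
        printf("%d\n", gap_to_prev(0, 4096, 0, mask));
        /* previous vector ends mid-page: the SG list would have a hole (1) */
        printf("%d\n", gap_to_prev(0, 2048, 0, mask));
        return 0;
}
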
      97             : 
      98             : #ifdef CONFIG_BLK_DEV_INTEGRITY
      99             : void blk_flush_integrity(void);
     100             : bool __bio_integrity_endio(struct bio *);
     101             : void bio_integrity_free(struct bio *bio);
     102             : static inline bool bio_integrity_endio(struct bio *bio)
     103             : {
     104             :         if (bio_integrity(bio))
     105             :                 return __bio_integrity_endio(bio);
     106             :         return true;
     107             : }
     108             : 
     109             : bool blk_integrity_merge_rq(struct request_queue *, struct request *,
     110             :                 struct request *);
     111             : bool blk_integrity_merge_bio(struct request_queue *, struct request *,
     112             :                 struct bio *);
     113             : 
     114             : static inline bool integrity_req_gap_back_merge(struct request *req,
     115             :                 struct bio *next)
     116             : {
     117             :         struct bio_integrity_payload *bip = bio_integrity(req->bio);
     118             :         struct bio_integrity_payload *bip_next = bio_integrity(next);
     119             : 
     120             :         return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
     121             :                                 bip_next->bip_vec[0].bv_offset);
     122             : }
     123             : 
     124             : static inline bool integrity_req_gap_front_merge(struct request *req,
     125             :                 struct bio *bio)
     126             : {
     127             :         struct bio_integrity_payload *bip = bio_integrity(bio);
     128             :         struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
     129             : 
     130             :         return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
     131             :                                 bip_next->bip_vec[0].bv_offset);
     132             : }
     133             : 
     134             : void blk_integrity_add(struct gendisk *);
     135             : void blk_integrity_del(struct gendisk *);
     136             : #else /* CONFIG_BLK_DEV_INTEGRITY */
     137           0 : static inline bool blk_integrity_merge_rq(struct request_queue *rq,
     138             :                 struct request *r1, struct request *r2)
     139             : {
     140           0 :         return true;
     141             : }
     142       14778 : static inline bool blk_integrity_merge_bio(struct request_queue *rq,
     143             :                 struct request *r, struct bio *b)
     144             : {
     145       14778 :         return true;
     146             : }
     147             : static inline bool integrity_req_gap_back_merge(struct request *req,
     148             :                 struct bio *next)
     149             : {
     150             :         return false;
     151             : }
     152             : static inline bool integrity_req_gap_front_merge(struct request *req,
     153             :                 struct bio *bio)
     154             : {
     155             :         return false;
     156             : }
     157             : 
     158           0 : static inline void blk_flush_integrity(void)
     159             : {
     160           0 : }
     161        8738 : static inline bool bio_integrity_endio(struct bio *bio)
     162             : {
     163        8738 :         return true;
     164             : }
     165             : static inline void bio_integrity_free(struct bio *bio)
     166             : {
     167             : }
     168           9 : static inline void blk_integrity_add(struct gendisk *disk)
     169             : {
     170           9 : }
     171           0 : static inline void blk_integrity_del(struct gendisk *disk)
     172             : {
     173           0 : }
     174             : #endif /* CONFIG_BLK_DEV_INTEGRITY */
     175             : 
     176             : unsigned long blk_rq_timeout(unsigned long timeout);
     177             : void blk_add_timer(struct request *req);
     178             : 
     179             : bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
     180             :                 unsigned int nr_segs, struct request **same_queue_rq);
     181             : bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
     182             :                         struct bio *bio, unsigned int nr_segs);
     183             : 
     184             : void blk_account_io_start(struct request *req);
     185             : void blk_account_io_done(struct request *req, u64 now);
     186             : 
     187             : /*
     188             :  * Internal elevator interface
     189             :  */
     190             : #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
     191             : 
     192             : void blk_insert_flush(struct request *rq);
     193             : 
     194             : void elevator_init_mq(struct request_queue *q);
     195             : int elevator_switch_mq(struct request_queue *q,
     196             :                               struct elevator_type *new_e);
     197             : void __elevator_exit(struct request_queue *, struct elevator_queue *);
     198             : int elv_register_queue(struct request_queue *q, bool uevent);
     199             : void elv_unregister_queue(struct request_queue *q);
     200             : 
     201           0 : static inline void elevator_exit(struct request_queue *q,
     202             :                 struct elevator_queue *e)
     203             : {
     204           0 :         lockdep_assert_held(&q->sysfs_lock);
     205             : 
     206           0 :         blk_mq_sched_free_requests(q);
     207           0 :         __elevator_exit(q, e);
     208           0 : }
     209             : 
     210             : ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
     211             :                 char *buf);
     212             : ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
     213             :                 char *buf);
     214             : ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
     215             :                 char *buf);
     216             : ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
     217             :                 char *buf);
     218             : ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
     219             :                 const char *buf, size_t count);
     220             : ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
     221             : ssize_t part_timeout_store(struct device *, struct device_attribute *,
     222             :                                 const char *, size_t);
     223             : 
     224             : void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
     225             : int ll_back_merge_fn(struct request *req, struct bio *bio,
     226             :                 unsigned int nr_segs);
     227             : int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
     228             :                                 struct request *next);
     229             : unsigned int blk_recalc_rq_segments(struct request *rq);
     230             : void blk_rq_set_mixed_merge(struct request *rq);
     231             : bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
     232             : enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
     233             : 
     234             : int blk_dev_init(void);
     235             : 
     236             : /*
     237             :  * Contribute to IO statistics IFF:
     238             :  *
     239             :  *      a) it's attached to a gendisk, and
     240             :  *      b) the queue had IO stats enabled when this request was started
     241             :  */
     242       15366 : static inline bool blk_do_io_stat(struct request *rq)
     243             : {
     244       15366 :         return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
     245             : }
     246             : 
     247           1 : static inline void req_set_nomerge(struct request_queue *q, struct request *req)
     248             : {
     249           1 :         req->cmd_flags |= REQ_NOMERGE;
     250           1 :         if (req == q->last_merge)
     251           0 :                 q->last_merge = NULL;
     252             : }
     253             : 
     254             : /*
      255             :  * The max size one bio can handle is UINT_MAX, because bvec_iter.bi_size
      256             :  * is defined as 'unsigned int'; meanwhile, it has to be aligned to the
      257             :  * logical block size, which is the minimum unit accepted by hardware.
     258             :  */
     259           0 : static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
     260             : {
     261           0 :         return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
     262             : }
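
[Editorial worked example, not part of blk.h: evaluating the expression above by hand. For a power-of-two block size, round_down(x, y) is x & ~(y - 1), and >> 9 converts bytes to 512-byte sectors.]

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* 512-byte logical blocks: 0xfffffe00 bytes -> 8388607 sectors */
        printf("%u\n", (UINT_MAX & ~(512u - 1)) >> 9);
        /* 4096-byte logical blocks: 0xfffff000 bytes -> 8388600 sectors */
        printf("%u\n", (UINT_MAX & ~(4096u - 1)) >> 9);
        return 0;
}
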
     263             : 
     264             : /*
      265             :  * The max bio size which is aligned to q->limits.discard_granularity. This
      266             :  * is a hint for splitting a large discard bio in the generic block layer:
      267             :  * if the device driver then needs to split the discard bio into smaller
      268             :  * ones, their bi_size will most likely remain aligned to the
      269             :  * discard_granularity of the device's queue.
     270             :  */
     271           0 : static inline unsigned int bio_aligned_discard_max_sectors(
     272             :                                         struct request_queue *q)
     273             : {
     274           0 :         return round_down(UINT_MAX, q->limits.discard_granularity) >>
     275             :                         SECTOR_SHIFT;
     276             : }
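
[Editorial worked example, not part of blk.h: with a hypothetical discard_granularity of 1 MiB, round_down(UINT_MAX, 1 << 20) is 0xfff00000 bytes, and 0xfff00000 >> SECTOR_SHIFT (i.e. >> 9) is 8386560 sectors, so a discard bio split at this size stays 1 MiB-aligned.]
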
     277             : 
     278             : /*
     279             :  * Internal io_context interface
     280             :  */
     281             : void get_io_context(struct io_context *ioc);
     282             : struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
     283             : struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
     284             :                              gfp_t gfp_mask);
     285             : void ioc_clear_queue(struct request_queue *q);
     286             : 
     287             : int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
     288             : 
     289             : /*
     290             :  * Internal throttling interface
     291             :  */
     292             : #ifdef CONFIG_BLK_DEV_THROTTLING
     293             : extern int blk_throtl_init(struct request_queue *q);
     294             : extern void blk_throtl_exit(struct request_queue *q);
     295             : extern void blk_throtl_register_queue(struct request_queue *q);
     296             : bool blk_throtl_bio(struct bio *bio);
     297             : #else /* CONFIG_BLK_DEV_THROTTLING */
     298             : static inline int blk_throtl_init(struct request_queue *q) { return 0; }
     299             : static inline void blk_throtl_exit(struct request_queue *q) { }
     300           9 : static inline void blk_throtl_register_queue(struct request_queue *q) { }
     301        8737 : static inline bool blk_throtl_bio(struct bio *bio) { return false; }
     302             : #endif /* CONFIG_BLK_DEV_THROTTLING */
     303             : #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
     304             : extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
     305             : extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
     306             :         const char *page, size_t count);
     307             : extern void blk_throtl_bio_endio(struct bio *bio);
     308             : extern void blk_throtl_stat_add(struct request *rq, u64 time);
     309             : #else
     310        8739 : static inline void blk_throtl_bio_endio(struct bio *bio) { }
     311           0 : static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
     312             : #endif
     313             : 
     314             : #ifdef CONFIG_BOUNCE
     315             : extern int init_emergency_isa_pool(void);
     316             : extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
     317             : #else
     318           0 : static inline int init_emergency_isa_pool(void)
     319             : {
     320           0 :         return 0;
     321             : }
     322        8739 : static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
     323             : {
     324        8739 : }
     325             : #endif /* CONFIG_BOUNCE */
     326             : 
     327             : #ifdef CONFIG_BLK_CGROUP_IOLATENCY
     328             : extern int blk_iolatency_init(struct request_queue *q);
     329             : #else
     330             : static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
     331             : #endif
     332             : 
     333             : struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
     334             : 
     335             : #ifdef CONFIG_BLK_DEV_ZONED
     336             : void blk_queue_free_zone_bitmaps(struct request_queue *q);
     337             : void blk_queue_clear_zone_settings(struct request_queue *q);
     338             : #else
     339           0 : static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
     340             : static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
     341             : #endif
     342             : 
     343             : int blk_alloc_devt(struct block_device *part, dev_t *devt);
     344             : void blk_free_devt(dev_t devt);
     345             : char *disk_name(struct gendisk *hd, int partno, char *buf);
     346             : #define ADDPART_FLAG_NONE       0
     347             : #define ADDPART_FLAG_RAID       1
     348             : #define ADDPART_FLAG_WHOLEDISK  2
     349             : void delete_partition(struct block_device *part);
     350             : int bdev_add_partition(struct block_device *bdev, int partno,
     351             :                 sector_t start, sector_t length);
     352             : int bdev_del_partition(struct block_device *bdev, int partno);
     353             : int bdev_resize_partition(struct block_device *bdev, int partno,
     354             :                 sector_t start, sector_t length);
     355             : 
     356             : int bio_add_hw_page(struct request_queue *q, struct bio *bio,
     357             :                 struct page *page, unsigned int len, unsigned int offset,
     358             :                 unsigned int max_sectors, bool *same_page);
     359             : 
     360             : #endif /* BLK_INTERNAL_H */

Generated by: LCOV version 1.14