LCOV - code coverage report
Current view: top level - include/linux - blkdev.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:           113    231      48.9 %
Functions:       6      13       46.2 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _LINUX_BLKDEV_H
       3             : #define _LINUX_BLKDEV_H
       4             : 
       5             : #include <linux/sched.h>
       6             : #include <linux/sched/clock.h>
       7             : #include <linux/major.h>
       8             : #include <linux/genhd.h>
       9             : #include <linux/list.h>
      10             : #include <linux/llist.h>
      11             : #include <linux/minmax.h>
      12             : #include <linux/timer.h>
      13             : #include <linux/workqueue.h>
      14             : #include <linux/pagemap.h>
      15             : #include <linux/backing-dev-defs.h>
      16             : #include <linux/wait.h>
      17             : #include <linux/mempool.h>
      18             : #include <linux/pfn.h>
      19             : #include <linux/bio.h>
      20             : #include <linux/stringify.h>
      21             : #include <linux/gfp.h>
      22             : #include <linux/bsg.h>
      23             : #include <linux/smp.h>
      24             : #include <linux/rcupdate.h>
      25             : #include <linux/percpu-refcount.h>
      26             : #include <linux/scatterlist.h>
      27             : #include <linux/blkzoned.h>
      28             : #include <linux/pm.h>
      29             : 
      30             : struct module;
      31             : struct scsi_ioctl_command;
      32             : 
      33             : struct request_queue;
      34             : struct elevator_queue;
      35             : struct blk_trace;
      36             : struct request;
      37             : struct sg_io_hdr;
      38             : struct bsg_job;
      39             : struct blkcg_gq;
      40             : struct blk_flush_queue;
      41             : struct pr_ops;
      42             : struct rq_qos;
      43             : struct blk_queue_stats;
      44             : struct blk_stat_callback;
      45             : struct blk_keyslot_manager;
      46             : 
      47             : #define BLKDEV_MIN_RQ   4
      48             : #define BLKDEV_MAX_RQ   128     /* Default maximum */
      49             : 
      50             : /* Must be consistent with blk_mq_poll_stats_bkt() */
      51             : #define BLK_MQ_POLL_STATS_BKTS 16
      52             : 
      53             : /* Doing classic polling */
      54             : #define BLK_MQ_POLL_CLASSIC -1
      55             : 
      56             : /*
      57             :  * Maximum number of blkcg policies allowed to be registered concurrently.
      58             :  * Defined here to simplify include dependency.
      59             :  */
      60             : #define BLKCG_MAX_POLS          5
      61             : 
      62             : typedef void (rq_end_io_fn)(struct request *, blk_status_t);
      63             : 
       64             : /* request flags */
       65             : 
      66             : typedef __u32 __bitwise req_flags_t;
      67             : 
       68             : /* drive may already have started this one */
      69             : #define RQF_STARTED             ((__force req_flags_t)(1 << 1))
      70             : /* may not be passed by ioscheduler */
      71             : #define RQF_SOFTBARRIER         ((__force req_flags_t)(1 << 3))
      72             : /* request for flush sequence */
      73             : #define RQF_FLUSH_SEQ           ((__force req_flags_t)(1 << 4))
      74             : /* merge of different types, fail separately */
      75             : #define RQF_MIXED_MERGE         ((__force req_flags_t)(1 << 5))
      76             : /* track inflight for MQ */
      77             : #define RQF_MQ_INFLIGHT         ((__force req_flags_t)(1 << 6))
      78             : /* don't call prep for this one */
      79             : #define RQF_DONTPREP            ((__force req_flags_t)(1 << 7))
      80             : /* vaguely specified driver internal error.  Ignored by the block layer */
      81             : #define RQF_FAILED              ((__force req_flags_t)(1 << 10))
      82             : /* don't warn about errors */
      83             : #define RQF_QUIET               ((__force req_flags_t)(1 << 11))
      84             : /* elevator private data attached */
      85             : #define RQF_ELVPRIV             ((__force req_flags_t)(1 << 12))
      86             : /* account into disk and partition IO statistics */
      87             : #define RQF_IO_STAT             ((__force req_flags_t)(1 << 13))
      88             : /* request came from our alloc pool */
      89             : #define RQF_ALLOCED             ((__force req_flags_t)(1 << 14))
      90             : /* runtime pm request */
      91             : #define RQF_PM                  ((__force req_flags_t)(1 << 15))
      92             : /* on IO scheduler merge hash */
      93             : #define RQF_HASHED              ((__force req_flags_t)(1 << 16))
      94             : /* track IO completion time */
      95             : #define RQF_STATS               ((__force req_flags_t)(1 << 17))
      96             : /* Look at ->special_vec for the actual data payload instead of the
      97             :    bio chain. */
      98             : #define RQF_SPECIAL_PAYLOAD     ((__force req_flags_t)(1 << 18))
      99             : /* The per-zone write lock is held for this request */
     100             : #define RQF_ZONE_WRITE_LOCKED   ((__force req_flags_t)(1 << 19))
     101             : /* already slept for hybrid poll */
     102             : #define RQF_MQ_POLL_SLEPT       ((__force req_flags_t)(1 << 20))
     103             : /* ->timeout has been called, don't expire again */
     104             : #define RQF_TIMED_OUT           ((__force req_flags_t)(1 << 21))
     105             : 
     106             : /* flags that prevent us from merging requests: */
     107             : #define RQF_NOMERGE_FLAGS \
     108             :         (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
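
/*
 * Illustrative sketch, not part of the original header: the merge gate
 * implied by RQF_NOMERGE_FLAGS reduces to one mask test (see rq_mergeable()
 * further down).  The helper name is hypothetical.
 */
static inline bool example_flags_block_merge(req_flags_t flags)
{
        return flags & RQF_NOMERGE_FLAGS;       /* any bit set blocks merging */
}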
     109             : 
     110             : /*
     111             :  * Request state for blk-mq.
     112             :  */
     113             : enum mq_rq_state {
     114             :         MQ_RQ_IDLE              = 0,
     115             :         MQ_RQ_IN_FLIGHT         = 1,
     116             :         MQ_RQ_COMPLETE          = 2,
     117             : };
     118             : 
     119             : /*
     120             :  * Try to put the fields that are referenced together in the same cacheline.
     121             :  *
     122             :  * If you modify this structure, make sure to update blk_rq_init() and
     123             :  * especially blk_mq_rq_ctx_init() to take care of the added fields.
     124             :  */
     125             : struct request {
     126             :         struct request_queue *q;
     127             :         struct blk_mq_ctx *mq_ctx;
     128             :         struct blk_mq_hw_ctx *mq_hctx;
     129             : 
     130             :         unsigned int cmd_flags;         /* op and common flags */
     131             :         req_flags_t rq_flags;
     132             : 
     133             :         int tag;
     134             :         int internal_tag;
     135             : 
     136             :         /* the following two fields are internal, NEVER access directly */
     137             :         unsigned int __data_len;        /* total data len */
     138             :         sector_t __sector;              /* sector cursor */
     139             : 
     140             :         struct bio *bio;
     141             :         struct bio *biotail;
     142             : 
     143             :         struct list_head queuelist;
     144             : 
     145             :         /*
     146             :          * The hash is used inside the scheduler, and killed once the
     147             :          * request reaches the dispatch list. The ipi_list is only used
     148             :          * to queue the request for softirq completion, which is long
     149             :          * after the request has been unhashed (and even removed from
     150             :          * the dispatch list).
     151             :          */
     152             :         union {
     153             :                 struct hlist_node hash; /* merge hash */
     154             :                 struct llist_node ipi_list;
     155             :         };
     156             : 
     157             :         /*
     158             :          * The rb_node is only used inside the io scheduler, requests
     159             :          * are pruned when moved to the dispatch queue. So let the
     160             :          * completion_data share space with the rb_node.
     161             :          */
     162             :         union {
     163             :                 struct rb_node rb_node; /* sort/lookup */
     164             :                 struct bio_vec special_vec;
     165             :                 void *completion_data;
     166             :                 int error_count; /* for legacy drivers, don't use */
     167             :         };
     168             : 
     169             :         /*
      170             :          * Three pointers are available for the IO schedulers; if they need
      171             :          * more, they have to allocate them dynamically.  Flush requests are
     172             :          * never put on the IO scheduler. So let the flush fields share
     173             :          * space with the elevator data.
     174             :          */
     175             :         union {
     176             :                 struct {
     177             :                         struct io_cq            *icq;
     178             :                         void                    *priv[2];
     179             :                 } elv;
     180             : 
     181             :                 struct {
     182             :                         unsigned int            seq;
     183             :                         struct list_head        list;
     184             :                         rq_end_io_fn            *saved_end_io;
     185             :                 } flush;
     186             :         };
     187             : 
     188             :         struct gendisk *rq_disk;
     189             :         struct block_device *part;
     190             : #ifdef CONFIG_BLK_RQ_ALLOC_TIME
     191             :         /* Time that the first bio started allocating this request. */
     192             :         u64 alloc_time_ns;
     193             : #endif
     194             :         /* Time that this request was allocated for this IO. */
     195             :         u64 start_time_ns;
     196             :         /* Time that I/O was submitted to the device. */
     197             :         u64 io_start_time_ns;
     198             : 
     199             : #ifdef CONFIG_BLK_WBT
     200             :         unsigned short wbt_flags;
     201             : #endif
     202             :         /*
      203             :          * rq sectors used for blk stats. It has the same value
      204             :          * as blk_rq_sectors(rq), except that it is never zeroed
      205             :          * by completion.
     206             :          */
     207             :         unsigned short stats_sectors;
     208             : 
     209             :         /*
     210             :          * Number of scatter-gather DMA addr+len pairs after
     211             :          * physical address coalescing is performed.
     212             :          */
     213             :         unsigned short nr_phys_segments;
     214             : 
     215             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
     216             :         unsigned short nr_integrity_segments;
     217             : #endif
     218             : 
     219             : #ifdef CONFIG_BLK_INLINE_ENCRYPTION
     220             :         struct bio_crypt_ctx *crypt_ctx;
     221             :         struct blk_ksm_keyslot *crypt_keyslot;
     222             : #endif
     223             : 
     224             :         unsigned short write_hint;
     225             :         unsigned short ioprio;
     226             : 
     227             :         enum mq_rq_state state;
     228             :         refcount_t ref;
     229             : 
     230             :         unsigned int timeout;
     231             :         unsigned long deadline;
     232             : 
     233             :         union {
     234             :                 struct __call_single_data csd;
     235             :                 u64 fifo_time;
     236             :         };
     237             : 
     238             :         /*
     239             :          * completion callback.
     240             :          */
     241             :         rq_end_io_fn *end_io;
     242             :         void *end_io_data;
     243             : };
     244             : 
     245       14783 : static inline bool blk_op_is_scsi(unsigned int op)
     246             : {
     247       14783 :         return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
     248             : }
     249             : 
     250       14783 : static inline bool blk_op_is_private(unsigned int op)
     251             : {
     252       14783 :         return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
     253             : }
     254             : 
     255       14783 : static inline bool blk_rq_is_scsi(struct request *rq)
     256             : {
     257       14783 :         return blk_op_is_scsi(req_op(rq));
     258             : }
     259             : 
     260       14783 : static inline bool blk_rq_is_private(struct request *rq)
     261             : {
     262       14783 :         return blk_op_is_private(req_op(rq));
     263             : }
     264             : 
     265       14783 : static inline bool blk_rq_is_passthrough(struct request *rq)
     266             : {
     267       14787 :         return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
     268             : }
     269             : 
     270             : static inline bool bio_is_passthrough(struct bio *bio)
     271             : {
     272             :         unsigned op = bio_op(bio);
     273             : 
     274             :         return blk_op_is_scsi(op) || blk_op_is_private(op);
     275             : }
     276             : 
     277        3435 : static inline unsigned short req_get_ioprio(struct request *req)
     278             : {
     279        3435 :         return req->ioprio;
     280             : }
     281             : 
     282             : #include <linux/elevator.h>
     283             : 
     284             : struct blk_queue_ctx;
     285             : 
     286             : struct bio_vec;
     287             : 
     288             : enum blk_eh_timer_return {
      289             :         BLK_EH_DONE,            /* driver has completed the command */
     290             :         BLK_EH_RESET_TIMER,     /* reset timer and try again */
     291             : };
     292             : 
     293             : enum blk_queue_state {
     294             :         Queue_down,
     295             :         Queue_up,
     296             : };
     297             : 
     298             : #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
     299             : #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
     300             : 
     301             : #define BLK_SCSI_MAX_CMDS       (256)
     302             : #define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
     303             : 
     304             : /*
     305             :  * Zoned block device models (zoned limit).
     306             :  *
     307             :  * Note: This needs to be ordered from the least to the most severe
     308             :  * restrictions for the inheritance in blk_stack_limits() to work.
     309             :  */
     310             : enum blk_zoned_model {
     311             :         BLK_ZONED_NONE = 0,     /* Regular block device */
     312             :         BLK_ZONED_HA,           /* Host-aware zoned block device */
     313             :         BLK_ZONED_HM,           /* Host-managed zoned block device */
     314             : };
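
/*
 * Sketch (an assumption based on the ordering note above, not code from
 * this header): stacking drivers can inherit the most restrictive zoned
 * model by taking the numeric maximum.  Hypothetical helper name.
 */
static inline enum blk_zoned_model
example_stack_zoned_model(enum blk_zoned_model top, enum blk_zoned_model bottom)
{
        return max(top, bottom);        /* relies on least-to-most ordering */
}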
     315             : 
     316             : struct queue_limits {
     317             :         unsigned long           bounce_pfn;
     318             :         unsigned long           seg_boundary_mask;
     319             :         unsigned long           virt_boundary_mask;
     320             : 
     321             :         unsigned int            max_hw_sectors;
     322             :         unsigned int            max_dev_sectors;
     323             :         unsigned int            chunk_sectors;
     324             :         unsigned int            max_sectors;
     325             :         unsigned int            max_segment_size;
     326             :         unsigned int            physical_block_size;
     327             :         unsigned int            logical_block_size;
     328             :         unsigned int            alignment_offset;
     329             :         unsigned int            io_min;
     330             :         unsigned int            io_opt;
     331             :         unsigned int            max_discard_sectors;
     332             :         unsigned int            max_hw_discard_sectors;
     333             :         unsigned int            max_write_same_sectors;
     334             :         unsigned int            max_write_zeroes_sectors;
     335             :         unsigned int            max_zone_append_sectors;
     336             :         unsigned int            discard_granularity;
     337             :         unsigned int            discard_alignment;
     338             :         unsigned int            zone_write_granularity;
     339             : 
     340             :         unsigned short          max_segments;
     341             :         unsigned short          max_integrity_segments;
     342             :         unsigned short          max_discard_segments;
     343             : 
     344             :         unsigned char           misaligned;
     345             :         unsigned char           discard_misaligned;
     346             :         unsigned char           raid_partial_stripes_expensive;
     347             :         enum blk_zoned_model    zoned;
     348             : };
     349             : 
     350             : typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
     351             :                                void *data);
     352             : 
     353             : void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
     354             : 
     355             : #ifdef CONFIG_BLK_DEV_ZONED
     356             : 
     357             : #define BLK_ALL_ZONES  ((unsigned int)-1)
     358             : int blkdev_report_zones(struct block_device *bdev, sector_t sector,
     359             :                         unsigned int nr_zones, report_zones_cb cb, void *data);
     360             : unsigned int blkdev_nr_zones(struct gendisk *disk);
     361             : extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
     362             :                             sector_t sectors, sector_t nr_sectors,
     363             :                             gfp_t gfp_mask);
     364             : int blk_revalidate_disk_zones(struct gendisk *disk,
     365             :                               void (*update_driver_data)(struct gendisk *disk));
     366             : 
     367             : extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
     368             :                                      unsigned int cmd, unsigned long arg);
     369             : extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
     370             :                                   unsigned int cmd, unsigned long arg);
     371             : 
     372             : #else /* CONFIG_BLK_DEV_ZONED */
     373             : 
     374           0 : static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
     375             : {
     376           0 :         return 0;
     377             : }
     378             : 
     379             : static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
     380             :                                             fmode_t mode, unsigned int cmd,
     381             :                                             unsigned long arg)
     382             : {
     383             :         return -ENOTTY;
     384             : }
     385             : 
     386             : static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
     387             :                                          fmode_t mode, unsigned int cmd,
     388             :                                          unsigned long arg)
     389             : {
     390             :         return -ENOTTY;
     391             : }
     392             : 
     393             : #endif /* CONFIG_BLK_DEV_ZONED */
     394             : 
     395             : struct request_queue {
     396             :         struct request          *last_merge;
     397             :         struct elevator_queue   *elevator;
     398             : 
     399             :         struct percpu_ref       q_usage_counter;
     400             : 
     401             :         struct blk_queue_stats  *stats;
     402             :         struct rq_qos           *rq_qos;
     403             : 
     404             :         const struct blk_mq_ops *mq_ops;
     405             : 
     406             :         /* sw queues */
     407             :         struct blk_mq_ctx __percpu      *queue_ctx;
     408             : 
     409             :         unsigned int            queue_depth;
     410             : 
     411             :         /* hw dispatch queues */
     412             :         struct blk_mq_hw_ctx    **queue_hw_ctx;
     413             :         unsigned int            nr_hw_queues;
     414             : 
     415             :         struct backing_dev_info *backing_dev_info;
     416             : 
     417             :         /*
     418             :          * The queue owner gets to use this for whatever they like.
     419             :          * ll_rw_blk doesn't touch it.
     420             :          */
     421             :         void                    *queuedata;
     422             : 
     423             :         /*
     424             :          * various queue flags, see QUEUE_* below
     425             :          */
     426             :         unsigned long           queue_flags;
     427             :         /*
     428             :          * Number of contexts that have called blk_set_pm_only(). If this
     429             :          * counter is above zero then only RQF_PM requests are processed.
     430             :          */
     431             :         atomic_t                pm_only;
     432             : 
     433             :         /*
     434             :          * ida allocated id for this queue.  Used to index queues from
     435             :          * ioctx.
     436             :          */
     437             :         int                     id;
     438             : 
     439             :         /*
     440             :          * queue needs bounce pages for pages above this limit
     441             :          */
     442             :         gfp_t                   bounce_gfp;
     443             : 
     444             :         spinlock_t              queue_lock;
     445             : 
     446             :         /*
     447             :          * queue kobject
     448             :          */
     449             :         struct kobject kobj;
     450             : 
     451             :         /*
     452             :          * mq queue kobject
     453             :          */
     454             :         struct kobject *mq_kobj;
     455             : 
     456             : #ifdef  CONFIG_BLK_DEV_INTEGRITY
     457             :         struct blk_integrity integrity;
     458             : #endif  /* CONFIG_BLK_DEV_INTEGRITY */
     459             : 
     460             : #ifdef CONFIG_PM
     461             :         struct device           *dev;
     462             :         enum rpm_status         rpm_status;
     463             : #endif
     464             : 
     465             :         /*
     466             :          * queue settings
     467             :          */
     468             :         unsigned long           nr_requests;    /* Max # of requests */
     469             : 
     470             :         unsigned int            dma_pad_mask;
     471             :         unsigned int            dma_alignment;
     472             : 
     473             : #ifdef CONFIG_BLK_INLINE_ENCRYPTION
     474             :         /* Inline crypto capabilities */
     475             :         struct blk_keyslot_manager *ksm;
     476             : #endif
     477             : 
     478             :         unsigned int            rq_timeout;
     479             :         int                     poll_nsec;
     480             : 
     481             :         struct blk_stat_callback        *poll_cb;
     482             :         struct blk_rq_stat      poll_stat[BLK_MQ_POLL_STATS_BKTS];
     483             : 
     484             :         struct timer_list       timeout;
     485             :         struct work_struct      timeout_work;
     486             : 
     487             :         atomic_t                nr_active_requests_shared_sbitmap;
     488             : 
     489             :         struct list_head        icq_list;
     490             : #ifdef CONFIG_BLK_CGROUP
     491             :         DECLARE_BITMAP          (blkcg_pols, BLKCG_MAX_POLS);
     492             :         struct blkcg_gq         *root_blkg;
     493             :         struct list_head        blkg_list;
     494             : #endif
     495             : 
     496             :         struct queue_limits     limits;
     497             : 
     498             :         unsigned int            required_elevator_features;
     499             : 
     500             : #ifdef CONFIG_BLK_DEV_ZONED
     501             :         /*
     502             :          * Zoned block device information for request dispatch control.
     503             :          * nr_zones is the total number of zones of the device. This is always
     504             :          * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
     505             :          * bits which indicates if a zone is conventional (bit set) or
     506             :          * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
     507             :          * bits which indicates if a zone is write locked, that is, if a write
     508             :          * request targeting the zone was dispatched. All three fields are
     509             :          * initialized by the low level device driver (e.g. scsi/sd.c).
     510             :          * Stacking drivers (device mappers) may or may not initialize
     511             :          * these fields.
     512             :          *
     513             :          * Reads of this information must be protected with blk_queue_enter() /
     514             :          * blk_queue_exit(). Modifying this information is only allowed while
     515             :          * no requests are being processed. See also blk_mq_freeze_queue() and
     516             :          * blk_mq_unfreeze_queue().
     517             :          */
     518             :         unsigned int            nr_zones;
     519             :         unsigned long           *conv_zones_bitmap;
     520             :         unsigned long           *seq_zones_wlock;
     521             :         unsigned int            max_open_zones;
     522             :         unsigned int            max_active_zones;
     523             : #endif /* CONFIG_BLK_DEV_ZONED */
     524             : 
     525             :         /*
      526             :          * SG_IO / SCSI-generic settings
     527             :          */
     528             :         unsigned int            sg_timeout;
     529             :         unsigned int            sg_reserved_size;
     530             :         int                     node;
     531             :         struct mutex            debugfs_mutex;
     532             : #ifdef CONFIG_BLK_DEV_IO_TRACE
     533             :         struct blk_trace __rcu  *blk_trace;
     534             : #endif
     535             :         /*
     536             :          * for flush operations
     537             :          */
     538             :         struct blk_flush_queue  *fq;
     539             : 
     540             :         struct list_head        requeue_list;
     541             :         spinlock_t              requeue_lock;
     542             :         struct delayed_work     requeue_work;
     543             : 
     544             :         struct mutex            sysfs_lock;
     545             :         struct mutex            sysfs_dir_lock;
     546             : 
     547             :         /*
      548             :          * for reusing dead hctx instances when
      549             :          * nr_hw_queues is updated
     550             :          */
     551             :         struct list_head        unused_hctx_list;
     552             :         spinlock_t              unused_hctx_lock;
     553             : 
     554             :         int                     mq_freeze_depth;
     555             : 
     556             : #if defined(CONFIG_BLK_DEV_BSG)
     557             :         struct bsg_class_device bsg_dev;
     558             : #endif
     559             : 
     560             : #ifdef CONFIG_BLK_DEV_THROTTLING
     561             :         /* Throttle data */
     562             :         struct throtl_data *td;
     563             : #endif
     564             :         struct rcu_head         rcu_head;
     565             :         wait_queue_head_t       mq_freeze_wq;
     566             :         /*
     567             :          * Protect concurrent access to q_usage_counter by
     568             :          * percpu_ref_kill() and percpu_ref_reinit().
     569             :          */
     570             :         struct mutex            mq_freeze_lock;
     571             : 
     572             :         struct blk_mq_tag_set   *tag_set;
     573             :         struct list_head        tag_set_list;
     574             :         struct bio_set          bio_split;
     575             : 
     576             :         struct dentry           *debugfs_dir;
     577             : 
     578             : #ifdef CONFIG_BLK_DEBUG_FS
     579             :         struct dentry           *sched_debugfs_dir;
     580             :         struct dentry           *rqos_debugfs_dir;
     581             : #endif
     582             : 
     583             :         bool                    mq_sysfs_init_done;
     584             : 
     585             :         size_t                  cmd_size;
     586             : 
     587             : #define BLK_MAX_WRITE_HINTS     5
     588             :         u64                     write_hints[BLK_MAX_WRITE_HINTS];
     589             : };
     590             : 
     591             : /* Keep blk_queue_flag_name[] in sync with the definitions below */
     592             : #define QUEUE_FLAG_STOPPED      0       /* queue is stopped */
     593             : #define QUEUE_FLAG_DYING        1       /* queue being torn down */
     594             : #define QUEUE_FLAG_NOMERGES     3       /* disable merge attempts */
     595             : #define QUEUE_FLAG_SAME_COMP    4       /* complete on same CPU-group */
     596             : #define QUEUE_FLAG_FAIL_IO      5       /* fake timeout */
     597             : #define QUEUE_FLAG_NONROT       6       /* non-rotational device (SSD) */
     598             : #define QUEUE_FLAG_VIRT         QUEUE_FLAG_NONROT /* paravirt device */
     599             : #define QUEUE_FLAG_IO_STAT      7       /* do disk/partitions IO accounting */
     600             : #define QUEUE_FLAG_DISCARD      8       /* supports DISCARD */
     601             : #define QUEUE_FLAG_NOXMERGES    9       /* No extended merges */
     602             : #define QUEUE_FLAG_ADD_RANDOM   10      /* Contributes to random pool */
     603             : #define QUEUE_FLAG_SECERASE     11      /* supports secure erase */
     604             : #define QUEUE_FLAG_SAME_FORCE   12      /* force complete on same CPU */
     605             : #define QUEUE_FLAG_DEAD         13      /* queue tear-down finished */
     606             : #define QUEUE_FLAG_INIT_DONE    14      /* queue is initialized */
     607             : #define QUEUE_FLAG_STABLE_WRITES 15     /* don't modify blks until WB is done */
     608             : #define QUEUE_FLAG_POLL         16      /* IO polling enabled if set */
     609             : #define QUEUE_FLAG_WC           17      /* Write back caching */
     610             : #define QUEUE_FLAG_FUA          18      /* device supports FUA writes */
     611             : #define QUEUE_FLAG_DAX          19      /* device supports DAX */
     612             : #define QUEUE_FLAG_STATS        20      /* track IO start and completion times */
     613             : #define QUEUE_FLAG_POLL_STATS   21      /* collecting stats for hybrid polling */
     614             : #define QUEUE_FLAG_REGISTERED   22      /* queue has been registered to a disk */
     615             : #define QUEUE_FLAG_SCSI_PASSTHROUGH 23  /* queue supports SCSI commands */
     616             : #define QUEUE_FLAG_QUIESCED     24      /* queue has been quiesced */
     617             : #define QUEUE_FLAG_PCI_P2PDMA   25      /* device supports PCI p2p requests */
     618             : #define QUEUE_FLAG_ZONE_RESETALL 26     /* supports Zone Reset All */
     619             : #define QUEUE_FLAG_RQ_ALLOC_TIME 27     /* record rq->alloc_time_ns */
     620             : #define QUEUE_FLAG_HCTX_ACTIVE  28      /* at least one blk-mq hctx is active */
     621             : #define QUEUE_FLAG_NOWAIT       29      /* device supports NOWAIT */
     622             : 
     623             : #define QUEUE_FLAG_MQ_DEFAULT   ((1 << QUEUE_FLAG_IO_STAT) |              \
     624             :                                  (1 << QUEUE_FLAG_SAME_COMP) |            \
     625             :                                  (1 << QUEUE_FLAG_NOWAIT))
     626             : 
     627             : void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
     628             : void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
     629             : bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
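
/*
 * Usage sketch for blk_queue_flag_set() declared above: a driver marking
 * its queue non-rotational at probe time.  The wrapper is hypothetical;
 * only blk_queue_flag_set() and QUEUE_FLAG_NONROT come from this header.
 */
static inline void example_mark_queue_ssd(struct request_queue *q)
{
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
}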
     630             : 
     631             : #define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
     632             : #define blk_queue_dying(q)      test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
     633             : #define blk_queue_dead(q)       test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
     634             : #define blk_queue_init_done(q)  test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
     635             : #define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
     636             : #define blk_queue_noxmerges(q)  \
     637             :         test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
     638             : #define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
     639             : #define blk_queue_stable_writes(q) \
     640             :         test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
     641             : #define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
     642             : #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
     643             : #define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
     644             : #define blk_queue_zone_resetall(q)      \
     645             :         test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
     646             : #define blk_queue_secure_erase(q) \
     647             :         (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
     648             : #define blk_queue_dax(q)        test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
     649             : #define blk_queue_scsi_passthrough(q)   \
     650             :         test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
     651             : #define blk_queue_pci_p2pdma(q) \
     652             :         test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
     653             : #ifdef CONFIG_BLK_RQ_ALLOC_TIME
     654             : #define blk_queue_rq_alloc_time(q)      \
     655             :         test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
     656             : #else
     657             : #define blk_queue_rq_alloc_time(q)      false
     658             : #endif
     659             : 
     660             : #define blk_noretry_request(rq) \
     661             :         ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
     662             :                              REQ_FAILFAST_DRIVER))
     663             : #define blk_queue_quiesced(q)   test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
     664             : #define blk_queue_pm_only(q)    atomic_read(&(q)->pm_only)
     665             : #define blk_queue_fua(q)        test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
     666             : #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
     667             : #define blk_queue_nowait(q)     test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
     668             : 
     669             : extern void blk_set_pm_only(struct request_queue *q);
     670             : extern void blk_clear_pm_only(struct request_queue *q);
     671             : 
     672             : static inline bool blk_account_rq(struct request *rq)
     673             : {
     674             :         return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
     675             : }
     676             : 
     677             : #define list_entry_rq(ptr)      list_entry((ptr), struct request, queuelist)
     678             : 
     679             : #define rq_data_dir(rq)         (op_is_write(req_op(rq)) ? WRITE : READ)
     680             : 
     681             : #define rq_dma_dir(rq) \
     682             :         (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
     683             : 
     684             : #define dma_map_bvec(dev, bv, dir, attrs) \
     685             :         dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
     686             :         (dir), (attrs))
     687             : 
     688          18 : static inline bool queue_is_mq(struct request_queue *q)
     689             : {
     690          18 :         return q->mq_ops;
     691             : }
     692             : 
     693             : #ifdef CONFIG_PM
     694             : static inline enum rpm_status queue_rpm_status(struct request_queue *q)
     695             : {
     696             :         return q->rpm_status;
     697             : }
     698             : #else
     699             : static inline enum rpm_status queue_rpm_status(struct request_queue *q)
     700             : {
     701             :         return RPM_ACTIVE;
     702             : }
     703             : #endif
     704             : 
     705             : static inline enum blk_zoned_model
     706       20594 : blk_queue_zoned_model(struct request_queue *q)
     707             : {
     708       20594 :         if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
     709             :                 return q->limits.zoned;
     710       20594 :         return BLK_ZONED_NONE;
     711             : }
     712             : 
     713       20594 : static inline bool blk_queue_is_zoned(struct request_queue *q)
     714             : {
     715       20954 :         switch (blk_queue_zoned_model(q)) {
     716             :         case BLK_ZONED_HA:
     717             :         case BLK_ZONED_HM:
     718             :                 return true;
     719             :         default:
     720       20594 :                 return false;
     721             :         }
     722             : }
     723             : 
     724           0 : static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
     725             : {
     726           0 :         return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
     727             : }
     728             : 
     729             : #ifdef CONFIG_BLK_DEV_ZONED
     730             : static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
     731             : {
     732             :         return blk_queue_is_zoned(q) ? q->nr_zones : 0;
     733             : }
     734             : 
     735             : static inline unsigned int blk_queue_zone_no(struct request_queue *q,
     736             :                                              sector_t sector)
     737             : {
     738             :         if (!blk_queue_is_zoned(q))
     739             :                 return 0;
     740             :         return sector >> ilog2(q->limits.chunk_sectors);
     741             : }
     742             : 
     743             : static inline bool blk_queue_zone_is_seq(struct request_queue *q,
     744             :                                          sector_t sector)
     745             : {
     746             :         if (!blk_queue_is_zoned(q))
     747             :                 return false;
     748             :         if (!q->conv_zones_bitmap)
     749             :                 return true;
     750             :         return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
     751             : }
     752             : 
     753             : static inline void blk_queue_max_open_zones(struct request_queue *q,
     754             :                 unsigned int max_open_zones)
     755             : {
     756             :         q->max_open_zones = max_open_zones;
     757             : }
     758             : 
     759             : static inline unsigned int queue_max_open_zones(const struct request_queue *q)
     760             : {
     761             :         return q->max_open_zones;
     762             : }
     763             : 
     764             : static inline void blk_queue_max_active_zones(struct request_queue *q,
     765             :                 unsigned int max_active_zones)
     766             : {
     767             :         q->max_active_zones = max_active_zones;
     768             : }
     769             : 
     770             : static inline unsigned int queue_max_active_zones(const struct request_queue *q)
     771             : {
     772             :         return q->max_active_zones;
     773             : }
     774             : #else /* CONFIG_BLK_DEV_ZONED */
     775           0 : static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
     776             : {
     777           0 :         return 0;
     778             : }
     779             : static inline bool blk_queue_zone_is_seq(struct request_queue *q,
     780             :                                          sector_t sector)
     781             : {
     782             :         return false;
     783             : }
     784             : static inline unsigned int blk_queue_zone_no(struct request_queue *q,
     785             :                                              sector_t sector)
     786             : {
     787             :         return 0;
     788             : }
     789           0 : static inline unsigned int queue_max_open_zones(const struct request_queue *q)
     790             : {
     791           0 :         return 0;
     792             : }
     793           0 : static inline unsigned int queue_max_active_zones(const struct request_queue *q)
     794             : {
     795           0 :         return 0;
     796             : }
     797             : #endif /* CONFIG_BLK_DEV_ZONED */
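
/*
 * Sketch built from the zone helpers above (hypothetical function name):
 * checks that a sector maps to a valid zone.  On a non-zoned queue both
 * helpers return 0, so the check is false there.
 */
static inline bool example_sector_has_zone(struct request_queue *q,
                                           sector_t sector)
{
        return blk_queue_zone_no(q, sector) < blk_queue_nr_zones(q);
}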
     798             : 
     799        3254 : static inline bool rq_is_sync(struct request *rq)
     800             : {
     801        3947 :         return op_is_sync(rq->cmd_flags);
     802             : }
     803             : 
     804        9293 : static inline bool rq_mergeable(struct request *rq)
     805             : {
     806       18586 :         if (blk_rq_is_passthrough(rq))
     807             :                 return false;
     808             : 
     809        9293 :         if (req_op(rq) == REQ_OP_FLUSH)
     810             :                 return false;
     811             : 
     812        9293 :         if (req_op(rq) == REQ_OP_WRITE_ZEROES)
     813             :                 return false;
     814             : 
     815        9293 :         if (req_op(rq) == REQ_OP_ZONE_APPEND)
     816             :                 return false;
     817             : 
     818        9293 :         if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
     819             :                 return false;
     820        9293 :         if (rq->rq_flags & RQF_NOMERGE_FLAGS)
     821           0 :                 return false;
     822             : 
     823             :         return true;
     824             : }
     825             : 
     826           0 : static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
     827             : {
     828           0 :         if (bio_page(a) == bio_page(b) &&
     829           0 :             bio_offset(a) == bio_offset(b))
     830           0 :                 return true;
     831             : 
     832             :         return false;
     833             : }
     834             : 
     835             : static inline unsigned int blk_queue_depth(struct request_queue *q)
     836             : {
     837             :         if (q->queue_depth)
     838             :                 return q->queue_depth;
     839             : 
     840             :         return q->nr_requests;
     841             : }
     842             : 
     843             : extern unsigned long blk_max_low_pfn, blk_max_pfn;
     844             : 
     845             : /*
     846             :  * standard bounce addresses:
     847             :  *
     848             :  * BLK_BOUNCE_HIGH      : bounce all highmem pages
     849             :  * BLK_BOUNCE_ANY       : don't bounce anything
     850             :  * BLK_BOUNCE_ISA       : bounce pages above ISA DMA boundary
     851             :  */
     852             : 
     853             : #if BITS_PER_LONG == 32
     854             : #define BLK_BOUNCE_HIGH         ((u64)blk_max_low_pfn << PAGE_SHIFT)
     855             : #else
     856             : #define BLK_BOUNCE_HIGH         -1ULL
     857             : #endif
     858             : #define BLK_BOUNCE_ANY          (-1ULL)
     859             : #define BLK_BOUNCE_ISA          (DMA_BIT_MASK(24))
     860             : 
     861             : /*
     862             :  * default timeout for SG_IO if none specified
     863             :  */
     864             : #define BLK_DEFAULT_SG_TIMEOUT  (60 * HZ)
     865             : #define BLK_MIN_SG_TIMEOUT      (7 * HZ)
     866             : 
     867             : struct rq_map_data {
     868             :         struct page **pages;
     869             :         int page_order;
     870             :         int nr_entries;
     871             :         unsigned long offset;
     872             :         int null_mapped;
     873             :         int from_user;
     874             : };
     875             : 
     876             : struct req_iterator {
     877             :         struct bvec_iter iter;
     878             :         struct bio *bio;
     879             : };
     880             : 
     881             : /* This should not be used directly - use rq_for_each_segment */
     882             : #define for_each_bio(_bio)              \
     883             :         for (; _bio; _bio = _bio->bi_next)
     884             : #define __rq_for_each_bio(_bio, rq)     \
     885             :         if ((rq->bio))                       \
     886             :                 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
     887             : 
     888             : #define rq_for_each_segment(bvl, _rq, _iter)                    \
     889             :         __rq_for_each_bio(_iter.bio, _rq)                       \
     890             :                 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
     891             : 
     892             : #define rq_for_each_bvec(bvl, _rq, _iter)                       \
     893             :         __rq_for_each_bio(_iter.bio, _rq)                       \
     894             :                 bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
     895             : 
     896             : #define rq_iter_last(bvec, _iter)                               \
     897             :                 (_iter.bio->bi_next == NULL &&                       \
     898             :                  bio_iter_last(bvec, _iter.iter))
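
/*
 * Usage sketch for rq_for_each_segment() above: summing the data bytes of
 * a request segment by segment.  Hypothetical helper; for real requests
 * this should equal blk_rq_bytes() defined later in this header.
 */
static inline unsigned int example_rq_count_bytes(struct request *rq)
{
        struct req_iterator iter;
        struct bio_vec bvec;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, rq, iter)
                bytes += bvec.bv_len;
        return bytes;
}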
     899             : 
     900             : #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
     901             : # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
     902             : #endif
     903             : #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
     904             : extern void rq_flush_dcache_pages(struct request *rq);
     905             : #else
     906             : static inline void rq_flush_dcache_pages(struct request *rq)
     907             : {
     908             : }
     909             : #endif
     910             : 
     911             : extern int blk_register_queue(struct gendisk *disk);
     912             : extern void blk_unregister_queue(struct gendisk *disk);
     913             : blk_qc_t submit_bio_noacct(struct bio *bio);
     914             : extern void blk_rq_init(struct request_queue *q, struct request *rq);
     915             : extern void blk_put_request(struct request *);
     916             : extern struct request *blk_get_request(struct request_queue *, unsigned int op,
     917             :                                        blk_mq_req_flags_t flags);
     918             : extern int blk_lld_busy(struct request_queue *q);
     919             : extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
     920             :                              struct bio_set *bs, gfp_t gfp_mask,
     921             :                              int (*bio_ctr)(struct bio *, struct bio *, void *),
     922             :                              void *data);
     923             : extern void blk_rq_unprep_clone(struct request *rq);
     924             : extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
     925             :                                      struct request *rq);
     926             : extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
     927             : extern void blk_queue_split(struct bio **);
     928             : extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
     929             : extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
     930             :                               unsigned int, void __user *);
     931             : extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
     932             :                           unsigned int, void __user *);
     933             : extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
     934             :                          struct scsi_ioctl_command __user *);
     935             : extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
     936             : extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
     937             : 
     938             : extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
     939             : extern void blk_queue_exit(struct request_queue *q);
     940             : extern void blk_sync_queue(struct request_queue *q);
     941             : extern int blk_rq_map_user(struct request_queue *, struct request *,
     942             :                            struct rq_map_data *, void __user *, unsigned long,
     943             :                            gfp_t);
     944             : extern int blk_rq_unmap_user(struct bio *);
     945             : extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
     946             : extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
     947             :                                struct rq_map_data *, const struct iov_iter *,
     948             :                                gfp_t);
     949             : extern void blk_execute_rq(struct gendisk *, struct request *, int);
     950             : extern void blk_execute_rq_nowait(struct gendisk *,
     951             :                                   struct request *, int, rq_end_io_fn *);
     952             : 
     953             : /* Helper to convert REQ_OP_XXX to its string format XXX */
     954             : extern const char *blk_op_str(unsigned int op);
     955             : 
     956             : int blk_status_to_errno(blk_status_t status);
     957             : blk_status_t errno_to_blk_status(int errno);
     958             : 
     959             : int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
     960             : 
     961        4158 : static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
     962             : {
     963        4130 :         return bdev->bd_disk->queue;      /* this is never NULL */
     964             : }
     965             : 
     966             : /*
     967             :  * The basic unit of block I/O is a sector. It is used in a number of contexts
     968             :  * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
     969             :  * bytes. Variables of type sector_t represent an offset or size that is a
     970             :  * multiple of 512 bytes. Hence these two constants.
     971             :  */
     972             : #ifndef SECTOR_SHIFT
     973             : #define SECTOR_SHIFT 9
     974             : #endif
     975             : #ifndef SECTOR_SIZE
     976             : #define SECTOR_SIZE (1 << SECTOR_SHIFT)
     977             : #endif
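
/*
 * Sketch: byte/sector conversions with the constants above.  Hypothetical
 * helpers; the byte count is assumed to be sector-aligned.
 */
static inline sector_t example_bytes_to_sectors(u64 bytes)
{
        return bytes >> SECTOR_SHIFT;
}

static inline u64 example_sectors_to_bytes(sector_t sectors)
{
        return (u64)sectors << SECTOR_SHIFT;
}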
     978             : 
     979             : /*
     980             :  * blk_rq_pos()                 : the current sector
     981             :  * blk_rq_bytes()               : bytes left in the entire request
     982             :  * blk_rq_cur_bytes()           : bytes left in the current segment
     983             :  * blk_rq_err_bytes()           : bytes left till the next error boundary
     984             :  * blk_rq_sectors()             : sectors left in the entire request
     985             :  * blk_rq_cur_sectors()         : sectors left in the current segment
     986             :  * blk_rq_stats_sectors()       : sectors of the entire request used for stats
     987             :  */
     988       21826 : static inline sector_t blk_rq_pos(const struct request *rq)
     989             : {
     990        7146 :         return rq->__sector;
     991             : }
     992             : 
     993       19054 : static inline unsigned int blk_rq_bytes(const struct request *rq)
     994             : {
     995        4275 :         return rq->__data_len;
     996             : }
     997             : 
     998           0 : static inline int blk_rq_cur_bytes(const struct request *rq)
     999             : {
    1000           0 :         return rq->bio ? bio_cur_bytes(rq->bio) : 0;
    1001             : }
    1002             : 
    1003             : extern unsigned int blk_rq_err_bytes(const struct request *rq);
    1004             : 
    1005       14911 : static inline unsigned int blk_rq_sectors(const struct request *rq)
    1006             : {
    1007       14911 :         return blk_rq_bytes(rq) >> SECTOR_SHIFT;
    1008             : }
    1009             : 
    1010           0 : static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
    1011             : {
    1012           0 :         return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
    1013             : }
    1014             : 
    1015           0 : static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
    1016             : {
    1017           0 :         return rq->stats_sectors;
    1018             : }
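
As a hedged usage sketch, a driver preparing a command reads these accessors
rather than poking at the request fields directly; mydev_program_dma() is a
hypothetical hardware helper:

static void mydev_prep(struct request *rq)
{
        sector_t lba       = blk_rq_pos(rq);     /* current sector */
        unsigned int nsect = blk_rq_sectors(rq); /* sectors left in request */

        mydev_program_dma(lba, nsect);           /* hypothetical */
}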
    1019             : 
    1020             : #ifdef CONFIG_BLK_DEV_ZONED
    1021             : 
     1022             : /* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
    1023             : const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
    1024             : 
    1025             : static inline unsigned int blk_rq_zone_no(struct request *rq)
    1026             : {
    1027             :         return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
    1028             : }
    1029             : 
    1030             : static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
    1031             : {
    1032             :         return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
    1033             : }
    1034             : #endif /* CONFIG_BLK_DEV_ZONED */
    1035             : 
    1036             : /*
    1037             :  * Some commands like WRITE SAME have a payload or data transfer size which
    1038             :  * is different from the size of the request.  Any driver that supports such
    1039             :  * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
    1040             :  * calculate the data transfer size.
    1041             :  */
    1042           0 : static inline unsigned int blk_rq_payload_bytes(struct request *rq)
    1043             : {
    1044           0 :         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
    1045           0 :                 return rq->special_vec.bv_len;
    1046           0 :         return blk_rq_bytes(rq);
    1047             : }
    1048             : 
    1049             : /*
     1050             :  * Return the first full biovec in the request.  The caller must ensure
     1051             :  * the request has at least one bvec before calling this helper.
    1052             :  */
    1053             : static inline struct bio_vec req_bvec(struct request *rq)
    1054             : {
    1055             :         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
    1056             :                 return rq->special_vec;
    1057             :         return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
    1058             : }
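
A sketch, under the stated caller contract, of how a single-segment driver
might combine the two helpers above; mydev_map_segment() is hypothetical:

static void mydev_setup_data(struct request *rq)
{
        struct bio_vec bv;

        if (!blk_rq_payload_bytes(rq))
                return;                          /* data-less command */

        bv = req_bvec(rq);                       /* at least one bvec exists */
        mydev_map_segment(bv.bv_page, bv.bv_offset, bv.bv_len); /* hypothetical */
}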
    1059             : 
    1060        5486 : static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
    1061             :                                                      int op)
    1062             : {
    1063        5486 :         if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
    1064           0 :                 return min(q->limits.max_discard_sectors,
    1065             :                            UINT_MAX >> SECTOR_SHIFT);
    1066             : 
    1067        5486 :         if (unlikely(op == REQ_OP_WRITE_SAME))
    1068           0 :                 return q->limits.max_write_same_sectors;
    1069             : 
    1070        5486 :         if (unlikely(op == REQ_OP_WRITE_ZEROES))
    1071           0 :                 return q->limits.max_write_zeroes_sectors;
    1072             : 
    1073        5486 :         return q->limits.max_sectors;
    1074             : }
    1075             : 
    1076             : /*
     1077             :  * Return the maximum size of a request at a given offset. Only valid for
     1078             :  * file system requests.
    1079             :  */
    1080        1464 : static inline unsigned int blk_max_size_offset(struct request_queue *q,
    1081             :                                                sector_t offset,
    1082             :                                                unsigned int chunk_sectors)
    1083             : {
    1084        1464 :         if (!chunk_sectors) {
    1085        1464 :                 if (q->limits.chunk_sectors)
    1086             :                         chunk_sectors = q->limits.chunk_sectors;
    1087             :                 else
    1088        1464 :                         return q->limits.max_sectors;
    1089             :         }
    1090             : 
    1091           0 :         if (likely(is_power_of_2(chunk_sectors)))
    1092           0 :                 chunk_sectors -= offset & (chunk_sectors - 1);
    1093             :         else
    1094           0 :                 chunk_sectors -= sector_div(offset, chunk_sectors);
    1095             : 
    1096           0 :         return min(q->limits.max_sectors, chunk_sectors);
    1097             : }
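
Worked example (editor's illustration): with chunk_sectors = 128, a power of
two, and offset = 300, offset & (128 - 1) = 44, leaving 128 - 44 = 84 sectors
to the next chunk boundary, so the function returns min(max_sectors, 84).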
    1098             : 
    1099        5486 : static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
    1100             :                                                   sector_t offset)
    1101             : {
    1102        5486 :         struct request_queue *q = rq->q;
    1103             : 
    1104       10972 :         if (blk_rq_is_passthrough(rq))
    1105           0 :                 return q->limits.max_hw_sectors;
    1106             : 
    1107        5486 :         if (!q->limits.chunk_sectors ||
    1108           0 :             req_op(rq) == REQ_OP_DISCARD ||
    1109             :             req_op(rq) == REQ_OP_SECURE_ERASE)
    1110        5486 :                 return blk_queue_get_max_sectors(q, req_op(rq));
    1111             : 
    1112           0 :         return min(blk_max_size_offset(q, offset, 0),
    1113             :                         blk_queue_get_max_sectors(q, req_op(rq)));
    1114             : }
    1115             : 
    1116           0 : static inline unsigned int blk_rq_count_bios(struct request *rq)
    1117             : {
    1118           0 :         unsigned int nr_bios = 0;
    1119           0 :         struct bio *bio;
    1120             : 
    1121           0 :         __rq_for_each_bio(bio, rq)
    1122           0 :                 nr_bios++;
    1123             : 
    1124           0 :         return nr_bios;
    1125             : }
    1126             : 
    1127             : void blk_steal_bios(struct bio_list *list, struct request *rq);
    1128             : 
    1129             : /*
    1130             :  * Request completion related functions.
    1131             :  *
     1132             :  * blk_update_request() completes the given number of bytes and advances
     1133             :  * the request accordingly, without ending the request itself.
    1134             :  */
    1135             : extern bool blk_update_request(struct request *rq, blk_status_t error,
    1136             :                                unsigned int nr_bytes);
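
A hedged sketch of the usual partial-completion pattern in a driver's
completion path; 'done' is the byte count the hardware reported, and the
mydev_* naming is an assumption:

static void mydev_complete(struct request *rq, unsigned int done,
                           blk_status_t error)
{
        if (blk_update_request(rq, error, done))
                return;                  /* bytes remain; request stays live */
        __blk_mq_end_request(rq, error); /* all bytes done; end the request */
}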
    1137             : 
    1138             : extern void blk_abort_request(struct request *);
    1139             : 
    1140             : /*
    1141             :  * Access functions for manipulating queue properties
    1142             :  */
    1143             : extern void blk_cleanup_queue(struct request_queue *);
    1144             : extern void blk_queue_bounce_limit(struct request_queue *, u64);
    1145             : extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
    1146             : extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
    1147             : extern void blk_queue_max_segments(struct request_queue *, unsigned short);
    1148             : extern void blk_queue_max_discard_segments(struct request_queue *,
    1149             :                 unsigned short);
    1150             : extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
    1151             : extern void blk_queue_max_discard_sectors(struct request_queue *q,
    1152             :                 unsigned int max_discard_sectors);
    1153             : extern void blk_queue_max_write_same_sectors(struct request_queue *q,
    1154             :                 unsigned int max_write_same_sectors);
    1155             : extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
     1156             :                 unsigned int max_write_zeroes_sectors);
    1157             : extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
    1158             : extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
    1159             :                 unsigned int max_zone_append_sectors);
    1160             : extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
    1161             : void blk_queue_zone_write_granularity(struct request_queue *q,
    1162             :                                       unsigned int size);
    1163             : extern void blk_queue_alignment_offset(struct request_queue *q,
    1164             :                                        unsigned int alignment);
    1165             : void blk_queue_update_readahead(struct request_queue *q);
    1166             : extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
    1167             : extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
    1168             : extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
    1169             : extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
    1170             : extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
    1171             : extern void blk_set_default_limits(struct queue_limits *lim);
    1172             : extern void blk_set_stacking_limits(struct queue_limits *lim);
    1173             : extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
    1174             :                             sector_t offset);
    1175             : extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
    1176             :                               sector_t offset);
    1177             : extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
    1178             : extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
    1179             : extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
    1180             : extern void blk_queue_dma_alignment(struct request_queue *, int);
    1181             : extern void blk_queue_update_dma_alignment(struct request_queue *, int);
    1182             : extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
    1183             : extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
    1184             : extern void blk_queue_required_elevator_features(struct request_queue *q,
    1185             :                                                  unsigned int features);
    1186             : extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
    1187             :                                               struct device *dev);
    1188             : 
    1189             : /*
    1190             :  * Number of physical segments as sent to the device.
    1191             :  *
    1192             :  * Normally this is the number of discontiguous data segments sent by the
     1193             :  * submitter.  But for data-less commands like discard we might have no
     1194             :  * actual data segments submitted, yet the driver might have to add its
    1195             :  * own special payload.  In that case we still return 1 here so that this
    1196             :  * special payload will be mapped.
    1197             :  */
    1198        3372 : static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
    1199             : {
    1200        3372 :         if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
    1201             :                 return 1;
    1202        3372 :         return rq->nr_phys_segments;
    1203             : }
    1204             : 
    1205             : /*
    1206             :  * Number of discard segments (or ranges) the driver needs to fill in.
    1207             :  * Each discard bio merged into a request is counted as one segment.
    1208             :  */
    1209           0 : static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
    1210             : {
    1211           0 :         return max_t(unsigned short, rq->nr_phys_segments, 1);
    1212             : }
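
Sketch of the matching driver pattern: allocate blk_rq_nr_discard_segments()
ranges, then fill one per merged bio. struct mydev_range and its fields are
hypothetical:

static void mydev_setup_discard(struct request *rq, struct mydev_range *r)
{
        struct bio *bio;

        __rq_for_each_bio(bio, rq) {     /* one range per merged discard bio */
                r->lba = bio->bi_iter.bi_sector;
                r->len = bio_sectors(bio);
                r++;
        }
}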
    1213             : 
    1214             : int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
    1215             :                 struct scatterlist *sglist, struct scatterlist **last_sg);
    1216        3434 : static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
    1217             :                 struct scatterlist *sglist)
    1218             : {
    1219        3434 :         struct scatterlist *last_sg = NULL;
    1220             : 
    1221        3434 :         return __blk_rq_map_sg(q, rq, sglist, &last_sg);
    1222             : }
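
A usage sketch, assuming a scatterlist preallocated with at least
blk_rq_nr_phys_segments() entries:

static int mydev_map_request(struct request *rq, struct scatterlist *sgl)
{
        sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
        /* returns the number of scatterlist entries actually used */
        return blk_rq_map_sg(rq->q, rq, sgl);
}
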
    1223             : extern void blk_dump_rq_flags(struct request *, char *);
    1224             : 
    1225             : bool __must_check blk_get_queue(struct request_queue *);
    1226             : struct request_queue *blk_alloc_queue(int node_id);
    1227             : extern void blk_put_queue(struct request_queue *);
    1228             : extern void blk_set_queue_dying(struct request_queue *);
    1229             : 
    1230             : #ifdef CONFIG_BLOCK
    1231             : /*
    1232             :  * blk_plug permits building a queue of related requests by holding the I/O
    1233             :  * fragments for a short period. This allows merging of sequential requests
     1234             :  * into a single larger request. As the requests are moved from a per-task
     1235             :  * list to the device's request_queue in a batch, this results in improved
     1236             :  * scalability, as contention on the request_queue lock is reduced.
     1237             :  *
     1238             :  * It is ok not to disable preemption when adding the request to the plug list
     1239             :  * or when attempting a merge, because blk_schedule_flush_plug() will only flush
    1240             :  * the plug list when the task sleeps by itself. For details, please see
    1241             :  * schedule() where blk_schedule_flush_plug() is called.
    1242             :  */
    1243             : struct blk_plug {
    1244             :         struct list_head mq_list; /* blk-mq requests */
    1245             :         struct list_head cb_list; /* md requires an unplug callback */
    1246             :         unsigned short rq_count;
    1247             :         bool multiple_queues;
    1248             :         bool nowait;
    1249             : };
    1250             : #define BLK_MAX_REQUEST_COUNT 16
    1251             : #define BLK_PLUG_FLUSH_SIZE (128 * 1024)
    1252             : 
    1253             : struct blk_plug_cb;
    1254             : typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
    1255             : struct blk_plug_cb {
    1256             :         struct list_head list;
    1257             :         blk_plug_cb_fn callback;
    1258             :         void *data;
    1259             : };
    1260             : extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
    1261             :                                              void *data, int size);
    1262             : extern void blk_start_plug(struct blk_plug *);
    1263             : extern void blk_finish_plug(struct blk_plug *);
    1264             : extern void blk_flush_plug_list(struct blk_plug *, bool);
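
A minimal sketch of plugging from the submitter's side: batch a series of
bios under one plug so sequential ones can merge before the queue sees them:

static void submit_batch(struct bio **bios, int n)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < n; i++)
                submit_bio(bios[i]);
        blk_finish_plug(&plug);          /* flushes the batched requests */
}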
    1265             : 
    1266           2 : static inline void blk_flush_plug(struct task_struct *tsk)
    1267             : {
    1268           2 :         struct blk_plug *plug = tsk->plug;
    1269             : 
    1270           2 :         if (plug)
    1271           2 :                 blk_flush_plug_list(plug, false);
    1272           2 : }
    1273             : 
    1274        1875 : static inline void blk_schedule_flush_plug(struct task_struct *tsk)
    1275             : {
    1276        1875 :         struct blk_plug *plug = tsk->plug;
    1277             : 
    1278        1875 :         if (plug)
    1279          14 :                 blk_flush_plug_list(plug, true);
    1280        1875 : }
    1281             : 
    1282       13944 : static inline bool blk_needs_flush_plug(struct task_struct *tsk)
    1283             : {
    1284       13944 :         struct blk_plug *plug = tsk->plug;
    1285             : 
    1286       13958 :         return plug &&
    1287          14 :                  (!list_empty(&plug->mq_list) ||
    1288          14 :                  !list_empty(&plug->cb_list));
    1289             : }
    1290             : 
    1291             : int blkdev_issue_flush(struct block_device *bdev);
    1292             : long nr_blockdev_pages(void);
    1293             : #else /* CONFIG_BLOCK */
    1294             : struct blk_plug {
    1295             : };
    1296             : 
    1297             : static inline void blk_start_plug(struct blk_plug *plug)
    1298             : {
    1299             : }
    1300             : 
    1301             : static inline void blk_finish_plug(struct blk_plug *plug)
    1302             : {
    1303             : }
    1304             : 
    1305             : static inline void blk_flush_plug(struct task_struct *task)
    1306             : {
    1307             : }
    1308             : 
    1309             : static inline void blk_schedule_flush_plug(struct task_struct *task)
    1310             : {
    1311             : }
    1312             : 
    1314             : static inline bool blk_needs_flush_plug(struct task_struct *tsk)
    1315             : {
    1316             :         return false;
    1317             : }
    1318             : 
    1319             : static inline int blkdev_issue_flush(struct block_device *bdev)
    1320             : {
    1321             :         return 0;
    1322             : }
    1323             : 
    1324             : static inline long nr_blockdev_pages(void)
    1325             : {
    1326             :         return 0;
    1327             : }
    1328             : #endif /* CONFIG_BLOCK */
    1329             : 
    1330             : extern void blk_io_schedule(void);
    1331             : 
    1332             : extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
    1333             :                 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
    1334             : 
    1335             : #define BLKDEV_DISCARD_SECURE   (1 << 0)  /* issue a secure erase */
    1336             : 
    1337             : extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
    1338             :                 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
    1339             : extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
    1340             :                 sector_t nr_sects, gfp_t gfp_mask, int flags,
    1341             :                 struct bio **biop);
    1342             : 
    1343             : #define BLKDEV_ZERO_NOUNMAP     (1 << 0)  /* do not free blocks */
    1344             : #define BLKDEV_ZERO_NOFALLBACK  (1 << 1)  /* don't write explicit zeroes */
    1345             : 
    1346             : extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
    1347             :                 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
    1348             :                 unsigned flags);
    1349             : extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
    1350             :                 sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
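
A hedged example combining the helpers above: discard a range, then zero it
without unmapping; sector and nr_sects are in 512-byte units:

static int wipe_range(struct block_device *bdev, sector_t sector,
                      sector_t nr_sects)
{
        int ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);

        if (ret)
                return ret;
        return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
                                    BLKDEV_ZERO_NOUNMAP);
}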
    1351             : 
    1352           0 : static inline int sb_issue_discard(struct super_block *sb, sector_t block,
    1353             :                 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
    1354             : {
    1355           0 :         return blkdev_issue_discard(sb->s_bdev,
    1356             :                                     block << (sb->s_blocksize_bits -
    1357             :                                               SECTOR_SHIFT),
    1358           0 :                                     nr_blocks << (sb->s_blocksize_bits -
    1359             :                                                   SECTOR_SHIFT),
    1360             :                                     gfp_mask, flags);
    1361             : }
    1362           0 : static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
    1363             :                 sector_t nr_blocks, gfp_t gfp_mask)
    1364             : {
    1365           0 :         return blkdev_issue_zeroout(sb->s_bdev,
    1366             :                                     block << (sb->s_blocksize_bits -
    1367             :                                               SECTOR_SHIFT),
    1368           0 :                                     nr_blocks << (sb->s_blocksize_bits -
    1369             :                                                   SECTOR_SHIFT),
    1370             :                                     gfp_mask, 0);
    1371             : }
    1372             : 
    1373             : extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
    1374             : 
    1375          75 : static inline bool bdev_is_partition(struct block_device *bdev)
    1376             : {
    1377          65 :         return bdev->bd_partno;
    1378             : }
    1379             : 
    1380             : enum blk_default_limits {
    1381             :         BLK_MAX_SEGMENTS        = 128,
    1382             :         BLK_SAFE_MAX_SECTORS    = 255,
    1383             :         BLK_DEF_MAX_SECTORS     = 2560,
    1384             :         BLK_MAX_SEGMENT_SIZE    = 65536,
    1385             :         BLK_SEG_BOUNDARY_MASK   = 0xFFFFFFFFUL,
    1386             : };
    1387             : 
    1388        8145 : static inline unsigned long queue_segment_boundary(const struct request_queue *q)
    1389             : {
    1390        8145 :         return q->limits.seg_boundary_mask;
    1391             : }
    1392             : 
    1393       10739 : static inline unsigned long queue_virt_boundary(const struct request_queue *q)
    1394             : {
    1395       10739 :         return q->limits.virt_boundary_mask;
    1396             : }
    1397             : 
    1398           9 : static inline unsigned int queue_max_sectors(const struct request_queue *q)
    1399             : {
    1400           9 :         return q->limits.max_sectors;
    1401             : }
    1402             : 
    1403           4 : static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
    1404             : {
    1405           4 :         return q->limits.max_hw_sectors;
    1406             : }
    1407             : 
    1408        6952 : static inline unsigned short queue_max_segments(const struct request_queue *q)
    1409             : {
    1410        6952 :         return q->limits.max_segments;
    1411             : }
    1412             : 
    1413           0 : static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
    1414             : {
    1415           0 :         return q->limits.max_discard_segments;
    1416             : }
    1417             : 
    1418        8145 : static inline unsigned int queue_max_segment_size(const struct request_queue *q)
    1419             : {
    1420        8145 :         return q->limits.max_segment_size;
    1421             : }
    1422             : 
    1423           0 : static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
    1424             : {
    1426           0 :         const struct queue_limits *l = &q->limits;
    1427             : 
    1428           0 :         return min(l->max_zone_append_sectors, l->max_sectors);
    1429             : }
    1430             : 
    1431        5627 : static inline unsigned queue_logical_block_size(const struct request_queue *q)
    1432             : {
    1433        5627 :         int retval = 512;
    1434             : 
    1435        5627 :         if (q && q->limits.logical_block_size)
    1436        5627 :                 retval = q->limits.logical_block_size;
    1437             : 
    1438        5627 :         return retval;
    1439             : }
    1440             : 
    1441        4158 : static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
    1442             : {
    1443        8316 :         return queue_logical_block_size(bdev_get_queue(bdev));
    1444             : }
    1445             : 
    1446        1464 : static inline unsigned int queue_physical_block_size(const struct request_queue *q)
    1447             : {
    1448        1464 :         return q->limits.physical_block_size;
    1449             : }
    1450             : 
    1451           0 : static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
    1452             : {
    1453           0 :         return queue_physical_block_size(bdev_get_queue(bdev));
    1454             : }
    1455             : 
    1456           0 : static inline unsigned int queue_io_min(const struct request_queue *q)
    1457             : {
    1458           0 :         return q->limits.io_min;
    1459             : }
    1460             : 
    1461           0 : static inline int bdev_io_min(struct block_device *bdev)
    1462             : {
    1463           0 :         return queue_io_min(bdev_get_queue(bdev));
    1464             : }
    1465             : 
    1466           9 : static inline unsigned int queue_io_opt(const struct request_queue *q)
    1467             : {
    1468           9 :         return q->limits.io_opt;
    1469             : }
    1470             : 
    1471           0 : static inline int bdev_io_opt(struct block_device *bdev)
    1472             : {
    1473           0 :         return queue_io_opt(bdev_get_queue(bdev));
    1474             : }
    1475             : 
    1476             : static inline unsigned int
    1477           0 : queue_zone_write_granularity(const struct request_queue *q)
    1478             : {
    1479           0 :         return q->limits.zone_write_granularity;
    1480             : }
    1481             : 
    1482             : static inline unsigned int
    1483             : bdev_zone_write_granularity(struct block_device *bdev)
    1484             : {
    1485             :         return queue_zone_write_granularity(bdev_get_queue(bdev));
    1486             : }
    1487             : 
    1488           0 : static inline int queue_alignment_offset(const struct request_queue *q)
    1489             : {
    1490           0 :         if (q->limits.misaligned)
    1491             :                 return -1;
    1492             : 
    1493           0 :         return q->limits.alignment_offset;
    1494             : }
    1495             : 
    1496           0 : static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
    1497             : {
    1498           0 :         unsigned int granularity = max(lim->physical_block_size, lim->io_min);
    1499           0 :         unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
    1500             :                 << SECTOR_SHIFT;
    1501             : 
    1502           0 :         return (granularity + lim->alignment_offset - alignment) % granularity;
    1503             : }
    1504             : 
    1505           0 : static inline int bdev_alignment_offset(struct block_device *bdev)
    1506             : {
    1507           0 :         struct request_queue *q = bdev_get_queue(bdev);
    1508             : 
    1509           0 :         if (q->limits.misaligned)
    1510             :                 return -1;
    1511           0 :         if (bdev_is_partition(bdev))
    1512           0 :                 return queue_limit_alignment_offset(&q->limits,
    1513             :                                 bdev->bd_start_sect);
    1514           0 :         return q->limits.alignment_offset;
    1515             : }
    1516             : 
    1517           0 : static inline int queue_discard_alignment(const struct request_queue *q)
    1518             : {
    1519           0 :         if (q->limits.discard_misaligned)
    1520             :                 return -1;
    1521             : 
    1522           0 :         return q->limits.discard_alignment;
    1523             : }
    1524             : 
    1525           0 : static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
    1526             : {
    1527           0 :         unsigned int alignment, granularity, offset;
    1528             : 
    1529           0 :         if (!lim->max_discard_sectors)
    1530             :                 return 0;
    1531             : 
    1532             :         /* Why are these in bytes, not sectors? */
    1533           0 :         alignment = lim->discard_alignment >> SECTOR_SHIFT;
    1534           0 :         granularity = lim->discard_granularity >> SECTOR_SHIFT;
    1535           0 :         if (!granularity)
    1536             :                 return 0;
    1537             : 
    1538             :         /* Offset of the partition start in 'granularity' sectors */
    1539           0 :         offset = sector_div(sector, granularity);
    1540             : 
    1541             :         /* And why do we do this modulus *again* in blkdev_issue_discard()? */
    1542           0 :         offset = (granularity + alignment - offset) % granularity;
    1543             : 
    1544             :         /* Turn it back into bytes, gaah */
    1545           0 :         return offset << SECTOR_SHIFT;
    1546             : }
    1547             : 
    1548             : static inline int bdev_discard_alignment(struct block_device *bdev)
    1549             : {
    1550             :         struct request_queue *q = bdev_get_queue(bdev);
    1551             : 
    1552             :         if (bdev_is_partition(bdev))
    1553             :                 return queue_limit_discard_alignment(&q->limits,
    1554             :                                 bdev->bd_start_sect);
    1555             :         return q->limits.discard_alignment;
    1556             : }
    1557             : 
    1558           0 : static inline unsigned int bdev_write_same(struct block_device *bdev)
    1559             : {
    1560           0 :         struct request_queue *q = bdev_get_queue(bdev);
    1561             : 
    1562           0 :         if (q)
    1563           0 :                 return q->limits.max_write_same_sectors;
    1564             : 
    1565             :         return 0;
    1566             : }
    1567             : 
    1568           0 : static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
    1569             : {
    1570           0 :         struct request_queue *q = bdev_get_queue(bdev);
    1571             : 
    1572           0 :         if (q)
    1573           0 :                 return q->limits.max_write_zeroes_sectors;
    1574             : 
    1575             :         return 0;
    1576             : }
    1577             : 
    1578           0 : static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
    1579             : {
    1580           0 :         struct request_queue *q = bdev_get_queue(bdev);
    1581             : 
    1582           0 :         if (q)
    1583             :                 return blk_queue_zoned_model(q);
    1584             : 
    1585             :         return BLK_ZONED_NONE;
    1586             : }
    1587             : 
    1588             : static inline bool bdev_is_zoned(struct block_device *bdev)
    1589             : {
    1590             :         struct request_queue *q = bdev_get_queue(bdev);
    1591             : 
    1592             :         if (q)
    1593             :                 return blk_queue_is_zoned(q);
    1594             : 
    1595             :         return false;
    1596             : }
    1597             : 
    1598           0 : static inline sector_t bdev_zone_sectors(struct block_device *bdev)
    1599             : {
    1600           0 :         struct request_queue *q = bdev_get_queue(bdev);
    1601             : 
    1602           0 :         if (q)
    1603             :                 return blk_queue_zone_sectors(q);
    1604             :         return 0;
    1605             : }
    1606             : 
    1607             : static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
    1608             : {
    1609             :         struct request_queue *q = bdev_get_queue(bdev);
    1610             : 
    1611             :         if (q)
    1612             :                 return queue_max_open_zones(q);
    1613             :         return 0;
    1614             : }
    1615             : 
    1616             : static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
    1617             : {
    1618             :         struct request_queue *q = bdev_get_queue(bdev);
    1619             : 
    1620             :         if (q)
    1621             :                 return queue_max_active_zones(q);
    1622             :         return 0;
    1623             : }
    1624             : 
    1625           2 : static inline int queue_dma_alignment(const struct request_queue *q)
    1626             : {
    1627           2 :         return q ? q->dma_alignment : 511;
    1628             : }
    1629             : 
    1630           2 : static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
    1631             :                                  unsigned int len)
    1632             : {
    1633           2 :         unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
    1634           2 :         return !(addr & alignment) && !(len & alignment);
    1635             : }
    1636             : 
    1637             : /* assumes size > 256 */
    1638           3 : static inline unsigned int blksize_bits(unsigned int size)
    1639             : {
    1640          10 :         unsigned int bits = 8;
    1641       16604 :         do {
    1642       16604 :                 bits++;
    1643       16604 :                 size >>= 1;
    1644       16604 :         } while (size > 256);
    1645        4142 :         return bits;
    1646             : }
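
Worked example (editor's illustration): blksize_bits(4096) halves 4096 ->
2048 -> 1024 -> 512 -> 256 while stepping bits from 8 up to 12, so a
4096-byte block size yields 12; block_size() below is the inverse, 1 << bits.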
    1647             : 
    1648           2 : static inline unsigned int block_size(struct block_device *bdev)
    1649             : {
    1650           2 :         return 1 << bdev->bd_inode->i_blkbits;
    1651             : }
    1652             : 
    1653             : int kblockd_schedule_work(struct work_struct *work);
    1654             : int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
    1655             : 
    1656             : #define MODULE_ALIAS_BLOCKDEV(major,minor) \
    1657             :         MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
    1658             : #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
    1659             :         MODULE_ALIAS("block-major-" __stringify(major) "-*")
    1660             : 
    1661             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
    1662             : 
    1663             : enum blk_integrity_flags {
    1664             :         BLK_INTEGRITY_VERIFY            = 1 << 0,
    1665             :         BLK_INTEGRITY_GENERATE          = 1 << 1,
    1666             :         BLK_INTEGRITY_DEVICE_CAPABLE    = 1 << 2,
    1667             :         BLK_INTEGRITY_IP_CHECKSUM       = 1 << 3,
    1668             : };
    1669             : 
    1670             : struct blk_integrity_iter {
    1671             :         void                    *prot_buf;
    1672             :         void                    *data_buf;
    1673             :         sector_t                seed;
    1674             :         unsigned int            data_size;
    1675             :         unsigned short          interval;
    1676             :         const char              *disk_name;
    1677             : };
    1678             : 
    1679             : typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
    1680             : typedef void (integrity_prepare_fn) (struct request *);
    1681             : typedef void (integrity_complete_fn) (struct request *, unsigned int);
    1682             : 
    1683             : struct blk_integrity_profile {
    1684             :         integrity_processing_fn         *generate_fn;
    1685             :         integrity_processing_fn         *verify_fn;
    1686             :         integrity_prepare_fn            *prepare_fn;
    1687             :         integrity_complete_fn           *complete_fn;
    1688             :         const char                      *name;
    1689             : };
    1690             : 
    1691             : extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
    1692             : extern void blk_integrity_unregister(struct gendisk *);
    1693             : extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
    1694             : extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
    1695             :                                    struct scatterlist *);
    1696             : extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
    1697             : 
    1698             : static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
    1699             : {
    1700             :         struct blk_integrity *bi = &disk->queue->integrity;
    1701             : 
    1702             :         if (!bi->profile)
    1703             :                 return NULL;
    1704             : 
    1705             :         return bi;
    1706             : }
    1707             : 
    1708             : static inline
    1709             : struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
    1710             : {
    1711             :         return blk_get_integrity(bdev->bd_disk);
    1712             : }
    1713             : 
    1714             : static inline bool
    1715             : blk_integrity_queue_supports_integrity(struct request_queue *q)
    1716             : {
    1717             :         return q->integrity.profile;
    1718             : }
    1719             : 
    1720             : static inline bool blk_integrity_rq(struct request *rq)
    1721             : {
    1722             :         return rq->cmd_flags & REQ_INTEGRITY;
    1723             : }
    1724             : 
    1725             : static inline void blk_queue_max_integrity_segments(struct request_queue *q,
    1726             :                                                     unsigned int segs)
    1727             : {
    1728             :         q->limits.max_integrity_segments = segs;
    1729             : }
    1730             : 
    1731             : static inline unsigned short
    1732             : queue_max_integrity_segments(const struct request_queue *q)
    1733             : {
    1734             :         return q->limits.max_integrity_segments;
    1735             : }
    1736             : 
    1737             : /**
    1738             :  * bio_integrity_intervals - Return number of integrity intervals for a bio
    1739             :  * @bi:         blk_integrity profile for device
    1740             :  * @sectors:    Size of the bio in 512-byte sectors
    1741             :  *
     1742             :  * Description: The block layer calculates everything in 512-byte
     1743             :  * sectors, but integrity metadata is done in terms of the data integrity
    1744             :  * interval size of the storage device.  Convert the block layer sectors
    1745             :  * to the appropriate number of integrity intervals.
    1746             :  */
    1747             : static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
    1748             :                                                    unsigned int sectors)
    1749             : {
    1750             :         return sectors >> (bi->interval_exp - 9);
    1751             : }
    1752             : 
    1753             : static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
    1754             :                                                unsigned int sectors)
    1755             : {
    1756             :         return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
    1757             : }
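
Worked example (editor's illustration): for a device with 4096-byte
integrity intervals (interval_exp = 12) and an 8-byte tuple, a 32-sector bio
spans 32 >> (12 - 9) = 4 intervals, so bio_integrity_bytes() is
4 * 8 = 32 bytes of metadata.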
    1758             : 
    1759             : /*
    1760             :  * Return the first bvec that contains integrity data.  Only drivers that are
    1761             :  * limited to a single integrity segment should use this helper.
    1762             :  */
    1763             : static inline struct bio_vec *rq_integrity_vec(struct request *rq)
    1764             : {
    1765             :         if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
    1766             :                 return NULL;
    1767             :         return rq->bio->bi_integrity->bip_vec;
    1768             : }
    1769             : 
    1770             : #else /* CONFIG_BLK_DEV_INTEGRITY */
    1771             : 
    1772             : struct bio;
    1773             : struct block_device;
    1774             : struct gendisk;
    1775             : struct blk_integrity;
    1776             : 
    1777        5486 : static inline int blk_integrity_rq(struct request *rq)
    1778             : {
    1779        5486 :         return 0;
    1780             : }
    1781             : static inline int blk_rq_count_integrity_sg(struct request_queue *q,
    1782             :                                             struct bio *b)
    1783             : {
    1784             :         return 0;
    1785             : }
    1786             : static inline int blk_rq_map_integrity_sg(struct request_queue *q,
    1787             :                                           struct bio *b,
    1788             :                                           struct scatterlist *s)
    1789             : {
    1790             :         return 0;
    1791             : }
    1792           0 : static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
    1793             : {
    1794           0 :         return NULL;
    1795             : }
    1796           0 : static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
    1797             : {
    1798           0 :         return NULL;
    1799             : }
    1800             : static inline bool
    1801             : blk_integrity_queue_supports_integrity(struct request_queue *q)
    1802             : {
    1803             :         return false;
    1804             : }
    1805           0 : static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
    1806             : {
    1807           0 :         return 0;
    1808             : }
    1809           0 : static inline void blk_integrity_register(struct gendisk *d,
    1810             :                                          struct blk_integrity *b)
    1811             : {
    1812           0 : }
    1813             : static inline void blk_integrity_unregister(struct gendisk *d)
    1814             : {
    1815             : }
    1816             : static inline void blk_queue_max_integrity_segments(struct request_queue *q,
    1817             :                                                     unsigned int segs)
    1818             : {
    1819             : }
    1820             : static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
    1821             : {
    1822             :         return 0;
    1823             : }
    1824             : 
    1825             : static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
    1826             :                                                    unsigned int sectors)
    1827             : {
    1828             :         return 0;
    1829             : }
    1830             : 
    1831             : static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
    1832             :                                                unsigned int sectors)
    1833             : {
    1834             :         return 0;
    1835             : }
    1836             : 
    1837             : static inline struct bio_vec *rq_integrity_vec(struct request *rq)
    1838             : {
    1839             :         return NULL;
    1840             : }
    1841             : 
    1842             : #endif /* CONFIG_BLK_DEV_INTEGRITY */
    1843             : 
    1844             : #ifdef CONFIG_BLK_INLINE_ENCRYPTION
    1845             : 
    1846             : bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
    1847             : 
    1848             : void blk_ksm_unregister(struct request_queue *q);
    1849             : 
    1850             : #else /* CONFIG_BLK_INLINE_ENCRYPTION */
    1851             : 
    1852             : static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
    1853             :                                     struct request_queue *q)
    1854             : {
    1855             :         return true;
    1856             : }
    1857             : 
    1858             : static inline void blk_ksm_unregister(struct request_queue *q) { }
    1859             : 
    1860             : #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
    1861             : 
    1862             : 
    1863             : struct block_device_operations {
    1864             :         blk_qc_t (*submit_bio) (struct bio *bio);
    1865             :         int (*open) (struct block_device *, fmode_t);
    1866             :         void (*release) (struct gendisk *, fmode_t);
    1867             :         int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
    1868             :         int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
    1869             :         int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
    1870             :         unsigned int (*check_events) (struct gendisk *disk,
    1871             :                                       unsigned int clearing);
    1872             :         void (*unlock_native_capacity) (struct gendisk *);
    1873             :         int (*revalidate_disk) (struct gendisk *);
    1874             :         int (*getgeo)(struct block_device *, struct hd_geometry *);
    1875             :         int (*set_read_only)(struct block_device *bdev, bool ro);
    1876             :         /* this callback is with swap_lock and sometimes page table lock held */
    1877             :         void (*swap_slot_free_notify) (struct block_device *, unsigned long);
    1878             :         int (*report_zones)(struct gendisk *, sector_t sector,
    1879             :                         unsigned int nr_zones, report_zones_cb cb, void *data);
    1880             :         char *(*devnode)(struct gendisk *disk, umode_t *mode);
    1881             :         struct module *owner;
    1882             :         const struct pr_ops *pr_ops;
    1883             : };
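
A minimal sketch of a bio-based driver's operations table; every mydev_*
callback is hypothetical:

static const struct block_device_operations mydev_fops = {
        .owner      = THIS_MODULE,
        .submit_bio = mydev_submit_bio,
        .open       = mydev_open,
        .release    = mydev_release,
        .getgeo     = mydev_getgeo,
};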
    1884             : 
    1885             : #ifdef CONFIG_COMPAT
    1886             : extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
    1887             :                                       unsigned int, unsigned long);
    1888             : #else
    1889             : #define blkdev_compat_ptr_ioctl NULL
    1890             : #endif
    1891             : 
    1892             : extern int bdev_read_page(struct block_device *, sector_t, struct page *);
    1893             : extern int bdev_write_page(struct block_device *, sector_t, struct page *,
    1894             :                                                 struct writeback_control *);
    1895             : 
    1896             : #ifdef CONFIG_BLK_DEV_ZONED
    1897             : bool blk_req_needs_zone_write_lock(struct request *rq);
    1898             : bool blk_req_zone_write_trylock(struct request *rq);
    1899             : void __blk_req_zone_write_lock(struct request *rq);
    1900             : void __blk_req_zone_write_unlock(struct request *rq);
    1901             : 
    1902             : static inline void blk_req_zone_write_lock(struct request *rq)
    1903             : {
    1904             :         if (blk_req_needs_zone_write_lock(rq))
    1905             :                 __blk_req_zone_write_lock(rq);
    1906             : }
    1907             : 
    1908             : static inline void blk_req_zone_write_unlock(struct request *rq)
    1909             : {
    1910             :         if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
    1911             :                 __blk_req_zone_write_unlock(rq);
    1912             : }
    1913             : 
    1914             : static inline bool blk_req_zone_is_write_locked(struct request *rq)
    1915             : {
    1916             :         return rq->q->seq_zones_wlock &&
    1917             :                 test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
    1918             : }
    1919             : 
    1920             : static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
    1921             : {
    1922             :         if (!blk_req_needs_zone_write_lock(rq))
    1923             :                 return true;
    1924             :         return !blk_req_zone_is_write_locked(rq);
    1925             : }
    1926             : #else
    1927             : static inline bool blk_req_needs_zone_write_lock(struct request *rq)
    1928             : {
    1929             :         return false;
    1930             : }
    1931             : 
    1932             : static inline void blk_req_zone_write_lock(struct request *rq)
    1933             : {
    1934             : }
    1935             : 
    1936             : static inline void blk_req_zone_write_unlock(struct request *rq)
    1937             : {
    1938             : }
    1939             : static inline bool blk_req_zone_is_write_locked(struct request *rq)
    1940             : {
    1941             :         return false;
    1942             : }
    1943             : 
    1944             : static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
    1945             : {
    1946             :         return true;
    1947             : }
    1948             : #endif /* CONFIG_BLK_DEV_ZONED */
    1949             : 
    1950           0 : static inline void blk_wake_io_task(struct task_struct *waiter)
    1951             : {
    1952             :         /*
    1953             :          * If we're polling, the task itself is doing the completions. For
    1954             :          * that case, we don't need to signal a wakeup, it's enough to just
    1955             :          * mark us as RUNNING.
    1956             :          */
    1957           0 :         if (waiter == current)
    1958           0 :                 __set_current_state(TASK_RUNNING);
    1959             :         else
    1960           0 :                 wake_up_process(waiter);
    1961           0 : }
    1962             : 
    1963             : unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
    1964             :                 unsigned int op);
    1965             : void disk_end_io_acct(struct gendisk *disk, unsigned int op,
    1966             :                 unsigned long start_time);
    1967             : 
    1968             : unsigned long bio_start_io_acct(struct bio *bio);
    1969             : void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
    1970             :                 struct block_device *orig_bdev);
    1971             : 
    1972             : /**
    1973             :  * bio_end_io_acct - end I/O accounting for bio based drivers
    1974             :  * @bio:        bio to end account for
    1975             :  * @start:      start time returned by bio_start_io_acct()
    1976             :  */
    1977           0 : static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
    1978             : {
    1979           0 :         return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
    1980             : }
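
A hedged sketch of the accounting pair in a bio-based driver's submit path;
mydev_do_io() stands in for the real device work:

static blk_qc_t mydev_submit_bio(struct bio *bio)
{
        unsigned long start = bio_start_io_acct(bio);

        mydev_do_io(bio);                /* hypothetical device work */
        bio_end_io_acct(bio, start);
        bio_endio(bio);
        return BLK_QC_T_NONE;
}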
    1981             : 
    1982             : int bdev_read_only(struct block_device *bdev);
    1983             : int set_blocksize(struct block_device *bdev, int size);
    1984             : 
    1985             : const char *bdevname(struct block_device *bdev, char *buffer);
    1986             : int lookup_bdev(const char *pathname, dev_t *dev);
    1987             : 
    1988             : void blkdev_show(struct seq_file *seqf, off_t offset);
    1989             : 
    1990             : #define BDEVNAME_SIZE   32      /* Largest string for a blockdev identifier */
    1991             : #define BDEVT_SIZE      10      /* Largest string for MAJ:MIN for blkdev */
    1992             : #ifdef CONFIG_BLOCK
    1993             : #define BLKDEV_MAJOR_MAX        512
    1994             : #else
    1995             : #define BLKDEV_MAJOR_MAX        0
    1996             : #endif
    1997             : 
    1998             : struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
    1999             :                 void *holder);
    2000             : struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
    2001             : int bd_prepare_to_claim(struct block_device *bdev, void *holder);
    2002             : void bd_abort_claiming(struct block_device *bdev, void *holder);
    2003             : void blkdev_put(struct block_device *bdev, fmode_t mode);
    2004             : 
    2005             : /* just for blk-cgroup, don't use elsewhere */
    2006             : struct block_device *blkdev_get_no_open(dev_t dev);
    2007             : void blkdev_put_no_open(struct block_device *bdev);
    2008             : 
    2009             : struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
    2010             : void bdev_add(struct block_device *bdev, dev_t dev);
    2011             : struct block_device *I_BDEV(struct inode *inode);
    2012             : struct block_device *bdgrab(struct block_device *bdev);
    2013             : void bdput(struct block_device *);
    2014             : int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
    2015             :                 loff_t lend);
    2016             : 
    2017             : #ifdef CONFIG_BLOCK
    2018             : void invalidate_bdev(struct block_device *bdev);
    2019             : int sync_blockdev(struct block_device *bdev);
    2020             : #else
    2021             : static inline void invalidate_bdev(struct block_device *bdev)
    2022             : {
    2023             : }
    2024             : static inline int sync_blockdev(struct block_device *bdev)
    2025             : {
    2026             :         return 0;
    2027             : }
    2028             : #endif
    2029             : int fsync_bdev(struct block_device *bdev);
    2030             : 
    2031             : int freeze_bdev(struct block_device *bdev);
    2032             : int thaw_bdev(struct block_device *bdev);
    2033             : 
    2034             : #endif /* _LINUX_BLKDEV_H */

Generated by: LCOV version 1.14