Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef BLK_MQ_H
3 : #define BLK_MQ_H
4 :
5 : #include <linux/blkdev.h>
6 : #include <linux/sbitmap.h>
7 : #include <linux/srcu.h>
8 : #include <linux/lockdep.h>
9 :
10 : struct blk_mq_tags;
11 : struct blk_flush_queue;
12 :
13 : /**
14 : * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
15 : * block device
16 : */
17 : struct blk_mq_hw_ctx {
18 : struct {
19 : /** @lock: Protects the dispatch list. */
20 : spinlock_t lock;
21 : /**
22 : * @dispatch: Used for requests that are ready to be
23 : * dispatched to the hardware but for some reason (e.g. lack of
24 : * resources) could not be sent to the hardware. As soon as the
25 : * driver can send new requests, requests on this list will
26 : * be sent first, for fairer dispatch.
27 : */
28 : struct list_head dispatch;
29 : /**
30 : * @state: BLK_MQ_S_* flags. Defines the state of the hw
31 : * queue (active, scheduled to restart, stopped).
32 : */
33 : unsigned long state;
34 : } ____cacheline_aligned_in_smp;
35 :
36 : /**
37 : * @run_work: Used for scheduling a hardware queue run at a later time.
38 : */
39 : struct delayed_work run_work;
40 : /** @cpumask: Map of available CPUs where this hctx can run. */
41 : cpumask_var_t cpumask;
42 : /**
43 : * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
44 : * selection from @cpumask.
45 : */
46 : int next_cpu;
47 : /**
48 : * @next_cpu_batch: Counter of how many queue runs are left in the
49 : * batch before switching to the next CPU.
50 : */
51 : int next_cpu_batch;
52 :
53 : /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
54 : unsigned long flags;
55 :
56 : /**
57 : * @sched_data: Pointer owned by the IO scheduler attached to a request
58 : * queue. It's up to the IO scheduler how to use this pointer.
59 : */
60 : void *sched_data;
61 : /**
62 : * @queue: Pointer to the request queue that owns this hardware context.
63 : */
64 : struct request_queue *queue;
65 : /** @fq: Queue of requests that need to perform a flush operation. */
66 : struct blk_flush_queue *fq;
67 :
68 : /**
69 : * @driver_data: Pointer to data owned by the block driver that created
70 : * this hctx
71 : */
72 : void *driver_data;
73 :
74 : /**
75 : * @ctx_map: Bitmap for each software queue. If a bit is set, there is a
76 : * pending request in that software queue.
77 : */
78 : struct sbitmap ctx_map;
79 :
80 : /**
81 : * @dispatch_from: Software queue to be used when no scheduler was
82 : * selected.
83 : */
84 : struct blk_mq_ctx *dispatch_from;
85 : /**
86 : * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
87 : * decide whether the hw queue is busy, maintained as an Exponential
88 : * Weighted Moving Average (EWMA).
89 : */
90 : unsigned int dispatch_busy;
91 :
92 : /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
93 : unsigned short type;
94 : /** @nr_ctx: Number of software queues. */
95 : unsigned short nr_ctx;
96 : /** @ctxs: Array of software queues. */
97 : struct blk_mq_ctx **ctxs;
98 :
99 : /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
100 : spinlock_t dispatch_wait_lock;
101 : /**
102 : * @dispatch_wait: Wait queue entry used when no tag is available at
103 : * the moment, so dispatch can be retried once a tag is freed.
104 : */
105 : wait_queue_entry_t dispatch_wait;
106 :
107 : /**
108 : * @wait_index: Index of next available dispatch_wait queue to insert
109 : * requests.
110 : */
111 : atomic_t wait_index;
112 :
113 : /**
114 : * @tags: Tags owned by the block driver. A tag in this set is only
115 : * assigned when a request is dispatched from a hardware queue.
116 : */
117 : struct blk_mq_tags *tags;
118 : /**
119 : * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
120 : * scheduler associated with a request queue, a tag is assigned when
121 : * that request is allocated. Else, this member is not used.
122 : */
123 : struct blk_mq_tags *sched_tags;
124 :
125 : /** @queued: Number of queued requests. */
126 : unsigned long queued;
127 : /** @run: Number of dispatched requests. */
128 : unsigned long run;
129 : #define BLK_MQ_MAX_DISPATCH_ORDER 7
130 : /** @dispatched: Histogram of how many requests were dispatched per queue run. */
131 : unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
132 :
133 : /** @numa_node: NUMA node the storage adapter has been connected to. */
134 : unsigned int numa_node;
135 : /** @queue_num: Index of this hardware queue. */
136 : unsigned int queue_num;
137 :
138 : /**
139 : * @nr_active: Number of active requests. Only used when a tag set is
140 : * shared across request queues.
141 : */
142 : atomic_t nr_active;
143 :
144 : /** @cpuhp_online: List node for the CPU-hotplug callbacks run when this hctx's CPUs go offline. */
145 : struct hlist_node cpuhp_online;
146 : /** @cpuhp_dead: List node for the CPU-hotplug callbacks run after a CPU has died. */
147 : struct hlist_node cpuhp_dead;
148 : /** @kobj: Kernel object for sysfs. */
149 : struct kobject kobj;
150 :
151 : /** @poll_considered: Number of times blk_poll() was called. */
152 : unsigned long poll_considered;
153 : /** @poll_invoked: Number of requests blk_poll() has polled for. */
154 : unsigned long poll_invoked;
155 : /** @poll_success: Number of polled requests that were completed. */
156 : unsigned long poll_success;
157 :
158 : #ifdef CONFIG_BLK_DEBUG_FS
159 : /**
160 : * @debugfs_dir: debugfs directory for this hardware queue. Named
161 : * hctx<hardware queue number>.
162 : */
163 : struct dentry *debugfs_dir;
164 : /** @sched_debugfs_dir: debugfs directory for the scheduler. */
165 : struct dentry *sched_debugfs_dir;
166 : #endif
167 :
168 : /**
169 : * @hctx_list: If this hctx is not in use, this is an entry in
170 : * q->unused_hctx_list.
171 : */
172 : struct list_head hctx_list;
173 :
174 : /**
175 : * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
176 : * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
177 : * blk_mq_hw_ctx_size().
178 : */
179 : struct srcu_struct srcu[];
180 : };
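
/*
 * Illustrative sketch (not part of this header): a driver's ->init_hctx()
 * callback stashing per-hardware-queue state in @driver_data so the other
 * callbacks can reach it through the hctx.  "struct my_dev" and its
 * hw_queues[] array are hypothetical; the callback signature is the one
 * declared in struct blk_mq_ops below.
 */
static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *tagset_data,
			unsigned int hctx_idx)
{
	struct my_dev *dev = tagset_data;	/* tag_set->driver_data */

	hctx->driver_data = &dev->hw_queues[hctx_idx];
	return 0;
}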
181 :
182 : /**
183 : * struct blk_mq_queue_map - Map software queues to hardware queues
184 : * @mq_map: CPU ID to hardware queue index map. This is an array
185 : * with nr_cpu_ids elements. Each element has a value in the range
186 : * [@queue_offset, @queue_offset + @nr_queues).
187 : * @nr_queues: Number of hardware queues to map CPU IDs onto.
188 : * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
189 : * driver to map each hardware queue type (enum hctx_type) onto a distinct
190 : * set of hardware queues.
191 : */
192 : struct blk_mq_queue_map {
193 : unsigned int *mq_map;
194 : unsigned int nr_queues;
195 : unsigned int queue_offset;
196 : };
197 :
198 : /**
199 : * enum hctx_type - Type of hardware queue
200 : * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
201 : * @HCTX_TYPE_READ: Just for READ I/O.
202 : * @HCTX_TYPE_POLL: Polled I/O of any kind.
203 : * @HCTX_MAX_TYPES: Number of types of hctx.
204 : */
205 : enum hctx_type {
206 : HCTX_TYPE_DEFAULT,
207 : HCTX_TYPE_READ,
208 : HCTX_TYPE_POLL,
209 :
210 : HCTX_MAX_TYPES,
211 : };
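
/*
 * Illustrative sketch (not part of this header): a driver ->map_queues()
 * callback that lets the block layer spread CPUs over its only map.  The
 * "my_" name is hypothetical; blk_mq_map_queues() is declared later in this
 * file and HCTX_TYPE_DEFAULT comes from the enum above.
 */
static int my_map_queues(struct blk_mq_tag_set *set)
{
	/* default CPU-to-hardware-queue spreading for the DEFAULT type */
	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}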
212 :
213 : /**
214 : * struct blk_mq_tag_set - tag set that can be shared between request queues
215 : * @map: One or more ctx -> hctx mappings. One map exists for each
216 : * hardware queue type (enum hctx_type) that the driver wishes
217 : * to support. There are no restrictions on maps being of the
218 : * same size, and it's perfectly legal to share maps between
219 : * types.
220 : * @nr_maps: Number of elements in the @map array. A number in the range
221 : * [1, HCTX_MAX_TYPES].
222 : * @ops: Pointers to functions that implement block driver behavior.
223 : * @nr_hw_queues: Number of hardware queues supported by the block driver that
224 : * owns this data structure.
225 : * @queue_depth: Number of tags per hardware queue, reserved tags included.
226 : * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
227 : * allocations.
228 : * @cmd_size: Number of additional bytes to allocate per request. The block
229 : * driver owns these additional bytes.
230 : * @numa_node: NUMA node the storage adapter has been connected to.
231 : * @timeout: Request processing timeout in jiffies.
232 : * @flags: Zero or more BLK_MQ_F_* flags.
233 : * @driver_data: Pointer to data owned by the block driver that created this
234 : * tag set.
235 : * @active_queues_shared_sbitmap:
236 : * number of active request queues per tag set.
237 : * @__bitmap_tags: A shared tags sbitmap, used over all hctxs
238 : * @__breserved_tags:
239 : * A shared reserved tags sbitmap, used over all hctxs
240 : * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
241 : * elements.
242 : * @tag_list_lock: Serializes tag_list accesses.
243 : * @tag_list: List of the request queues that use this tag set. See also
244 : * request_queue.tag_set_list.
245 : */
246 : struct blk_mq_tag_set {
247 : struct blk_mq_queue_map map[HCTX_MAX_TYPES];
248 : unsigned int nr_maps;
249 : const struct blk_mq_ops *ops;
250 : unsigned int nr_hw_queues;
251 : unsigned int queue_depth;
252 : unsigned int reserved_tags;
253 : unsigned int cmd_size;
254 : int numa_node;
255 : unsigned int timeout;
256 : unsigned int flags;
257 : void *driver_data;
258 : atomic_t active_queues_shared_sbitmap;
259 :
260 : struct sbitmap_queue __bitmap_tags;
261 : struct sbitmap_queue __breserved_tags;
262 : struct blk_mq_tags **tags;
263 :
264 : struct mutex tag_list_lock;
265 : struct list_head tag_list;
266 : };
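
/*
 * Illustrative sketch (not part of this header): minimal tag-set and queue
 * setup as a simple single-queue driver might do it.  "struct my_dev",
 * "struct my_cmd" and "my_mq_ops" are hypothetical; the fields filled in and
 * the blk_mq_alloc_tag_set()/blk_mq_init_queue() calls are the API declared
 * in this file.
 */
static int my_create_queue(struct my_dev *dev)
{
	int ret;

	memset(&dev->tag_set, 0, sizeof(dev->tag_set));
	dev->tag_set.ops		= &my_mq_ops;
	dev->tag_set.nr_hw_queues	= 1;
	dev->tag_set.nr_maps		= 1;
	dev->tag_set.queue_depth	= 64;
	dev->tag_set.numa_node		= NUMA_NO_NODE;
	dev->tag_set.cmd_size		= sizeof(struct my_cmd);
	dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.driver_data	= dev;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret)
		return ret;

	dev->queue = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->queue)) {
		ret = PTR_ERR(dev->queue);
		blk_mq_free_tag_set(&dev->tag_set);
		return ret;
	}
	return 0;
}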
267 :
268 : /**
269 : * struct blk_mq_queue_data - Data about a request inserted in a queue
270 : *
271 : * @rq: Request pointer.
272 : * @last: True if this is the last request in the queue.
273 : */
274 : struct blk_mq_queue_data {
275 : struct request *rq;
276 : bool last;
277 : };
278 :
279 : typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
280 : bool);
281 : typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
282 :
283 : /**
284 : * struct blk_mq_ops - Callback functions that implement block driver
285 : * behaviour.
286 : */
287 : struct blk_mq_ops {
288 : /**
289 : * @queue_rq: Queue a new request from block IO.
290 : */
291 : blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *,
292 : const struct blk_mq_queue_data *);
293 :
294 : /**
295 : * @commit_rqs: If a driver uses bd->last to decide when to submit
296 : * requests to hardware, it must define this function. When errors
297 : * stop further requests from being issued, this hook serves the
298 : * purpose of kicking the hardware (which the last request otherwise
299 : * would have done).
300 : */
301 : void (*commit_rqs)(struct blk_mq_hw_ctx *);
302 :
303 : /**
304 : * @get_budget: Reserve a budget before queueing a request. Once
305 : * .queue_rq has run, it is the driver's responsibility to
306 : * release the reserved budget. The failure case of
307 : * .get_budget must also be handled to avoid I/O deadlock.
308 : */
309 : bool (*get_budget)(struct request_queue *);
310 :
311 : /**
312 : * @put_budget: Release the reserved budget.
313 : */
314 : void (*put_budget)(struct request_queue *);
315 :
316 : /**
317 : * @timeout: Called on request timeout.
318 : */
319 : enum blk_eh_timer_return (*timeout)(struct request *, bool);
320 :
321 : /**
322 : * @poll: Called to poll for completion of a specific tag.
323 : */
324 : int (*poll)(struct blk_mq_hw_ctx *);
325 :
326 : /**
327 : * @complete: Mark the request as complete.
328 : */
329 : void (*complete)(struct request *);
330 :
331 : /**
332 : * @init_hctx: Called when the block layer side of a hardware queue has
333 : * been set up, allowing the driver to allocate/init matching
334 : * structures.
335 : */
336 : int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int);
337 : /**
338 : * @exit_hctx: Ditto for exit/teardown.
339 : */
340 : void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
341 :
342 : /**
343 : * @init_request: Called for every command allocated by the block layer
344 : * to allow the driver to set up driver specific data.
345 : *
346 : * Tags greater than or equal to queue_depth are used for setting
347 : * up flush requests.
348 : */
349 : int (*init_request)(struct blk_mq_tag_set *set, struct request *,
350 : unsigned int, unsigned int);
351 : /**
352 : * @exit_request: Ditto for exit/teardown.
353 : */
354 : void (*exit_request)(struct blk_mq_tag_set *set, struct request *,
355 : unsigned int);
356 :
357 : /**
358 : * @initialize_rq_fn: Called from inside blk_get_request().
359 : */
360 : void (*initialize_rq_fn)(struct request *rq);
361 :
362 : /**
363 : * @cleanup_rq: Called before freeing a request that has not been
364 : * completed yet, usually to free driver private data.
365 : */
366 : void (*cleanup_rq)(struct request *);
367 :
368 : /**
369 : * @busy: If set, returns whether or not this queue is currently busy.
370 : */
371 : bool (*busy)(struct request_queue *);
372 :
373 : /**
374 : * @map_queues: This allows drivers to specify their own queue mapping by
375 : * overriding the setup-time function that builds the mq_map.
376 : */
377 : int (*map_queues)(struct blk_mq_tag_set *set);
378 :
379 : #ifdef CONFIG_BLK_DEBUG_FS
380 : /**
381 : * @show_rq: Used by the debugfs implementation to show driver-specific
382 : * information about a request.
383 : */
384 : void (*show_rq)(struct seq_file *m, struct request *rq);
385 : #endif
386 : };
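
/*
 * Illustrative sketch (not part of this header): the shape of a minimal
 * ->queue_rq() implementation.  "my_hw_submit()" and "struct my_cmd" are
 * hypothetical driver pieces; blk_mq_start_request(), blk_mq_rq_to_pdu()
 * and the BLK_STS_* codes are real block-layer API.
 */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_start_request(rq);

	/* bd->last tells the driver whether more requests will follow */
	if (!my_hw_submit(hctx->driver_data, cmd, bd->last))
		return BLK_STS_RESOURCE;	/* blk-mq will retry later */

	return BLK_STS_OK;	/* request is completed asynchronously */
}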
387 :
388 : enum {
389 : BLK_MQ_F_SHOULD_MERGE = 1 << 0,
390 : BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
391 : /*
392 : * Set when this device requires an underlying blk-mq device for
393 : * completing IO.
394 : */
395 : BLK_MQ_F_STACKING = 1 << 2,
396 : BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
397 : BLK_MQ_F_BLOCKING = 1 << 5,
398 : BLK_MQ_F_NO_SCHED = 1 << 6,
399 : BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
400 : BLK_MQ_F_ALLOC_POLICY_BITS = 1,
401 :
402 : BLK_MQ_S_STOPPED = 0,
403 : BLK_MQ_S_TAG_ACTIVE = 1,
404 : BLK_MQ_S_SCHED_RESTART = 2,
405 :
406 : /* hw queue is inactive after all its CPUs become offline */
407 : BLK_MQ_S_INACTIVE = 3,
408 :
409 : BLK_MQ_MAX_DEPTH = 10240,
410 :
411 : BLK_MQ_CPU_WORK_BATCH = 8,
412 : };
413 : #define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
414 : ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
415 : ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
416 : #define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
417 : ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
418 : << BLK_MQ_F_ALLOC_POLICY_START_BIT)
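
/*
 * Illustrative sketch (not part of this header): encoding a tag allocation
 * policy into the tag-set flags and decoding it again with the two macros
 * above.  BLK_TAG_ALLOC_RR is believed to come from <linux/blkdev.h>; "set"
 * is a hypothetical tag set.
 */
static inline void my_use_rr_tag_allocation(struct blk_mq_tag_set *set)
{
	set->flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);

	/* the core later recovers the policy like this: */
	WARN_ON(BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags) != BLK_TAG_ALLOC_RR);
}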
419 :
420 : struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
421 : struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
422 : void *queuedata);
423 : struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
424 : struct request_queue *q,
425 : bool elevator_init);
426 : struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
427 : const struct blk_mq_ops *ops,
428 : unsigned int queue_depth,
429 : unsigned int set_flags);
430 : void blk_mq_unregister_dev(struct device *, struct request_queue *);
431 :
432 : int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
433 : void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
434 :
435 : void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
436 :
437 : void blk_mq_free_request(struct request *rq);
438 :
439 : bool blk_mq_queue_inflight(struct request_queue *q);
440 :
441 : enum {
442 : /* return when out of requests */
443 : BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
444 : /* allocate from reserved pool */
445 : BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1),
446 : /* set RQF_PM */
447 : BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
448 : };
449 :
450 : struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
451 : blk_mq_req_flags_t flags);
452 : struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
453 : unsigned int op, blk_mq_req_flags_t flags,
454 : unsigned int hctx_idx);
455 : struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
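
/*
 * Illustrative sketch (not part of this header): allocating a passthrough
 * request without sleeping, using the flags above.  REQ_OP_DRV_IN comes from
 * the block layer; the surrounding function and its error handling are
 * hypothetical.
 */
static struct request *my_alloc_cmd_nowait(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN,
				  BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return NULL;	/* no (reserved) tag available right now */
	return rq;
}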
456 :
457 : enum {
458 : BLK_MQ_UNIQUE_TAG_BITS = 16,
459 : BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
460 : };
461 :
462 : u32 blk_mq_unique_tag(struct request *rq);
463 :
464 0 : static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
465 : {
466 0 : return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
467 : }
468 :
469 : static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
470 : {
471 : return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
472 : }
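
/*
 * Illustrative sketch (not part of this header): decomposing the value
 * returned by blk_mq_unique_tag(), as a SCSI-style driver might do to find
 * its per-command state.  "my_find_cmd()" and "struct my_dev" are
 * hypothetical.
 */
static struct my_cmd *my_lookup_cmd(struct my_dev *dev, u32 unique_tag)
{
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);

	return my_find_cmd(dev, hwq, tag);
}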
473 :
474 : /**
475 : * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
476 : * @rq: target request.
477 : */
478 3374 : static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
479 : {
480 3374 : return READ_ONCE(rq->state);
481 : }
482 :
483 0 : static inline int blk_mq_request_started(struct request *rq)
484 : {
485 0 : return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
486 : }
487 :
488 0 : static inline int blk_mq_request_completed(struct request *rq)
489 : {
490 0 : return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
491 : }
492 :
493 : /*
494 : *
495 : * Set the state to complete when completing a request from inside ->queue_rq.
496 : * This is used by drivers that want to ensure special complete actions that
497 : * need access to the request are called on failure, e.g. by nvme for
498 : * multipathing.
499 : */
500 : static inline void blk_mq_set_request_complete(struct request *rq)
501 : {
502 : WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
503 : }
504 :
505 : void blk_mq_start_request(struct request *rq);
506 : void blk_mq_end_request(struct request *rq, blk_status_t error);
507 : void __blk_mq_end_request(struct request *rq, blk_status_t error);
508 :
509 : void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
510 : void blk_mq_kick_requeue_list(struct request_queue *q);
511 : void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
512 : void blk_mq_complete_request(struct request *rq);
513 : bool blk_mq_complete_request_remote(struct request *rq);
514 : bool blk_mq_queue_stopped(struct request_queue *q);
515 : void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
516 : void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
517 : void blk_mq_stop_hw_queues(struct request_queue *q);
518 : void blk_mq_start_hw_queues(struct request_queue *q);
519 : void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
520 : void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
521 : void blk_mq_quiesce_queue(struct request_queue *q);
522 : void blk_mq_unquiesce_queue(struct request_queue *q);
523 : void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
524 : void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
525 : void blk_mq_run_hw_queues(struct request_queue *q, bool async);
526 : void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
527 : void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
528 : busy_tag_iter_fn *fn, void *priv);
529 : void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
530 : void blk_mq_freeze_queue(struct request_queue *q);
531 : void blk_mq_unfreeze_queue(struct request_queue *q);
532 : void blk_freeze_queue_start(struct request_queue *q);
533 : void blk_mq_freeze_queue_wait(struct request_queue *q);
534 : int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
535 : unsigned long timeout);
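
/*
 * Illustrative sketch (not part of this header): the common freeze/unfreeze
 * pattern used when queue configuration must change with no I/O in flight.
 * "my_apply_new_limits()" is hypothetical; the freeze helpers are declared
 * just above.
 */
static void my_reconfigure(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* drains outstanding requests */
	my_apply_new_limits(q);		/* no request is in flight here */
	blk_mq_unfreeze_queue(q);	/* resume normal request processing */
}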
536 :
537 : int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
538 : void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
539 :
540 : void blk_mq_quiesce_queue_nowait(struct request_queue *q);
541 :
542 : unsigned int blk_mq_rq_cpu(struct request *rq);
543 :
544 : bool __blk_should_fake_timeout(struct request_queue *q);
545 3435 : static inline bool blk_should_fake_timeout(struct request_queue *q)
546 : {
547 3435 : if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) &&
548 : test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
549 : return __blk_should_fake_timeout(q);
550 3435 : return false;
551 : }
552 :
553 : /**
554 : * blk_mq_rq_from_pdu - cast a PDU to a request
555 : * @pdu: the PDU (Protocol Data Unit) to be cast
556 : *
557 : * Return: request
558 : *
559 : * Driver command data is placed immediately after the request, so subtract
560 : * the request size to get back to the original request.
561 : */
562 3435 : static inline struct request *blk_mq_rq_from_pdu(void *pdu)
563 : {
564 3435 : return pdu - sizeof(struct request);
565 : }
566 :
567 : /**
568 : * blk_mq_rq_to_pdu - cast a request to a PDU
569 : * @rq: the request to be cast
570 : *
571 : * Return: pointer to the PDU
572 : *
573 : * Driver command data is placed immediately after the request, so add one
574 : * request to get to the PDU.
575 : */
576 8929 : static inline void *blk_mq_rq_to_pdu(struct request *rq)
577 : {
578 7897 : return rq + 1;
579 : }
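
/*
 * Illustrative sketch (not part of this header): per-request driver data
 * ("PDU") allocated behind each request when the tag set's cmd_size is
 * sizeof(struct my_cmd), and the two helpers above used to convert in both
 * directions.  "struct my_cmd" and the completion path are hypothetical.
 */
struct my_cmd {
	int error;
};

static void my_irq_complete(struct my_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	blk_mq_end_request(rq, cmd->error ? BLK_STS_IOERR : BLK_STS_OK);
}

static int my_init_request(struct blk_mq_tag_set *set, struct request *rq,
			   unsigned int hctx_idx, unsigned int numa_node)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->error = 0;
	return 0;
}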
580 :
581 : #define queue_for_each_hw_ctx(q, hctx, i) \
582 : for ((i) = 0; (i) < (q)->nr_hw_queues && \
583 : ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
584 :
585 : #define hctx_for_each_ctx(hctx, ctx, i) \
586 : for ((i) = 0; (i) < (hctx)->nr_ctx && \
587 : ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
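
/*
 * Illustrative sketch (not part of this header): using the iterator above to
 * walk every hardware queue of a request queue, here summing the per-hctx
 * "queued" counter.  The function name is hypothetical.
 */
static unsigned long my_total_queued(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long total = 0;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		total += hctx->queued;

	return total;
}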
588 :
589 6358 : static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
590 : struct request *rq)
591 : {
592 6358 : if (rq->tag != -1)
593 6358 : return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
594 :
595 0 : return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
596 : BLK_QC_T_INTERNAL;
597 : }
598 :
599 0 : static inline void blk_mq_cleanup_rq(struct request *rq)
600 : {
601 0 : if (rq->q->mq_ops->cleanup_rq)
602 0 : rq->q->mq_ops->cleanup_rq(rq);
603 0 : }
604 :
605 3254 : static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
606 : unsigned int nr_segs)
607 : {
608 3254 : rq->nr_phys_segments = nr_segs;
609 3254 : rq->__data_len = bio->bi_iter.bi_size;
610 3254 : rq->bio = rq->biotail = bio;
611 3254 : rq->ioprio = bio_prio(bio);
612 :
613 3254 : if (bio->bi_bdev)
614 3252 : rq->rq_disk = bio->bi_bdev->bd_disk;
615 : }
616 :
617 : blk_qc_t blk_mq_submit_bio(struct bio *bio);
618 : void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
619 : struct lock_class_key *key);
620 :
621 : #endif