Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * blk-mq scheduling framework
4 : *
5 : * Copyright (C) 2016 Jens Axboe
6 : */
7 : #include <linux/kernel.h>
8 : #include <linux/module.h>
9 : #include <linux/blk-mq.h>
10 : #include <linux/list_sort.h>
11 :
12 : #include <trace/events/block.h>
13 :
14 : #include "blk.h"
15 : #include "blk-mq.h"
16 : #include "blk-mq-debugfs.h"
17 : #include "blk-mq-sched.h"
18 : #include "blk-mq-tag.h"
19 : #include "blk-wbt.h"
20 :
21 0 : void blk_mq_sched_assign_ioc(struct request *rq)
22 : {
23 0 : struct request_queue *q = rq->q;
24 0 : struct io_context *ioc;
25 0 : struct io_cq *icq;
26 :
27 : /*
28 : * May not have an IO context if it's a passthrough request
29 : */
30 0 : ioc = current->io_context;
31 0 : if (!ioc)
32 : return;
33 :
34 0 : spin_lock_irq(&q->queue_lock);
35 0 : icq = ioc_lookup_icq(ioc, q);
36 0 : spin_unlock_irq(&q->queue_lock);
37 :
38 0 : if (!icq) {
39 0 : icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
40 0 : if (!icq)
41 : return;
42 : }
43 0 : get_io_context(icq->ioc);
44 0 : rq->elv.icq = icq;
45 : }
46 :
47 : /*
48 : * Mark a hardware queue as needing a restart: blk_mq_sched_restart()
49 : * will then re-run the queue once a request completes.
50 : */
51 253 : void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
52 : {
53 253 : if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
54 : return;
55 :
56 200 : set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
57 : }
58 : EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
59 :
60 3314 : void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
61 : {
62 3314 : if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
63 : return;
64 200 : clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
65 :
66 : /*
67 : * Order clearing SCHED_RESTART against list_empty_careful(&hctx->dispatch)
68 : * in blk_mq_run_hw_queue(). Its pair is the barrier in
69 : * blk_mq_dispatch_rq_list(). Without it, the dispatch code might not
70 : * see SCHED_RESTART while a new request added to hctx->dispatch is
71 : * missed by the check in blk_mq_run_hw_queue().
72 : */
73 200 : smp_mb();
74 :
75 200 : blk_mq_run_hw_queue(hctx, true);
76 : }
77 :
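Editor's note: the mark/restart pair above is the scheduler's restart handshake. A dispatch path that runs out of driver resources marks the hctx, and a later completion re-runs the queue only if the mark was set, with a full barrier ordering the clear against the re-check of hctx->dispatch. Below is a minimal userspace model of that handshake, a sketch only, using C11 atomics in place of the kernel's bitops; struct hw_queue, mark_restart(), restart() and run_queue() are illustrative names, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of one hardware queue: a single "needs restart" flag. */
struct hw_queue {
	atomic_bool sched_restart;
};

/* Illustrative stand-in for blk_mq_run_hw_queue(). */
static void run_queue(struct hw_queue *hq)
{
	printf("re-running queue %p\n", (void *)hq);
}

/* Dispatch side: mark at most once; the early test avoids dirtying
 * the cacheline on every failed dispatch, like the test_bit() before
 * set_bit() above. */
static void mark_restart(struct hw_queue *hq)
{
	if (atomic_load_explicit(&hq->sched_restart, memory_order_relaxed))
		return;
	atomic_store_explicit(&hq->sched_restart, true, memory_order_release);
}

/* Completion side: only a completion that observes the flag clears it
 * and re-runs the queue. */
static void restart(struct hw_queue *hq)
{
	if (!atomic_load_explicit(&hq->sched_restart, memory_order_relaxed))
		return;
	atomic_store_explicit(&hq->sched_restart, false, memory_order_relaxed);
	/* Full fence: order the clear before the re-run's emptiness
	 * checks, playing the role of the smp_mb() above. */
	atomic_thread_fence(memory_order_seq_cst);
	run_queue(hq);
}

int main(void)
{
	struct hw_queue hq = { .sched_restart = false };

	mark_restart(&hq);	/* dispatch ran out of resources */
	restart(&hq);		/* a completion kicks the queue again */
	return 0;
}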
78 0 : static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
79 : {
80 0 : struct request *rqa = container_of(a, struct request, queuelist);
81 0 : struct request *rqb = container_of(b, struct request, queuelist);
82 :
83 0 : return rqa->mq_hctx > rqb->mq_hctx;
84 : }
85 :
86 0 : static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
87 : {
88 0 : struct blk_mq_hw_ctx *hctx =
89 0 : list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
90 0 : struct request *rq;
91 0 : LIST_HEAD(hctx_list);
92 0 : unsigned int count = 0;
93 :
94 0 : list_for_each_entry(rq, rq_list, queuelist) {
95 0 : if (rq->mq_hctx != hctx) {
96 0 : list_cut_before(&hctx_list, rq_list, &rq->queuelist);
97 0 : goto dispatch;
98 : }
99 0 : count++;
100 : }
101 0 : list_splice_tail_init(rq_list, &hctx_list);
102 :
103 0 : dispatch:
104 0 : return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
105 : }
106 :
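Editor's note: blk_mq_dispatch_hctx_list() relies on the caller having sorted rq_list with sched_rq_cmp(), so every run of requests sharing an hctx is contiguous and can be peeled off as one batch. A self-contained sketch of the same sort-then-split-runs idea over a plain array; qsort() stands in for list_sort() (and needs a full three-way comparator, where list_sort() only needs "a sorts after b"), and struct req is illustrative:

#include <stdio.h>
#include <stdlib.h>

struct req { int hctx; int id; };

/* Same contract as sched_rq_cmp(): group equal hctxs together. */
static int cmp_hctx(const void *a, const void *b)
{
	const struct req *ra = a, *rb = b;

	return (ra->hctx > rb->hctx) - (ra->hctx < rb->hctx);
}

int main(void)
{
	struct req rqs[] = {
		{ 1, 10 }, { 0, 11 }, { 1, 12 }, { 0, 13 }, { 2, 14 },
	};
	size_t n = sizeof(rqs) / sizeof(rqs[0]);

	qsort(rqs, n, sizeof(rqs[0]), cmp_hctx);

	/* Dispatch each contiguous run of same-hctx requests as one batch. */
	for (size_t i = 0; i < n; ) {
		size_t j = i;

		while (j < n && rqs[j].hctx == rqs[i].hctx)
			j++;
		printf("dispatch %zu request(s) to hctx %d\n", j - i, rqs[i].hctx);
		i = j;
	}
	return 0;
}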
107 : #define BLK_MQ_BUDGET_DELAY 3 /* ms units */
108 :
109 : /*
110 : * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
111 : * its queue by itself in its completion handler, so we don't need to
112 : * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
113 : *
114 : * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
115 : * be run again. This is necessary to avoid starving flushes.
116 : */
117 0 : static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
118 : {
119 0 : struct request_queue *q = hctx->queue;
120 0 : struct elevator_queue *e = q->elevator;
121 0 : bool multi_hctxs = false, run_queue = false;
122 0 : bool dispatched = false, busy = false;
123 0 : unsigned int max_dispatch;
124 0 : LIST_HEAD(rq_list);
125 0 : int count = 0;
126 :
127 0 : if (hctx->dispatch_busy)
128 0 : max_dispatch = 1;
129 : else
130 0 : max_dispatch = hctx->queue->nr_requests;
131 :
132 0 : do {
133 0 : struct request *rq;
134 :
135 0 : if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
136 : break;
137 :
138 0 : if (!list_empty_careful(&hctx->dispatch)) {
139 : busy = true;
140 : break;
141 : }
142 :
143 0 : if (!blk_mq_get_dispatch_budget(q))
144 : break;
145 :
146 0 : rq = e->type->ops.dispatch_request(hctx);
147 0 : if (!rq) {
148 0 : blk_mq_put_dispatch_budget(q);
149 : /*
150 : * We're releasing without dispatching. Holding the
151 : * budget could have blocked other hctxs on the
152 : * same queue, and if we didn't dispatch then there's
153 : * no guarantee anyone will kick the queue. Kick it
154 : * ourselves.
155 : */
156 : run_queue = true;
157 : break;
158 : }
159 :
160 : /*
161 : * This rq now owns the budget, which has to be released
162 : * if the rq won't be queued to the driver via .queue_rq()
163 : * in blk_mq_dispatch_rq_list().
164 : */
165 0 : list_add_tail(&rq->queuelist, &rq_list);
166 0 : if (rq->mq_hctx != hctx)
167 0 : multi_hctxs = true;
168 0 : } while (++count < max_dispatch);
169 :
170 0 : if (!count) {
171 0 : if (run_queue)
172 0 : blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
173 0 : } else if (multi_hctxs) {
174 : /*
175 : * Requests from different hctx may be dequeued from some
176 : * schedulers, such as bfq and deadline.
177 : *
178 : * Sort the requests in the list by hctx, then dispatch them in
179 : * batches, one hctx at a time.
180 : */
181 0 : list_sort(NULL, &rq_list, sched_rq_cmp);
182 0 : do {
183 0 : dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
184 0 : } while (!list_empty(&rq_list));
185 : } else {
186 0 : dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
187 : }
188 :
189 0 : if (busy)
190 : return -EAGAIN;
191 0 : return !!dispatched;
192 : }
193 :
194 : static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
195 : {
196 0 : int ret;
197 :
198 0 : do {
199 0 : ret = __blk_mq_do_dispatch_sched(hctx);
200 0 : } while (ret == 1);
201 :
202 : return ret;
203 : }
204 :
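Editor's note: both dispatch paths follow the same budget discipline: acquire one unit of device budget before dequeueing, transfer ownership of it to the dequeued request, and if the scheduler hands back nothing, return the budget and schedule a delayed re-run so other hctxs on the queue are not left blocked. A hedged userspace model of that acquire/transfer/release flow; try_get_budget(), dequeue() and the counters are illustrative stand-ins, not kernel functions:

#include <stdbool.h>
#include <stdio.h>

static int budget = 2;	/* models the device queue depth */
static int pending = 1;	/* requests the scheduler still holds */

static bool try_get_budget(void) { return budget > 0 ? (budget--, true) : false; }
static void put_budget(void)     { budget++; }

/* Returns true if a request was dequeued; it then owns one budget unit. */
static bool dequeue(void) { return pending > 0 ? (pending--, true) : false; }

static void dispatch_loop(void)
{
	for (;;) {
		if (!try_get_budget())
			break;		/* device full: stop, a completion restarts us */
		if (!dequeue()) {
			put_budget();	/* nothing to send: return the budget... */
			printf("kick the queue later\n"); /* ...and re-run so peers aren't blocked */
			break;
		}
		printf("request dispatched (owns its budget until completion)\n");
	}
}

int main(void) { dispatch_loop(); return 0; }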
205 0 : static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
206 : struct blk_mq_ctx *ctx)
207 : {
208 0 : unsigned short idx = ctx->index_hw[hctx->type];
209 :
210 0 : if (++idx == hctx->nr_ctx)
211 0 : idx = 0;
212 :
213 0 : return hctx->ctxs[idx];
214 : }
215 :
216 : /*
217 : * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
218 : * its queue by itself in its completion handler, so we don't need to
219 : * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
220 : *
221 : * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
222 : * be run again. This is necessary to avoid starving flushes.
223 : */
224 253 : static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
225 : {
226 253 : struct request_queue *q = hctx->queue;
227 253 : LIST_HEAD(rq_list);
228 253 : struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
229 253 : int ret = 0;
230 253 : struct request *rq;
231 :
232 253 : do {
233 253 : if (!list_empty_careful(&hctx->dispatch)) {
234 : ret = -EAGAIN;
235 : break;
236 : }
237 :
238 253 : if (!sbitmap_any_bit_set(&hctx->ctx_map))
239 : break;
240 :
241 0 : if (!blk_mq_get_dispatch_budget(q))
242 : break;
243 :
244 0 : rq = blk_mq_dequeue_from_ctx(hctx, ctx);
245 0 : if (!rq) {
246 0 : blk_mq_put_dispatch_budget(q);
247 : /*
248 : * We're releasing without dispatching. Holding the
249 : * budget could have blocked other hctxs on the
250 : * same queue, and if we didn't dispatch then there's
251 : * no guarantee anyone will kick the queue. Kick it
252 : * ourselves.
253 : */
254 0 : blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
255 0 : break;
256 : }
257 :
258 : /*
259 : * This rq now owns the budget, which has to be released
260 : * if the rq won't be queued to the driver via .queue_rq()
261 : * in blk_mq_dispatch_rq_list().
262 : */
263 0 : list_add(&rq->queuelist, &rq_list);
264 :
265 : /* round robin for fair dispatch */
266 0 : ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
267 :
268 0 : } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));
269 :
270 253 : WRITE_ONCE(hctx->dispatch_from, ctx);
271 253 : return ret;
272 : }
273 :
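Editor's note: blk_mq_do_dispatch_ctx() keeps dispatch fair by resuming its scan at the software queue after the one it last dequeued from, persisting the position in hctx->dispatch_from. The wrap-around step in blk_mq_next_ctx() is just a modular increment; here is a tiny sketch of the round-robin scan, simplified by assuming every ctx has work (NR_CTX and the starting index are illustrative):

#include <stdio.h>

#define NR_CTX 4

/* Model of blk_mq_next_ctx(): advance with wrap-around. */
static unsigned int next_ctx(unsigned int idx)
{
	return ++idx == NR_CTX ? 0 : idx;
}

int main(void)
{
	/* Persisted across calls, like hctx->dispatch_from. */
	unsigned int dispatch_from = 2;

	/* One dispatch pass: take at most one request per ctx, in order. */
	for (int i = 0; i < NR_CTX; i++) {
		printf("dequeue one request from ctx %u\n", dispatch_from);
		dispatch_from = next_ctx(dispatch_from);
	}
	printf("resume the next pass at ctx %u\n", dispatch_from);
	return 0;
}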
274 267 : static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
275 : {
276 267 : struct request_queue *q = hctx->queue;
277 267 : struct elevator_queue *e = q->elevator;
278 267 : const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
279 267 : int ret = 0;
280 267 : LIST_HEAD(rq_list);
281 :
282 : /*
283 : * If we have previous entries on our dispatch list, grab them first for
284 : * more fair dispatch.
285 : */
286 267 : if (!list_empty_careful(&hctx->dispatch)) {
287 253 : spin_lock(&hctx->lock);
288 253 : if (!list_empty(&hctx->dispatch))
289 253 : list_splice_init(&hctx->dispatch, &rq_list);
290 253 : spin_unlock(&hctx->lock);
291 : }
292 :
293 : /*
294 : * Only ask the scheduler for requests if we didn't have residual
295 : * requests from the dispatch list. This is to avoid the case where
296 : * we only ever dispatch a fraction of the requests available because
297 : * of low device queue depth. Once we pull requests out of the IO
298 : * scheduler, we can no longer merge or sort them. So it's best to
299 : * leave them there for as long as we can. Mark the hw queue as
300 : * needing a restart in that case.
301 : *
302 : * We want to dispatch from the scheduler if there was nothing
303 : * on the dispatch list or we were able to dispatch from the
304 : * dispatch list.
305 : */
306 267 : if (!list_empty(&rq_list)) {
307 253 : blk_mq_sched_mark_restart_hctx(hctx);
308 253 : if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
309 253 : if (has_sched_dispatch)
310 0 : ret = blk_mq_do_dispatch_sched(hctx);
311 : else
312 253 : ret = blk_mq_do_dispatch_ctx(hctx);
313 : }
314 14 : } else if (has_sched_dispatch) {
315 0 : ret = blk_mq_do_dispatch_sched(hctx);
316 14 : } else if (hctx->dispatch_busy) {
317 : /* dequeue requests one by one from the sw queue if the queue is busy */
318 0 : ret = blk_mq_do_dispatch_ctx(hctx);
319 : } else {
320 14 : blk_mq_flush_busy_ctxs(hctx, &rq_list);
321 14 : blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
322 : }
323 :
324 267 : return ret;
325 : }
326 :
327 267 : void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
328 : {
329 267 : struct request_queue *q = hctx->queue;
330 :
331 : /* RCU or SRCU read lock is needed before checking quiesced flag */
332 267 : if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
333 0 : return;
334 :
335 267 : hctx->run++;
336 :
337 : /*
338 : * A return of -EAGAIN is an indication that hctx->dispatch is not
339 : * empty and we must run again in order to avoid starving flushes.
340 : */
341 267 : if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
342 0 : if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
343 0 : blk_mq_run_hw_queue(hctx, true);
344 : }
345 : }
346 :
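Editor's note: the shape above bounds inline work: at most two synchronous dispatch passes, and if the second still reports -EAGAIN the queue is re-run asynchronously, so flushes parked on hctx->dispatch are not starved and the caller never loops unboundedly. A schematic model of that bounded-retry pattern; dispatch_once(), async_run() and the counter are illustrative:

#include <stdio.h>

#define AGAIN (-11)	/* stands in for -EAGAIN */

static int passes_left = 3;	/* pretend dispatch keeps finding leftover work */

static int dispatch_once(void)
{
	return --passes_left > 0 ? AGAIN : 0;
}

static void async_run(void)
{
	printf("queued async re-run\n");
}

int main(void)
{
	/* Two bounded inline attempts, then defer: the same shape as
	 * blk_mq_sched_dispatch_requests() above. */
	if (dispatch_once() == AGAIN) {
		if (dispatch_once() == AGAIN)
			async_run();
	}
	return 0;
}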
347 3120 : bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
348 : unsigned int nr_segs)
349 : {
350 3120 : struct elevator_queue *e = q->elevator;
351 3120 : struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
352 3120 : struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
353 3120 : bool ret = false;
354 3120 : enum hctx_type type;
355 :
356 3120 : if (e && e->type->ops.bio_merge)
357 0 : return e->type->ops.bio_merge(hctx, bio, nr_segs);
358 :
359 3120 : type = hctx->type;
360 3120 : if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
361 3120 : list_empty_careful(&ctx->rq_lists[type]))
362 3120 : return false;
363 :
364 : /* default per sw-queue merge */
365 0 : spin_lock(&ctx->lock);
366 : /*
367 : * Reverse check our software queue for entries that we could
368 : * potentially merge with. Currently includes a hand-wavy stop
369 : * count of 8, to not spend too much time checking for merges.
370 : */
371 0 : if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
372 0 : ctx->rq_merged++;
373 0 : ret = true;
374 : }
375 :
376 0 : spin_unlock(&ctx->lock);
377 :
378 0 : return ret;
379 : }
380 :
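Editor's note: blk_bio_list_merge() implements the bounded scan the comment above describes: walk the software queue newest-first and give up after eight entries, so merge probing never dominates submission cost. A self-contained sketch of that bounded reverse scan; the sector-adjacency test and struct pending are simplified illustrations, not the kernel's merge logic:

#include <stdbool.h>
#include <stdio.h>

#define MERGE_SCAN_LIMIT 8	/* the "hand-wavy stop count" above */

struct pending { long end_sector; };

/* Illustrative merge test: the bio is a back-merge candidate if it
 * starts exactly where a queued request ends. */
static bool can_merge(const struct pending *rq, long bio_start)
{
	return rq->end_sector == bio_start;
}

/* Scan the newest entries first (reverse order), giving up after
 * MERGE_SCAN_LIMIT entries. */
static bool try_list_merge(const struct pending *rqs, int n, long bio_start)
{
	int checked = 0;

	for (int i = n - 1; i >= 0 && checked < MERGE_SCAN_LIMIT; i--, checked++) {
		if (can_merge(&rqs[i], bio_start)) {
			printf("merged with request %d\n", i);
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct pending rqs[] = { { 100 }, { 200 }, { 300 } };

	if (!try_list_merge(rqs, 3, 200))
		printf("no merge, allocate a new request\n");
	return 0;
}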
381 0 : bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
382 : {
383 0 : return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
384 : }
385 : EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
386 :
387 253 : static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
388 : struct request *rq)
389 : {
390 : /*
391 : * dispatch flush and passthrough rq directly
392 : *
393 : * A passthrough request has to be added to hctx->dispatch directly:
394 : * the device may be in a state where it cannot handle FS requests,
395 : * so BLK_STS_RESOURCE is always returned and the FS request ends up
396 : * back on hctx->dispatch. A passthrough request may be exactly what
397 : * is needed to recover the device, and if it were added to the
398 : * scheduler queue there would be no chance to dispatch it, since
399 : * requests on hctx->dispatch are always tried first.
400 : */
401 255 : if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
402 253 : return true;
403 :
404 : return false;
405 : }
406 :
407 253 : void blk_mq_sched_insert_request(struct request *rq, bool at_head,
408 : bool run_queue, bool async)
409 : {
410 253 : struct request_queue *q = rq->q;
411 253 : struct elevator_queue *e = q->elevator;
412 253 : struct blk_mq_ctx *ctx = rq->mq_ctx;
413 253 : struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
414 :
415 506 : WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));
416 :
417 253 : if (blk_mq_sched_bypass_insert(hctx, rq)) {
418 : /*
419 : * First, normal IO requests are inserted into the scheduler queue
420 : * or the sw queue, while flush requests are added directly to the
421 : * dispatch queue (hctx->dispatch). There is at most one in-flight
422 : * flush request per hw queue, so it doesn't matter whether the
423 : * flush request is added to the tail or front of the dispatch
424 : * queue.
425 : *
426 : * Second, with NCQ a flush request is a non-NCQ command, and
427 : * queueing it fails while any normal IO request (an NCQ command)
428 : * is in flight. Adding the flush rq to the front of hctx->dispatch
429 : * tends to add latency to the flush rq, because of SCHED_RESTART,
430 : * compared with adding it to the tail; that increases the chance
431 : * of flush merging, so fewer flush requests are issued to the
432 : * controller. It was observed that adding the flush rq to the
433 : * front of hctx->dispatch saves ~10% of the time in blktests
434 : * block/004 on a disk attached via AHCI/NCQ.
435 : *
436 : * Simply queue the flush rq to the front of hctx->dispatch so that
437 : * flush-intensive workloads can benefit on NCQ hardware.
438 : */
439 253 : at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
440 253 : blk_mq_request_bypass_insert(rq, at_head, false);
441 253 : goto run;
442 : }
443 :
444 0 : if (e && e->type->ops.insert_requests) {
445 0 : LIST_HEAD(list);
446 :
447 0 : list_add(&rq->queuelist, &list);
448 0 : e->type->ops.insert_requests(hctx, &list, at_head);
449 : } else {
450 0 : spin_lock(&ctx->lock);
451 0 : __blk_mq_insert_request(hctx, rq, at_head);
452 0 : spin_unlock(&ctx->lock);
453 : }
454 :
455 253 : run:
456 253 : if (run_queue)
457 2 : blk_mq_run_hw_queue(hctx, async);
458 253 : }
459 :
460 1882 : void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
461 : struct blk_mq_ctx *ctx,
462 : struct list_head *list, bool run_queue_async)
463 : {
464 1882 : struct elevator_queue *e;
465 1882 : struct request_queue *q = hctx->queue;
466 :
467 : /*
468 : * blk_mq_sched_insert_requests() is called from flush plug
469 : * context only; take one usage counter to prevent the queue
470 : * from being released while it runs.
471 : */
472 1882 : percpu_ref_get(&q->q_usage_counter);
473 :
474 1882 : e = hctx->queue->elevator;
475 1882 : if (e && e->type->ops.insert_requests)
476 0 : e->type->ops.insert_requests(hctx, list, false);
477 : else {
478 : /*
479 : * With the 'none' scheduler, try to issue requests directly
480 : * if the hw queue isn't busy; this may save one extra
481 : * enqueue & dequeue to the sw queue.
482 : */
483 1882 : if (!hctx->dispatch_busy && !e && !run_queue_async) {
484 1868 : blk_mq_try_issue_list_directly(hctx, list);
485 1868 : if (list_empty(list))
486 1868 : goto out;
487 : }
488 14 : blk_mq_insert_requests(hctx, ctx, list);
489 : }
490 :
491 14 : blk_mq_run_hw_queue(hctx, run_queue_async);
492 1882 : out:
493 1882 : percpu_ref_put(&q->q_usage_counter);
494 1882 : }
495 :
496 0 : static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
497 : struct blk_mq_hw_ctx *hctx,
498 : unsigned int hctx_idx)
499 : {
500 0 : unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
501 :
502 0 : if (hctx->sched_tags) {
503 0 : blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
504 0 : blk_mq_free_rq_map(hctx->sched_tags, flags);
505 0 : hctx->sched_tags = NULL;
506 : }
507 0 : }
508 :
509 0 : static int blk_mq_sched_alloc_tags(struct request_queue *q,
510 : struct blk_mq_hw_ctx *hctx,
511 : unsigned int hctx_idx)
512 : {
513 0 : struct blk_mq_tag_set *set = q->tag_set;
514 : /* Clear HCTX_SHARED so tags are init'ed */
515 0 : unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
516 0 : int ret;
517 :
518 0 : hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
519 : set->reserved_tags, flags);
520 0 : if (!hctx->sched_tags)
521 : return -ENOMEM;
522 :
523 0 : ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
524 0 : if (ret)
525 0 : blk_mq_sched_free_tags(set, hctx, hctx_idx);
526 :
527 : return ret;
528 : }
529 :
530 : /* called in queue's release handler, tagset has gone away */
531 0 : static void blk_mq_sched_tags_teardown(struct request_queue *q)
532 : {
533 0 : struct blk_mq_hw_ctx *hctx;
534 0 : int i;
535 :
536 0 : queue_for_each_hw_ctx(q, hctx, i) {
537 : /* Clear HCTX_SHARED so tags are freed */
538 0 : unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
539 :
540 0 : if (hctx->sched_tags) {
541 0 : blk_mq_free_rq_map(hctx->sched_tags, flags);
542 0 : hctx->sched_tags = NULL;
543 : }
544 : }
545 0 : }
546 :
547 0 : int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
548 : {
549 0 : struct blk_mq_hw_ctx *hctx;
550 0 : struct elevator_queue *eq;
551 0 : unsigned int i;
552 0 : int ret;
553 :
554 0 : if (!e) {
555 0 : q->elevator = NULL;
556 0 : q->nr_requests = q->tag_set->queue_depth;
557 0 : return 0;
558 : }
559 :
560 : /*
561 : * Default to twice the smaller of the hw queue_depth and 128,
562 : * since we don't split into sync/async like the old code did.
563 : * Additionally, this is a per-hw queue depth.
564 : */
565 0 : q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
566 : BLKDEV_MAX_RQ);
567 :
568 0 : queue_for_each_hw_ctx(q, hctx, i) {
569 0 : ret = blk_mq_sched_alloc_tags(q, hctx, i);
570 0 : if (ret)
571 0 : goto err;
572 : }
573 :
574 0 : ret = e->ops.init_sched(q, e);
575 0 : if (ret)
576 0 : goto err;
577 :
578 0 : blk_mq_debugfs_register_sched(q);
579 :
580 0 : queue_for_each_hw_ctx(q, hctx, i) {
581 0 : if (e->ops.init_hctx) {
582 0 : ret = e->ops.init_hctx(hctx, i);
583 0 : if (ret) {
584 0 : eq = q->elevator;
585 0 : blk_mq_sched_free_requests(q);
586 0 : blk_mq_exit_sched(q, eq);
587 0 : kobject_put(&eq->kobj);
588 0 : return ret;
589 : }
590 : }
591 0 : blk_mq_debugfs_register_sched_hctx(q, hctx);
592 : }
593 :
594 : return 0;
595 :
596 0 : err:
597 0 : blk_mq_sched_free_requests(q);
598 0 : blk_mq_sched_tags_teardown(q);
599 0 : q->elevator = NULL;
600 0 : return ret;
601 : }
602 :
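Editor's note: working the depth formula above through a couple of values makes the cap concrete: a depth-64 device gets 128 scheduler tags per hctx, while a depth-1024 device is still clamped to 2 * 128 = 256. A one-off check of the arithmetic (BLKDEV_MAX_RQ is 128 in this kernel's include/linux/blkdev.h; the sample depths are illustrative):

#include <stdio.h>

#define BLKDEV_MAX_RQ 128
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int depths[] = { 31, 64, 1024 };

	/* e.g. 31 -> 62, 64 -> 128, 1024 -> 256 */
	for (int i = 0; i < 3; i++)
		printf("hw depth %4u -> nr_requests %u\n",
		       depths[i], 2 * MIN(depths[i], BLKDEV_MAX_RQ));
	return 0;
}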
603 : /*
604 : * Called from either blk_cleanup_queue() or elevator_switch();
605 : * the tagset is required for freeing requests.
606 : */
607 0 : void blk_mq_sched_free_requests(struct request_queue *q)
608 : {
609 0 : struct blk_mq_hw_ctx *hctx;
610 0 : int i;
611 :
612 0 : queue_for_each_hw_ctx(q, hctx, i) {
613 0 : if (hctx->sched_tags)
614 0 : blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
615 : }
616 0 : }
617 :
618 0 : void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
619 : {
620 0 : struct blk_mq_hw_ctx *hctx;
621 0 : unsigned int i;
622 :
623 0 : queue_for_each_hw_ctx(q, hctx, i) {
624 0 : blk_mq_debugfs_unregister_sched_hctx(hctx);
625 0 : if (e->type->ops.exit_hctx && hctx->sched_data) {
626 0 : e->type->ops.exit_hctx(hctx, i);
627 0 : hctx->sched_data = NULL;
628 : }
629 : }
630 0 : blk_mq_debugfs_unregister_sched(q);
631 0 : if (e->type->ops.exit_sched)
632 0 : e->type->ops.exit_sched(e);
633 0 : blk_mq_sched_tags_teardown(q);
634 0 : q->elevator = NULL;
635 0 : }