Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Functions to sequence PREFLUSH and FUA writes.
4 : *
5 : * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
6 : * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
7 : *
8 : * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
9 : * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
10 : * request properties and the hardware capability.
11 : *
12 : * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
13 : * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
14 : * that the device cache should be flushed before the data is executed, and
15 : * REQ_FUA means that the data must be on non-volatile media on request
16 : * completion.
17 : *
18 : * If the device doesn't have a writeback cache, PREFLUSH and FUA make no
19 : * difference. The requests are either completed immediately if there's no
20 : * data or executed as normal requests otherwise.
21 : *
22 : * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
23 : * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
24 : *
25 : * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
26 : * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
27 : *
28 : * The actual execution of flush is double buffered. Whenever a request
29 : * needs to execute PRE or POSTFLUSH, it queues at
30 : * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
31 : * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
32 : * completes, all the requests that were pending proceed to the next
33 : * step. This allows arbitrary merging of different types of PREFLUSH/FUA
34 : * requests.
35 : *
36 : * Currently, the following conditions are used to determine when to issue
37 : * a flush.
38 : *
39 : * C1. At any given time, only one flush shall be in progress. This makes
40 : * double buffering sufficient.
41 : *
42 : * C2. Flush is deferred if any request is executing DATA of its sequence.
43 : * This avoids issuing separate POSTFLUSHes for requests which shared
44 : * PREFLUSH.
45 : *
46 : * C3. The second condition is ignored if there is a request which has
47 : * waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
48 : * starvation in the unlikely case where there is a continuous stream
49 : * of FUA (without PREFLUSH) requests.
50 : *
51 : * For devices which support FUA, it isn't clear whether C2 (and thus C3)
52 : * is beneficial.
53 : *
54 : * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
55 : * once while executing DATA and again after the whole sequence is
56 : * complete. The first completion updates the contained bio but doesn't
57 : * finish it so that the bio submitter is notified only after the whole
58 : * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
59 : * req_bio_endio().
60 : *
61 : * The above peculiarity requires that each PREFLUSH/FUA request has only one
62 : * bio attached to it, which is guaranteed as they aren't allowed to be
63 : * merged in the usual way.
64 : */
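:
: /*
:  * Worked example (illustrative, not in the original source): on a
:  * device with a writeback cache but no FUA support, a
:  * REQ_PREFLUSH|REQ_FUA write with data is sequenced as
:  * PREFLUSH -> DATA -> POSTFLUSH. On a FUA-capable device the same
:  * request becomes PREFLUSH -> DATA with REQ_FUA passed down, and on a
:  * device without a writeback cache it is executed as a plain write.
:  */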
65 :
66 : #include <linux/kernel.h>
67 : #include <linux/module.h>
68 : #include <linux/bio.h>
69 : #include <linux/blkdev.h>
70 : #include <linux/gfp.h>
71 : #include <linux/blk-mq.h>
72 :
73 : #include "blk.h"
74 : #include "blk-mq.h"
75 : #include "blk-mq-tag.h"
76 : #include "blk-mq-sched.h"
77 :
78 : /* PREFLUSH/FUA sequences */
79 : enum {
80 : REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
81 : REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
82 : REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
83 : REQ_FSEQ_DONE = (1 << 3),
84 :
85 : REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
86 : REQ_FSEQ_POSTFLUSH,
87 :
88 : /*
89 : * If a flush has been pending longer than the following timeout,
90 : * it's issued even if flush_data requests are still in flight.
91 : */
92 : FLUSH_PENDING_TIMEOUT = 5 * HZ,
93 : };
94 :
95 : static void blk_kick_flush(struct request_queue *q,
96 : struct blk_flush_queue *fq, unsigned int flags);
97 :
98 132 : static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
99 : {
100 132 : unsigned int policy = 0;
101 :
102 132 : if (blk_rq_sectors(rq))
103 60 : policy |= REQ_FSEQ_DATA;
104 :
105 132 : if (fflags & (1UL << QUEUE_FLAG_WC)) {
106 132 : if (rq->cmd_flags & REQ_PREFLUSH)
107 131 : policy |= REQ_FSEQ_PREFLUSH;
108 132 : if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
109 : (rq->cmd_flags & REQ_FUA))
110 60 : policy |= REQ_FSEQ_POSTFLUSH;
111 : }
112 132 : return policy;
113 : }
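:
: /*
:  * Example (illustrative): for a REQ_PREFLUSH|REQ_FUA write with data
:  * on a queue with QUEUE_FLAG_WC set and QUEUE_FLAG_FUA clear,
:  * blk_flush_policy() returns
:  * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.
:  */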
114 :
115 574 : static unsigned int blk_flush_cur_seq(struct request *rq)
116 : {
117 574 : return 1 << ffz(rq->flush.seq);
118 : }
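:
: /*
:  * Note (added for clarity): ffz() returns the index of the first zero
:  * bit, so blk_flush_cur_seq() yields the lowest REQ_FSEQ_* step that
:  * @rq has not completed yet. E.g. with flush.seq ==
:  * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA (0b011), ffz() is 2 and the
:  * current step is REQ_FSEQ_POSTFLUSH (1 << 2).
:  */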
119 :
120 132 : static void blk_flush_restore_request(struct request *rq)
121 : {
122 : /*
123 : * After flush data completion, @rq->bio is %NULL but we need to
124 : * complete the bio again. @rq->biotail is guaranteed to equal the
125 : * original @rq->bio. Restore it.
126 : */
127 132 : rq->bio = rq->biotail;
128 :
129 : /* make @rq a normal request */
130 132 : rq->rq_flags &= ~RQF_FLUSH_SEQ;
131 132 : rq->end_io = rq->flush.saved_end_io;
132 : }
133 :
134 251 : static void blk_flush_queue_rq(struct request *rq, bool add_front)
135 : {
136 251 : blk_mq_add_to_requeue_list(rq, add_front, true);
137 251 : }
138 :
139 191 : static void blk_account_io_flush(struct request *rq)
140 : {
141 191 : struct block_device *part = rq->rq_disk->part0;
142 :
143 191 : part_stat_lock();
144 191 : part_stat_inc(part, ios[STAT_FLUSH]);
145 191 : part_stat_add(part, nsecs[STAT_FLUSH],
146 : ktime_get_ns() - rq->start_time_ns);
147 191 : part_stat_unlock();
148 191 : }
149 :
150 : /**
151 : * blk_flush_complete_seq - complete flush sequence
152 : * @rq: PREFLUSH/FUA request being sequenced
153 : * @fq: flush queue
154 : * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
155 : * @error: error status of the just-completed sequence step
156 : *
157 : * @rq just completed @seq part of its flush sequence, record the
158 : * completion and trigger the next step.
159 : *
160 : * CONTEXT:
161 : * spin_lock_irq(fq->mq_flush_lock)
162 : */
163 383 : static void blk_flush_complete_seq(struct request *rq,
164 : struct blk_flush_queue *fq,
165 : unsigned int seq, blk_status_t error)
166 : {
167 383 : struct request_queue *q = rq->q;
168 383 : struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
169 383 : unsigned int cmd_flags;
170 :
171 383 : BUG_ON(rq->flush.seq & seq);
172 383 : rq->flush.seq |= seq;
173 383 : cmd_flags = rq->cmd_flags;
174 :
175 383 : if (likely(!error))
176 383 : seq = blk_flush_cur_seq(rq);
177 : else
178 : seq = REQ_FSEQ_DONE;
179 :
180 383 : switch (seq) {
181 : case REQ_FSEQ_PREFLUSH:
182 : case REQ_FSEQ_POSTFLUSH:
183 : /* queue for flush */
184 191 : if (list_empty(pending))
185 191 : fq->flush_pending_since = jiffies;
186 191 : list_move_tail(&rq->flush.list, pending);
187 : break;
188 :
189 60 : case REQ_FSEQ_DATA:
190 60 : list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
191 60 : blk_flush_queue_rq(rq, true);
192 : break;
193 :
194 132 : case REQ_FSEQ_DONE:
195 : /*
196 : * @rq was previously adjusted by blk_insert_flush() for
197 : * flush sequencing and may already have gone through the
198 : * flush data request completion path. Restore @rq for
199 : * normal completion and end it.
200 : */
201 132 : BUG_ON(!list_empty(&rq->queuelist));
202 132 : list_del_init(&rq->flush.list);
203 132 : blk_flush_restore_request(rq);
204 132 : blk_mq_end_request(rq, error);
205 132 : break;
206 :
207 0 : default:
208 0 : BUG();
209 : }
210 :
211 383 : blk_kick_flush(q, fq, cmd_flags);
212 383 : }
213 :
214 191 : static void flush_end_io(struct request *flush_rq, blk_status_t error)
215 : {
216 191 : struct request_queue *q = flush_rq->q;
217 191 : struct list_head *running;
218 191 : struct request *rq, *n;
219 191 : unsigned long flags = 0;
220 191 : struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
221 :
222 191 : blk_account_io_flush(flush_rq);
223 :
224 : /* release the tag's ownership back to the request it was borrowed from */
225 191 : spin_lock_irqsave(&fq->mq_flush_lock, flags);
226 :
227 191 : if (!refcount_dec_and_test(&flush_rq->ref)) {
228 0 : fq->rq_status = error;
229 0 : spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
230 0 : return;
231 : }
232 :
233 : /*
234 : * The flush request has to be marked as IDLE when it is really ended,
235 : * because its .end_io() is also called from the timeout code path, to
236 : * avoid a use-after-free.
237 : */
238 191 : WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
239 191 : if (fq->rq_status != BLK_STS_OK)
240 0 : error = fq->rq_status;
241 :
242 191 : if (!q->elevator) {
243 191 : flush_rq->tag = BLK_MQ_NO_TAG;
244 : } else {
245 0 : blk_mq_put_driver_tag(flush_rq);
246 0 : flush_rq->internal_tag = BLK_MQ_NO_TAG;
247 : }
248 :
249 191 : running = &fq->flush_queue[fq->flush_running_idx];
250 191 : BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
251 :
252 : /* account completion of the flush request */
253 191 : fq->flush_running_idx ^= 1;
254 :
255 : /* and push the waiting requests to the next stage */
256 382 : list_for_each_entry_safe(rq, n, running, flush.list) {
257 191 : unsigned int seq = blk_flush_cur_seq(rq);
258 :
259 191 : BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
260 191 : blk_flush_complete_seq(rq, fq, seq, error);
261 : }
262 :
263 191 : spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
264 : }
265 :
266 : /**
267 : * blk_kick_flush - consider issuing flush request
268 : * @q: request_queue being kicked
269 : * @fq: flush queue
270 : * @flags: cmd_flags of the original request
271 : *
272 : * Flush related states of @q have changed, consider issuing flush request.
273 : * Please read the comment at the top of this file for more info.
274 : *
275 : * CONTEXT:
276 : * spin_lock_irq(fq->mq_flush_lock)
277 : *
278 : */
279 383 : static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
280 : unsigned int flags)
281 : {
282 383 : struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
283 383 : struct request *first_rq =
284 383 : list_first_entry(pending, struct request, flush.list);
285 383 : struct request *flush_rq = fq->flush_rq;
286 :
287 : /* C1 described at the top of this file */
288 383 : if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
289 : return;
290 :
291 : /* C2 and C3 */
292 191 : if (!list_empty(&fq->flush_data_in_flight) &&
293 0 : time_before(jiffies,
294 : fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
295 : return;
296 :
297 : /*
298 : * Issue flush and toggle pending_idx. This makes pending_idx
299 : * different from running_idx, which means flush is in flight.
300 : */
301 191 : fq->flush_pending_idx ^= 1;
302 :
303 191 : blk_rq_init(q, flush_rq);
304 :
305 : /*
306 : * With no I/O scheduler, borrow the tag from the first request
307 : * since they can't be in flight at the same time, and acquire
308 : * the tag's ownership for the flush request.
309 : *
310 : * With an I/O scheduler, the flush request needs to borrow the
311 : * scheduler tag just to satisfy the driver tag put/get pairing.
312 : */
313 191 : flush_rq->mq_ctx = first_rq->mq_ctx;
314 191 : flush_rq->mq_hctx = first_rq->mq_hctx;
315 :
316 191 : if (!q->elevator) {
317 191 : flush_rq->tag = first_rq->tag;
318 :
319 : /*
320 : * We borrow the data request's driver tag, so we have to mark
321 : * this flush request as INFLIGHT to avoid double accounting
322 : * of the driver tag.
323 : */
324 191 : flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
325 : } else
326 0 : flush_rq->internal_tag = first_rq->internal_tag;
327 :
328 191 : flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
329 191 : flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
330 191 : flush_rq->rq_flags |= RQF_FLUSH_SEQ;
331 191 : flush_rq->rq_disk = first_rq->rq_disk;
332 191 : flush_rq->end_io = flush_end_io;
333 :
334 191 : blk_flush_queue_rq(flush_rq, false);
335 : }
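:
: /*
:  * Note (added for clarity): once pending_idx is toggled above, newly
:  * arriving PRE/POSTFLUSH requests queue on the other fq->flush_queue[]
:  * list while flush_rq is in flight. flush_end_io() then toggles
:  * running_idx to drain the completed batch, which is why two lists
:  * suffice under rule C1 at the top of this file.
:  */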
336 :
337 60 : static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
338 : {
339 60 : struct request_queue *q = rq->q;
340 60 : struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
341 60 : struct blk_mq_ctx *ctx = rq->mq_ctx;
342 60 : unsigned long flags;
343 60 : struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
344 :
345 60 : if (q->elevator) {
346 0 : WARN_ON(rq->tag < 0);
347 0 : blk_mq_put_driver_tag(rq);
348 : }
349 :
350 : /*
351 : * After populating an empty queue, kick it to avoid stall. Read
352 : * the comment in flush_end_io().
353 : */
354 60 : spin_lock_irqsave(&fq->mq_flush_lock, flags);
355 60 : blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
356 60 : spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
357 :
358 60 : blk_mq_sched_restart(hctx);
359 60 : }
360 :
361 : /**
362 : * blk_insert_flush - insert a new PREFLUSH/FUA request
363 : * @rq: request to insert
364 : *
365 : * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions
366 : * or from __blk_mq_run_hw_queue() to dispatch the request.
367 : * @rq is being submitted. Analyze what needs to be done and put it on the
368 : * right queue.
369 : */
370 132 : void blk_insert_flush(struct request *rq)
371 : {
372 132 : struct request_queue *q = rq->q;
373 132 : unsigned long fflags = q->queue_flags; /* may change, cache */
374 132 : unsigned int policy = blk_flush_policy(fflags, rq);
375 132 : struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
376 :
377 : /*
378 : * @policy now records what operations need to be done. Adjust
379 : * REQ_PREFLUSH and FUA for the driver.
380 : */
381 132 : rq->cmd_flags &= ~REQ_PREFLUSH;
382 132 : if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
383 132 : rq->cmd_flags &= ~REQ_FUA;
384 :
385 : /*
386 : * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
387 : * of those flags, we have to set REQ_SYNC to avoid skewing
388 : * the request accounting.
389 : */
390 132 : rq->cmd_flags |= REQ_SYNC;
391 :
392 : /*
393 : * An empty flush handed down from a stacking driver may
394 : * translate into nothing if the underlying device does not
395 : * advertise a write-back cache. In this case, simply
396 : * complete the request.
397 : */
398 132 : if (!policy) {
399 0 : blk_mq_end_request(rq, 0);
400 0 : return;
401 : }
402 :
403 132 : BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */
404 :
405 : /*
406 : * If there's data but flush is not necessary, the request can be
407 : * processed directly without going through flush machinery. Queue
408 : * for normal execution.
409 : */
410 132 : if ((policy & REQ_FSEQ_DATA) &&
411 : !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
412 0 : blk_mq_request_bypass_insert(rq, false, false);
413 0 : return;
414 : }
415 :
416 : /*
417 : * @rq should go through flush machinery. Mark it part of flush
418 : * sequence and submit for further processing.
419 : */
420 132 : memset(&rq->flush, 0, sizeof(rq->flush));
421 132 : INIT_LIST_HEAD(&rq->flush.list);
422 132 : rq->rq_flags |= RQF_FLUSH_SEQ;
423 132 : rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
424 :
425 132 : rq->end_io = mq_flush_data_end_io;
426 :
427 132 : spin_lock_irq(&fq->mq_flush_lock);
428 132 : blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
429 132 : spin_unlock_irq(&fq->mq_flush_lock);
430 : }
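:
: /*
:  * Note on the blk_flush_complete_seq() call above: the mask
:  * REQ_FSEQ_ACTIONS & ~policy pre-marks the steps this request does
:  * not need as already complete. E.g. for a pure flush (policy ==
:  * REQ_FSEQ_PREFLUSH), DATA and POSTFLUSH start out marked done, so
:  * once the PREFLUSH finishes blk_flush_cur_seq() goes straight to
:  * REQ_FSEQ_DONE.
:  */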
431 :
432 : /**
433 : * blkdev_issue_flush - queue a flush
434 : * @bdev: blockdev to issue flush for
435 : *
436 : * Description:
437 : * Issue a flush for the block device in question.
438 : */
439 72 : int blkdev_issue_flush(struct block_device *bdev)
440 : {
441 72 : struct bio bio;
442 :
443 72 : bio_init(&bio, NULL, 0);
444 72 : bio_set_dev(&bio, bdev);
445 72 : bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
446 72 : return submit_bio_wait(&bio);
447 : }
448 : EXPORT_SYMBOL(blkdev_issue_flush);
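:
: /*
:  * Usage sketch (hypothetical caller, not part of this file): code that
:  * must make previously written data durable, e.g. an fsync-like path,
:  * might issue:
:  *
:  *	int err = blkdev_issue_flush(bdev);
:  *	if (err)
:  *		return err;
:  */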
449 :
450 9 : struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
451 : gfp_t flags)
452 : {
453 9 : struct blk_flush_queue *fq;
454 9 : int rq_sz = sizeof(struct request);
455 :
456 9 : fq = kzalloc_node(sizeof(*fq), flags, node);
457 9 : if (!fq)
458 0 : goto fail;
459 :
460 9 : spin_lock_init(&fq->mq_flush_lock);
461 :
462 9 : rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
463 9 : fq->flush_rq = kzalloc_node(rq_sz, flags, node);
464 9 : if (!fq->flush_rq)
465 0 : goto fail_rq;
466 :
467 9 : INIT_LIST_HEAD(&fq->flush_queue[0]);
468 9 : INIT_LIST_HEAD(&fq->flush_queue[1]);
469 9 : INIT_LIST_HEAD(&fq->flush_data_in_flight);
470 :
471 9 : return fq;
472 :
473 0 : fail_rq:
474 0 : kfree(fq);
475 : fail:
476 : return NULL;
477 : }
478 :
479 0 : void blk_free_flush_queue(struct blk_flush_queue *fq)
480 : {
481 : /* bio based request queue hasn't flush queue */
482 0 : if (!fq)
483 : return;
484 :
485 0 : kfree(fq->flush_rq);
486 0 : kfree(fq);
487 : }
488 :
489 : /*
490 : * Allow a driver to set its own lock class for fq->mq_flush_lock to
491 : * avoid a lockdep complaint.
492 : *
493 : * flush_end_io() may be called recursively from some drivers, such as
494 : * nvme-loop, so lockdep may complain about 'possible recursive locking'
495 : * because all 'struct blk_flush_queue' instances share the same
496 : * mq_flush_lock lock class key. We need to assign a different lock class
497 : * to such a driver's fq->mq_flush_lock to avoid the lockdep warning.
498 : *
499 : * Using a dynamically allocated lock class key per 'blk_flush_queue'
500 : * instance would be overkill, and worse, it introduces a horrible boot
501 : * delay because synchronize_rcu() is implied by lockdep_unregister_key(),
502 : * which is called on each hctx release. SCSI probing may synchronously
503 : * create and destroy lots of MQ request_queues for non-existent devices,
504 : * and some robot test kernels always enable the lockdep option: more than
505 : * half an hour has been observed for a SCSI MQ probe with a per-fq class.
506 : */
507 0 : void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
508 : struct lock_class_key *key)
509 : {
510 0 : lockdep_set_class(&hctx->fq->mq_flush_lock, key);
511 0 : }
512 : EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
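:
: /*
:  * Usage sketch (hypothetical driver code): a driver that can re-enter
:  * flush_end_io(), such as nvme-loop, would set the class from its
:  * blk_mq_ops ->init_hctx() callback:
:  *
:  *	static struct lock_class_key my_fq_lock_key;
:  *
:  *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
:  *				unsigned int hctx_idx)
:  *	{
:  *		blk_mq_hctx_set_fq_lock_class(hctx, &my_fq_lock_key);
:  *		return 0;
:  *	}
:  */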