Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Functions related to setting various queue properties from drivers
4 : */
5 : #include <linux/kernel.h>
6 : #include <linux/module.h>
7 : #include <linux/init.h>
8 : #include <linux/bio.h>
9 : #include <linux/blkdev.h>
10 : #include <linux/memblock.h> /* for max_pfn/max_low_pfn */
11 : #include <linux/gcd.h>
12 : #include <linux/lcm.h>
13 : #include <linux/jiffies.h>
14 : #include <linux/gfp.h>
15 : #include <linux/dma-mapping.h>
16 :
17 : #include "blk.h"
18 : #include "blk-wbt.h"
19 :
20 : unsigned long blk_max_low_pfn;
21 : EXPORT_SYMBOL(blk_max_low_pfn);
22 :
23 : unsigned long blk_max_pfn;
24 :
25 9 : void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
26 : {
27 9 : q->rq_timeout = timeout;
28 9 : }
29 : EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
30 :
31 : /**
32 : * blk_set_default_limits - reset limits to default values
33 : * @lim: the queue_limits structure to reset
34 : *
35 : * Description:
36 : * Returns a queue_limits struct to its default state.
37 : */
38 9 : void blk_set_default_limits(struct queue_limits *lim)
39 : {
40 9 : lim->max_segments = BLK_MAX_SEGMENTS;
41 9 : lim->max_discard_segments = 1;
42 9 : lim->max_integrity_segments = 0;
43 9 : lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
44 9 : lim->virt_boundary_mask = 0;
45 9 : lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
46 9 : lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
47 9 : lim->max_dev_sectors = 0;
48 9 : lim->chunk_sectors = 0;
49 9 : lim->max_write_same_sectors = 0;
50 9 : lim->max_write_zeroes_sectors = 0;
51 9 : lim->max_zone_append_sectors = 0;
52 9 : lim->max_discard_sectors = 0;
53 9 : lim->max_hw_discard_sectors = 0;
54 9 : lim->discard_granularity = 0;
55 9 : lim->discard_alignment = 0;
56 9 : lim->discard_misaligned = 0;
57 9 : lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
58 9 : lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
59 9 : lim->alignment_offset = 0;
60 9 : lim->io_opt = 0;
61 9 : lim->misaligned = 0;
62 9 : lim->zoned = BLK_ZONED_NONE;
63 9 : lim->zone_write_granularity = 0;
64 9 : }
65 : EXPORT_SYMBOL(blk_set_default_limits);
66 :
67 : /**
68 : * blk_set_stacking_limits - set default limits for stacking devices
69 : * @lim: the queue_limits structure to reset
70 : *
71 : * Description:
72 : * Returns a queue_limits struct to its default state. Should be used
73 : * by stacking drivers like DM that have no internal limits.
74 : */
75 0 : void blk_set_stacking_limits(struct queue_limits *lim)
76 : {
77 0 : blk_set_default_limits(lim);
78 :
79 : /* Inherit limits from component devices */
80 0 : lim->max_segments = USHRT_MAX;
81 0 : lim->max_discard_segments = USHRT_MAX;
82 0 : lim->max_hw_sectors = UINT_MAX;
83 0 : lim->max_segment_size = UINT_MAX;
84 0 : lim->max_sectors = UINT_MAX;
85 0 : lim->max_dev_sectors = UINT_MAX;
86 0 : lim->max_write_same_sectors = UINT_MAX;
87 0 : lim->max_write_zeroes_sectors = UINT_MAX;
88 0 : lim->max_zone_append_sectors = UINT_MAX;
89 0 : }
90 : EXPORT_SYMBOL(blk_set_stacking_limits);
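Illustrative sketch (not part of blk-settings.c): a stacking driver such as DM would typically reset its aggregate limits with blk_set_stacking_limits() and then fold in every component device with blk_stack_limits(). The function and variable names below are hypothetical; only the two helpers defined above are real.

static int example_stack_components(struct queue_limits *top_limits,
				    struct request_queue **component_qs,
				    sector_t *data_starts, int nr)
{
	int i, ret = 0;

	/* Start from the permissive stacking defaults... */
	blk_set_stacking_limits(top_limits);

	/* ...then narrow them to what each component device supports. */
	for (i = 0; i < nr; i++)
		ret |= blk_stack_limits(top_limits, &component_qs[i]->limits,
					data_starts[i]);

	return ret;	/* non-zero if any component turned out misaligned */
}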
91 :
92 : /**
93 : * blk_queue_bounce_limit - set bounce buffer limit for queue
94 : * @q: the request queue for the device
95 : * @max_addr: the maximum address the device can handle
96 : *
97 : * Description:
98 : * Different hardware can have different requirements as to what pages
99 : * it can do I/O directly to. A low level driver can call
100 : * blk_queue_bounce_limit to have lower memory pages allocated as bounce
101 : * buffers for doing I/O to pages residing above @max_addr.
102 : **/
103 0 : void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
104 : {
105 0 : unsigned long b_pfn = max_addr >> PAGE_SHIFT;
106 0 : int dma = 0;
107 :
108 0 : q->bounce_gfp = GFP_NOIO;
109 : #if BITS_PER_LONG == 64
110 : /*
111 : * Assume anything <= 4GB can be handled by IOMMU. Actually
112 : * some IOMMUs can handle everything, but I don't know of a
113 : * way to test this here.
114 : */
115 0 : if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
116 0 : dma = 1;
117 0 : q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
118 : #else
119 : if (b_pfn < blk_max_low_pfn)
120 : dma = 1;
121 : q->limits.bounce_pfn = b_pfn;
122 : #endif
123 0 : if (dma) {
124 0 : init_emergency_isa_pool();
125 0 : q->bounce_gfp = GFP_NOIO | GFP_DMA;
126 0 : q->limits.bounce_pfn = b_pfn;
127 : }
128 0 : }
129 : EXPORT_SYMBOL(blk_queue_bounce_limit);
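Illustrative sketch (not part of blk-settings.c): a hypothetical driver whose controller can only address the low 32 bits of physical memory could request bouncing for anything above that limit. DMA_BIT_MASK() comes from <linux/dma-mapping.h>, which this file already includes.

static void example_set_bounce_limit(struct request_queue *q)
{
	/* Pages above 4 GiB will be bounced through low memory. */
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
}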
130 :
131 : /**
132 : * blk_queue_max_hw_sectors - set max sectors for a request for this queue
133 : * @q: the request queue for the device
134 : * @max_hw_sectors: max hardware sectors in the usual 512b unit
135 : *
136 : * Description:
137 : * Enables a low level driver to set a hard upper limit,
138 : * max_hw_sectors, on the size of requests. max_hw_sectors is set by
139 : * the device driver based upon the capabilities of the I/O
140 : * controller.
141 : *
142 : * max_dev_sectors is a hard limit imposed by the storage device for
143 : * READ/WRITE requests. It is set by the disk driver.
144 : *
145 : * max_sectors is a soft limit imposed by the block layer for
146 : * filesystem type requests. This value can be overridden on a
147 : * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
148 : * The soft limit can not exceed max_hw_sectors.
149 : **/
150 9 : void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
151 : {
152 9 : struct queue_limits *limits = &q->limits;
153 9 : unsigned int max_sectors;
154 :
155 9 : if ((max_hw_sectors << 9) < PAGE_SIZE) {
156 0 : max_hw_sectors = 1 << (PAGE_SHIFT - 9);
157 0 : printk(KERN_INFO "%s: set to minimum %d\n",
158 : __func__, max_hw_sectors);
159 : }
160 :
161 9 : max_hw_sectors = round_down(max_hw_sectors,
162 : limits->logical_block_size >> SECTOR_SHIFT);
163 9 : limits->max_hw_sectors = max_hw_sectors;
164 :
165 9 : max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
166 9 : max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
167 9 : max_sectors = round_down(max_sectors,
168 : limits->logical_block_size >> SECTOR_SHIFT);
169 9 : limits->max_sectors = max_sectors;
170 :
171 9 : q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
172 9 : }
173 : EXPORT_SYMBOL(blk_queue_max_hw_sectors);
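Illustrative sketch (not part of blk-settings.c): a hypothetical driver that knows its controller's maximum transfer size in bytes would convert it to 512-byte sectors before registering it; max_xfer_bytes is a made-up parameter.

static void example_set_transfer_limit(struct request_queue *q,
				       unsigned int max_xfer_bytes)
{
	/*
	 * The helper also derives the softer max_sectors limit, capped at
	 * BLK_DEF_MAX_SECTORS and rounded down to the logical block size.
	 */
	blk_queue_max_hw_sectors(q, max_xfer_bytes >> SECTOR_SHIFT);
}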
174 :
175 : /**
176 : * blk_queue_chunk_sectors - set size of the chunk for this queue
177 : * @q: the request queue for the device
178 : * @chunk_sectors: chunk sectors in the usual 512b unit
179 : *
180 : * Description:
181 : * If a driver doesn't want IOs to cross a given chunk size, it can set
182 : * this limit and prevent merging across chunks. Note that the block layer
183 : * must accept a page worth of data at any offset. So if the crossing of
184 : * chunks is a hard limitation in the driver, it must still be prepared
185 : * to split single page bios.
186 : **/
187 0 : void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
188 : {
189 0 : q->limits.chunk_sectors = chunk_sectors;
190 0 : }
191 : EXPORT_SYMBOL(blk_queue_chunk_sectors);
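Illustrative sketch (not part of blk-settings.c): a hypothetical driver that stripes data in 256 KiB chunks and does not want I/O to straddle a stripe could set the chunk limit accordingly.

static void example_set_chunk_limit(struct request_queue *q)
{
	/* 256 KiB expressed in 512-byte sectors (512 sectors). */
	blk_queue_chunk_sectors(q, (256 * 1024) >> SECTOR_SHIFT);
}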
192 :
193 : /**
194 : * blk_queue_max_discard_sectors - set max sectors for a single discard
195 : * @q: the request queue for the device
196 : * @max_discard_sectors: maximum number of sectors to discard
197 : **/
198 1 : void blk_queue_max_discard_sectors(struct request_queue *q,
199 : unsigned int max_discard_sectors)
200 : {
201 1 : q->limits.max_hw_discard_sectors = max_discard_sectors;
202 1 : q->limits.max_discard_sectors = max_discard_sectors;
203 1 : }
204 : EXPORT_SYMBOL(blk_queue_max_discard_sectors);
205 :
206 : /**
207 : * blk_queue_max_write_same_sectors - set max sectors for a single write same
208 : * @q: the request queue for the device
209 : * @max_write_same_sectors: maximum number of sectors to write per command
210 : **/
211 0 : void blk_queue_max_write_same_sectors(struct request_queue *q,
212 : unsigned int max_write_same_sectors)
213 : {
214 0 : q->limits.max_write_same_sectors = max_write_same_sectors;
215 0 : }
216 : EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
217 :
218 : /**
219 : * blk_queue_max_write_zeroes_sectors - set max sectors for a single
220 : * write zeroes
221 : * @q: the request queue for the device
222 : * @max_write_zeroes_sectors: maximum number of sectors to write per command
223 : **/
224 1 : void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
225 : unsigned int max_write_zeroes_sectors)
226 : {
227 1 : q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
228 1 : }
229 : EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
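Illustrative sketch (not part of blk-settings.c): a hypothetical thin-provisioned device advertising discard and write-zeroes support; the 8 MiB per-command figure is made up.

static void example_set_discard_limits(struct request_queue *q)
{
	blk_queue_max_discard_sectors(q, (8 * 1024 * 1024) >> SECTOR_SHIFT);
	blk_queue_max_write_zeroes_sectors(q, (8 * 1024 * 1024) >> SECTOR_SHIFT);
}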
230 :
231 : /**
232 : * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
233 : * @q: the request queue for the device
234 : * @max_zone_append_sectors: maximum number of sectors to write per command
235 : **/
236 0 : void blk_queue_max_zone_append_sectors(struct request_queue *q,
237 : unsigned int max_zone_append_sectors)
238 : {
239 0 : unsigned int max_sectors;
240 :
241 0 : if (WARN_ON(!blk_queue_is_zoned(q)))
242 0 : return;
243 :
244 : max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
245 : max_sectors = min(q->limits.chunk_sectors, max_sectors);
246 :
247 : /*
248 : * Warn about potential driver bugs that leave the max_zone_append_sectors
249 : * limit at 0: a 0 argument, an unset chunk_sectors limit (zone size), or
250 : * an unset max_hw_sectors limit.
251 : */
252 : WARN_ON(!max_sectors);
253 :
254 : q->limits.max_zone_append_sectors = max_sectors;
255 : }
256 : EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
257 :
258 : /**
259 : * blk_queue_max_segments - set max hw segments for a request for this queue
260 : * @q: the request queue for the device
261 : * @max_segments: max number of segments
262 : *
263 : * Description:
264 : * Enables a low level driver to set an upper limit on the number of
265 : * hw data segments in a request.
266 : **/
267 1 : void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
268 : {
269 1 : if (!max_segments) {
270 0 : max_segments = 1;
271 0 : printk(KERN_INFO "%s: set to minimum %d\n",
272 : __func__, max_segments);
273 : }
274 :
275 1 : q->limits.max_segments = max_segments;
276 1 : }
277 : EXPORT_SYMBOL(blk_queue_max_segments);
278 :
279 : /**
280 : * blk_queue_max_discard_segments - set max segments for discard requests
281 : * @q: the request queue for the device
282 : * @max_segments: max number of segments
283 : *
284 : * Description:
285 : * Enables a low level driver to set an upper limit on the number of
286 : * segments in a discard request.
287 : **/
288 1 : void blk_queue_max_discard_segments(struct request_queue *q,
289 : unsigned short max_segments)
290 : {
291 1 : q->limits.max_discard_segments = max_segments;
292 1 : }
293 : EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
294 :
295 : /**
296 : * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
297 : * @q: the request queue for the device
298 : * @max_size: max size of segment in bytes
299 : *
300 : * Description:
301 : * Enables a low level driver to set an upper limit on the size of a
302 : * coalesced segment
303 : **/
304 1 : void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
305 : {
306 1 : if (max_size < PAGE_SIZE) {
307 0 : max_size = PAGE_SIZE;
308 0 : printk(KERN_INFO "%s: set to minimum %d\n",
309 : __func__, max_size);
310 : }
311 :
312 : /* see blk_queue_virt_boundary() for the explanation */
313 1 : WARN_ON_ONCE(q->limits.virt_boundary_mask);
314 :
315 1 : q->limits.max_segment_size = max_size;
316 1 : }
317 : EXPORT_SYMBOL(blk_queue_max_segment_size);
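Illustrative sketch (not part of blk-settings.c): a hypothetical controller with a 128-entry scatter/gather table whose entries can each describe at most 64 KiB would register both limits.

static void example_set_segment_limits(struct request_queue *q)
{
	blk_queue_max_segments(q, 128);
	blk_queue_max_segment_size(q, 64 * 1024);
}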
318 :
319 : /**
320 : * blk_queue_logical_block_size - set logical block size for the queue
321 : * @q: the request queue for the device
322 : * @size: the logical block size, in bytes
323 : *
324 : * Description:
325 : * This should be set to the lowest possible block size that the
326 : * storage device can address. The default of 512 covers most
327 : * hardware.
328 : **/
329 1 : void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
330 : {
331 1 : struct queue_limits *limits = &q->limits;
332 :
333 1 : limits->logical_block_size = size;
334 :
335 1 : if (limits->physical_block_size < size)
336 0 : limits->physical_block_size = size;
337 :
338 1 : if (limits->io_min < limits->physical_block_size)
339 0 : limits->io_min = limits->physical_block_size;
340 :
341 1 : limits->max_hw_sectors =
342 1 : round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
343 1 : limits->max_sectors =
344 1 : round_down(limits->max_sectors, size >> SECTOR_SHIFT);
345 1 : }
346 : EXPORT_SYMBOL(blk_queue_logical_block_size);
347 :
348 : /**
349 : * blk_queue_physical_block_size - set physical block size for the queue
350 : * @q: the request queue for the device
351 : * @size: the physical block size, in bytes
352 : *
353 : * Description:
354 : * This should be set to the lowest possible sector size that the
355 : * hardware can operate on without reverting to read-modify-write
356 : * operations.
357 : */
358 0 : void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
359 : {
360 0 : q->limits.physical_block_size = size;
361 :
362 0 : if (q->limits.physical_block_size < q->limits.logical_block_size)
363 0 : q->limits.physical_block_size = q->limits.logical_block_size;
364 :
365 0 : if (q->limits.io_min < q->limits.physical_block_size)
366 0 : q->limits.io_min = q->limits.physical_block_size;
367 0 : }
368 : EXPORT_SYMBOL(blk_queue_physical_block_size);
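Illustrative sketch (not part of blk-settings.c): a hypothetical 512-byte emulation (512e) drive exposes 512-byte logical blocks backed by 4 KiB physical blocks.

static void example_set_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);	/* also raises io_min to 4096 */
}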
369 :
370 : /**
371 : * blk_queue_zone_write_granularity - set zone write granularity for the queue
372 : * @q: the request queue for the zoned device
373 : * @size: the zone write granularity size, in bytes
374 : *
375 : * Description:
376 : * This should be set to the lowest possible size that allows writing to
377 : * the sequential zones of a zoned block device.
378 : */
379 0 : void blk_queue_zone_write_granularity(struct request_queue *q,
380 : unsigned int size)
381 : {
382 0 : if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
383 0 : return;
384 :
385 : q->limits.zone_write_granularity = size;
386 :
387 : if (q->limits.zone_write_granularity < q->limits.logical_block_size)
388 : q->limits.zone_write_granularity = q->limits.logical_block_size;
389 : }
390 : EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
391 :
392 : /**
393 : * blk_queue_alignment_offset - set physical block alignment offset
394 : * @q: the request queue for the device
395 : * @offset: alignment offset in bytes
396 : *
397 : * Description:
398 : * Some devices are naturally misaligned to compensate for things like
399 : * the legacy DOS partition table 63-sector offset. Low-level drivers
400 : * should call this function for devices whose first sector is not
401 : * naturally aligned.
402 : */
403 0 : void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
404 : {
405 0 : q->limits.alignment_offset =
406 0 : offset & (q->limits.physical_block_size - 1);
407 0 : q->limits.misaligned = 0;
408 0 : }
409 : EXPORT_SYMBOL(blk_queue_alignment_offset);
410 :
411 9 : void blk_queue_update_readahead(struct request_queue *q)
412 : {
413 : /*
414 : * For read-ahead of large files to be effective, we need to read ahead
415 : * at least twice the optimal I/O size.
416 : */
417 9 : q->backing_dev_info->ra_pages =
418 9 : max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
419 9 : q->backing_dev_info->io_pages =
420 9 : queue_max_sectors(q) >> (PAGE_SHIFT - 9);
421 9 : }
422 : EXPORT_SYMBOL_GPL(blk_queue_update_readahead);
423 :
424 : /**
425 : * blk_limits_io_min - set minimum request size for a device
426 : * @limits: the queue limits
427 : * @min: smallest I/O size in bytes
428 : *
429 : * Description:
430 : * Some devices have an internal block size bigger than the reported
431 : * hardware sector size. This function can be used to signal the
432 : * smallest I/O the device can perform without incurring a performance
433 : * penalty.
434 : */
435 0 : void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
436 : {
437 0 : limits->io_min = min;
438 :
439 0 : if (limits->io_min < limits->logical_block_size)
440 0 : limits->io_min = limits->logical_block_size;
441 :
442 0 : if (limits->io_min < limits->physical_block_size)
443 0 : limits->io_min = limits->physical_block_size;
444 0 : }
445 : EXPORT_SYMBOL(blk_limits_io_min);
446 :
447 : /**
448 : * blk_queue_io_min - set minimum request size for the queue
449 : * @q: the request queue for the device
450 : * @min: smallest I/O size in bytes
451 : *
452 : * Description:
453 : * Storage devices may report a granularity or preferred minimum I/O
454 : * size which is the smallest request the device can perform without
455 : * incurring a performance penalty. For disk drives this is often the
456 : * physical block size. For RAID arrays it is often the stripe chunk
457 : * size. A properly aligned multiple of minimum_io_size is the
458 : * preferred request size for workloads where a high number of I/O
459 : * operations is desired.
460 : */
461 0 : void blk_queue_io_min(struct request_queue *q, unsigned int min)
462 : {
463 0 : blk_limits_io_min(&q->limits, min);
464 0 : }
465 : EXPORT_SYMBOL(blk_queue_io_min);
466 :
467 : /**
468 : * blk_limits_io_opt - set optimal request size for a device
469 : * @limits: the queue limits
470 : * @opt: optimal request size in bytes
471 : *
472 : * Description:
473 : * Storage devices may report an optimal I/O size, which is the
474 : * device's preferred unit for sustained I/O. This is rarely reported
475 : * for disk drives. For RAID arrays it is usually the stripe width or
476 : * the internal track size. A properly aligned multiple of
477 : * optimal_io_size is the preferred request size for workloads where
478 : * sustained throughput is desired.
479 : */
480 0 : void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
481 : {
482 0 : limits->io_opt = opt;
483 0 : }
484 : EXPORT_SYMBOL(blk_limits_io_opt);
485 :
486 : /**
487 : * blk_queue_io_opt - set optimal request size for the queue
488 : * @q: the request queue for the device
489 : * @opt: optimal request size in bytes
490 : *
491 : * Description:
492 : * Storage devices may report an optimal I/O size, which is the
493 : * device's preferred unit for sustained I/O. This is rarely reported
494 : * for disk drives. For RAID arrays it is usually the stripe width or
495 : * the internal track size. A properly aligned multiple of
496 : * optimal_io_size is the preferred request size for workloads where
497 : * sustained throughput is desired.
498 : */
499 0 : void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
500 : {
501 0 : blk_limits_io_opt(&q->limits, opt);
502 0 : q->backing_dev_info->ra_pages =
503 0 : max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
504 0 : }
505 : EXPORT_SYMBOL(blk_queue_io_opt);
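Illustrative sketch (not part of blk-settings.c): a hypothetical RAID-style device with a 64 KiB chunk and four data drives per stripe might report the chunk size as io_min and the full stripe width as io_opt.

static void example_set_io_hints(struct request_queue *q)
{
	blk_queue_io_min(q, 64 * 1024);		/* chunk size */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* full stripe width */
}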
506 :
507 0 : static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
508 : {
509 0 : sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
510 0 : if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
511 : sectors = PAGE_SIZE >> SECTOR_SHIFT;
512 0 : return sectors;
513 : }
514 :
515 : /**
516 : * blk_stack_limits - adjust queue_limits for stacked devices
517 : * @t: the stacking driver limits (top device)
518 : * @b: the underlying queue limits (bottom, component device)
519 : * @start: first data sector within component device
520 : *
521 : * Description:
522 : * This function is used by stacking drivers like MD and DM to ensure
523 : * that all component devices have compatible block sizes and
524 : * alignments. The stacking driver must provide a queue_limits
525 : * struct (top) and then iteratively call the stacking function for
526 : * all component (bottom) devices. The stacking function will
527 : * attempt to combine the values and ensure proper alignment.
528 : *
529 : * Returns 0 if the top and bottom queue_limits are compatible. The
530 : * top device's block sizes and alignment offsets may be adjusted to
531 : * ensure alignment with the bottom device. If no compatible sizes
532 : * and alignments exist, -1 is returned and the resulting top
533 : * queue_limits will have the misaligned flag set to indicate that
534 : * the alignment_offset is undefined.
535 : */
536 0 : int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
537 : sector_t start)
538 : {
539 0 : unsigned int top, bottom, alignment, ret = 0;
540 :
541 0 : t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
542 0 : t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
543 0 : t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
544 0 : t->max_write_same_sectors = min(t->max_write_same_sectors,
545 : b->max_write_same_sectors);
546 0 : t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
547 : b->max_write_zeroes_sectors);
548 0 : t->max_zone_append_sectors = min(t->max_zone_append_sectors,
549 : b->max_zone_append_sectors);
550 0 : t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
551 :
552 0 : t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
553 : b->seg_boundary_mask);
554 0 : t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
555 : b->virt_boundary_mask);
556 :
557 0 : t->max_segments = min_not_zero(t->max_segments, b->max_segments);
558 0 : t->max_discard_segments = min_not_zero(t->max_discard_segments,
559 : b->max_discard_segments);
560 0 : t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
561 : b->max_integrity_segments);
562 :
563 0 : t->max_segment_size = min_not_zero(t->max_segment_size,
564 : b->max_segment_size);
565 :
566 0 : t->misaligned |= b->misaligned;
567 :
568 0 : alignment = queue_limit_alignment_offset(b, start);
569 :
570 : /* Bottom device has different alignment. Check that it is
571 : * compatible with the current top alignment.
572 : */
573 0 : if (t->alignment_offset != alignment) {
574 :
575 0 : top = max(t->physical_block_size, t->io_min)
576 : + t->alignment_offset;
577 0 : bottom = max(b->physical_block_size, b->io_min) + alignment;
578 :
579 : /* Verify that top and bottom intervals line up */
580 0 : if (max(top, bottom) % min(top, bottom)) {
581 0 : t->misaligned = 1;
582 0 : ret = -1;
583 : }
584 : }
585 :
586 0 : t->logical_block_size = max(t->logical_block_size,
587 : b->logical_block_size);
588 :
589 0 : t->physical_block_size = max(t->physical_block_size,
590 : b->physical_block_size);
591 :
592 0 : t->io_min = max(t->io_min, b->io_min);
593 0 : t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
594 :
595 : /* Set non-power-of-2 compatible chunk_sectors boundary */
596 0 : if (b->chunk_sectors)
597 0 : t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
598 :
599 : /* Physical block size a multiple of the logical block size? */
600 0 : if (t->physical_block_size & (t->logical_block_size - 1)) {
601 0 : t->physical_block_size = t->logical_block_size;
602 0 : t->misaligned = 1;
603 0 : ret = -1;
604 : }
605 :
606 : /* Minimum I/O a multiple of the physical block size? */
607 0 : if (t->io_min & (t->physical_block_size - 1)) {
608 0 : t->io_min = t->physical_block_size;
609 0 : t->misaligned = 1;
610 0 : ret = -1;
611 : }
612 :
613 : /* Optimal I/O a multiple of the physical block size? */
614 0 : if (t->io_opt & (t->physical_block_size - 1)) {
615 0 : t->io_opt = 0;
616 0 : t->misaligned = 1;
617 0 : ret = -1;
618 : }
619 :
620 : /* chunk_sectors a multiple of the physical block size? */
621 0 : if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
622 0 : t->chunk_sectors = 0;
623 0 : t->misaligned = 1;
624 0 : ret = -1;
625 : }
626 :
627 0 : t->raid_partial_stripes_expensive =
628 0 : max(t->raid_partial_stripes_expensive,
629 : b->raid_partial_stripes_expensive);
630 :
631 : /* Find lowest common alignment_offset */
632 0 : t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
633 0 : % max(t->physical_block_size, t->io_min);
634 :
635 : /* Verify that new alignment_offset is on a logical block boundary */
636 0 : if (t->alignment_offset & (t->logical_block_size - 1)) {
637 0 : t->misaligned = 1;
638 0 : ret = -1;
639 : }
640 :
641 0 : t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
642 0 : t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
643 0 : t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
644 :
645 : /* Discard alignment and granularity */
646 0 : if (b->discard_granularity) {
647 0 : alignment = queue_limit_discard_alignment(b, start);
648 :
649 0 : if (t->discard_granularity != 0 &&
650 0 : t->discard_alignment != alignment) {
651 0 : top = t->discard_granularity + t->discard_alignment;
652 0 : bottom = b->discard_granularity + alignment;
653 :
654 : /* Verify that top and bottom intervals line up */
655 0 : if ((max(top, bottom) % min(top, bottom)) != 0)
656 0 : t->discard_misaligned = 1;
657 : }
658 :
659 0 : t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
660 : b->max_discard_sectors);
661 0 : t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
662 : b->max_hw_discard_sectors);
663 0 : t->discard_granularity = max(t->discard_granularity,
664 : b->discard_granularity);
665 0 : t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
666 0 : t->discard_granularity;
667 : }
668 :
669 0 : t->zone_write_granularity = max(t->zone_write_granularity,
670 : b->zone_write_granularity);
671 0 : t->zoned = max(t->zoned, b->zoned);
672 0 : return ret;
673 : }
674 : EXPORT_SYMBOL(blk_stack_limits);
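A hedged worked example of the alignment check above, with made-up numbers: if the top device so far has physical_block_size = io_min = 4096 and alignment_offset = 0, its interval is max(4096, 4096) + 0 = 4096. A bottom device with physical_block_size = 4096 whose computed alignment at @start is 3584 yields an interval of 4096 + 3584 = 7680; since 7680 % 4096 = 3584 is non-zero, the intervals do not line up, the misaligned flag is set and -1 is returned. Had the bottom alignment been 4096, the interval would be 8192, 8192 % 4096 = 0, and the limits would stack cleanly.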
675 :
676 : /**
677 : * disk_stack_limits - adjust queue limits for stacked drivers
678 : * @disk: MD/DM gendisk (top)
679 : * @bdev: the underlying block device (bottom)
680 : * @offset: offset to beginning of data within component device
681 : *
682 : * Description:
683 : * Merges the limits for a top level gendisk and a bottom level
684 : * block_device.
685 : */
686 0 : void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
687 : sector_t offset)
688 : {
689 0 : struct request_queue *t = disk->queue;
690 :
691 0 : if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
692 0 : get_start_sect(bdev) + (offset >> 9)) < 0) {
693 0 : char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
694 :
695 0 : disk_name(disk, 0, top);
696 0 : bdevname(bdev, bottom);
697 :
698 0 : printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
699 : top, bottom);
700 : }
701 :
702 0 : blk_queue_update_readahead(disk->queue);
703 0 : }
704 : EXPORT_SYMBOL(disk_stack_limits);
705 :
706 : /**
707 : * blk_queue_update_dma_pad - update pad mask
708 : * @q: the request queue for the device
709 : * @mask: pad mask
710 : *
711 : * Update dma pad mask.
712 : *
713 : * Appending pad buffer to a request modifies the last entry of a
714 : * scatter list such that it includes the pad buffer.
715 : **/
716 0 : void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
717 : {
718 0 : if (mask > q->dma_pad_mask)
719 0 : q->dma_pad_mask = mask;
720 0 : }
721 : EXPORT_SYMBOL(blk_queue_update_dma_pad);
722 :
723 : /**
724 : * blk_queue_segment_boundary - set boundary rules for segment merging
725 : * @q: the request queue for the device
726 : * @mask: the memory boundary mask
727 : **/
728 0 : void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
729 : {
730 0 : if (mask < PAGE_SIZE - 1) {
731 0 : mask = PAGE_SIZE - 1;
732 0 : printk(KERN_INFO "%s: set to minimum %lx\n",
733 : __func__, mask);
734 : }
735 :
736 0 : q->limits.seg_boundary_mask = mask;
737 0 : }
738 : EXPORT_SYMBOL(blk_queue_segment_boundary);
739 :
740 : /**
741 : * blk_queue_virt_boundary - set boundary rules for bio merging
742 : * @q: the request queue for the device
743 : * @mask: the memory boundary mask
744 : **/
745 0 : void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
746 : {
747 0 : q->limits.virt_boundary_mask = mask;
748 :
749 : /*
750 : * Devices that require a virtual boundary do not support scatter/gather
751 : * I/O natively, but instead require a descriptor list entry for each
752 : * page (which might not be identical to the Linux PAGE_SIZE). Because
753 : * of that they are not limited by our notion of "segment size".
754 : */
755 0 : if (mask)
756 0 : q->limits.max_segment_size = UINT_MAX;
757 0 : }
758 : EXPORT_SYMBOL(blk_queue_virt_boundary);
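Illustrative sketch (not part of blk-settings.c): a hypothetical device whose per-page data descriptors (PRP-style lists) must not cross a 4 KiB boundary would register that boundary; as the comment above notes, this also lifts the segment size limit.

static void example_set_virt_boundary(struct request_queue *q)
{
	blk_queue_virt_boundary(q, 4096 - 1);
}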
759 :
760 : /**
761 : * blk_queue_dma_alignment - set dma length and memory alignment
762 : * @q: the request queue for the device
763 : * @mask: alignment mask
764 : *
765 : * Description:
766 : * Set required memory and length alignment for direct DMA transactions.
767 : * This is used when building direct I/O requests for the queue.
768 : *
769 : **/
770 9 : void blk_queue_dma_alignment(struct request_queue *q, int mask)
771 : {
772 9 : q->dma_alignment = mask;
773 9 : }
774 : EXPORT_SYMBOL(blk_queue_dma_alignment);
775 :
776 : /**
777 : * blk_queue_update_dma_alignment - update dma length and memory alignment
778 : * @q: the request queue for the device
779 : * @mask: alignment mask
780 : *
781 : * Description:
782 : * Update required memory and length alignment for direct DMA transactions.
783 : * If the requested alignment is larger than the current alignment, then
784 : * the current queue alignment is updated to the new value, otherwise it
785 : * is left alone. The design of this is to allow multiple objects
786 : * (driver, device, transport etc) to set their respective
787 : * alignments without having them interfere.
788 : *
789 : **/
790 0 : void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
791 : {
792 0 : BUG_ON(mask > PAGE_SIZE);
793 :
794 0 : if (mask > q->dma_alignment)
795 0 : q->dma_alignment = mask;
796 0 : }
797 : EXPORT_SYMBOL(blk_queue_update_dma_alignment);
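Illustrative sketch (not part of blk-settings.c): a hypothetical driver needs 4-byte aligned buffers and a transport layer later raises the requirement to 512-byte alignment; the larger mask wins.

static void example_set_dma_alignment(struct request_queue *q)
{
	blk_queue_dma_alignment(q, 4 - 1);
	blk_queue_update_dma_alignment(q, 512 - 1);	/* 511 > 3, so it sticks */
}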
798 :
799 : /**
800 : * blk_set_queue_depth - tell the block layer about the device queue depth
801 : * @q: the request queue for the device
802 : * @depth: queue depth
803 : *
804 : */
805 0 : void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
806 : {
807 0 : q->queue_depth = depth;
808 0 : rq_qos_queue_depth_changed(q);
809 0 : }
810 : EXPORT_SYMBOL(blk_set_queue_depth);
811 :
812 : /**
813 : * blk_queue_write_cache - configure queue's write cache
814 : * @q: the request queue for the device
815 : * @wc: write back cache on or off
816 : * @fua: device supports FUA writes, if true
817 : *
818 : * Tell the block layer about the write cache of @q.
819 : */
820 1 : void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
821 : {
822 1 : if (wc)
823 1 : blk_queue_flag_set(QUEUE_FLAG_WC, q);
824 : else
825 0 : blk_queue_flag_clear(QUEUE_FLAG_WC, q);
826 1 : if (fua)
827 0 : blk_queue_flag_set(QUEUE_FLAG_FUA, q);
828 : else
829 1 : blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
830 :
831 1 : wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
832 1 : }
833 : EXPORT_SYMBOL_GPL(blk_queue_write_cache);
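Illustrative sketch (not part of blk-settings.c): a hypothetical device with a volatile write-back cache that also honours FUA writes would report both.

static void example_set_write_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);
}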
834 :
835 : /**
836 : * blk_queue_required_elevator_features - set the required elevator features for a queue
837 : * @q: the request queue for the target device
838 : * @features: Required elevator features OR'ed together
839 : *
840 : * Tell the block layer that for the device controlled through @q, the
841 : * only elevators that can be used are those that implement at least the set of
842 : * features specified by @features.
843 : */
844 0 : void blk_queue_required_elevator_features(struct request_queue *q,
845 : unsigned int features)
846 : {
847 0 : q->required_elevator_features = features;
848 0 : }
849 : EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
850 :
851 : /**
852 : * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
853 : * @q: the request queue for the device
854 : * @dev: the device pointer for dma
855 : *
856 : * Tell the block layer that segments of @q can be merged through the DMA map of @dev.
857 : */
858 0 : bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
859 : struct device *dev)
860 : {
861 0 : unsigned long boundary = dma_get_merge_boundary(dev);
862 :
863 0 : if (!boundary)
864 : return false;
865 :
866 : /* No need to update max_segment_size. see blk_queue_virt_boundary() */
867 0 : blk_queue_virt_boundary(q, boundary);
868 :
869 0 : return true;
870 : }
871 : EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
872 :
873 : /**
874 : * blk_queue_set_zoned - configure a disk queue zoned model.
875 : * @disk: the gendisk of the queue to configure
876 : * @model: the zoned model to set
877 : *
878 : * Set the zoned model of the request queue of @disk according to @model.
879 : * When @model is BLK_ZONED_HM (host managed), this should be called only
880 : * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
881 : * If @model specifies BLK_ZONED_HA (host aware), the effective model used
882 : * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
883 : * on the disk.
884 : */
885 0 : void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
886 : {
887 0 : struct request_queue *q = disk->queue;
888 :
889 0 : switch (model) {
890 : case BLK_ZONED_HM:
891 : /*
892 : * Host managed devices are supported only if
893 : * CONFIG_BLK_DEV_ZONED is enabled.
894 : */
895 0 : WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
896 0 : break;
897 : case BLK_ZONED_HA:
898 : /*
899 : * Host aware devices can be treated either as regular block
900 : * devices (similar to drive managed devices) or as zoned block
901 : * devices to take advantage of the zone command set, similarly
902 : * to host managed devices. We try the latter if there are no
903 : * partitions and zoned block device support is enabled, else
904 : * we do nothing special as far as the block layer is concerned.
905 : */
906 : if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
907 : !xa_empty(&disk->part_tbl))
908 : model = BLK_ZONED_NONE;
909 : break;
910 0 : case BLK_ZONED_NONE:
911 : default:
912 0 : if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
913 0 : model = BLK_ZONED_NONE;
914 : break;
915 : }
916 :
917 0 : q->limits.zoned = model;
918 0 : if (model != BLK_ZONED_NONE) {
919 : /*
920 : * Set the zone write granularity to the device logical block
921 : * size by default. The driver can change this value if needed.
922 : */
923 0 : blk_queue_zone_write_granularity(q,
924 : queue_logical_block_size(q));
925 : } else {
926 0 : blk_queue_clear_zone_settings(q);
927 : }
928 0 : }
929 : EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
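Illustrative sketch (not part of blk-settings.c): a hypothetical host-managed zoned disk with 256 MiB zones; the zone size and the helper name are made up, and blk_queue_set_zoned() is called before the zone append limit so that the queue is already marked zoned.

static void example_setup_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int zone_sectors = (256 * 1024 * 1024) >> SECTOR_SHIFT;

	blk_queue_set_zoned(disk, BLK_ZONED_HM);
	blk_queue_chunk_sectors(q, zone_sectors);
	blk_queue_max_zone_append_sectors(q, zone_sectors);
}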
930 :
931 1 : static int __init blk_settings_init(void)
932 : {
933 1 : blk_max_low_pfn = max_low_pfn - 1;
934 1 : blk_max_pfn = max_pfn - 1;
935 1 : return 0;
936 : }
937 : subsys_initcall(blk_settings_init);