/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		= 0,
	DM_TYPE_BIO_BASED	= 1,
	DM_TYPE_REQUEST_BASED	= 2,
	DM_TYPE_DAX_BIO_BASED	= 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
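
/*
 * Example: a minimal bio-based map function in the style of dm-linear.
 * An illustrative sketch only; "example_ctx", its "dev" and "start"
 * members, and "example_map" are hypothetical names, not part of this
 * header. The DM_MAPIO_* return values are defined later in this file.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		bio->bi_iter.bi_sector = ec->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */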
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (e.g.,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);
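
/*
 * Example: a minimal end_io hook. An illustrative sketch only;
 * "example_end_io" is a hypothetical name. The DM_ENDIO_* values are
 * defined later in this file.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  blk_status_t *error)
 *	{
 *		if (*error == BLK_STS_IOERR) {
 *			// ask core to requeue the io, e.g. down another path
 *			return DM_ENDIO_REQUEUE;
 *		}
 *		return DM_ENDIO_DONE;
 *	}
 */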

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can set it to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target would need
 * awkward #ifdefs in its target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
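
/*
 * Example: a callout that reports whether an underlying device lacks
 * discard support, in the style used by dm-table. An illustrative
 * sketch only; "device_not_discard_capable" is a hypothetical name.
 *
 *	static int device_not_discard_capable(struct dm_target *ti,
 *					      struct dm_dev *dev,
 *					      sector_t start, sector_t len,
 *					      void *data)
 *	{
 *		return !blk_queue_discard(bdev_get_queue(dev->bdev));
 *	}
 *
 * A caller would pass this to ti->type->iterate_devices(); a non-zero
 * result means at least one underlying device cannot discard.
 */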

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
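
/*
 * Example: a constructor that opens one underlying device and stashes
 * it in ti->private, with the matching destructor. An illustrative
 * sketch only; "example_ctx", "example_ctr" and "example_dtr" are
 * hypothetical names.
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_ctx *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */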

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;
	dm_dax_zero_page_range_fn dax_zero_page_range;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates that a target supports host-managed zoned block devices.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO	0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
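
/*
 * Example: using per-bio data sized via ti->per_io_data_size. An
 * illustrative sketch only; "example_io" and the surrounding names
 * are hypothetical.
 *
 *	struct example_io {
 *		unsigned long start_jiffies;
 *	};
 *
 *	// In the constructor:
 *	//	ti->per_io_data_size = sizeof(struct example_io);
 *
 *	// In the map function:
 *	struct example_io *io = dm_per_bio_data(bio,
 *						sizeof(struct example_io));
 *	io->start_jiffies = jiffies;
 *
 *	// Later, the owning bio can be recovered with:
 *	struct bio *bio = dm_bio_from_per_bio_data(io,
 *						   sizeof(struct example_io));
 */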

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
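
/*
 * Example: tying the pieces together in a module. An illustrative
 * sketch only; "example_target" and the example_* hooks are
 * hypothetical names.
 *
 *	static struct target_type example_target = {
 *		.name	 = "example",
 *		.version = {1, 0, 0},
 *		.module	 = THIS_MODULE,
 *		.ctr	 = example_ctr,
 *		.dtr	 = example_dtr,
 *		.map	 = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		return dm_register_target(&example_target);
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */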

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
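
/*
 * Example: parsing an optional feature-argument group, in the style
 * used by several targets. An illustrative sketch only;
 * "parse_features" and "example_ctx" are hypothetical names.
 *
 *	static int parse_features(struct dm_arg_set *as,
 *				  struct example_ctx *ec,
 *				  struct dm_target *ti)
 *	{
 *		static const struct dm_arg _args[] = {
 *			{0, 1, "Invalid number of feature args"},
 *		};
 *		unsigned argc;
 *		int r;
 *
 *		r = dm_read_arg_group(_args, as, &argc, &ti->error);
 *		if (r)
 *			return r;
 *
 *		while (argc--) {
 *			const char *arg = dm_shift_arg(as);
 *
 *			if (!strcasecmp(arg, "example_feature")) {
 *				ec->feature = true;
 *			} else {
 *				ti->error = "Unrecognised feature requested";
 *				return -EINVAL;
 *			}
 *		}
 *		return 0;
 *	}
 */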

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (supporting both bio-based
 * and request-based).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);
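
/*
 * Example: the create/add/complete sequence for an in-kernel table.
 * An illustrative sketch only; "md", "dev_size" and "params" are
 * assumed to be provided by the caller.
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, dev_size, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */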

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table keyslot manager functions
 */
void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
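
/*
 * Example: a status function built on DMEMIT, which expects local
 * variables named "sz", "result" and "maxlen" to be in scope. An
 * illustrative sketch only; "example_status" and "example_ctx" are
 * hypothetical names.
 *
 *	static void example_status(struct dm_target *ti,
 *				   status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */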

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
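
/*
 * Example: worked values for the rounding helpers above.
 *
 *	dm_div_up(10, 4)   == 3		// ceil(10 / 4)
 *	dm_round_up(10, 4) == 12	// ceil(10 / 4) * 4
 */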

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */