LCOV - code coverage report
Current view: top level - drivers/md - dm-core.h (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

             Hit    Total    Coverage
Lines:         0        8       0.0 %
Functions:     0        1       0.0 %

          Line data    Source code
       1             : /*
       2             :  * Internal header file _only_ for device mapper core
       3             :  *
       4             :  * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
       5             :  *
       6             :  * This file is released under the LGPL.
       7             :  */
       8             : 
       9             : #ifndef DM_CORE_INTERNAL_H
      10             : #define DM_CORE_INTERNAL_H
      11             : 
      12             : #include <linux/kthread.h>
      13             : #include <linux/ktime.h>
      14             : #include <linux/genhd.h>
      15             : #include <linux/blk-mq.h>
      16             : #include <linux/keyslot-manager.h>
      17             : 
      18             : #include <trace/events/block.h>
      19             : 
      20             : #include "dm.h"
      21             : 
      22             : #define DM_RESERVED_MAX_IOS             1024
      23             : 
      24             : struct dm_kobject_holder {
      25             :         struct kobject kobj;
      26             :         struct completion completion;
      27             : };
      28             : 
      29             : /*
      30             :  * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
       31             :  * DM targets must _not_ dereference a mapped_device or dm_table to directly
      32             :  * access their members!
      33             :  */
      34             : 
      35             : struct mapped_device {
      36             :         struct mutex suspend_lock;
      37             : 
      38             :         struct mutex table_devices_lock;
      39             :         struct list_head table_devices;
      40             : 
      41             :         /*
      42             :          * The current mapping (struct dm_table *).
      43             :          * Use dm_get_live_table{_fast} or take suspend_lock for
       44             :          * dereference (see the usage sketch after this struct).
      45             :          */
      46             :         void __rcu *map;
      47             : 
      48             :         unsigned long flags;
      49             : 
      50             :         /* Protect queue and type against concurrent access. */
      51             :         struct mutex type_lock;
      52             :         enum dm_queue_mode type;
      53             : 
      54             :         int numa_node_id;
      55             :         struct request_queue *queue;
      56             : 
      57             :         atomic_t holders;
      58             :         atomic_t open_count;
      59             : 
      60             :         struct dm_target *immutable_target;
      61             :         struct target_type *immutable_target_type;
      62             : 
      63             :         char name[16];
      64             :         struct gendisk *disk;
      65             :         struct dax_device *dax_dev;
      66             : 
      67             :         /*
      68             :          * A list of ios that arrived while we were suspended.
      69             :          */
      70             :         struct work_struct work;
      71             :         wait_queue_head_t wait;
      72             :         spinlock_t deferred_lock;
      73             :         struct bio_list deferred;
      74             : 
      75             :         void *interface_ptr;
      76             : 
      77             :         /*
      78             :          * Event handling.
      79             :          */
      80             :         wait_queue_head_t eventq;
      81             :         atomic_t event_nr;
      82             :         atomic_t uevent_seq;
      83             :         struct list_head uevent_list;
      84             :         spinlock_t uevent_lock; /* Protect access to uevent_list */
      85             : 
      86             :         /* the number of internal suspends */
      87             :         unsigned internal_suspend_count;
      88             : 
      89             :         /*
      90             :          * io objects are allocated from here.
      91             :          */
      92             :         struct bio_set io_bs;
      93             :         struct bio_set bs;
      94             : 
      95             :         /*
      96             :          * Processing queue (flush)
      97             :          */
      98             :         struct workqueue_struct *wq;
      99             : 
     100             :         /* forced geometry settings */
     101             :         struct hd_geometry geometry;
     102             : 
     103             :         /* kobject and completion */
     104             :         struct dm_kobject_holder kobj_holder;
     105             : 
     106             :         int swap_bios;
     107             :         struct semaphore swap_bios_semaphore;
     108             :         struct mutex swap_bios_lock;
     109             : 
     110             :         struct dm_stats stats;
     111             : 
     112             :         /* for blk-mq request-based DM support */
     113             :         struct blk_mq_tag_set *tag_set;
     114             :         bool init_tio_pdu:1;
     115             : 
     116             :         struct srcu_struct io_barrier;
     117             : };
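
The comment on the map member above names the only sanctioned ways to reach the live table: dm_get_live_table{_fast}() or holding suspend_lock. Below is a minimal sketch of the SRCU-protected pattern, assuming the dm_get_live_table()/dm_put_live_table()/dm_table_get_size() declarations from dm.h; the wrapper function itself is hypothetical.

/*
 * Usage sketch for mapped_device::map (illustrative, not from dm.c):
 * the SRCU read side pins the table via md->io_barrier, so a
 * concurrent table swap cannot free it underneath us.
 */
static sector_t example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	map = dm_get_live_table(md, &srcu_idx);	/* srcu_read_lock() */
	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);	/* srcu_read_unlock() */

	return size;
}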
     118             : 
     119             : void disable_discard(struct mapped_device *md);
     120             : void disable_write_same(struct mapped_device *md);
     121             : void disable_write_zeroes(struct mapped_device *md);
     122             : 
     123           0 : static inline sector_t dm_get_size(struct mapped_device *md)
     124             : {
     125           0 :         return get_capacity(md->disk);
     126             : }
     127             : 
     128           0 : static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
     129             : {
     130           0 :         return &md->stats;
     131             : }
     132             : 
     133             : #define DM_TABLE_MAX_DEPTH 16
     134             : 
     135             : struct dm_table {
     136             :         struct mapped_device *md;
     137             :         enum dm_queue_mode type;
     138             : 
      139             :         /* btree table; see the lookup sketch after this struct */
     140             :         unsigned int depth;
     141             :         unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
     142             :         sector_t *index[DM_TABLE_MAX_DEPTH];
     143             : 
     144             :         unsigned int num_targets;
     145             :         unsigned int num_allocated;
     146             :         sector_t *highs;
     147             :         struct dm_target *targets;
     148             : 
     149             :         struct target_type *immutable_target_type;
     150             : 
     151             :         bool integrity_supported:1;
     152             :         bool singleton:1;
     153             :         unsigned integrity_added:1;
     154             : 
     155             :         /*
     156             :          * Indicates the rw permissions for the new logical
     157             :          * device.  This should be a combination of FMODE_READ
     158             :          * and FMODE_WRITE.
     159             :          */
     160             :         fmode_t mode;
     161             : 
     162             :         /* a list of devices used by this table */
     163             :         struct list_head devices;
     164             : 
     165             :         /* events get handed up using this callback */
     166             :         void (*event_fn)(void *);
     167             :         void *event_context;
     168             : 
     169             :         struct dm_md_mempools *mempools;
     170             : 
     171             : #ifdef CONFIG_BLK_INLINE_ENCRYPTION
     172             :         struct blk_keyslot_manager *ksm;
     173             : #endif
     174             : };
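
The depth, counts[] and index[] members above describe a flat b-tree over highs[], keyed by the highest sector each target serves. The sketch below simplifies the walk dm-table.c performs in dm_table_find_target(); KEYS_PER_NODE and the out-of-range bounds check live in dm-table.c, so treat this as illustrative rather than verbatim.

/*
 * Sketch of the b-tree lookup over dm_table::index (simplified from
 * dm-table.c; the real code first bounds-checks the sector against
 * dm_table_get_size() and returns NULL when it is out of range).
 */
static struct dm_target *example_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		/* descend to child n of the node picked at the previous level */
		n = (n * (KEYS_PER_NODE + 1)) + k;
		node = t->index[l] + (n * KEYS_PER_NODE);

		/* first key >= sector selects the child / target slot */
		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}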
     175             : 
     176           0 : static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
     177             : {
     178           0 :         return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
     179             : }
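
The holder exists so the sysfs release callback can tell the teardown path that the kobject is truly gone. A sketch of that pairing, modelled on dm-sysfs.c (the function names here are hypothetical):

/* kobject release: wake whoever is waiting out the last reference */
static void example_kobject_release(struct kobject *kobj)
{
	complete(dm_get_completion_from_kobject(kobj));
}

/* teardown side: drop our reference, then wait for release to run */
static void example_kobject_destroy(struct mapped_device *md)
{
	kobject_put(&md->kobj_holder.kobj);
	wait_for_completion(&md->kobj_holder.completion);
}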
     180             : 
     181             : unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
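
__dm_get_module_param() reads a writable module parameter and clamps it: 0 falls back to the default, and values above max are capped. A simplified sketch of that contract follows (the dm.c implementation also writes the clamped value back via cmpxchg(), omitted here); dm.c wraps it once per tunable, e.g. pairing a reserved-I/O parameter with the DM_RESERVED_MAX_IOS ceiling defined near the top of this header.

/* Clamping contract of __dm_get_module_param(), simplified */
static unsigned example_get_module_param(unsigned *module_param,
					 unsigned def, unsigned max)
{
	unsigned param = READ_ONCE(*module_param);

	if (!param)
		param = def;		/* 0 means "use the default" */
	else if (param > max)
		param = max;		/* cap out-of-range writes */

	return param;
}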
     182             : 
     183           0 : static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
     184             : {
     185           0 :         return !maxlen || strlen(result) + 1 >= maxlen;
     186             : }
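
dm_message_test_buffer_overflow() lets handlers that print into the caller's fixed-size result buffer detect truncation: it returns true when maxlen is zero or strlen(result) + 1 no longer fits. A hypothetical emitter showing the intended check (the function and its output format are made up):

/* Hypothetical message reply: emit into 'result', then test for overflow */
static int example_emit_message_reply(char *result, unsigned maxlen,
				      unsigned long long processed)
{
	unsigned sz = 0;

	sz += scnprintf(result + sz, maxlen - sz, "processed=%llu", processed);

	/* full buffer -> signal the caller to retry with a larger one */
	if (dm_message_test_buffer_overflow(result, maxlen))
		return 1;

	return 0;
}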
     187             : 
     188             : extern atomic_t dm_global_event_nr;
     189             : extern wait_queue_head_t dm_global_eventq;
     190             : void dm_issue_global_event(void);
     191             : 
     192             : #endif

Generated by: LCOV version 1.14