LCOV - code coverage report
Current view: top level - include/linux - buffer_head.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:            57       71      80.3 %
Functions:         5        5     100.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * include/linux/buffer_head.h
       4             :  *
       5             :  * Everything to do with buffer_heads.
       6             :  */
       7             : 
       8             : #ifndef _LINUX_BUFFER_HEAD_H
       9             : #define _LINUX_BUFFER_HEAD_H
      10             : 
      11             : #include <linux/types.h>
      12             : #include <linux/fs.h>
      13             : #include <linux/linkage.h>
      14             : #include <linux/pagemap.h>
      15             : #include <linux/wait.h>
      16             : #include <linux/atomic.h>
      17             : 
      18             : #ifdef CONFIG_BLOCK
      19             : 
      20             : enum bh_state_bits {
      21             :         BH_Uptodate,    /* Contains valid data */
      22             :         BH_Dirty,       /* Is dirty */
      23             :         BH_Lock,        /* Is locked */
      24             :         BH_Req,         /* Has been submitted for I/O */
      25             : 
      26             :         BH_Mapped,      /* Has a disk mapping */
      27             :         BH_New,         /* Disk mapping was newly created by get_block */
      28             :         BH_Async_Read,  /* Is under end_buffer_async_read I/O */
      29             :         BH_Async_Write, /* Is under end_buffer_async_write I/O */
      30             :         BH_Delay,       /* Buffer is not yet allocated on disk */
      31             :         BH_Boundary,    /* Block is followed by a discontiguity */
      32             :         BH_Write_EIO,   /* I/O error on write */
      33             :         BH_Unwritten,   /* Buffer is allocated on disk but not written */
      34             :         BH_Quiet,       /* Buffer error printks to be quiet */
      35             :         BH_Meta,        /* Buffer contains metadata */
      36             :         BH_Prio,        /* Buffer should be submitted with REQ_PRIO */
      37             :         BH_Defer_Completion, /* Defer AIO completion to workqueue */
      38             : 
      39             :         BH_PrivateStart,/* not a state bit, but the first bit available
      40             :                          * for private allocation by other entities
      41             :                          */
      42             : };
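
/*
 * Illustrative sketch (not part of this header): a subsystem that needs
 * per-buffer state of its own claims bits at and above BH_PrivateStart;
 * the journaling layer (jbd2) does exactly this.  The enum below is a
 * hypothetical example of the pattern.
 */
enum examplefs_state_bits {
        BH_Example_Tracked = BH_PrivateStart,   /* first free private bit */
        BH_Example_Frozen,                      /* next free private bit */
};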
      43             : 
      44             : #define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
      45             : 
      46             : struct page;
      47             : struct buffer_head;
      48             : struct address_space;
      49             : typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
      50             : 
      51             : /*
      52             :  * Historically, a buffer_head was used to map a single block
      53             :  * within a page, and of course as the unit of I/O through the
      54             :  * filesystem and block layers.  Nowadays the basic I/O unit
      55             :  * is the bio, and buffer_heads are used for extracting block
      56             :  * mappings (via a get_block_t call), for tracking state within
      57             :  * a page (via a page_mapping) and for wrapping bio submission
      58             :  * for backward compatibility reasons (e.g. submit_bh).
      59             :  */
      60             : struct buffer_head {
      61             :         unsigned long b_state;          /* buffer state bitmap (see above) */
      62             :         struct buffer_head *b_this_page;/* circular list of page's buffers */
      63             :         struct page *b_page;            /* the page this bh is mapped to */
      64             : 
      65             :         sector_t b_blocknr;             /* start block number */
      66             :         size_t b_size;                  /* size of mapping */
      67             :         char *b_data;                   /* pointer to data within the page */
      68             : 
      69             :         struct block_device *b_bdev;
      70             :         bh_end_io_t *b_end_io;          /* I/O completion */
      71             :         void *b_private;                /* reserved for b_end_io */
      72             :         struct list_head b_assoc_buffers; /* associated with another mapping */
      73             :         struct address_space *b_assoc_map;      /* mapping this buffer is
      74             :                                                    associated with */
      75             :         atomic_t b_count;               /* users using this buffer_head */
      76             :         spinlock_t b_uptodate_lock;     /* Used by the first bh in a page, to
      77             :                                          * serialise IO completion of other
      78             :                                          * buffers in the page */
      79             : };
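
/*
 * Illustrative sketch (not part of this header): b_this_page links every
 * buffer of a page into a circular list, so the canonical traversal starts
 * at the head returned by page_buffers() (defined further below) and loops
 * until it comes back around.  The helper name is hypothetical.
 */
static inline void example_for_each_bh(struct page *page)
{
        struct buffer_head *head = page_buffers(page);
        struct buffer_head *bh = head;

        do {
                /* inspect or update one block's worth of state here */
                bh = bh->b_this_page;
        } while (bh != head);
}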
      80             : 
      81             : /*
      82             :  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
      83             :  * and buffer_foo() functions.
      84             :  * To avoid resetting buffer flags that are already set (which would cause
      85             :  * a costly cache line transition), check the flag first.
      86             :  */
      87             : #define BUFFER_FNS(bit, name)                                           \
      88             : static __always_inline void set_buffer_##name(struct buffer_head *bh)   \
      89             : {                                                                       \
      90             :         if (!test_bit(BH_##bit, &(bh)->b_state))                 \
      91             :                 set_bit(BH_##bit, &(bh)->b_state);                       \
      92             : }                                                                       \
      93             : static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
      94             : {                                                                       \
      95             :         clear_bit(BH_##bit, &(bh)->b_state);                             \
      96             : }                                                                       \
      97             : static __always_inline int buffer_##name(const struct buffer_head *bh)  \
      98             : {                                                                       \
      99             :         return test_bit(BH_##bit, &(bh)->b_state);                       \
     100             : }
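
/*
 * For reference: BUFFER_FNS(Uptodate, uptodate) expands to exactly the
 * three helpers below (reproduced here for illustration only).
 */
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
        if (!test_bit(BH_Uptodate, &(bh)->b_state))
                set_bit(BH_Uptodate, &(bh)->b_state);
}
static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
        clear_bit(BH_Uptodate, &(bh)->b_state);
}
static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
        return test_bit(BH_Uptodate, &(bh)->b_state);
}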
     101             : 
     102             : /*
     103             :  * test_set_buffer_foo() and test_clear_buffer_foo()
     104             :  */
     105             : #define TAS_BUFFER_FNS(bit, name)                                       \
     106             : static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
     107             : {                                                                       \
     108             :         return test_and_set_bit(BH_##bit, &(bh)->b_state);               \
     109             : }                                                                       \
     110             : static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
     111             : {                                                                       \
     112             :         return test_and_clear_bit(BH_##bit, &(bh)->b_state);             \
     113             : }                                                                       \
     114             : 
     115             : /*
     116             :  * Emit the buffer bitops functions.   Note that there are also functions
     117             :  * of the form "mark_buffer_foo()".  These are higher-level functions which
     118             :  * do something in addition to setting a b_state bit.
     119             :  */
     120       72968 : BUFFER_FNS(Uptodate, uptodate)
     121       40227 : BUFFER_FNS(Dirty, dirty)
     122       11975 : TAS_BUFFER_FNS(Dirty, dirty)
     123       25050 : BUFFER_FNS(Lock, locked)
     124           0 : BUFFER_FNS(Req, req)
     125        6715 : TAS_BUFFER_FNS(Req, req)
     126       39088 : BUFFER_FNS(Mapped, mapped)
     127        9255 : BUFFER_FNS(New, new)
     128           4 : BUFFER_FNS(Async_Read, async_read)
     129        5015 : BUFFER_FNS(Async_Write, async_write)
     130       13527 : BUFFER_FNS(Delay, delay)
     131         517 : BUFFER_FNS(Boundary, boundary)
     132        6010 : BUFFER_FNS(Write_EIO, write_io_error)
     133        7982 : BUFFER_FNS(Unwritten, unwritten)
     134       21423 : BUFFER_FNS(Meta, meta)
     135       21423 : BUFFER_FNS(Prio, prio)
     136           0 : BUFFER_FNS(Defer_Completion, defer_completion)
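
/*
 * Illustrative sketch (not part of this header): the invocations above
 * generate, e.g., buffer_dirty(), set_buffer_dirty() and
 * test_set_buffer_dirty().  The test-and-set form returns the previous
 * bit value, so one-time work can be keyed off the first transition;
 * contrast mark_buffer_dirty() (declared below), which also dirties the
 * owning page and inode.  The helper name is hypothetical.
 */
static inline void example_dirty_bh(struct buffer_head *bh)
{
        if (test_set_buffer_dirty(bh))
                return;         /* already dirty: nothing more to do */
        /* first writer: any one-time accounting would go here */
}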
     137             : 
     138             : #define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
     139             : 
     140             : /* If we *know* page->private refers to buffer_heads */
     141             : #define page_buffers(page)                                      \
     142             :         ({                                                      \
     143             :                 BUG_ON(!PagePrivate(page));                     \
     144             :                 ((struct buffer_head *)page_private(page));     \
     145             :         })
     146             : #define page_has_buffers(page)  PagePrivate(page)
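
/*
 * Illustrative sketch (not part of this header): page_buffers() BUGs on
 * a page without buffers, so callers test page_has_buffers() first.  The
 * helper name is hypothetical.
 */
static inline struct buffer_head *example_first_bh(struct page *page)
{
        if (!page_has_buffers(page))
                return NULL;
        return page_buffers(page);
}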
     147             : 
     148             : void buffer_check_dirty_writeback(struct page *page,
     149             :                                      bool *dirty, bool *writeback);
     150             : 
     151             : /*
     152             :  * Declarations
     153             :  */
     154             : 
     155             : void mark_buffer_dirty(struct buffer_head *bh);
     156             : void mark_buffer_write_io_error(struct buffer_head *bh);
     157             : void touch_buffer(struct buffer_head *bh);
     158             : void set_bh_page(struct buffer_head *bh,
     159             :                 struct page *page, unsigned long offset);
     160             : int try_to_free_buffers(struct page *);
     161             : struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
     162             :                 bool retry);
     163             : void create_empty_buffers(struct page *, unsigned long,
     164             :                         unsigned long b_state);
     165             : void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
     166             : void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
     167             : void end_buffer_async_write(struct buffer_head *bh, int uptodate);
     168             : 
     169             : /* Things to do with buffers at mapping->private_list */
     170             : void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
     171             : int inode_has_buffers(struct inode *);
     172             : void invalidate_inode_buffers(struct inode *);
     173             : int remove_inode_buffers(struct inode *inode);
     174             : int sync_mapping_buffers(struct address_space *mapping);
     175             : void clean_bdev_aliases(struct block_device *bdev, sector_t block,
     176             :                         sector_t len);
     177        1607 : static inline void clean_bdev_bh_alias(struct buffer_head *bh)
     178             : {
     179        1607 :         clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
     180           0 : }
     181             : 
     182             : void mark_buffer_async_write(struct buffer_head *bh);
     183             : void __wait_on_buffer(struct buffer_head *);
     184             : wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
     185             : struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
     186             :                         unsigned size);
     187             : struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
     188             :                                   unsigned size, gfp_t gfp);
     189             : void __brelse(struct buffer_head *);
     190             : void __bforget(struct buffer_head *);
     191             : void __breadahead(struct block_device *, sector_t block, unsigned int size);
     192             : void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
     193             :                   gfp_t gfp);
     194             : struct buffer_head *__bread_gfp(struct block_device *,
     195             :                                 sector_t block, unsigned size, gfp_t gfp);
     196             : void invalidate_bh_lrus(void);
     197             : struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
     198             : void free_buffer_head(struct buffer_head * bh);
     199             : void unlock_buffer(struct buffer_head *bh);
     200             : void __lock_buffer(struct buffer_head *bh);
     201             : void ll_rw_block(int, int, int, struct buffer_head * bh[]);
     202             : int sync_dirty_buffer(struct buffer_head *bh);
     203             : int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
     204             : void write_dirty_buffer(struct buffer_head *bh, int op_flags);
     205             : int submit_bh(int, int, struct buffer_head *);
     206             : void write_boundary_block(struct block_device *bdev,
     207             :                         sector_t bblock, unsigned blocksize);
     208             : int bh_uptodate_or_lock(struct buffer_head *bh);
     209             : int bh_submit_read(struct buffer_head *bh);
     210             : 
     211             : extern int buffer_heads_over_limit;
     212             : 
     213             : /*
     214             :  * Generic address_space_operations implementations for buffer_head-backed
     215             :  * address_spaces.
     216             :  */
     217             : void block_invalidatepage(struct page *page, unsigned int offset,
     218             :                           unsigned int length);
     219             : int block_write_full_page(struct page *page, get_block_t *get_block,
     220             :                                 struct writeback_control *wbc);
     221             : int __block_write_full_page(struct inode *inode, struct page *page,
     222             :                         get_block_t *get_block, struct writeback_control *wbc,
     223             :                         bh_end_io_t *handler);
     224             : int block_read_full_page(struct page*, get_block_t*);
     225             : int block_is_partially_uptodate(struct page *page, unsigned long from,
     226             :                                 unsigned long count);
     227             : int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
     228             :                 unsigned flags, struct page **pagep, get_block_t *get_block);
     229             : int __block_write_begin(struct page *page, loff_t pos, unsigned len,
     230             :                 get_block_t *get_block);
     231             : int block_write_end(struct file *, struct address_space *,
     232             :                                 loff_t, unsigned, unsigned,
     233             :                                 struct page *, void *);
     234             : int generic_write_end(struct file *, struct address_space *,
     235             :                                 loff_t, unsigned, unsigned,
     236             :                                 struct page *, void *);
     237             : void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
     238             : void clean_page_buffers(struct page *page);
     239             : int cont_write_begin(struct file *, struct address_space *, loff_t,
     240             :                         unsigned, unsigned, struct page **, void **,
     241             :                         get_block_t *, loff_t *);
     242             : int generic_cont_expand_simple(struct inode *inode, loff_t size);
     243             : int block_commit_write(struct page *page, unsigned from, unsigned to);
     244             : int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
     245             :                                 get_block_t get_block);
     246             : /* Convert errno to return value from ->page_mkwrite() call */
     247         304 : static inline vm_fault_t block_page_mkwrite_return(int err)
     248             : {
     249         304 :         if (err == 0)
     250             :                 return VM_FAULT_LOCKED;
     251           0 :         if (err == -EFAULT || err == -EAGAIN)
     252             :                 return VM_FAULT_NOPAGE;
     253           0 :         if (err == -ENOMEM)
     254           0 :                 return VM_FAULT_OOM;
     255             :         /* -ENOSPC, -EDQUOT, -EIO ... */
     256             :         return VM_FAULT_SIGBUS;
     257             : }
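
/*
 * Illustrative sketch (not part of this header): a filesystem's
 * ->page_mkwrite() handler typically forwards the errno from
 * block_page_mkwrite() through block_page_mkwrite_return().  Both the
 * handler and examplefs_get_block (a get_block_t callback, sketched
 * after map_bh() below) are hypothetical.
 */
static vm_fault_t examplefs_page_mkwrite(struct vm_fault *vmf)
{
        int err = block_page_mkwrite(vmf->vma, vmf, examplefs_get_block);

        return block_page_mkwrite_return(err);
}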
     258             : sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
     259             : int block_truncate_page(struct address_space *, loff_t, get_block_t *);
     260             : int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
     261             :                                 struct page **, void **, get_block_t*);
     262             : int nobh_write_end(struct file *, struct address_space *,
     263             :                                 loff_t, unsigned, unsigned,
     264             :                                 struct page *, void *);
     265             : int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
     266             : int nobh_writepage(struct page *page, get_block_t *get_block,
     267             :                         struct writeback_control *wbc);
     268             : 
     269             : void buffer_init(void);
     270             : 
     271             : /*
     272             :  * inline definitions
     273             :  */
     274             : 
     275       61839 : static inline void get_bh(struct buffer_head *bh)
     276             : {
     277       61839 :         atomic_inc(&bh->b_count);
     278       61838 : }
     279             : 
     280       64603 : static inline void put_bh(struct buffer_head *bh)
     281             : {
     282       64603 :         smp_mb__before_atomic();
     283       64603 :         atomic_dec(&bh->b_count);
     284       64603 : }
     285             : 
     286       47718 : static inline void brelse(struct buffer_head *bh)
     287             : {
     288       37252 :         if (bh)
     289       44174 :                 __brelse(bh);
     290          15 : }
     291             : 
     292           0 : static inline void bforget(struct buffer_head *bh)
     293             : {
     294           0 :         if (bh)
     295           0 :                 __bforget(bh);
     296             : }
     297             : 
     298             : static inline struct buffer_head *
     299           0 : sb_bread(struct super_block *sb, sector_t block)
     300             : {
     301           0 :         return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
     302             : }
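
/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * reading a metadata block.  sb_bread() returns a bh with an elevated
 * b_count (or NULL if the block was unreadable), and the caller drops
 * the reference with brelse() when done.  The helper is hypothetical.
 */
static inline int example_read_block(struct super_block *sb, sector_t block)
{
        struct buffer_head *bh = sb_bread(sb, block);

        if (!bh)
                return -EIO;
        /* ... consume bh->b_data, bh->b_size ... */
        brelse(bh);
        return 0;
}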
     303             : 
     304             : static inline struct buffer_head *
     305             : sb_bread_unmovable(struct super_block *sb, sector_t block)
     306             : {
     307             :         return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
     308             : }
     309             : 
     310             : static inline void
     311             : sb_breadahead(struct super_block *sb, sector_t block)
     312             : {
     313             :         __breadahead(sb->s_bdev, block, sb->s_blocksize);
     314             : }
     315             : 
     316             : static inline void
     317             : sb_breadahead_unmovable(struct super_block *sb, sector_t block)
     318             : {
     319             :         __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
     320             : }
     321             : 
     322             : static inline struct buffer_head *
     323       29021 : sb_getblk(struct super_block *sb, sector_t block)
     324             : {
     325       29021 :         return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
     326             : }
     327             : 
     328             : 
     329             : static inline struct buffer_head *
     330        3427 : sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
     331             : {
     332        3427 :         return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
     333             : }
     334             : 
     335             : static inline struct buffer_head *
     336          77 : sb_find_get_block(struct super_block *sb, sector_t block)
     337             : {
     338          77 :         return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
     339             : }
     340             : 
     341             : static inline void
     342        1821 : map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
     343             : {
     344        1821 :         set_buffer_mapped(bh);
     345        1821 :         bh->b_bdev = sb->s_bdev;
     346        1821 :         bh->b_blocknr = block;
     347        1821 :         bh->b_size = sb->s_blocksize;
     348        1821 : }
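
/*
 * Illustrative sketch (not part of this header): map_bh() is how a
 * get_block_t implementation publishes a block mapping.  The filesystem
 * and its examplefs_lookup_block() helper are hypothetical.
 */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create)
{
        sector_t phys;

        if (examplefs_lookup_block(inode, iblock, &phys, create))
                return -EIO;                    /* hypothetical lookup failed */
        map_bh(bh_result, inode->i_sb, phys);   /* sets BH_Mapped and b_* fields */
        return 0;
}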
     349             : 
     350        6779 : static inline void wait_on_buffer(struct buffer_head *bh)
     351             : {
     352        6779 :         might_sleep();
     353        6779 :         if (buffer_locked(bh))
     354         799 :                 __wait_on_buffer(bh);
     355        6779 : }
     356             : 
     357        9921 : static inline int trylock_buffer(struct buffer_head *bh)
     358             : {
     359       13930 :         return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
     360             : }
     361             : 
     362        5097 : static inline void lock_buffer(struct buffer_head *bh)
     363             : {
     364        5097 :         might_sleep();
     365        5097 :         if (!trylock_buffer(bh))
     366           0 :                 __lock_buffer(bh);
     367        5097 : }
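
/*
 * Illustrative sketch (not part of this header): the canonical
 * lock-modify-unlock sequence for updating a buffer's contents; a
 * synchronous write-out could follow via sync_dirty_buffer() (declared
 * above).  The helper name is hypothetical.
 */
static inline void example_update_bh(struct buffer_head *bh)
{
        lock_buffer(bh);
        /* ... modify bh->b_data ... */
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
}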
     368             : 
     369           1 : static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
     370             :                                                    sector_t block,
     371             :                                                    unsigned size)
     372             : {
     373           1 :         return __getblk_gfp(bdev, block, size, 0);
     374             : }
     375             : 
     376         126 : static inline struct buffer_head *__getblk(struct block_device *bdev,
     377             :                                            sector_t block,
     378             :                                            unsigned size)
     379             : {
     380         126 :         return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
     381             : }
     382             : 
     383             : /**
     384             :  *  __bread() - reads a specified block and returns the bh
     385             :  *  @bdev: the block_device to read from
     386             :  *  @block: number of block
     387             :  *  @size: size (in bytes) to read
     388             :  *
     389             :  *  Reads a specified block, and returns buffer head that contains it.
     390             :  *  The page cache is allocated from movable area so that it can be migrated.
     391             :  *  It returns NULL if the block was unreadable.
     392             :  */
     393             : static inline struct buffer_head *
     394           0 : __bread(struct block_device *bdev, sector_t block, unsigned size)
     395             : {
     396           0 :         return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
     397             : }
     398             : 
     399             : extern int __set_page_dirty_buffers(struct page *page);
     400             : 
     401             : #else /* CONFIG_BLOCK */
     402             : 
     403             : static inline void buffer_init(void) {}
     404             : static inline int try_to_free_buffers(struct page *page) { return 1; }
     405             : static inline int inode_has_buffers(struct inode *inode) { return 0; }
     406             : static inline void invalidate_inode_buffers(struct inode *inode) {}
     407             : static inline int remove_inode_buffers(struct inode *inode) { return 1; }
     408             : static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
     409             : #define buffer_heads_over_limit 0
     410             : 
     411             : #endif /* CONFIG_BLOCK */
     412             : #endif /* _LINUX_BUFFER_HEAD_H */

Generated by: LCOV version 1.14