LCOV - code coverage report
Current view: top level - mm - truncate.c (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58
Coverage:     Lines:     98 / 313  (31.3 %)
              Functions:  8 / 23   (34.8 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * mm/truncate.c - code for taking down pages from address_spaces
       4             :  *
       5             :  * Copyright (C) 2002, Linus Torvalds
       6             :  *
       7             :  * 10Sep2002    Andrew Morton
       8             :  *              Initial version.
       9             :  */
      10             : 
      11             : #include <linux/kernel.h>
      12             : #include <linux/backing-dev.h>
      13             : #include <linux/dax.h>
      14             : #include <linux/gfp.h>
      15             : #include <linux/mm.h>
      16             : #include <linux/swap.h>
      17             : #include <linux/export.h>
      18             : #include <linux/pagemap.h>
      19             : #include <linux/highmem.h>
      20             : #include <linux/pagevec.h>
      21             : #include <linux/task_io_accounting_ops.h>
      22             : #include <linux/buffer_head.h>    /* grr. try_to_release_page,
      23             :                                    do_invalidatepage */
      24             : #include <linux/shmem_fs.h>
      25             : #include <linux/cleancache.h>
      26             : #include <linux/rmap.h>
      27             : #include "internal.h"
      28             : 
      29             : /*
      30             :  * Regular page slots are stabilized by the page lock even without the tree
      31             :  * itself locked.  These unlocked entries need verification under the tree
      32             :  * lock.
      33             :  */
      34           0 : static inline void __clear_shadow_entry(struct address_space *mapping,
      35             :                                 pgoff_t index, void *entry)
      36             : {
      37           0 :         XA_STATE(xas, &mapping->i_pages, index);
      38             : 
      39           0 :         xas_set_update(&xas, workingset_update_node);
      40           0 :         if (xas_load(&xas) != entry)
      41           0 :                 return;
      42           0 :         xas_store(&xas, NULL);
      43           0 :         mapping->nrexceptional--;
      44             : }
      45             : 
      46           0 : static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
      47             :                                void *entry)
      48             : {
      49           0 :         xa_lock_irq(&mapping->i_pages);
      50           0 :         __clear_shadow_entry(mapping, index, entry);
      51           0 :         xa_unlock_irq(&mapping->i_pages);
      52           0 : }
      53             : 
      54             : /*
      55             :  * Unconditionally remove exceptional entries. Usually called from truncate
       56             :  * path. Note that this function may alter the pagevec by removing
       57             :  * exceptional entries, similar to what pagevec_remove_exceptionals() does.
      58             :  */
      59         144 : static void truncate_exceptional_pvec_entries(struct address_space *mapping,
      60             :                                 struct pagevec *pvec, pgoff_t *indices)
      61             : {
      62         144 :         int i, j;
      63         144 :         bool dax;
      64             : 
      65             :         /* Handled by shmem itself */
      66         144 :         if (shmem_mapping(mapping))
      67             :                 return;
      68             : 
      69        1455 :         for (j = 0; j < pagevec_count(pvec); j++)
      70        1311 :                 if (xa_is_value(pvec->pages[j]))
      71             :                         break;
      72             : 
      73         144 :         if (j == pagevec_count(pvec))
      74             :                 return;
      75             : 
      76           0 :         dax = dax_mapping(mapping);
      77           0 :         if (!dax)
      78           0 :                 xa_lock_irq(&mapping->i_pages);
      79             : 
      80           0 :         for (i = j; i < pagevec_count(pvec); i++) {
      81           0 :                 struct page *page = pvec->pages[i];
      82           0 :                 pgoff_t index = indices[i];
      83             : 
      84           0 :                 if (!xa_is_value(page)) {
      85           0 :                         pvec->pages[j++] = page;
      86           0 :                         continue;
      87             :                 }
      88             : 
      89           0 :                 if (unlikely(dax)) {
      90             :                         dax_delete_mapping_entry(mapping, index);
      91             :                         continue;
      92             :                 }
      93             : 
      94           0 :                 __clear_shadow_entry(mapping, index, page);
      95             :         }
      96             : 
      97           0 :         if (!dax)
      98           0 :                 xa_unlock_irq(&mapping->i_pages);
      99           0 :         pvec->nr = j;
     100             : }
     101             : 
     102             : /*
     103             :  * Invalidate exceptional entry if easily possible. This handles exceptional
     104             :  * entries for invalidate_inode_pages().
     105             :  */
     106           0 : static int invalidate_exceptional_entry(struct address_space *mapping,
     107             :                                         pgoff_t index, void *entry)
     108             : {
     109             :         /* Handled by shmem itself, or for DAX we do nothing. */
     110           0 :         if (shmem_mapping(mapping) || dax_mapping(mapping))
     111             :                 return 1;
     112           0 :         clear_shadow_entry(mapping, index, entry);
     113           0 :         return 1;
     114             : }
     115             : 
     116             : /*
     117             :  * Invalidate exceptional entry if clean. This handles exceptional entries for
     118             :  * invalidate_inode_pages2() so for DAX it evicts only clean entries.
     119             :  */
     120           0 : static int invalidate_exceptional_entry2(struct address_space *mapping,
     121             :                                          pgoff_t index, void *entry)
     122             : {
     123             :         /* Handled by shmem itself */
     124           0 :         if (shmem_mapping(mapping))
     125             :                 return 1;
     126           0 :         if (dax_mapping(mapping))
     127             :                 return dax_invalidate_mapping_entry_sync(mapping, index);
     128           0 :         clear_shadow_entry(mapping, index, entry);
     129           0 :         return 1;
     130             : }
     131             : 
     132             : /**
     133             :  * do_invalidatepage - invalidate part or all of a page
     134             :  * @page: the page which is affected
     135             :  * @offset: start of the range to invalidate
     136             :  * @length: length of the range to invalidate
     137             :  *
     138             :  * do_invalidatepage() is called when all or part of the page has become
     139             :  * invalidated by a truncate operation.
     140             :  *
     141             :  * do_invalidatepage() does not have to release all buffers, but it must
     142             :  * ensure that no dirty buffer is left outside @offset and that no I/O
     143             :  * is underway against any of the blocks which are outside the truncation
      144             :  * point, because the caller is about to free (and possibly reuse) those
     145             :  * blocks on-disk.
     146             :  */
     147         501 : void do_invalidatepage(struct page *page, unsigned int offset,
     148             :                        unsigned int length)
     149             : {
     150         501 :         void (*invalidatepage)(struct page *, unsigned int, unsigned int);
     151             : 
     152         501 :         invalidatepage = page->mapping->a_ops->invalidatepage;
     153             : #ifdef CONFIG_BLOCK
     154         501 :         if (!invalidatepage)
     155           3 :                 invalidatepage = block_invalidatepage;
     156             : #endif
     157         501 :         if (invalidatepage)
     158         501 :                 (*invalidatepage)(page, offset, length);
     159         501 : }
     160             : 
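As a hedged illustration of the hook contract described above (not part of the
instrumented file), a filesystem might wire its own handler into its
address_space_operations like this; the myfs_* names are hypothetical, and
block_invalidatepage() is the generic fallback used under CONFIG_BLOCK:

    static void myfs_invalidatepage(struct page *page, unsigned int offset,
                                    unsigned int length)
    {
            /* Drop fs-private state attached to the invalidated byte
             * range, then let the generic buffer_head code release the
             * affected buffers. */
            block_invalidatepage(page, offset, length);
    }

    static const struct address_space_operations myfs_aops = {
            .invalidatepage = myfs_invalidatepage,
            /* ... other operations ... */
    };
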
     161             : /*
     162             :  * If truncate cannot remove the fs-private metadata from the page, the page
     163             :  * becomes orphaned.  It will be left on the LRU and may even be mapped into
     164             :  * user pagetables if we're racing with filemap_fault().
     165             :  *
     166             :  * We need to bail out if page->mapping is no longer equal to the original
     167             :  * mapping.  This happens a) when the VM reclaimed the page while we waited on
     168             :  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
     169             :  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
     170             :  */
     171             : static void
     172        1979 : truncate_cleanup_page(struct address_space *mapping, struct page *page)
     173             : {
     174        1979 :         if (page_mapped(page)) {
     175           0 :                 unsigned int nr = thp_nr_pages(page);
     176           0 :                 unmap_mapping_pages(mapping, page->index, nr, false);
     177             :         }
     178             : 
     179        1979 :         if (page_has_private(page))
     180         499 :                 do_invalidatepage(page, 0, thp_size(page));
     181             : 
     182             :         /*
     183             :          * Some filesystems seem to re-dirty the page even after
     184             :          * the VM has canceled the dirty bit (eg ext3 journaling).
     185             :          * Hence dirty accounting check is placed after invalidation.
     186             :          */
     187        1979 :         cancel_dirty_page(page);
     188        1979 :         ClearPageMappedToDisk(page);
     189        1979 : }
     190             : 
     191             : /*
     192             :  * This is for invalidate_mapping_pages().  That function can be called at
     193             :  * any time, and is not supposed to throw away dirty pages.  But pages can
     194             :  * be marked dirty at any time too, so use remove_mapping which safely
     195             :  * discards clean, unused pages.
     196             :  *
     197             :  * Returns non-zero if the page was successfully invalidated.
     198             :  */
     199             : static int
     200           0 : invalidate_complete_page(struct address_space *mapping, struct page *page)
     201             : {
     202           0 :         int ret;
     203             : 
     204           0 :         if (page->mapping != mapping)
     205             :                 return 0;
     206             : 
     207           0 :         if (page_has_private(page) && !try_to_release_page(page, 0))
     208             :                 return 0;
     209             : 
     210           0 :         ret = remove_mapping(mapping, page);
     211             : 
     212           0 :         return ret;
     213             : }
     214             : 
     215         668 : int truncate_inode_page(struct address_space *mapping, struct page *page)
     216             : {
     217         668 :         VM_BUG_ON_PAGE(PageTail(page), page);
     218             : 
     219         668 :         if (page->mapping != mapping)
     220             :                 return -EIO;
     221             : 
     222         668 :         truncate_cleanup_page(mapping, page);
     223         668 :         delete_from_page_cache(page);
     224         668 :         return 0;
     225             : }
     226             : 
     227             : /*
     228             :  * Used to get rid of pages on hardware memory corruption.
     229             :  */
     230           0 : int generic_error_remove_page(struct address_space *mapping, struct page *page)
     231             : {
     232           0 :         if (!mapping)
     233             :                 return -EINVAL;
     234             :         /*
     235             :          * Only punch for normal data pages for now.
     236             :          * Handling other types like directories would need more auditing.
     237             :          */
     238           0 :         if (!S_ISREG(mapping->host->i_mode))
     239             :                 return -EIO;
     240           0 :         return truncate_inode_page(mapping, page);
     241             : }
     242             : EXPORT_SYMBOL(generic_error_remove_page);
     243             : 
     244             : /*
     245             :  * Safely invalidate one page from its pagecache mapping.
     246             :  * It only drops clean, unused pages. The page must be locked.
     247             :  *
     248             :  * Returns 1 if the page is successfully invalidated, otherwise 0.
     249             :  */
     250           0 : int invalidate_inode_page(struct page *page)
     251             : {
     252           0 :         struct address_space *mapping = page_mapping(page);
     253           0 :         if (!mapping)
     254             :                 return 0;
     255           0 :         if (PageDirty(page) || PageWriteback(page))
     256           0 :                 return 0;
     257           0 :         if (page_mapped(page))
     258             :                 return 0;
     259           0 :         return invalidate_complete_page(mapping, page);
     260             : }
     261             : 
     262             : /**
     263             :  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
     264             :  * @mapping: mapping to truncate
     265             :  * @lstart: offset from which to truncate
     266             :  * @lend: offset to which to truncate (inclusive)
     267             :  *
     268             :  * Truncate the page cache, removing the pages that are between
     269             :  * specified offsets (and zeroing out partial pages
     270             :  * if lstart or lend + 1 is not page aligned).
     271             :  *
     272             :  * Truncate takes two passes - the first pass is nonblocking.  It will not
     273             :  * block on page locks and it will not block on writeback.  The second pass
      274             :  * will wait.  This is to avoid as much IO as possible in the affected region.
     275             :  * The first pass will remove most pages, so the search cost of the second pass
     276             :  * is low.
     277             :  *
     278             :  * We pass down the cache-hot hint to the page freeing code.  Even if the
     279             :  * mapping is large, it is probably the case that the final pages are the most
     280             :  * recently touched, and freeing happens in ascending file offset order.
     281             :  *
      282             :  * Note that since ->invalidatepage() accepts a range to invalidate,
      283             :  * truncate_inode_pages_range is able to handle cases where lend + 1 is not
      284             :  * properly page aligned.
     285             :  */
     286        3955 : void truncate_inode_pages_range(struct address_space *mapping,
     287             :                                 loff_t lstart, loff_t lend)
     288             : {
     289        3955 :         pgoff_t         start;          /* inclusive */
     290        3955 :         pgoff_t         end;            /* exclusive */
     291        3955 :         unsigned int    partial_start;  /* inclusive */
     292        3955 :         unsigned int    partial_end;    /* exclusive */
     293        3955 :         struct pagevec  pvec;
     294        3955 :         pgoff_t         indices[PAGEVEC_SIZE];
     295        3955 :         pgoff_t         index;
     296        3955 :         int             i;
     297             : 
     298        3955 :         if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
     299        3869 :                 goto out;
     300             : 
     301             :         /* Offsets within partial pages */
     302          86 :         partial_start = lstart & (PAGE_SIZE - 1);
     303          86 :         partial_end = (lend + 1) & (PAGE_SIZE - 1);
     304             : 
     305             :         /*
     306             :          * 'start' and 'end' always covers the range of pages to be fully
     307             :          * truncated. Partial pages are covered with 'partial_start' at the
     308             :          * start of the range and 'partial_end' at the end of the range.
     309             :          * Note that 'end' is exclusive while 'lend' is inclusive.
     310             :          */
     311          86 :         start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
     312          86 :         if (lend == -1)
     313             :                 /*
     314             :                  * lend == -1 indicates end-of-file so we have to set 'end'
     315             :                  * to the highest possible pgoff_t and since the type is
     316             :                  * unsigned we're using -1.
     317             :                  */
     318             :                 end = -1;
     319             :         else
     320           0 :                 end = (lend + 1) >> PAGE_SHIFT;
     321             : 
     322          86 :         pagevec_init(&pvec);
     323          86 :         index = start;
     324         230 :         while (index < end && find_lock_entries(mapping, index, end - 1,
     325             :                         &pvec, indices)) {
     326         144 :                 index = indices[pagevec_count(&pvec) - 1] + 1;
     327         144 :                 truncate_exceptional_pvec_entries(mapping, &pvec, indices);
     328        1599 :                 for (i = 0; i < pagevec_count(&pvec); i++)
     329        1311 :                         truncate_cleanup_page(mapping, pvec.pages[i]);
     330         144 :                 delete_from_page_cache_batch(mapping, &pvec);
     331        1599 :                 for (i = 0; i < pagevec_count(&pvec); i++)
     332        1311 :                         unlock_page(pvec.pages[i]);
     333         144 :                 pagevec_release(&pvec);
     334         144 :                 cond_resched();
     335             :         }
     336             : 
     337          86 :         if (partial_start) {
     338           2 :                 struct page *page = find_lock_page(mapping, start - 1);
     339           2 :                 if (page) {
     340           2 :                         unsigned int top = PAGE_SIZE;
     341           2 :                         if (start > end) {
     342             :                                 /* Truncation within a single page */
     343           0 :                                 top = partial_end;
     344           0 :                                 partial_end = 0;
     345             :                         }
     346           2 :                         wait_on_page_writeback(page);
     347           2 :                         zero_user_segment(page, partial_start, top);
     348           2 :                         cleancache_invalidate_page(mapping, page);
     349           2 :                         if (page_has_private(page))
     350           2 :                                 do_invalidatepage(page, partial_start,
     351             :                                                   top - partial_start);
     352           2 :                         unlock_page(page);
     353           2 :                         put_page(page);
     354             :                 }
     355             :         }
     356          86 :         if (partial_end) {
     357           0 :                 struct page *page = find_lock_page(mapping, end);
     358           0 :                 if (page) {
     359           0 :                         wait_on_page_writeback(page);
     360           0 :                         zero_user_segment(page, 0, partial_end);
     361           0 :                         cleancache_invalidate_page(mapping, page);
     362           0 :                         if (page_has_private(page))
     363           0 :                                 do_invalidatepage(page, 0,
     364             :                                                   partial_end);
     365           0 :                         unlock_page(page);
     366           0 :                         put_page(page);
     367             :                 }
     368             :         }
     369             :         /*
     370             :          * If the truncation happened within a single page no pages
     371             :          * will be released, just zeroed, so we can bail out now.
     372             :          */
     373          86 :         if (start >= end)
     374           0 :                 goto out;
     375             : 
     376             :         index = start;
     377          86 :         for ( ; ; ) {
     378          86 :                 cond_resched();
     379          86 :                 if (!find_get_entries(mapping, index, end - 1, &pvec,
     380             :                                 indices)) {
     381             :                         /* If all gone from start onwards, we're done */
     382          86 :                         if (index == start)
     383             :                                 break;
     384             :                         /* Otherwise restart to make sure all gone */
     385           0 :                         index = start;
     386           0 :                         continue;
     387             :                 }
     388             : 
     389           0 :                 for (i = 0; i < pagevec_count(&pvec); i++) {
     390           0 :                         struct page *page = pvec.pages[i];
     391             : 
     392             :                         /* We rely upon deletion not changing page->index */
     393           0 :                         index = indices[i];
     394             : 
     395           0 :                         if (xa_is_value(page))
     396           0 :                                 continue;
     397             : 
     398           0 :                         lock_page(page);
     399           0 :                         WARN_ON(page_to_index(page) != index);
     400           0 :                         wait_on_page_writeback(page);
     401           0 :                         truncate_inode_page(mapping, page);
     402           0 :                         unlock_page(page);
     403             :                 }
     404           0 :                 truncate_exceptional_pvec_entries(mapping, &pvec, indices);
     405           0 :                 pagevec_release(&pvec);
     406           0 :                 index++;
     407             :         }
     408             : 
     409          86 : out:
     410        3955 :         cleancache_invalidate_inode(mapping);
     411        3955 : }
     412             : EXPORT_SYMBOL(truncate_inode_pages_range);
     413             : 
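To make the partial_start/partial_end arithmetic above concrete, a worked
example assuming PAGE_SIZE == 4096 (values chosen for illustration only):

    /* truncate_inode_pages_range(mapping, 5000, 12287) gives:
     *
     *   partial_start = 5000 & 4095         = 904  (zero bytes 904..4095
     *                                               of the page at index 1)
     *   partial_end   = (12287 + 1) & 4095  = 0    (lend + 1 is aligned,
     *                                               no partial tail page)
     *   start         = (5000 + 4095) >> 12 = 2    (first fully truncated
     *                                               page index)
     *   end           = (12287 + 1) >> 12   = 3    (exclusive, so only
     *                                               page 2 is removed)
     */
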
     414             : /**
     415             :  * truncate_inode_pages - truncate *all* the pages from an offset
     416             :  * @mapping: mapping to truncate
     417             :  * @lstart: offset from which to truncate
     418             :  *
     419             :  * Called under (and serialised by) inode->i_mutex.
     420             :  *
     421             :  * Note: When this function returns, there can be a page in the process of
     422             :  * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
     423             :  * mapping->nrpages can be non-zero when this function returns even after
     424             :  * truncation of the whole mapping.
     425             :  */
     426        3956 : void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
     427             : {
     428           3 :         truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
     429           3 : }
     430             : EXPORT_SYMBOL(truncate_inode_pages);
     431             : 
     432             : /**
     433             :  * truncate_inode_pages_final - truncate *all* pages before inode dies
     434             :  * @mapping: mapping to truncate
     435             :  *
     436             :  * Called under (and serialized by) inode->i_mutex.
     437             :  *
     438             :  * Filesystems have to use this in the .evict_inode path to inform the
     439             :  * VM that this is the final truncate and the inode is going away.
     440             :  */
     441        3947 : void truncate_inode_pages_final(struct address_space *mapping)
     442             : {
     443        3947 :         unsigned long nrexceptional;
     444        3947 :         unsigned long nrpages;
     445             : 
     446             :         /*
      447             :          * Page reclaim cannot participate in regular inode lifetime
     448             :          * management (can't call iput()) and thus can race with the
     449             :          * inode teardown.  Tell it when the address space is exiting,
     450             :          * so that it does not install eviction information after the
     451             :          * final truncate has begun.
     452             :          */
     453        3947 :         mapping_set_exiting(mapping);
     454             : 
     455             :         /*
     456             :          * When reclaim installs eviction entries, it increases
     457             :          * nrexceptional first, then decreases nrpages.  Make sure we see
     458             :          * this in the right order or we might miss an entry.
     459             :          */
     460        3948 :         nrpages = mapping->nrpages;
     461        3948 :         smp_rmb();
     462        3948 :         nrexceptional = mapping->nrexceptional;
     463             : 
     464        3948 :         if (nrpages || nrexceptional) {
     465             :                 /*
     466             :                  * As truncation uses a lockless tree lookup, cycle
     467             :                  * the tree lock to make sure any ongoing tree
     468             :                  * modification that does not see AS_EXITING is
     469             :                  * completed before starting the final truncate.
     470             :                  */
     471          80 :                 xa_lock_irq(&mapping->i_pages);
     472          80 :                 xa_unlock_irq(&mapping->i_pages);
     473             :         }
     474             : 
     475             :         /*
     476             :          * Cleancache needs notification even if there are no pages or shadow
     477             :          * entries.
     478             :          */
     479        3948 :         truncate_inode_pages(mapping, 0);
     480        3947 : }
     481             : EXPORT_SYMBOL(truncate_inode_pages_final);
     482             : 
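A minimal sketch of the .evict_inode shape this comment describes, as seen in
many simple in-kernel filesystems (myfs_evict_inode is hypothetical):

    static void myfs_evict_inode(struct inode *inode)
    {
            truncate_inode_pages_final(&inode->i_data);
            /* ... release fs-private resources here ... */
            clear_inode(inode);
    }
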
     483           0 : static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
     484             :                 pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
     485             : {
     486           0 :         pgoff_t indices[PAGEVEC_SIZE];
     487           0 :         struct pagevec pvec;
     488           0 :         pgoff_t index = start;
     489           0 :         unsigned long ret;
     490           0 :         unsigned long count = 0;
     491           0 :         int i;
     492             : 
     493           0 :         pagevec_init(&pvec);
     494           0 :         while (find_lock_entries(mapping, index, end, &pvec, indices)) {
     495           0 :                 for (i = 0; i < pagevec_count(&pvec); i++) {
     496           0 :                         struct page *page = pvec.pages[i];
     497             : 
     498             :                         /* We rely upon deletion not changing page->index */
     499           0 :                         index = indices[i];
     500             : 
     501           0 :                         if (xa_is_value(page)) {
     502           0 :                                 invalidate_exceptional_entry(mapping, index,
     503             :                                                              page);
     504           0 :                                 continue;
     505             :                         }
     506           0 :                         index += thp_nr_pages(page) - 1;
     507             : 
     508           0 :                         ret = invalidate_inode_page(page);
     509           0 :                         unlock_page(page);
     510             :                         /*
     511             :                          * Invalidation is a hint that the page is no longer
      512             :                          * of interest, so try to speed up its reclaim.
     513             :                          */
     514           0 :                         if (!ret) {
     515           0 :                                 deactivate_file_page(page);
     516             :                                 /* It is likely on the pagevec of a remote CPU */
     517           0 :                                 if (nr_pagevec)
     518           0 :                                         (*nr_pagevec)++;
     519             :                         }
     520           0 :                         count += ret;
     521             :                 }
     522           0 :                 pagevec_remove_exceptionals(&pvec);
     523           0 :                 pagevec_release(&pvec);
     524           0 :                 cond_resched();
     525           0 :                 index++;
     526             :         }
     527           0 :         return count;
     528             : }
     529             : 
     530             : /**
     531             :  * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
     532             :  * @mapping: the address_space which holds the pages to invalidate
     533             :  * @start: the offset 'from' which to invalidate
     534             :  * @end: the offset 'to' which to invalidate (inclusive)
     535             :  *
      536             :  * This function only removes the unlocked pages; if you want to
     537             :  * remove all the pages of one inode, you must call truncate_inode_pages.
     538             :  *
     539             :  * invalidate_mapping_pages() will not block on IO activity. It will not
     540             :  * invalidate pages which are dirty, locked, under writeback or mapped into
     541             :  * pagetables.
     542             :  *
      543             :  * Return: the number of pages that were invalidated
     544             :  */
     545           0 : unsigned long invalidate_mapping_pages(struct address_space *mapping,
     546             :                 pgoff_t start, pgoff_t end)
     547             : {
     548           0 :         return __invalidate_mapping_pages(mapping, start, end, NULL);
     549             : }
     550             : EXPORT_SYMBOL(invalidate_mapping_pages);
     551             : 
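A sketch of the typical best-effort call pattern, converting a byte range to
page offsets (the offset/len/mapping names here are assumptions):

    pgoff_t first = offset >> PAGE_SHIFT;
    pgoff_t last  = (offset + len - 1) >> PAGE_SHIFT;   /* inclusive */
    unsigned long freed = invalidate_mapping_pages(mapping, first, last);
    /* Dirty, locked, mapped or writeback pages simply stay cached. */
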
     552             : /**
     553             :  * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
     554             :  * @mapping: the address_space which holds the pages to invalidate
     555             :  * @start: the offset 'from' which to invalidate
     556             :  * @end: the offset 'to' which to invalidate (inclusive)
      557             :  * @nr_pagevec: out parameter counting pages that failed invalidation
     558             :  *
     559             :  * This helper is similar to invalidate_mapping_pages(), except that it accounts
     560             :  * for pages that are likely on a pagevec and counts them in @nr_pagevec, which
     561             :  * will be used by the caller.
     562             :  */
     563           0 : void invalidate_mapping_pagevec(struct address_space *mapping,
     564             :                 pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
     565             : {
     566           0 :         __invalidate_mapping_pages(mapping, start, end, nr_pagevec);
     567           0 : }
     568             : 
     569             : /*
     570             :  * This is like invalidate_complete_page(), except it ignores the page's
     571             :  * refcount.  We do this because invalidate_inode_pages2() needs stronger
     572             :  * invalidation guarantees, and cannot afford to leave pages behind because
     573             :  * shrink_page_list() has a temp ref on them, or because they're transiently
     574             :  * sitting in the lru_cache_add() pagevecs.
     575             :  */
     576             : static int
     577           0 : invalidate_complete_page2(struct address_space *mapping, struct page *page)
     578             : {
     579           0 :         unsigned long flags;
     580             : 
     581           0 :         if (page->mapping != mapping)
     582             :                 return 0;
     583             : 
     584           0 :         if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
     585             :                 return 0;
     586             : 
     587           0 :         xa_lock_irqsave(&mapping->i_pages, flags);
     588           0 :         if (PageDirty(page))
     589           0 :                 goto failed;
     590             : 
     591           0 :         BUG_ON(page_has_private(page));
     592           0 :         __delete_from_page_cache(page, NULL);
     593           0 :         xa_unlock_irqrestore(&mapping->i_pages, flags);
     594             : 
     595           0 :         if (mapping->a_ops->freepage)
     596           0 :                 mapping->a_ops->freepage(page);
     597             : 
     598           0 :         put_page(page); /* pagecache ref */
     599           0 :         return 1;
     600           0 : failed:
     601           0 :         xa_unlock_irqrestore(&mapping->i_pages, flags);
     602           0 :         return 0;
     603             : }
     604             : 
     605           0 : static int do_launder_page(struct address_space *mapping, struct page *page)
     606             : {
     607           0 :         if (!PageDirty(page))
     608             :                 return 0;
     609           0 :         if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
     610             :                 return 0;
     611           0 :         return mapping->a_ops->launder_page(page);
     612             : }
     613             : 
     614             : /**
     615             :  * invalidate_inode_pages2_range - remove range of pages from an address_space
     616             :  * @mapping: the address_space
     617             :  * @start: the page offset 'from' which to invalidate
     618             :  * @end: the page offset 'to' which to invalidate (inclusive)
     619             :  *
     620             :  * Any pages which are found to be mapped into pagetables are unmapped prior to
     621             :  * invalidation.
     622             :  *
     623             :  * Return: -EBUSY if any pages could not be invalidated.
     624             :  */
     625           0 : int invalidate_inode_pages2_range(struct address_space *mapping,
     626             :                                   pgoff_t start, pgoff_t end)
     627             : {
     628           0 :         pgoff_t indices[PAGEVEC_SIZE];
     629           0 :         struct pagevec pvec;
     630           0 :         pgoff_t index;
     631           0 :         int i;
     632           0 :         int ret = 0;
     633           0 :         int ret2 = 0;
     634           0 :         int did_range_unmap = 0;
     635             : 
     636           0 :         if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
     637           0 :                 goto out;
     638             : 
     639           0 :         pagevec_init(&pvec);
     640           0 :         index = start;
     641           0 :         while (find_get_entries(mapping, index, end, &pvec, indices)) {
     642           0 :                 for (i = 0; i < pagevec_count(&pvec); i++) {
     643           0 :                         struct page *page = pvec.pages[i];
     644             : 
     645             :                         /* We rely upon deletion not changing page->index */
     646           0 :                         index = indices[i];
     647             : 
     648           0 :                         if (xa_is_value(page)) {
     649           0 :                                 if (!invalidate_exceptional_entry2(mapping,
     650             :                                                                    index, page))
     651             :                                         ret = -EBUSY;
     652           0 :                                 continue;
     653             :                         }
     654             : 
     655           0 :                         lock_page(page);
     656           0 :                         WARN_ON(page_to_index(page) != index);
     657           0 :                         if (page->mapping != mapping) {
     658           0 :                                 unlock_page(page);
     659           0 :                                 continue;
     660             :                         }
     661           0 :                         wait_on_page_writeback(page);
     662           0 :                         if (page_mapped(page)) {
     663           0 :                                 if (!did_range_unmap) {
     664             :                                         /*
     665             :                                          * Zap the rest of the file in one hit.
     666             :                                          */
     667           0 :                                         unmap_mapping_pages(mapping, index,
     668           0 :                                                 (1 + end - index), false);
     669           0 :                                         did_range_unmap = 1;
     670             :                                 } else {
     671             :                                         /*
     672             :                                          * Just zap this page
     673             :                                          */
     674           0 :                                         unmap_mapping_pages(mapping, index,
     675             :                                                                 1, false);
     676             :                                 }
     677             :                         }
     678           0 :                         BUG_ON(page_mapped(page));
     679           0 :                         ret2 = do_launder_page(mapping, page);
     680           0 :                         if (ret2 == 0) {
     681           0 :                                 if (!invalidate_complete_page2(mapping, page))
     682             :                                         ret2 = -EBUSY;
     683             :                         }
     684           0 :                         if (ret2 < 0)
     685             :                                 ret = ret2;
     686           0 :                         unlock_page(page);
     687             :                 }
     688           0 :                 pagevec_remove_exceptionals(&pvec);
     689           0 :                 pagevec_release(&pvec);
     690           0 :                 cond_resched();
     691           0 :                 index++;
     692             :         }
     693             :         /*
      694             :          * could invalidate page tables while invalidating each entry, but that
      695             :          * would be expensive. And unmapping the range up front doesn't work
      696             :          * either, as we have no cheap way to tell whether a page cache entry
      697             :          * got remapped later.
     698             :          * get remapped later.
     699             :          */
     700           0 :         if (dax_mapping(mapping)) {
     701             :                 unmap_mapping_pages(mapping, start, end - start + 1, false);
     702             :         }
     703           0 : out:
     704           0 :         cleancache_invalidate_inode(mapping);
     705           0 :         return ret;
     706             : }
     707             : EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
     708             : 
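A sketch of the direct-write style usage this stronger invalidation exists for
(variable names assumed; it mirrors the pattern in the generic direct IO path):

    /* After writing [pos, pos + written) to disk directly, cached pages
     * over that range are stale and must be shot down; -EBUSY reports a
     * page that could not be invalidated. */
    err = invalidate_inode_pages2_range(mapping,
                    pos >> PAGE_SHIFT,
                    (pos + written - 1) >> PAGE_SHIFT);
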
     709             : /**
     710             :  * invalidate_inode_pages2 - remove all pages from an address_space
     711             :  * @mapping: the address_space
     712             :  *
     713             :  * Any pages which are found to be mapped into pagetables are unmapped prior to
     714             :  * invalidation.
     715             :  *
     716             :  * Return: -EBUSY if any pages could not be invalidated.
     717             :  */
     718           0 : int invalidate_inode_pages2(struct address_space *mapping)
     719             : {
     720           0 :         return invalidate_inode_pages2_range(mapping, 0, -1);
     721             : }
     722             : EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
     723             : 
     724             : /**
     725             :  * truncate_pagecache - unmap and remove pagecache that has been truncated
     726             :  * @inode: inode
     727             :  * @newsize: new file size
     728             :  *
      729             :  * The inode's new i_size must already be written before truncate_pagecache
     730             :  * is called.
     731             :  *
     732             :  * This function should typically be called before the filesystem
     733             :  * releases resources associated with the freed range (eg. deallocates
     734             :  * blocks). This way, pagecache will always stay logically coherent
     735             :  * with on-disk format, and the filesystem would not have to deal with
     736             :  * situations such as writepage being called for a page that has already
     737             :  * had its underlying blocks deallocated.
     738             :  */
     739           5 : void truncate_pagecache(struct inode *inode, loff_t newsize)
     740             : {
     741           5 :         struct address_space *mapping = inode->i_mapping;
     742           5 :         loff_t holebegin = round_up(newsize, PAGE_SIZE);
     743             : 
     744             :         /*
     745             :          * unmap_mapping_range is called twice, first simply for
     746             :          * efficiency so that truncate_inode_pages does fewer
     747             :          * single-page unmaps.  However after this first call, and
     748             :          * before truncate_inode_pages finishes, it is possible for
     749             :          * private pages to be COWed, which remain after
     750             :          * truncate_inode_pages finishes, hence the second
     751             :          * unmap_mapping_range call must be made for correctness.
     752             :          */
     753           5 :         unmap_mapping_range(mapping, holebegin, 0, 1);
     754           5 :         truncate_inode_pages(mapping, newsize);
     755           5 :         unmap_mapping_range(mapping, holebegin, 0, 1);
     756           5 : }
     757             : EXPORT_SYMBOL(truncate_pagecache);
     758             : 
     759             : /**
     760             :  * truncate_setsize - update inode and pagecache for a new file size
     761             :  * @inode: inode
     762             :  * @newsize: new file size
     763             :  *
     764             :  * truncate_setsize updates i_size and performs pagecache truncation (if
      765             :  * necessary) to @newsize. It will typically be called from the filesystem's
     766             :  * setattr function when ATTR_SIZE is passed in.
     767             :  *
     768             :  * Must be called with a lock serializing truncates and writes (generally
     769             :  * i_mutex but e.g. xfs uses a different lock) and before all filesystem
     770             :  * specific block truncation has been performed.
     771             :  */
     772           0 : void truncate_setsize(struct inode *inode, loff_t newsize)
     773             : {
     774           0 :         loff_t oldsize = inode->i_size;
     775             : 
     776           0 :         i_size_write(inode, newsize);
     777           0 :         if (newsize > oldsize)
     778           0 :                 pagecache_isize_extended(inode, oldsize, newsize);
     779           0 :         truncate_pagecache(inode, newsize);
     780           0 : }
     781             : EXPORT_SYMBOL(truncate_setsize);
     782             : 
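A sketch of the canonical setattr caller the comment describes, using the
v5.12-era generic helpers this report was built against (myfs_setattr is
hypothetical; the mnt_userns parameter follows that era's API):

    static int myfs_setattr(struct user_namespace *mnt_userns,
                            struct dentry *dentry, struct iattr *attr)
    {
            struct inode *inode = d_inode(dentry);
            int error;

            error = setattr_prepare(mnt_userns, dentry, attr);
            if (error)
                    return error;

            if (attr->ia_valid & ATTR_SIZE)
                    truncate_setsize(inode, attr->ia_size);

            setattr_copy(mnt_userns, inode, attr);
            mark_inode_dirty(inode);
            return 0;
    }
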
     783             : /**
     784             :  * pagecache_isize_extended - update pagecache after extension of i_size
     785             :  * @inode:      inode for which i_size was extended
     786             :  * @from:       original inode size
     787             :  * @to:         new inode size
     788             :  *
     789             :  * Handle extension of inode size either caused by extending truncate or by
     790             :  * write starting after current i_size. We mark the page straddling current
     791             :  * i_size RO so that page_mkwrite() is called on the nearest write access to
     792             :  * the page.  This way filesystem can be sure that page_mkwrite() is called on
     793             :  * the page before user writes to the page via mmap after the i_size has been
     794             :  * changed.
     795             :  *
      796             :  * The function must be called after i_size is updated so that a page fault
      797             :  * coming after we unlock the page will already see the new i_size.
      798             :  * The function must be called while we still hold i_mutex - this not only
      799             :  * makes sure i_size is stable but also that userspace cannot observe the new
      800             :  * i_size value before we are prepared to store mmap writes at the new inode size.
     801             :  */
     802           0 : void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
     803             : {
     804           0 :         int bsize = i_blocksize(inode);
     805           0 :         loff_t rounded_from;
     806           0 :         struct page *page;
     807           0 :         pgoff_t index;
     808             : 
     809           0 :         WARN_ON(to > inode->i_size);
     810             : 
     811           0 :         if (from >= to || bsize == PAGE_SIZE)
     812             :                 return;
     813             :         /* Page straddling @from will not have any hole block created? */
     814           0 :         rounded_from = round_up(from, bsize);
     815           0 :         if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
     816             :                 return;
     817             : 
     818           0 :         index = from >> PAGE_SHIFT;
     819           0 :         page = find_lock_page(inode->i_mapping, index);
     820             :         /* Page not cached? Nothing to do */
     821           0 :         if (!page)
     822             :                 return;
     823             :         /*
     824             :          * See clear_page_dirty_for_io() for details why set_page_dirty()
     825             :          * is needed.
     826             :          */
     827           0 :         if (page_mkclean(page))
     828           0 :                 set_page_dirty(page);
     829           0 :         unlock_page(page);
     830           0 :         put_page(page);
     831             : }
     832             : EXPORT_SYMBOL(pagecache_isize_extended);
     833             : 
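A worked example of the guard conditions, assuming PAGE_SIZE == 4096 and a
1024-byte block size (values chosen for illustration only):

    /* pagecache_isize_extended(inode, from = 1536, to = 6000):
     *
     *   bsize        = 1024 (!= PAGE_SIZE, so no early return)
     *   rounded_from = round_up(1536, 1024) = 2048
     *   to (6000) > 2048 and 2048 & 4095 != 0, so the page at index
     *   1536 >> 12 == 0 straddles the old i_size: it is cleaned via
     *   page_mkclean() so the next mmap write triggers page_mkwrite().
     */
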
     834             : /**
     835             :  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
     836             :  * @inode: inode
     837             :  * @lstart: offset of beginning of hole
     838             :  * @lend: offset of last byte of hole
     839             :  *
     840             :  * This function should typically be called before the filesystem
     841             :  * releases resources associated with the freed range (eg. deallocates
     842             :  * blocks). This way, pagecache will always stay logically coherent
     843             :  * with on-disk format, and the filesystem would not have to deal with
     844             :  * situations such as writepage being called for a page that has already
     845             :  * had its underlying blocks deallocated.
     846             :  */
     847           0 : void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
     848             : {
     849           0 :         struct address_space *mapping = inode->i_mapping;
     850           0 :         loff_t unmap_start = round_up(lstart, PAGE_SIZE);
     851           0 :         loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
     852             :         /*
     853             :          * This rounding is currently just for example: unmap_mapping_range
     854             :          * expands its hole outwards, whereas we want it to contract the hole
     855             :          * inwards.  However, existing callers of truncate_pagecache_range are
     856             :          * doing their own page rounding first.  Note that unmap_mapping_range
     857             :          * allows holelen 0 for all, and we allow lend -1 for end of file.
     858             :          */
     859             : 
     860             :         /*
     861             :          * Unlike in truncate_pagecache, unmap_mapping_range is called only
     862             :          * once (before truncating pagecache), and without "even_cows" flag:
     863             :          * hole-punching should not remove private COWed pages from the hole.
     864             :          */
     865           0 :         if ((u64)unmap_end > (u64)unmap_start)
     866           0 :                 unmap_mapping_range(mapping, unmap_start,
     867           0 :                                     1 + unmap_end - unmap_start, 0);
     868           0 :         truncate_inode_pages_range(mapping, lstart, lend);
     869           0 : }
     870             : EXPORT_SYMBOL(truncate_pagecache_range);
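
A sketch of the hole-punch usage this helper is meant for; per the comment
above, real callers round offset/len to block boundaries first (the names
here are assumptions):

    /* Inside a fallocate(FALLOC_FL_PUNCH_HOLE) implementation, after
     * writing back the affected range and before freeing the blocks: */
    truncate_pagecache_range(inode, offset, offset + len - 1);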

Generated by: LCOV version 1.14