// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.rst for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static const struct cleancache_ops *cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only, so they are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
{
	switch (sb->cleancache_poolid) {
	case CLEANCACHE_NO_BACKEND:
		__cleancache_init_fs(sb);
		break;
	case CLEANCACHE_NO_BACKEND_SHARED:
		__cleancache_init_shared_fs(sb);
		break;
	}
}

/*
 * Register operations for cleancache. Returns 0 on success.
 */
int cleancache_register_ops(const struct cleancache_ops *ops)
{
	if (cmpxchg(&cleancache_ops, NULL, ops))
		return -EBUSY;

	/*
	 * A cleancache backend can be built as a module and hence loaded after
	 * a cleancache-enabled filesystem has called cleancache_init_fs. To
	 * handle such a scenario, here we call ->init_fs or ->init_shared_fs
	 * for each active super block. To differentiate between local and
	 * shared filesystems, we temporarily initialize sb->cleancache_poolid
	 * to CLEANCACHE_NO_BACKEND or CLEANCACHE_NO_BACKEND_SHARED
	 * respectively in case there is no backend registered at the time
	 * cleancache_init_fs or cleancache_init_shared_fs is called.
	 *
	 * Since filesystems can be mounted concurrently with cleancache
	 * backend registration, we have to be careful to guarantee that all
	 * cleancache-enabled filesystems that have been mounted by the time
	 * cleancache_register_ops is called have got, and all mounted later
	 * will get, a cleancache_poolid. This is assured by the following
	 * statements tied together:
	 *
	 * a) iterate_supers skips only those super blocks that have started
	 *    ->kill_sb
	 *
	 * b) if iterate_supers encounters a super block that has not finished
	 *    ->mount yet, it waits until it is finished
	 *
	 * c) cleancache_init_fs is called from ->mount and
	 *    cleancache_invalidate_fs is called from ->kill_sb
	 *
	 * d) we call iterate_supers after cleancache_ops has been set
	 *
	 * From a) it follows that if iterate_supers skips a super block, then
	 * either the super block is already dead, in which case we do not need
	 * to bother initializing cleancache for it, or it was mounted after we
	 * initiated iterate_supers. In the latter case, it must have seen
	 * cleancache_ops set according to d) and initialized cleancache from
	 * ->mount by itself according to c). This proves that we call
	 * ->init_fs at least once for each active super block.
	 *
	 * From b) and c) it follows that if iterate_supers encounters a super
	 * block that has already started ->init_fs, it will wait until ->mount
	 * and hence ->init_fs has finished, then check cleancache_poolid, see
	 * that it has already been set and therefore do nothing. This proves
	 * that we call ->init_fs no more than once for each super block.
	 *
	 * Taken together, the last two paragraphs prove the correctness of
	 * this function.
	 *
	 * Note that various cleancache callbacks may proceed before this
	 * function is called or even concurrently with it, but since
	 * CLEANCACHE_NO_BACKEND is negative, they will all result in a noop
	 * until the corresponding ->init_fs has actually been called and
	 * cleancache_ops has been set.
	 */
	iterate_supers(cleancache_register_ops_sb, NULL);
	return 0;
}
EXPORT_SYMBOL(cleancache_register_ops);
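
/*
 * Illustrative sketch only (not part of the original file): a minimal
 * backend module might register itself roughly as below.  All "example_*"
 * names and the made-up pool ids are hypothetical; the callback signatures
 * follow the way this frontend invokes them (struct cleancache_ops is
 * declared in include/linux/cleancache.h).  Discarding every put and
 * missing every get is a legal, if useless, backend, since puts are only
 * hints and gets are allowed to fail.
 *
 *	static int example_init_fs(size_t pagesize) { return 0; }
 *	static int example_init_shared_fs(uuid_t *uuid, size_t pagesize)
 *	{ return 1; }
 *	static int example_get_page(int pool, struct cleancache_filekey key,
 *				    pgoff_t index, struct page *page)
 *	{ return -1; }
 *	static void example_put_page(int pool, struct cleancache_filekey key,
 *				     pgoff_t index, struct page *page) { }
 *	static void example_invalidate_page(int pool,
 *					    struct cleancache_filekey key,
 *					    pgoff_t index) { }
 *	static void example_invalidate_inode(int pool,
 *					     struct cleancache_filekey key) { }
 *	static void example_invalidate_fs(int pool) { }
 *
 *	static const struct cleancache_ops example_cleancache_ops = {
 *		.init_fs		= example_init_fs,
 *		.init_shared_fs		= example_init_shared_fs,
 *		.get_page		= example_get_page,
 *		.put_page		= example_put_page,
 *		.invalidate_page	= example_invalidate_page,
 *		.invalidate_inode	= example_invalidate_inode,
 *		.invalidate_fs		= example_invalidate_fs,
 *	};
 *
 *	static int __init example_backend_init(void)
 *	{
 *		return cleancache_register_ops(&example_cleancache_ops);
 *	}
 *	module_init(example_backend_init);
 */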

/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_fs(PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(struct super_block *sb)
{
	int pool_id = CLEANCACHE_NO_BACKEND_SHARED;

	if (cleancache_ops) {
		pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
		if (pool_id < 0)
			pool_id = CLEANCACHE_NO_POOL;
	}
	sb->cleancache_poolid = pool_id;
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);

/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
			      struct cleancache_filekey *key)
{
	int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
	int len = 0, maxlen = CLEANCACHE_KEY_MAX;
	struct super_block *sb = inode->i_sb;

	key->u.ino = inode->i_ino;
	if (sb->s_export_op != NULL) {
		fhfn = sb->s_export_op->encode_fh;
		if (fhfn) {
			len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
			if (len <= FILEID_ROOT || len == FILEID_INVALID)
				return -1;
			if (maxlen > CLEANCACHE_KEY_MAX)
				return -1;
		}
	}
	return 0;
}
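
/*
 * For reference, struct cleancache_filekey (declared in
 * include/linux/cleancache.h) is roughly a small fixed-size union:
 *
 *	struct cleancache_filekey {
 *		union {
 *			ino_t ino;
 *			__u32 fh[CLEANCACHE_KEY_MAX];
 *			u32 key[CLEANCACHE_KEY_MAX];
 *		} u;
 *	};
 *
 * so the inode number stored above may simply be overwritten by the
 * encoded filehandle when the filesystem provides ->encode_fh.
 */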

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * If the get fails, the page frame is left unchanged and -1 is returned.
 * The page must be locked by the caller.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
int __cleancache_get_page(struct page *page)
{
	int ret = -1;
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_failed_gets++;
		goto out;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id < 0)
		goto out;

	if (cleancache_get_key(page->mapping->host, &key) < 0)
		goto out;

	ret = cleancache_ops->get_page(pool_id, key, page->index, page);
	if (ret == 0)
		cleancache_succ_gets++;
	else
		cleancache_failed_gets++;
out:
	return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);
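
/*
 * Illustrative sketch only: filesystems do not call __cleancache_get_page()
 * directly but go through the cleancache_get_page() wrapper from
 * include/linux/cleancache.h.  A hypothetical ->readpage() implementation
 * (example_readpage and example_submit_read_bio are made-up names) might
 * try cleancache before issuing real I/O, roughly like this:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		if (cleancache_get_page(page) == 0) {
 *			SetPageUptodate(page);
 *			unlock_page(page);
 *			return 0;
 *		}
 *		return example_submit_read_bio(page);
 *	}
 */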

/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's inode
 * and index. The page must be locked. Note that a put_page always
 * "succeeds", though a subsequent get_page may succeed or fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_put_page(struct page *page)
{
	int pool_id;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops) {
		cleancache_puts++;
		return;
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	pool_id = page->mapping->host->i_sb->cleancache_poolid;
	if (pool_id >= 0 &&
	    cleancache_get_key(page->mapping->host, &key) >= 0) {
		cleancache_ops->put_page(pool_id, key, page->index, page);
		cleancache_puts++;
	}
}
EXPORT_SYMBOL(__cleancache_put_page);
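
/*
 * Illustrative sketch only: the usual caller of the put/invalidate pair is
 * the page cache removal path in mm/filemap.c, which (roughly) saves a
 * clean, uptodate page into cleancache and otherwise invalidates any stale
 * copy, via the wrappers from include/linux/cleancache.h:
 *
 *	if (PageUptodate(page) && PageMappedToDisk(page))
 *		cleancache_put_page(page);
 *	else
 *		cleancache_invalidate_page(mapping, page);
 */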

/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
				  struct page *page)
{
	/* careful... page->mapping is NULL sometimes when this is called */
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0) {
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (cleancache_get_key(mapping->host, &key) >= 0) {
			cleancache_ops->invalidate_page(pool_id,
							key, page->index);
			cleancache_invalidates++;
		}
	}
}
EXPORT_SYMBOL(__cleancache_invalidate_page);

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 *
 * The function has two checks before any action is taken - whether
 * a backend is registered and whether the sb->cleancache_poolid
 * is correct.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
	int pool_id = mapping->host->i_sb->cleancache_poolid;
	struct cleancache_filekey key = { .u.key = { 0 } };

	if (!cleancache_ops)
		return;

	if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
		cleancache_ops->invalidate_inode(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);

/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
	int pool_id;

	pool_id = sb->cleancache_poolid;
	sb->cleancache_poolid = CLEANCACHE_NO_POOL;

	if (cleancache_ops && pool_id >= 0)
		cleancache_ops->invalidate_fs(pool_id);
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);

static int __init init_cleancache(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("cleancache", NULL);

	debugfs_create_u64("succ_gets", 0444, root, &cleancache_succ_gets);
	debugfs_create_u64("failed_gets", 0444, root, &cleancache_failed_gets);
	debugfs_create_u64("puts", 0444, root, &cleancache_puts);
	debugfs_create_u64("invalidates", 0444, root, &cleancache_invalidates);
#endif
	return 0;
}
module_init(init_cleancache)
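
/*
 * With debugfs mounted, the counters created above can be inspected at
 * runtime, e.g.:
 *
 *	# cat /sys/kernel/debug/cleancache/succ_gets
 *	# cat /sys/kernel/debug/cleancache/failed_gets
 *	# cat /sys/kernel/debug/cleancache/puts
 *	# cat /sys/kernel/debug/cleancache/invalidates
 */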