// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique; however,
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use a hash of the data as the key and provide a value that may
 * represent a block or inode number. That is why keys need not be unique
 * (hashes of different data may be the same). However, the user-provided
 * value always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A
 * fixed-size hash table is used for fast key lookups.
 */
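
/*
 * A minimal usage sketch (illustrative only; 'hash', 'block_nr' and the
 * error handling are hypothetical caller-side details):
 *
 *	cache = mb_cache_create(10);
 *	err = mb_cache_entry_create(cache, GFP_NOFS, hash, block_nr, true);
 *	entry = mb_cache_entry_get(cache, hash, block_nr);
 *	if (entry) {
 *		mb_cache_entry_touch(cache, entry);
 *		mb_cache_entry_put(cache, entry);
 *	}
 *	mb_cache_entry_delete(cache, hash, block_nr);
 *	mb_cache_destroy(cache);
 */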

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};
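
/*
 * Locking note: the per-bucket hlist_bl bit locks and c_list_lock are never
 * held at the same time; every function below releases one before taking
 * the other, so no ordering between the two is required.
 */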

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache: cache where the entry should be created
 * @mask: gfp mask with which the entry should be allocated
 * @key: key of the entry
 * @value: value of the entry
 * @reusable: is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists in
 * the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2 * cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* The initial reference is for the hash chain */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_reusable = reusable;
	entry->e_referenced = 0;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

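/*
 * __entry_find - find the next reusable entry with the given key
 * @cache: cache to search in
 * @entry: entry after which to resume the search, or NULL to start from the
 *	   head of the hash chain
 * @key: key to look for
 *
 * Returns the next entry in the hash chain that has key @key and is marked
 * reusable, with a reference grabbed, or NULL if there is none. The
 * reference to @entry is dropped. If @entry was unhashed in the meantime
 * (deletion racing with the search), the scan restarts from the chain head.
 */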
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
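
/*
 * Typical loop over all reusable entries with a given key (sketch; 'hash'
 * and the try_to_reuse() helper are hypothetical caller-side names):
 *
 *	entry = mb_cache_entry_find_first(cache, hash);
 *	while (entry) {
 *		if (try_to_reuse(entry))
 *			break;
 *		entry = mb_cache_entry_find_next(cache, entry);
 *	}
 *	if (entry)
 *		mb_cache_entry_put(cache, entry);
 *
 * Since mb_cache_entry_find_next() drops the reference to its argument,
 * only the entry the loop stops at needs to be put by the caller.
 */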

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Finds the entry matching both @key and @value, whether it is marked
 * reusable or not, and grabs a reference to it. Returns NULL if no such
 * entry exists.
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete - remove a cache entry
 * @cache: cache we work with
 * @key: key
 * @value: value
 *
 * Remove the entry from @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/* mb_cache_entry_touch - cache entry got used
 * @cache: cache the entry belongs to
 * @entry: entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in the
 * cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
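		/*
		 * Recently used entries get one more pass through the LRU
		 * list: clear the referenced bit and rotate the entry to
		 * the tail instead of reclaiming it.
		 */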
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
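		/*
		 * Drop the LRU list reference; if it was the last one, the
		 * entry is freed and counts as reclaimed.
		 */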
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache for keys with a hash table of 2^bucket_bits buckets. The
 * cache is allowed to grow up to 16 entries per bucket on average
 * (c_max_entries = 2^bucket_bits * 16) before reclaim starts.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		} else
			WARN_ON(1);
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");