Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
4 : */
5 :
6 : /*
7 : * fsnotify inode mark locking, lifetime and refcounting
8 : *
9 : * REFCNT:
10 : * The group->refcnt and mark->refcnt tell how many "things" in the kernel
11 : * are currently referencing the objects. Both kinds of objects typically will
12 : * live inside the kernel with a refcnt of 2, one for its creation and one for
13 : * the reference a group and a mark hold to each other.
14 : * If you are holding the appropriate locks, you can take a reference and the
15 : * object itself is guaranteed to survive until the reference is dropped.
16 : *
17 : * LOCKING:
18 : * There are 3 locks involved with fsnotify inode marks and they MUST be taken
19 : * in order as follows:
20 : *
21 : * group->mark_mutex
22 : * mark->lock
23 : * mark->connector->lock
24 : *
25 : * group->mark_mutex protects the marks_list anchored inside a given group and
26 : * each mark is hooked via the g_list. It also protects the group's private
27 : * data (i.e. group limits).
28 : *
29 : * mark->lock protects the mark's attributes like its masks and flags.
30 : * Furthermore it protects the access to a reference of the group that the mark
31 : * is assigned to as well as the access to a reference of the inode/vfsmount
32 : * that is being watched by the mark.
33 : *
34 : * mark->connector->lock protects the list of marks anchored inside an
35 : * inode / vfsmount and each mark is hooked via the i_list.
36 : *
37 : * A list of notification marks relating to inode / mnt is contained in
38 : * fsnotify_mark_connector. That structure is alive as long as there are any
39 : * marks in the list and is also protected by fsnotify_mark_srcu. A mark gets
40 : * detached from fsnotify_mark_connector when the last reference to the mark is
41 : * dropped. Thus holding a mark reference is enough to protect mark->connector
42 : * pointer and to make sure fsnotify_mark_connector cannot disappear. Also,
43 : * because we remove the mark from g_list before dropping the mark reference
44 : * associated with it, any mark found through g_list is guaranteed to have
45 : * mark->connector set until we drop group->mark_mutex.
46 : *
47 : * LIFETIME:
48 : * Inode marks survive between when they are added to an inode and when their
49 : * refcnt==0. Marks are also protected by fsnotify_mark_srcu.
50 : *
51 : * The inode mark can be cleared for a number of different reasons including:
52 : * - The inode is unlinked for the last time. (fsnotify_inode_remove)
53 : * - The inode is being evicted from cache. (fsnotify_inode_delete)
54 : * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
55 : * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
56 : * - The fsnotify_group associated with the mark is going away and all such marks
57 : * need to be cleaned up. (fsnotify_clear_marks_by_group)
58 : *
59 : * These removal paths have the very interesting property of being able to run
60 : * concurrently with any (or all) of the others.
61 : */
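/*
 * Illustrative sketch (hypothetical backend, not taken from any particular
 * caller): attaching a mark to an inode and tearing it down again follows
 * the refcount and locking rules above roughly like this.
 * fsnotify_add_mark() takes group->mark_mutex internally and grabs the
 * extra reference for g_list:
 *
 *	fsnotify_init_mark(mark, group);	// refcnt == 1 (creator)
 *	fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
 *			  FSNOTIFY_OBJ_TYPE_INODE, 0, NULL);
 *	...
 *	fsnotify_destroy_mark(mark, group);	// detach from group and object
 *	fsnotify_put_mark(mark);		// drop the creator's reference
 */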
62 :
63 : #include <linux/fs.h>
64 : #include <linux/init.h>
65 : #include <linux/kernel.h>
66 : #include <linux/kthread.h>
67 : #include <linux/module.h>
68 : #include <linux/mutex.h>
69 : #include <linux/slab.h>
70 : #include <linux/spinlock.h>
71 : #include <linux/srcu.h>
72 : #include <linux/ratelimit.h>
73 :
74 : #include <linux/atomic.h>
75 :
76 : #include <linux/fsnotify_backend.h>
77 : #include "fsnotify.h"
78 :
79 : #define FSNOTIFY_REAPER_DELAY (1) /* 1 jiffy */
80 :
81 : struct srcu_struct fsnotify_mark_srcu;
82 : struct kmem_cache *fsnotify_mark_connector_cachep;
83 :
84 : static DEFINE_SPINLOCK(destroy_lock);
85 : static LIST_HEAD(destroy_list);
86 : static struct fsnotify_mark_connector *connector_destroy_list;
87 :
88 : static void fsnotify_mark_destroy_workfn(struct work_struct *work);
89 : static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn);
90 :
91 : static void fsnotify_connector_destroy_workfn(struct work_struct *work);
92 : static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);
93 :
94 260 : void fsnotify_get_mark(struct fsnotify_mark *mark)
95 : {
96 260 : WARN_ON_ONCE(!refcount_read(&mark->refcnt));
97 260 : refcount_inc(&mark->refcnt);
98 260 : }
99 :
100 99 : static __u32 *fsnotify_conn_mask_p(struct fsnotify_mark_connector *conn)
101 : {
102 99 : if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
103 99 : return &fsnotify_conn_inode(conn)->i_fsnotify_mask;
104 0 : else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT)
105 0 : return &fsnotify_conn_mount(conn)->mnt_fsnotify_mask;
106 0 : else if (conn->type == FSNOTIFY_OBJ_TYPE_SB)
107 0 : return &fsnotify_conn_sb(conn)->s_fsnotify_mask;
108 : return NULL;
109 : }
110 :
111 0 : __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn)
112 : {
113 0 : if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
114 : return 0;
115 :
116 0 : return *fsnotify_conn_mask_p(conn);
117 : }
118 :
119 99 : static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
120 : {
121 99 : u32 new_mask = 0;
122 99 : struct fsnotify_mark *mark;
123 :
124 99 : assert_spin_locked(&conn->lock);
125 : /* We can get detached connector here when inode is getting unlinked. */
126 99 : if (!fsnotify_valid_obj_type(conn->type))
127 : return;
128 452 : hlist_for_each_entry(mark, &conn->list, obj_list) {
129 127 : if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
130 127 : new_mask |= mark->mask;
131 : }
132 198 : *fsnotify_conn_mask_p(conn) = new_mask;
133 : }
134 :
135 : /*
136 : * Calculate mask of events for a list of marks. The caller must make sure
137 : * connector and connector->obj cannot disappear under us. Callers achieve
138 : * this by holding a mark->lock or mark->group->mark_mutex for a mark on this
139 : * list.
140 : */
141 92 : void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
142 : {
143 92 : if (!conn)
144 : return;
145 :
146 92 : spin_lock(&conn->lock);
147 92 : __fsnotify_recalc_mask(conn);
148 92 : spin_unlock(&conn->lock);
149 92 : if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
150 92 : __fsnotify_update_child_dentry_flags(
151 : fsnotify_conn_inode(conn));
152 : }
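/*
 * Illustrative effect of the recalculation above: if an inode has two
 * attached marks with masks FS_CREATE and FS_DELETE, __fsnotify_recalc_mask()
 * leaves inode->i_fsnotify_mask == (FS_CREATE | FS_DELETE). For inodes we
 * also refresh the dentry flags of cached children so that
 * __fsnotify_parent() knows whether this directory still watches its
 * children.
 */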
153 :
154 : /* Free all connectors queued for freeing once SRCU period ends */
155 30 : static void fsnotify_connector_destroy_workfn(struct work_struct *work)
156 : {
157 30 : struct fsnotify_mark_connector *conn, *free;
158 :
159 30 : spin_lock(&destroy_lock);
160 30 : conn = connector_destroy_list;
161 30 : connector_destroy_list = NULL;
162 30 : spin_unlock(&destroy_lock);
163 :
164 30 : synchronize_srcu(&fsnotify_mark_srcu);
165 60 : while (conn) {
166 30 : free = conn;
167 30 : conn = conn->destroy_next;
168 30 : kmem_cache_free(fsnotify_mark_connector_cachep, free);
169 : }
170 30 : }
171 :
172 30 : static void *fsnotify_detach_connector_from_object(
173 : struct fsnotify_mark_connector *conn,
174 : unsigned int *type)
175 : {
176 30 : struct inode *inode = NULL;
177 :
178 30 : *type = conn->type;
179 30 : if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED)
180 : return NULL;
181 :
182 30 : if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
183 30 : inode = fsnotify_conn_inode(conn);
184 30 : inode->i_fsnotify_mask = 0;
185 30 : atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
186 0 : } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
187 0 : fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
188 0 : } else if (conn->type == FSNOTIFY_OBJ_TYPE_SB) {
189 0 : fsnotify_conn_sb(conn)->s_fsnotify_mask = 0;
190 : }
191 :
192 30 : rcu_assign_pointer(*(conn->obj), NULL);
193 30 : conn->obj = NULL;
194 30 : conn->type = FSNOTIFY_OBJ_TYPE_DETACHED;
195 :
196 30 : return inode;
197 : }
198 :
199 37 : static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
200 : {
201 37 : struct fsnotify_group *group = mark->group;
202 :
203 37 : if (WARN_ON_ONCE(!group))
204 : return;
205 37 : group->ops->free_mark(mark);
206 37 : fsnotify_put_group(group);
207 : }
208 :
209 : /* Drop object reference originally held by a connector */
210 37 : static void fsnotify_drop_object(unsigned int type, void *objp)
211 : {
212 37 : struct inode *inode;
213 37 : struct super_block *sb;
214 :
215 37 : if (!objp)
216 : return;
217 : /* Currently only inode references are passed to be dropped */
218 30 : if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
219 : return;
220 30 : inode = objp;
221 30 : sb = inode->i_sb;
222 30 : iput(inode);
223 60 : if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
224 30 : wake_up_var(&sb->s_fsnotify_inode_refs);
225 : }
226 :
227 248 : void fsnotify_put_mark(struct fsnotify_mark *mark)
228 : {
229 248 : struct fsnotify_mark_connector *conn = READ_ONCE(mark->connector);
230 248 : void *objp = NULL;
231 248 : unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
232 248 : bool free_conn = false;
233 :
234 : /* Catch marks that were actually never attached to object */
235 248 : if (!conn) {
236 0 : if (refcount_dec_and_test(&mark->refcnt))
237 0 : fsnotify_final_mark_destroy(mark);
238 211 : return;
239 : }
240 :
241 : /*
242 : * We have to be careful so that traversals of obj_list under lock can
243 : * safely grab mark reference.
244 : */
245 248 : if (!refcount_dec_and_lock(&mark->refcnt, &conn->lock))
246 : return;
247 :
248 37 : hlist_del_init_rcu(&mark->obj_list);
249 37 : if (hlist_empty(&conn->list)) {
250 30 : objp = fsnotify_detach_connector_from_object(conn, &type);
251 30 : free_conn = true;
252 : } else {
253 7 : __fsnotify_recalc_mask(conn);
254 : }
255 37 : WRITE_ONCE(mark->connector, NULL);
256 37 : spin_unlock(&conn->lock);
257 :
258 37 : fsnotify_drop_object(type, objp);
259 :
260 37 : if (free_conn) {
261 30 : spin_lock(&destroy_lock);
262 30 : conn->destroy_next = connector_destroy_list;
263 30 : connector_destroy_list = conn;
264 30 : spin_unlock(&destroy_lock);
265 30 : queue_work(system_unbound_wq, &connector_reaper_work);
266 : }
267 : /*
268 : * Note that we didn't update flags telling whether inode cares about
269 : * what's happening with children. We update these flags from
270 : * __fsnotify_parent() lazily when next event happens on one of our
271 : * children.
272 : */
273 37 : spin_lock(&destroy_lock);
274 37 : list_add(&mark->g_list, &destroy_list);
275 37 : spin_unlock(&destroy_lock);
276 37 : queue_delayed_work(system_unbound_wq, &reaper_work,
277 : FSNOTIFY_REAPER_DELAY);
278 : }
279 : EXPORT_SYMBOL_GPL(fsnotify_put_mark);
280 :
281 : /*
282 : * Get a mark reference when we found the mark via lockless traversal of the
283 : * object list. The mark may already have been removed from the list and be on
284 : * its way to be destroyed once the SRCU period ends.
285 : *
286 : * Also pin the group so it doesn't disappear under us.
287 : */
288 0 : static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
289 : {
290 0 : if (!mark)
291 : return true;
292 :
293 0 : if (refcount_inc_not_zero(&mark->refcnt)) {
294 0 : spin_lock(&mark->lock);
295 0 : if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
296 : /* mark is attached, group is still alive then */
297 0 : atomic_inc(&mark->group->user_waits);
298 0 : spin_unlock(&mark->lock);
299 0 : return true;
300 : }
301 0 : spin_unlock(&mark->lock);
302 0 : fsnotify_put_mark(mark);
303 : }
304 : return false;
305 : }
306 :
307 : /*
308 : * Puts marks and wakes up group destruction if necessary.
309 : *
310 : * Pairs with fsnotify_get_mark_safe()
311 : */
312 0 : static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
313 : {
314 0 : if (mark) {
315 0 : struct fsnotify_group *group = mark->group;
316 :
317 0 : fsnotify_put_mark(mark);
318 : /*
319 : * We abuse notification_waitq on group shutdown to wait until all marks
320 : * pinned while waiting for userspace have been released.
321 : */
322 0 : if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
323 0 : wake_up(&group->notification_waitq);
324 : }
325 0 : }
326 :
327 0 : bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
328 : __releases(&fsnotify_mark_srcu)
329 : {
330 0 : int type;
331 :
332 0 : fsnotify_foreach_obj_type(type) {
333 : /* This can fail if mark is being removed */
334 0 : if (!fsnotify_get_mark_safe(iter_info->marks[type])) {
335 0 : __release(&fsnotify_mark_srcu);
336 0 : goto fail;
337 : }
338 : }
339 :
340 : /*
341 : * Now that both marks are pinned by refcount in the inode / vfsmount
342 : * lists, we can drop SRCU lock, and safely resume the list iteration
343 : * once userspace returns.
344 : */
345 0 : srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);
346 :
347 0 : return true;
348 :
349 0 : fail:
350 0 : for (type--; type >= 0; type--)
351 0 : fsnotify_put_mark_wake(iter_info->marks[type]);
352 : return false;
353 : }
354 :
355 0 : void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
356 : __acquires(&fsnotify_mark_srcu)
357 : {
358 0 : int type;
359 :
360 0 : iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
361 0 : fsnotify_foreach_obj_type(type)
362 0 : fsnotify_put_mark_wake(iter_info->marks[type]);
363 0 : }
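/*
 * Illustrative pairing (sketch; wait_for_reply() is a stand-in for the
 * backend's own wait for a userspace verdict): a backend handling
 * permission events drops SRCU around the sleep and resumes iteration
 * afterwards:
 *
 *	if (fsnotify_prepare_user_wait(iter_info)) {
 *		ret = wait_for_reply(...);
 *		fsnotify_finish_user_wait(iter_info);
 *	}
 *
 * If fsnotify_prepare_user_wait() fails, a mark is on its way out; the SRCU
 * read lock is still held and the backend typically skips the wait.
 */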
364 :
365 : /*
366 : * Mark the mark as detached and remove it from the group list. The mark still
367 : * stays in the object list until its last reference is dropped. Note that we
368 : * rely on the mark being removed from the group list before the corresponding
369 : * reference to it is dropped. In particular we rely on mark->connector being
370 : * valid while we hold group->mark_mutex if we found the mark through g_list.
371 : *
372 : * Must be called with group->mark_mutex held. The caller must either hold
373 : * reference to the mark or be protected by fsnotify_mark_srcu.
374 : */
375 37 : void fsnotify_detach_mark(struct fsnotify_mark *mark)
376 : {
377 37 : struct fsnotify_group *group = mark->group;
378 :
379 37 : WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
380 74 : WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
381 : refcount_read(&mark->refcnt) < 1 +
382 : !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
383 :
384 37 : spin_lock(&mark->lock);
385 : /* something else already called this function on this mark */
386 37 : if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
387 0 : spin_unlock(&mark->lock);
388 0 : return;
389 : }
390 37 : mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;
391 37 : list_del_init(&mark->g_list);
392 37 : spin_unlock(&mark->lock);
393 :
394 37 : atomic_dec(&group->num_marks);
395 :
396 : /* Drop mark reference acquired in fsnotify_add_mark_locked() */
397 37 : fsnotify_put_mark(mark);
398 : }
399 :
400 : /*
401 : * Free fsnotify mark. The mark is actually only marked as being freed. The
402 : * actual freeing happens only once the last reference to the mark is dropped,
403 : * from a workqueue which first waits for the SRCU period to end.
404 : *
405 : * Caller must have a reference to the mark or be protected by
406 : * fsnotify_mark_srcu.
407 : */
408 37 : void fsnotify_free_mark(struct fsnotify_mark *mark)
409 : {
410 37 : struct fsnotify_group *group = mark->group;
411 :
412 37 : spin_lock(&mark->lock);
413 : /* something else already called this function on this mark */
414 37 : if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
415 0 : spin_unlock(&mark->lock);
416 0 : return;
417 : }
418 37 : mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
419 37 : spin_unlock(&mark->lock);
420 :
421 : /*
422 : * Some groups like to know that marks are being freed. This is a
423 : * callback to the group function to let it know that this mark
424 : * is being freed.
425 : */
426 37 : if (group->ops->freeing_mark)
427 37 : group->ops->freeing_mark(mark, group);
428 : }
429 :
430 31 : void fsnotify_destroy_mark(struct fsnotify_mark *mark,
431 : struct fsnotify_group *group)
432 : {
433 31 : mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
434 31 : fsnotify_detach_mark(mark);
435 31 : mutex_unlock(&group->mark_mutex);
436 31 : fsnotify_free_mark(mark);
437 31 : }
438 : EXPORT_SYMBOL_GPL(fsnotify_destroy_mark);
439 :
440 : /*
441 : * Sorting function for lists of fsnotify marks.
442 : *
443 : * Fanotify supports different notification classes (reflected as priority of
444 : * notification group). Events shall be passed to notification groups in
445 : * decreasing priority order. To achieve this marks in notification lists for
446 : * inodes and vfsmounts are sorted so that priorities of corresponding groups
447 : * are descending.
448 : *
449 : * Furthermore correct handling of the ignore mask requires processing inode
450 : * and vfsmount marks of each group together. Using the group address as
451 : * further sort criterion provides a unique sorting order and thus we can
452 : * merge inode and vfsmount lists of marks in linear time and find groups
453 : * present in both lists.
454 : *
455 : * A return value of 1 signifies that b has priority over a.
456 : * A return value of 0 signifies that the two marks have to be handled together.
457 : * A return value of -1 signifies that a has priority over b.
458 : */
459 245 : int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
460 : {
461 245 : if (a == b)
462 : return 0;
463 131 : if (!a)
464 : return 1;
465 17 : if (!b)
466 : return -1;
467 17 : if (a->priority < b->priority)
468 : return 1;
469 17 : if (a->priority > b->priority)
470 : return -1;
471 17 : if (a < b)
472 8 : return 1;
473 : return -1;
474 : }
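/*
 * Illustrative example: for a group ga with priority 1 and a group gb with
 * priority 0, fsnotify_compare_groups(ga, gb) returns -1, so ga's marks sort
 * ahead of gb's and the higher priority group is handed events first. Two
 * distinct groups of equal priority are ordered by address, which keeps the
 * order total and lets inode and vfsmount mark lists be merged in linear
 * time.
 */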
475 :
476 73 : static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
477 : unsigned int type,
478 : __kernel_fsid_t *fsid)
479 : {
480 73 : struct inode *inode = NULL;
481 73 : struct fsnotify_mark_connector *conn;
482 :
483 73 : conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL);
484 73 : if (!conn)
485 : return -ENOMEM;
486 73 : spin_lock_init(&conn->lock);
487 73 : INIT_HLIST_HEAD(&conn->list);
488 73 : conn->type = type;
489 73 : conn->obj = connp;
490 : /* Cache fsid of filesystem containing the object */
491 73 : if (fsid) {
492 0 : conn->fsid = *fsid;
493 0 : conn->flags = FSNOTIFY_CONN_FLAG_HAS_FSID;
494 : } else {
495 73 : conn->fsid.val[0] = conn->fsid.val[1] = 0;
496 73 : conn->flags = 0;
497 : }
498 73 : if (conn->type == FSNOTIFY_OBJ_TYPE_INODE)
499 73 : inode = igrab(fsnotify_conn_inode(conn));
500 : /*
501 : * cmpxchg() provides the barrier so that readers of *connp can only see
502 : * a fully initialized structure
503 : */
504 73 : if (cmpxchg(connp, NULL, conn)) {
505 : /* Someone else created list structure for us */
506 0 : if (inode)
507 0 : iput(inode);
508 0 : kmem_cache_free(fsnotify_mark_connector_cachep, conn);
509 : }
510 :
511 : return 0;
512 : }
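/*
 * Note on the cmpxchg() race above: if two tasks attach a connector to the
 * same object concurrently, only one cmpxchg() succeeds. The loser puts the
 * inode reference it took via igrab(), frees its own allocation and still
 * returns 0, so fsnotify_add_mark_list() below simply restarts and finds
 * the winner's connector.
 */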
513 :
514 : /*
515 : * Get mark connector, make sure it is alive and return with its lock held.
516 : * This is for users that get connector pointer from inode or mount. Users that
517 : * hold reference to a mark on the list may directly lock connector->lock as
518 : * they are sure list cannot go away under them.
519 : */
520 8044 : static struct fsnotify_mark_connector *fsnotify_grab_connector(
521 : fsnotify_connp_t *connp)
522 : {
523 8044 : struct fsnotify_mark_connector *conn;
524 8044 : int idx;
525 :
526 8044 : idx = srcu_read_lock(&fsnotify_mark_srcu);
527 8044 : conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
528 8044 : if (!conn)
529 7931 : goto out;
530 113 : spin_lock(&conn->lock);
531 113 : if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED) {
532 0 : spin_unlock(&conn->lock);
533 0 : srcu_read_unlock(&fsnotify_mark_srcu, idx);
534 0 : return NULL;
535 : }
536 113 : out:
537 8044 : srcu_read_unlock(&fsnotify_mark_srcu, idx);
538 8044 : return conn;
539 : }
540 :
541 : /*
542 : * Add the mark into its proper place in the given list of marks. These marks
543 : * may be used by the fsnotify backend to determine which event types should
544 : * be delivered to which group and for which inodes. These marks are ordered
545 : * by priority, highest number first, and then by the group's location in memory.
546 : */
547 86 : static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
548 : fsnotify_connp_t *connp, unsigned int type,
549 : int allow_dups, __kernel_fsid_t *fsid)
550 : {
551 86 : struct fsnotify_mark *lmark, *last = NULL;
552 86 : struct fsnotify_mark_connector *conn;
553 86 : int cmp;
554 86 : int err = 0;
555 :
556 86 : if (WARN_ON(!fsnotify_valid_obj_type(type)))
557 : return -EINVAL;
558 :
559 : /* Backend is expected to check for zero fsid (e.g. tmpfs) */
560 86 : if (fsid && WARN_ON_ONCE(!fsid->val[0] && !fsid->val[1]))
561 : return -ENODEV;
562 :
563 86 : restart:
564 159 : spin_lock(&mark->lock);
565 159 : conn = fsnotify_grab_connector(connp);
566 159 : if (!conn) {
567 73 : spin_unlock(&mark->lock);
568 73 : err = fsnotify_attach_connector_to_object(connp, type, fsid);
569 73 : if (err)
570 0 : return err;
571 73 : goto restart;
572 86 : } else if (fsid && !(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID)) {
573 0 : conn->fsid = *fsid;
574 : /* Pairs with smp_rmb() in fanotify_get_fsid() */
575 0 : smp_wmb();
576 0 : conn->flags |= FSNOTIFY_CONN_FLAG_HAS_FSID;
577 86 : } else if (fsid && (conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID) &&
578 0 : (fsid->val[0] != conn->fsid.val[0] ||
579 0 : fsid->val[1] != conn->fsid.val[1])) {
580 : /*
581 : * Backend is expected to check for non uniform fsid
582 : * (e.g. btrfs), but maybe we missed something?
583 : * Only allow setting conn->fsid once to non zero fsid.
584 : * inotify and non-fid fanotify groups do not set nor test
585 : * conn->fsid.
586 : */
587 0 : pr_warn_ratelimited("%s: fsid mismatch on object of type %u: "
588 : "%x.%x != %x.%x\n", __func__, conn->type,
589 : fsid->val[0], fsid->val[1],
590 : conn->fsid.val[0], conn->fsid.val[1]);
591 0 : err = -EXDEV;
592 0 : goto out_err;
593 : }
594 :
595 : /* is mark the first mark? */
596 86 : if (hlist_empty(&conn->list)) {
597 73 : hlist_add_head_rcu(&mark->obj_list, &conn->list);
598 73 : goto added;
599 : }
600 :
601 : /* should mark be in the middle of the current list? */
602 44 : hlist_for_each_entry(lmark, &conn->list, obj_list) {
603 17 : last = lmark;
604 :
605 17 : if ((lmark->group == mark->group) &&
606 0 : (lmark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) &&
607 : !allow_dups) {
608 0 : err = -EEXIST;
609 0 : goto out_err;
610 : }
611 :
612 17 : cmp = fsnotify_compare_groups(lmark->group, mark->group);
613 17 : if (cmp >= 0) {
614 8 : hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
615 8 : goto added;
616 : }
617 : }
618 :
619 5 : BUG_ON(last == NULL);
620 : /* mark should be the last entry. last is the current last entry */
621 5 : hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
622 86 : added:
623 : /*
624 : * Since connector is attached to object using cmpxchg() we are
625 : * guaranteed that connector initialization is fully visible by anyone
626 : * seeing mark->connector set.
627 : */
628 86 : WRITE_ONCE(mark->connector, conn);
629 86 : out_err:
630 86 : spin_unlock(&conn->lock);
631 86 : spin_unlock(&mark->lock);
632 86 : return err;
633 : }
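/*
 * Illustrative result of the ordering above: after adding marks for a
 * fanotify group of a higher priority class and an inotify group (priority
 * 0) to the same inode, conn->list holds the fanotify mark first, so events
 * are reported to the higher priority group before the lower priority one.
 */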
634 :
635 : /*
636 : * Attach an initialized mark to a given group and fs object.
637 : * These marks may be used for the fsnotify backend to determine which
638 : * event types should be delivered to which group.
639 : */
640 86 : int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
641 : fsnotify_connp_t *connp, unsigned int type,
642 : int allow_dups, __kernel_fsid_t *fsid)
643 : {
644 86 : struct fsnotify_group *group = mark->group;
645 86 : int ret = 0;
646 :
647 86 : BUG_ON(!mutex_is_locked(&group->mark_mutex));
648 :
649 : /*
650 : * LOCKING ORDER!!!!
651 : * group->mark_mutex
652 : * mark->lock
653 : * mark->connector->lock
654 : */
655 86 : spin_lock(&mark->lock);
656 86 : mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;
657 :
658 86 : list_add(&mark->g_list, &group->marks_list);
659 86 : atomic_inc(&group->num_marks);
660 86 : fsnotify_get_mark(mark); /* for g_list */
661 86 : spin_unlock(&mark->lock);
662 :
663 86 : ret = fsnotify_add_mark_list(mark, connp, type, allow_dups, fsid);
664 86 : if (ret)
665 0 : goto err;
666 :
667 86 : if (mark->mask)
668 86 : fsnotify_recalc_mask(mark->connector);
669 :
670 : return ret;
671 0 : err:
672 0 : spin_lock(&mark->lock);
673 0 : mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE |
674 : FSNOTIFY_MARK_FLAG_ATTACHED);
675 0 : list_del_init(&mark->g_list);
676 0 : spin_unlock(&mark->lock);
677 0 : atomic_dec(&group->num_marks);
678 :
679 0 : fsnotify_put_mark(mark);
680 0 : return ret;
681 : }
682 :
683 0 : int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
684 : unsigned int type, int allow_dups, __kernel_fsid_t *fsid)
685 : {
686 0 : int ret;
687 0 : struct fsnotify_group *group = mark->group;
688 :
689 0 : mutex_lock(&group->mark_mutex);
690 0 : ret = fsnotify_add_mark_locked(mark, connp, type, allow_dups, fsid);
691 0 : mutex_unlock(&group->mark_mutex);
692 0 : return ret;
693 : }
694 : EXPORT_SYMBOL_GPL(fsnotify_add_mark);
695 :
696 : /*
697 : * Given a list of marks, find the mark associated with a given group. If found,
698 : * take a reference to that mark and return it, else return NULL.
699 : */
700 100 : struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
701 : struct fsnotify_group *group)
702 : {
703 100 : struct fsnotify_mark_connector *conn;
704 100 : struct fsnotify_mark *mark;
705 :
706 100 : conn = fsnotify_grab_connector(connp);
707 100 : if (!conn)
708 : return NULL;
709 :
710 80 : hlist_for_each_entry(mark, &conn->list, obj_list) {
711 40 : if (mark->group == group &&
712 14 : (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
713 14 : fsnotify_get_mark(mark);
714 14 : spin_unlock(&conn->lock);
715 14 : return mark;
716 : }
717 : }
718 13 : spin_unlock(&conn->lock);
719 13 : return NULL;
720 : }
721 : EXPORT_SYMBOL_GPL(fsnotify_find_mark);
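/*
 * Illustrative usage (hypothetical caller): look up an existing mark for
 * this group on an inode before deciding whether to update it or add a
 * new one:
 *
 *	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
 *	if (mark) {
 *		// update mark->mask under mark->lock, recalc the object mask, ...
 *		fsnotify_put_mark(mark);	// drop the reference we were given
 *	}
 */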
722 :
723 : /* Clear any marks in a group with given type mask */
724 7 : void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
725 : unsigned int type_mask)
726 : {
727 7 : struct fsnotify_mark *lmark, *mark;
728 7 : LIST_HEAD(to_free);
729 7 : struct list_head *head = &to_free;
730 :
731 : /* Skip selection step if we want to clear all marks. */
732 7 : if (type_mask == FSNOTIFY_OBJ_ALL_TYPES_MASK) {
733 7 : head = &group->marks_list;
734 7 : goto clear;
735 : }
736 : /*
737 : * We have to be really careful here. Anytime we drop mark_mutex, e.g.
738 : * fsnotify_clear_marks_by_inode() can come and free marks, even ones on
739 : * our to_free list, so we have to hold mark_mutex even when accessing
740 : * that list. And freeing a mark requires us to drop mark_mutex. So we
741 : * can reliably free only the first mark in the list. That's why we first
742 : * move the marks to be freed onto the to_free list in one go and then
743 : * free the marks from the to_free list one by one.
744 : */
745 0 : mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
746 0 : list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
747 0 : if ((1U << mark->connector->type) & type_mask)
748 0 : list_move(&mark->g_list, &to_free);
749 : }
750 0 : mutex_unlock(&group->mark_mutex);
751 :
752 7 : clear:
753 19 : while (1) {
754 13 : mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
755 13 : if (list_empty(head)) {
756 7 : mutex_unlock(&group->mark_mutex);
757 7 : break;
758 : }
759 6 : mark = list_first_entry(head, struct fsnotify_mark, g_list);
760 6 : fsnotify_get_mark(mark);
761 6 : fsnotify_detach_mark(mark);
762 6 : mutex_unlock(&group->mark_mutex);
763 6 : fsnotify_free_mark(mark);
764 6 : fsnotify_put_mark(mark);
765 : }
766 7 : }
767 :
768 : /* Destroy all marks attached to an object via connector */
769 7785 : void fsnotify_destroy_marks(fsnotify_connp_t *connp)
770 : {
771 7785 : struct fsnotify_mark_connector *conn;
772 7785 : struct fsnotify_mark *mark, *old_mark = NULL;
773 7785 : void *objp;
774 7785 : unsigned int type;
775 :
776 7785 : conn = fsnotify_grab_connector(connp);
777 7785 : if (!conn)
778 7785 : return;
779 : /*
780 : * We have to be careful since we can race with e.g.
781 : * fsnotify_clear_marks_by_group() and once we drop the conn->lock, the
782 : * list can get modified. However we are holding mark reference and
783 : * thus our mark cannot be removed from obj_list so we can continue
784 : * iteration after regaining conn->lock.
785 : */
786 0 : hlist_for_each_entry(mark, &conn->list, obj_list) {
787 0 : fsnotify_get_mark(mark);
788 0 : spin_unlock(&conn->lock);
789 0 : if (old_mark)
790 0 : fsnotify_put_mark(old_mark);
791 0 : old_mark = mark;
792 0 : fsnotify_destroy_mark(mark, mark->group);
793 0 : spin_lock(&conn->lock);
794 : }
795 : /*
796 : * Detach the list from the object now so that we don't pin the inode until
797 : * all mark references get dropped; that would lead to strange results such
798 : * as delaying inode deletion or blocking unmount.
799 : */
800 0 : objp = fsnotify_detach_connector_from_object(conn, &type);
801 0 : spin_unlock(&conn->lock);
802 0 : if (old_mark)
803 0 : fsnotify_put_mark(old_mark);
804 0 : fsnotify_drop_object(type, objp);
805 : }
806 :
807 : /*
808 : * Nothing fancy, just initialize lists and locks and counters.
809 : */
810 86 : void fsnotify_init_mark(struct fsnotify_mark *mark,
811 : struct fsnotify_group *group)
812 : {
813 86 : memset(mark, 0, sizeof(*mark));
814 86 : spin_lock_init(&mark->lock);
815 86 : refcount_set(&mark->refcnt, 1);
816 86 : fsnotify_get_group(group);
817 86 : mark->group = group;
818 86 : WRITE_ONCE(mark->connector, NULL);
819 86 : }
820 : EXPORT_SYMBOL_GPL(fsnotify_init_mark);
821 :
822 : /*
823 : * Destroy all marks in destroy_list, waiting for the SRCU period to finish
824 : * before actually freeing the marks.
825 : */
826 34 : static void fsnotify_mark_destroy_workfn(struct work_struct *work)
827 : {
828 34 : struct fsnotify_mark *mark, *next;
829 34 : struct list_head private_destroy_list;
830 :
831 34 : spin_lock(&destroy_lock);
832 : /* exchange the list head */
833 34 : list_replace_init(&destroy_list, &private_destroy_list);
834 34 : spin_unlock(&destroy_lock);
835 :
836 34 : synchronize_srcu(&fsnotify_mark_srcu);
837 :
838 71 : list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
839 37 : list_del_init(&mark->g_list);
840 37 : fsnotify_final_mark_destroy(mark);
841 : }
842 34 : }
843 :
844 : /* Wait for all marks queued for destruction to be actually destroyed */
845 7 : void fsnotify_wait_marks_destroyed(void)
846 : {
847 7 : flush_delayed_work(&reaper_work);
848 7 : }
849 : EXPORT_SYMBOL_GPL(fsnotify_wait_marks_destroyed);