Line data : Source code
   1      : // SPDX-License-Identifier: GPL-2.0-or-later
   2      : /*
   3      :  * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
   4      :  */
   5      : 
   6      : #include <linux/list.h>
   7      : #include <linux/mutex.h>
   8      : #include <linux/slab.h>
   9      : #include <linux/srcu.h>
  10      : #include <linux/rculist.h>
  11      : #include <linux/wait.h>
  12      : #include <linux/memcontrol.h>
  13      : 
  14      : #include <linux/fsnotify_backend.h>
  15      : #include "fsnotify.h"
  16      : 
  17      : #include <linux/atomic.h>
  18      : 
  19      : /*
  20      :  * Final freeing of a group
  21      :  */
  22    7 : static void fsnotify_final_destroy_group(struct fsnotify_group *group)
  23      : {
  24    7 : 	if (group->ops->free_group_priv)
  25    7 : 		group->ops->free_group_priv(group);
  26      : 
  27    7 : 	mem_cgroup_put(group->memcg);
  28    7 : 	mutex_destroy(&group->mark_mutex);
  29      : 
  30    7 : 	kfree(group);
  31    7 : }
  32      : 
  33      : /*
  34      :  * Stop queueing new events for this group. Once this function returns,
  35      :  * fsnotify_add_event() will not add any new events to the group's queue.
  36      :  */
  37    7 : void fsnotify_group_stop_queueing(struct fsnotify_group *group)
  38      : {
  39    7 : 	spin_lock(&group->notification_lock);
  40    7 : 	group->shutdown = true;
  41    7 : 	spin_unlock(&group->notification_lock);
  42    7 : }
  43      : 
  44      : /*
  45      :  * Trying to get rid of a group. Remove all marks, flush all events and
  46      :  * release the group reference.
  47      :  * Note that another thread calling fsnotify_clear_marks_by_group() may still
  48      :  * hold a ref to the group.
  49      :  */
  50    7 : void fsnotify_destroy_group(struct fsnotify_group *group)
  51      : {
  52      : 	/*
  53      : 	 * Stop queueing new events. The code below is careful enough not to
  54      : 	 * require this, but fanotify needs to stop queueing events even before
  55      : 	 * fsnotify_destroy_group() is called and this makes the other callers
  56      : 	 * of fsnotify_destroy_group() see the same behavior.
  57      : 	 */
  58    7 : 	fsnotify_group_stop_queueing(group);
  59      : 
  60      : 	/* Clear all marks for this group and queue them for destruction */
  61    7 : 	fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES_MASK);
  62      : 
  63      : 	/*
  64      : 	 * Some marks can still be pinned when waiting for response from
  65      : 	 * userspace. Wait for those now. fsnotify_prepare_user_wait() will
  66      : 	 * not succeed now so this wait is race-free.
  67      : 	 */
  68    7 : 	wait_event(group->notification_waitq, !atomic_read(&group->user_waits));
  69      : 
  70      : 	/*
  71      : 	 * Wait until all marks get really destroyed. We could actually destroy
  72      : 	 * them ourselves instead of waiting for the worker to do it, however
  73      : 	 * that would be racy as the worker can already be processing some
  74      : 	 * marks before we even entered fsnotify_destroy_group().
  75      : 	 */
  76    7 : 	fsnotify_wait_marks_destroyed();
  77      : 
  78      : 	/*
  79      : 	 * Since we have waited for fsnotify_mark_srcu in
  80      : 	 * fsnotify_mark_destroy_list() there can be no outstanding event
  81      : 	 * notification against this group. So clearing the notification queue
  82      : 	 * of all events is reliable now.
  83      : 	 */
  84    7 : 	fsnotify_flush_notify(group);
  85      : 
  86      : 	/*
  87      : 	 * Destroy the overflow event (we cannot use fsnotify_destroy_event()
  88      : 	 * as that deliberately ignores overflow events).
  89      : 	 */
  90    7 : 	if (group->overflow_event)
  91    7 : 		group->ops->free_event(group->overflow_event);
  92      : 
  93    7 : 	fsnotify_put_group(group);
  94    7 : }
  95      : 
  96      : /*
  97      :  * Get a reference to a group.
  98      :  */
  99   86 : void fsnotify_get_group(struct fsnotify_group *group)
 100      : {
 101   86 : 	refcount_inc(&group->refcnt);
 102   86 : }
 103      : 
 104      : /*
 105      :  * Drop a reference to a group. Free it if that was the last reference.
 106      :  */
 107   44 : void fsnotify_put_group(struct fsnotify_group *group)
 108      : {
 109   44 : 	if (refcount_dec_and_test(&group->refcnt))
 110    7 : 		fsnotify_final_destroy_group(group);
 111   44 : }
 112      : EXPORT_SYMBOL_GPL(fsnotify_put_group);
 113      : 
 114   20 : static struct fsnotify_group *__fsnotify_alloc_group(
 115      : 				const struct fsnotify_ops *ops, gfp_t gfp)
 116      : {
 117   20 : 	struct fsnotify_group *group;
 118      : 
 119   20 : 	group = kzalloc(sizeof(struct fsnotify_group), gfp);
 120   20 : 	if (!group)
 121   20 : 		return ERR_PTR(-ENOMEM);
 122      : 
 123      : 	/* set to 0 when there are no external references to this group */
 124   20 : 	refcount_set(&group->refcnt, 1);
 125   20 : 	atomic_set(&group->num_marks, 0);
 126   20 : 	atomic_set(&group->user_waits, 0);
 127      : 
 128   20 : 	spin_lock_init(&group->notification_lock);
 129   20 : 	INIT_LIST_HEAD(&group->notification_list);
 130   20 : 	init_waitqueue_head(&group->notification_waitq);
 131   20 : 	group->max_events = UINT_MAX;
 132      : 
 133   20 : 	mutex_init(&group->mark_mutex);
 134   20 : 	INIT_LIST_HEAD(&group->marks_list);
 135      : 
 136   20 : 	group->ops = ops;
 137      : 
 138   20 : 	return group;
 139      : }
 140      : 
 141      : /*
 142      :  * Create a new fsnotify_group and hold a reference for the group returned.
 143      :  */
 144    0 : struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
 145      : {
 146    0 : 	return __fsnotify_alloc_group(ops, GFP_KERNEL);
 147      : }
 148      : EXPORT_SYMBOL_GPL(fsnotify_alloc_group);
 149      : 
 150      : /*
 151      :  * Create a new fsnotify_group and hold a reference for the group returned.
 152      :  */
 153   20 : struct fsnotify_group *fsnotify_alloc_user_group(const struct fsnotify_ops *ops)
 154      : {
 155   20 : 	return __fsnotify_alloc_group(ops, GFP_KERNEL_ACCOUNT);
 156      : }
 157      : EXPORT_SYMBOL_GPL(fsnotify_alloc_user_group);
 158      : 
 159    0 : int fsnotify_fasync(int fd, struct file *file, int on)
 160      : {
 161    0 : 	struct fsnotify_group *group = file->private_data;
 162      : 
 163    0 : 	return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
 164      : }
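
Taken together, this file covers the lifecycle of an fsnotify group: a backend allocates a group with fsnotify_alloc_group() (or fsnotify_alloc_user_group() for memcg-accounted allocations), pins it with fsnotify_get_group()/fsnotify_put_group(), and tears it down with fsnotify_destroy_group(), which stops queueing, clears marks, flushes pending events and drops the allocation reference. The sketch below shows how a hypothetical backend might drive this API; it is not taken from the kernel tree. Everything named example_* is a placeholder, only the two callbacks this file actually invokes (->free_group_priv and ->free_event) are filled in, and a real backend would also have to provide the event-handling callbacks of struct fsnotify_ops that never appear in this file.

/*
 * Minimal usage sketch (hypothetical backend, not kernel code).
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fsnotify_backend.h>

static void example_free_group_priv(struct fsnotify_group *group)
{
	/* release any backend-private state hung off the group */
}

static void example_free_event(struct fsnotify_event *event)
{
	kfree(event);
}

static const struct fsnotify_ops example_fsnotify_ops = {
	/* a real backend must also set its event-handling callbacks */
	.free_group_priv = example_free_group_priv,
	.free_event      = example_free_event,
};

static int example_setup_and_teardown(void)
{
	struct fsnotify_group *group;

	/* GFP_KERNEL allocation; returns ERR_PTR(-ENOMEM) on failure */
	group = fsnotify_alloc_group(&example_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* ... add marks and let events be queued against the group ... */

	/*
	 * Stops queueing, clears marks, flushes pending events and drops
	 * the reference taken at allocation time.
	 */
	fsnotify_destroy_group(group);
	return 0;
}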