/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
22 * the event happened. When inotify gets an event it will need to add that
23 * event to the group notify queue. Since a single event might need to be on
24 * multiple group's notification queues we can't add the event directly to each
25 * queue and instead add a small "event_holder" to each queue. This event_holder
26 * has a pointer back to the original event. Since the majority of events are
27 * going to end up on one, and only one, notification queue we embed one
28 * event_holder into each event. This means we have a single allocation instead
29 * of always needing two. If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * used.
 */
35 #include <linux/init.h>
36 #include <linux/kernel.h>
37 #include <linux/list.h>
38 #include <linux/mount.h>
39 #include <linux/mutex.h>
40 #include <linux/namei.h>
41 #include <linux/path.h>
42 #include <linux/slab.h>
43 #include <linux/spinlock.h>
45 #include <asm/atomic.h>
47 #include <linux/fsnotify_backend.h>
/* slab cache for fsnotify_event structs (each embeds one event_holder) */
static struct kmem_cache *fsnotify_event_cachep;
/* slab cache for extra holders used when one event sits on several queues */
static struct kmem_cache *fsnotify_event_holder_cachep;

/*
 * This is a magic event we send when the q is too full.  Since it doesn't
 * hold real event information we just keep one system wide and use it any time
 * it is needed.  Its refcnt is set 1 at kernel init time and will never
 * get set to 0 so it will never get 'freed'
 */
static struct fsnotify_event q_overflow_event;
60 /* return true if the notify queue is empty, false otherwise */
61 bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
63 BUG_ON(!mutex_is_locked(&group->notification_mutex));
64 return list_empty(&group->notification_list) ? true : false;
67 void fsnotify_get_event(struct fsnotify_event *event)
69 atomic_inc(&event->refcnt);
72 void fsnotify_put_event(struct fsnotify_event *event)
77 if (atomic_dec_and_test(&event->refcnt)) {
78 if (event->data_type == FSNOTIFY_EVENT_PATH)
79 path_put(&event->path);
81 kmem_cache_free(fsnotify_event_cachep, event);
85 struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
87 return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL);
90 void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
92 kmem_cache_free(fsnotify_event_holder_cachep, holder);
96 * check if 2 events contain the same information.
98 static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
100 if ((old->mask == new->mask) &&
101 (old->to_tell == new->to_tell) &&
102 (old->data_type == new->data_type)) {
103 switch (old->data_type) {
104 case (FSNOTIFY_EVENT_INODE):
105 if (old->inode == new->inode)
108 case (FSNOTIFY_EVENT_PATH):
109 if ((old->path.mnt == new->path.mnt) &&
110 (old->path.dentry == new->path.dentry))
112 case (FSNOTIFY_EVENT_NONE):
120 * Add an event to the group notification queue. The group can later pull this
121 * event off the queue to deal with. If the event is successfully added to the
122 * group's notification queue, a reference is taken on event.
124 int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event)
126 struct fsnotify_event_holder *holder = NULL;
127 struct list_head *list = &group->notification_list;
128 struct fsnotify_event_holder *last_holder;
129 struct fsnotify_event *last_event;
132 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
133 * Check if we expect to be able to use that holder. If not alloc a new
135 * For the overflow event it's possible that something will use the in
136 * event holder before we get the lock so we may need to jump back and
137 * alloc a new holder, this can't happen for most events...
139 if (!list_empty(&event->holder.event_list)) {
141 holder = fsnotify_alloc_event_holder();
146 mutex_lock(&group->notification_mutex);
148 if (group->q_len >= group->max_events)
149 event = &q_overflow_event;
151 spin_lock(&event->lock);
153 if (list_empty(&event->holder.event_list)) {
154 if (unlikely(holder))
155 fsnotify_destroy_event_holder(holder);
156 holder = &event->holder;
157 } else if (unlikely(!holder)) {
158 /* between the time we checked above and got the lock the in
159 * event holder was used, go back and get a new one */
160 spin_unlock(&event->lock);
161 mutex_unlock(&group->notification_mutex);
165 if (!list_empty(list)) {
166 last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
167 last_event = last_holder->event;
168 if (event_compare(last_event, event)) {
169 spin_unlock(&event->lock);
170 mutex_unlock(&group->notification_mutex);
171 if (holder != &event->holder)
172 fsnotify_destroy_event_holder(holder);
178 holder->event = event;
180 fsnotify_get_event(event);
181 list_add_tail(&holder->event_list, list);
182 spin_unlock(&event->lock);
183 mutex_unlock(&group->notification_mutex);
185 wake_up(&group->notification_waitq);
190 * Remove and return the first event from the notification list. There is a
191 * reference held on this event since it was on the list. It is the responsibility
192 * of the caller to drop this reference.
194 struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
196 struct fsnotify_event *event;
197 struct fsnotify_event_holder *holder;
199 BUG_ON(!mutex_is_locked(&group->notification_mutex));
201 holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
203 event = holder->event;
205 spin_lock(&event->lock);
206 holder->event = NULL;
207 list_del_init(&holder->event_list);
208 spin_unlock(&event->lock);
210 /* event == holder means we are referenced through the in event holder */
211 if (holder != &event->holder)
212 fsnotify_destroy_event_holder(holder);
220 * This will not remove the event, that must be done with fsnotify_remove_notify_event()
222 struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
224 struct fsnotify_event *event;
225 struct fsnotify_event_holder *holder;
227 BUG_ON(!mutex_is_locked(&group->notification_mutex));
229 holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
230 event = holder->event;
236 * Called when a group is being torn down to clean up any outstanding
237 * event notifications.
239 void fsnotify_flush_notify(struct fsnotify_group *group)
241 struct fsnotify_event *event;
243 mutex_lock(&group->notification_mutex);
244 while (!fsnotify_notify_queue_is_empty(group)) {
245 event = fsnotify_remove_notify_event(group);
246 fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
248 mutex_unlock(&group->notification_mutex);
251 static void initialize_event(struct fsnotify_event *event)
253 event->holder.event = NULL;
254 INIT_LIST_HEAD(&event->holder.event_list);
255 atomic_set(&event->refcnt, 1);
257 spin_lock_init(&event->lock);
259 event->path.dentry = NULL;
260 event->path.mnt = NULL;
262 event->data_type = FSNOTIFY_EVENT_NONE;
264 event->to_tell = NULL;
/*
 * fsnotify_create_event - Allocate a new event which will be sent to each
 * group's handle_event function if the group was interested in this
 * particular event.
 *
 * @to_tell the inode which is supposed to receive the event (sometimes a
 *	parent of the inode to which the event happened).
 * @mask what actually happened.
 * @data pointer to the object which was actually affected
 * @data_type flag indication if the data is a file, path, inode, nothing...
 */
278 struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
279 void *data, int data_type)
281 struct fsnotify_event *event;
283 event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
287 initialize_event(event);
288 event->to_tell = to_tell;
291 case FSNOTIFY_EVENT_FILE: {
292 struct file *file = data;
293 struct path *path = &file->f_path;
294 event->path.dentry = path->dentry;
295 event->path.mnt = path->mnt;
296 path_get(&event->path);
297 event->data_type = FSNOTIFY_EVENT_PATH;
300 case FSNOTIFY_EVENT_PATH: {
301 struct path *path = data;
302 event->path.dentry = path->dentry;
303 event->path.mnt = path->mnt;
304 path_get(&event->path);
305 event->data_type = FSNOTIFY_EVENT_PATH;
308 case FSNOTIFY_EVENT_INODE:
310 event->data_type = FSNOTIFY_EVENT_INODE;
312 case FSNOTIFY_EVENT_NONE:
314 event->path.dentry = NULL;
315 event->path.mnt = NULL;
326 __init int fsnotify_notification_init(void)
328 fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
329 fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
331 initialize_event(&q_overflow_event);
332 q_overflow_event.mask = FS_Q_OVERFLOW;
336 subsys_initcall(fsnotify_notification_init);