/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h>		/* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h>		/* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h>	/* roundup() */
#include <linux/magic.h>	/* superblock magic number */
#include <linux/mount.h>	/* mntget */
#include <linux/namei.h>	/* LOOKUP_FOLLOW */
#include <linux/path.h>		/* struct path */
#include <linux/sched.h>	/* struct user */
#include <linux/slab.h>		/* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>
static struct vfsmount *inotify_mnt __read_mostly;

/*
 * This just sits here and wastes global memory; it is used only to pad
 * userspace messages with zeros.
 */
static struct inotify_event nul_inotify_event;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
static struct fsnotify_event *inotify_ignored_event;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{ .ctl_name = 0 }
};

#endif /* CONFIG_SYSCTL */
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* every mark should accept its own IN_IGNORED and care about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* keep only the event bits the caller asked for; drop open/lookup flags */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
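
/*
 * Illustrative round trip (a sketch, not part of the original file):
 * userspace asks for IN_MODIFY, inotify_arg_to_mask() silently ORs in the
 * bookkeeping bits, and inotify_mask_to_arg() filters internal-only bits
 * back out of an event mask before it is reported:
 *
 *	__u32 kmask = inotify_arg_to_mask(IN_MODIFY);
 *	// kmask == FS_IN_IGNORED | FS_EVENT_ON_CHILD | IN_MODIFY
 *	u32 arg = inotify_mask_to_arg(FS_EVENT_ON_CHILD | IN_MODIFY);
 *	// arg == IN_MODIFY; FS_EVENT_ON_CHILD never reaches userspace
 */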
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
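
/*
 * From userspace, the poll hook above behaves like any other pollable fd.
 * A hypothetical consumer (illustrative only) can block until at least one
 * event is queued:
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// wakes when POLLIN | POLLRDNORM is set
 */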
/*
 * Get one queued event if it exists and is small enough to fit in "count".
 * Return an error pointer if the event is too large.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	event_size += roundup(event->name_len, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
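
/*
 * Worked example of the size check above, assuming the usual 16-byte
 * struct inotify_event (illustrative numbers): an event named "foo.c"
 * has name_len == 5, roundup(5, 16) == 16, so event_size == 32.  A
 * read() with count == 31 gets ERR_PTR(-EINVAL); count == 32 is the
 * smallest buffer that fits this event.
 */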
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/* round up event->name_len so it is a multiple of event_size */
	name_len = roundup(event->name_len, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros.  The zeros come from
	 * nul_inotify_event.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's from nul_inotify_event */
		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
			return -EFAULT;
		event_size += name_len;
	}

	return event_size;
}
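
/*
 * Userspace view of the layout produced above -- a minimal, hypothetical
 * reader, illustrative only ("fd" is an inotify instance):
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf)), off = 0;
 *	while (off < len) {
 *		struct inotify_event *ev = (struct inotify_event *)(buf + off);
 *		// ev->len is the padded name length; 0 means no name
 *		if (ev->len)
 *			printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask, ev->name);
 *		off += sizeof(struct inotify_event) + ev->len;
 *	}
 */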
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
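
/*
 * inotify_fasync plugs the instance into the standard SIGIO machinery.
 * A hypothetical userspace consumer would arm it with fcntl (illustrative
 * sketch, not part of the original file):
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *	// SIGIO now fires when events are queued on the instance
 */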
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			send_len += roundup(event->name_len,
					    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
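
/*
 * The FIONREAD case above lets userspace size a buffer before reading.
 * Hypothetical caller, illustrative only (more events may of course be
 * queued between the ioctl and the read):
 *
 *	unsigned int avail;
 *	ioctl(fd, FIONREAD, &avail);
 *	char *buf = malloc(avail);
 *	read(fd, buf, avail);	// fits everything queued at ioctl time
 */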
static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
/*
 * When, for whatever reason, inotify is done with a mark (or what used to be a
 * watch) we need to remove that watch from the idr and we need to send IN_IGNORED
 * for the given wd.
 *
 * There is a bit of recursion here.  The loop looks like:
 *	inotify_destroy_mark_entry -> fsnotify_destroy_mark_by_entry ->
 *	inotify_freeing_mark -> inotify_destroy_mark_entry -> restart
 * But the loop is broken in 2 places.  fsnotify_destroy_mark_by_entry sets
 * entry->group = NULL before the call to inotify_freeing_mark, so the if (egroup)
 * test below will not call back to fsnotify again.  But even if that test wasn't
 * there this would still be safe since fsnotify_destroy_mark_by_entry() is
 * safe from recursion.
 */
void inotify_destroy_mark_entry(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	struct fsnotify_group *egroup;
	struct idr *idr;

	spin_lock(&entry->lock);
	egroup = entry->group;

	/* if egroup we aren't really done and something might still send events
	 * for this inode, on the callback we'll send the IN_IGNORED */
	if (egroup) {
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark_by_entry(entry);
		return;
	}
	spin_unlock(&entry->lock);

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);

	/* did the private data get added? */
	if (list_empty(&fsn_event_priv->event_list))
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:
	/* remove this entry from the idr */
	spin_lock(&group->inotify_data.idr_lock);
	idr = &group->inotify_data.idr;
	idr_remove(idr, ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);

	/* removed from idr, drop that reference */
	fsnotify_put_mark(entry);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	struct fsnotify_mark_entry *entry = NULL;
	struct inotify_inode_mark_entry *ientry;
	int ret = 0;
	int add = (arg & IN_MASK_ADD);
	__u32 mask;
	__u32 old_mask, new_mask;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!ientry))
		return -ENOMEM;
	/* we set the mask at the end after attaching it */
	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
	ientry->fsn_entry.mask = 0;
	ientry->wd = 0;

find_entry:
	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (entry) {
		kmem_cache_free(inotify_inode_mark_cachep, ientry);
		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	} else {
		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
			ret = -ENOSPC;
			goto out_err;
		}

		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
		if (ret == -EEXIST)
			goto find_entry;
		else if (ret)
			goto out_err;

		entry = &ientry->fsn_entry;
retry:
		ret = -ENOMEM;
		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
			goto out_err;

		spin_lock(&group->inotify_data.idr_lock);
		/* if entry is added to the idr we keep the reference obtained
		 * through fsnotify_add_mark.  remember to drop this reference
		 * when entry is removed from the idr */
		ret = idr_get_new_above(&group->inotify_data.idr, entry,
					++group->inotify_data.last_wd,
					&ientry->wd);
		spin_unlock(&group->inotify_data.idr_lock);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			goto out_err;
		}
		atomic_inc(&group->inotify_data.user->inotify_watches);
	}

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	ret = ientry->wd;

out_err:
	/* this isn't supposed to happen; just kill the watch */
	if (ret < 0) {
		fsnotify_destroy_mark_by_entry(entry);
		fsnotify_put_mark(entry);
	}

	return ret;
}
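
/*
 * Worked example of the recalculation logic above (illustrative values):
 * if old_mask == (IN_MODIFY | IN_CREATE) and the caller replaces it with
 * plain IN_MODIFY (no IN_MASK_ADD), then dropped == IN_CREATE and both
 * the inode and group masks get recomputed, since this mark may have been
 * the only one contributing IN_CREATE.  If instead the caller adds
 * IN_DELETE and inode->i_fsnotify_mask already contains IN_DELETE from
 * another group's mark, do_inode == 0 and the inode-side recalculation
 * is skipped.
 */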
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;
	unsigned int grp_num;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}
/* inotify syscalls */
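
/*
 * A minimal, hypothetical userspace session tying the syscalls below
 * together (illustrative only; error handling omitted):
 *
 *	int fd = inotify_init1(IN_NONBLOCK);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	// ... read(fd, ...) to drain events for /tmp ...
 *	inotify_rm_watch(fd, wd);	// queues an IN_IGNORED for wd
 *	close(fd);			// releases the group and its marks
 */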
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = group;

	atomic_inc(&user->inotify_devs);

	fd_install(fd, filp);

	return fd;

out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	if (unlikely(ret))
		goto path_put_and_out;

path_put_and_out:
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct fsnotify_mark_entry *entry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	group = filp->private_data;

	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry)) {
		spin_unlock(&group->inotify_data.idr_lock);
		ret = -EINVAL;
		goto out;
	}
	fsnotify_get_mark(entry);
	spin_unlock(&group->inotify_data.idr_lock);

	inotify_destroy_mark_entry(entry, group);
	fsnotify_put_mark(entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}
static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
	inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
	if (!inotify_ignored_event)
		panic("unable to allocate the inotify ignored event\n");

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);