/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

#include <asm/ioctls.h>
static atomic_t inotify_cookie;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances;
int inotify_max_user_watches;
int inotify_max_queued_events;
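
/*
 * For example (an illustrative sketch, not part of this file), an
 * administrator can raise the event-queue limit at run time with:
 *
 *	echo 32768 > /proc/sys/fs/inotify/max_queued_events
 *
 * The default values are assigned in inotify_setup() below.
 */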
/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronize shrink_icache_memory())
 * 	inode_lock (protects the super_block->s_inodes list)
 * 	inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
 * 		inotify_dev->sem (protects inotify_device and watches->d_list)
 */

/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_device: Lifetime is from inotify_init() until release.  Additional
 * references can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() until the final
 * put_inotify_watch().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */
/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct idr		idr;		/* idr mapping wd -> watch */
	struct semaphore	sem;		/* protects this bad boy */
	struct list_head	events;		/* list of queued events */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
	u32			last_wd;	/* the last wd allocated */
};
/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};
/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
	struct list_head	d_list;	/* entry in inotify_device's list */
	struct list_head	i_list;	/* entry in inode's list */
	atomic_t		count;	/* reference count */
	struct inotify_device	*dev;	/* associated device */
	struct inode		*inode;	/* associated inode */
	s32			wd;	/* watch descriptor */
	u32			mask;	/* event mask for this watch */
};
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		idr_destroy(&dev->idr);
		free_uid(dev->user);
		kfree(dev);
	}
}

static inline void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		put_inotify_dev(watch->dev);
		iput(watch->inode);
		kmem_cache_free(watch_cachep, watch);
	}
}
/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);
	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
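		 *
		 * A worked example (assuming the usual 16-byte struct
		 * inotify_event): the name "foo" gives len = 4 including the
		 * trailing NUL, so rem = 12 and event.len = 16; a len of 20
		 * gives rem = 12 and event.len = 32.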
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}
/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->sem.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}
/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
				    struct inotify_watch *watch, u32 mask,
				    u32 cookie, const char *name)
{
	struct inotify_kernel_event *kevent, *last;

	/* coalescing: drop this event if it is a dupe of the previous */
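	/*
	 * (For example, two back-to-back IN_MODIFY events on the same wd
	 * with the same name collapse into a single queued event.  This is
	 * an illustration of the check below, not an API guarantee.)
	 */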
	if (!list_empty(&dev->events)) {
		last = list_entry(dev->events.prev,
				  struct inotify_kernel_event, list);
		if (last->event.mask == mask && last->event.wd == watch->wd &&
				last->event.cookie == cookie) {
			const char *lastname = last->name;

			if (!name && !lastname)
				return;
			if (name && lastname && !strcmp(lastname, name))
				return;
		}
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		return;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(watch->wd, mask, cookie, name);

	if (unlikely(!kevent))
		return;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
}
/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}
/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;

		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}
/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
			      struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd, &watch->wd);
	} while (ret == -EAGAIN);

	return ret;
}
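
/*
 * Note on the loop above: idr_get_new_above() returns -EAGAIN when it has
 * exhausted the nodes preallocated by idr_pre_get(), so we simply
 * preallocate again and retry until the allocation either succeeds or
 * fails for a reason other than memory reservation.
 */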
/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd)
{
	int error;

	error = __user_walk(dirname, LOOKUP_FOLLOW, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = permission(nd->dentry->d_inode, MAY_READ, NULL);
	if (error)
		path_release(nd);
	return error;
}
/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->sem.  Calls inotify_dev_get_wd() so may sleep.
 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
 */
static struct inotify_watch *create_watch(struct inotify_device *dev,
					  u32 mask, struct inode *inode)
{
	struct inotify_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return ERR_PTR(-ENOSPC);

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return ERR_PTR(-ENOMEM);

	ret = inotify_dev_get_wd(dev, watch);
	if (unlikely(ret)) {
		kmem_cache_free(watch_cachep, watch);
		return ERR_PTR(ret);
	}

	dev->last_wd = watch->wd;
	watch->mask = mask;
	atomic_set(&watch->count, 0);
	INIT_LIST_HEAD(&watch->d_list);
	INIT_LIST_HEAD(&watch->i_list);

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* bump our own count, corresponding to our entry in dev->watches */
	get_inotify_watch(watch);

	atomic_inc(&dev->user->inotify_watches);

	return watch;
}
/*
 * inode_find_dev - find the watch associated with the given inode and dev
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
					    struct inotify_device *dev)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->dev == dev)
			return watch;
	}

	return NULL;
}
/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_device *dev)
{
	list_del(&watch->i_list);
	list_del(&watch->d_list);

	atomic_dec(&dev->user->inotify_watches);
	idr_remove(&dev->idr, watch->wd);
	put_inotify_watch(watch);
}
/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  The inode is not
 * iput() directly here; the watch's reference to it is dropped when the
 * final put_inotify_watch() releases the watch.
 */
static void remove_watch(struct inotify_watch *watch, struct inotify_device *dev)
{
	inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
	remove_watch_no_event(watch, dev);
}
/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless; we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/* Kernel API */
/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_device *dev = watch->dev;
			get_inotify_watch(watch);
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, mask, cookie, name);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, dev);
			up(&dev->sem);
			put_inotify_watch(watch);
		}
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
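
/*
 * An illustrative caller (a sketch of how the VFS hooks use this API; the
 * actual call sites live elsewhere in the VFS):
 *
 *	inotify_inode_queue_event(dir, IN_CREATE, 0, dentry->d_name.name);
 *
 * would notify every watch on the directory 'dir' that a child with the
 * given name was created.
 */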
/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
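
/*
 * For example (again a sketch of a typical hook, not a call site in this
 * file), notifying a watched parent directory that a child was read:
 *
 *	inotify_dentry_parent_queue_event(dentry, IN_ACCESS, 0,
 *					  dentry->d_name.name);
 */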
/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
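
/*
 * Cookies pair related events.  A rename, for instance, is reported as an
 * IN_MOVED_FROM event on the old directory and an IN_MOVED_TO event on the
 * new one, both carrying the same cookie so that user-space can match them
 * up.  Roughly (a sketch; the real hook lives in the VFS):
 *
 *	u32 cookie = inotify_get_cookie();
 *	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name);
 *	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name);
 */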
/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case the remove_watch() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_sem keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		down(&inode->inotify_sem);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_device *dev = watch->dev;
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
			remove_watch(watch, dev);
			up(&dev->sem);
		}
		up(&inode->inotify_sem);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_device *dev = watch->dev;
		down(&dev->sem);
		remove_watch(watch, dev);
		up(&dev->sem);
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	down(&dev->sem);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	up(&dev->sem);

	return ret;
}
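
/*
 * User-space can therefore multiplex an inotify fd with poll(2) or
 * select(2).  A sketch (all names are illustrative):
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(inotify_fd, buf, sizeof(buf));
 */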
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof (struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		down(&dev->sem);
		events = !list_empty(&dev->events);
		up(&dev->sem);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	down(&dev->sem);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	up(&dev->sem);

	return ret;
}
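
/*
 * The stream produced above is a packed sequence of struct inotify_event
 * records, each followed by event.len bytes of (padded) name.  A user-space
 * consumer walks it roughly like this (a sketch; buf and len come from a
 * successful read()):
 *
 *	size_t i = 0;
 *	while (i + sizeof(struct inotify_event) <= len) {
 *		struct inotify_event *ev = (struct inotify_event *)(buf + i);
 *		handle(ev->wd, ev->mask, ev->len ? ev->name : NULL);
 *		i += sizeof(struct inotify_event) + ev->len;
 *	}
 */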
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/*
	 * Destroy all of the watches on this device.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_sem before dev->sem.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		down(&dev->sem);
		watches = &dev->watches;
		if (list_empty(watches)) {
			up(&dev->sem);
			break;
		}
		watch = list_entry(watches->next, struct inotify_watch, d_list);
		get_inotify_watch(watch);
		up(&dev->sem);

		inode = watch->inode;
		down(&inode->inotify_sem);
		down(&dev->sem);
		remove_watch_no_event(watch, dev);
		up(&dev->sem);
		up(&inode->inotify_sem);
		put_inotify_watch(watch);
	}

	/* destroy all of the events on this device */
	down(&dev->sem);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	up(&dev->sem);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}
/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
	struct inotify_watch *watch;
	struct inode *inode;

	down(&dev->sem);
	watch = idr_find(&dev->idr, wd);
	if (unlikely(!watch)) {
		up(&dev->sem);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	up(&dev->sem);

	down(&inode->inotify_sem);
	down(&dev->sem);

	/* make sure that we did not race with another removal */
	if (likely(idr_find(&dev->idr, wd) == watch))
		remove_watch(watch, dev);

	up(&dev->sem);
	up(&inode->inotify_sem);
	put_inotify_watch(watch);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}
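
/*
 * FIONREAD reports how many bytes a subsequent read() could return, which
 * lets user-space size its buffer first (a sketch):
 *
 *	int avail;
 *	ioctl(inotify_fd, FIONREAD, &avail);
 */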
static struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};
asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	idr_init(&dev->idr);
	INIT_LIST_HEAD(&dev->events);
	INIT_LIST_HEAD(&dev->watches);
	init_waitqueue_head(&dev->wq);
	sema_init(&dev->sem, 1);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	dev->last_wd = 0;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inotify_watch *watch, *old;
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	ret = find_inode(path, &nd);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	down(&inode->inotify_sem);
	down(&dev->sem);

	/* don't let user-space set invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS;
	if (unlikely(!mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Handle the case of re-adding a watch on an (inode,dev) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_dev(inode, dev);
	if (unlikely(old)) {
		old->mask = mask;
		ret = old->wd;
		goto out;
	}

	watch = create_watch(dev, mask, inode);
	if (unlikely(IS_ERR(watch))) {
		ret = PTR_ERR(watch);
		goto out;
	}

	/* Add the watch to the device's and the inode's list */
	list_add(&watch->d_list, &dev->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	ret = watch->wd;
out:
	up(&dev->sem);
	up(&inode->inotify_sem);
	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;
	ret = inotify_ignore(dev, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}
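
/*
 * Taken together, the three system calls give user-space the familiar
 * pattern below (a sketch assuming libc wrappers for the syscalls; on a
 * libc without them, syscall(2) must be used directly):
 *
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/etc", IN_MODIFY | IN_CREATE);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));	/- blocks until events -/
 *	inotify_rm_watch(fd, wd);
 *	close(fd);	/- releases the instance and its watches -/
 */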
static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};
/*
 * inotify_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	atomic_set(&inotify_cookie, 0);

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	return 0;
}

module_init(inotify_setup);