/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

#include <asm/ioctls.h>
static atomic_t inotify_cookie;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances;
int inotify_max_user_watches;
int inotify_max_queued_events;
/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
 * inotify_dev->sem (protects inotify_device and watches->d_list)
 */
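/*
 * For example, sys_inotify_add_watch() below acquires inode->inotify_sem
 * and then dev->sem, never the reverse, matching the ordering above.
 */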
/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_device: Lifetime is from inotify_init() until release.  Additional
 * references can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() until the final
 * put_inotify_watch().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */
/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct idr		idr;		/* idr mapping wd -> watch */
	struct semaphore	sem;		/* protects this structure */
	struct list_head	events;		/* list of queued events */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};
/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};
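/*
 * As seen by read(), the queue is a stream of variable-length records: each
 * struct inotify_event is followed by event.len bytes of NUL-padded filename
 * (event.len may be zero when no name is associated with the event).
 */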
/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
	struct list_head	d_list;	/* entry in inotify_device's list */
	struct list_head	i_list;	/* entry in inode's list */
	atomic_t		count;	/* reference count */
	struct inotify_device	*dev;	/* associated device */
	struct inode		*inode;	/* associated inode */
	s32			wd;	/* watch descriptor */
	u32			mask;	/* event mask for this watch */
};
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
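/*
 * These tunables appear under /proc/sys/fs/inotify/.  For example, an
 * administrator could raise the per-instance queue limit with
 *
 *	echo 32768 > /proc/sys/fs/inotify/max_queued_events
 *
 * (32768 is an illustrative value; inotify_setup() defaults it to 16384).
 */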
static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}
static inline void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  Cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		put_inotify_dev(watch->dev);
		iput(watch->inode);
		kmem_cache_free(watch_cachep, watch);
	}
}
/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
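		/*
		 * Worked example: struct inotify_event is four 32-bit fields,
		 * so event_size is 16 bytes.  The name "foo" gives len == 4
		 * (including the trailing NUL), rem == 12, and event.len == 16.
		 * A 20-byte name instead gives rem == 16 - (20 % 16) == 12 and
		 * event.len == 32.
		 */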
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}
/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->sem.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}
/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
				    struct inotify_watch *watch, u32 mask,
				    u32 cookie, const char *name)
{
	struct inotify_kernel_event *kevent, *last;

	/* coalescing: drop this event if it is a dupe of the previous */
	if (!list_empty(&dev->events))
		last = list_entry(dev->events.prev,
				  struct inotify_kernel_event, list);
	else
		last = NULL;
	if (last && last->event.mask == mask && last->event.wd == watch->wd &&
			last->event.cookie == cookie) {
		const char *lastname = last->name;

		if (!name && !lastname)
			return;
		if (name && lastname && !strcmp(lastname, name))
			return;
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		return;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(watch->wd, mask, cookie, name);

	if (unlikely(!kevent))
		return;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
}
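/*
 * The coalescing check above means, for example, that a task rewriting the
 * same file in a tight loop queues a single IN_MODIFY event rather than one
 * per write, since each new event duplicates the one at the tail of the
 * queue.
 */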
/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}
/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;

		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}
/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
			      struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new(&dev->idr, watch, &watch->wd);
	} while (ret == -EAGAIN);

	return ret;
}
/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd)
{
	int error;

	error = __user_walk(dirname, LOOKUP_FOLLOW, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = permission(nd->dentry->d_inode, MAY_READ, NULL);
	if (error)
		path_release(nd);
	return error;
}
/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->sem.  Calls inotify_dev_get_wd() so may sleep.
 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
 */
static struct inotify_watch *create_watch(struct inotify_device *dev,
					  u32 mask, struct inode *inode)
{
	struct inotify_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return ERR_PTR(-ENOSPC);

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return ERR_PTR(-ENOMEM);

	ret = inotify_dev_get_wd(dev, watch);
	if (unlikely(ret)) {
		kmem_cache_free(watch_cachep, watch);
		return ERR_PTR(ret);
	}

	watch->mask = mask;
	atomic_set(&watch->count, 0);
	INIT_LIST_HEAD(&watch->d_list);
	INIT_LIST_HEAD(&watch->i_list);

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* bump our own count, corresponding to our entry in dev->watches */
	get_inotify_watch(watch);

	atomic_inc(&dev->user->inotify_watches);

	return watch;
}
/*
 * inode_find_dev - find the watch on the given inode that belongs to the
 * given device, if any.
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
					    struct inotify_device *dev)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->dev == dev)
			return watch;
	}

	return NULL;
}
/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_device *dev)
{
	list_del(&watch->i_list);
	list_del(&watch->d_list);

	atomic_dec(&dev->user->inotify_watches);
	idr_remove(&dev->idr, watch->wd);
	put_inotify_watch(watch);
}
/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  The reference on
 * the watch (and, through it, the inode) is dropped via put_inotify_watch();
 * we never iput() the inode here, so callers may remain atomic.
 */
static void remove_watch(struct inotify_watch *watch,
			 struct inotify_device *dev)
{
	inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
	remove_watch_no_event(watch, dev);
}
/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless; we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}
/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;

		if (watch_mask & mask) {
			struct inotify_device *dev = watch->dev;

			get_inotify_watch(watch);
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, mask, cookie, name);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, dev);
			up(&dev->sem);
			put_inotify_watch(watch);
		}
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
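/*
 * Typical callers are the fsnotify hooks; an unlink path, for instance,
 * notifies watchers of both the directory and the victim inode with
 * something like (illustrative sketch):
 *
 *	inotify_inode_queue_event(dir, IN_DELETE, 0, dentry->d_name.name);
 *	inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL);
 */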
/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
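/*
 * The cookie exists to pair the two halves of a rename: a mover fetches one
 * cookie and queues IN_MOVED_FROM against the old parent and IN_MOVED_TO
 * against the new parent with the same value, so user-space can match the
 * two events.
 */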
/**
 * inotify_unmount_inodes - an sb is unmounting.  Handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case the remove_watch() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_sem keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		down(&inode->inotify_sem);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_device *dev = watch->dev;

			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
			remove_watch(watch, dev);
			up(&dev->sem);
		}
		up(&inode->inotify_sem);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_device *dev = watch->dev;

		down(&dev->sem);
		remove_watch(watch, dev);
		up(&dev->sem);
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	down(&dev->sem);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	up(&dev->sem);

	return ret;
}
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof(struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		down(&dev->sem);
		events = !list_empty(&dev->events);
		up(&dev->sem);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	down(&dev->sem);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	up(&dev->sem);

	return ret;
}
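/*
 * Illustrative user-space consumer of the stream produced above (not kernel
 * code; it assumes the exported struct inotify_event layout with a trailing
 * name[] array and an inotify fd obtained elsewhere):
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	size_t i = 0;
 *
 *	while (n > 0 && i < (size_t) n) {
 *		struct inotify_event *ev = (struct inotify_event *)(buf + i);
 *
 *		printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		i += sizeof(struct inotify_event) + ev->len;
 *	}
 */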
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/*
	 * Destroy all of the watches on this device.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_sem before dev->sem.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		down(&dev->sem);
		watches = &dev->watches;
		if (list_empty(watches)) {
			up(&dev->sem);
			break;
		}
		watch = list_entry(watches->next, struct inotify_watch, d_list);
		get_inotify_watch(watch);
		up(&dev->sem);

		inode = watch->inode;
		down(&inode->inotify_sem);
		down(&dev->sem);
		remove_watch_no_event(watch, dev);
		up(&dev->sem);
		up(&inode->inotify_sem);
		put_inotify_watch(watch);
	}

	/* destroy all of the events on this device */
	down(&dev->sem);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	up(&dev->sem);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}
/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
	struct inotify_watch *watch;
	struct inode *inode;

	down(&dev->sem);
	watch = idr_find(&dev->idr, wd);
	if (unlikely(!watch)) {
		up(&dev->sem);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	up(&dev->sem);

	down(&inode->inotify_sem);
	down(&dev->sem);

	/* make sure that we did not race with another removal */
	if (likely(idr_find(&dev->idr, wd) == watch))
		remove_watch(watch, dev);

	up(&dev->sem);
	up(&inode->inotify_sem);
	put_inotify_watch(watch);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}
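/*
 * FIONREAD reports dev->queue_size, i.e. how many bytes a read() could
 * return right now.  From user-space (illustrative):
 *
 *	int queued;
 *
 *	if (!ioctl(fd, FIONREAD, &queued))
 *		printf("%d bytes queued\n", queued);
 */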
static struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};
asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	idr_init(&dev->idr);
	INIT_LIST_HEAD(&dev->events);
	INIT_LIST_HEAD(&dev->watches);
	init_waitqueue_head(&dev->wq);
	sema_init(&dev->sem, 1);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inotify_watch *watch, *old;
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	ret = find_inode(path, &nd);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	down(&inode->inotify_sem);
	down(&dev->sem);

	/* don't let user-space set invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS;
	if (unlikely(!mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Handle the case of re-adding a watch on an (inode,dev) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_dev(inode, dev);
	if (unlikely(old)) {
		old->mask = mask;
		ret = old->wd;
		goto out;
	}

	watch = create_watch(dev, mask, inode);
	if (unlikely(IS_ERR(watch))) {
		ret = PTR_ERR(watch);
		goto out;
	}

	/* Add the watch to the device's and the inode's list */
	list_add(&watch->d_list, &dev->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	ret = watch->wd;
out:
	up(&dev->sem);
	up(&inode->inotify_sem);
	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;
	ret = inotify_ignore(dev, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}
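/*
 * The three system calls combine from user-space as follows (illustrative
 * only; C libraries of this vintage may lack wrappers, hence raw syscall(2)
 * with the architecture's __NR_inotify_* numbers):
 *
 *	int fd = syscall(__NR_inotify_init);
 *	int wd = syscall(__NR_inotify_add_watch, fd, "/tmp", IN_MODIFY);
 *
 *	... read(fd, ...) and parse events as sketched above ...
 *
 *	syscall(__NR_inotify_rm_watch, fd, wd);
 *	close(fd);
 */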
static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};
/*
 * inotify_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	atomic_set(&inotify_cookie, 0);

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	return 0;
}

module_init(inotify_setup);