/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"
#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
/* allocation is serialized by namespace_sem */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&vfsmount_lock);
	res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}
static void mnt_free_id(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	ida_remove(&mnt_id_ida, mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
}
/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	ida_remove(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;
		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;
		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		atomic_set(&mnt->__mnt_writers, 0);
	}
	return mnt;

out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*.  This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
struct mnt_writer {
	/*
	 * If holding multiple instances of this lock, they
	 * must be ordered by cpu number.
	 */
	spinlock_t lock;
	struct lock_class_key lock_class; /* compiles out with !lockdep */
	unsigned long count;
	struct vfsmount *mnt;
} ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
static int __init init_mnt_writers(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
		spin_lock_init(&writer->lock);
		lockdep_set_class(&writer->lock, &writer->lock_class);
		writer->count = 0;
	}
	return 0;
}
fs_initcall(init_mnt_writers);
static void unlock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_unlock(&cpu_writer->lock);
	}
}
static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
{
	if (!cpu_writer->mnt)
		return;
	/*
	 * This is in case anyone ever leaves an invalid,
	 * old ->mnt and a count of 0.
	 */
	if (!cpu_writer->count)
		return;
	atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
	cpu_writer->count = 0;
}
/*
 * must hold cpu_writer->lock
 */
static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
					  struct vfsmount *mnt)
{
	if (cpu_writer->mnt == mnt)
		return;
	__clear_mnt_count(cpu_writer);
	cpu_writer->mnt = mnt;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success.  When
 * the write operation is finished, mnt_drop_write()
 * must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);
	if (__mnt_is_readonly(mnt)) {
		ret = -EROFS;
		goto out;
	}
	use_cpu_writer_for_mount(cpu_writer, mnt);
	cpu_writer->count++;
out:
	spin_unlock(&cpu_writer->lock);
	put_cpu_var(mnt_writers);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
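/*
 * Usage sketch (illustrative only, not part of this file): callers
 * bracket each write-side operation with mnt_want_write()/
 * mnt_drop_write().  "example_touch" and "do_write_thing" are
 * hypothetical names.
 */
#if 0
static int example_touch(struct path *path)
{
	int err = mnt_want_write(path->mnt);	/* fails with -EROFS if r/o */
	if (err)
		return err;
	err = do_write_thing(path->dentry);	/* hypothetical write */
	mnt_drop_write(path->mnt);		/* must pair with the want */
	return err;
}
#endif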
static void lock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_lock(&cpu_writer->lock);
		__clear_mnt_count(cpu_writer);
		cpu_writer->mnt = NULL;
	}
}
/*
 * These per-cpu write counts are not guaranteed to have
 * matched increments and decrements on any given cpu.
 * A file open()ed for write on one cpu and close()d on
 * another cpu will imbalance this count.  Make sure it
 * does not get too far out of whack.
 */
static void handle_write_count_underflow(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->__mnt_writers) >=
	    MNT_WRITER_UNDERFLOW_LIMIT)
		return;
	/*
	 * It isn't necessary to hold all of the locks
	 * at the same time, but doing it this way makes
	 * us share a lot more code.
	 */
	lock_mnt_writers();
	/*
	 * vfsmount_lock is for mnt_flags.
	 */
	spin_lock(&vfsmount_lock);
	/*
	 * If coalescing the per-cpu writer counts did not
	 * get us back to a positive writer count, we have
	 * a bug.
	 */
	if ((atomic_read(&mnt->__mnt_writers) < 0) &&
	    !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
		WARN(1, KERN_DEBUG "leak detected on mount(%p) writers "
				"count: %d\n",
			mnt, atomic_read(&mnt->__mnt_writers));
		/* use the flag to keep the dmesg spam down */
		mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
	}
	spin_unlock(&vfsmount_lock);
	unlock_mnt_writers();
}
/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	int must_check_underflow = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);

	use_cpu_writer_for_mount(cpu_writer, mnt);
	if (cpu_writer->count > 0) {
		cpu_writer->count--;
	} else {
		must_check_underflow = 1;
		atomic_dec(&mnt->__mnt_writers);
	}

	spin_unlock(&cpu_writer->lock);
	/*
	 * Logically, we could call this each time,
	 * but the __mnt_writers cacheline tends to
	 * be cold, and makes this expensive.
	 */
	if (must_check_underflow)
		handle_write_count_underflow(mnt);
	/*
	 * This could be done right after the spinlock
	 * is taken because the spinlock keeps us on
	 * the cpu, and disables preemption.  However,
	 * putting it here bounds the amount that
	 * __mnt_writers can underflow.  Without it,
	 * we could theoretically wrap __mnt_writers.
	 */
	put_cpu_var(mnt_writers);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	lock_mnt_writers();
	/*
	 * With all the locks held, this value is stable
	 */
	if (atomic_read(&mnt->__mnt_writers) > 0) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * nobody can do a successful mnt_want_write() with all
	 * of the counts in MNT_DENIED_WRITE and the locks held.
	 */
	spin_lock(&vfsmount_lock);
	if (!ret)
		mnt->mnt_flags |= MNT_READONLY;
	spin_unlock(&vfsmount_lock);
out:
	unlock_mnt_writers();
	return ret;
}
static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	spin_unlock(&vfsmount_lock);
}
void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
}
EXPORT_SYMBOL(simple_set_mnt);
void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
	kmem_cache_free(mnt_cache, mnt);
}
/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}
/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;
	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}
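/*
 * Usage sketch (illustrative only): because lookup_mnt() takes a
 * reference, every successful lookup must be balanced by mntput().
 */
#if 0
	struct vfsmount *child = lookup_mnt(parent, dentry);
	if (child) {
		/* ... inspect or traverse into child ... */
		mntput(child);		/* drop the reference from lookup_mnt() */
	}
#endif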
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_path->dentry->d_mounted--;
}
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}
static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}
/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
				hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

out_free:
	free_vfsmnt(mnt);
	return NULL;
}
static inline void __mntput(struct vfsmount *mnt)
{
	int cpu;
	struct super_block *sb = mnt->mnt_sb;
	/*
	 * We don't have to hold all of the locks at the
	 * same time here because we know that we're the
	 * last reference to mnt and that no new writers
	 * can come in.
	 */
	for_each_possible_cpu(cpu) {
		struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_lock(&cpu_writer->lock);
		if (cpu_writer->mnt != mnt) {
			spin_unlock(&cpu_writer->lock);
			continue;
		}
		atomic_add(cpu_writer->count, &mnt->__mnt_writers);
		cpu_writer->count = 0;
		/*
		 * Might as well do this so that no one
		 * ever sees the pointer and expects
		 * it to be valid.
		 */
		cpu_writer->mnt = NULL;
		spin_unlock(&cpu_writer->lock);
	}
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	WARN_ON(atomic_read(&mnt->__mnt_writers));
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}
void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}
EXPORT_SYMBOL(mntput_no_expire);
void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);
void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);
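/*
 * Usage sketch (illustrative only): BSD process accounting pins the
 * vfsmount its output file lives on so a lazy umount cannot release
 * it while records are still being written; kernel/acct.c is the
 * in-tree user of this pair.
 */
#if 0
	mnt_pin(file->f_path.mnt);	/* when accounting is switched on */
	/* ... */
	mnt_unpin(file->f_path.mnt);	/* when accounting is switched off */
#endif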
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options = mnt->mnt_sb->s_options;

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}

	return 0;
}
EXPORT_SYMBOL(generic_show_options);
/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	kfree(sb->s_options);
	sb->s_options = kstrdup(options, GFP_KERNEL);
}
EXPORT_SYMBOL(save_mount_options);
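/*
 * Usage sketch (illustrative only, "examplefs" is hypothetical): a
 * filesystem records its option string at mount time and points
 * .show_options at generic_show_options().
 */
#if 0
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	save_mount_options(sb, data);	/* keep a copy for /proc/mounts */
	/* ... usual superblock setup ... */
	return 0;
}

static const struct super_operations examplefs_sops = {
	.show_options	= generic_show_options,
};
#endif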
#ifdef CONFIG_PROC_FS
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}
struct proc_fs_info {
	int flag;
	const char *str;
};

static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
	static const struct proc_fs_info fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}

	return security_sb_show_options(m, sb);
}
static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
	static const struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ MNT_STRICTATIME, ",strictatime" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
}
static void show_type(struct seq_file *m, struct super_block *sb)
{
	mangle(m, sb->s_type->name);
	if (sb->s_subtype && sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, sb->s_subtype);
	}
}
static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, mnt->mnt_sb);
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, mnt->mnt_sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
out:
	return err;
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt,
};
static int show_mountinfo(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	seq_dentry(m, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
		/*
		 * Mountpoint is outside root, discard that one.  Ugly,
		 * but less so than trying to do that in iterator in a
		 * race-free way (due to renames).
		 */
		return SEQ_SKIP;
	}
	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(mnt))
		seq_printf(m, " shared:%i", mnt->mnt_group_id);
	if (IS_MNT_SLAVE(mnt)) {
		int master = mnt->mnt_master->mnt_group_id;
		int dom = get_dominating_id(mnt, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(mnt))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt);
	seq_putc(m, '\n');
out:
	return err;
}

const struct seq_operations mountinfo_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_mountinfo,
};
static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, mnt->mnt_sb);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

const struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
#endif  /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);
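/*
 * Usage sketch (illustrative only): an automounter can probe a tree
 * non-destructively before deciding whether it is worth trying to
 * expire it.
 */
#if 0
	if (may_umount_tree(mnt)) {
		/* no open files, pwds or chroots below: try to expire it */
	}
#endif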
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}
EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			p->mnt_parent->mnt_ghosts++;
			p->mnt_mountpoint->d_mounted--;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		lock_kernel();
		sb->s_op->umount_begin(sb);
		unlock_kernel();
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	int retval;

	retval = user_path(name, &path);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(path.mnt);
out:
	return retval;
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
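/*
 * Userspace sketch (illustrative only) of the flag values umount(2)/
 * umount2(2) accept, matching the do_umount() logic above.
 */
#if 0
	umount2("/mnt/data", 0);		/* plain unmount */
	umount2("/mnt/data", MNT_FORCE);	/* abort in-flight fs requests */
	umount2("/mnt/data", MNT_DETACH);	/* lazy: detach now, free later */
	umount2("/mnt/data", MNT_EXPIRE);	/* only if untouched since last call */
#endif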
static int mount_is_safe(struct path *path)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(path->dentry->d_inode->i_mode))
		return -EPERM;
	if (path->dentry->d_inode->i_mode & S_ISVTX) {
		if (current_uid() != path->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (inode_permission(path->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}
struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}
void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}
static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}
static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) | shared(+++)    |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(current->nsproxy->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}
static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, path);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, path);
	return err;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct vfsmount *m, *mnt = path->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);

out_unlock:
	up_write(&namespace_sem);
	return err;
}
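/*
 * Userspace sketch (illustrative only): the MS_SHARED etc. requests
 * that reach do_change_type() are issued as mount(2) calls with a
 * NULL source, e.g. the equivalent of "mount --make-rshared /".
 */
#if 0
	mount(NULL, "/", NULL, MS_SHARED | MS_REC, NULL);	/* make-rshared */
	mount(NULL, "/mnt", NULL, MS_PRIVATE, NULL);		/* make-private */
#endif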
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
				int recurse)
{
	struct path old_path;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(path);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_path.mnt))
		goto out;

	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
	else
		mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, path);
	if (err) {
		/* revert the clone on failure */
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_path);
	return err;
}
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(path->mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		path->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err) {
		security_sb_post_remount(path->mnt, flags, data);

		spin_lock(&vfsmount_lock);
		touch_mnt_namespace(path->mnt->mnt_ns);
		spin_unlock(&vfsmount_lock);
	}
	return err;
}
static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
static int do_move_mount(struct path *path, char *old_name)
{
	struct path old_path, parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(path->dentry) &&
	       follow_down(&path->mnt, &path->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out1;

	if (!IS_ROOT(path->dentry) && d_unhashed(path->dentry))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (old_path.mnt == old_path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(path->mnt) &&
	    tree_contains_unbindable(old_path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = path->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_path.mnt->mnt_expire);
out1:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, path, mnt_flags, NULL);
}
/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct path *path,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(path->dentry) &&
	       follow_down(&path->mnt, &path->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(path->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, path)))
		goto unlock;

	if (fslist) /* add to the specified expiration list */
		list_add_tail(&newmnt->mnt_expire, fslist);

	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}
EXPORT_SYMBOL_GPL(do_add_mount);
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
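/*
 * Usage sketch (illustrative only, modelled on NFS-style automounts;
 * "examplefs_automount_list" is hypothetical): submounts are created
 * with an expiration list, and a periodic work item calls
 * mark_mounts_for_expiry() so any mount left untouched between two
 * runs gets unmounted.
 */
#if 0
static LIST_HEAD(examplefs_automount_list);

	/* when creating the submount: */
	err = do_add_mount(newmnt, &path, MNT_SHRINKABLE,
			   &examplefs_automount_list);

	/* from a timer or delayed work, twice per timeout: */
	mark_mounts_for_expiry(&examplefs_automount_list);
#endif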
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Default to relatime */
	mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
		   MS_STRICTATIME);

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
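/*
 * Userspace sketch (illustrative only): a read-only, noatime bind
 * mount, showing how MS_* flags from mount(2) reach the dispatch
 * above (an MS_BIND pass first, then MS_REMOUNT|MS_BIND for flags).
 */
#if 0
	mount("/srv/data", "/mnt/ro", NULL, MS_BIND, NULL);
	mount(NULL, "/mnt/ro", NULL,
	      MS_REMOUNT | MS_BIND | MS_RDONLY | MS_NOATIME, NULL);
#endif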
/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = mnt_ns->root;
	q = new_ns->root;
	while (p) {
		q->mnt_ns = new_ns;
		if (fs) {
			if (p == fs->root.mnt) {
				rootmnt = p;
				fs->root.mnt = mntget(q);
			}
			if (p == fs->pwd.mnt) {
				pwdmnt = p;
				fs->pwd.mnt = mntget(q);
			}
		}
		p = next_mnt(p, mnt_ns->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}
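/*
 * Userspace sketch (illustrative only): copy_mnt_ns() runs when a
 * task asks for a private mount namespace, after which propagation
 * can be cut off with a recursive make-private.
 */
#if 0
	unshare(CLONE_NEWNS);				/* triggers copy_mnt_ns() */
	mount(NULL, "/", NULL, MS_PRIVATE | MS_REC, NULL);
#endif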
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	write_lock(&fs->lock);
	old_root = fs->root;
	fs->root = *path;
	path_get(path);
	write_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}
static void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt)
				set_fs_root(fs, new_root);
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt)
				set_fs_pwd(fs, new_root);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct vfsmount *tmp;
	struct path new, old, parent_path, root_parent, root;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new.mnt))
		goto out1;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error) {
		path_put(&old);
		goto out1;
	}

	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&namespace_sem);
	mutex_lock(&old.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old.mnt) ||
		IS_MNT_SHARED(new.mnt->mnt_parent) ||
		IS_MNT_SHARED(root.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(root.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new.dentry->d_inode))
		goto out2;
	if (d_unhashed(new.dentry) && !IS_ROOT(new.dentry))
		goto out2;
	if (d_unhashed(old.dentry) && !IS_ROOT(old.dentry))
		goto out2;
	error = -EBUSY;
	if (new.mnt == root.mnt ||
	    old.mnt == root.mnt)
		goto out2; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out2; /* not a mountpoint */
	if (root.mnt->mnt_parent == root.mnt)
		goto out2; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out2; /* not a mountpoint */
	if (new.mnt->mnt_parent == new.mnt)
		goto out2; /* not attached */
	/* make sure we can reach put_old from new_root */
	tmp = old.mnt;
	spin_lock(&vfsmount_lock);
	if (tmp != new.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new.dentry))
			goto out3;
	} else if (!is_subdir(old.dentry, new.dentry))
		goto out3;
	detach_mnt(new.mnt, &parent_path);
	detach_mnt(root.mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root.mnt, &old);
	/* mount new_root on / */
	attach_mnt(new.mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&root, &new);
	security_sb_post_pivotroot(&root, &new);
	error = 0;
	path_put(&root_parent);
	path_put(&parent_path);
out2:
	mutex_unlock(&old.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_put(&root);
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}
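/*
 * Userspace sketch (illustrative only, assumed paths): the classic
 * initramfs hand-over honouring the rules documented above; put_old
 * ("/new_root/put_old") lies underneath new_root.  pivot_root has no
 * libc wrapper, hence the raw syscall.
 */
#if 0
	chdir("/new_root");
	syscall(SYS_pivot_root, ".", "put_old");
	chroot(".");
	umount2("/put_old", MNT_DETACH);
#endif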
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	ns = kmalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		panic("Can't allocate initial namespace");
	atomic_set(&ns->count, 1);
	INIT_LIST_HEAD(&ns->list);
	init_waitqueue_head(&ns->poll);
	ns->event = 0;
	list_add(&mnt->mnt_list, &ns->list);
	ns->root = mnt;
	mnt->mnt_ns = ns;

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
void __put_mnt_ns(struct mnt_namespace *ns)
{
	struct vfsmount *root = ns->root;
	LIST_HEAD(umount_list);

	ns->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}