vfs: remove lives_below_in_same_fs()
diff --git a/fs/namespace.c b/fs/namespace.c
index 72bb1062bfe77886394a79c0822ec141aad55045..061e5edb4d271395a37709b4503a007a70c7bdd7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -14,9 +14,9 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/quotaops.h>
 #include <linux/acct.h>
 #include <linux/capability.h>
+#include <linux/cpumask.h>
 #include <linux/module.h>
 #include <linux/sysfs.h>
 #include <linux/seq_file.h>
 #include <linux/security.h>
 #include <linux/mount.h>
 #include <linux/ramfs.h>
+#include <linux/log2.h>
+#include <linux/idr.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include "pnode.h"
+#include "internal.h"
+
+#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
+#define HASH_SIZE (1UL << HASH_SHIFT)
 
 /* spinlock for vfsmount related operations, in place of dcache_lock */
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
 
 static int event;
+static DEFINE_IDA(mnt_id_ida);
+static DEFINE_IDA(mnt_group_ida);
 
 static struct list_head *mount_hashtable __read_mostly;
-static int hash_mask __read_mostly, hash_bits __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 
 /* /sys/fs */
-decl_subsys(fs, NULL, NULL);
-EXPORT_SYMBOL_GPL(fs_subsys);
+struct kobject *fs_kobj;
+EXPORT_SYMBOL_GPL(fs_kobj);
 
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
        tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
-       tmp = tmp + (tmp >> hash_bits);
-       return tmp & hash_mask;
+       tmp = tmp + (tmp >> HASH_SHIFT);
+       return tmp & (HASH_SIZE - 1);
+}
+
+#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
+
+/* allocation is serialized by namespace_sem */
+static int mnt_alloc_id(struct vfsmount *mnt)
+{
+       int res;
+
+retry:
+       ida_pre_get(&mnt_id_ida, GFP_KERNEL);
+       spin_lock(&vfsmount_lock);
+       res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
+       spin_unlock(&vfsmount_lock);
+       if (res == -EAGAIN)
+               goto retry;
+
+       return res;
+}
+
+static void mnt_free_id(struct vfsmount *mnt)
+{
+       spin_lock(&vfsmount_lock);
+       ida_remove(&mnt_id_ida, mnt->mnt_id);
+       spin_unlock(&vfsmount_lock);
+}
+
+/*
+ * Allocate a new peer group ID
+ *
+ * mnt_group_ida is protected by namespace_sem
+ */
+static int mnt_alloc_group_id(struct vfsmount *mnt)
+{
+       if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
+               return -ENOMEM;
+
+       return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
+}
+
+/*
+ * Release a peer group ID
+ */
+void mnt_release_group_id(struct vfsmount *mnt)
+{
+       ida_remove(&mnt_group_ida, mnt->mnt_group_id);
+       mnt->mnt_group_id = 0;
 }
 
 struct vfsmount *alloc_vfsmnt(const char *name)
 {
        struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
        if (mnt) {
+               int err;
+
+               err = mnt_alloc_id(mnt);
+               if (err) {
+                       kmem_cache_free(mnt_cache, mnt);
+                       return NULL;
+               }
+
                atomic_set(&mnt->mnt_count, 1);
                INIT_LIST_HEAD(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
@@ -64,6 +126,7 @@ struct vfsmount *alloc_vfsmnt(const char *name)
                INIT_LIST_HEAD(&mnt->mnt_share);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
                INIT_LIST_HEAD(&mnt->mnt_slave);
+               atomic_set(&mnt->__mnt_writers, 0);
                if (name) {
                        int size = strlen(name) + 1;
                        char *newname = kmalloc(size, GFP_KERNEL);
@@ -76,6 +139,263 @@ struct vfsmount *alloc_vfsmnt(const char *name)
        return mnt;
 }
 
+/*
+ * Most r/o checks on a fs are for operations that take
+ * discrete amounts of time, like a write() or unlink().
+ * We must keep track of when those operations start
+ * (for permission checks) and when they end, so that
+ * we can determine when writes are able to occur to
+ * a filesystem.
+ */
+/*
+ * __mnt_is_readonly: check whether a mount is read-only
+ * @mnt: the mount to check for its write status
+ *
+ * This shouldn't be used directly outside of the VFS.
+ * It does not guarantee that the filesystem will stay
+ * r/w, just that it is right *now*.  This can not and
+ * should not be used in place of IS_RDONLY(inode).
+ * mnt_want/drop_write() will _keep_ the filesystem
+ * r/w.
+ */
+int __mnt_is_readonly(struct vfsmount *mnt)
+{
+       if (mnt->mnt_flags & MNT_READONLY)
+               return 1;
+       if (mnt->mnt_sb->s_flags & MS_RDONLY)
+               return 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__mnt_is_readonly);
+
+struct mnt_writer {
+       /*
+        * If holding multiple instances of this lock, they
+        * must be ordered by cpu number.
+        */
+       spinlock_t lock;
+       struct lock_class_key lock_class; /* compiles out with !lockdep */
+       unsigned long count;
+       struct vfsmount *mnt;
+} ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
+
+static int __init init_mnt_writers(void)
+{
+       int cpu;
+       for_each_possible_cpu(cpu) {
+               struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
+               spin_lock_init(&writer->lock);
+               lockdep_set_class(&writer->lock, &writer->lock_class);
+               writer->count = 0;
+       }
+       return 0;
+}
+fs_initcall(init_mnt_writers);
+
+static void unlock_mnt_writers(void)
+{
+       int cpu;
+       struct mnt_writer *cpu_writer;
+
+       for_each_possible_cpu(cpu) {
+               cpu_writer = &per_cpu(mnt_writers, cpu);
+               spin_unlock(&cpu_writer->lock);
+       }
+}
+
+static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
+{
+       if (!cpu_writer->mnt)
+               return;
+       /*
+        * This is in case anyone ever leaves an invalid,
+        * old ->mnt and a count of 0.
+        */
+       if (!cpu_writer->count)
+               return;
+       atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
+       cpu_writer->count = 0;
+}
+/*
+ * must hold cpu_writer->lock
+ */
+static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
+                                         struct vfsmount *mnt)
+{
+       if (cpu_writer->mnt == mnt)
+               return;
+       __clear_mnt_count(cpu_writer);
+       cpu_writer->mnt = mnt;
+}
+
+/*
+ * Most r/o checks on a fs are for operations that take
+ * discrete amounts of time, like a write() or unlink().
+ * We must keep track of when those operations start
+ * (for permission checks) and when they end, so that
+ * we can determine when writes are able to occur to
+ * a filesystem.
+ */
+/**
+ * mnt_want_write - get write access to a mount
+ * @mnt: the mount on which to take a write
+ *
+ * This tells the low-level filesystem that a write is
+ * about to be performed to it, and makes sure that
+ * writes are allowed before returning success.  When
+ * the write operation is finished, mnt_drop_write()
+ * must be called.  This is effectively a refcount.
+ */
+int mnt_want_write(struct vfsmount *mnt)
+{
+       int ret = 0;
+       struct mnt_writer *cpu_writer;
+
+       cpu_writer = &get_cpu_var(mnt_writers);
+       spin_lock(&cpu_writer->lock);
+       if (__mnt_is_readonly(mnt)) {
+               ret = -EROFS;
+               goto out;
+       }
+       use_cpu_writer_for_mount(cpu_writer, mnt);
+       cpu_writer->count++;
+out:
+       spin_unlock(&cpu_writer->lock);
+       put_cpu_var(mnt_writers);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mnt_want_write);
+
+static void lock_mnt_writers(void)
+{
+       int cpu;
+       struct mnt_writer *cpu_writer;
+
+       for_each_possible_cpu(cpu) {
+               cpu_writer = &per_cpu(mnt_writers, cpu);
+               spin_lock(&cpu_writer->lock);
+               __clear_mnt_count(cpu_writer);
+               cpu_writer->mnt = NULL;
+       }
+}
+
+/*
+ * These per-cpu write counts are not guaranteed to have
+ * matched increments and decrements on any given cpu.
+ * A file open()ed for write on one cpu and close()d on
+ * another cpu will imbalance this count.  Make sure it
+ * does not get too far out of whack.
+ */
+static void handle_write_count_underflow(struct vfsmount *mnt)
+{
+       if (atomic_read(&mnt->__mnt_writers) >=
+           MNT_WRITER_UNDERFLOW_LIMIT)
+               return;
+       /*
+        * It isn't necessary to hold all of the locks
+        * at the same time, but doing it this way makes
+        * us share a lot more code.
+        */
+       lock_mnt_writers();
+       /*
+        * vfsmount_lock is for mnt_flags.
+        */
+       spin_lock(&vfsmount_lock);
+       /*
+        * If coalescing the per-cpu writer counts did not
+        * get us back to a positive writer count, we have
+        * a bug.
+        */
+       if ((atomic_read(&mnt->__mnt_writers) < 0) &&
+           !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
+               printk(KERN_DEBUG "leak detected on mount(%p) writers "
+                               "count: %d\n",
+                       mnt, atomic_read(&mnt->__mnt_writers));
+               WARN_ON(1);
+               /* use the flag to keep the dmesg spam down */
+               mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
+       }
+       spin_unlock(&vfsmount_lock);
+       unlock_mnt_writers();
+}
+
+/**
+ * mnt_drop_write - give up write access to a mount
+ * @mnt: the mount on which to give up write access
+ *
+ * Tells the low-level filesystem that we are done
+ * performing writes to it.  Must be matched with
+ * mnt_want_write() call above.
+ */
+void mnt_drop_write(struct vfsmount *mnt)
+{
+       int must_check_underflow = 0;
+       struct mnt_writer *cpu_writer;
+
+       cpu_writer = &get_cpu_var(mnt_writers);
+       spin_lock(&cpu_writer->lock);
+
+       use_cpu_writer_for_mount(cpu_writer, mnt);
+       if (cpu_writer->count > 0) {
+               cpu_writer->count--;
+       } else {
+               must_check_underflow = 1;
+               atomic_dec(&mnt->__mnt_writers);
+       }
+
+       spin_unlock(&cpu_writer->lock);
+       /*
+        * Logically, we could call this each time,
+        * but the __mnt_writers cacheline tends to
+        * be cold, and makes this expensive.
+        */
+       if (must_check_underflow)
+               handle_write_count_underflow(mnt);
+       /*
+        * This could be done right after the spinlock
+        * is taken because the spinlock keeps us on
+        * the cpu, and disables preemption.  However,
+        * putting it here bounds the amount that
+        * __mnt_writers can underflow.  Without it,
+        * we could theoretically wrap __mnt_writers.
+        */
+       put_cpu_var(mnt_writers);
+}
+EXPORT_SYMBOL_GPL(mnt_drop_write);
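
A caller is expected to bracket each write-producing operation with a matched
want/drop pair. A minimal sketch of that pattern (the helper name and the
vfs_unlink() call are illustrative, not part of this patch):

    static int example_unlink(struct nameidata *nd, struct dentry *victim)
    {
            int error;

            error = mnt_want_write(nd->path.mnt);  /* -EROFS on a r/o mount */
            if (error)
                    return error;
            error = vfs_unlink(nd->path.dentry->d_inode, victim);
            mnt_drop_write(nd->path.mnt);          /* must balance the want above */
            return error;
    }
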
+
+static int mnt_make_readonly(struct vfsmount *mnt)
+{
+       int ret = 0;
+
+       lock_mnt_writers();
+       /*
+        * With all the locks held, this value is stable
+        */
+       if (atomic_read(&mnt->__mnt_writers) > 0) {
+               ret = -EBUSY;
+               goto out;
+       }
+       /*
+        * nobody can do a successful mnt_want_write() while all
+        * of the per-cpu writer locks are held.
+        */
+       spin_lock(&vfsmount_lock);
+       if (!ret)
+               mnt->mnt_flags |= MNT_READONLY;
+       spin_unlock(&vfsmount_lock);
+out:
+       unlock_mnt_writers();
+       return ret;
+}
+
+static void __mnt_unmake_readonly(struct vfsmount *mnt)
+{
+       spin_lock(&vfsmount_lock);
+       mnt->mnt_flags &= ~MNT_READONLY;
+       spin_unlock(&vfsmount_lock);
+}
+
 int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
 {
        mnt->mnt_sb = sb;
@@ -88,6 +408,7 @@ EXPORT_SYMBOL(simple_set_mnt);
 void free_vfsmnt(struct vfsmount *mnt)
 {
        kfree(mnt->mnt_devname);
+       mnt_free_id(mnt);
        kmem_cache_free(mnt_cache, mnt);
 }
 
@@ -151,15 +472,15 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
        }
 }
 
-static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
+static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
 {
-       old_nd->dentry = mnt->mnt_mountpoint;
-       old_nd->mnt = mnt->mnt_parent;
+       old_path->dentry = mnt->mnt_mountpoint;
+       old_path->mnt = mnt->mnt_parent;
        mnt->mnt_parent = mnt;
        mnt->mnt_mountpoint = mnt->mnt_root;
        list_del_init(&mnt->mnt_child);
        list_del_init(&mnt->mnt_hash);
-       old_nd->dentry->d_mounted--;
+       old_path->dentry->d_mounted--;
 }
 
 void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
@@ -170,12 +491,12 @@ void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
        dentry->d_mounted++;
 }
 
-static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
+static void attach_mnt(struct vfsmount *mnt, struct path *path)
 {
-       mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
+       mnt_set_mountpoint(path->mnt, path->dentry, mnt);
        list_add_tail(&mnt->mnt_hash, mount_hashtable +
-                       hash(nd->mnt, nd->dentry));
-       list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
+                       hash(path->mnt, path->dentry));
+       list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
 }
 
 /*
@@ -234,6 +555,17 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
        struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
 
        if (mnt) {
+               if (flag & (CL_SLAVE | CL_PRIVATE))
+                       mnt->mnt_group_id = 0; /* not a peer of original */
+               else
+                       mnt->mnt_group_id = old->mnt_group_id;
+
+               if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
+                       int err = mnt_alloc_group_id(mnt);
+                       if (err)
+                               goto out_free;
+               }
+
                mnt->mnt_flags = old->mnt_flags;
                atomic_inc(&sb->s_active);
                mnt->mnt_sb = sb;
@@ -245,7 +577,7 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
                        list_add(&mnt->mnt_slave, &old->mnt_slave_list);
                        mnt->mnt_master = old;
                        CLEAR_MNT_SHARED(mnt);
-               } else {
+               } else if (!(flag & CL_PRIVATE)) {
                        if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
                                list_add(&mnt->mnt_share, &old->mnt_share);
                        if (IS_MNT_SLAVE(old))
@@ -258,18 +590,49 @@ static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
                /* stick the duplicate mount on the same expiry list
                 * as the original if that was on one */
                if (flag & CL_EXPIRE) {
-                       spin_lock(&vfsmount_lock);
                        if (!list_empty(&old->mnt_expire))
                                list_add(&mnt->mnt_expire, &old->mnt_expire);
-                       spin_unlock(&vfsmount_lock);
                }
        }
        return mnt;
+
+ out_free:
+       free_vfsmnt(mnt);
+       return NULL;
 }
 
 static inline void __mntput(struct vfsmount *mnt)
 {
+       int cpu;
        struct super_block *sb = mnt->mnt_sb;
+       /*
+        * We don't have to hold all of the locks at the
+        * same time here because we know that we're the
+        * last reference to mnt and that no new writers
+        * can come in.
+        */
+       for_each_possible_cpu(cpu) {
+               struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
+               if (cpu_writer->mnt != mnt)
+                       continue;
+               spin_lock(&cpu_writer->lock);
+               atomic_add(cpu_writer->count, &mnt->__mnt_writers);
+               cpu_writer->count = 0;
+               /*
+                * Might as well do this so that no one
+                * ever sees the pointer and expects
+                * it to be valid.
+                */
+               cpu_writer->mnt = NULL;
+               spin_unlock(&cpu_writer->lock);
+       }
+       /*
+        * This probably indicates that somebody messed
+        * up a mnt_want/drop_write() pair.  If this
+        * happens, the filesystem was probably unable
+        * to make r/w->r/o transitions.
+        */
+       WARN_ON(atomic_read(&mnt->__mnt_writers));
        dput(mnt->mnt_root);
        free_vfsmnt(mnt);
        deactivate_super(sb);
@@ -316,26 +679,65 @@ void mnt_unpin(struct vfsmount *mnt)
 
 EXPORT_SYMBOL(mnt_unpin);
 
+static inline void mangle(struct seq_file *m, const char *s)
+{
+       seq_escape(m, s, " \t\n\\");
+}
+
+/*
+ * Simple .show_options callback for filesystems which don't want to
+ * implement more complex mount option showing.
+ *
+ * See also save_mount_options().
+ */
+int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
+{
+       const char *options = mnt->mnt_sb->s_options;
+
+       if (options != NULL && options[0]) {
+               seq_putc(m, ',');
+               mangle(m, options);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(generic_show_options);
+
+/*
+ * If filesystem uses generic_show_options(), this function should be
+ * called from the fill_super() callback.
+ *
+ * The .remount_fs callback usually needs to be handled in a special
+ * way, to make sure that previous options are not overwritten if the
+ * remount fails.
+ *
+ * Also note that if the filesystem's .remount_fs function doesn't
+ * reset all options to their default value, but changes only newly
+ * given options, then the displayed options will not reflect reality
+ * any more.
+ */
+void save_mount_options(struct super_block *sb, char *options)
+{
+       kfree(sb->s_options);
+       sb->s_options = kstrdup(options, GFP_KERNEL);
+}
+EXPORT_SYMBOL(save_mount_options);
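
Taken together, a simple filesystem saves the option string in its
fill_super() and points .show_options at generic_show_options(). A sketch
under the assumption of a hypothetical "examplefs" (not part of this patch):

    static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
    {
            save_mount_options(sb, data);   /* remember options for /proc/mounts */
            /* ... usual superblock setup ... */
            return 0;
    }

    static const struct super_operations examplefs_sops = {
            .show_options   = generic_show_options, /* emits the saved string */
    };
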
+
+#ifdef CONFIG_PROC_FS
 /* iterator */
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
-       struct mnt_namespace *n = m->private;
-       struct list_head *p;
-       loff_t l = *pos;
+       struct proc_mounts *p = m->private;
 
        down_read(&namespace_sem);
-       list_for_each(p, &n->list)
-               if (!l--)
-                       return list_entry(p, struct vfsmount, mnt_list);
-       return NULL;
+       return seq_list_start(&p->ns->list, *pos);
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       struct mnt_namespace *n = m->private;
-       struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
-       (*pos)++;
-       return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
+       struct proc_mounts *p = m->private;
+
+       return seq_list_next(v, &p->ns->list, pos);
 }
 
 static void m_stop(struct seq_file *m, void *v)
@@ -343,25 +745,30 @@ static void m_stop(struct seq_file *m, void *v)
        up_read(&namespace_sem);
 }
 
-static inline void mangle(struct seq_file *m, const char *s)
-{
-       seq_escape(m, s, " \t\n\\");
-}
+struct proc_fs_info {
+       int flag;
+       const char *str;
+};
 
-static int show_vfsmnt(struct seq_file *m, void *v)
+static void show_sb_opts(struct seq_file *m, struct super_block *sb)
 {
-       struct vfsmount *mnt = v;
-       int err = 0;
-       static struct proc_fs_info {
-               int flag;
-               char *str;
-       } fs_info[] = {
+       static const struct proc_fs_info fs_info[] = {
                { MS_SYNCHRONOUS, ",sync" },
                { MS_DIRSYNC, ",dirsync" },
                { MS_MANDLOCK, ",mand" },
                { 0, NULL }
        };
-       static struct proc_fs_info mnt_info[] = {
+       const struct proc_fs_info *fs_infop;
+
+       for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+               if (sb->s_flags & fs_infop->flag)
+                       seq_puts(m, fs_infop->str);
+       }
+}
+
+static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
+{
+       static const struct proc_fs_info mnt_info[] = {
                { MNT_NOSUID, ",nosuid" },
                { MNT_NODEV, ",nodev" },
                { MNT_NOEXEC, ",noexec" },
@@ -370,42 +777,112 @@ static int show_vfsmnt(struct seq_file *m, void *v)
                { MNT_RELATIME, ",relatime" },
                { 0, NULL }
        };
-       struct proc_fs_info *fs_infop;
+       const struct proc_fs_info *fs_infop;
 
-       mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
-       seq_putc(m, ' ');
-       seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
-       seq_putc(m, ' ');
-       mangle(m, mnt->mnt_sb->s_type->name);
-       if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
-               seq_putc(m, '.');
-               mangle(m, mnt->mnt_sb->s_subtype);
-       }
-       seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
-       for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
-               if (mnt->mnt_sb->s_flags & fs_infop->flag)
-                       seq_puts(m, fs_infop->str);
-       }
        for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
                if (mnt->mnt_flags & fs_infop->flag)
                        seq_puts(m, fs_infop->str);
        }
+}
+
+static void show_type(struct seq_file *m, struct super_block *sb)
+{
+       mangle(m, sb->s_type->name);
+       if (sb->s_subtype && sb->s_subtype[0]) {
+               seq_putc(m, '.');
+               mangle(m, sb->s_subtype);
+       }
+}
+
+static int show_vfsmnt(struct seq_file *m, void *v)
+{
+       struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
+       int err = 0;
+       struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+
+       mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+       seq_putc(m, ' ');
+       seq_path(m, &mnt_path, " \t\n\\");
+       seq_putc(m, ' ');
+       show_type(m, mnt->mnt_sb);
+       seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
+       show_sb_opts(m, mnt->mnt_sb);
+       show_mnt_opts(m, mnt);
        if (mnt->mnt_sb->s_op->show_options)
                err = mnt->mnt_sb->s_op->show_options(m, mnt);
        seq_puts(m, " 0 0\n");
        return err;
 }
 
-struct seq_operations mounts_op = {
+const struct seq_operations mounts_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_vfsmnt
 };
 
+static int show_mountinfo(struct seq_file *m, void *v)
+{
+       struct proc_mounts *p = m->private;
+       struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
+       struct super_block *sb = mnt->mnt_sb;
+       struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+       struct path root = p->root;
+       int err = 0;
+
+       seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
+                  MAJOR(sb->s_dev), MINOR(sb->s_dev));
+       seq_dentry(m, mnt->mnt_root, " \t\n\\");
+       seq_putc(m, ' ');
+       seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
+               /*
+                * Mountpoint is outside root, discard that one.  Ugly,
+                * but less so than trying to do that in iterator in a
+                * race-free way (due to renames).
+                */
+               return SEQ_SKIP;
+       }
+       seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
+       show_mnt_opts(m, mnt);
+
+       /* Tagged fields ("foo:X" or "bar") */
+       if (IS_MNT_SHARED(mnt))
+               seq_printf(m, " shared:%i", mnt->mnt_group_id);
+       if (IS_MNT_SLAVE(mnt)) {
+               int master = mnt->mnt_master->mnt_group_id;
+               int dom = get_dominating_id(mnt, &p->root);
+               seq_printf(m, " master:%i", master);
+               if (dom && dom != master)
+                       seq_printf(m, " propagate_from:%i", dom);
+       }
+       if (IS_MNT_UNBINDABLE(mnt))
+               seq_puts(m, " unbindable");
+
+       /* Filesystem specific data */
+       seq_puts(m, " - ");
+       show_type(m, sb);
+       seq_putc(m, ' ');
+       mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+       seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
+       show_sb_opts(m, sb);
+       if (sb->s_op->show_options)
+               err = sb->s_op->show_options(m, mnt);
+       seq_putc(m, '\n');
+       return err;
+}
+
+const struct seq_operations mountinfo_op = {
+       .start  = m_start,
+       .next   = m_next,
+       .stop   = m_stop,
+       .show   = show_mountinfo,
+};
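
Each record produced by show_mountinfo() above therefore has the shape below;
the shared:/master:/propagate_from:/unbindable tags appear only when the
corresponding state is set, and the field names are added here for readability:

    <mnt id> <parent id> <major>:<minor> <root> <mount point> rw|ro[,mnt opts] [tags...] - <fs type> <source> rw|ro[,sb opts]
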
+
 static int show_vfsstat(struct seq_file *m, void *v)
 {
-       struct vfsmount *mnt = v;
+       struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
+       struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
        int err = 0;
 
        /* device */
@@ -417,12 +894,12 @@ static int show_vfsstat(struct seq_file *m, void *v)
 
        /* mount point */
        seq_puts(m, " mounted on ");
-       seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
+       seq_path(m, &mnt_path, " \t\n\\");
        seq_putc(m, ' ');
 
        /* file system type */
        seq_puts(m, "with fstype ");
-       mangle(m, mnt->mnt_sb->s_type->name);
+       show_type(m, mnt->mnt_sb);
 
        /* optional statistics */
        if (mnt->mnt_sb->s_op->show_stats) {
@@ -434,12 +911,13 @@ static int show_vfsstat(struct seq_file *m, void *v)
        return err;
 }
 
-struct seq_operations mountstats_op = {
+const struct seq_operations mountstats_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_vfsstat,
 };
+#endif  /* CONFIG_PROC_FS */
 
 /**
  * may_umount_tree - check if a mount tree is busy
@@ -509,6 +987,7 @@ void release_mounts(struct list_head *head)
                        m = mnt->mnt_parent;
                        mnt->mnt_mountpoint = mnt->mnt_root;
                        mnt->mnt_parent = mnt;
+                       m->mnt_ghosts--;
                        spin_unlock(&vfsmount_lock);
                        dput(dentry);
                        mntput(m);
@@ -533,12 +1012,16 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
                __touch_mnt_namespace(p->mnt_ns);
                p->mnt_ns = NULL;
                list_del_init(&p->mnt_child);
-               if (p->mnt_parent != p)
+               if (p->mnt_parent != p) {
+                       p->mnt_parent->mnt_ghosts++;
                        p->mnt_mountpoint->d_mounted--;
+               }
                change_mnt_propagation(p, MS_PRIVATE);
        }
 }
 
+static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
+
 static int do_umount(struct vfsmount *mnt, int flags)
 {
        struct super_block *sb = mnt->mnt_sb;
@@ -556,7 +1039,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
         *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
         */
        if (flags & MNT_EXPIRE) {
-               if (mnt == current->fs->rootmnt ||
+               if (mnt == current->fs->root.mnt ||
                    flags & (MNT_FORCE | MNT_DETACH))
                        return -EINVAL;
 
@@ -577,10 +1060,11 @@ static int do_umount(struct vfsmount *mnt, int flags)
         * about for the moment.
         */
 
-       lock_kernel();
-       if (sb->s_op->umount_begin)
-               sb->s_op->umount_begin(mnt, flags);
-       unlock_kernel();
+       if (flags & MNT_FORCE && sb->s_op->umount_begin) {
+               lock_kernel();
+               sb->s_op->umount_begin(sb);
+               unlock_kernel();
+       }
 
        /*
         * No sense to grab the lock for this test, but test itself looks
@@ -591,7 +1075,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
         * /reboot - static binary that would close all descriptors and
         * call reboot(9). Then init(8) could umount root and exec /reboot.
         */
-       if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
+       if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
                /*
                 * Special case for "unmounting" root ...
                 * we just try to remount it readonly.
@@ -599,7 +1083,6 @@ static int do_umount(struct vfsmount *mnt, int flags)
                down_write(&sb->s_umount);
                if (!(sb->s_flags & MS_RDONLY)) {
                        lock_kernel();
-                       DQUOT_OFF(sb);
                        retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
                        unlock_kernel();
                }
@@ -611,6 +1094,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
        spin_lock(&vfsmount_lock);
        event++;
 
+       if (!(flags & MNT_DETACH))
+               shrink_submounts(mnt, &umount_list);
+
        retval = -EBUSY;
        if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
                if (!list_empty(&mnt->mnt_list))
@@ -642,18 +1128,20 @@ asmlinkage long sys_umount(char __user * name, int flags)
        if (retval)
                goto out;
        retval = -EINVAL;
-       if (nd.dentry != nd.mnt->mnt_root)
+       if (nd.path.dentry != nd.path.mnt->mnt_root)
                goto dput_and_out;
-       if (!check_mnt(nd.mnt))
+       if (!check_mnt(nd.path.mnt))
                goto dput_and_out;
 
        retval = -EPERM;
        if (!capable(CAP_SYS_ADMIN))
                goto dput_and_out;
 
-       retval = do_umount(nd.mnt, flags);
+       retval = do_umount(nd.path.mnt, flags);
 dput_and_out:
-       path_release_on_umount(&nd);
+       /* we mustn't call path_put() as that would clear mnt_expiry_mark */
+       dput(nd.path.dentry);
+       mntput_no_expire(nd.path.mnt);
 out:
        return retval;
 }
@@ -676,10 +1164,10 @@ static int mount_is_safe(struct nameidata *nd)
                return 0;
        return -EPERM;
 #ifdef notyet
-       if (S_ISLNK(nd->dentry->d_inode->i_mode))
+       if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
                return -EPERM;
-       if (nd->dentry->d_inode->i_mode & S_ISVTX) {
-               if (current->uid != nd->dentry->d_inode->i_uid)
+       if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
+               if (current->uid != nd->path.dentry->d_inode->i_uid)
                        return -EPERM;
        }
        if (vfs_permission(nd, MAY_WRITE))
@@ -688,22 +1176,11 @@ static int mount_is_safe(struct nameidata *nd)
 #endif
 }
 
-static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
-{
-       while (1) {
-               if (d == dentry)
-                       return 1;
-               if (d == NULL || d == d->d_parent)
-                       return 0;
-               d = d->d_parent;
-       }
-}
-
 struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
                                        int flag)
 {
        struct vfsmount *res, *p, *q, *r, *s;
-       struct nameidata nd;
+       struct path path;
 
        if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
                return NULL;
@@ -715,7 +1192,7 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
 
        p = mnt;
        list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
-               if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
+               if (!is_subdir(r->mnt_mountpoint, dentry))
                        continue;
 
                for (s = r; s; s = next_mnt(s, r)) {
@@ -728,14 +1205,14 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
                                q = q->mnt_parent;
                        }
                        p = s;
-                       nd.mnt = q;
-                       nd.dentry = p->mnt_mountpoint;
+                       path.mnt = q;
+                       path.dentry = p->mnt_mountpoint;
                        q = clone_mnt(p, p->mnt_root, flag);
                        if (!q)
                                goto Enomem;
                        spin_lock(&vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
-                       attach_mnt(q, &nd);
+                       attach_mnt(q, &path);
                        spin_unlock(&vfsmount_lock);
                }
        }
@@ -751,6 +1228,53 @@ Enomem:
        return NULL;
 }
 
+struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
+{
+       struct vfsmount *tree;
+       down_write(&namespace_sem);
+       tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
+       up_write(&namespace_sem);
+       return tree;
+}
+
+void drop_collected_mounts(struct vfsmount *mnt)
+{
+       LIST_HEAD(umount_list);
+       down_write(&namespace_sem);
+       spin_lock(&vfsmount_lock);
+       umount_tree(mnt, 0, &umount_list);
+       spin_unlock(&vfsmount_lock);
+       up_write(&namespace_sem);
+       release_mounts(&umount_list);
+}
+
+static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
+{
+       struct vfsmount *p;
+
+       for (p = mnt; p != end; p = next_mnt(p, mnt)) {
+               if (p->mnt_group_id && !IS_MNT_SHARED(p))
+                       mnt_release_group_id(p);
+       }
+}
+
+static int invent_group_ids(struct vfsmount *mnt, bool recurse)
+{
+       struct vfsmount *p;
+
+       for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
+               if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
+                       int err = mnt_alloc_group_id(p);
+                       if (err) {
+                               cleanup_group_ids(mnt, p);
+                               return err;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 /*
  *  @source_mnt : mount tree to be attached
  *  @nd         : place the mount tree @source_mnt is attached
@@ -815,15 +1339,22 @@ Enomem:
  * in allocations.
  */
 static int attach_recursive_mnt(struct vfsmount *source_mnt,
-                       struct nameidata *nd, struct nameidata *parent_nd)
+                       struct path *path, struct path *parent_path)
 {
        LIST_HEAD(tree_list);
-       struct vfsmount *dest_mnt = nd->mnt;
-       struct dentry *dest_dentry = nd->dentry;
+       struct vfsmount *dest_mnt = path->mnt;
+       struct dentry *dest_dentry = path->dentry;
        struct vfsmount *child, *p;
+       int err;
 
-       if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
-               return -EINVAL;
+       if (IS_MNT_SHARED(dest_mnt)) {
+               err = invent_group_ids(source_mnt, true);
+               if (err)
+                       goto out;
+       }
+       err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
+       if (err)
+               goto out_cleanup_ids;
 
        if (IS_MNT_SHARED(dest_mnt)) {
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -831,9 +1362,9 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
        }
 
        spin_lock(&vfsmount_lock);
-       if (parent_nd) {
-               detach_mnt(source_mnt, parent_nd);
-               attach_mnt(source_mnt, nd);
+       if (parent_path) {
+               detach_mnt(source_mnt, parent_path);
+               attach_mnt(source_mnt, path);
                touch_mnt_namespace(current->nsproxy->mnt_ns);
        } else {
                mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
@@ -846,62 +1377,83 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
        }
        spin_unlock(&vfsmount_lock);
        return 0;
+
+ out_cleanup_ids:
+       if (IS_MNT_SHARED(dest_mnt))
+               cleanup_group_ids(source_mnt, NULL);
+ out:
+       return err;
 }
 
-static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
+static int graft_tree(struct vfsmount *mnt, struct path *path)
 {
        int err;
        if (mnt->mnt_sb->s_flags & MS_NOUSER)
                return -EINVAL;
 
-       if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
+       if (S_ISDIR(path->dentry->d_inode->i_mode) !=
              S_ISDIR(mnt->mnt_root->d_inode->i_mode))
                return -ENOTDIR;
 
        err = -ENOENT;
-       mutex_lock(&nd->dentry->d_inode->i_mutex);
-       if (IS_DEADDIR(nd->dentry->d_inode))
+       mutex_lock(&path->dentry->d_inode->i_mutex);
+       if (IS_DEADDIR(path->dentry->d_inode))
                goto out_unlock;
 
-       err = security_sb_check_sb(mnt, nd);
+       err = security_sb_check_sb(mnt, path);
        if (err)
                goto out_unlock;
 
        err = -ENOENT;
-       if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
-               err = attach_recursive_mnt(mnt, nd, NULL);
+       if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
+               err = attach_recursive_mnt(mnt, path, NULL);
 out_unlock:
-       mutex_unlock(&nd->dentry->d_inode->i_mutex);
+       mutex_unlock(&path->dentry->d_inode->i_mutex);
        if (!err)
-               security_sb_post_addmount(mnt, nd);
+               security_sb_post_addmount(mnt, path);
        return err;
 }
 
 /*
  * recursively change the type of the mountpoint.
+ * noinline this do_mount helper to save do_mount stack space.
  */
-static int do_change_type(struct nameidata *nd, int flag)
+static noinline int do_change_type(struct nameidata *nd, int flag)
 {
-       struct vfsmount *m, *mnt = nd->mnt;
+       struct vfsmount *m, *mnt = nd->path.mnt;
        int recurse = flag & MS_REC;
        int type = flag & ~MS_REC;
+       int err = 0;
 
-       if (nd->dentry != nd->mnt->mnt_root)
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (nd->path.dentry != nd->path.mnt->mnt_root)
                return -EINVAL;
 
        down_write(&namespace_sem);
+       if (type == MS_SHARED) {
+               err = invent_group_ids(mnt, recurse);
+               if (err)
+                       goto out_unlock;
+       }
+
        spin_lock(&vfsmount_lock);
        for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
                change_mnt_propagation(m, type);
        spin_unlock(&vfsmount_lock);
+
+ out_unlock:
        up_write(&namespace_sem);
-       return 0;
+       return err;
 }
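
From userspace, do_change_type() is reached through mount(2) with one of the
propagation flags. A minimal sketch (the fallback defines are only needed
where older libc headers lack these flags):

    #include <sys/mount.h>

    #ifndef MS_REC
    #define MS_REC    (1 << 14)
    #endif
    #ifndef MS_SHARED
    #define MS_SHARED (1 << 20)
    #endif

    /* mark a mount (and, with MS_REC, everything below it) shared */
    int make_shared(const char *mountpoint)
    {
            return mount("none", mountpoint, NULL, MS_SHARED | MS_REC, NULL);
    }
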
 
 /*
  * do loopback mount.
+ * noinline this do_mount helper to save do_mount stack space.
  */
-static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
+static noinline int do_loopback(struct nameidata *nd, char *old_name,
+                               int recurse)
 {
        struct nameidata old_nd;
        struct vfsmount *mnt = NULL;
@@ -916,22 +1468,22 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
 
        down_write(&namespace_sem);
        err = -EINVAL;
-       if (IS_MNT_UNBINDABLE(old_nd.mnt))
-               goto out;
+       if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
+               goto out;
 
-       if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+       if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
                goto out;
 
        err = -ENOMEM;
        if (recurse)
-               mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
+               mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
        else
-               mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);
+               mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);
 
        if (!mnt)
                goto out;
 
-       err = graft_tree(mnt, nd);
+       err = graft_tree(mnt, &nd->path);
        if (err) {
                LIST_HEAD(umount_list);
                spin_lock(&vfsmount_lock);
@@ -942,37 +1494,58 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
 
 out:
        up_write(&namespace_sem);
-       path_release(&old_nd);
+       path_put(&old_nd.path);
        return err;
 }
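
The corresponding userspace trigger for do_loopback() is a plain bind mount,
i.e. "mount --bind". An illustrative sketch with made-up paths:

    #include <sys/mount.h>

    int bind_mount(void)
    {
            return mount("/src", "/dst", NULL, MS_BIND, NULL);
    }
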
 
+static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
+{
+       int error = 0;
+       int readonly_request = 0;
+
+       if (ms_flags & MS_RDONLY)
+               readonly_request = 1;
+       if (readonly_request == __mnt_is_readonly(mnt))
+               return 0;
+
+       if (readonly_request)
+               error = mnt_make_readonly(mnt);
+       else
+               __mnt_unmake_readonly(mnt);
+       return error;
+}
+
 /*
  * change filesystem flags. dir should be a physical root of filesystem.
  * If you've mounted a non-root directory somewhere and want to do remount
  * on it - tough luck.
+ * noinline this do_mount helper to save do_mount stack space.
  */
-static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
+static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
                      void *data)
 {
        int err;
-       struct super_block *sb = nd->mnt->mnt_sb;
+       struct super_block *sb = nd->path.mnt->mnt_sb;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       if (!check_mnt(nd->mnt))
+       if (!check_mnt(nd->path.mnt))
                return -EINVAL;
 
-       if (nd->dentry != nd->mnt->mnt_root)
+       if (nd->path.dentry != nd->path.mnt->mnt_root)
                return -EINVAL;
 
        down_write(&sb->s_umount);
-       err = do_remount_sb(sb, flags, data, 0);
+       if (flags & MS_BIND)
+               err = change_mount_flags(nd->path.mnt, flags);
+       else
+               err = do_remount_sb(sb, flags, data, 0);
        if (!err)
-               nd->mnt->mnt_flags = mnt_flags;
+               nd->path.mnt->mnt_flags = mnt_flags;
        up_write(&sb->s_umount);
        if (!err)
-               security_sb_post_remount(nd->mnt, flags, data);
+               security_sb_post_remount(nd->path.mnt, flags, data);
        return err;
 }
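
With the MS_BIND branch above, a remount request can now change only this
mount's write permission instead of the whole superblock. An illustrative
userspace sketch; note that mnt_make_readonly() may still return -EBUSY while
writers hold the mount:

    #include <sys/mount.h>

    int make_mount_readonly(const char *mountpoint)
    {
            return mount("none", mountpoint, NULL,
                         MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
    }
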
 
@@ -986,9 +1559,13 @@ static inline int tree_contains_unbindable(struct vfsmount *mnt)
        return 0;
 }
 
-static int do_move_mount(struct nameidata *nd, char *old_name)
+/*
+ * noinline this do_mount helper to save do_mount stack space.
+ */
+static noinline int do_move_mount(struct nameidata *nd, char *old_name)
 {
-       struct nameidata old_nd, parent_nd;
+       struct nameidata old_nd;
+       struct path parent_path;
        struct vfsmount *p;
        int err = 0;
        if (!capable(CAP_SYS_ADMIN))
@@ -1000,69 +1577,72 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
                return err;
 
        down_write(&namespace_sem);
-       while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+       while (d_mountpoint(nd->path.dentry) &&
+              follow_down(&nd->path.mnt, &nd->path.dentry))
                ;
        err = -EINVAL;
-       if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+       if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
                goto out;
 
        err = -ENOENT;
-       mutex_lock(&nd->dentry->d_inode->i_mutex);
-       if (IS_DEADDIR(nd->dentry->d_inode))
+       mutex_lock(&nd->path.dentry->d_inode->i_mutex);
+       if (IS_DEADDIR(nd->path.dentry->d_inode))
                goto out1;
 
-       if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
+       if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
                goto out1;
 
        err = -EINVAL;
-       if (old_nd.dentry != old_nd.mnt->mnt_root)
+       if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
                goto out1;
 
-       if (old_nd.mnt == old_nd.mnt->mnt_parent)
+       if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
                goto out1;
 
-       if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
-             S_ISDIR(old_nd.dentry->d_inode->i_mode))
+       if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
+             S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
                goto out1;
        /*
         * Don't move a mount residing in a shared parent.
         */
-       if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
+       if (old_nd.path.mnt->mnt_parent &&
+           IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
                goto out1;
        /*
         * Don't move a mount tree containing unbindable mounts to a destination
         * mount which is shared.
         */
-       if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
+       if (IS_MNT_SHARED(nd->path.mnt) &&
+           tree_contains_unbindable(old_nd.path.mnt))
                goto out1;
        err = -ELOOP;
-       for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
-               if (p == old_nd.mnt)
+       for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
+               if (p == old_nd.path.mnt)
                        goto out1;
 
-       if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
+       err = attach_recursive_mnt(old_nd.path.mnt, &nd->path, &parent_path);
+       if (err)
                goto out1;
 
-       spin_lock(&vfsmount_lock);
        /* if the mount is moved, it should no longer be expire
         * automatically */
-       list_del_init(&old_nd.mnt->mnt_expire);
-       spin_unlock(&vfsmount_lock);
+       list_del_init(&old_nd.path.mnt->mnt_expire);
 out1:
-       mutex_unlock(&nd->dentry->d_inode->i_mutex);
+       mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
 out:
        up_write(&namespace_sem);
        if (!err)
-               path_release(&parent_nd);
-       path_release(&old_nd);
+               path_put(&parent_path);
+       path_put(&old_nd.path);
        return err;
 }
 
 /*
  * create a new mount for userspace and request it to be added into the
  * namespace's tree
+ * noinline this do_mount helper to save do_mount stack space.
  */
-static int do_new_mount(struct nameidata *nd, char *type, int flags,
+static noinline int do_new_mount(struct nameidata *nd, char *type, int flags,
                        int mnt_flags, char *name, void *data)
 {
        struct vfsmount *mnt;
@@ -1092,16 +1672,17 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
 
        down_write(&namespace_sem);
        /* Something was mounted here while we slept */
-       while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+       while (d_mountpoint(nd->path.dentry) &&
+              follow_down(&nd->path.mnt, &nd->path.dentry))
                ;
        err = -EINVAL;
-       if (!check_mnt(nd->mnt))
+       if (!check_mnt(nd->path.mnt))
                goto unlock;
 
        /* Refuse the same filesystem on the same mount point */
        err = -EBUSY;
-       if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
-           nd->mnt->mnt_root == nd->dentry)
+       if (nd->path.mnt->mnt_sb == newmnt->mnt_sb &&
+           nd->path.mnt->mnt_root == nd->path.dentry)
                goto unlock;
 
        err = -EINVAL;
@@ -1109,15 +1690,12 @@ int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
                goto unlock;
 
        newmnt->mnt_flags = mnt_flags;
-       if ((err = graft_tree(newmnt, nd)))
+       if ((err = graft_tree(newmnt, &nd->path)))
                goto unlock;
 
-       if (fslist) {
-               /* add to the specified expiration list */
-               spin_lock(&vfsmount_lock);
+       if (fslist) /* add to the specified expiration list */
                list_add_tail(&newmnt->mnt_expire, fslist);
-               spin_unlock(&vfsmount_lock);
-       }
+
        up_write(&namespace_sem);
        return 0;
 
@@ -1129,75 +1707,6 @@ unlock:
 
 EXPORT_SYMBOL_GPL(do_add_mount);
 
-static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
-                               struct list_head *umounts)
-{
-       spin_lock(&vfsmount_lock);
-
-       /*
-        * Check if mount is still attached, if not, let whoever holds it deal
-        * with the sucker
-        */
-       if (mnt->mnt_parent == mnt) {
-               spin_unlock(&vfsmount_lock);
-               return;
-       }
-
-       /*
-        * Check that it is still dead: the count should now be 2 - as
-        * contributed by the vfsmount parent and the mntget above
-        */
-       if (!propagate_mount_busy(mnt, 2)) {
-               /* delete from the namespace */
-               touch_mnt_namespace(mnt->mnt_ns);
-               list_del_init(&mnt->mnt_list);
-               mnt->mnt_ns = NULL;
-               umount_tree(mnt, 1, umounts);
-               spin_unlock(&vfsmount_lock);
-       } else {
-               /*
-                * Someone brought it back to life whilst we didn't have any
-                * locks held so return it to the expiration list
-                */
-               list_add_tail(&mnt->mnt_expire, mounts);
-               spin_unlock(&vfsmount_lock);
-       }
-}
-
-/*
- * go through the vfsmounts we've just consigned to the graveyard to
- * - check that they're still dead
- * - delete the vfsmount from the appropriate namespace under lock
- * - dispose of the corpse
- */
-static void expire_mount_list(struct list_head *graveyard, struct list_head *mounts)
-{
-       struct mnt_namespace *ns;
-       struct vfsmount *mnt;
-
-       while (!list_empty(graveyard)) {
-               LIST_HEAD(umounts);
-               mnt = list_first_entry(graveyard, struct vfsmount, mnt_expire);
-               list_del_init(&mnt->mnt_expire);
-
-               /* don't do anything if the namespace is dead - all the
-                * vfsmounts from it are going away anyway */
-               ns = mnt->mnt_ns;
-               if (!ns || !ns->root)
-                       continue;
-               get_mnt_ns(ns);
-
-               spin_unlock(&vfsmount_lock);
-               down_write(&namespace_sem);
-               expire_mount(mnt, mounts, &umounts);
-               up_write(&namespace_sem);
-               release_mounts(&umounts);
-               mntput(mnt);
-               put_mnt_ns(ns);
-               spin_lock(&vfsmount_lock);
-       }
-}
-
 /*
  * process a list of expirable mountpoints with the intent of discarding any
  * mountpoints that aren't in use and haven't been touched since last we came
@@ -1207,10 +1716,12 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 {
        struct vfsmount *mnt, *next;
        LIST_HEAD(graveyard);
+       LIST_HEAD(umounts);
 
        if (list_empty(mounts))
                return;
 
+       down_write(&namespace_sem);
        spin_lock(&vfsmount_lock);
 
        /* extract from the expiration list every vfsmount that matches the
@@ -1221,16 +1732,19 @@ void mark_mounts_for_expiry(struct list_head *mounts)
         */
        list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
                if (!xchg(&mnt->mnt_expiry_mark, 1) ||
-                   atomic_read(&mnt->mnt_count) != 1)
+                       propagate_mount_busy(mnt, 1))
                        continue;
-
-               mntget(mnt);
                list_move(&mnt->mnt_expire, &graveyard);
        }
-
-       expire_mount_list(&graveyard, mounts);
-
+       while (!list_empty(&graveyard)) {
+               mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
+               touch_mnt_namespace(mnt->mnt_ns);
+               umount_tree(mnt, 1, &umounts);
+       }
        spin_unlock(&vfsmount_lock);
+       up_write(&namespace_sem);
+
+       release_mounts(&umounts);
 }
 
 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
@@ -1266,7 +1780,6 @@ resume:
                }
 
                if (!propagate_mount_busy(mnt, 1)) {
-                       mntget(mnt);
                        list_move_tail(&mnt->mnt_expire, graveyard);
                        found++;
                }
@@ -1286,22 +1799,22 @@ resume:
  * process a list of expirable mountpoints with the intent of discarding any
  * submounts of a specific parent mountpoint
  */
-void shrink_submounts(struct vfsmount *mountpoint, struct list_head *mounts)
+static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
 {
        LIST_HEAD(graveyard);
-       int found;
-
-       spin_lock(&vfsmount_lock);
+       struct vfsmount *m;
 
        /* extract submounts of 'mountpoint' from the expiration list */
-       while ((found = select_submounts(mountpoint, &graveyard)) != 0)
-               expire_mount_list(&graveyard, mounts);
-
-       spin_unlock(&vfsmount_lock);
+       while (select_submounts(mnt, &graveyard)) {
+               while (!list_empty(&graveyard)) {
+                       m = list_first_entry(&graveyard, struct vfsmount,
+                                               mnt_expire);
+                       touch_mnt_namespace(m->mnt_ns);
+                       umount_tree(m, 1, umounts);
+               }
+       }
 }
 
-EXPORT_SYMBOL_GPL(shrink_submounts);
-
 /*
  * Some copy_from_user() implementations do not return the exact number of
  * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
@@ -1411,16 +1924,19 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
                mnt_flags |= MNT_NODIRATIME;
        if (flags & MS_RELATIME)
                mnt_flags |= MNT_RELATIME;
+       if (flags & MS_RDONLY)
+               mnt_flags |= MNT_READONLY;
 
        flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
-                  MS_NOATIME | MS_NODIRATIME | MS_RELATIME);
+                  MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT);
 
        /* ... and get the mountpoint */
        retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
        if (retval)
                return retval;
 
-       retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
+       retval = security_sb_mount(dev_name, &nd.path,
+                                  type_page, flags, data_page);
        if (retval)
                goto dput_out;
 
@@ -1437,7 +1953,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
                retval = do_new_mount(&nd, type_page, flags, mnt_flags,
                                      dev_name, data_page);
 dput_out:
-       path_release(&nd);
+       path_put(&nd.path);
        return retval;
 }
 
@@ -1454,7 +1970,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 
        new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
        if (!new_ns)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        atomic_set(&new_ns->count, 1);
        INIT_LIST_HEAD(&new_ns->list);
@@ -1468,7 +1984,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
        if (!new_ns->root) {
                up_write(&namespace_sem);
                kfree(new_ns);
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
        spin_lock(&vfsmount_lock);
        list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
@@ -1484,17 +2000,17 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
        while (p) {
                q->mnt_ns = new_ns;
                if (fs) {
-                       if (p == fs->rootmnt) {
+                       if (p == fs->root.mnt) {
                                rootmnt = p;
-                               fs->rootmnt = mntget(q);
+                               fs->root.mnt = mntget(q);
                        }
-                       if (p == fs->pwdmnt) {
+                       if (p == fs->pwd.mnt) {
                                pwdmnt = p;
-                               fs->pwdmnt = mntget(q);
+                               fs->pwd.mnt = mntget(q);
                        }
-                       if (p == fs->altrootmnt) {
+                       if (p == fs->altroot.mnt) {
                                altrootmnt = p;
-                               fs->altrootmnt = mntget(q);
+                               fs->altroot.mnt = mntget(q);
                        }
                }
                p = next_mnt(p, mnt_ns->root);
@@ -1512,7 +2028,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
        return new_ns;
 }
 
-struct mnt_namespace *copy_mnt_ns(int flags, struct mnt_namespace *ns,
+struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
                struct fs_struct *new_fs)
 {
        struct mnt_namespace *new_ns;
@@ -1575,47 +2091,38 @@ out1:
  * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
  * It can block. Requires the big lock held.
  */
-void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
-                struct dentry *dentry)
+void set_fs_root(struct fs_struct *fs, struct path *path)
 {
-       struct dentry *old_root;
-       struct vfsmount *old_rootmnt;
+       struct path old_root;
+
        write_lock(&fs->lock);
        old_root = fs->root;
-       old_rootmnt = fs->rootmnt;
-       fs->rootmnt = mntget(mnt);
-       fs->root = dget(dentry);
+       fs->root = *path;
+       path_get(path);
        write_unlock(&fs->lock);
-       if (old_root) {
-               dput(old_root);
-               mntput(old_rootmnt);
-       }
+       if (old_root.dentry)
+               path_put(&old_root);
 }
 
 /*
- * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
+ * Replace fs->pwd with the given path. Put the old value.
  * It can block. Requires the big lock held.
  */
-void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
-               struct dentry *dentry)
+void set_fs_pwd(struct fs_struct *fs, struct path *path)
 {
-       struct dentry *old_pwd;
-       struct vfsmount *old_pwdmnt;
+       struct path old_pwd;
 
        write_lock(&fs->lock);
        old_pwd = fs->pwd;
-       old_pwdmnt = fs->pwdmnt;
-       fs->pwdmnt = mntget(mnt);
-       fs->pwd = dget(dentry);
+       fs->pwd = *path;
+       path_get(path);
        write_unlock(&fs->lock);
 
-       if (old_pwd) {
-               dput(old_pwd);
-               mntput(old_pwdmnt);
-       }
+       if (old_pwd.dentry)
+               path_put(&old_pwd);
 }
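
Reviewer note: both setters now copy a struct path by value and take a single path_get() in place of the separate mntget()/dget() pair, and release the old value with one path_put(). A hedged sketch of a caller, assuming a vfsmount pointer 'mnt' that the caller already keeps alive (the function takes its own reference, so the caller's path is only borrowed):

    /* Illustrative caller; mirrors how init_mount_tree() uses the new API. */
    struct path root = {
            .mnt    = mnt,
            .dentry = mnt->mnt_root,
    };
    set_fs_root(current->fs, &root);   /* grabs its own refs via path_get() */
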
 
-static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
+static void chroot_fs_refs(struct path *old_root, struct path *new_root)
 {
        struct task_struct *g, *p;
        struct fs_struct *fs;
@@ -1627,12 +2134,12 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
                if (fs) {
                        atomic_inc(&fs->count);
                        task_unlock(p);
-                       if (fs->root == old_nd->dentry
-                           && fs->rootmnt == old_nd->mnt)
-                               set_fs_root(fs, new_nd->mnt, new_nd->dentry);
-                       if (fs->pwd == old_nd->dentry
-                           && fs->pwdmnt == old_nd->mnt)
-                               set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
+                       if (fs->root.dentry == old_root->dentry
+                           && fs->root.mnt == old_root->mnt)
+                               set_fs_root(fs, new_root);
+                       if (fs->pwd.dentry == old_root->dentry
+                           && fs->pwd.mnt == old_root->mnt)
+                               set_fs_pwd(fs, new_root);
                        put_fs_struct(fs);
                } else
                        task_unlock(p);
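
Reviewer note: chroot_fs_refs() now compares the mnt and dentry members of struct path explicitly. If this pattern keeps recurring, a small equality helper would keep the conditions readable; a hypothetical sketch, not part of this patch:

    /* Hypothetical helper (not in this patch): compare two struct path values. */
    static inline int path_equal(const struct path *a, const struct path *b)
    {
            return a->mnt == b->mnt && a->dentry == b->dentry;
    }

    /* e.g.: if (path_equal(&fs->root, old_root)) set_fs_root(fs, new_root); */
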
@@ -1669,98 +2176,100 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
                               const char __user * put_old)
 {
        struct vfsmount *tmp;
-       struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
+       struct nameidata new_nd, old_nd;
+       struct path parent_path, root_parent, root;
        int error;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       lock_kernel();
-
        error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
                            &new_nd);
        if (error)
                goto out0;
        error = -EINVAL;
-       if (!check_mnt(new_nd.mnt))
+       if (!check_mnt(new_nd.path.mnt))
                goto out1;
 
        error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
        if (error)
                goto out1;
 
-       error = security_sb_pivotroot(&old_nd, &new_nd);
+       error = security_sb_pivotroot(&old_nd.path, &new_nd.path);
        if (error) {
-               path_release(&old_nd);
+               path_put(&old_nd.path);
                goto out1;
        }
 
        read_lock(&current->fs->lock);
-       user_nd.mnt = mntget(current->fs->rootmnt);
-       user_nd.dentry = dget(current->fs->root);
+       root = current->fs->root;
+       path_get(&current->fs->root);
        read_unlock(&current->fs->lock);
        down_write(&namespace_sem);
-       mutex_lock(&old_nd.dentry->d_inode->i_mutex);
+       mutex_lock(&old_nd.path.dentry->d_inode->i_mutex);
        error = -EINVAL;
-       if (IS_MNT_SHARED(old_nd.mnt) ||
-               IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
-               IS_MNT_SHARED(user_nd.mnt->mnt_parent))
+       if (IS_MNT_SHARED(old_nd.path.mnt) ||
+               IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
+               IS_MNT_SHARED(root.mnt->mnt_parent))
                goto out2;
-       if (!check_mnt(user_nd.mnt))
+       if (!check_mnt(root.mnt))
                goto out2;
        error = -ENOENT;
-       if (IS_DEADDIR(new_nd.dentry->d_inode))
+       if (IS_DEADDIR(new_nd.path.dentry->d_inode))
                goto out2;
-       if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
+       if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry))
                goto out2;
-       if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
+       if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
                goto out2;
        error = -EBUSY;
-       if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
+       if (new_nd.path.mnt == root.mnt ||
+           old_nd.path.mnt == root.mnt)
                goto out2; /* loop, on the same file system  */
        error = -EINVAL;
-       if (user_nd.mnt->mnt_root != user_nd.dentry)
+       if (root.mnt->mnt_root != root.dentry)
                goto out2; /* not a mountpoint */
-       if (user_nd.mnt->mnt_parent == user_nd.mnt)
+       if (root.mnt->mnt_parent == root.mnt)
                goto out2; /* not attached */
-       if (new_nd.mnt->mnt_root != new_nd.dentry)
+       if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
                goto out2; /* not a mountpoint */
-       if (new_nd.mnt->mnt_parent == new_nd.mnt)
+       if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt)
                goto out2; /* not attached */
-       tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
+       /* make sure we can reach put_old from new_root */
+       tmp = old_nd.path.mnt;
        spin_lock(&vfsmount_lock);
-       if (tmp != new_nd.mnt) {
+       if (tmp != new_nd.path.mnt) {
                for (;;) {
                        if (tmp->mnt_parent == tmp)
                                goto out3; /* already mounted on put_old */
-                       if (tmp->mnt_parent == new_nd.mnt)
+                       if (tmp->mnt_parent == new_nd.path.mnt)
                                break;
                        tmp = tmp->mnt_parent;
                }
-               if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
+               if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry))
                        goto out3;
-       } else if (!is_subdir(old_nd.dentry, new_nd.dentry))
+       } else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
                goto out3;
-       detach_mnt(new_nd.mnt, &parent_nd);
-       detach_mnt(user_nd.mnt, &root_parent);
-       attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
-       attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
+       detach_mnt(new_nd.path.mnt, &parent_path);
+       detach_mnt(root.mnt, &root_parent);
+       /* mount old root on put_old */
+       attach_mnt(root.mnt, &old_nd.path);
+       /* mount new_root on / */
+       attach_mnt(new_nd.path.mnt, &root_parent);
        touch_mnt_namespace(current->nsproxy->mnt_ns);
        spin_unlock(&vfsmount_lock);
-       chroot_fs_refs(&user_nd, &new_nd);
-       security_sb_post_pivotroot(&user_nd, &new_nd);
+       chroot_fs_refs(&root, &new_nd.path);
+       security_sb_post_pivotroot(&root, &new_nd.path);
        error = 0;
-       path_release(&root_parent);
-       path_release(&parent_nd);
+       path_put(&root_parent);
+       path_put(&parent_path);
 out2:
-       mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
+       mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
        up_write(&namespace_sem);
-       path_release(&user_nd);
-       path_release(&old_nd);
+       path_put(&root);
+       path_put(&old_nd.path);
 out1:
-       path_release(&new_nd);
+       path_put(&new_nd.path);
 out0:
-       unlock_kernel();
        return error;
 out3:
        spin_unlock(&vfsmount_lock);
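
Reviewer note: the pivot_root() hunk drops lock_kernel()/unlock_kernel() and carries struct path through all the checks; the shared-mount, mountpoint and reachability tests are unchanged in substance (put_old must be a directory reachable underneath new_root). For orientation, a hedged userspace sketch of the call these checks guard; the wrapper name is illustrative:

    /* Userspace sketch: pivot_root(2) via syscall(); error handling elided. */
    #include <sys/syscall.h>
    #include <unistd.h>

    static int do_pivot(const char *new_root, const char *put_old)
    {
            return syscall(SYS_pivot_root, new_root, put_old);
    }
    /* e.g. do_pivot("/mnt/newroot", "/mnt/newroot/oldroot"); */
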
@@ -1771,6 +2280,7 @@ static void __init init_mount_tree(void)
 {
        struct vfsmount *mnt;
        struct mnt_namespace *ns;
+       struct path root;
 
        mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
        if (IS_ERR(mnt))
@@ -1789,64 +2299,40 @@ static void __init init_mount_tree(void)
        init_task.nsproxy->mnt_ns = ns;
        get_mnt_ns(ns);
 
-       set_fs_pwd(current->fs, ns->root, ns->root->mnt_root);
-       set_fs_root(current->fs, ns->root, ns->root->mnt_root);
+       root.mnt = ns->root;
+       root.dentry = ns->root->mnt_root;
+
+       set_fs_pwd(current->fs, &root);
+       set_fs_root(current->fs, &root);
 }
 
-void __init mnt_init(unsigned long mempages)
+void __init mnt_init(void)
 {
-       struct list_head *d;
-       unsigned int nr_hash;
-       int i;
+       unsigned u;
        int err;
 
        init_rwsem(&namespace_sem);
 
        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
-                       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
+                       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
        mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
 
        if (!mount_hashtable)
                panic("Failed to allocate mount hash table\n");
 
-       /*
-        * Find the power-of-two list-heads that can fit into the allocation..
-        * We don't guarantee that "sizeof(struct list_head)" is necessarily
-        * a power-of-two.
-        */
-       nr_hash = PAGE_SIZE / sizeof(struct list_head);
-       hash_bits = 0;
-       do {
-               hash_bits++;
-       } while ((nr_hash >> hash_bits) != 0);
-       hash_bits--;
+       printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
+
+       for (u = 0; u < HASH_SIZE; u++)
+               INIT_LIST_HEAD(&mount_hashtable[u]);
 
-       /*
-        * Re-calculate the actual number of entries and the mask
-        * from the number of bits we can fit.
-        */
-       nr_hash = 1UL << hash_bits;
-       hash_mask = nr_hash - 1;
-
-       printk("Mount-cache hash table entries: %d\n", nr_hash);
-
-       /* And initialize the newly allocated array */
-       d = mount_hashtable;
-       i = nr_hash;
-       do {
-               INIT_LIST_HEAD(d);
-               d++;
-               i--;
-       } while (i);
        err = sysfs_init();
        if (err)
                printk(KERN_WARNING "%s: sysfs_init error: %d\n",
                        __FUNCTION__, err);
-       err = subsystem_register(&fs_subsys);
-       if (err)
-               printk(KERN_WARNING "%s: subsystem_register error: %d\n",
-                       __FUNCTION__, err);
+       fs_kobj = kobject_create_and_add("fs", NULL);
+       if (!fs_kobj)
+               printk(KERN_WARNING "%s: kobj create error\n", __FUNCTION__);
        init_rootfs();
        init_mount_tree();
 }
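
Reviewer note: the removed loop derived hash_bits and hash_mask at runtime from the number of list heads that fit in a page; the replacement prints the compile-time HASH_SIZE instead. A quick sanity-check sketch of the same sizing logic, assuming a 4 KiB page and a 16-byte struct list_head (typical on 64-bit); the variable names are illustrative:

    /* Sketch: power-of-two sizing that the removed loop used to compute.
     * With PAGE_SIZE == 4096 and sizeof(struct list_head) == 16,
     * 256 entries fit, so the shift is 8 and the mask is 0xff. */
    unsigned int nr_hash = 4096 / 16;       /* list heads per page */
    unsigned int shift = 0;
    while ((nr_hash >> shift) > 1)
            shift++;                        /* shift == 8 */
    unsigned long size = 1UL << shift;      /* 256 table entries */
    unsigned long mask = size - 1;          /* 0xff */
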