* Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
+#include <linux/cleancache.h>
+#include <linux/fsnotify.h>
#include "internal.h"
LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);
+/*
+ * One thing we have to be careful of with a per-sb shrinker is that we don't
+ * drop the last active reference to the superblock from within the shrinker.
+ * If that happens we could trigger unregistering the shrinker from within the
+ * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
+ * take a passive reference to the superblock to prevent this from occurring.
+ */
+static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct super_block *sb;
+ int fs_objects = 0;
+ int total_objects;
+
+ sb = container_of(shrink, struct super_block, s_shrink);
+
+ /*
+ * Deadlock avoidance. We may hold various FS locks, and we don't want
+ * to recurse into the FS that called us in clear_inode() and friends.
+ */
+ if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+ return -1;
+
+ if (!grab_super_passive(sb))
+ return !sc->nr_to_scan ? 0 : -1;
+
+ if (sb->s_op && sb->s_op->nr_cached_objects)
+ fs_objects = sb->s_op->nr_cached_objects(sb);
+
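+ /* the +1 below guards against a zero divisor when all the caches are empty */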
+ total_objects = sb->s_nr_dentry_unused +
+ sb->s_nr_inodes_unused + fs_objects + 1;
+
+ if (sc->nr_to_scan) {
+ int dentries;
+ int inodes;
+
+ /* proportion the scan between the caches */
+ dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
+ total_objects;
+ inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
+ total_objects;
+ if (fs_objects)
+ fs_objects = (sc->nr_to_scan * fs_objects) /
+ total_objects;
+ /*
+ * prune the dcache first as the icache is pinned by it, then
+ * prune the icache, followed by the filesystem-specific caches
+ */
+ prune_dcache_sb(sb, dentries);
+ prune_icache_sb(sb, inodes);
+
+ if (fs_objects && sb->s_op->free_cached_objects) {
+ sb->s_op->free_cached_objects(sb, fs_objects);
+ fs_objects = sb->s_op->nr_cached_objects(sb);
+ }
+ total_objects = sb->s_nr_dentry_unused +
+ sb->s_nr_inodes_unused + fs_objects;
+ }
+
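+ /* report the count of freeable objects, scaled by vfs_cache_pressure (default 100) */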
+ total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
+ drop_super(sb);
+ return total_objects;
+}
+
/**
* alloc_super - create new superblock
* @type: filesystem type superblock should belong to
#else
INIT_LIST_HEAD(&s->s_files);
#endif
- INIT_LIST_HEAD(&s->s_instances);
+ s->s_bdi = &default_backing_dev_info;
+ INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
INIT_LIST_HEAD(&s->s_dentry_lru);
+ INIT_LIST_HEAD(&s->s_inode_lru);
+ spin_lock_init(&s->s_inode_lru_lock);
+ INIT_LIST_HEAD(&s->s_mounts);
init_rwsem(&s->s_umount);
mutex_init(&s->s_lock);
lockdep_set_class(&s->s_umount, &type->s_umount_key);
s->s_maxbytes = MAX_NON_LFS;
s->s_op = &default_op;
s->s_time_gran = 1000000000;
+ s->cleancache_poolid = -1;
+
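+ /* set up the per-sb shrinker state here; it is registered later in sget() */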
+ s->s_shrink.seeks = DEFAULT_SEEKS;
+ s->s_shrink.shrink = prune_super;
+ s->s_shrink.batch = 1024;
}
out:
return s;
free_percpu(s->s_files);
#endif
security_sb_free(s);
+ WARN_ON(!list_empty(&s->s_mounts));
kfree(s->s_subtype);
kfree(s->s_options);
kfree(s);
/*
* Drop a superblock's refcount. The caller must hold sb_lock.
*/
-void __put_super(struct super_block *sb)
+static void __put_super(struct super_block *sb)
{
if (!--sb->s_count) {
list_del_init(&sb->s_list);
* Drops a temporary reference, frees superblock if there's no
* references left.
*/
-void put_super(struct super_block *sb)
+static void put_super(struct super_block *sb)
{
spin_lock(&sb_lock);
__put_super(sb);
{
struct file_system_type *fs = s->s_type;
if (atomic_dec_and_test(&s->s_active)) {
+ cleancache_invalidate_fs(s);
fs->kill_sb(s);
+
+ /* caches are now gone, we can safely kill the shrinker */
+ unregister_shrinker(&s->s_shrink);
+
/*
* We need to call rcu_barrier so all the delayed rcu free
* inodes are flushed before we release the fs module.
return 0;
}
+/*
+ * grab_super_passive - acquire a passive reference
+ * @sb: superblock we are trying to take a passive reference to
+ *
+ * Tries to acquire a passive reference. This is used in places where we
+ * cannot take an active reference but we need to ensure that the
+ * superblock does not go away while we are working on it. It returns
+ * false if a reference was not gained, and returns true with the s_umount
+ * lock held in read mode if a reference is gained. On successful return,
+ * the caller must drop the s_umount lock and the passive reference when
+ * done.
+ */
+bool grab_super_passive(struct super_block *sb)
+{
+ spin_lock(&sb_lock);
+ if (hlist_unhashed(&sb->s_instances)) {
+ spin_unlock(&sb_lock);
+ return false;
+ }
+
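+ /* take a passive count under sb_lock so the superblock can't be freed under us */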
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+
+ if (down_read_trylock(&sb->s_umount)) {
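+ /* only report success for a fully set up (MS_BORN), still-mounted superblock */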
+ if (sb->s_root && (sb->s_flags & MS_BORN))
+ return true;
+ up_read(&sb->s_umount);
+ }
+
+ put_super(sb);
+ return false;
+}
+
/*
* Superblock locking. We really ought to get rid of these two.
*/
void lock_super(struct super_block * sb)
{
- get_fs_excl();
mutex_lock(&sb->s_lock);
}
void unlock_super(struct super_block * sb)
{
- put_fs_excl();
mutex_unlock(&sb->s_lock);
}
{
const struct super_operations *sop = sb->s_op;
-
if (sb->s_root) {
shrink_dcache_for_umount(sb);
sync_filesystem(sb);
- get_fs_excl();
sb->s_flags &= ~MS_ACTIVE;
fsnotify_unmount_inodes(&sb->s_inodes);
"Self-destruct in 5 seconds. Have a nice day...\n",
sb->s_id);
}
- put_fs_excl();
}
spin_lock(&sb_lock);
/* should be initialized for __put_super_and_need_restart() */
- list_del_init(&sb->s_instances);
+ hlist_del_init(&sb->s_instances);
spin_unlock(&sb_lock);
up_write(&sb->s_umount);
}
void *data)
{
struct super_block *s = NULL;
+ struct hlist_node *node;
struct super_block *old;
int err;
retry:
spin_lock(&sb_lock);
if (test) {
- list_for_each_entry(old, &type->fs_supers, s_instances) {
+ hlist_for_each_entry(old, node, &type->fs_supers, s_instances) {
if (!test(old, data))
continue;
if (!grab_super(old))
s->s_type = type;
strlcpy(s->s_id, type->name, sizeof(s->s_id));
list_add_tail(&s->s_list, &super_blocks);
- list_add(&s->s_instances, &type->fs_supers);
+ hlist_add_head(&s->s_instances, &type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(type);
+ register_shrinker(&s->s_shrink);
return s;
}
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_op->write_super && sb->s_dirt) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
- if (sb->s_root && sb->s_dirt)
+ if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
sb->s_op->write_super(sb);
up_read(&sb->s_umount);
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
f(sb, arg);
up_read(&sb->s_umount);
spin_unlock(&sb_lock);
}
+/**
+ * iterate_supers_type - call function for superblocks of given type
+ * @type: fs type
+ * @f: function to call
+ * @arg: argument to pass to it
+ *
+ * Scans the superblocks of the given type and calls the given function,
+ * passing it the locked superblock and the given argument.
+ */
+void iterate_supers_type(struct file_system_type *type,
+ void (*f)(struct super_block *, void *), void *arg)
+{
+ struct super_block *sb, *p = NULL;
+ struct hlist_node *node;
+
+ spin_lock(&sb_lock);
+ hlist_for_each_entry(sb, node, &type->fs_supers, s_instances) {
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+
+ down_read(&sb->s_umount);
+ if (sb->s_root && (sb->s_flags & MS_BORN))
+ f(sb, arg);
+ up_read(&sb->s_umount);
+
+ spin_lock(&sb_lock);
+ if (p)
+ __put_super(p);
+ p = sb;
+ }
+ if (p)
+ __put_super(p);
+ spin_unlock(&sb_lock);
+}
+
+EXPORT_SYMBOL(iterate_supers_type);
+
/**
* get_super - get the superblock of a device
* @bdev: device to get the superblock for
spin_lock(&sb_lock);
rescan:
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_bdev == bdev) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
/* still alive? */
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
return sb;
up_read(&sb->s_umount);
/* nope, got unmounted */
EXPORT_SYMBOL(get_super);
+/**
+ * get_super_thawed - get thawed superblock of a device
+ * @bdev: device to get the superblock for
+ *
+ * Scans the superblock list and finds the superblock of the file system
+ * mounted on the device. The superblock is returned once it is thawed
+ * (or immediately if it was not frozen). %NULL is returned if no match
+ * is found.
+ */
+struct super_block *get_super_thawed(struct block_device *bdev)
+{
+ while (1) {
+ struct super_block *s = get_super(bdev);
+ if (!s || s->s_frozen == SB_UNFROZEN)
+ return s;
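+ /* frozen: drop s_umount, wait for the thaw, then retry the lookup */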
+ up_read(&s->s_umount);
+ vfs_check_frozen(s, SB_FREEZE_WRITE);
+ put_super(s);
+ }
+}
+EXPORT_SYMBOL(get_super_thawed);
+
/**
* get_active_super - get an active reference to the superblock of a device
* @bdev: device to get the superblock for
restart:
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_bdev == bdev) {
if (grab_super(sb)) /* drops sb_lock */
spin_lock(&sb_lock);
rescan:
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
if (sb->s_dev == dev) {
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
/* still alive? */
- if (sb->s_root)
+ if (sb->s_root && (sb->s_flags & MS_BORN))
return sb;
up_read(&sb->s_umount);
/* nope, got unmounted */
/* If we are remounting RDONLY and current sb is read/write,
make sure there are no rw files opened */
if (remount_ro) {
- if (force)
+ if (force) {
mark_files_ro(sb);
- else if (!fs_may_remount_ro(sb))
- return -EBUSY;
+ } else {
+ retval = sb_prepare_remount_readonly(sb);
+ if (retval)
+ return retval;
+ }
}
if (sb->s_op->remount_fs) {
retval = sb->s_op->remount_fs(sb, &flags, data);
- if (retval)
- return retval;
+ if (retval) {
+ if (!force)
+ goto cancel_readonly;
+ /* If forced remount, go ahead despite any errors */
+ WARN(1, "forced remount of a %s fs returned %i\n",
+ sb->s_type->name, retval);
+ }
}
sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
+ /* Needs to be ordered wrt mnt_is_readonly() */
+ smp_wmb();
+ sb->s_readonly_remount = 0;
/*
* Some filesystems modify their metadata via some other path than the
if (remount_ro && sb->s_bdev)
invalidate_bdev(sb->s_bdev);
return 0;
+
+cancel_readonly:
+ sb->s_readonly_remount = 0;
+ return retval;
}
static void do_emergency_remount(struct work_struct *work)
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (list_empty(&sb->s_instances))
+ if (hlist_unhashed(&sb->s_instances))
continue;
sb->s_count++;
spin_unlock(&sb_lock);
down_write(&sb->s_umount);
- if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
+ if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
+ !(sb->s_flags & MS_RDONLY)) {
/*
* What lock protects sb->s_flags??
*/
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */
-int set_anon_super(struct super_block *s, void *data)
+int get_anon_bdev(dev_t *p)
{
int dev;
int error;
spin_unlock(&unnamed_dev_lock);
return -EMFILE;
}
- s->s_dev = MKDEV(0, dev & MINORMASK);
- s->s_bdi = &noop_backing_dev_info;
+ *p = MKDEV(0, dev & MINORMASK);
return 0;
}
+EXPORT_SYMBOL(get_anon_bdev);
-EXPORT_SYMBOL(set_anon_super);
-
-void kill_anon_super(struct super_block *sb)
+void free_anon_bdev(dev_t dev)
{
- int slot = MINOR(sb->s_dev);
-
- generic_shutdown_super(sb);
+ int slot = MINOR(dev);
spin_lock(&unnamed_dev_lock);
ida_remove(&unnamed_dev_ida, slot);
if (slot < unnamed_dev_start)
unnamed_dev_start = slot;
spin_unlock(&unnamed_dev_lock);
}
+EXPORT_SYMBOL(free_anon_bdev);
+
+int set_anon_super(struct super_block *s, void *data)
+{
+ int error = get_anon_bdev(&s->s_dev);
+ if (!error)
+ s->s_bdi = &noop_backing_dev_info;
+ return error;
+}
+
+EXPORT_SYMBOL(set_anon_super);
+
+void kill_anon_super(struct super_block *sb)
+{
+ dev_t dev = sb->s_dev;
+ generic_shutdown_super(sb);
+ free_anon_bdev(dev);
+}
EXPORT_SYMBOL(kill_anon_super);
} else {
char b[BDEVNAME_SIZE];
- s->s_flags = flags;
+ s->s_flags = flags | MS_NOSEC;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(bdev));
}
EXPORT_SYMBOL(mount_single);
-struct vfsmount *
-vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
+struct dentry *
+mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
- struct vfsmount *mnt;
struct dentry *root;
+ struct super_block *sb;
char *secdata = NULL;
- int error;
-
- if (!type)
- return ERR_PTR(-ENODEV);
-
- error = -ENOMEM;
- mnt = alloc_vfsmnt(name);
- if (!mnt)
- goto out;
-
- if (flags & MS_KERNMOUNT)
- mnt->mnt_flags = MNT_INTERNAL;
+ int error = -ENOMEM;
if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
secdata = alloc_secdata();
if (!secdata)
- goto out_mnt;
+ goto out;
error = security_sb_copy_data(data, secdata);
if (error)
error = PTR_ERR(root);
goto out_free_secdata;
}
- mnt->mnt_root = root;
- mnt->mnt_sb = root->d_sb;
- BUG_ON(!mnt->mnt_sb);
- WARN_ON(!mnt->mnt_sb->s_bdi);
- mnt->mnt_sb->s_flags |= MS_BORN;
+ sb = root->d_sb;
+ BUG_ON(!sb);
+ WARN_ON(!sb->s_bdi);
+ WARN_ON(sb->s_bdi == &default_backing_dev_info);
+ sb->s_flags |= MS_BORN;
- error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
+ error = security_sb_kern_mount(sb, flags, secdata);
if (error)
goto out_sb;
* filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
* but s_maxbytes was an unsigned long long for many releases. Throw
* this warning for a little while to try and catch filesystems that
- * violate this rule. This warning should be either removed or
- * converted to a BUG() in 2.6.34.
+ * violate this rule.
*/
- WARN((mnt->mnt_sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
- "negative value (%lld)\n", type->name, mnt->mnt_sb->s_maxbytes);
+ WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
+ "negative value (%lld)\n", type->name, sb->s_maxbytes);
- mnt->mnt_mountpoint = mnt->mnt_root;
- mnt->mnt_parent = mnt;
- up_write(&mnt->mnt_sb->s_umount);
+ up_write(&sb->s_umount);
free_secdata(secdata);
- return mnt;
+ return root;
out_sb:
- dput(mnt->mnt_root);
- deactivate_locked_super(mnt->mnt_sb);
+ dput(root);
+ deactivate_locked_super(sb);
out_free_secdata:
free_secdata(secdata);
-out_mnt:
- free_vfsmnt(mnt);
out:
return ERR_PTR(error);
}
-EXPORT_SYMBOL_GPL(vfs_kern_mount);
-
/**
* freeze_super - lock the filesystem and force it into a consistent state
* @sb: the super to lock
return -EBUSY;
}
+ if (!(sb->s_flags & MS_BORN)) {
+ up_write(&sb->s_umount);
+ return 0; /* sic - it's "nothing to do" */
+ }
+
if (sb->s_flags & MS_RDONLY) {
sb->s_frozen = SB_FREEZE_TRANS;
smp_wmb();
printk(KERN_ERR
"VFS:Filesystem freeze failed\n");
sb->s_frozen = SB_UNFROZEN;
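+ /* make the reset visible, then wake anyone sleeping in vfs_check_frozen() */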
+ smp_wmb();
+ wake_up(&sb->s_wait_unfrozen);
deactivate_locked_super(sb);
return ret;
}
return 0;
}
EXPORT_SYMBOL(thaw_super);
-
-static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
-{
- int err;
- const char *subtype = strchr(fstype, '.');
- if (subtype) {
- subtype++;
- err = -EINVAL;
- if (!subtype[0])
- goto err;
- } else
- subtype = "";
-
- mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
- err = -ENOMEM;
- if (!mnt->mnt_sb->s_subtype)
- goto err;
- return mnt;
-
- err:
- mntput(mnt);
- return ERR_PTR(err);
-}
-
-struct vfsmount *
-do_kern_mount(const char *fstype, int flags, const char *name, void *data)
-{
- struct file_system_type *type = get_fs_type(fstype);
- struct vfsmount *mnt;
- if (!type)
- return ERR_PTR(-ENODEV);
- mnt = vfs_kern_mount(type, flags, name, data);
- if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
- !mnt->mnt_sb->s_subtype)
- mnt = fs_set_subtype(mnt, fstype);
- put_filesystem(type);
- return mnt;
-}
-EXPORT_SYMBOL_GPL(do_kern_mount);
-
-struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
-{
- return vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
-}
-
-EXPORT_SYMBOL_GPL(kern_mount_data);