@@ -39,7 +39,7 @@ static int mnt_group_start = 1;
 static struct list_head *mount_hashtable __read_mostly;
 static struct list_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
-static struct rw_semaphore namespace_sem;
+static DECLARE_RWSEM(namespace_sem);
 
 /* /sys/fs */
 struct kobject *fs_kobj;
@@ -53,7 +53,7 @@ EXPORT_SYMBOL_GPL(fs_kobj);
  * It should be taken for write in all cases where the vfsmount
  * tree or hash is modified or when a vfsmount structure is modified.
  */
-DEFINE_BRLOCK(vfsmount_lock);
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
 
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
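The new mount_lock is a seqlock: writers serialize on its internal spinlock and bump the sequence count, while lockless readers sample the count with read_seqbegin() and retry if it moved. The lock_mount_hash()/unlock_mount_hash() helpers used throughout the hunks below are not defined in this file; a minimal sketch of what they would wrap, assuming they sit next to the mount_lock declaration in fs/mount.h, is:

	extern seqlock_t mount_lock;

	static inline void lock_mount_hash(void)
	{
		write_seqlock(&mount_lock);	/* take the spinlock and bump the seqcount */
	}

	static inline void unlock_mount_hash(void)
	{
		write_sequnlock(&mount_lock);
	}

The __cacheline_aligned_in_smp annotation gives the lock its own cache line, since every mount-tree writer and every lockless path-walk reader now touches it.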
@@ -63,8 +63,6 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 	return tmp & (HASH_SIZE - 1);
 }
 
-#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
-
 /*
  * allocation is serialized by namespace_sem, but we need the spinlock to
  * serialize with freeing.
@@ -458,7 +456,7 @@ static int mnt_make_readonly(struct mount *mnt)
 {
 	int ret = 0;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
 	/*
 	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -492,15 +490,15 @@ static int mnt_make_readonly(struct mount *mnt)
 	 */
 	smp_wmb();
 	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	return ret;
 }
 
 static void __mnt_unmake_readonly(struct mount *mnt)
 {
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	mnt->mnt.mnt_flags &= ~MNT_READONLY;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 
 int sb_prepare_remount_readonly(struct super_block *sb)
@@ -512,7 +510,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 	if (atomic_long_read(&sb->s_remove_count))
 		return -EBUSY;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
 		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
 			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -534,7 +532,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
 			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	return err;
 }
@@ -549,30 +547,56 @@ static void free_vfsmnt(struct mount *mnt)
 	kmem_cache_free(mnt_cache, mnt);
 }
 
+/* call under rcu_read_lock */
+bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+{
+	struct mount *mnt;
+	if (read_seqretry(&mount_lock, seq))
+		return false;
+	if (bastard == NULL)
+		return true;
+	mnt = real_mount(bastard);
+	mnt_add_count(mnt, 1);
+	if (likely(!read_seqretry(&mount_lock, seq)))
+		return true;
+	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
+		mnt_add_count(mnt, -1);
+		return false;
+	}
+	rcu_read_unlock();
+	mntput(bastard);
+	rcu_read_lock();
+	return false;
+}
+
 /*
- * find the first or last mount at @dentry on vfsmount @mnt depending on
- * @dir. If @dir is set return the first mount else return the last mount.
- * vfsmount_lock must be held for read or write.
+ * find the first mount at @dentry on vfsmount @mnt.
+ * call under rcu_read_lock()
  */
-struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
-			   int dir)
+struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
 {
 	struct list_head *head = mount_hashtable + hash(mnt, dentry);
-	struct list_head *tmp = head;
-	struct mount *p, *found = NULL;
+	struct mount *p;
 
-	for (;;) {
-		tmp = dir ? tmp->next : tmp->prev;
-		p = NULL;
-		if (tmp == head)
-			break;
-		p = list_entry(tmp, struct mount, mnt_hash);
-		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) {
-			found = p;
-			break;
-		}
-	}
-	return found;
+	list_for_each_entry_rcu(p, head, mnt_hash)
+		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
+			return p;
+	return NULL;
+}
+
+/*
+ * find the last mount at @dentry on vfsmount @mnt.
+ * mount_lock must be held.
+ */
+struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct list_head *head = mount_hashtable + hash(mnt, dentry);
+	struct mount *p;
+
+	list_for_each_entry_reverse(p, head, mnt_hash)
+		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
+			return p;
+	return NULL;
 }
 
 /*
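legitimize_mnt() converts a speculative, RCU-protected vfsmount pointer into a counted reference, with three outcomes: the sequence count never moved, so the count just taken is a valid reference (return true); the count moved but MNT_SYNC_UMOUNT is set, meaning the umounting thread will itself wait out an RCU grace period before freeing, so the speculative count can simply be undone (return false); or the count moved on a mount without that guarantee, in which case the surplus reference must go through a full mntput(), which may sleep, so the RCU read lock is dropped around it. Bumping the count of a mount that may already be on its way out is only safe because struct mount is now freed through call_rcu(); the pre-existing helper relied on here, as it reads in this file at the time of the patch, is:

	static inline void mnt_add_count(struct mount *mnt, int n)
	{
	#ifdef CONFIG_SMP
		/* per-cpu count; no lock needed, summed by mnt_get_count() */
		this_cpu_add(mnt->mnt_pcp->mnt_count, n);
	#else
		preempt_disable();
		mnt->mnt_count += n;
		preempt_enable();
	#endif
	}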
@@ -594,17 +618,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
 struct vfsmount *lookup_mnt(struct path *path)
 {
 	struct mount *child_mnt;
+	struct vfsmount *m;
+	unsigned seq;
 
-	br_read_lock(&vfsmount_lock);
-	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
-	if (child_mnt) {
-		mnt_add_count(child_mnt, 1);
-		br_read_unlock(&vfsmount_lock);
-		return &child_mnt->mnt;
-	} else {
-		br_read_unlock(&vfsmount_lock);
-		return NULL;
-	}
+	rcu_read_lock();
+	do {
+		seq = read_seqbegin(&mount_lock);
+		child_mnt = __lookup_mnt(path->mnt, path->dentry);
+		m = child_mnt ? &child_mnt->mnt : NULL;
+	} while (!legitimize_mnt(m, seq));
+	rcu_read_unlock();
+	return m;
 }
 
 static struct mountpoint *new_mountpoint(struct dentry *dentry)
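lookup_mnt() now takes no locks at all on the common path: it samples mount_lock, walks the hash under rcu_read_lock(), and lets legitimize_mnt() both validate the sample and take the reference, looping only if a concurrent writer invalidated the window. This is the standard seqlock reader pattern with the retry check folded into the legitimize step; the plain form, for comparison, is:

	unsigned seq;

	do {
		seq = read_seqbegin(&mount_lock);
		/* speculative reads of seqlock-protected state go here */
	} while (read_seqretry(&mount_lock, seq));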
@@ -796,9 +820,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
 	mnt->mnt.mnt_sb = root->d_sb;
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	return &mnt->mnt;
 }
 EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -839,9 +863,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 	mnt->mnt.mnt_root = dget(root);
 	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
 	mnt->mnt_parent = mnt;
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	if ((flag & CL_SLAVE) ||
 	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
@@ -872,64 +896,66 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 	return ERR_PTR(err);
 }
 
-static inline void mntfree(struct mount *mnt)
+static void delayed_free(struct rcu_head *head)
 {
-	struct vfsmount *m = &mnt->mnt;
-	struct super_block *sb = m->mnt_sb;
-
-	/*
-	 * This probably indicates that somebody messed
-	 * up a mnt_want/drop_write() pair. If this
-	 * happens, the filesystem was probably unable
-	 * to make r/w->r/o transitions.
-	 */
-	/*
-	 * The locking used to deal with mnt_count decrement provides barriers,
-	 * so mnt_get_writers() below is safe.
-	 */
-	WARN_ON(mnt_get_writers(mnt));
-	fsnotify_vfsmount_delete(m);
-	dput(m->mnt_root);
-	free_vfsmnt(mnt);
-	deactivate_super(sb);
+	struct mount *mnt = container_of(head, struct mount, mnt_rcu);
+	kfree(mnt->mnt_devname);
+#ifdef CONFIG_SMP
+	free_percpu(mnt->mnt_pcp);
+#endif
+	kmem_cache_free(mnt_cache, mnt);
 }
 
 static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
-#ifdef CONFIG_SMP
-	br_read_lock(&vfsmount_lock);
-	if (likely(mnt->mnt_ns)) {
-		/* shouldn't be the last one */
-		mnt_add_count(mnt, -1);
-		br_read_unlock(&vfsmount_lock);
+	rcu_read_lock();
+	mnt_add_count(mnt, -1);
+	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+		rcu_read_unlock();
 		return;
 	}
-	br_read_unlock(&vfsmount_lock);
-
-	br_write_lock(&vfsmount_lock);
-	mnt_add_count(mnt, -1);
+	lock_mount_hash();
 	if (mnt_get_count(mnt)) {
-		br_write_unlock(&vfsmount_lock);
+		rcu_read_unlock();
+		unlock_mount_hash();
 		return;
 	}
-#else
-	mnt_add_count(mnt, -1);
-	if (likely(mnt_get_count(mnt)))
-		return;
-	br_write_lock(&vfsmount_lock);
-#endif
 	if (unlikely(mnt->mnt_pinned)) {
 		mnt_add_count(mnt, mnt->mnt_pinned + 1);
 		mnt->mnt_pinned = 0;
-		br_write_unlock(&vfsmount_lock);
+		rcu_read_unlock();
+		unlock_mount_hash();
 		acct_auto_close_mnt(&mnt->mnt);
 		goto put_again;
 	}
+	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
+		rcu_read_unlock();
+		unlock_mount_hash();
+		return;
+	}
+	mnt->mnt.mnt_flags |= MNT_DOOMED;
+	rcu_read_unlock();
 
 	list_del(&mnt->mnt_instance);
-	br_write_unlock(&vfsmount_lock);
-	mntfree(mnt);
+	unlock_mount_hash();
+
+	/*
+	 * This probably indicates that somebody messed
+	 * up a mnt_want/drop_write() pair. If this
+	 * happens, the filesystem was probably unable
+	 * to make r/w->r/o transitions.
+	 */
+	/*
+	 * The locking used to deal with mnt_count decrement provides barriers,
+	 * so mnt_get_writers() below is safe.
+	 */
+	WARN_ON(mnt_get_writers(mnt));
+	fsnotify_vfsmount_delete(&mnt->mnt);
+	dput(mnt->mnt.mnt_root);
+	deactivate_super(mnt->mnt.mnt_sb);
+	mnt_free_id(mnt);
+	call_rcu(&mnt->mnt_rcu, delayed_free);
 }
 
 void mntput(struct vfsmount *mnt)
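mntput_no_expire() now runs its fast path entirely under rcu_read_lock(), and the final teardown no longer frees the structure synchronously: the heavyweight work (fsnotify, dput() of the root, deactivate_super()) happens immediately, but the memory itself is reclaimed via call_rcu(), so a racing legitimize_mnt() can still safely touch the counters of a dying mount. The MNT_DOOMED flag closes the race where two threads both see the count hit zero; only the one that sets the flag proceeds to teardown. This relies on fields added to struct mount by the companion fs/mount.h change, which is not part of this diff; a sketch of the assumed additions:

	struct mount {
		struct list_head mnt_hash;
		/* ... existing fields ... */
		struct path mnt_ex_mountpoint;	/* pins the old mountpoint until namespace_unlock() */
		struct rcu_head mnt_rcu;	/* queued to delayed_free() by the final mntput */
	};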
@@ -954,21 +980,21 @@ EXPORT_SYMBOL(mntget);
 
 void mnt_pin(struct vfsmount *mnt)
 {
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	real_mount(mnt)->mnt_pinned++;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 EXPORT_SYMBOL(mnt_pin);
 
 void mnt_unpin(struct vfsmount *m)
 {
 	struct mount *mnt = real_mount(m);
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	if (mnt->mnt_pinned) {
 		mnt_add_count(mnt, 1);
 		mnt->mnt_pinned--;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 }
 EXPORT_SYMBOL(mnt_unpin);
 
@@ -1085,12 +1111,12 @@ int may_umount_tree(struct vfsmount *m)
 	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	if (actual_refs > minimum_refs)
 		return 0;
@@ -1117,10 +1143,10 @@ int may_umount(struct vfsmount *mnt)
 {
 	int ret = 1;
 	down_read(&namespace_sem);
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	if (propagate_mount_busy(real_mount(mnt), 2))
 		ret = 0;
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	up_read(&namespace_sem);
 	return ret;
 }
@@ -1142,23 +1168,13 @@ static void namespace_unlock(void)
 	list_splice_init(&unmounted, &head);
 	up_write(&namespace_sem);
 
+	synchronize_rcu();
+
 	while (!list_empty(&head)) {
 		mnt = list_first_entry(&head, struct mount, mnt_hash);
 		list_del_init(&mnt->mnt_hash);
-		if (mnt_has_parent(mnt)) {
-			struct dentry *dentry;
-			struct mount *m;
-
-			br_write_lock(&vfsmount_lock);
-			dentry = mnt->mnt_mountpoint;
-			m = mnt->mnt_parent;
-			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
-			mnt->mnt_parent = mnt;
-			m->mnt_ghosts--;
-			br_write_unlock(&vfsmount_lock);
-			dput(dentry);
-			mntput(&m->mnt);
-		}
+		if (mnt->mnt_ex_mountpoint.mnt)
+			path_put(&mnt->mnt_ex_mountpoint);
 		mntput(&mnt->mnt);
 	}
 }
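namespace_unlock() is where the scheme pays off: every mount unhashed during one namespace_sem critical section is collected on the unmounted list, and a single synchronize_rcu() after up_write() waits out all rcu-walk readers for the whole batch before the final references are dropped. The writer-side shape of a synchronous umount, assembled from the hunks in this patch, is:

	namespace_lock();		/* down_write(&namespace_sem) */
	lock_mount_hash();
	umount_tree(mnt, 1);		/* unhash, mark MNT_SYNC_UMOUNT, collect victims */
	unlock_mount_hash();
	namespace_unlock();		/* up_write(), synchronize_rcu(), final mntput()s */

The dput()/mntput() of the ex-mountpoint also moves here, as a plain path_put() of ->mnt_ex_mountpoint done outside all locks, replacing the old mnt_ghosts bookkeeping that had to happen under the write lock.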
@@ -1169,10 +1185,13 @@ static inline void namespace_lock(void)
 }
 
 /*
- * vfsmount lock must be held for write
+ * mount_lock must be held
  * namespace_sem must be held for write
+ * how = 0 => just this tree, don't propagate
+ * how = 1 => propagate; we know that nobody else has reference to any victims
+ * how = 2 => lazy umount
  */
-void umount_tree(struct mount *mnt, int propagate)
+void umount_tree(struct mount *mnt, int how)
 {
 	LIST_HEAD(tmp_list);
 	struct mount *p;
@@ -1180,7 +1199,7 @@ void umount_tree(struct mount *mnt, int propagate)
 	for (p = mnt; p; p = next_mnt(p, mnt))
 		list_move(&p->mnt_hash, &tmp_list);
 
-	if (propagate)
+	if (how)
 		propagate_umount(&tmp_list);
 
 	list_for_each_entry(p, &tmp_list, mnt_hash) {
@@ -1188,10 +1207,16 @@ void umount_tree(struct mount *mnt, int propagate)
 		list_del_init(&p->mnt_list);
 		__touch_mnt_namespace(p->mnt_ns);
 		p->mnt_ns = NULL;
+		if (how < 2)
+			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 		list_del_init(&p->mnt_child);
 		if (mnt_has_parent(p)) {
-			p->mnt_parent->mnt_ghosts++;
 			put_mountpoint(p->mnt_mp);
+			/* move the reference to mountpoint into ->mnt_ex_mountpoint */
+			p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
+			p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
+			p->mnt_mountpoint = p->mnt.mnt_root;
+			p->mnt_parent = p;
 			p->mnt_mp = NULL;
 		}
 		change_mnt_propagation(p, MS_PRIVATE);
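Marking victims with MNT_SYNC_UMOUNT for how < 2 is what licenses the cheap failure path in legitimize_mnt(): a synchronous umounter is guaranteed to pass through synchronize_rcu() before the tree is torn down, so a reader that raced with it may drop its speculative count directly. Lazy umount (how == 2, from MNT_DETACH) deliberately skips the flag, because a detached tree can stay alive indefinitely and a racing reader must do a real mntput(). The interaction, restated as a timeline:

	/*
	 *  reader (legitimize_mnt)          umounter (umount_tree + namespace_unlock)
	 *
	 *  rcu_read_lock()
	 *  mnt_add_count(mnt, +1)           lock_mount_hash()
	 *  read_seqretry() => raced         unhash victims; how < 2: MNT_SYNC_UMOUNT
	 *  MNT_SYNC_UMOUNT set?             unlock_mount_hash()
	 *    yes: mnt_add_count(mnt, -1)    synchronize_rcu()   <-- waits for reader
	 *    no:  mntput(bastard)           final mntput() of victims
	 */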
@@ -1225,12 +1250,12 @@ static int do_umount(struct mount *mnt, int flags)
 		 * probably don't strictly need the lock here if we examined
 		 * all race cases, but it's a slowpath.
 		 */
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		if (mnt_get_count(mnt) != 2) {
-			br_write_unlock(&vfsmount_lock);
+			unlock_mount_hash();
 			return -EBUSY;
 		}
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 
 		if (!xchg(&mnt->mnt_expiry_mark, 1))
 			return -EAGAIN;
@@ -1272,19 +1297,23 @@ static int do_umount(struct mount *mnt, int flags)
 	}
 
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	event++;
 
-	if (!(flags & MNT_DETACH))
-		shrink_submounts(mnt);
-
-	retval = -EBUSY;
-	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
+	if (flags & MNT_DETACH) {
 		if (!list_empty(&mnt->mnt_list))
-			umount_tree(mnt, 1);
+			umount_tree(mnt, 2);
 		retval = 0;
+	} else {
+		shrink_submounts(mnt);
+		retval = -EBUSY;
+		if (!propagate_mount_busy(mnt, 2)) {
+			if (!list_empty(&mnt->mnt_list))
+				umount_tree(mnt, 1);
+			retval = 0;
+		}
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 	return retval;
 }
@@ -1427,18 +1456,18 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 			q = clone_mnt(p, p->mnt.mnt_root, flag);
 			if (IS_ERR(q))
 				goto out;
-			br_write_lock(&vfsmount_lock);
+			lock_mount_hash();
 			list_add_tail(&q->mnt_list, &res->mnt_list);
 			attach_mnt(q, parent, p->mnt_mp);
-			br_write_unlock(&vfsmount_lock);
+			unlock_mount_hash();
 		}
 	}
 	return res;
 out:
 	if (res) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		umount_tree(res, 0);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
 	return q;
 }
@@ -1460,9 +1489,9 @@ struct vfsmount *collect_mounts(struct path *path)
 void drop_collected_mounts(struct vfsmount *mnt)
 {
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	umount_tree(real_mount(mnt), 0);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -1589,7 +1618,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	if (err)
 		goto out_cleanup_ids;
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 
 	if (IS_MNT_SHARED(dest_mnt)) {
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1608,7 +1637,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 		list_del_init(&child->mnt_hash);
 		commit_tree(child);
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 	return 0;
 
@@ -1710,10 +1739,10 @@ static int do_change_type(struct path *path, int flag)
 		goto out_unlock;
 	}
 
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 
 out_unlock:
 	namespace_unlock();
@@ -1785,9 +1814,9 @@ static int do_loopback(struct path *path, const char *old_name,
 
 	err = graft_tree(mnt, parent, mp);
 	if (err) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		umount_tree(mnt, 0);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
 out2:
 	unlock_mount(mp);
@@ -1846,17 +1875,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
 	else
 		err = do_remount_sb(sb, flags, data, 0);
 	if (!err) {
-		br_write_lock(&vfsmount_lock);
+		lock_mount_hash();
 		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
 		mnt->mnt.mnt_flags = mnt_flags;
-		br_write_unlock(&vfsmount_lock);
-	}
-	up_write(&sb->s_umount);
-	if (!err) {
-		br_write_lock(&vfsmount_lock);
 		touch_mnt_namespace(mnt->mnt_ns);
-		br_write_unlock(&vfsmount_lock);
+		unlock_mount_hash();
 	}
+	up_write(&sb->s_umount);
 	return err;
 }
 
@@ -1972,7 +1997,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
 	struct mount *parent;
 	int err;
 
-	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);
+	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
 
 	mp = lock_mount(path);
 	if (IS_ERR(mp))
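MNT_DOOMED and MNT_SYNC_UMOUNT are new mnt_flags bits from the companion include/linux/mount.h change; do_add_mount() masks them out along with the other kernel-internal bits so a freshly created mount can never inherit them from a caller-supplied flags word. A sketch of the added definitions (values as in the corresponding header change):

	#define MNT_DOOMED		0x1000000	/* final mntput() has committed to freeing */
	#define MNT_SYNC_UMOUNT		0x2000000	/* umounter itself waits out RCU readers */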
@@ -2077,9 +2102,7 @@ fail:
 	/* remove m from any expiration list it may be on */
 	if (!list_empty(&mnt->mnt_expire)) {
 		namespace_lock();
-		br_write_lock(&vfsmount_lock);
 		list_del_init(&mnt->mnt_expire);
-		br_write_unlock(&vfsmount_lock);
 		namespace_unlock();
 	}
 	mntput(m);
@@ -2095,11 +2118,9 @@ fail:
 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
 {
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
 
 	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
 
-	br_write_unlock(&vfsmount_lock);
 	namespace_unlock();
 }
 EXPORT_SYMBOL(mnt_set_expiry);
@@ -2118,7 +2139,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 		return;
 
 	namespace_lock();
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 
 	/* extract from the expiration list every vfsmount that matches the
 	 * following criteria:
@@ -2137,7 +2158,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
 		touch_mnt_namespace(mnt->mnt_ns);
 		umount_tree(mnt, 1);
 	}
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -2193,7 +2214,7 @@ resume:
  * process a list of expirable mountpoints with the intent of discarding any
  * submounts of a specific parent mountpoint
 *
- * vfsmount_lock must be held for write
+ * mount_lock must be held for write
 */
 static void shrink_submounts(struct mount *mnt)
 {
@@ -2414,20 +2435,25 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 	return new_ns;
 }
 
-/*
- * Allocate a new namespace structure and populate it with contents
- * copied from the namespace of the passed in task structure.
- */
-static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
-		struct user_namespace *user_ns, struct fs_struct *fs)
+struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+		struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
 	struct mnt_namespace *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
 	struct mount *p, *q;
-	struct mount *old = mnt_ns->root;
+	struct mount *old;
 	struct mount *new;
 	int copy_flags;
 
+	BUG_ON(!ns);
+
+	if (likely(!(flags & CLONE_NEWNS))) {
+		get_mnt_ns(ns);
+		return ns;
+	}
+
+	old = ns->root;
+
 	new_ns = alloc_mnt_ns(user_ns);
 	if (IS_ERR(new_ns))
 		return new_ns;
@@ -2435,7 +2461,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	namespace_lock();
 	/* First pass: copy the tree topology */
 	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
-	if (user_ns != mnt_ns->user_ns)
+	if (user_ns != ns->user_ns)
 		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
 	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
 	if (IS_ERR(new)) {
@@ -2444,9 +2470,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 		return ERR_CAST(new);
 	}
 	new_ns->root = new;
-	br_write_lock(&vfsmount_lock);
 	list_add_tail(&new_ns->list, &new->mnt_list);
-	br_write_unlock(&vfsmount_lock);
 
 	/*
 	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2457,13 +2481,13 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
-		if (fs) {
-			if (&p->mnt == fs->root.mnt) {
-				fs->root.mnt = mntget(&q->mnt);
+		if (new_fs) {
+			if (&p->mnt == new_fs->root.mnt) {
+				new_fs->root.mnt = mntget(&q->mnt);
 				rootmnt = &p->mnt;
 			}
-			if (&p->mnt == fs->pwd.mnt) {
-				fs->pwd.mnt = mntget(&q->mnt);
+			if (&p->mnt == new_fs->pwd.mnt) {
+				new_fs->pwd.mnt = mntget(&q->mnt);
 				pwdmnt = &p->mnt;
 			}
 		}
@@ -2484,23 +2508,6 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	return new_ns;
 }
 
-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
-		struct user_namespace *user_ns, struct fs_struct *new_fs)
-{
-	struct mnt_namespace *new_ns;
-
-	BUG_ON(!ns);
-	get_mnt_ns(ns);
-
-	if (!(flags & CLONE_NEWNS))
-		return ns;
-
-	new_ns = dup_mnt_ns(ns, user_ns, new_fs);
-
-	put_mnt_ns(ns);
-	return new_ns;
-}
-
 /**
  * create_mnt_ns - creates a private namespace and adds a root filesystem
  * @mnt: pointer to the new root filesystem mountpoint
@@ -2593,7 +2600,7 @@ out_type:
 /*
  * Return true if path is reachable from root
 *
- * namespace_sem or vfsmount_lock is held
+ * namespace_sem or mount_lock is held
 */
 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
 			 const struct path *root)
@@ -2608,9 +2615,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
 int path_is_under(struct path *path1, struct path *path2)
 {
 	int res;
-	br_read_lock(&vfsmount_lock);
+	read_seqlock_excl(&mount_lock);
 	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
-	br_read_unlock(&vfsmount_lock);
+	read_sequnlock_excl(&mount_lock);
 	return res;
 }
 EXPORT_SYMBOL(path_is_under);
@@ -2701,7 +2708,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	if (!is_path_reachable(old_mnt, old.dentry, &new))
 		goto out4;
 	root_mp->m_count++; /* pin it so it won't go away */
-	br_write_lock(&vfsmount_lock);
+	lock_mount_hash();
 	detach_mnt(new_mnt, &parent_path);
 	detach_mnt(root_mnt, &root_parent);
 	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
@@ -2713,7 +2720,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	/* mount new_root on / */
 	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
-	br_write_unlock(&vfsmount_lock);
+	unlock_mount_hash();
 	chroot_fs_refs(&root, &new);
 	put_mountpoint(root_mp);
 	error = 0;
@@ -2767,8 +2774,6 @@ void __init mnt_init(void)
 	unsigned u;
 	int err;
 
-	init_rwsem(&namespace_sem);
-
 	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
 			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
@@ -2785,8 +2790,6 @@ void __init mnt_init(void)
 	for (u = 0; u < HASH_SIZE; u++)
 		INIT_LIST_HEAD(&mountpoint_hashtable[u]);
 
-	br_lock_init(&vfsmount_lock);
-
 	err = sysfs_init();
 	if (err)
 		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
@@ -2802,11 +2805,7 @@ void put_mnt_ns(struct mnt_namespace *ns)
 {
 	if (!atomic_dec_and_test(&ns->count))
 		return;
-	namespace_lock();
-	br_write_lock(&vfsmount_lock);
-	umount_tree(ns->root, 0);
-	br_write_unlock(&vfsmount_lock);
-	namespace_unlock();
+	drop_collected_mounts(&ns->root->mnt);
 	free_mnt_ns(ns);
 }
 
@@ -2829,9 +2828,8 @@ void kern_unmount(struct vfsmount *mnt)
 {
 	/* release long term mount so mount point can be released */
 	if (!IS_ERR_OR_NULL(mnt)) {
-		br_write_lock(&vfsmount_lock);
 		real_mount(mnt)->mnt_ns = NULL;
-		br_write_unlock(&vfsmount_lock);
+		synchronize_rcu();	/* yecchhh... */
 		mntput(mnt);
 	}
 }
@@ -2875,7 +2873,7 @@ bool fs_fully_visible(struct file_system_type *type)
 	if (unlikely(!ns))
 		return false;
 
-	namespace_lock();
+	down_read(&namespace_sem);
 	list_for_each_entry(mnt, &ns->list, mnt_list) {
 		struct mount *child;
 		if (mnt->mnt.mnt_sb->s_type != type)
@@ -2896,7 +2894,7 @@ bool fs_fully_visible(struct file_system_type *type)
 	next:	;
 	}
 found:
-	namespace_unlock();
+	up_read(&namespace_sem);
 	return visible;
 }
 