|
@@ -27,6 +27,7 @@
|
|
|
#include <linux/mount.h>
|
|
|
#include <linux/ramfs.h>
|
|
|
#include <linux/log2.h>
|
|
|
+#include <linux/idr.h>
|
|
|
#include <asm/uaccess.h>
|
|
|
#include <asm/unistd.h>
|
|
|
#include "pnode.h"
|
|
@@ -39,6 +40,7 @@
|
|
|
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
|
|
|
|
|
|
static int event;
|
|
|
+static DEFINE_IDA(mnt_id_ida);
|
|
|
|
|
|
static struct list_head *mount_hashtable __read_mostly;
|
|
|
static struct kmem_cache *mnt_cache __read_mostly;
|
|
@@ -58,10 +60,41 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
|
|
|
|
|
|
#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)
|
|
|
|
|
|
+/* allocation is serialized by namespace_sem */
|
|
|
+static int mnt_alloc_id(struct vfsmount *mnt)
|
|
|
+{
|
|
|
+ int res;
|
|
|
+
|
|
|
+retry:
|
|
|
+ ida_pre_get(&mnt_id_ida, GFP_KERNEL);
|
|
|
+ spin_lock(&vfsmount_lock);
|
|
|
+ res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
|
|
|
+ spin_unlock(&vfsmount_lock);
|
|
|
+ if (res == -EAGAIN)
|
|
|
+ goto retry;
|
|
|
+
|
|
|
+ return res;
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Release the mount ID previously obtained by mnt_alloc_id().
 * vfsmount_lock serializes the IDA update against concurrent allocators.
 */
static void mnt_free_id(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	ida_remove(&mnt_id_ida, mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
}
|
|
|
+
|
|
|
struct vfsmount *alloc_vfsmnt(const char *name)
|
|
|
{
|
|
|
struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
|
|
|
if (mnt) {
|
|
|
+ int err;
|
|
|
+
|
|
|
+ err = mnt_alloc_id(mnt);
|
|
|
+ if (err) {
|
|
|
+ kmem_cache_free(mnt_cache, mnt);
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+
|
|
|
atomic_set(&mnt->mnt_count, 1);
|
|
|
INIT_LIST_HEAD(&mnt->mnt_hash);
|
|
|
INIT_LIST_HEAD(&mnt->mnt_child);
|
|
@@ -353,6 +386,7 @@ EXPORT_SYMBOL(simple_set_mnt);
|
|
|
/*
 * Free a vfsmount allocated by alloc_vfsmnt(): release the devname copy
 * and the mount ID before returning the object to the slab cache.
 * The ID must be freed before the object itself, since mnt_free_id()
 * reads mnt->mnt_id.
 */
void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
	kmem_cache_free(mnt_cache, mnt);
}
|
|
|
|