|
@@ -3214,52 +3214,53 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
|
|
|
schedule_work(&cachep->memcg_params->destroy);
|
|
|
}
|
|
|
|
|
|
-static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
|
|
|
-{
|
|
|
- char *name;
|
|
|
- struct dentry *dentry;
|
|
|
-
|
|
|
- rcu_read_lock();
|
|
|
- dentry = rcu_dereference(memcg->css.cgroup->dentry);
|
|
|
- rcu_read_unlock();
|
|
|
-
|
|
|
- BUG_ON(dentry == NULL);
|
|
|
-
|
|
|
- name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
|
|
|
- memcg_cache_id(memcg), dentry->d_name.name);
|
|
|
-
|
|
|
- return name;
|
|
|
-}
|
|
|
+/*
|
|
|
+ * This lock protects updaters, not readers. We want readers to be as fast as
|
|
|
+ * they can, and they will either see NULL or a valid cache value. Our model
|
|
|
+ * allows them to see NULL, in which case the root memcg will be selected.
|
|
|
+ *
|
|
|
+ * We need this lock because multiple allocations to the same cache from a non-
|
|
|
+ * root memcg will span more than one worker. Only one of them can create the cache.
|
|
|
+ */
|
|
|
+static DEFINE_MUTEX(memcg_cache_mutex);
|
|
|
|
|
|
+/*
|
|
|
+ * Called with memcg_cache_mutex held
|
|
|
+ */
|
|
|
static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
|
|
|
struct kmem_cache *s)
|
|
|
{
|
|
|
- char *name;
|
|
|
struct kmem_cache *new;
|
|
|
+ static char *tmp_name = NULL;
|
|
|
|
|
|
- name = memcg_cache_name(memcg, s);
|
|
|
- if (!name)
|
|
|
- return NULL;
|
|
|
+ lockdep_assert_held(&memcg_cache_mutex);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * kmem_cache_create_memcg duplicates the given name, and
|
|
|
+ * reading cgroup_name for this name requires RCU context.
|
|
|
+ * This static temporary buffer is used to avoid a
|
|
|
+ * pointless short-lived allocation.
|
|
|
+ */
|
|
|
+ if (!tmp_name) {
|
|
|
+ tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
|
|
|
+ if (!tmp_name)
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+
|
|
|
+ rcu_read_lock();
|
|
|
+ snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
|
|
|
+ memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
|
|
|
+ rcu_read_unlock();
|
|
|
|
|
|
- new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
|
|
|
+ new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
|
|
|
(s->flags & ~SLAB_PANIC), s->ctor, s);
|
|
|
|
|
|
if (new)
|
|
|
new->allocflags |= __GFP_KMEMCG;
|
|
|
|
|
|
- kfree(name);
|
|
|
return new;
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * This lock protects updaters, not readers. We want readers to be as fast as
|
|
|
- * they can, and they will either see NULL or a valid cache value. Our model
|
|
|
- * allow them to see NULL, in which case the root memcg will be selected.
|
|
|
- *
|
|
|
- * We need this lock because multiple allocations to the same cache from a non
|
|
|
- * will span more than one worker. Only one of them can create the cache.
|
|
|
- */
|
|
|
-static DEFINE_MUTEX(memcg_cache_mutex);
|
|
|
static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
|
|
|
struct kmem_cache *cachep)
|
|
|
{
|