@@ -3483,7 +3483,6 @@ static void memcg_create_cache_work_func(struct work_struct *w)
 
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
- * Called with rcu_read_lock.
  */
 static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
                                         struct kmem_cache *cachep)
@@ -3491,12 +3490,8 @@ static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
        struct create_work *cw;
 
        cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
-       if (cw == NULL)
-               return;
-
-       /* The corresponding put will be done in the workqueue. */
-       if (!css_tryget(&memcg->css)) {
-               kfree(cw);
+       if (cw == NULL) {
+               css_put(&memcg->css);
                return;
        }
 
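The new error path relies on a reference hand-off: the caller now takes the
css reference before calling in, and this function owns it from then on, so
every failure path must drop it. A minimal sketch of that convention, with
the hypothetical helper start_deferred_op() standing in for the real call
chain:

        /* Caller has already done css_tryget(&memcg->css). */
        static void start_deferred_op(struct mem_cgroup *memcg)
        {
                struct create_work *cw;

                cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
                if (cw == NULL) {
                        css_put(&memcg->css);   /* we own the reference: drop it */
                        return;
                }
                /* ...queue cw; the work handler does the final css_put()... */
        }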
@@ -3552,10 +3547,9 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 
        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
-       rcu_read_unlock();
 
        if (!memcg_can_account_kmem(memcg))
-               return cachep;
+               goto out;
 
        idx = memcg_cache_id(memcg);
 
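With rcu_read_unlock() moved out of the lookup, early returns can no longer
simply return; they funnel through a common exit that unlocks exactly once.
A minimal sketch of that single-exit shape, with the hypothetical
some_check() standing in for memcg_can_account_kmem():

        static struct kmem_cache *get_cache_example(struct kmem_cache *cachep)
        {
                rcu_read_lock();
                if (!some_check())
                        goto out;       /* early exit shares the unlock below */
                /* ...fast path also jumps to out; slow path unlocks itself... */
        out:
                rcu_read_unlock();
                return cachep;
        }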
@@ -3564,29 +3558,38 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
         * code updating memcg_caches will issue a write barrier to match this.
         */
        read_barrier_depends();
-       if (unlikely(cachep->memcg_params->memcg_caches[idx] == NULL)) {
-               /*
-                * If we are in a safe context (can wait, and not in interrupt
-                * context), we could be be predictable and return right away.
-                * This would guarantee that the allocation being performed
-                * already belongs in the new cache.
-                *
-                * However, there are some clashes that can arrive from locking.
-                * For instance, because we acquire the slab_mutex while doing
-                * kmem_cache_dup, this means no further allocation could happen
-                * with the slab_mutex held.
-                *
-                * Also, because cache creation issue get_online_cpus(), this
-                * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
-                * that ends up reversed during cpu hotplug. (cpuset allocates
-                * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
-                * better to defer everything.
-                */
-               memcg_create_cache_enqueue(memcg, cachep);
-               return cachep;
+       if (likely(cachep->memcg_params->memcg_caches[idx])) {
+               cachep = cachep->memcg_params->memcg_caches[idx];
+               goto out;
        }
 
-       return cachep->memcg_params->memcg_caches[idx];
+       /* The corresponding put will be done in the workqueue. */
+       if (!css_tryget(&memcg->css))
+               goto out;
+       rcu_read_unlock();
+
+       /*
+        * If we are in a safe context (can wait, and not in interrupt
+        * context), we could be predictable and return right away.
+        * This would guarantee that the allocation being performed
+        * already belongs in the new cache.
+        *
+        * However, there are some clashes that can arise from locking.
+        * For instance, because we acquire the slab_mutex while doing
+        * kmem_cache_dup, this means no further allocation could happen
+        * with the slab_mutex held.
+        *
+        * Also, because cache creation issues get_online_cpus(), this
+        * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
+        * that ends up reversed during cpu hotplug. (cpuset allocates
+        * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
+        * better to defer everything.
+        */
+       memcg_create_cache_enqueue(memcg, cachep);
+       return cachep;
+out:
+       rcu_read_unlock();
+       return cachep;
 }
 EXPORT_SYMBOL(__memcg_kmem_get_cache);
 
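Taken together, the reordering closes the window in which the memcg could go
away between dropping rcu_read_lock() and taking the css reference: an RCU
lookup only keeps the object alive while the read-side lock is held, so a
real reference must be taken before unlocking. A minimal sketch of the
pattern, where obj_lookup(), obj_tryget(), defer_work() and obj_put() are
illustrative stand-ins rather than kernel APIs:

        static void get_and_defer(void)
        {
                struct obj *o;

                rcu_read_lock();
                o = obj_lookup();               /* stable only while under RCU */
                if (!obj_tryget(o)) {           /* object already dying: give up */
                        rcu_read_unlock();
                        return;
                }
                rcu_read_unlock();              /* safe: we hold our own reference */

                defer_work(o);                  /* worker drops it with obj_put() */
        }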