@@ -2779,6 +2779,19 @@ static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
 		(memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
 }
 
+/*
+ * This is a bit cumbersome, but it is rarely used and avoids a backpointer
+ * in the memcg_cache_params struct.
+ */
+static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
+{
+	struct kmem_cache *cachep;
+
+	VM_BUG_ON(p->is_root_cache);
+	cachep = p->root_cache;
+	return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
+}
+
 static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 {
 	struct res_counter *fail_res;
@@ -3056,6 +3069,31 @@ static inline void memcg_resume_kmem_account(void)
 	current->memcg_kmem_skip_account--;
 }
 
+static void kmem_cache_destroy_work_func(struct work_struct *w)
+{
+	struct kmem_cache *cachep;
+	struct memcg_cache_params *p;
+
+	p = container_of(w, struct memcg_cache_params, destroy);
+
+	cachep = memcg_params_to_cache(p);
+
+	if (!atomic_read(&cachep->memcg_params->nr_pages))
+		kmem_cache_destroy(cachep);
+}
+
+void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
+{
+	if (!cachep->memcg_params->dead)
+		return;
+
+	/*
+	 * We have to defer the actual destroying to a workqueue, because
+	 * we might currently be in a context that cannot sleep.
+	 */
+	schedule_work(&cachep->memcg_params->destroy);
+}
+
 static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
 {
 	char *name;
@@ -3125,6 +3163,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	mem_cgroup_get(memcg);
 	new_cachep->memcg_params->root_cache = cachep;
+	atomic_set(&new_cachep->memcg_params->nr_pages , 0);
 
 	cachep->memcg_params->memcg_caches[idx] = new_cachep;
 	/*
@@ -3143,6 +3182,25 @@ struct create_work {
 	struct work_struct work;
 };
 
+static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
+{
+	struct kmem_cache *cachep;
+	struct memcg_cache_params *params;
+
+	if (!memcg_kmem_is_active(memcg))
+		return;
+
+	mutex_lock(&memcg->slab_caches_mutex);
+	list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
+		cachep = memcg_params_to_cache(params);
+		cachep->memcg_params->dead = true;
+		INIT_WORK(&cachep->memcg_params->destroy,
+			  kmem_cache_destroy_work_func);
+		schedule_work(&cachep->memcg_params->destroy);
+	}
+	mutex_unlock(&memcg->slab_caches_mutex);
+}
+
 static void memcg_create_cache_work_func(struct work_struct *w)
 {
 	struct create_work *cw;
@@ -3358,6 +3416,10 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 	VM_BUG_ON(mem_cgroup_is_root(memcg));
 	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 }
+#else
+static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
+{
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -5975,6 +6037,7 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
 	mem_cgroup_reparent_charges(memcg);
+	mem_cgroup_destroy_all_caches(memcg);
 }
 
 static void mem_cgroup_css_free(struct cgroup *cont)
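
For reference, the defer-to-workqueue idea used by mem_cgroup_destroy_cache() above can be looked at in isolation. What follows is a minimal sketch, not part of the patch: destruction may be requested from a context that cannot sleep, so the request only queues a work item, and the worker does the actual kmem_cache_destroy() (which can sleep) once no pages are outstanding. The names dummy_params, dummy_mark_dead, dummy_request_destroy and dummy_destroy_work_func are illustrative only.

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct dummy_params {
	struct kmem_cache *cache;	/* cache owned by these params */
	atomic_t nr_pages;		/* pages still allocated from it */
	bool dead;			/* owner has gone away */
	struct work_struct destroy;	/* deferred destruction work */
};

static void dummy_destroy_work_func(struct work_struct *w)
{
	struct dummy_params *p = container_of(w, struct dummy_params, destroy);

	/* The worker runs in process context, so sleeping is allowed here. */
	if (p->dead && !atomic_read(&p->nr_pages))
		kmem_cache_destroy(p->cache);
}

/* The owner marks the params dead and initializes the work item once. */
static void dummy_mark_dead(struct dummy_params *p)
{
	p->dead = true;
	INIT_WORK(&p->destroy, dummy_destroy_work_func);
}

/* Safe to call from atomic context, e.g. when the last page is freed. */
static void dummy_request_destroy(struct dummy_params *p)
{
	if (!p->dead)
		return;
	schedule_work(&p->destroy);
}

The split mirrors the patch: the flag and work item are set up under the owner's lock (mem_cgroup_destroy_all_caches), while the per-free path only checks the flag and schedules work, keeping the slow, sleepable teardown out of the allocation/free hot paths.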