|
@@ -230,10 +230,30 @@ struct mem_cgroup {
	 * the counter to account for memory usage
	 */
	struct res_counter res;
-	/*
-	 * the counter to account for mem+swap usage.
-	 */
-	struct res_counter memsw;
+
+	union {
+		/*
+		 * the counter to account for mem+swap usage.
+		 */
+		struct res_counter memsw;
+
+		/*
+		 * rcu_freeing is used only when freeing struct mem_cgroup,
+		 * so put it into a union to avoid wasting more memory.
+		 * It must be disjoint from the css field. It could be
+		 * in a union with the res field, but res plays a much
+		 * larger part in mem_cgroup life than memsw, and might
+		 * be of interest, even at time of free, when debugging.
+		 * So share rcu_head with the less interesting memsw.
+		 */
+		struct rcu_head rcu_freeing;
+		/*
+		 * But when using vfree(), that cannot be done at
+		 * interrupt time, so we must then queue the work.
+		 */
+		struct work_struct work_freeing;
+	};
+
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
|
|
@@ -4779,6 +4799,27 @@ out_free:
	return NULL;
 }
 
+/*
+ * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * but in process context. The work_freeing structure is overlaid
+ * on the rcu_freeing structure, which itself is overlaid on memsw.
+ */
+static void vfree_work(struct work_struct *work)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = container_of(work, struct mem_cgroup, work_freeing);
+	vfree(memcg);
+}
+static void vfree_rcu(struct rcu_head *rcu_head)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
+	INIT_WORK(&memcg->work_freeing, vfree_work);
+	schedule_work(&memcg->work_freeing);
+}
+
 /*
  * At destroying mem_cgroup, references from swap_cgroup can remain.
  * (scanning all at force_empty is too costly...)
|
|
@@ -4802,9 +4843,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
	free_percpu(memcg->stat);
	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
-		kfree(memcg);
+		kfree_rcu(memcg, rcu_freeing);
	else
-		vfree(memcg);
+		call_rcu(&memcg->rcu_freeing, vfree_rcu);
 }
 
 static void mem_cgroup_get(struct mem_cgroup *memcg)
|