@@ -2154,19 +2154,14 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
 }
 
 /*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back to res_counter later but cannot wait for
- * it.
+ * Drains all per-CPU charge caches for the given root_mem and the whole
+ * subtree of the hierarchy under it. The sync flag tells whether we should
+ * block until the work is done.
  */
-static void drain_all_stock_async(struct mem_cgroup *root_mem)
+static void drain_all_stock(struct mem_cgroup *root_mem, bool sync)
 {
 	int cpu, curcpu;
-	/*
-	 * If someone calls draining, avoid adding more kworker runs.
-	 */
-	if (!mutex_trylock(&percpu_charge_mutex))
-		return;
+
 	/* Notify other cpus that system-wide "drain" is running */
 	get_online_cpus();
 	/*
@@ -2197,17 +2192,44 @@ static void drain_all_stock_async(struct mem_cgroup *root_mem)
 				schedule_work_on(cpu, &stock->work);
 		}
 	}
+
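+	/* Async callers return without waiting for the scheduled drain works */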
+	if (!sync)
+		goto out;
+
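+	/* Block until each scheduled drain work has finished */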
+	for_each_online_cpu(cpu) {
+		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+		if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
+			flush_work(&stock->work);
+	}
+out:
 	put_online_cpus();
+}
+
+/*
+ * Tries to drain stocked charges in other cpus. This function is asynchronous
+ * and just schedules a work per cpu for draining locally on each cpu. Callers
+ * can expect some charges to come back to the res_counter later but cannot
+ * wait for it.
+ */
+static void drain_all_stock_async(struct mem_cgroup *root_mem)
+{
+	/*
+	 * If someone is already draining, avoid adding more kworker runs.
+	 */
+	if (!mutex_trylock(&percpu_charge_mutex))
+		return;
+	drain_all_stock(root_mem, false);
 	mutex_unlock(&percpu_charge_mutex);
-	/* We don't wait for flush_work */
 }
 
 /* This is a synchronous drain interface. */
-static void drain_all_stock_sync(void)
+static void drain_all_stock_sync(struct mem_cgroup *root_mem)
 {
 	/* called when force_empty is called */
 	mutex_lock(&percpu_charge_mutex);
-	schedule_on_each_cpu(drain_local_stock);
+	drain_all_stock(root_mem, true);
 	mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -3856,7 +3876,7 @@ move_account:
 		goto out;
 		/* This is for making all *used* pages to be on LRU. */
 		lru_add_drain_all();
-		drain_all_stock_sync();
+		drain_all_stock_sync(mem);
 		ret = 0;
 		mem_cgroup_start_move(mem);
 		for_each_node_state(node, N_HIGH_MEMORY) {