@@ -1293,14 +1293,74 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 static DEFINE_MUTEX(memcg_oom_mutex);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
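+/*
+ * One record per waiter on the global memcg OOM waitqueue; embedding
+ * the wait_queue_t lets the wake function map a queue entry back to
+ * the waiting task's memcg via container_of().
+ */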
+struct oom_wait_info {
+	struct mem_cgroup *mem;
+	wait_queue_t	wait;
+};
+
+static int memcg_oom_wake_function(wait_queue_t *wait,
+	unsigned mode, int sync, void *arg)
+{
+	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
+	struct oom_wait_info *oom_wait_info;
+
+	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
+
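+	/*
+	 * Wake this waiter only if the memcg that finished OOM handling
+	 * ("wake_mem") is the waiter's own memcg or is related to it in
+	 * the cgroup hierarchy.
+	 */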
+	if (oom_wait_info->mem == wake_mem)
+		goto wakeup;
+	/* if either side has no hierarchy, ancestry cannot match */
+	if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
+		return 0;
+	/*
+	 * Both oom_wait_info->mem and wake_mem are stable under us, so
+	 * css_is_ancestor() can be used without taking the RCU read lock.
+	 */
+	if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
+	    !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
+		return 0;
+
+wakeup:
+	return autoremove_wake_function(wait, mode, sync, arg);
+}
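+
+/*
+ * Wake every waiter that memcg_oom_wake_function() matches against
+ * "mem"; waiters for unrelated memcgs are left on the queue.
+ */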
+static void memcg_wakeup_oom(struct mem_cgroup *mem)
+{
+	/* pass "mem" as the wake argument so the wake function can filter */
+	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
+}
+
 /*
  * try to call OOM killer. returns false if we should exit memory-reclaim loop.
  */
 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 {
-	DEFINE_WAIT(wait);
+	struct oom_wait_info owait;
 	bool locked;
 
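+	/*
+	 * Open-coded DEFINE_WAIT(): same initialization, but with our
+	 * filtering wake function in place of autoremove_wake_function().
+	 */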
+	owait.mem = mem;
+	owait.wait.flags = 0;
+	owait.wait.func = memcg_oom_wake_function;
+	owait.wait.private = current;
+	INIT_LIST_HEAD(&owait.wait.task_list);
+
 	/* At first, try to OOM lock hierarchy under mem.*/
 	mutex_lock(&memcg_oom_mutex);
 	locked = mem_cgroup_oom_lock(mem);
@@ -1310,31 +1370,18 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 	 * under OOM is always welcomed, use TASK_KILLABLE here.
 	 */
 	if (!locked)
-		prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE);
+		prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
 	mutex_unlock(&memcg_oom_mutex);
 
 	if (locked)
 		mem_cgroup_out_of_memory(mem, mask);
 	else {
 		schedule();
-		finish_wait(&memcg_oom_waitq, &wait);
+		finish_wait(&memcg_oom_waitq, &owait.wait);
 	}
 	mutex_lock(&memcg_oom_mutex);
 	mem_cgroup_oom_unlock(mem);
-	/*
-	 * Here, we use global waitq .....more fine grained waitq ?
-	 * Assume following hierarchy.
-	 * A/
-	 *   01
-	 *   02
-	 * assume OOM happens both in A and 01 at the same time. Tthey are
-	 * mutually exclusive by lock. (kill in 01 helps A.)
-	 * When we use per memcg waitq, we have to wake up waiters on A and 02
-	 * in addtion to waiters on 01. We use global waitq for avoiding mess.
-	 * It will not be a big problem.
-	 * (And a task may be moved to other groups while it's waiting for OOM.)
-	 */
-	wake_up_all(&memcg_oom_waitq);
+	memcg_wakeup_oom(mem);
 	mutex_unlock(&memcg_oom_mutex);
 
 	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))