@@ -1725,7 +1725,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
- * Has to be called with memcg_oom_mutex
+ * Has to be called with memcg_oom_lock
  */
 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 {
@@ -1770,7 +1770,7 @@ done:
 }
 
 /*
- * Has to be called with memcg_oom_mutex
+ * Has to be called with memcg_oom_lock
  */
 static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 {
@@ -1802,7 +1802,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
 		atomic_add_unless(&iter->under_oom, -1, 0);
 }
 
-static DEFINE_MUTEX(memcg_oom_mutex);
+static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
@@ -1864,7 +1864,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 	mem_cgroup_mark_under_oom(mem);
 
 	/* At first, try to OOM lock hierarchy under mem.*/
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 	locked = mem_cgroup_oom_lock(mem);
 	/*
 	 * Even if signal_pending(), we can't quit charge() loop without
@@ -1876,7 +1876,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 		need_to_kill = false;
 	if (locked)
 		mem_cgroup_oom_notify(mem);
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 
 	if (need_to_kill) {
 		finish_wait(&memcg_oom_waitq, &owait.wait);
@@ -1885,11 +1885,11 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 		schedule();
 		finish_wait(&memcg_oom_waitq, &owait.wait);
 	}
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 	if (locked)
 		mem_cgroup_oom_unlock(mem);
 	memcg_wakeup_oom(mem);
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 
 	mem_cgroup_unmark_under_oom(mem);
 
@@ -4553,7 +4553,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
 	if (!event)
 		return -ENOMEM;
 
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 
 	event->eventfd = eventfd;
 	list_add(&event->list, &memcg->oom_notify);
@@ -4561,7 +4561,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
 	/* already in OOM ? */
 	if (atomic_read(&memcg->under_oom))
 		eventfd_signal(eventfd, 1);
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 
 	return 0;
 }
@@ -4575,7 +4575,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 
 	BUG_ON(type != _OOM_TYPE);
 
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 
 	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
 		if (ev->eventfd == eventfd) {
@@ -4584,7 +4584,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 		}
 	}
 
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 }
 
 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
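
Note on the conversion: every critical section the old memcg_oom_mutex guarded is
short and never sleeps -- the hunks above visibly drop the lock before schedule()
and before the OOM kill itself -- which is what makes a raw spinlock a viable,
cheaper replacement. Below is a minimal sketch of the resulting pattern, not the
kernel source: oom_trylock_and_notify() is a hypothetical wrapper invented for
illustration, while memcg_oom_lock, mem_cgroup_oom_lock() and
mem_cgroup_oom_notify() are the names the patch itself uses.

	/*
	 * Sketch only.  DEFINE_SPINLOCK() replaces DEFINE_MUTEX(), and each
	 * spin_lock()/spin_unlock() pair brackets only non-sleeping work;
	 * anything that can block must run with the lock dropped.
	 */
	static DEFINE_SPINLOCK(memcg_oom_lock);

	static bool oom_trylock_and_notify(struct mem_cgroup *mem)
	{
		bool locked;

		spin_lock(&memcg_oom_lock);
		locked = mem_cgroup_oom_lock(mem);	/* hierarchy walk, no sleeping */
		if (locked)
			mem_cgroup_oom_notify(mem);	/* eventfd signalling, no sleeping */
		spin_unlock(&memcg_oom_lock);

		return locked;	/* caller may now sleep, wait, or kill */
	}

The same discipline explains the eventfd register/unregister hunks: list_add() and
list_for_each_entry_safe() over memcg->oom_notify are constant-cost list operations,
so they too fit under the spinlock without change.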