|
@@ -1925,19 +1925,18 @@ again:
|
|
|
|
|
|
rcu_read_lock();
|
|
|
p = rcu_dereference(mm->owner);
|
|
|
- VM_BUG_ON(!p);
|
|
|
/*
|
|
|
- * because we don't have task_lock(), "p" can exit while
|
|
|
- * we're here. In that case, "mem" can point to root
|
|
|
- * cgroup but never be NULL. (and task_struct itself is freed
|
|
|
- * by RCU, cgroup itself is RCU safe.) Then, we have small
|
|
|
- * risk here to get wrong cgroup. But such kind of mis-account
|
|
|
- * by race always happens because we don't have cgroup_mutex().
|
|
|
- * It's overkill and we allow that small race, here.
|
|
|
+ * Because we don't have task_lock(), "p" can exit.
|
|
|
+ * In that case, "mem" can point to root or p can be NULL with
|
|
|
+ * race with swapoff. Then, we have small risk of mis-accounting.
|
|
|
+ * But such kind of mis-account by race always happens because
|
|
|
+ * we don't have cgroup_mutex(). It's overkill and we allow that
|
|
|
+ * small race, here.
|
|
|
+ * (*) swapoff et al. will charge against mm-struct not against
|
|
|
+ * task-struct. So, mm->owner can be NULL.
|
|
|
*/
|
|
|
mem = mem_cgroup_from_task(p);
|
|
|
- VM_BUG_ON(!mem);
|
|
|
- if (mem_cgroup_is_root(mem)) {
|
|
|
+ if (!mem || mem_cgroup_is_root(mem)) {
|
|
|
rcu_read_unlock();
|
|
|
goto done;
|
|
|
}
|