@@ -4277,6 +4277,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	DEFINE_WAIT(wait);
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	LIST_HEAD(tmp_list);
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
 	lockdep_assert_held(&cgroup_mutex);
@@ -4331,16 +4332,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	/*
 	 * Unregister events and notify userspace.
 	 * Notify userspace about cgroup removing only after rmdir of cgroup
-	 * directory to avoid race between userspace and kernelspace
+	 * directory to avoid race between userspace and kernelspace. Use
+	 * a temporary list to avoid a deadlock with cgroup_event_wake(). Since
+	 * cgroup_event_wake() is called with the wait queue head locked,
+	 * remove_wait_queue() cannot be called while holding event_list_lock.
 	 */
 	spin_lock(&cgrp->event_list_lock);
-	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
+	list_splice_init(&cgrp->event_list, &tmp_list);
+	spin_unlock(&cgrp->event_list_lock);
+	list_for_each_entry_safe(event, tmp, &tmp_list, list) {
 		list_del(&event->list);
 		remove_wait_queue(event->wqh, &event->wait);
 		eventfd_signal(event->eventfd, 1);
 		schedule_work(&event->remove);
 	}
-	spin_unlock(&cgrp->event_list_lock);
 
 	return 0;
 }
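
For reference, the deadlock the new comment describes is an ABBA ordering:
cgroup_event_wake() runs as a wait-queue callback with the wait queue head
lock (wqh->lock) already held and then takes cgrp->event_list_lock, while
the old loop held event_list_lock and called remove_wait_queue(), which
takes wqh->lock. Below is a minimal user-space sketch of that ordering and
of the splice-style fix; the names (ev_list_lock, wqh_lock, destroy_events,
wake_callback) and the pthread mutexes are hypothetical stand-ins for the
kernel's spinlocks, wait queue, and list primitives, not the actual cgroup
code.

/*
 * Sketch of the lock ordering fixed by the patch above.
 *
 * Deadlock in the old code (ABBA):
 *   destroy path:  ev_list_lock -> wqh_lock     (via remove_wait_queue())
 *   wakeup path:   wqh_lock     -> ev_list_lock (via cgroup_event_wake())
 *
 * The fix holds ev_list_lock only long enough to splice the list onto a
 * private head, so wqh_lock is never acquired under ev_list_lock.
 */
#include <pthread.h>
#include <stdio.h>

struct event {
	struct event *next;
	int id;
};

static pthread_mutex_t ev_list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wqh_lock = PTHREAD_MUTEX_INITIALIZER;

static struct event ev1 = { .next = NULL, .id = 1 };
static struct event ev0 = { .next = &ev1, .id = 0 };
static struct event *event_list = &ev0;

/* Wakeup side: runs with wqh_lock held, then takes ev_list_lock, the way
 * cgroup_event_wake() is invoked under the wait queue head lock. */
static void *wake_callback(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&wqh_lock);
	pthread_mutex_lock(&ev_list_lock);
	/* ... unlink a dying event here ... */
	pthread_mutex_unlock(&ev_list_lock);
	pthread_mutex_unlock(&wqh_lock);
	return NULL;
}

/* Destroy side, fixed ordering: splice under ev_list_lock, drop it, then
 * take wqh_lock per event (the remove_wait_queue() stand-in). */
static void destroy_events(void)
{
	struct event *tmp_list, *ev, *next;

	pthread_mutex_lock(&ev_list_lock);
	tmp_list = event_list;		/* list_splice_init() stand-in */
	event_list = NULL;
	pthread_mutex_unlock(&ev_list_lock);

	for (ev = tmp_list; ev; ev = next) {
		next = ev->next;
		/* The old code reached this point while still holding
		 * ev_list_lock: A->B against the B->A in wake_callback(). */
		pthread_mutex_lock(&wqh_lock);
		printf("removed event %d from wait queue\n", ev->id);
		pthread_mutex_unlock(&wqh_lock);
	}
}

int main(void)
{
	pthread_t waker;

	pthread_create(&waker, NULL, wake_callback, NULL);
	destroy_events();
	pthread_join(waker, NULL);
	return 0;
}

The effect of the splice is visible in destroy_events(): ev_list_lock is
never held while wqh_lock is taken, so the two paths can no longer block
each other in opposite order.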