|
@@ -795,28 +795,15 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
|
|
|
static int cgroup_call_pre_destroy(struct cgroup *cgrp)
|
|
|
{
|
|
|
struct cgroup_subsys *ss;
|
|
|
- struct cgroup_event *event, *tmp;
|
|
|
int ret = 0;
|
|
|
|
|
|
for_each_subsys(cgrp->root, ss)
|
|
|
if (ss->pre_destroy) {
|
|
|
ret = ss->pre_destroy(ss, cgrp);
|
|
|
if (ret)
|
|
|
- goto out;
|
|
|
+ break;
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * Unregister events and notify userspace.
|
|
|
- */
|
|
|
- spin_lock(&cgrp->event_list_lock);
|
|
|
- list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
|
|
|
- list_del(&event->list);
|
|
|
- eventfd_signal(event->eventfd, 1);
|
|
|
- schedule_work(&event->remove);
|
|
|
- }
|
|
|
- spin_unlock(&cgrp->event_list_lock);
|
|
|
-
|
|
|
-out:
|
|
|
return ret;
|
|
|
}
|
|
|
|
|
@@ -3006,7 +2993,6 @@ static void cgroup_event_remove(struct work_struct *work)
|
|
|
event->cft->unregister_event(cgrp, event->cft, event->eventfd);
|
|
|
|
|
|
eventfd_ctx_put(event->eventfd);
|
|
|
- remove_wait_queue(event->wqh, &event->wait);
|
|
|
kfree(event);
|
|
|
}
|
|
|
|
|
@@ -3024,6 +3010,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
|
|
|
unsigned long flags = (unsigned long)key;
|
|
|
|
|
|
if (flags & POLLHUP) {
|
|
|
+ remove_wait_queue_locked(event->wqh, &event->wait);
|
|
|
spin_lock(&cgrp->event_list_lock);
|
|
|
list_del(&event->list);
|
|
|
spin_unlock(&cgrp->event_list_lock);
|
|
@@ -3472,6 +3459,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
|
|
|
struct dentry *d;
|
|
|
struct cgroup *parent;
|
|
|
DEFINE_WAIT(wait);
|
|
|
+ struct cgroup_event *event, *tmp;
|
|
|
int ret;
|
|
|
|
|
|
/* the vfs holds both inode->i_mutex already */
|
|
@@ -3555,6 +3543,20 @@ again:
|
|
|
set_bit(CGRP_RELEASABLE, &parent->flags);
|
|
|
check_for_release(parent);
|
|
|
|
|
|
+ /*
|
|
|
+ * Unregister events and notify userspace.
|
|
|
+ * Notify userspace about cgroup removal only after rmdir of the cgroup
|
|
|
+ * directory, to avoid a race between userspace and kernelspace.
|
|
|
+ */
|
|
|
+ spin_lock(&cgrp->event_list_lock);
|
|
|
+ list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
|
|
|
+ list_del(&event->list);
|
|
|
+ remove_wait_queue(event->wqh, &event->wait);
|
|
|
+ eventfd_signal(event->eventfd, 1);
|
|
|
+ schedule_work(&event->remove);
|
|
|
+ }
|
|
|
+ spin_unlock(&cgrp->event_list_lock);
|
|
|
+
|
|
|
mutex_unlock(&cgroup_mutex);
|
|
|
return 0;
|
|
|
}
|