@@ -635,13 +635,13 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_CGROUPS
	/*
-	 * The threadgroup_fork_lock prevents threads from forking with
+	 * The group_rwsem prevents threads from forking with
	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
	 * threadgroup-wide operations. It's taken for reading in fork.c in
	 * copy_process().
	 * Currently only needed write-side by cgroups.
	 */
-	struct rw_semaphore threadgroup_fork_lock;
+	struct rw_semaphore group_rwsem;
 #endif

	int oom_adj;		/* OOM kill score adjustment (bit shift) */
@@ -2371,29 +2371,29 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }

-/* See the declaration of threadgroup_fork_lock in signal_struct. */
+/* See the declaration of group_rwsem in signal_struct. */
 #ifdef CONFIG_CGROUPS
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
 {
-	down_read(&tsk->signal->threadgroup_fork_lock);
+	down_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
 {
-	up_read(&tsk->signal->threadgroup_fork_lock);
+	up_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+static inline void threadgroup_lock(struct task_struct *tsk)
 {
-	down_write(&tsk->signal->threadgroup_fork_lock);
+	down_write(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+static inline void threadgroup_unlock(struct task_struct *tsk)
 {
-	up_write(&tsk->signal->threadgroup_fork_lock);
+	up_write(&tsk->signal->group_rwsem);
 }
 #else
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void threadgroup_change_end(struct task_struct *tsk) {}
+static inline void threadgroup_lock(struct task_struct *tsk) {}
+static inline void threadgroup_unlock(struct task_struct *tsk) {}
 #endif

 #ifndef __HAVE_THREAD_FUNCTIONS
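
For context, a minimal usage sketch of how the renamed primitives pair up (illustrative only, not part of the patch; 'leader' below stands for some thread-group leader task): the read side brackets the fork path so that no new CLONE_THREAD sibling can appear while a writer holds the rwsem, and the write side brackets a threadgroup-wide operation such as a cgroup migration.

	/* read side, as taken around copy_process() in kernel/fork.c */
	threadgroup_change_begin(current);
	/* ... set up the new task; a CLONE_THREAD fork cannot race a writer ... */
	threadgroup_change_end(current);

	/* write side, e.g. moving every thread of a group into a cgroup */
	threadgroup_lock(leader);
	/* ... walk and migrate all threads of 'leader'; no new thread can appear ... */
	threadgroup_unlock(leader);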