@@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 }
 
 /*
- * Detach all tasks we were using ptrace on.
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
  */
 void exit_ptrace(struct task_struct *tracer)
 {
 	struct task_struct *p, *n;
 	LIST_HEAD(ptrace_dead);
 
-	write_lock_irq(&tasklist_lock);
+	if (likely(list_empty(&tracer->ptraced)))
+		return;
+
 	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 		if (__ptrace_detach(tracer, p))
 			list_add(&p->ptrace_entry, &ptrace_dead);
 	}
-	write_unlock_irq(&tasklist_lock);
 
+	write_unlock_irq(&tasklist_lock);
 	BUG_ON(!list_empty(&tracer->ptraced));
 
 	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
 		list_del_init(&p->ptrace_entry);
 		release_task(p);
 	}
+
+	write_lock_irq(&tasklist_lock);
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
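
With this hunk, exit_ptrace() is entered with tasklist_lock already write-held
by its caller, the likely case of an empty ->ptraced list returns without ever
dropping the lock, and only the slow path unlocks around release_task(). A
minimal sketch of the resulting caller pattern (illustrative only, assuming
the companion kernel/exit.c change so that forget_original_parent() takes the
lock before calling in; not literal patch text):

	write_lock_irq(&tasklist_lock);
	/*
	 * Fast path: "father" has no tracees, so the lock is never
	 * dropped. Slow path: exit_ptrace() releases and reacquires
	 * tasklist_lock around release_task() on the detached tracees,
	 * but returns with it write-held either way.
	 */
	exit_ptrace(father);
	/* ... reparent remaining children with the lock still held ... */
	write_unlock_irq(&tasklist_lock);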