ソースを参照

x86, ptrace, mm: fix double-free on race

Ptrace_detach() races with __ptrace_unlink() if the traced task is
reaped while detaching. This might cause a double-free of the BTS
buffer.

Change the ptrace_detach() path to only do the memory accounting in
ptrace_bts_detach() and leave the buffer free to ptrace_bts_untrace()
which will be called from __ptrace_unlink().

The fix follows a proposal from Oleg Nesterov.

Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Markus Metzger 16 年 前
コミット
9f339e7028
3 ファイル変更17 行追加7 行削除
  1. 10 6
      arch/x86/kernel/ptrace.c
  2. 1 0
      include/linux/mm.h
  3. 6 1
      mm/mlock.c

+ 10 - 6
arch/x86/kernel/ptrace.c

@@ -810,12 +810,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
 
 static void ptrace_bts_detach(struct task_struct *child)
 {
-	if (unlikely(child->bts)) {
-		ds_release_bts(child->bts);
-		child->bts = NULL;
-
-		ptrace_bts_free_buffer(child);
-	}
+	/*
+	 * Ptrace_detach() races with ptrace_untrace() in case
+	 * the child dies and is reaped by another thread.
+	 *
+	 * We only do the memory accounting at this point and
+	 * leave the buffer deallocation and the bts tracer
+	 * release to ptrace_bts_untrace() which will be called
+	 * later on with tasklist_lock held.
+	 */
+	release_locked_buffer(child->bts_buffer, child->bts_size);
 }
 #else
 static inline void ptrace_bts_fork(struct task_struct *tsk) {}

+ 1 - 0
include/linux/mm.h

@@ -1305,5 +1305,6 @@ void vmemmap_populate_print_last(void);
 
 extern void *alloc_locked_buffer(size_t size);
 extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */

+ 6 - 1
mm/mlock.c

@@ -657,7 +657,7 @@ void *alloc_locked_buffer(size_t size)
 	return buffer;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 
@@ -667,6 +667,11 @@ void free_locked_buffer(void *buffer, size_t size)
 	current->mm->locked_vm -= pgsz;
 
 	up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	release_locked_buffer(buffer, size);
 
 	kfree(buffer);
 }