@@ -55,6 +55,8 @@
 #include <asm/idle.h>
 #include <asm/syscalls.h>
 #include <asm/ds.h>
+#include <asm/debugreg.h>
+#include <asm/hw_breakpoint.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -248,6 +250,8 @@ void release_thread(struct task_struct *dead_task)
 			BUG();
 		}
 	}
+	if (unlikely(dead_task->thread.debugreg7))
+		flush_thread_hw_breakpoint(dead_task);
 }
 
 static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
@@ -303,12 +307,18 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
+	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
 	savesegment(fs, p->thread.fsindex);
 	savesegment(es, p->thread.es);
 	savesegment(ds, p->thread.ds);
 
+	err = -ENOMEM;
+	if (unlikely(test_tsk_thread_flag(me, TIF_DEBUG)))
+		if (copy_thread_hw_breakpoint(me, p, clone_flags))
+			goto out;
+
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
 		if (!p->thread.io_bitmap_ptr) {
@@ -347,6 +357,9 @@ out:
 		kfree(p->thread.io_bitmap_ptr);
 		p->thread.io_bitmap_max = 0;
 	}
+	if (err)
+		flush_thread_hw_breakpoint(p);
+
 	return err;
 }
 
@@ -492,6 +505,24 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
+	/*
+	 * There's a problem with moving the arch_install_thread_hw_breakpoint()
+	 * call before current is updated. Suppose a kernel breakpoint is
+	 * triggered in between the two: the hw-breakpoint handler will see that
+	 * the 'current' task does not have TIF_DEBUG set and will think it
+	 * is leftover from an old task (lazy switching) and will erase it. Then
+	 * until the next context switch, no user breakpoints will be installed.
+	 *
+	 * The real problem is that it's impossible to update both current and
+	 * the physical debug registers at the same instant, so there will always
+	 * be a window in which they disagree and a breakpoint might get
+	 * triggered. Since we use lazy switching, we are forced to assume that a
+	 * disagreement means that current is correct and the exception is due
+	 * to lazy debug register switching.
+	 */
+	if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG)))
+		arch_install_thread_hw_breakpoint(next_p);
+
 	return prev_p;
 }
 
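For readers tracing the lazy-switching argument in the __switch_to() comment above, here is a small stand-alone C sketch of the policy it describes. Everything in it (the task struct, the flag, the register variables) is an illustrative stand-in, not kernel code or a kernel API:

/*
 * Stand-alone sketch (plain C, not kernel code) of the lazy-switching
 * policy described above: on a debug exception, a disagreement between
 * 'current' and the physical debug registers is resolved in favor of
 * current, and the registers are treated as stale leftovers.
 * All names below are illustrative stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	const char *name;
	bool tif_debug;		/* stands in for TIF_DEBUG */
	unsigned long dr7;	/* task's saved breakpoint control bits */
};

static unsigned long phys_dr7;		/* the "hardware" debug control register */
static struct task *current_task;	/* stands in for 'current' */

/* __switch_to() analogue: current is updated, then breakpoints installed. */
static void context_switch(struct task *next)
{
	current_task = next;
	if (next->tif_debug)
		phys_dr7 = next->dr7;
	/* No else branch: a stale phys_dr7 is tolerated (lazy switching). */
}

/* Debug-exception analogue: trust current when it and phys_dr7 disagree. */
static void debug_exception(void)
{
	if (!current_task->tif_debug) {
		printf("%s: leftover breakpoint, erasing\n", current_task->name);
		phys_dr7 = 0;
	} else {
		printf("%s: genuine breakpoint hit\n", current_task->name);
	}
}

int main(void)
{
	struct task traced = { "traced", true, 0x1 };
	struct task plain = { "plain", false, 0 };

	context_switch(&traced);
	debug_exception();	/* reported as a genuine hit */
	context_switch(&plain);	/* phys_dr7 left stale by laziness */
	debug_exception();	/* erased as a lazy-switching leftover */
	return 0;
}

The second debug_exception() call shows the trade-off the comment warns about: because current wins every disagreement, a breakpoint that fires inside the window where current and the physical registers diverge is silently discarded rather than reported.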