@@ -207,13 +207,15 @@ retry:
 	}
 }
 
-/* Recover the probed instruction at addr for further analysis. */
-static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
+static unsigned long __recover_probed_insn(kprobe_opcode_t *buf,
+					   unsigned long addr)
 {
 	struct kprobe *kp;
+
 	kp = get_kprobe((void *)addr);
+	/* There is no probe; return the original address */
 	if (!kp)
-		return -EINVAL;
+		return addr;
 
 	/*
 	 * Basically, kp->ainsn.insn has an original instruction.
@@ -230,14 +232,76 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 	 */
 	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 	buf[0] = kp->opcode;
-	return 0;
+	return (unsigned long)buf;
+}
+
+#ifdef CONFIG_OPTPROBES
+static unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf,
+					      unsigned long addr)
+{
+	struct optimized_kprobe *op;
+	struct kprobe *kp;
+	long offs;
+	int i;
+
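+	/*
+	 * A jump-optimized probe overwrites RELATIVEJUMP_SIZE bytes, so
+	 * addr may point into the middle of a jump placed at a lower
+	 * address; scan backwards for such a probe.
+	 */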
+	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
+		kp = get_kprobe((void *)addr - i);
+		/* This function only handles jump-optimized kprobes */
+		if (kp && kprobe_optimized(kp)) {
+			op = container_of(kp, struct optimized_kprobe, kp);
+			/* If op->list is not empty, op is being optimized */
+			if (list_empty(&op->list))
+				goto found;
+		}
+	}
+
|
|
|
+ return addr;
|
|
|
+found:
|
|
|
+ /*
|
|
|
+ * If the kprobe can be optimized, original bytes which can be
|
|
|
+ * overwritten by jump destination address. In this case, original
|
|
|
+ * bytes must be recovered from op->optinsn.copied_insn buffer.
|
|
|
+ */
|
|
|
+ memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
|
|
|
+ if (addr == (unsigned long)kp->addr) {
|
|
|
+ buf[0] = kp->opcode;
|
|
|
+ memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
|
|
|
+ } else {
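+		/*
+		 * addr points into the middle of the jump. The -1 skips
+		 * the one-byte jump opcode at kp->addr; copied_insn holds
+		 * only the original bytes that followed it.
+		 */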
+		offs = addr - (unsigned long)kp->addr - 1;
+		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
+	}
+
+	return (unsigned long)buf;
+}
+#else
+static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf,
+						     unsigned long addr)
+{
+	return addr;
+}
+#endif
+
+/*
+ * Recover the probed instruction at addr for further analysis.
+ * The caller must hold kprobe_mutex or disable preemption to
+ * prevent the referenced kprobes from being released.
+ */
+static unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
+						unsigned long addr)
+{
+	unsigned long __addr;
+
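+	/*
+	 * Try optimized-probe recovery first: the relative jump spans
+	 * several bytes, so addr may fall inside it without matching
+	 * any probe address exactly.
+	 */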
+	__addr = __recover_optprobed_insn(buf, addr);
+	if (__addr != addr)
+		return __addr;
+
+	return __recover_probed_insn(buf, addr);
 }
 
 /* Check if paddr is at an instruction boundary */
 static int __kprobes can_probe(unsigned long paddr)
 {
-	int ret;
-	unsigned long addr, offset = 0;
+	unsigned long addr, __addr, offset = 0;
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
 
@@ -247,26 +311,24 @@ static int __kprobes can_probe(unsigned long paddr)
 	/* Decode instructions */
 	addr = paddr - offset;
 	while (addr < paddr) {
-		kernel_insn_init(&insn, (void *)addr);
-		insn_get_opcode(&insn);
-
 		/*
 		 * Check if the instruction has been modified by another
 		 * kprobe, in which case we replace the breakpoint by the
 		 * original instruction in our buffer.
+		 * Also, jump optimization will change the breakpoint to a
+		 * relative jump. Since a relative jump is itself a normal
+		 * instruction, we just pass it through when there is no kprobe.
 		 */
-		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
-			ret = recover_probed_instruction(buf, addr);
-			if (ret)
-				/*
-				 * Another debugging subsystem might insert
-				 * this breakpoint. In that case, we can't
-				 * recover it.
-				 */
-				return 0;
-			kernel_insn_init(&insn, buf);
-		}
+		__addr = recover_probed_instruction(buf, addr);
+		kernel_insn_init(&insn, (void *)__addr);
 		insn_get_length(&insn);
+
+		/*
+		 * Another debugging subsystem might insert this breakpoint.
+		 * In that case, we can't recover it.
+		 */
+		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+			return 0;
 		addr += insn.length;
 	}
 
@@ -302,21 +364,17 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
 static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
 {
 	struct insn insn;
-	int ret;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
+	u8 *orig_src = src;	/* Back up original src for RIP calculation */
+
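+	/* If a probe hides the original instruction, decode the recovered copy */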
+	if (recover)
+		src = (u8 *)recover_probed_instruction(buf, (unsigned long)src);
 
 	kernel_insn_init(&insn, src);
-	if (recover) {
-		insn_get_opcode(&insn);
-		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
-			ret = recover_probed_instruction(buf,
-							 (unsigned long)src);
-			if (ret)
-				return 0;
-			kernel_insn_init(&insn, buf);
-		}
-	}
 	insn_get_length(&insn);
+	/* Another subsystem put in a breakpoint that we failed to recover */
+	if (recover && insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+		return 0;
 	memcpy(dest, insn.kaddr, insn.length);
 
 #ifdef CONFIG_X86_64
@@ -337,8 +395,7 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
 	 * extension of the original signed 32-bit displacement would
 	 * have given.
 	 */
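+	/*
+	 * Use orig_src here: src may now point at the recovery buffer,
+	 * but the displacement must stay relative to the instruction's
+	 * real address.
+	 */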
-	newdisp = (u8 *) src + (s64) insn.displacement.value -
-		  (u8 *) dest;
+	newdisp = (u8 *) orig_src + (s64) insn.displacement.value - (u8 *) dest;
 	BUG_ON((s64) (s32) newdisp != newdisp);	/* Sanity check.  */
 	disp = (u8 *) dest + insn_offset_displacement(&insn);
 	*(s32 *) disp = (s32) newdisp;
@@ -1271,8 +1328,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 /* Decode whole function to ensure any instructions don't jump into target */
 static int __kprobes can_optimize(unsigned long paddr)
 {
-	int ret;
-	unsigned long addr, size = 0, offset = 0;
+	unsigned long addr, __addr, size = 0, offset = 0;
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
 
@@ -1301,15 +1357,12 @@ static int __kprobes can_optimize(unsigned long paddr)
 			 * we can't optimize kprobe in this function.
 			 */
 			return 0;
-		kernel_insn_init(&insn, (void *)addr);
-		insn_get_opcode(&insn);
-		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
-			ret = recover_probed_instruction(buf, addr);
-			if (ret)
-				return 0;
-			kernel_insn_init(&insn, buf);
-		}
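+		/* Recover the original instruction if a probe has modified it */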
+		__addr = recover_probed_instruction(buf, addr);
+		kernel_insn_init(&insn, (void *)__addr);
 		insn_get_length(&insn);
+		/* Another subsystem put in a breakpoint; give up optimizing */
+		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+			return 0;
 		/* Recover address */
 		insn.kaddr = (void *)addr;
 		insn.next_byte = (void *)(addr + insn.length);
@@ -1366,6 +1419,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 /*
  * Copy replacing target instructions
  * Target instructions MUST be relocatable (checked inside)
+ * This is called when a new aggr(opt)probe is allocated or reused.
  */
 int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 {