|
@@ -122,6 +122,23 @@ static inline void restore_previous_kprobe(void)
 	kprobe_saved_msr = kprobe_saved_msr_prev;
 }
 
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->task = current;
+		ri->ret_addr = (kprobe_opcode_t *)regs->link;
+
+		/* Replace the return addr with trampoline addr */
+		regs->link = (unsigned long)kretprobe_trampoline;
+		add_rp_inst(ri);
+	} else {
+		rp->nmissed++;
+	}
+}
+
 static inline int kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
|
|
@@ -211,6 +228,78 @@ no_kprobe:
 	return ret;
 }
 
+/*
+ * Function return probe trampoline:
+ *	- init_kprobes() establishes a probepoint here
+ *	- When the probed function returns, this probe
+ *	  causes the handlers to fire
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile(".global kretprobe_trampoline\n"
+			"kretprobe_trampoline:\n"
+			"nop\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
+	unsigned long orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+	head = kretprobe_inst_table_head(current);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *     - instances are always inserted at the head of the list
+	 *     - when multiple return probes are registered for the same
+	 *       function, the first instance's ret_addr will point to the
+	 *       real return address, and all the rest will point to
+	 *       kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	regs->nip = orig_ret_address;
+
+	unlock_kprobes();
+
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we have handled unlocking
+	 * and re-enabling preemption.
+	 */
+	return 1;
+}
+
 /*
  * Called after single-stepping. p->addr is the address of the
  * instruction whose first byte has been replaced by the "breakpoint"
|
|
@@ -349,3 +438,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
 	return 1;
 }
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init(void)
+{
+	return register_kprobe(&trampoline_p);
+}
|