@@ -1243,14 +1243,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
@@ -1258,45 +1254,16 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 		     "profile buffer not large enough"))
 		return 0;
 
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
-
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
-
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return 0;
 
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->ip = (unsigned long)kp->addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ip, 1, entry, size);
 
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
 
 	return 0;
 }
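
Both probe handlers open-coded the same raw-sample buffer sequence, which this patch moves behind ftrace_perf_buf_prepare(): disable interrupts (which also pins the RCU read side), take a software-event recursion context, pick the NMI or non-NMI per-cpu buffer, zero the alignment padding so no kernel bytes leak to userspace, and fill in the generic trace_entry header. A minimal sketch of the factored-out helper, reconstructed from the removed lines above rather than quoted from the patch, so the label names and exact declarations are guesses:

__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the non-NMI buffer; this also protects the RCU read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	/* perf_trace_buf and perf_trace_buf_nmi are the per-cpu
	 * profile buffers referenced by the removed code above */
	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}

The same conversion applies to the return-probe handler below; only the record layout (func/ret_ip instead of ip) and the address passed at submit time differ.
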
@@ -1308,14 +1275,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
@@ -1323,46 +1286,17 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 		     "profile buffer not large enough"))
 		return 0;
 
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
-
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
-
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kretprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return 0;
 
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->func = (unsigned long)tp->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
 
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
 
 	return 0;
 }
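
The submit side is symmetric: it hands the filled record to perf, then unwinds the recursion context and IRQ state that the prepare call set up. Again a sketch inferred from the removed lines and the new call sites (record, size, recursion context, sample address, count, saved flags), not a verbatim quote of the helper:

static inline void
ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, unsigned long irq_flags)
{
	struct trace_entry *entry = raw_data;

	/* The header's type field was set by ftrace_perf_buf_prepare() */
	perf_tp_event(entry->type, addr, count, raw_data, size);
	perf_swevent_put_recursion_context(rctx);
	local_irq_restore(irq_flags);
}

With the pair in place, each handler reduces to four steps: compute the record size, prepare the buffer, fill in the probe-specific fields, and submit.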