@@ -382,20 +382,14 @@ static inline int ftrace_get_offsets_##call( \
  *
  * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
  *
- * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * static int ftrace_profile_enable_<call>(void)
  * {
- *	int ret = 0;
- *
- *	if (!atomic_inc_return(&event_call->profile_count))
- *		ret = register_trace_<call>(ftrace_profile_<call>);
- *
- *	return ret;
+ *	return register_trace_<call>(ftrace_profile_<call>);
  * }
  *
- * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * static void ftrace_profile_disable_<call>(void)
  * {
- *	if (atomic_add_negative(-1, &event->call->profile_count))
- *		unregister_trace_<call>(ftrace_profile_<call>);
+ *	unregister_trace_<call>(ftrace_profile_<call>);
  * }
  *
  */
@@ -405,20 +399,14 @@ static inline int ftrace_get_offsets_##call( \
 									\
 static void ftrace_profile_##call(proto);				\
 									\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+static int ftrace_profile_enable_##call(void)				\
 {									\
-	int ret = 0;							\
-									\
-	if (!atomic_inc_return(&event_call->profile_count))		\
-		ret = register_trace_##call(ftrace_profile_##call);	\
-									\
-	return ret;							\
+	return register_trace_##call(ftrace_profile_##call);		\
 }									\
 									\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+static void ftrace_profile_disable_##call(void)			\
 {									\
-	if (atomic_add_negative(-1, &event_call->profile_count))	\
-		unregister_trace_##call(ftrace_profile_##call);		\
+	unregister_trace_##call(ftrace_profile_##call);			\
 }
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
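The profile_count bookkeeping dropped from the generated callbacks has to
live in one generic place instead of being stamped out once per event. A
minimal sketch of such a shared helper, assuming struct ftrace_event_call
keeps its profile_count field and carries profile_enable()/profile_disable()
function pointers with the new void signature (helper and field names here
are illustrative, not the exact kernel code):

	#include <linux/ftrace_event.h>

	static int profile_event_enable(struct ftrace_event_call *event)
	{
		int ret = 0;

		/* profile_count starts at -1, so only the 0-crossing
		 * (first enabler) registers the tracepoint */
		if (!atomic_inc_return(&event->profile_count))
			ret = event->profile_enable();

		return ret;
	}

	static void profile_event_disable(struct ftrace_event_call *event)
	{
		/* back below zero: the last disabler unregisters */
		if (atomic_add_negative(-1, &event->profile_count))
			event->profile_disable();
	}

This keeps the 0-crossing refcount convention of the removed code while the
generated per-event functions shrink to plain register/unregister calls.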
@@ -660,11 +648,13 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *	struct ftrace_raw_##call *entry;
  *	u64 __addr = 0, __count = 1;
  *	unsigned long irq_flags;
+ *	struct trace_entry *ent;
  *	int __entry_size;
  *	int __data_size;
+ *	char *raw_data;
+ *	int __cpu;
  *	int pc;
  *
- *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
  *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
@@ -675,25 +665,34 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  *			     sizeof(u64));
  *	__entry_size -= sizeof(u32);
  *
- *	do {
- *		char raw_data[__entry_size]; <- allocate our sample in the stack
- *		struct trace_entry *ent;
+ *	// Protect the non-nmi buffer
+ *	// This also protects the rcu read side
+ *	local_irq_save(irq_flags);
+ *	__cpu = smp_processor_id();
+ *
+ *	if (in_nmi())
+ *		raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *	else
+ *		raw_data = rcu_dereference(trace_profile_buf);
+ *
+ *	if (!raw_data)
+ *		goto end;
  *
- *		zero dead bytes from alignment to avoid stack leak to userspace:
+ *	raw_data = per_cpu_ptr(raw_data, __cpu);
  *
- *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- *		entry = (struct ftrace_raw_<call> *)raw_data;
- *		ent = &entry->ent;
- *		tracing_generic_entry_update(ent, irq_flags, pc);
- *		ent->type = event_call->id;
+ *	// zero dead bytes from alignment to avoid stack leak to userspace:
+ *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ *	entry = (struct ftrace_raw_<call> *)raw_data;
+ *	ent = &entry->ent;
+ *	tracing_generic_entry_update(ent, irq_flags, pc);
+ *	ent->type = event_call->id;
  *
- *		<tstruct> <- do some jobs with dynamic arrays
+ *	<tstruct> <- do some jobs with dynamic arrays
  *
- *		<assign> <- affect our values
+ *	<assign> <- assign our values
  *
- *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
- *			     __entry_size); <- submit them to perf counter
- *	} while (0);
+ *	perf_tpcounter_event(event_call->id, __addr, __count, entry,
+ *			     __entry_size); <- submit them to perf counter
  *
  * }
  */
@@ -716,11 +715,13 @@ static void ftrace_profile_##call(proto) \
 	struct ftrace_raw_##call *entry;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
+	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
+	char *raw_data;							\
+	int __cpu;							\
 	int pc;								\
 									\
-	local_save_flags(irq_flags);					\
 	pc = preempt_count();						\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
@@ -728,23 +729,38 @@ static void ftrace_profile_##call(proto) \
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	do {								\
-		char raw_data[__entry_size];				\
-		struct trace_entry *ent;				\
+	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
+		      "profile buffer not large enough"))		\
+		return;							\
+									\
+	local_irq_save(irq_flags);					\
+	__cpu = smp_processor_id();					\
 									\
-		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
-		entry = (struct ftrace_raw_##call *)raw_data;		\
-		ent = &entry->ent;					\
-		tracing_generic_entry_update(ent, irq_flags, pc);	\
-		ent->type = event_call->id;				\
+	if (in_nmi())							\
+		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
+	else								\
+		raw_data = rcu_dereference(trace_profile_buf);		\
 									\
-		tstruct							\
+	if (!raw_data)							\
+		goto end;						\
 									\
-		{ assign; }						\
+	raw_data = per_cpu_ptr(raw_data, __cpu);			\
 									\
-		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
+	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
+	entry = (struct ftrace_raw_##call *)raw_data;			\
+	ent = &entry->ent;						\
+	tracing_generic_entry_update(ent, irq_flags, pc);		\
+	ent->type = event_call->id;					\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	perf_tpcounter_event(event_call->id, __addr, __count, entry,	\
 		__entry_size);						\
-	} while (0);							\
+									\
+end:									\
+	local_irq_restore(irq_flags);					\
 									\
 }
 
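The rcu_dereference() calls in the new fast path assume that the two
per-cpu buffers are allocated elsewhere and published with RCU semantics
when the first profiled event is enabled. A minimal sketch of that
allocation side, assuming one FTRACE_MAX_PROFILE_SIZE-sized slot per cpu
(the helper name and error handling here are illustrative, not the exact
kernel implementation):

	#include <linux/errno.h>
	#include <linux/percpu.h>
	#include <linux/rcupdate.h>

	/* one sample slot per cpu, sized to satisfy the WARN_ONCE check above */
	typedef struct { char buf[FTRACE_MAX_PROFILE_SIZE]; } profile_buf_t;

	char *trace_profile_buf;	/* used from process/irq context */
	char *trace_profile_buf_nmi;	/* private copy for NMI context */

	static int trace_profile_alloc_buffers(void)
	{
		char *buf;

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			return -ENOMEM;
		/* pairs with rcu_dereference(trace_profile_buf) in the macro */
		rcu_assign_pointer(trace_profile_buf, buf);

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf) {
			free_percpu(trace_profile_buf);
			trace_profile_buf = NULL;
			return -ENOMEM;
		}
		rcu_assign_pointer(trace_profile_buf_nmi, buf);

		return 0;
	}

Two buffers are needed because an NMI can interrupt a regular trace on the
same cpu and would otherwise scribble over the half-built sample of the
interrupted context: local_irq_save() fends off everything except NMIs,
which is why they get their own copy.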