@@ -14,9 +14,20 @@
 #include "trace.h"
 #include "trace_output.h"
 
-struct fgraph_data {
+struct fgraph_cpu_data {
 	pid_t		last_pid;
 	int		depth;
+	int		ignore;
+};
+
+struct fgraph_data {
+	struct fgraph_cpu_data	*cpu_data;
+
+	/* Place to preserve last processed entry. */
+	struct ftrace_graph_ent_entry	ent;
+	struct ftrace_graph_ret_entry	ret;
+	int				failed;
+	int				cpu;
 };
 
 #define TRACE_GRAPH_INDENT	2
@@ -384,7 +395,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
+	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 
 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
@@ -435,26 +446,49 @@ static struct ftrace_graph_ret_entry *
 get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
-	struct ring_buffer_iter *ring_iter;
+	struct fgraph_data *data = iter->private;
+	struct ring_buffer_iter *ring_iter = NULL;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *next;
 
-	ring_iter = iter->buffer_iter[iter->cpu];
+	/*
+	 * If the previous output failed to write to the seq buffer,
+	 * then we just reuse the data from before.
+	 */
+	if (data && data->failed) {
+		curr = &data->ent;
+		next = &data->ret;
+	} else {
 
-	/* First peek to compare current entry and the next one */
-	if (ring_iter)
-		event = ring_buffer_iter_peek(ring_iter, NULL);
-	else {
-		/* We need to consume the current entry to see the next one */
-		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
-		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-					NULL);
-	}
+		ring_iter = iter->buffer_iter[iter->cpu];
+
+		/* First peek to compare current entry and the next one */
+		if (ring_iter)
+			event = ring_buffer_iter_peek(ring_iter, NULL);
+		else {
+			/*
+			 * We need to consume the current entry to see
+			 * the next one.
+			 */
+			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+						 NULL);
+		}
 
-	if (!event)
-		return NULL;
+		if (!event)
+			return NULL;
+
+		next = ring_buffer_event_data(event);
 
-	next = ring_buffer_event_data(event);
+		if (data) {
+			/*
+			 * Save current and next entries for later reference
+			 * if the output fails.
+			 */
+			data->ent = *curr;
+			data->ret = *next;
+		}
+	}
 
 	if (next->ent.type != TRACE_GRAPH_RET)
 		return NULL;
@@ -640,7 +674,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
 	if (data) {
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
 		/*
 		 * Comments display at + 1 to depth. Since
@@ -688,7 +722,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 
 	if (data) {
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
 		*depth = call->depth;
 	}
@@ -782,19 +816,34 @@ static enum print_line_t
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 			struct trace_iterator *iter)
 {
-	int cpu = iter->cpu;
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ent *call = &field->graph_ent;
 	struct ftrace_graph_ret_entry *leaf_ret;
+	static enum print_line_t ret;
+	int cpu = iter->cpu;
 
 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
-		return print_graph_entry_leaf(iter, field, leaf_ret, s);
+		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(iter, field, s, cpu);
+		ret = print_graph_entry_nested(iter, field, s, cpu);
 
+	if (data) {
+		/*
+		 * If we failed to write our output, then we need to make
+		 * note of it. Because we already consumed our entry.
+		 */
+		if (s->full) {
+			data->failed = 1;
+			data->cpu = cpu;
+		} else
+			data->failed = 0;
+	}
+
+	return ret;
 }
 
 static enum print_line_t
@@ -810,7 +859,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 
 	if (data) {
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
 		/*
 		 * Comments display at + 1 to depth. This is the
@@ -873,7 +922,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 	int i;
 
 	if (data)
-		depth = per_cpu_ptr(data, iter->cpu)->depth;
+		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
 
 	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;
@@ -941,8 +990,33 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
+	struct ftrace_graph_ent_entry *field;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *s = &iter->seq;
+	int cpu = iter->cpu;
+	int ret;
+
+	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
+		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
+		return TRACE_TYPE_HANDLED;
+	}
+
+	/*
+	 * If the last output failed, there's a possibility we need
+	 * to print out the missing entry which would never go out.
+	 */
+	if (data && data->failed) {
+		field = &data->ent;
+		iter->cpu = data->cpu;
+		ret = print_graph_entry(field, s, iter);
+		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
+			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
+			ret = TRACE_TYPE_NO_CONSUME;
+		}
+		iter->cpu = cpu;
+		return ret;
+	}
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
@@ -952,7 +1026,7 @@ print_graph_function(struct trace_iterator *iter)
 		 * sizeof(struct ftrace_graph_ent_entry) is very small,
 		 * it can be safely saved at the stack.
 		 */
-		struct ftrace_graph_ent_entry *field, saved;
+		struct ftrace_graph_ent_entry saved;
 		trace_assign_type(field, entry);
 		saved = *field;
 		return print_graph_entry(&saved, s, iter);
@@ -1030,31 +1104,54 @@ static void print_graph_headers(struct seq_file *s)
 static void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
-	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+	struct fgraph_data *data;
 	int cpu;
 
+	iter->private = NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
-		pr_warning("function graph tracer: not enough memory\n");
-	else
-		for_each_possible_cpu(cpu) {
-			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
-			int *depth = &(per_cpu_ptr(data, cpu)->depth);
-			*pid = -1;
-			*depth = 0;
-		}
+		goto out_err;
+
+	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+	if (!data->cpu_data)
+		goto out_err_free;
+
+	for_each_possible_cpu(cpu) {
+		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+		*pid = -1;
+		*depth = 0;
+		*ignore = 0;
+	}
 
 	iter->private = data;
+
+	return;
+
+ out_err_free:
+	kfree(data);
+ out_err:
+	pr_warning("function graph tracer: not enough memory\n");
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
 {
-	free_percpu(iter->private);
+	struct fgraph_data *data = iter->private;
+
+	if (data) {
+		free_percpu(data->cpu_data);
+		kfree(data);
+	}
 }
 
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
+	.pipe_open	= graph_trace_open,
 	.close		= graph_trace_close,
+	.pipe_close	= graph_trace_close,
 	.wait_pipe	= poll_wait_pipe,
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,