@@ -2739,15 +2739,20 @@ void unregister_ftrace_graph(void)
 void ftrace_graph_init_task(struct task_struct *t)
 {
 	if (atomic_read(&ftrace_graph_active)) {
-		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
-		if (!t->ret_stack)
+		if (!ret_stack)
 			return;
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
+		/* make curr_ret_stack visible before we add the ret_stack */
+		smp_wmb();
+		t->ret_stack = ret_stack;
 	} else
 		t->ret_stack = NULL;
 }
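
The smp_wmb() is the heart of the change: ftrace_graph_init_task() now fully
initializes curr_ret_stack and the other per-task fields before publishing the
buffer through t->ret_stack, so any CPU that observes a non-NULL ret_stack is
guaranteed to also observe the initialized fields. Below is a minimal
userspace sketch of that publish pattern, assuming C11 atomics; the
release/acquire orderings stand in for the kernel's smp_wmb() and its paired
read-side ordering, and the struct and function names here are illustrative,
not taken from the kernel source.

/*
 * Illustrative sketch only: release/acquire atomics model the
 * init-then-publish ordering the patch enforces with smp_wmb().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ret_stack_entry { unsigned long ret; };

struct task {
	int curr_ret_stack;
	struct ret_stack_entry *_Atomic ret_stack;	/* published last */
};

/* Writer: initialize every field, then publish the pointer. */
static void init_task(struct task *t, size_t depth)
{
	struct ret_stack_entry *stack = calloc(depth, sizeof(*stack));

	if (!stack)
		return;
	t->curr_ret_stack = -1;	/* must be visible before ret_stack is */
	/* release store: orders the init stores before the publish,
	 * playing the role of smp_wmb() in the patch */
	atomic_store_explicit(&t->ret_stack, stack, memory_order_release);
}

/* Reader: a visible pointer implies the earlier init stores are visible. */
static void reader(struct task *t)
{
	struct ret_stack_entry *stack =
		atomic_load_explicit(&t->ret_stack, memory_order_acquire);

	if (!stack)
		return;		/* not published yet: nothing to trace */
	printf("curr_ret_stack = %d\n", t->curr_ret_stack);	/* sees -1 */
}

int main(void)
{
	struct task t = { .curr_ret_stack = 0 };

	init_task(&t, 50);
	reader(&t);	/* in the kernel, this would run on another CPU */
	free(atomic_load(&t.ret_stack));
	return 0;
}

Without the barrier, a reader on another CPU could see the freshly assigned
t->ret_stack while t->curr_ret_stack still holds its stale value, which is
exactly the window the original ordering left open.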