@@ -1344,7 +1344,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	 */
 	preempt_disable_notrace();
 
-	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
 	/*
 	 * We don't need any atomic variables, just a barrier.
 	 * If an interrupt comes in, we don't care, because it would
@@ -1398,7 +1398,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 out:
 	/* Again, don't let gcc optimize things here */
 	barrier();
-	__get_cpu_var(ftrace_stack_reserve)--;
+	__this_cpu_dec(ftrace_stack_reserve);
 	preempt_enable_notrace();
 
 }
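
For context, a minimal sketch of the reserve/release pattern these hunks touch, assuming a kernel build with <linux/percpu.h> and <linux/preempt.h>; the helper name example_stack_reserve is illustrative, not part of the patch. __this_cpu_inc_return() increments the current CPU's copy of the variable and returns the new value in one operation (on x86 it compiles down to a %gs-relative increment), where ++__get_cpu_var() first computes the per-cpu address and then increments through it as separate steps.

/* Sketch only: mirrors the pattern in __ftrace_trace_stack(), not the full code. */
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void example_stack_reserve(void)	/* hypothetical helper */
{
	int use_stack;

	/* Stay on this CPU for the whole reserve/release window. */
	preempt_disable_notrace();

	/* Bump this CPU's reserve count and read back the new value. */
	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);

	/* ... use the per-cpu stack when use_stack == 1 ... */

	/* Keep gcc from reordering the release above the use. */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();
}

No atomics are needed because preemption is disabled and only interrupts or NMIs on the same CPU can nest; a nesting context increments and decrements the counter symmetrically before returning, so the compiler barrier is enough.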