@@ -39,20 +39,21 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;
 
-static inline void check_stack(void)
+static inline void
+check_stack(unsigned long *stack)
 {
 	unsigned long this_size, flags;
 	unsigned long *p, *top, *start;
 	int i;
 
-	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
+	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
 	this_size = THREAD_SIZE - this_size;
 
 	if (this_size <= max_stack_size)
 		return;
 
 	/* we do not handle interrupt stacks yet */
-	if (!object_is_on_stack(&this_size))
+	if (!object_is_on_stack(stack))
 		return;
 
 	local_irq_save(flags);
@@ -73,7 +74,7 @@ static inline void check_stack(void)
 	 * Now find where in the stack these are.
 	 */
 	i = 0;
-	start = &this_size;
+	start = stack;
 	top = (unsigned long *)
 		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
 
@@ -113,6 +114,7 @@ static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
 		 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
+	unsigned long stack;
 	int cpu;
 
 	preempt_disable_notrace();
@@ -122,7 +124,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 	if (per_cpu(trace_active, cpu)++ != 0)
 		goto out;
 
-	check_stack();
+	check_stack(&stack);
 
  out:
 	per_cpu(trace_active, cpu)--;
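
Not part of the patch: the sketch below shows, in plain userspace C, the arithmetic that check_stack() relies on and why the caller now passes the address of one of its own locals. stack_used() and the 16 KB THREAD_SIZE are names and values invented for this illustration; in the kernel THREAD_SIZE is architecture-defined and the thread stack is THREAD_SIZE-aligned, which is what makes the mask meaningful there. With the change above, the measurement is anchored at a local in stack_trace_call()'s frame (&stack) rather than at check_stack()'s own &this_size.

/*
 * Illustration only, not part of the patch.  A userspace stack is not
 * THREAD_SIZE-aligned, so this program only demonstrates the
 * mask-and-subtract step, not a real stack-depth measurement.
 */
#include <stdio.h>

#define THREAD_SIZE (16 * 1024UL)	/* hypothetical value for the sketch */

/*
 * Mirrors the first two statements of check_stack(): the offset of an
 * on-stack address within a THREAD_SIZE-aligned region, subtracted from
 * THREAD_SIZE, is the number of bytes already consumed, since the stack
 * grows down from the top of that region.
 */
static unsigned long stack_used(unsigned long *stack)
{
	unsigned long this_size;

	this_size = ((unsigned long)stack) & (THREAD_SIZE - 1);
	return THREAD_SIZE - this_size;
}

int main(void)
{
	/*
	 * Same trick as the patched stack_trace_call(): a local variable
	 * marks the caller's frame, and its address is what gets measured,
	 * not a frame inside the helper.
	 */
	unsigned long stack;

	printf("bytes below the top of a %lu-byte aligned region: %lu\n",
	       THREAD_SIZE, stack_used(&stack));
	return 0;
}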