@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this is spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
 	hcall_tracepoint_refcount++;
@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)
 
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_entry(opcode, args);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 
 void __trace_hcall_exit(long opcode, unsigned long retval,
 	unsigned long *retbuf)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_exit(opcode, retval, retbuf);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 #endif
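For reference, the guard the patch adds can be sketched outside the kernel.
The following is a minimal userspace analogue, not the patch itself: a GCC
__thread counter stands in for the per-CPU hcall_trace_depth, and
trace_depth, do_trace() and traced_entry() are hypothetical names invented
for this sketch.

#include <stdio.h>

/* Per-thread recursion counter, playing the role of the kernel's
 * per-CPU hcall_trace_depth (assumption: one counter per context). */
static __thread unsigned int trace_depth;

/* Stand-in for trace_hcall_entry(); imagine this handler could itself
 * perform the traced operation and so re-enter traced_entry(). */
static void do_trace(const char *what)
{
	printf("trace: %s\n", what);
}

static void traced_entry(const char *what)
{
	if (trace_depth)	/* already inside the tracer: skip, breaking recursion */
		return;

	trace_depth++;		/* mark this context as tracing */
	do_trace(what);
	trace_depth--;		/* tracer finished, re-arm tracing */
}

int main(void)
{
	traced_entry("H_CEDE");
	return 0;
}

What the sketch cannot show is the interrupt side: in the kernel version
the local_irq_save()/local_irq_restore() pair keeps an interrupt on the
same CPU from interleaving with the check/increment/decrement of the
per-CPU counter, which is why disabling interrupts is needed there in
addition to the depth test.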