@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 			 * so there'll be no new users. We must ensure
 			 * all current users are done before we free
 			 * the control data.
+			 * Note synchronize_sched() is not enough, as we
+			 * use preempt_disable() to do RCU, but the function
+			 * tracer can be called where RCU is not active
+			 * (before user_exit()).
 			 */
-			synchronize_sched();
+			schedule_on_each_cpu(ftrace_sync);
 			control_ops_free(ops);
 		}
 	} else
@@ -456,9 +471,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
	 */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		synchronize_sched();
+		schedule_on_each_cpu(ftrace_sync);
+
 
 	return 0;
 }
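
For context on why schedule_on_each_cpu() works here where synchronize_sched() does not: it queues a work item on every online CPU and waits for each one to finish, so every CPU, including CPUs sitting idle or running in userspace where RCU is not watching, must go through a real context switch into a kworker before the call returns. The sketch below is paraphrased from kernel/workqueue.c of roughly this kernel era; treat it as illustrative rather than as the exact upstream source:

	/* Sketch of schedule_on_each_cpu(); not the exact upstream code. */
	int schedule_on_each_cpu(work_func_t func)
	{
		int cpu;
		struct work_struct __percpu *works;

		/* One work item per CPU. */
		works = alloc_percpu(struct work_struct);
		if (!works)
			return -ENOMEM;

		get_online_cpus();

		/* Queue the (possibly stub) function on every online CPU... */
		for_each_online_cpu(cpu) {
			struct work_struct *work = per_cpu_ptr(works, cpu);

			INIT_WORK(work, func);
			schedule_work_on(cpu, work);
		}

		/* ...and wait until each CPU has actually run it. */
		for_each_online_cpu(cpu)
			flush_work(per_cpu_ptr(works, cpu));

		put_online_cpus();
		free_percpu(works);
		return 0;
	}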
|
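Note that ftrace_sync() being an empty stub is the point: the synchronization comes from the context switch into the kworker on each CPU, not from the work function's body. Once schedule_on_each_cpu(ftrace_sync) returns, no CPU can still be inside a preempt_disable()-protected ftrace callback that began before the call, including a callback that fired before user_exit() on a CPU that synchronize_sched() would have treated as already quiescent, so the dynamic ops and control data can then be freed safely.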