@@ -2169,12 +2169,57 @@ static cycle_t		ftrace_update_time;
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-	struct ftrace_hash *hash;
+	/*
+	 * Filter_hash being empty will default to trace module.
+	 * But notrace hash requires a test of individual module functions.
+	 */
+	return ftrace_hash_empty(ops->filter_hash) &&
+		ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+	/* If ops isn't enabled, ignore it */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return 0;
+
+	/* If ops traces all mods, we already accounted for it */
+	if (ops_traces_mod(ops))
+		return 0;
+
+	/* The function must be in the filter */
+	if (!ftrace_hash_empty(ops->filter_hash) &&
+	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+		return 0;
 
-	hash = ops->filter_hash;
-	return ftrace_hash_empty(hash);
+	/* If in notrace hash, we ignore it too */
+	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+		return 0;
+
+	return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *ops;
+	int cnt = 0;
+
+	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+		if (ops_references_rec(ops, rec))
+		    cnt++;
+	}
+
+	return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
@@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
 	struct dyn_ftrace *p;
 	cycle_t start, stop;
 	unsigned long ref = 0;
+	bool test = false;
 	int i;
 
 	/*
@@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
 
 		for (ops = ftrace_ops_list;
 		     ops != &ftrace_list_end; ops = ops->next) {
-			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-			    ops_traces_mod(ops))
-				ref++;
+			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+				if (ops_traces_mod(ops))
+					ref++;
+				else
+					test = true;
+			}
 		}
 	}
 
@@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
 	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
 
 		for (i = 0; i < pg->index; i++) {
+			int cnt = ref;
+
 			/* If something went wrong, bail without enabling anything */
 			if (unlikely(ftrace_disabled))
 				return -1;
 
 			p = &pg->records[i];
-			p->flags = ref;
+			if (test)
+				cnt += referenced_filters(p);
+			p->flags = cnt;
 
 			/*
 			 * Do the initial record conversion from mcount jump
@@ -2233,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
 			 * conversion puts the module to the correct state, thus
 			 * passing the ftrace_make_call check.
 			 */
-			if (ftrace_start_up && ref) {
+			if (ftrace_start_up && cnt) {
 				int failed = __ftrace_replace_code(p, 1);
 				if (failed)
 					ftrace_bug(failed, p->ip);