author	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 15:08:14 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 15:08:14 -0700
commit	42c8e86c9c3e15ac653c31d57b2f985579c2991d (patch)
tree	09d9149676af1d37ecd595a89b72c3e367d241f2	/kernel/trace/ftrace.c
parent	3aea311c1b4002bd730a09530f80f2f2ad3bf495 (diff)
parent	170b3b1050e28d1ba0700e262f0899ffa4fccc52 (diff)
Merge tag 'trace-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
 "Nothing new in development for this release. These are mostly fixes
  that were found during development of changes for the next merge
  window and fixes that were sent to me late in the last cycle"

* tag 'trace-v4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Apply trace_clock changes to instance max buffer
  tracing: Fix clear of RECORDED_TGID flag when disabling trace event
  tracing: Add barrier to trace_printk() buffer nesting modification
  ftrace: Fix memleak when unregistering dynamic ops when tracing disabled
  ftrace: Fix selftest goto location on error
  ftrace: Zero out ftrace hashes when a module is removed
  tracing: Only have rmmod clear buffers that its events were active in
  ftrace: Fix debug preempt config name in stack_tracer_{en,dis}able
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	68
1 file changed, 61 insertions, 7 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 96cea88fa00f..6abfafd7f173 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2828,13 +2828,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (!command || !ftrace_enabled) {
 		/*
-		 * If these are per_cpu ops, they still need their
-		 * per_cpu field freed. Since, function tracing is
+		 * If these are dynamic or per_cpu ops, they still
+		 * need their data freed. Since, function tracing is
 		 * not currently active, we can just free them
 		 * without synchronizing all CPUs.
 		 */
-		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
-			per_cpu_ops_free(ops);
+		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
+			goto free_ops;
+
 		return 0;
 	}
@@ -2900,6 +2901,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (IS_ENABLED(CONFIG_PREEMPT))
 		synchronize_rcu_tasks();
+ free_ops:
 	arch_ftrace_trampoline_free(ops);
 	if (ops->flags & FTRACE_OPS_FL_PER_CPU)
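
The two hunks above make up the "Fix memleak when unregistering dynamic ops when tracing disabled" change: when ftrace_shutdown() takes the early exit (tracing disabled), dynamic and per_cpu ops now jump to the free_ops label so arch_ftrace_trampoline_free() and the per-CPU data release still run instead of being skipped. As a rough illustration, here is a minimal sketch (not part of this commit) of an out-of-tree module whose unregister path could hit that branch; the module and symbol names (demo_ops, demo_callback) are hypothetical, and the callback uses the v4.14-era four-argument signature.

/* Hypothetical out-of-tree module sketching the path the fix covers. */
#include <linux/module.h>
#include <linux/ftrace.h>

static void notrace demo_callback(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* No-op callback; only the register/unregister path matters here. */
}

/*
 * Because this ops lives in module data rather than core kernel data,
 * registration marks it FTRACE_OPS_FL_DYNAMIC and may allocate an arch
 * trampoline for it.
 */
static struct ftrace_ops demo_ops = {
	.func  = demo_callback,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init demo_init(void)
{
	return register_ftrace_function(&demo_ops);
}

static void __exit demo_exit(void)
{
	/*
	 * If "echo 0 > /proc/sys/kernel/ftrace_enabled" ran before rmmod,
	 * ftrace_shutdown() takes the early-exit branch above; with this
	 * patch it jumps to free_ops so the trampoline is not leaked.
	 */
	unregister_ftrace_function(&demo_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
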
@@ -5690,10 +5692,51 @@ static int referenced_filters(struct dyn_ftrace *rec)
 	return cnt;
 }
+static void
+clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
+{
+	struct ftrace_func_entry *entry;
+	struct dyn_ftrace *rec;
+	int i;
+
+	if (ftrace_hash_empty(hash))
+		return;
+
+	for (i = 0; i < pg->index; i++) {
+		rec = &pg->records[i];
+		entry = __ftrace_lookup_ip(hash, rec->ip);
+		/*
+		 * Do not allow this rec to match again.
+		 * Yeah, it may waste some memory, but will be removed
+		 * if/when the hash is modified again.
+		 */
+		if (entry)
+			entry->ip = 0;
+	}
+}
+
+/* Clear any records from hashs */
+static void clear_mod_from_hashes(struct ftrace_page *pg)
+{
+	struct trace_array *tr;
+
+	mutex_lock(&trace_types_lock);
+	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+		if (!tr->ops || !tr->ops->func_hash)
+			continue;
+		mutex_lock(&tr->ops->func_hash->regex_lock);
+		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
+		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
+		mutex_unlock(&tr->ops->func_hash->regex_lock);
+	}
+	mutex_unlock(&trace_types_lock);
+}
+
 void ftrace_release_mod(struct module *mod)
 {
 	struct dyn_ftrace *rec;
 	struct ftrace_page **last_pg;
+	struct ftrace_page *tmp_page = NULL;
 	struct ftrace_page *pg;
 	int order;
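
Note that clear_mod_from_hash() above does not delete hash entries for the departing module; it only zeroes entry->ip so the record can never match again, trading a little wasted memory for not having to reshape the hash here (the stale slots disappear the next time the hash is rebuilt). A stand-alone sketch of that tombstone idea follows; it is plain user-space C with made-up names (func_entry, lookup_ip, clear_range), not the ftrace API.

/* Stand-alone sketch of the tombstone pattern used above (not kernel code). */
#include <stdio.h>
#include <stddef.h>

struct func_entry {
	unsigned long ip;	/* 0 means "tombstoned, never match" */
};

/* Return the entry matching ip, skipping tombstones. */
static struct func_entry *lookup_ip(struct func_entry *tbl, size_t n,
				    unsigned long ip)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].ip && tbl[i].ip == ip)
			return &tbl[i];
	return NULL;
}

/* Invalidate every entry that falls inside a removed address range. */
static void clear_range(struct func_entry *tbl, size_t n,
			unsigned long start, unsigned long end)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].ip >= start && tbl[i].ip < end)
			tbl[i].ip = 0;	/* waste a slot now, reclaim on rebuild */
}

int main(void)
{
	struct func_entry tbl[] = { { 0x1000 }, { 0x2000 }, { 0x3000 } };
	size_t n = sizeof(tbl) / sizeof(tbl[0]);

	clear_range(tbl, n, 0x2000, 0x3000);	/* "module" at 0x2000 goes away */
	printf("0x2000 -> %p\n", (void *)lookup_ip(tbl, n, 0x2000));	/* (nil) */
	printf("0x3000 -> %p\n", (void *)lookup_ip(tbl, n, 0x3000));	/* found */
	return 0;
}
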
@@ -5723,14 +5766,25 @@ void ftrace_release_mod(struct module *mod)
 			ftrace_update_tot_cnt -= pg->index;
 			*last_pg = pg->next;
-			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-			free_pages((unsigned long)pg->records, order);
-			kfree(pg);
+
+			pg->next = tmp_page;
+			tmp_page = pg;
 		} else
 			last_pg = &pg->next;
 	}
  out_unlock:
 	mutex_unlock(&ftrace_lock);
+
+	for (pg = tmp_page; pg; pg = tmp_page) {
+
+		/* Needs to be called outside of ftrace_lock */
+		clear_mod_from_hashes(pg);
+
+		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
+		free_pages((unsigned long)pg->records, order);
+		tmp_page = pg->next;
+		kfree(pg);
+	}
 }
 void ftrace_module_enable(struct module *mod)
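
The hunk above changes ftrace_release_mod() to unlink matching pages onto the local tmp_page list while ftrace_lock is held, then calls clear_mod_from_hashes() and frees the pages only after the lock is dropped, presumably because that helper takes trace_types_lock and each ops' regex_lock, which should not nest inside ftrace_lock. The following user-space sketch shows the same "unlink under one lock, process outside it" pattern with hypothetical names (page_rec, release_matching, notify_consumers); it is not kernel code.

/* Stand-alone sketch of "unlink under the lock, free outside it" (not kernel code). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct page_rec {
	struct page_rec *next;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t consumer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct page_rec *pages;		/* list protected by list_lock */

/* Takes its own lock, so it must never run while list_lock is held. */
static void notify_consumers(struct page_rec *pg)
{
	pthread_mutex_lock(&consumer_lock);
	printf("dropping references to page %d\n", pg->id);
	pthread_mutex_unlock(&consumer_lock);
}

static void release_matching(int (*match)(struct page_rec *))
{
	struct page_rec **last, *pg, *tmp = NULL;

	pthread_mutex_lock(&list_lock);
	for (last = &pages; (pg = *last) != NULL; ) {
		if (match(pg)) {
			*last = pg->next;	/* unlink while holding the lock */
			pg->next = tmp;		/* stash on a private list */
			tmp = pg;
		} else {
			last = &pg->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* Other locks and the actual freeing happen only after unlock. */
	for (pg = tmp; pg; pg = tmp) {
		notify_consumers(pg);
		tmp = pg->next;
		free(pg);
	}
}

static int match_odd(struct page_rec *pg)
{
	return pg->id & 1;
}

int main(void)
{
	for (int id = 1; id <= 4; id++) {
		struct page_rec *pg = malloc(sizeof(*pg));
		pg->id = id;
		pg->next = pages;
		pages = pg;
	}
	release_matching(match_odd);	/* frees pages 1 and 3 outside list_lock */
	return 0;
}
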