Skip to content

Commit d7fbf8d

Browse files
committed
ftrace: Implement cached modules tracing on module load
If a module is cached in set_ftrace_filter, and that module is later loaded, then enable tracing on that module as if the cached module text had been written into set_ftrace_filter at the moment the module was loaded. Example:

  # echo ":mod:kvm_intel" > /sys/kernel/tracing/set_ftrace_filter
  # cat /sys/kernel/tracing/set_ftrace_filter
  #### all functions enabled ####
  :mod:kvm_intel
  # modprobe kvm_intel
  # cat /sys/kernel/tracing/set_ftrace_filter
  vmx_get_rflags [kvm_intel]
  vmx_get_pkru [kvm_intel]
  vmx_get_interrupt_shadow [kvm_intel]
  vmx_rdtscp_supported [kvm_intel]
  vmx_invpcid_supported [kvm_intel]
  [..]

Signed-off-by: Steven Rostedt (VMware) <[email protected]>
1 parent 5985ea8 commit d7fbf8d

File tree

1 file changed

+93
-0
lines changed

1 file changed

+93
-0
lines changed

kernel/trace/ftrace.c

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3969,6 +3969,97 @@ static int cache_mod(struct trace_array *tr,
39693969
return ret;
39703970
}
39713971

3972+
static int
3973+
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3974+
int reset, int enable);
3975+
3976+
/*
 * Walk the cached-module list @head and enable (or disable) tracing of
 * the functions of module @mod that were cached before the module was
 * loaded (via ":mod:<module>" written to set_ftrace_filter/notrace).
 *
 * @head:   list of ftrace_mod_load entries (tr->mod_trace or tr->mod_notrace)
 * @ops:    the ftrace_ops whose filter/notrace hash is to be updated
 * @mod:    name of the module that was just loaded
 * @enable: true to update the filter_hash, false for the notrace_hash
 *
 * Matching entries are removed from @head and freed. Failures (OOM) are
 * best-effort ignored: the module simply loads without the cached filter.
 */
static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
			     char *mod, bool enable)
{
	struct ftrace_mod_load *ftrace_mod, *n;
	struct ftrace_hash **orig_hash, *new_hash;
	LIST_HEAD(process_mods);
	char *func;
	int ret;

	mutex_lock(&ops->func_hash->regex_lock);

	if (enable)
		orig_hash = &ops->func_hash->filter_hash;
	else
		orig_hash = &ops->func_hash->notrace_hash;

	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
					      *orig_hash);
	if (!new_hash)
		goto out; /* warn? */

	mutex_lock(&ftrace_lock);

	list_for_each_entry_safe(ftrace_mod, n, head, list) {

		if (strcmp(ftrace_mod->module, mod) != 0)
			continue;

		/* A NULL func means "all functions of the module" */
		if (ftrace_mod->func)
			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
		else
			func = kstrdup("*", GFP_KERNEL);

		if (!func) /* warn? */
			continue;

		/* Move the entry to the private list for lock-free processing */
		list_move(&ftrace_mod->list, &process_mods);

		/* Use the newly allocated func, as it may be "*" */
		kfree(ftrace_mod->func);
		ftrace_mod->func = func;
	}

	mutex_unlock(&ftrace_lock);

	/*
	 * match_records() grabs ftrace_lock itself, which is why the
	 * matching is done in this second pass, after ftrace_lock was
	 * dropped above.
	 */
	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {

		func = ftrace_mod->func;

		match_records(new_hash, func, strlen(func), mod);
		free_ftrace_mod(ftrace_mod);
	}

	mutex_lock(&ftrace_lock);

	/* Failure here is ignored; enabling cached filters is best-effort */
	ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
					      new_hash, enable);
	mutex_unlock(&ftrace_lock);

 out:
	/*
	 * Must drop regex_lock on ALL paths — the original bare return on
	 * allocation failure leaked the mutex, deadlocking any later write
	 * to set_ftrace_filter.
	 */
	mutex_unlock(&ops->func_hash->regex_lock);

	/* free_ftrace_hash() is a no-op on NULL */
	free_ftrace_hash(new_hash);
}
4041+
4042+
static void process_cached_mods(const char *mod_name)
4043+
{
4044+
struct trace_array *tr;
4045+
char *mod;
4046+
4047+
mod = kstrdup(mod_name, GFP_KERNEL);
4048+
if (!mod)
4049+
return;
4050+
4051+
mutex_lock(&trace_types_lock);
4052+
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4053+
if (!list_empty(&tr->mod_trace))
4054+
process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4055+
if (!list_empty(&tr->mod_notrace))
4056+
process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4057+
}
4058+
mutex_unlock(&trace_types_lock);
4059+
4060+
kfree(mod);
4061+
}
4062+
39724063
/*
39734064
* We register the module command as a template to show others how
39744065
* to register a command as well.
@@ -5682,6 +5773,8 @@ void ftrace_module_enable(struct module *mod)
56825773

56835774
out_unlock:
56845775
mutex_unlock(&ftrace_lock);
5776+
5777+
process_cached_mods(mod->name);
56855778
}
56865779

56875780
void ftrace_module_init(struct module *mod)

0 commit comments

Comments
 (0)