Commit 673feb9

ftrace: Add :mod: caching infrastructure to trace_array
This is the start of the infrastructure work to allow tracing of module functions before the module is loaded. Currently, the command:

 # echo :mod:some-mod > set_ftrace_filter

enables tracing of all functions within the module "some-mod", but only if the module is already loaded. What we want is that if the module is not yet loaded, the line is saved. When the module is later loaded, the saved line is executed against it, so that its functions start being traced.

Signed-off-by: Steven Rostedt (VMware) <[email protected]>
1 parent: feaf128
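To make the intent concrete, here is a minimal usage sketch. The module names are placeholders, the tracefs path may instead be /sys/kernel/debug/tracing on older setups, and the deferred case only becomes functional once the follow-up patches that consume the cached entries land:

 # Assumes tracefs is mounted at /sys/kernel/tracing
 cd /sys/kernel/tracing

 # Module already loaded: functions in "some-mod" are filtered immediately
 echo ':mod:some-mod' > set_ftrace_filter

 # Module not loaded yet: with this series the line is meant to be cached
 # and applied automatically once the module is loaded
 echo ':mod:not-yet-loaded-mod' > set_ftrace_filter
 modprobe not-yet-loaded-mod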

File tree

2 files changed: +148, -6 lines

  kernel/trace/ftrace.c
  kernel/trace/trace.h
kernel/trace/ftrace.c

Lines changed: 136 additions & 6 deletions
@@ -1293,6 +1293,28 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
 	FTRACE_WARN_ON(hash->count);
 }
 
+static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
+{
+	list_del(&ftrace_mod->list);
+	kfree(ftrace_mod->module);
+	kfree(ftrace_mod->func);
+	kfree(ftrace_mod);
+}
+
+static void clear_ftrace_mod_list(struct list_head *head)
+{
+	struct ftrace_mod_load *p, *n;
+
+	/* stack tracer isn't supported yet */
+	if (!head)
+		return;
+
+	mutex_lock(&ftrace_lock);
+	list_for_each_entry_safe(p, n, head, list)
+		free_ftrace_mod(p);
+	mutex_unlock(&ftrace_lock);
+}
+
 static void free_ftrace_hash(struct ftrace_hash *hash)
 {
 	if (!hash || hash == EMPTY_HASH)
@@ -1346,6 +1368,35 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 	return hash;
 }
 
+
+static int ftrace_add_mod(struct trace_array *tr,
+			  const char *func, const char *module,
+			  int enable)
+{
+	struct ftrace_mod_load *ftrace_mod;
+	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
+
+	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
+	if (!ftrace_mod)
+		return -ENOMEM;
+
+	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
+	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
+	ftrace_mod->enable = enable;
+
+	if (!ftrace_mod->func || !ftrace_mod->module)
+		goto out_free;
+
+	list_add(&ftrace_mod->list, mod_head);
+
+	return 0;
+
+ out_free:
+	free_ftrace_mod(ftrace_mod);
+
+	return -ENOMEM;
+}
+
 static struct ftrace_hash *
 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 {
@@ -3457,6 +3508,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 {
 	struct ftrace_iterator *iter;
 	struct ftrace_hash *hash;
+	struct list_head *mod_head;
+	struct trace_array *tr = ops->private;
 	int ret = 0;
 
 	ftrace_ops_init(ops);
@@ -3478,18 +3531,23 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
 	mutex_lock(&ops->func_hash->regex_lock);
 
-	if (flag & FTRACE_ITER_NOTRACE)
+	if (flag & FTRACE_ITER_NOTRACE) {
 		hash = ops->func_hash->notrace_hash;
-	else
+		mod_head = tr ? &tr->mod_trace : NULL;
+	} else {
 		hash = ops->func_hash->filter_hash;
+		mod_head = tr ? &tr->mod_notrace : NULL;
+	}
 
 	if (file->f_mode & FMODE_WRITE) {
 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
 
-		if (file->f_flags & O_TRUNC)
+		if (file->f_flags & O_TRUNC) {
 			iter->hash = alloc_ftrace_hash(size_bits);
-		else
+			clear_ftrace_mod_list(mod_head);
+		} else {
 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
+		}
 
 		if (!iter->hash) {
 			trace_parser_put(&iter->parser);
@@ -3761,17 +3819,85 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
 	return ret;
 }
 
+static bool module_exists(const char *module)
+{
+	/* All modules have the symbol __this_module */
+	const char this_mod[] = "__this_module";
+	const int modname_size = MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 1;
+	char modname[modname_size + 1];
+	unsigned long val;
+	int n;
+
+	n = snprintf(modname, modname_size + 1, "%s:%s", module, this_mod);
+
+	if (n > modname_size)
+		return false;
+
+	val = module_kallsyms_lookup_name(modname);
+	return val != 0;
+}
+
+static int cache_mod(struct trace_array *tr,
+		     const char *func, char *module, int enable)
+{
+	struct ftrace_mod_load *ftrace_mod, *n;
+	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
+	int ret;
+
+	mutex_lock(&ftrace_lock);
+
+	/* We do not cache inverse filters */
+	if (func[0] == '!') {
+		func++;
+		ret = -EINVAL;
+
+		/* Look to remove this hash */
+		list_for_each_entry_safe(ftrace_mod, n, head, list) {
+			if (strcmp(ftrace_mod->module, module) != 0)
+				continue;
+
+			/* no func matches all */
+			if (!func || strcmp(func, "*") == 0 ||
+			    (ftrace_mod->func &&
+			     strcmp(ftrace_mod->func, func) == 0)) {
+				ret = 0;
+				free_ftrace_mod(ftrace_mod);
+				continue;
+			}
+		}
+		goto out;
+	}
+
+	ret = -EINVAL;
+	/* We only care about modules that have not been loaded yet */
+	if (module_exists(module))
+		goto out;
+
+	/* Save this string off, and execute it when the module is loaded */
+	ret = ftrace_add_mod(tr, func, module, enable);
+ out:
+	mutex_unlock(&ftrace_lock);
+
+	return ret;
+}
+
 /*
  * We register the module command as a template to show others how
  * to register the a command as well.
  */
 
 static int
 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
-		    char *func, char *cmd, char *module, int enable)
+		    char *func_orig, char *cmd, char *module, int enable)
 {
+	char *func;
 	int ret;
 
+	/* match_records() modifies func, and we need the original */
+	func = kstrdup(func_orig, GFP_KERNEL);
+	if (!func)
+		return -ENOMEM;
+
 	/*
 	 * cmd == 'mod' because we only registered this func
 	 * for the 'mod' ftrace_func_command.
@@ -3780,8 +3906,10 @@ ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
 	 * parameter.
 	 */
 	ret = match_records(hash, func, strlen(func), module);
+	kfree(func);
+
 	if (!ret)
-		return -EINVAL;
+		return cache_mod(tr, func_orig, module, enable);
 	if (ret < 0)
 		return ret;
 	return 0;
@@ -5570,6 +5698,8 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 void ftrace_init_trace_array(struct trace_array *tr)
 {
 	INIT_LIST_HEAD(&tr->func_probes);
+	INIT_LIST_HEAD(&tr->mod_trace);
+	INIT_LIST_HEAD(&tr->mod_notrace);
 }
 #else
 
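This commit only stores the cached :mod: entries; nothing here applies them when a module actually loads. As a rough, hypothetical sketch (not part of this commit) of how a follow-up consumer could walk the new lists, assuming an illustrative helper name process_cached_mods(), a re-apply step left as a comment, and only the enable/mod_trace side shown:

/*
 * Hypothetical sketch, not from this commit: walk the cached ":mod:"
 * entries of a trace_array once a module named mod_name has finished
 * loading.  The actual re-application of ftrace_mod->func against the
 * module's records is added by later patches and is only noted here.
 */
static void process_cached_mods(struct trace_array *tr, const char *mod_name)
{
	struct ftrace_mod_load *ftrace_mod, *n;

	mutex_lock(&ftrace_lock);

	list_for_each_entry_safe(ftrace_mod, n, &tr->mod_trace, list) {
		if (strcmp(ftrace_mod->module, mod_name) != 0)
			continue;

		/*
		 * Apply ftrace_mod->func to the module's functions here
		 * (e.g. through the match_records() path), then drop the
		 * cached entry so the saved line is executed only once.
		 */
		free_ftrace_mod(ftrace_mod);
	}

	mutex_unlock(&ftrace_lock);
}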

kernel/trace/trace.h

Lines changed: 12 additions & 0 deletions
@@ -263,7 +263,10 @@ struct trace_array {
 	struct ftrace_ops	*ops;
 	struct trace_pid_list	__rcu *function_pids;
 #ifdef CONFIG_DYNAMIC_FTRACE
+	/* All of these are protected by the ftrace_lock */
 	struct list_head	func_probes;
+	struct list_head	mod_trace;
+	struct list_head	mod_notrace;
 #endif
 	/* function tracing enabled */
 	int			function_enabled;
@@ -761,6 +764,15 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
 
 extern char trace_find_mark(unsigned long long duration);
 
+struct ftrace_hash;
+
+struct ftrace_mod_load {
+	struct list_head	list;
+	char			*func;
+	char			*module;
+	int			enable;
+};
+
 struct ftrace_hash {
 	unsigned long		size_bits;
 	struct hlist_head	*buckets;
