
Commit 5aa4ef9

AndybnACT authored and palmer-dabbelt committed
riscv: ftrace: do not use stop_machine to update code
Now it is safe to remove the dependency on stop_machine() when patching code in ftrace.

Signed-off-by: Andy Chiu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexandre Ghiti <[email protected]>
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent b2137c3 commit 5aa4ef9

arch/riscv/kernel/ftrace.c
1 file changed, 10 insertions(+), 54 deletions(-)

@@ -24,23 +24,13 @@ unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
 	return fentry_ip - MCOUNT_AUIPC_SIZE;
 }
 
-void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+void arch_ftrace_update_code(int command)
 {
 	mutex_lock(&text_mutex);
-
-	/*
-	 * The code sequences we use for ftrace can't be patched while the
-	 * kernel is running, so we need to use stop_machine() to modify them
-	 * for now. This doesn't play nice with text_mutex, we use this flag
-	 * to elide the check.
-	 */
-	riscv_patch_in_stop_machine = true;
-}
-
-void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
-{
-	riscv_patch_in_stop_machine = false;
+	command |= FTRACE_MAY_SLEEP;
+	ftrace_modify_all_code(command);
 	mutex_unlock(&text_mutex);
+	flush_icache_all();
 }
 
 static int __ftrace_modify_call(unsigned long source, unsigned long target, bool validate)
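
For readability, this is how arch_ftrace_update_code() reads after the hunk above, assembled purely from its context and '+' lines (nothing beyond the diff):

void arch_ftrace_update_code(int command)
{
	mutex_lock(&text_mutex);
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
	mutex_unlock(&text_mutex);
	flush_icache_all();
}

Text patching is serialized by text_mutex, FTRACE_MAY_SLEEP tells the generic code that it may sleep between updates (allowed now that patching no longer runs inside stop_machine()), and the final flush_icache_all() publishes the new instructions to the instruction caches.
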
@@ -129,51 +119,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	 * before the write to function_trace_op later in the generic ftrace.
 	 * If the sequence is not enforced, then an old ftrace_call_dest may
 	 * race loading a new function_trace_op set in ftrace_modify_all_code
-	 *
-	 * If we are in stop_machine, then we don't need to call remote fence
-	 * as there is no concurrent read-side of ftrace_call_dest.
 	 */
 	smp_wmb();
-	if (!irqs_disabled())
-		smp_call_function(ftrace_sync_ipi, NULL, 1);
-	return 0;
-}
-
-struct ftrace_modify_param {
-	int command;
-	atomic_t cpu_count;
-};
-
-static int __ftrace_modify_code(void *data)
-{
-	struct ftrace_modify_param *param = data;
-
-	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
-		ftrace_modify_all_code(param->command);
-		/*
-		 * Make sure the patching store is effective *before* we
-		 * increment the counter which releases all waiting CPUs
-		 * by using the release variant of atomic increment. The
-		 * release pairs with the call to local_flush_icache_all()
-		 * on the waiting CPU.
-		 */
-		atomic_inc_return_release(&param->cpu_count);
-	} else {
-		while (atomic_read(&param->cpu_count) <= num_online_cpus())
-			cpu_relax();
-
-		local_flush_icache_all();
-	}
-
+	/*
+	 * Updating ftrace does not take stop_machine path, so irqs should not
+	 * be disabled.
+	 */
+	WARN_ON(irqs_disabled());
+	smp_call_function(ftrace_sync_ipi, NULL, 1);
 	return 0;
 }
 
-void arch_ftrace_update_code(int command)
-{
-	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
-
-	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
-}
 #else /* CONFIG_DYNAMIC_FTRACE */
 unsigned long ftrace_call_adjust(unsigned long addr)
 {
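
The removed stop_machine() rendezvous is replaced by the plain publish-then-notify pattern visible in this hunk: write the new ftrace_call_dest, order that store with smp_wmb(), and send an IPI so every other hart runs ftrace_sync_ipi as a remote fence before it can observe the new function_trace_op. Below is a minimal, self-contained userspace analogue of that pattern using C11 atomics and pthreads. It is an illustration only, not kernel code: call_dest, sync_epoch, reader() and the handler functions are made-up names, and the fences stand in for smp_wmb() on the writer and for whatever read-side synchronization ftrace_sync_ipi performs on the receiving hart (its body is outside this diff).

/* Build with: cc -std=c11 -pthread publish_notify.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

static void old_handler(void) { }
static void new_handler(void) { }

static _Atomic(void (*)(void)) call_dest;   /* analogue of ftrace_call_dest          */
static atomic_int sync_epoch;               /* analogue of "an IPI has been received" */

static void *reader(void *arg)
{
	int seen = atomic_load_explicit(&sync_epoch, memory_order_relaxed);

	for (long i = 0; i < 1000000; i++) {
		int now = atomic_load_explicit(&sync_epoch, memory_order_relaxed);

		if (now != seen) {
			/* "IPI" received: resynchronize before trusting the pointer. */
			atomic_thread_fence(memory_order_acquire);
			seen = now;
		}
		/* A real caller would jump to the published destination here. */
		void (*fn)(void) = atomic_load_explicit(&call_dest, memory_order_relaxed);
		fn();
	}
	(void)arg;
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store_explicit(&call_dest, old_handler, memory_order_relaxed);
	pthread_create(&t, NULL, reader, NULL);

	/* Writer side: publish the new destination ...                         */
	atomic_store_explicit(&call_dest, new_handler, memory_order_relaxed);
	/* ... order the publish before the notification (smp_wmb() role) ...   */
	atomic_thread_fence(memory_order_release);
	/* ... then notify readers (smp_call_function(ftrace_sync_ipi) role).   */
	atomic_fetch_add_explicit(&sync_epoch, 1, memory_order_relaxed);

	pthread_join(t, NULL);
	return 0;
}

The point of the pattern, and of the commit, is that the other CPUs never have to be halted: they only need to resynchronize once after the writer has published its update, so the all-CPU stop_machine() rendezvous can be dropped.
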