Skip to content

Commit 48d68b2

Browse files
fweisbec authored and Ingo Molnar committed
tracing/function-graph-tracer: support for x86-64
Impact: extend and enable the function graph tracer to 64-bit x86 This patch implements support for the function graph tracer under x86-64. Both static and dynamic tracing are supported. This causes some small CPP conditional asm on arch/x86/kernel/ftrace.c I wanted to use probe_kernel_read/write to make the return address saving/patching code more generic, but it causes tracing recursion. It would perhaps be useful to implement a notrace version of these functions for other arch ports. Note that arch/x86/process_64.c is not traced, as in X86-32. I first thought __switch_to() was responsible for crashes during tracing because I believed the current task was changed inside it, but that's actually not the case (actually yes, but not the "current" pointer). So I will have to investigate to find the functions that cause harm here, to enable tracing of the other functions inside (but there is no issue at this time, while process_64.c stays out of -pg flags). A small possible race condition is fixed inside this patch too. When the tracer allocates a return stack dynamically, the current depth is initialized only after the allocation, not before. An interrupt could occur at this time and, after seeing that the return stack is allocated, the tracer could try to trace it with a random uninitialized depth. This is a prevention, even though I haven't had problems with it. Signed-off-by: Frederic Weisbecker <[email protected]> Cc: Steven Rostedt <[email protected]> Cc: Tim Bird <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
1 parent 222658e commit 48d68b2

File tree

5 files changed

+89
-3
lines changed

5 files changed

+89
-3
lines changed

arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ config X86
2929
select HAVE_FTRACE_MCOUNT_RECORD
3030
select HAVE_DYNAMIC_FTRACE
3131
select HAVE_FUNCTION_TRACER
32-
select HAVE_FUNCTION_GRAPH_TRACER if X86_32
32+
select HAVE_FUNCTION_GRAPH_TRACER
3333
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
3434
select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
3535
select HAVE_ARCH_KGDB if !X86_VOYAGER

arch/x86/kernel/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ endif
1717
ifdef CONFIG_FUNCTION_GRAPH_TRACER
1818
# Don't trace __switch_to() but let it for function tracer
1919
CFLAGS_REMOVE_process_32.o = -pg
20+
CFLAGS_REMOVE_process_64.o = -pg
2021
endif
2122

2223
#

arch/x86/kernel/entry_64.S

Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,12 @@ ftrace_call:
9898
movq (%rsp), %rax
9999
addq $0x38, %rsp
100100

101+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
102+
.globl ftrace_graph_call
103+
ftrace_graph_call:
104+
jmp ftrace_stub
105+
#endif
106+
101107
.globl ftrace_stub
102108
ftrace_stub:
103109
retq
@@ -110,6 +116,12 @@ ENTRY(mcount)
110116

111117
cmpq $ftrace_stub, ftrace_trace_function
112118
jnz trace
119+
120+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
121+
cmpq $ftrace_stub, ftrace_graph_return
122+
jnz ftrace_graph_caller
123+
#endif
124+
113125
.globl ftrace_stub
114126
ftrace_stub:
115127
retq
@@ -145,6 +157,68 @@ END(mcount)
145157
#endif /* CONFIG_DYNAMIC_FTRACE */
146158
#endif /* CONFIG_FUNCTION_TRACER */
147159

160+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
161+
/*
 * ftrace_graph_caller - entered from mcount when the function graph
 * tracer is active.  Calls prepare_ftrace_return() so the traced
 * function's return address can be hooked.
 * (Interleaved "NNN+" lines are diff-rendering artifacts, kept as-is.)
 */
ENTRY(ftrace_graph_caller)
162+
/* Bail out to ftrace_stub if tracing has been stopped. */
cmpl $0, function_trace_stop
163+
jne ftrace_stub
164+
165+
/* Reserve 0x38 = 7*8 bytes to save the caller-clobbered registers below. */
subq $0x38, %rsp
166+
movq %rax, (%rsp)
167+
movq %rcx, 8(%rsp)
168+
movq %rdx, 16(%rsp)
169+
movq %rsi, 24(%rsp)
170+
movq %rdi, 32(%rsp)
171+
movq %r8, 40(%rsp)
172+
movq %r9, 48(%rsp)
173+
174+
/* arg1 (%rdi): address of the parent return-address slot — presumably
 * 8(%rbp) is where the traced function's caller's return address lives;
 * TODO confirm against the frame layout assumed by this kernel config. */
leaq 8(%rbp), %rdi
175+
/* arg2 (%rsi): mcount's own return address (self_addr of the traced
 * function), read from just above the 0x38-byte save area. */
movq 0x38(%rsp), %rsi
176+
177+
/* prepare_ftrace_return(parent, self_addr) — see the ftrace.c hunk below. */
call prepare_ftrace_return
178+
179+
/* Restore the saved registers in reverse order and return to the
 * traced function. */
movq 48(%rsp), %r9
180+
movq 40(%rsp), %r8
181+
movq 32(%rsp), %rdi
182+
movq 24(%rsp), %rsi
183+
movq 16(%rsp), %rdx
184+
movq 8(%rsp), %rcx
185+
movq (%rsp), %rax
186+
addq $0x38, %rsp
187+
retq
188+
END(ftrace_graph_caller)
189+
190+
191+
/*
 * return_to_handler - trampoline that runs when a traced function
 * returns (its return address was replaced by prepare_ftrace_return).
 * Saves the scratch registers, asks ftrace_return_to_handler() for the
 * original return address, then returns to it.
 * (Interleaved "NNN+" lines are diff-rendering artifacts, kept as-is.)
 */
.globl return_to_handler
192+
return_to_handler:
193+
/* Reserve 80 bytes: 9*8 for the registers saved below plus one extra
 * slot at 72(%rsp) for the original return address. */
subq $80, %rsp
194+
195+
movq %rax, (%rsp)
196+
movq %rcx, 8(%rsp)
197+
movq %rdx, 16(%rsp)
198+
movq %rsi, 24(%rsp)
199+
movq %rdi, 32(%rsp)
200+
movq %r8, 40(%rsp)
201+
movq %r9, 48(%rsp)
202+
movq %r10, 56(%rsp)
203+
movq %r11, 64(%rsp)
204+
205+
/* Returns the original (hooked) return address in %rax. */
call ftrace_return_to_handler
206+
207+
/* Park the original return address in the spare slot at 72(%rsp). */
movq %rax, 72(%rsp)
208+
movq 64(%rsp), %r11
209+
movq 56(%rsp), %r10
210+
movq 48(%rsp), %r9
211+
movq 40(%rsp), %r8
212+
movq 32(%rsp), %rdi
213+
movq 24(%rsp), %rsi
214+
movq 16(%rsp), %rdx
215+
movq 8(%rsp), %rcx
216+
movq (%rsp), %rax
217+
/* Deliberately $72, not $80: this leaves the original return address
 * (stored at 72(%rsp) above) on top of the stack so retq jumps to it. */
addq $72, %rsp
218+
retq
219+
#endif
220+
221+
148222
#ifndef CONFIG_PREEMPT
149223
#define retint_kernel retint_restore_args
150224
#endif

arch/x86/kernel/ftrace.c

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -467,17 +467,27 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
467467
* ignore such a protection.
468468
*/
469469
asm volatile(
470+
#ifdef CONFIG_X86_64
471+
"1: movq (%[parent_old]), %[old]\n"
472+
"2: movq %[return_hooker], (%[parent_replaced])\n"
473+
#else
470474
"1: movl (%[parent_old]), %[old]\n"
471475
"2: movl %[return_hooker], (%[parent_replaced])\n"
476+
#endif
472477
" movl $0, %[faulted]\n"
473478

474479
".section .fixup, \"ax\"\n"
475480
"3: movl $1, %[faulted]\n"
476481
".previous\n"
477482

478483
".section __ex_table, \"a\"\n"
484+
#ifdef CONFIG_X86_64
485+
" .quad 1b, 3b\n"
486+
" .quad 2b, 3b\n"
487+
#else
479488
" .long 1b, 3b\n"
480489
" .long 2b, 3b\n"
490+
#endif
481491
".previous\n"
482492

483493
: [parent_replaced] "=r" (parent), [old] "=r" (old),
@@ -509,5 +519,4 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
509519
ftrace_graph_entry(&trace);
510520

511521
}
512-
513522
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

kernel/trace/ftrace.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1673,8 +1673,10 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
16731673
}
16741674

16751675
if (t->ret_stack == NULL) {
1676-
t->ret_stack = ret_stack_list[start++];
16771676
t->curr_ret_stack = -1;
1677+
/* Make sure IRQs see the -1 first: */
1678+
barrier();
1679+
t->ret_stack = ret_stack_list[start++];
16781680
atomic_set(&t->trace_overrun, 0);
16791681
}
16801682
} while_each_thread(g, t);

0 commit comments

Comments
 (0)