Skip to content

Commit e7dbfe3

Browse files
mhiramat-hitachi authored and rostedt committed
kprobes/x86: Move ftrace-based kprobe code into kprobes-ftrace.c
Split ftrace-based kprobes code from kprobes, and introduce CONFIG_(HAVE_)KPROBES_ON_FTRACE Kconfig flags. For the cleanup reason, this also moves kprobe_ftrace check into skip_singlestep. Link: http://lkml.kernel.org/r/[email protected] Cc: Ingo Molnar <[email protected]> Cc: Ananth N Mavinakayanahalli <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Frederic Weisbecker <[email protected]> Signed-off-by: Masami Hiramatsu <[email protected]> Signed-off-by: Steven Rostedt <[email protected]>
1 parent 06aeaae commit e7dbfe3

File tree

8 files changed

+125
-83
lines changed

8 files changed

+125
-83
lines changed

arch/Kconfig

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,15 @@ config OPTPROBES
7676
depends on KPROBES && HAVE_OPTPROBES
7777
depends on !PREEMPT
7878

79+
config KPROBES_ON_FTRACE
80+
def_bool y
81+
depends on KPROBES && HAVE_KPROBES_ON_FTRACE
82+
depends on DYNAMIC_FTRACE_WITH_REGS
83+
help
84+
If function tracer is enabled and the arch supports full
85+
passing of pt_regs to function tracing, then kprobes can
86+
optimize on top of function tracing.
87+
7988
config UPROBES
8089
bool "Transparent user-space probes (EXPERIMENTAL)"
8190
depends on UPROBE_EVENT && PERF_EVENTS
@@ -158,6 +167,9 @@ config HAVE_KRETPROBES
158167
config HAVE_OPTPROBES
159168
bool
160169

170+
config HAVE_KPROBES_ON_FTRACE
171+
bool
172+
161173
config HAVE_NMI_WATCHDOG
162174
bool
163175
#

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ config X86
4040
select HAVE_DMA_CONTIGUOUS if !SWIOTLB
4141
select HAVE_KRETPROBES
4242
select HAVE_OPTPROBES
43+
select HAVE_KPROBES_ON_FTRACE
4344
select HAVE_FTRACE_MCOUNT_RECORD
4445
select HAVE_FENTRY if X86_64
4546
select HAVE_C_RECORDMCOUNT

arch/x86/kernel/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
6767
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
6868
obj-$(CONFIG_KPROBES) += kprobes.o
6969
obj-$(CONFIG_OPTPROBES) += kprobes-opt.o
70+
obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o
7071
obj-$(CONFIG_MODULES) += module.o
7172
obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
7273
obj-$(CONFIG_KGDB) += kgdb.o

arch/x86/kernel/kprobes-common.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
9999
return addr;
100100
}
101101
#endif
102+
103+
#ifdef CONFIG_KPROBES_ON_FTRACE
/*
 * Implemented in kprobes-ftrace.c: emulate the single-step for an
 * ftrace-based kprobe.  Returns non-zero when the step was handled
 * (caller must then skip the normal setup_singlestep() path).
 */
extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			   struct kprobe_ctlblk *kcb);
#else
/*
 * No ftrace-based kprobes configured: nothing to emulate, always
 * report "not handled" so the caller does a real single-step.
 */
static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
				  struct kprobe_ctlblk *kcb)
{
	return 0;
}
#endif
102113
#endif

arch/x86/kernel/kprobes-ftrace.c

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
/*
2+
* Dynamic Ftrace based Kprobes Optimization
3+
*
4+
* This program is free software; you can redistribute it and/or modify
5+
* it under the terms of the GNU General Public License as published by
6+
* the Free Software Foundation; either version 2 of the License, or
7+
* (at your option) any later version.
8+
*
9+
* This program is distributed in the hope that it will be useful,
10+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
11+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12+
* GNU General Public License for more details.
13+
*
14+
* You should have received a copy of the GNU General Public License
15+
* along with this program; if not, write to the Free Software
16+
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17+
*
18+
* Copyright (C) Hitachi Ltd., 2012
19+
*/
20+
#include <linux/kprobes.h>
21+
#include <linux/ptrace.h>
22+
#include <linux/hardirq.h>
23+
#include <linux/preempt.h>
24+
#include <linux/ftrace.h>
25+
26+
#include "kprobes-common.h"
27+
28+
/*
 * Core of the single-step emulation for an ftrace-based kprobe: the
 * probe sits on the ftrace call site, so there is no real instruction
 * to step over.  Advance regs->ip past the call site, invoke the
 * post handler (if any) as if the step completed, and clear the
 * per-cpu current kprobe.  Always returns 1 ("handled").
 */
static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	/*
	 * Emulate singlestep (and also recover regs->ip)
	 * as if there is a 5byte nop
	 */
	regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
	if (unlikely(p->post_handler)) {
		/* Report the (emulated) single-step as already done. */
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	__this_cpu_write(current_kprobe, NULL);
	return 1;
}
43+
44+
/*
 * Entry point used by the breakpoint handler: if @p is an ftrace-based
 * kprobe, emulate the single-step and report it handled (non-zero);
 * otherwise return 0 so the caller performs a real single-step.
 */
int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	if (!kprobe_ftrace(p))
		return 0;

	return __skip_singlestep(p, regs, kcb);
}
52+
53+
/*
 * Ftrace callback handler for kprobes: called by ftrace instead of the
 * usual int3 breakpoint when a kprobe is placed on an ftrace call site.
 * Mimics a breakpoint hit well enough for the generic kprobe handlers.
 */
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	unsigned long flags;

	/* Disable irq for emulating a breakpoint and avoiding preempt */
	local_irq_save(flags);

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		/* Re-entry: a kprobe is already active on this CPU,
		 * so only record the missed hit. */
		kprobes_inc_nmissed_count(p);
	} else {
		/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
		regs->ip = ip + sizeof(kprobe_opcode_t);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		/* Run the pre handler; on 0 (or no handler) emulate the
		 * single-step ourselves. */
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb);
		/*
		 * If pre_handler returns !0, it sets regs->ip and
		 * resets current kprobe.
		 */
	}
end:
	local_irq_restore(flags);
}
87+
88+
/*
 * Arch-specific preparation for an ftrace-based kprobe: no out-of-line
 * instruction buffer is allocated (insn stays NULL) and the probe is
 * marked not boostable.  Always succeeds.
 */
int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	p->ainsn.boostable = -1;
	return 0;
}

arch/x86/kernel/kprobes.c

Lines changed: 2 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
541541
return 1;
542542
}
543543

544-
#ifdef KPROBES_CAN_USE_FTRACE
545-
static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
546-
struct kprobe_ctlblk *kcb)
547-
{
548-
/*
549-
* Emulate singlestep (and also recover regs->ip)
550-
* as if there is a 5byte nop
551-
*/
552-
regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
553-
if (unlikely(p->post_handler)) {
554-
kcb->kprobe_status = KPROBE_HIT_SSDONE;
555-
p->post_handler(p, regs, 0);
556-
}
557-
__this_cpu_write(current_kprobe, NULL);
558-
}
559-
#endif
560-
561544
/*
562545
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
563546
* remain disabled throughout this function.
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
616599
} else if (kprobe_running()) {
617600
p = __this_cpu_read(current_kprobe);
618601
if (p->break_handler && p->break_handler(p, regs)) {
619-
#ifdef KPROBES_CAN_USE_FTRACE
620-
if (kprobe_ftrace(p)) {
621-
skip_singlestep(p, regs, kcb);
622-
return 1;
623-
}
624-
#endif
625-
setup_singlestep(p, regs, kcb, 0);
602+
if (!skip_singlestep(p, regs, kcb))
603+
setup_singlestep(p, regs, kcb, 0);
626604
return 1;
627605
}
628606
} /* else: not a kprobe fault; let the kernel handle it */
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
10751053
return 0;
10761054
}
10771055

1078-
#ifdef KPROBES_CAN_USE_FTRACE
1079-
/* Ftrace callback handler for kprobes */
1080-
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
1081-
struct ftrace_ops *ops, struct pt_regs *regs)
1082-
{
1083-
struct kprobe *p;
1084-
struct kprobe_ctlblk *kcb;
1085-
unsigned long flags;
1086-
1087-
/* Disable irq for emulating a breakpoint and avoiding preempt */
1088-
local_irq_save(flags);
1089-
1090-
p = get_kprobe((kprobe_opcode_t *)ip);
1091-
if (unlikely(!p) || kprobe_disabled(p))
1092-
goto end;
1093-
1094-
kcb = get_kprobe_ctlblk();
1095-
if (kprobe_running()) {
1096-
kprobes_inc_nmissed_count(p);
1097-
} else {
1098-
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
1099-
regs->ip = ip + sizeof(kprobe_opcode_t);
1100-
1101-
__this_cpu_write(current_kprobe, p);
1102-
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1103-
if (!p->pre_handler || !p->pre_handler(p, regs))
1104-
skip_singlestep(p, regs, kcb);
1105-
/*
1106-
* If pre_handler returns !0, it sets regs->ip and
1107-
* resets current kprobe.
1108-
*/
1109-
}
1110-
end:
1111-
local_irq_restore(flags);
1112-
}
1113-
1114-
int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
1115-
{
1116-
p->ainsn.insn = NULL;
1117-
p->ainsn.boostable = -1;
1118-
return 0;
1119-
}
1120-
#endif
1121-
11221056
int __init arch_init_kprobes(void)
11231057
{
11241058
return arch_init_optprobes();

include/linux/kprobes.h

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -49,16 +49,6 @@
4949
#define KPROBE_REENTER 0x00000004
5050
#define KPROBE_HIT_SSDONE 0x00000008
5151

52-
/*
53-
* If function tracer is enabled and the arch supports full
54-
* passing of pt_regs to function tracing, then kprobes can
55-
* optimize on top of function tracing.
56-
*/
57-
#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
58-
&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
59-
# define KPROBES_CAN_USE_FTRACE
60-
#endif
61-
6252
/* Attach to insert probes on any functions which should be ignored*/
6353
#define __kprobes __attribute__((__section__(".kprobes.text")))
6454

@@ -316,7 +306,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
316306
#endif
317307

318308
#endif /* CONFIG_OPTPROBES */
319-
#ifdef KPROBES_CAN_USE_FTRACE
309+
#ifdef CONFIG_KPROBES_ON_FTRACE
320310
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
321311
struct ftrace_ops *ops, struct pt_regs *regs);
322312
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);

kernel/kprobes.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -919,7 +919,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
919919
}
920920
#endif /* CONFIG_OPTPROBES */
921921

922-
#ifdef KPROBES_CAN_USE_FTRACE
922+
#ifdef CONFIG_KPROBES_ON_FTRACE
923923
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
924924
.func = kprobe_ftrace_handler,
925925
.flags = FTRACE_OPS_FL_SAVE_REGS,
@@ -964,7 +964,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
964964
(unsigned long)p->addr, 1, 0);
965965
WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
966966
}
967-
#else /* !KPROBES_CAN_USE_FTRACE */
967+
#else /* !CONFIG_KPROBES_ON_FTRACE */
968968
#define prepare_kprobe(p) arch_prepare_kprobe(p)
969969
#define arm_kprobe_ftrace(p) do {} while (0)
970970
#define disarm_kprobe_ftrace(p) do {} while (0)
@@ -1414,12 +1414,12 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
14141414
*/
14151415
ftrace_addr = ftrace_location((unsigned long)p->addr);
14161416
if (ftrace_addr) {
1417-
#ifdef KPROBES_CAN_USE_FTRACE
1417+
#ifdef CONFIG_KPROBES_ON_FTRACE
14181418
/* Given address is not on the instruction boundary */
14191419
if ((unsigned long)p->addr != ftrace_addr)
14201420
return -EILSEQ;
14211421
p->flags |= KPROBE_FLAG_FTRACE;
1422-
#else /* !KPROBES_CAN_USE_FTRACE */
1422+
#else /* !CONFIG_KPROBES_ON_FTRACE */
14231423
return -EINVAL;
14241424
#endif
14251425
}

0 commit comments

Comments
 (0)