
Commit 8e7a392

borkmann authored and davem330 committed
bpf, perf: split bpf_perf_event_output
Split the bpf_perf_event_output() helper into two parts as a preparation step. The new bpf_perf_event_output() prepares the raw record itself and tests for unknown flags from the BPF trace context, while __bpf_perf_event_output() does the core work. The latter will later be reused directly from bpf_event_output().

Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Alexei Starovoitov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent: 7e3f977 · commit: 8e7a392

File tree: 1 file changed, +22 −13 lines

kernel/trace/bpf_trace.c (22 additions, 13 deletions)
@@ -233,26 +233,17 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+static __always_inline u64
+__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
+			u64 flags, struct perf_raw_record *raw)
 {
-	struct pt_regs *regs = (struct pt_regs *) (long) r1;
-	struct bpf_map *map = (struct bpf_map *) (long) r2;
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
-	void *data = (void *) (long) r4;
 	struct perf_sample_data sample_data;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
-	struct perf_raw_record raw = {
-		.frag = {
-			.size = size,
-			.data = data,
-		},
-	};
 
-	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
-		return -EINVAL;
 	if (index == BPF_F_CURRENT_CPU)
 		index = cpu;
 	if (unlikely(index >= array->map.max_entries))
@@ -271,11 +262,29 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 		return -EOPNOTSUPP;
 
 	perf_sample_data_init(&sample_data, 0, 0);
-	sample_data.raw = &raw;
+	sample_data.raw = raw;
 	perf_event_output(event, &sample_data, regs);
 	return 0;
 }
 
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+{
+	struct pt_regs *regs = (struct pt_regs *)(long) r1;
+	struct bpf_map *map = (struct bpf_map *)(long) r2;
+	void *data = (void *)(long) r4;
+	struct perf_raw_record raw = {
+		.frag = {
+			.size = size,
+			.data = data,
+		},
+	};
+
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+		return -EINVAL;
+
+	return __bpf_perf_event_output(regs, map, flags, &raw);
+}
+
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
 	.func		= bpf_perf_event_output,
 	.gpl_only	= true,
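
The commit message notes that __bpf_perf_event_output() will later be reused directly from bpf_event_output(). Below is a minimal sketch of what such a caller could look like, assuming it already holds typed regs/map pointers and the data/size for the record; the function name bpf_event_output_sketch and its parameter list are illustrative assumptions and are not part of this commit.

/*
 * Illustrative sketch only: a possible caller that builds the
 * perf_raw_record itself and hands the prepared record to
 * __bpf_perf_event_output(), mirroring what the new
 * bpf_perf_event_output() wrapper in this commit does.
 * Name and signature are assumptions, not kernel code.
 */
static u64 bpf_event_output_sketch(struct pt_regs *regs, struct bpf_map *map,
				   u64 flags, void *data, u64 size)
{
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};

	/* Flag validation stays with the caller; the core helper
	 * only emits the already-prepared raw record.
	 */
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return __bpf_perf_event_output(regs, map, flags, &raw);
}

The point of the split is that the core path no longer unpacks u64 register arguments or checks flags itself, so an in-kernel caller that already has typed pointers can feed it a prepared raw record directly.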
