Skip to content

Commit 20b9d7a

Browse files
borkmann authored and davem330 committed
bpf: avoid excessive stack usage for perf_sample_data
perf_sample_data consumes 386 bytes on stack, reduce excessive stack usage and move it to per cpu buffer. It's allowed due to preemption being disabled for tracing, xdp and tc programs, thus at all times only one program can run on a specific CPU and programs cannot run from interrupt. We similarly also handle bpf_pt_regs. Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Alexei Starovoitov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 41e8e40 commit 20b9d7a

File tree

1 file changed

+6
-4
lines changed

1 file changed

+6
-4
lines changed

kernel/trace/bpf_trace.c

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -266,14 +266,16 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
266266
.arg2_type = ARG_ANYTHING,
267267
};
268268

269+
static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
270+
269271
static __always_inline u64
270272
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
271273
u64 flags, struct perf_raw_record *raw)
272274
{
273275
struct bpf_array *array = container_of(map, struct bpf_array, map);
276+
struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
274277
unsigned int cpu = smp_processor_id();
275278
u64 index = flags & BPF_F_INDEX_MASK;
276-
struct perf_sample_data sample_data;
277279
struct bpf_event_entry *ee;
278280
struct perf_event *event;
279281

@@ -294,9 +296,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
294296
if (unlikely(event->oncpu != cpu))
295297
return -EOPNOTSUPP;
296298

297-
perf_sample_data_init(&sample_data, 0, 0);
298-
sample_data.raw = raw;
299-
perf_event_output(event, &sample_data, regs);
299+
perf_sample_data_init(sd, 0, 0);
300+
sd->raw = raw;
301+
perf_event_output(event, sd, regs);
300302
return 0;
301303
}
302304

0 commit comments

Comments (0)