@@ -396,14 +396,17 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
396
396
.arg5_type = ARG_CONST_SIZE ,
397
397
};
398
398
399
/*
 * Per-CPU scratch state for bpf_event_output().
 *
 * bpf_event_output() can nest on the same CPU (e.g. a BPF program running
 * from an interrupt or NMI preempting one already inside this path).  A
 * single shared pt_regs/perf_sample_data slot would be silently clobbered
 * by the inner call, so each CPU keeps a small stack of slots indexed by
 * the current nesting depth (bpf_event_output_nest_level).
 *
 * Three levels cover task -> irq -> NMI nesting.
 * NOTE(review): struct bpf_trace_sample_data is declared earlier in this
 * file (not visible here); its sds[] array is presumably also 3 entries
 * so it stays in lockstep with regs[3] below — confirm both sizes match.
 */
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
401
405
402
406
u64 bpf_event_output (struct bpf_map * map , u64 flags , void * meta , u64 meta_size ,
403
407
void * ctx , u64 ctx_size , bpf_ctx_copy_t ctx_copy )
404
408
{
405
- struct perf_sample_data * sd = this_cpu_ptr (& bpf_misc_sd );
406
- struct pt_regs * regs = this_cpu_ptr (& bpf_pt_regs );
409
+ int nest_level = this_cpu_inc_return (bpf_event_output_nest_level );
407
410
struct perf_raw_frag frag = {
408
411
.copy = ctx_copy ,
409
412
.size = ctx_size ,
@@ -418,12 +421,25 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
418
421
.data = meta ,
419
422
},
420
423
};
424
+ struct perf_sample_data * sd ;
425
+ struct pt_regs * regs ;
426
+ u64 ret ;
427
+
428
+ if (WARN_ON_ONCE (nest_level > ARRAY_SIZE (bpf_misc_sds .sds ))) {
429
+ ret = - EBUSY ;
430
+ goto out ;
431
+ }
432
+ sd = this_cpu_ptr (& bpf_misc_sds .sds [nest_level - 1 ]);
433
+ regs = this_cpu_ptr (& bpf_pt_regs .regs [nest_level - 1 ]);
421
434
422
435
perf_fetch_caller_regs (regs );
423
436
perf_sample_data_init (sd , 0 , 0 );
424
437
sd -> raw = & raw ;
425
438
426
- return __bpf_perf_event_output (regs , map , flags , sd );
439
+ ret = __bpf_perf_event_output (regs , map , flags , sd );
440
+ out :
441
+ this_cpu_dec (bpf_event_output_nest_level );
442
+ return ret ;
427
443
}
428
444
429
445
BPF_CALL_0 (bpf_get_current_task )
0 commit comments