@@ -3487,14 +3487,15 @@ struct perf_read_data {
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
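
Note on this hunk: the helper no longer reads event->oncpu itself and no longer takes the local CPU from its caller. It now calls smp_processor_id() directly, which is only meaningful while preemption is disabled, so the caller must hold preemption off around it. A minimal sketch of that constraint, assuming kernel context (the function name is illustrative, not from this patch):

/* Hypothetical caller pattern: preemption must stay off across the
 * smp_processor_id() query and any use of its result, otherwise the
 * task may migrate and the "local CPU" answer goes stale. */
static int example_local_package(void)
{
	int pkg;

	preempt_disable();
	pkg = topology_physical_package_id(smp_processor_id());
	preempt_enable();

	return pkg;
}
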
@@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
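
Taken together, these hunks close a race: the old code read event->oncpu inside the helper after deciding the event was active, so a concurrent schedule-out could leave oncpu at -1, and that value was then fed to topology_physical_package_id(), indexing past the topology arrays. The new code snapshots oncpu exactly once with READ_ONCE(), treats anything outside 0..nr_cpu_ids-1 as "already scheduled out" (the count was updated at schedule-out time, so returning 0 is correct), and keeps preemption disabled across the package check and the cross-CPU call. A condensed sketch of that pattern, using an illustrative wrapper name not taken from the kernel:

static int example_read_active(struct perf_event *event,
			       struct perf_read_data *data)
{
	/* One racy-but-atomic snapshot; never re-read event->oncpu. */
	int cpu = READ_ONCE(event->oncpu);

	/* The unsigned compare catches -1 as well as out-of-range values. */
	if ((unsigned int)cpu >= nr_cpu_ids)
		return 0;	/* scheduled out; count already up to date */

	preempt_disable();	/* pin the local CPU for the package check */
	cpu = __perf_event_read_cpu(event, cpu);
	(void)smp_call_function_single(cpu, __perf_event_read, data, 1);
	preempt_enable();

	return data->ret;
}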