@@ -3184,12 +3184,18 @@ void perf_event_exec(void)
 	rcu_read_unlock();
 }
 
+struct perf_read_data {
+	struct perf_event *event;
+	bool group;
+};
+
 /*
  * Cross CPU call to read the hardware event
  */
 static void __perf_event_read(void *info)
 {
-	struct perf_event *event = info;
+	struct perf_read_data *data = info;
+	struct perf_event *sub, *event = data->event;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
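smp_call_function_single() hands the remote handler a single void *info, so the new struct perf_read_data is just a way to pass two values (the event and the "read the whole group" flag) through that one pointer. A minimal sketch of the same pattern, with hypothetical names (struct counter, remote_read_fn(), counter_read() are illustrative, not from this patch):

#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical per-CPU object we want to read remotely. */
struct counter {
	int cpu;
	u64 value;
};

/* Bundle everything the remote handler needs behind one pointer. */
struct remote_read_args {
	struct counter	*counter;
	bool		whole_group;
};

static void remote_read_fn(void *info)
{
	struct remote_read_args *args = info;

	/* Runs on args->counter->cpu; read the hardware state here. */
	(void)args->whole_group;
}

static void counter_read(struct counter *counter, bool whole_group)
{
	struct remote_read_args args = {
		.counter	= counter,
		.whole_group	= whole_group,
	};

	/*
	 * wait == 1 makes the call synchronous, so the on-stack 'args'
	 * is guaranteed to outlive the remote handler.
	 */
	smp_call_function_single(counter->cpu, remote_read_fn, &args, 1);
}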
@@ -3208,9 +3214,21 @@ static void __perf_event_read(void *info)
 		update_context_time(ctx);
 		update_cgrp_time_from_event(event);
 	}
+
 	update_event_times(event);
 	if (event->state == PERF_EVENT_STATE_ACTIVE)
 		event->pmu->read(event);
+
+	if (!data->group)
+		goto unlock;
+
+	list_for_each_entry(sub, &event->sibling_list, group_entry) {
+		update_event_times(sub);
+		if (sub->state == PERF_EVENT_STATE_ACTIVE)
+			sub->pmu->read(sub);
+	}
+
+unlock:
 	raw_spin_unlock(&ctx->lock);
 }
 
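The sibling walk is what makes a group read self-consistent: when data->group is set, the leader and every sibling are read in the same IPI on the CPU that owns the group, under one ctx->lock hold, instead of one cross-CPU call per counter. From userspace this corresponds to opening the events as a group and reading the leader with PERF_FORMAT_GROUP. A rough sketch of that side (the event choices are illustrative and error handling is omitted):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	return syscall(__NR_perf_event_open, attr, 0 /* this task */,
		       -1 /* any cpu */, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.read_format = PERF_FORMAT_GROUP;
	attr.disabled = 1;
	leader = perf_open(&attr, -1);		/* group leader */

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;
	sibling = perf_open(&attr, leader);	/* joins the leader's group */

	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	/* One read() returns { nr, values[nr] } for the whole group. */
	struct {
		uint64_t nr;
		uint64_t values[2];
	} buf;
	read(leader, &buf, sizeof(buf));

	printf("cycles=%llu instructions=%llu\n",
	       (unsigned long long)buf.values[0],
	       (unsigned long long)buf.values[1]);
	return 0;
}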
@@ -3275,15 +3293,19 @@ u64 perf_event_read_local(struct perf_event *event)
 	return val;
 }
 
-static void perf_event_read(struct perf_event *event)
+static void perf_event_read(struct perf_event *event, bool group)
 {
 	/*
 	 * If event is enabled and currently active on a CPU, update the
 	 * value in the event structure:
 	 */
 	if (event->state == PERF_EVENT_STATE_ACTIVE) {
+		struct perf_read_data data = {
+			.event = event,
+			.group = group,
+		};
 		smp_call_function_single(event->oncpu,
-					 __perf_event_read, event, 1);
+					 __perf_event_read, &data, 1);
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
@@ -3298,7 +3320,10 @@ static void perf_event_read(struct perf_event *event)
 			update_context_time(ctx);
 			update_cgrp_time_from_event(event);
 		}
-		update_event_times(event);
+		if (group)
+			update_group_times(event);
+		else
+			update_event_times(event);
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 }
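In the inactive path there is no IPI; the cached times are simply refreshed under ctx->lock. When a group read is requested, update_group_times() is used so that the siblings' enabled/running times are refreshed along with the leader's. Roughly, the existing helper amounts to the following (a paraphrase for orientation, not code from this patch):

static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}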
@@ -3817,7 +3842,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 
 	mutex_lock(&event->child_mutex);
 
-	perf_event_read(event);
+	perf_event_read(event, false);
 	total += perf_event_count(event);
 
 	*enabled += event->total_time_enabled +
@@ -3826,7 +3851,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 		atomic64_read(&event->child_total_time_running);
 
 	list_for_each_entry(child, &event->child_list, child_list) {
-		perf_event_read(child);
+		perf_event_read(child, false);
 		total += perf_event_count(child);
 		*enabled += child->total_time_enabled;
 		*running += child->total_time_running;
@@ -3987,7 +4012,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void _perf_event_reset(struct perf_event *event)
 {
-	perf_event_read(event);
+	perf_event_read(event, false);
 	local64_set(&event->count, 0);
 	perf_event_update_userpage(event);
 }
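All of the call sites touched here (perf_event_read_value() and _perf_event_reset()) still only need the single counter, so they pass group == false and keep their previous behaviour. A caller that wanted the whole group refreshed with one cross-CPU call would pass true and then collect the sibling counts itself; a hypothetical sketch (read_group_total() is illustrative, not part of this patch, and locking is elided):

/* Hypothetical caller: refresh leader + siblings with a single IPI,
 * then sum the cached counts. Locking intentionally omitted. */
static u64 read_group_total(struct perf_event *leader)
{
	struct perf_event *sub;
	u64 total;

	perf_event_read(leader, true);

	total = perf_event_count(leader);
	list_for_each_entry(sub, &leader->sibling_list, group_entry)
		total += perf_event_count(sub);

	return total;
}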