@@ -3862,50 +3862,75 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 }
 EXPORT_SYMBOL_GPL(perf_event_read_value);
 
-static int perf_read_group(struct perf_event *event,
-			   u64 read_format, char __user *buf)
+static void __perf_read_group_add(struct perf_event *leader,
+					u64 read_format, u64 *values)
 {
-	struct perf_event *leader = event->group_leader, *sub;
-	struct perf_event_context *ctx = leader->ctx;
-	int n = 0, size = 0, ret;
-	u64 count, enabled, running;
-	u64 values[5];
+	struct perf_event *sub;
+	int n = 1; /* skip @nr */
 
-	lockdep_assert_held(&ctx->mutex);
+	perf_event_read(leader, true);
+
+	/*
+	 * Since we co-schedule groups, {enabled,running} times of siblings
+	 * will be identical to those of the leader, so we only publish one
+	 * set.
+	 */
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] += leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
 
-	count = perf_event_read_value(leader, &enabled, &running);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] += leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
 
-	values[n++] = 1 + leader->nr_siblings;
-	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = enabled;
-	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = running;
-	values[n++] = count;
+	/*
+	 * Write {count,id} tuples for every sibling.
+	 */
+	values[n++] += perf_event_count(leader);
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
 
-	size = n * sizeof(u64);
+	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
+		values[n++] += perf_event_count(sub);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_event_id(sub);
+	}
+}
 
-	if (copy_to_user(buf, values, size))
-		return -EFAULT;
+static int perf_read_group(struct perf_event *event,
+			   u64 read_format, char __user *buf)
+{
+	struct perf_event *leader = event->group_leader, *child;
+	struct perf_event_context *ctx = leader->ctx;
+	int ret = event->read_size;
+	u64 *values;
 
-	ret = size;
+	lockdep_assert_held(&ctx->mutex);
 
-	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
-		n = 0;
+	values = kzalloc(event->read_size, GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
 
-		values[n++] = perf_event_read_value(sub, &enabled, &running);
-		if (read_format & PERF_FORMAT_ID)
-			values[n++] = primary_event_id(sub);
+	values[0] = 1 + leader->nr_siblings;
+
+	/*
+	 * By locking the child_mutex of the leader we effectively
+	 * lock the child list of all siblings.. XXX explain how.
+	 */
+	mutex_lock(&leader->child_mutex);
 
-		size = n * sizeof(u64);
+	__perf_read_group_add(leader, read_format, values);
+	list_for_each_entry(child, &leader->child_list, child_list)
+		__perf_read_group_add(child, read_format, values);
 
-		if (copy_to_user(buf + ret, values, size)) {
-			return -EFAULT;
-		}
+	mutex_unlock(&leader->child_mutex);
 
-		ret += size;
-	}
+	if (copy_to_user(buf, values, event->read_size))
+		ret = -EFAULT;
+
+	kfree(values);
 
 	return ret;
 }
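For context (not part of the commit): with PERF_FORMAT_GROUP, a single read() on the group leader returns the whole group in exactly the layout perf_read_group() fills above — values[0] is nr (1 + nr_siblings), then the optional enabled/running times, then one {value[,id]} tuple per event, with the inherited children's counts folded in via the += accumulation. Below is a minimal userspace sketch of consuming that layout; the perf_event_open() wrapper name, the two hardware counters, and the busy loop are illustrative choices, not anything taken from this patch.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* No glibc wrapper exists for this syscall; invoke it directly. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	/* nr + enabled + running + 2 events x {value, id} = 7 u64s */
	uint64_t buf[7];
	uint64_t i, nr;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;	/* start the whole group disabled */
	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
			   PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	leader = perf_event_open(&attr, 0, -1, -1, 0);
	if (leader < 0) {
		perror("perf_event_open(leader)");
		return 1;
	}

	attr.disabled = 0;	/* siblings are scheduled with the leader */
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	sibling = perf_event_open(&attr, 0, -1, leader, 0);
	if (sibling < 0) {
		perror("perf_event_open(sibling)");
		return 1;
	}

	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	for (volatile unsigned long j = 0; j < 1000000; j++)
		;		/* something to count */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	/* One read() returns the whole group as one consistent snapshot. */
	if (read(leader, buf, sizeof(buf)) < 0) {
		perror("read");
		return 1;
	}

	nr = buf[0];		/* values[0] = 1 + leader->nr_siblings */
	printf("nr=%" PRIu64 " enabled=%" PRIu64 " running=%" PRIu64 "\n",
	       nr, buf[1], buf[2]);
	for (i = 0; i < nr; i++)
		printf("  value=%" PRIu64 " id=%" PRIu64 "\n",
		       buf[3 + 2 * i], buf[4 + 2 * i]);

	close(sibling);
	close(leader);
	return 0;
}

Note how this maps to the patch: the old code issued one copy_to_user() per sibling, while the new code builds the entire snapshot in a kzalloc'd buffer (accumulating leader plus all inherited children under child_mutex) and copies it out once, so userspace sees either the full group or -EFAULT, never a partial write.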