@@ -2799,7 +2799,7 @@ static int perf_event_stop(struct perf_event *event, int restart)
  *
  * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
  *      we update the addresses of corresponding vmas in
- *      event::addr_filters_offs array and bump the event::addr_filters_gen;
+ *      event::addr_filter_ranges array and bump the event::addr_filters_gen;
  * (p2) when an event is scheduled in (pmu::add), it calls
  *      perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
  *      if the generation has changed since the previous call.
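For context, (p2) names the consumer side of this protocol: perf_event_addr_filters_sync() compares the event's filter generation against the one the PMU last synced and, if it has moved, invokes the driver's pmu::addr_filters_sync callback. A minimal sketch of such a callback, assuming the range layout introduced by this patch (example_pmu_addr_filters_sync and example_pmu_program_range are hypothetical names, not kernel APIs):

static void example_pmu_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &ifh->list, entry) {
		unsigned long start, size;

		if (filter->path.dentry) {
			/* File-backed filter: use the kernel-maintained
			 * mapping; a zero size means the filter's binary
			 * is not mapped at the moment.
			 */
			start = event->addr_filter_ranges[i].start;
			size = event->addr_filter_ranges[i].size;
		} else {
			/* Kernel address filter: offsets are absolute. */
			start = filter->offset;
			size = filter->size;
		}

		if (size)
			example_pmu_program_range(i, start, start + size - 1);
		i++;
	}
}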
@@ -4446,7 +4446,7 @@ static void _free_event(struct perf_event *event)
 
 	perf_event_free_bpf_prog(event);
 	perf_addr_filters_splice(event, NULL);
-	kfree(event->addr_filters_offs);
+	kfree(event->addr_filter_ranges);
 
 	if (event->destroy)
 		event->destroy(event);
@@ -6687,7 +6687,8 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
 	raw_spin_lock_irqsave(&ifh->lock, flags);
 	list_for_each_entry(filter, &ifh->list, entry) {
 		if (filter->path.dentry) {
-			event->addr_filters_offs[count] = 0;
+			event->addr_filter_ranges[count].start = 0;
+			event->addr_filter_ranges[count].size = 0;
 			restart++;
 		}
 
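The .start/.size stores above are the first users of the new per-filter range bookkeeping that replaces the bare unsigned long offsets. Inferred from these accesses and from the kcalloc()/memcpy() sites later in this diff, the companion definition (added to include/linux/perf_event.h, outside the hunks shown here) would look like:

struct perf_addr_filter_range {
	unsigned long	start;	/* virtual address the filter maps to */
	unsigned long	size;	/* how much of the filter is mapped   */
};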
@@ -7367,28 +7368,47 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
 	return true;
 }
 
+static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
+					struct vm_area_struct *vma,
+					struct perf_addr_filter_range *fr)
+{
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
+	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+	struct file *file = vma->vm_file;
+
+	if (!perf_addr_filter_match(filter, file, off, vma_size))
+		return false;
+
+	if (filter->offset < off) {
+		fr->start = vma->vm_start;
+		fr->size = min(vma_size, filter->size - (off - filter->offset));
+	} else {
+		fr->start = vma->vm_start + filter->offset - off;
+		fr->size = min(vma->vm_end - fr->start, filter->size);
+	}
+
+	return true;
+}
+
 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
 {
 	struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
 	struct vm_area_struct *vma = data;
-	unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
-	struct file *file = vma->vm_file;
 	struct perf_addr_filter *filter;
 	unsigned int restart = 0, count = 0;
+	unsigned long flags;
 
 	if (!has_addr_filter(event))
 		return;
 
-	if (!file)
+	if (!vma->vm_file)
 		return;
 
 	raw_spin_lock_irqsave(&ifh->lock, flags);
 	list_for_each_entry(filter, &ifh->list, entry) {
-		if (perf_addr_filter_match(filter, file, off,
-					   vma->vm_end - vma->vm_start)) {
-			event->addr_filters_offs[count] = vma->vm_start;
+		if (perf_addr_filter_vma_adjust(filter, vma,
+						&event->addr_filter_ranges[count]))
 			restart++;
-		}
 
 		count++;
 	}
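The interesting part is the two-way intersection in perf_addr_filter_vma_adjust(): a filter can start before the vma's file offset (first branch, when the filter's head landed in an earlier mapping of the same file) or inside it (second branch), and the size is clamped so the resulting range never extends past the vma. A small userspace sketch, with the kernel structs reduced to the fields the arithmetic touches and the kernel's min() mirrored as a local helper, reproduces both branches with concrete numbers:

#include <stdio.h>

/* Stripped-down stand-ins for the kernel structures; only the fields
 * used by the intersection arithmetic are kept.
 */
struct filter { unsigned long offset, size; };            /* file-relative */
struct vma    { unsigned long vm_start, vm_end, file_off; };
struct range  { unsigned long start, size; };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Mirrors perf_addr_filter_vma_adjust() from the hunk above, minus the
 * perf_addr_filter_match() pre-check.
 */
static void vma_adjust(struct filter *f, struct vma *v, struct range *r)
{
	unsigned long vma_size = v->vm_end - v->vm_start;

	if (f->offset < v->file_off) {
		/* Filter began in an earlier mapping of the same file. */
		r->start = v->vm_start;
		r->size  = min_ul(vma_size, f->size - (v->file_off - f->offset));
	} else {
		/* Filter starts somewhere inside this vma. */
		r->start = v->vm_start + f->offset - v->file_off;
		r->size  = min_ul(v->vm_end - r->start, f->size);
	}
}

int main(void)
{
	struct filter f = { .offset = 0x1000, .size = 0x2000 };
	struct range r;

	/* Case 1: vma maps the file from offset 0 -> second branch.
	 * Expect start = 0x401000, size = 0x2000.
	 */
	struct vma v1 = { 0x400000, 0x403000, 0x0 };
	vma_adjust(&f, &v1, &r);
	printf("case 1: start=%#lx size=%#lx\n", r.start, r.size);

	/* Case 2: vma maps the file from offset 0x2000 -> first branch.
	 * Only the filter's tail (0x1000 bytes) lands in this vma; the
	 * old bare vm_start bookkeeping could not express that, which is
	 * what this patch fixes for vmas with non-zero offset.
	 * Expect start = 0x7f0000000000, size = 0x1000.
	 */
	struct vma v2 = { 0x7f0000000000, 0x7f0000001000, 0x2000 };
	vma_adjust(&f, &v2, &r);
	printf("case 2: start=%#lx size=%#lx\n", r.start, r.size);

	return 0;
}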
@@ -8978,26 +8998,19 @@ static void perf_addr_filters_splice(struct perf_event *event,
  * @filter; if so, adjust filter's address range.
  * Called with mm::mmap_sem down for reading.
  */
-static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
-					    struct mm_struct *mm)
+static void perf_addr_filter_apply(struct perf_addr_filter *filter,
+				   struct mm_struct *mm,
+				   struct perf_addr_filter_range *fr)
 {
 	struct vm_area_struct *vma;
 
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		struct file *file = vma->vm_file;
-		unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
-		unsigned long vma_size = vma->vm_end - vma->vm_start;
-
-		if (!file)
+		if (!vma->vm_file)
 			continue;
 
-		if (!perf_addr_filter_match(filter, file, off, vma_size))
-			continue;
-
-		return vma->vm_start;
+		if (perf_addr_filter_vma_adjust(filter, vma, fr))
+			return;
 	}
-
-	return 0;
 }
 
 /*
@@ -9031,15 +9044,15 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
 
 	raw_spin_lock_irqsave(&ifh->lock, flags);
 	list_for_each_entry(filter, &ifh->list, entry) {
-		event->addr_filters_offs[count] = 0;
+		event->addr_filter_ranges[count].start = 0;
+		event->addr_filter_ranges[count].size = 0;
 
 		/*
 		 * Adjust base offset if the filter is associated to a binary
 		 * that needs to be mapped:
 		 */
 		if (filter->path.dentry)
-			event->addr_filters_offs[count] =
-				perf_addr_filter_apply(filter, mm);
+			perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
 
 		count++;
 	}
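Because each slot is zeroed before perf_addr_filter_apply() runs, and the apply path writes a slot only when a matching vma is found, a file-backed filter whose binary is unmapped reliably reads back as {0, 0}. An illustrative helper (not from this patch; the function name is hypothetical) showing how a consumer might use that invariant:

/* Illustrative only: decide whether filter i can be programmed now. */
static bool example_filter_range_live(struct perf_event *event, int i,
				      struct perf_addr_filter *filter)
{
	/* Kernel-address filters carry absolute offsets and need no vma;
	 * file-backed filters stay {0, 0} until their binary is mapped.
	 */
	return !filter->path.dentry || event->addr_filter_ranges[i].size;
}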
@@ -10305,10 +10318,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		goto err_pmu;
 
 	if (has_addr_filter(event)) {
-		event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
-						   sizeof(unsigned long),
-						   GFP_KERNEL);
-		if (!event->addr_filters_offs) {
+		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
+						    sizeof(struct perf_addr_filter_range),
+						    GFP_KERNEL);
+		if (!event->addr_filter_ranges) {
 			err = -ENOMEM;
 			goto err_per_task;
 		}
@@ -10321,9 +10334,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
 
 		raw_spin_lock_irq(&ifh->lock);
-		memcpy(event->addr_filters_offs,
-		       event->parent->addr_filters_offs,
-		       pmu->nr_addr_filters * sizeof(unsigned long));
+		memcpy(event->addr_filter_ranges,
+		       event->parent->addr_filter_ranges,
+		       pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
 		raw_spin_unlock_irq(&ifh->lock);
 	}
 
@@ -10345,7 +10358,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	return event;
 
 err_addr_filters:
-	kfree(event->addr_filters_offs);
+	kfree(event->addr_filter_ranges);
 
 err_per_task:
 	exclusive_event_destroy(event);