3 files changed, 2 insertions(+), 48 deletions(-)

@@ -702,29 +702,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
 	return perf_evlist__set_paused(evlist, false);
 }
 
-union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
-{
-	struct perf_mmap *md = &evlist->mmap[idx];
-
-	/*
-	 * Check messup is required for forward overwritable ring buffer:
-	 * memory pointed by md->prev can be overwritten in this case.
-	 * No need for read-write ring buffer: kernel stop outputting when
-	 * it hit md->prev (perf_mmap__consume()).
-	 */
-	return perf_mmap__read_forward(md);
-}
-
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
-{
-	return perf_evlist__mmap_read_forward(evlist, idx);
-}
-
-void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__consume(&evlist->mmap[idx], false);
-}
-
 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
 	int i;
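
The three helpers removed above were thin wrappers over the per-map perf_mmap__* routines, and the "Check messup" comment they carried explains why the forward-overwritable case needs that check while the read-write case does not. The fragment below is only an equivalence sketch derived from the removed bodies, not part of the patch: it shows what one pass through the old wrappers amounted to, assuming the perf tool's internal declarations (struct perf_evlist, struct perf_mmap and the perf_mmap__* helpers) are in scope; the function name is made up for the illustration, and evlist/idx stand for whatever the caller already holds.

/*
 * Equivalence sketch (not part of the change): what the removed evlist
 * wrappers expanded to, taken from their bodies above.
 */
static void read_and_consume_one(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	union perf_event *event;

	/* was: event = perf_evlist__mmap_read(evlist, idx); */
	event = perf_mmap__read_forward(md);
	if (event == NULL)
		return;	/* ring empty, or the map was already unmapped */

	/* ... process the event ... */

	/* was: perf_evlist__mmap_consume(evlist, idx); */
	perf_mmap__consume(md, false);
}
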
@@ -761,7 +738,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 		map[i].fd = -1;
 		/*
 		 * When the perf_mmap() call is made we grab one refcount, plus
-		 * one extra to let perf_evlist__mmap_consume() get the last
+		 * one extra to let perf_mmap__consume() get the last
 		 * events after all real references (perf_mmap__get()) are
 		 * dropped.
 		 *
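
The comment updated above describes a deliberate double reference on each ring-buffer map: one count taken when the map is set up, plus one extra so the map survives until every last event has been consumed. The sequence below is an illustration of that lifecycle only, not the tool's actual control flow; perf_mmap__mmap() and perf_mmap__consume() appear elsewhere in this diff, perf_mmap__get()/perf_mmap__put() are the "real reference" pair the comment names, and the wrapper function is invented for the sketch.

/*
 * Lifecycle illustration for one map under the scheme described above
 * (ordering is schematic, not the tool's real call sequence).
 */
static void map_refcount_sketch(struct perf_mmap *map,
				struct mmap_params *mp, int fd)
{
	perf_mmap__mmap(map, mp, fd);	/* refcount is now 2: one real + one extra */

	perf_mmap__get(map);		/* a reader takes a real reference */
	/* ... the reader pulls events out of the ring ... */
	perf_mmap__put(map);		/* the real reference is dropped */

	/*
	 * The extra count keeps the map alive at this point, so the
	 * final consume can still drain whatever is left in the ring
	 * before the map is actually released.
	 */
	perf_mmap__consume(map, false);
}
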
@@ -129,10 +129,6 @@ struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
 
 void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
 
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
-
-union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
-						 int idx);
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
@@ -63,25 +63,6 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
 	return event;
 }
 
-/*
- * legacy interface for mmap read.
- * Don't use it. Use perf_mmap__read_event().
- */
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
-{
-	u64 head;
-
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->refcnt))
-		return NULL;
-
-	head = perf_mmap__read_head(map);
-
-	return perf_mmap__read(map, &map->prev, head);
-}
-
 /*
  * Read event from ring buffer one by one.
  * Return one event for each call.
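
The comment being removed points callers at perf_mmap__read_event() instead; its signature is not shown anywhere in this diff, so no call to it is sketched here. For reference, the loop below is the kind of consumer loop tools typically built around the legacy helper that is going away, using only calls visible in this change; the function name is made up for the sketch, and the perf tool's internal mmap declarations are assumed to be in scope.

/*
 * Sketch of a pre-removal consumer loop around the legacy helper.
 */
static void drain_forward_legacy(struct perf_mmap *map)
{
	union perf_event *event;

	/* NULL means the ring is empty or the map was already unmapped. */
	while ((event = perf_mmap__read_forward(map)) != NULL) {
		/* ... hand the event to the tool ... */
		perf_mmap__consume(map, false);
	}
}
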
@@ -191,7 +172,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
 int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
 {
 	/*
-	 * The last one will be done at perf_evlist__mmap_consume(), so that we
+	 * The last one will be done at perf_mmap__consume(), so that we
 	 * make sure we don't prevent tools from consuming every last event in
 	 * the ring buffer.
 	 *