@@ -680,33 +680,6 @@ enum perf_event_active_state {
 };
 
 struct file;
-
-#define PERF_BUFFER_WRITABLE		0x01
-
-struct perf_buffer {
-	atomic_t			refcount;
-	struct rcu_head			rcu_head;
-#ifdef CONFIG_PERF_USE_VMALLOC
-	struct work_struct		work;
-	int				page_order;	/* allocation order  */
-#endif
-	int				nr_pages;	/* nr of data pages  */
-	int				writable;	/* are we writable   */
-
-	atomic_t			poll;		/* POLL_ for wakeups */
-
-	local_t				head;		/* write position    */
-	local_t				nest;		/* nested writers    */
-	local_t				events;		/* event limit       */
-	local_t				wakeup;		/* wakeup stamp      */
-	local_t				lost;		/* nr records lost   */
-
-	long				watermark;	/* wakeup watermark  */
-
-	struct perf_event_mmap_page	*user_page;
-	void				*data_pages[0];
-};
-
 struct perf_sample_data;
 
 typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
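Note: the struct removed above ends with "void *data_pages[0];", the classic GCC zero-length-array idiom for a variable-length tail that is sized at allocation time. A minimal user-space sketch of the same idiom follows; the names (struct demo_buffer, demo_buffer_alloc) are invented for illustration and are not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct demo_buffer {
	int   nr_pages;      /* number of entries in the tail array */
	void *data_pages[];  /* C99 flexible array member (the kernel header used [0]) */
};

static struct demo_buffer *demo_buffer_alloc(int nr_pages)
{
	struct demo_buffer *buf;

	/* One allocation covers both the header and the pointer array. */
	buf = malloc(sizeof(*buf) + nr_pages * sizeof(buf->data_pages[0]));
	if (!buf)
		return NULL;
	buf->nr_pages = nr_pages;
	return buf;
}

int main(void)
{
	struct demo_buffer *buf = demo_buffer_alloc(4);

	if (buf) {
		printf("allocated buffer with %d page slots\n", buf->nr_pages);
		free(buf);
	}
	return 0;
}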
@@ -745,6 +718,8 @@ struct perf_cgroup {
 };
 #endif
 
+struct ring_buffer;
+
 /**
  * struct perf_event - performance event kernel representation:
  */
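Note: the bare "struct ring_buffer;" added above is a forward declaration. It keeps the buffer layout opaque in this header, which suffices because the declarations below only ever store a pointer to it; the full definition moves into the perf core. A minimal sketch of the same opaque-pointer pattern, using hypothetical names (struct widget, widget_user, widget_create):

#include <stdlib.h>

/* Header side: the type stays opaque, a forward declaration is enough. */
struct widget;

struct widget_user {
	struct widget *w;  /* pointer members need no full definition */
};

/* Implementation side: the only place that knows the layout. */
struct widget {
	int  refcount;
	long state;
};

struct widget *widget_create(void)
{
	return calloc(1, sizeof(struct widget));
}

int main(void)
{
	struct widget_user user = { .w = widget_create() };

	free(user.w);
	return 0;
}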
@@ -834,7 +809,7 @@ struct perf_event {
 	atomic_t			mmap_count;
 	int				mmap_locked;
 	struct user_struct		*mmap_user;
-	struct perf_buffer		*buffer;
+	struct ring_buffer		*rb;
 
 	/* poll related */
 	wait_queue_head_t		waitq;
@@ -945,7 +920,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event		*event;
-	struct perf_buffer		*buffer;
+	struct ring_buffer		*rb;
 	unsigned long			wakeup;
 	unsigned long			size;
 	void				*addr;