@@ -1675,10 +1675,152 @@ static bool rb_meta_valid(struct ring_buffer_meta *meta, int cpu,
		subbuf = (void *)subbuf + subbuf_size;
	}

-	pr_info("Ring buffer meta is from previous boot!\n");
	return true;
}

+static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf);
+
+static int rb_read_data_buffer(struct buffer_data_page *dpage, int tail, int cpu,
+			       unsigned long long *timestamp, u64 *delta_ptr)
+{
+	struct ring_buffer_event *event;
+	u64 ts, delta;
+	int events = 0;
+	int e;
+
+	*delta_ptr = 0;
+	*timestamp = 0;
+
+	ts = dpage->time_stamp;
+
+	for (e = 0; e < tail; e += rb_event_length(event)) {
+
+		event = (struct ring_buffer_event *)(dpage->data + e);
+
+		switch (event->type_len) {
+
+		case RINGBUF_TYPE_TIME_EXTEND:
+			delta = rb_event_time_stamp(event);
+			ts += delta;
+			break;
+
+		case RINGBUF_TYPE_TIME_STAMP:
+			delta = rb_event_time_stamp(event);
+			delta = rb_fix_abs_ts(delta, ts);
+			if (delta < ts) {
+				*delta_ptr = delta;
+				*timestamp = ts;
+				return -1;
+			}
+			ts = delta;
+			break;
+
+		case RINGBUF_TYPE_PADDING:
+			if (event->time_delta == 1)
+				break;
+			fallthrough;
+		case RINGBUF_TYPE_DATA:
+			events++;
+			ts += event->time_delta;
+			break;
+
+		default:
+			return -1;
+		}
+	}
+	*timestamp = ts;
+	return events;
+}
+
+static int rb_validate_buffer(struct buffer_data_page *dpage, int cpu)
+{
+	unsigned long long ts;
+	u64 delta;
+	int tail;
+
+	tail = local_read(&dpage->commit);
+	return rb_read_data_buffer(dpage, tail, cpu, &ts, &delta);
+}
+
+/* If the meta data has been validated, now validate the events */
+static void rb_meta_validate_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	struct ring_buffer_meta *meta = cpu_buffer->ring_meta;
+	struct buffer_page *head_page;
+	unsigned long entry_bytes = 0;
+	unsigned long entries = 0;
+	int ret;
+	int i;
+
+	if (!meta || !meta->head_buffer)
+		return;
+
+	/* Do the reader page first */
+	ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
+	if (ret < 0) {
+		pr_info("Ring buffer reader page is invalid\n");
+		goto invalid;
+	}
+	entries += ret;
+	entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
+	local_set(&cpu_buffer->reader_page->entries, ret);
+
+	head_page = cpu_buffer->head_page;
+
+	/* If both the head and commit are on the reader_page then we are done. */
+	if (head_page == cpu_buffer->reader_page &&
+	    head_page == cpu_buffer->commit_page)
+		goto done;
+
+	/* Iterate until finding the commit page */
+	for (i = 0; i < meta->nr_subbufs + 1; i++, rb_inc_page(&head_page)) {
+
+		/* Reader page has already been done */
+		if (head_page == cpu_buffer->reader_page)
+			continue;
+
+		ret = rb_validate_buffer(head_page->page, cpu_buffer->cpu);
+		if (ret < 0) {
+			pr_info("Ring buffer meta [%d] invalid buffer page\n",
+				cpu_buffer->cpu);
+			goto invalid;
+		}
+		entries += ret;
+		entry_bytes += local_read(&head_page->page->commit);
+		local_set(&cpu_buffer->head_page->entries, ret);
+
+		if (head_page == cpu_buffer->commit_page)
+			break;
+	}
+
+	if (head_page != cpu_buffer->commit_page) {
+		pr_info("Ring buffer meta [%d] commit page not found\n",
+			cpu_buffer->cpu);
+		goto invalid;
+	}
+ done:
+	local_set(&cpu_buffer->entries, entries);
+	local_set(&cpu_buffer->entries_bytes, entry_bytes);
+
+	pr_info("Ring buffer meta [%d] is from previous boot!\n", cpu_buffer->cpu);
+	return;
+
+ invalid:
+	/* The content of the buffers are invalid, reset the meta data */
+	meta->head_buffer = 0;
+	meta->commit_buffer = 0;
+
+	/* Reset the reader page */
+	local_set(&cpu_buffer->reader_page->entries, 0);
+	local_set(&cpu_buffer->reader_page->page->commit, 0);
+
+	/* Reset all the subbuffers */
+	for (i = 0; i < meta->nr_subbufs - 1; i++, rb_inc_page(&head_page)) {
+		local_set(&head_page->entries, 0);
+		local_set(&head_page->page->commit, 0);
+	}
+}
+
static void rb_range_meta_init(struct trace_buffer *buffer, int nr_pages)
{
	struct ring_buffer_meta *meta;
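
Note: the loop at the heart of the new rb_read_data_buffer() is a cursor walk over variable-length events, rebuilding the absolute timestamp from the sub-buffer's page timestamp plus per-event deltas and failing on anything it cannot decode. The standalone sketch below illustrates only that walk pattern; struct toy_event, toy_walk() and the encoding are simplified, hypothetical stand-ins, not the kernel's real ring_buffer_event layout.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified, hypothetical event record.  The real ring buffer packs
 * type_len and time_delta into one 32-bit header and derives the event
 * length from type_len; none of that is modeled here. */
enum toy_type { TOY_DATA, TOY_TIME_EXTEND };

struct toy_event {
	enum toy_type	type;
	uint64_t	delta;		/* time delta in ns */
	uint32_t	len;		/* payload bytes following the header */
};

/*
 * Walk 'tail' bytes of events, accumulating the running timestamp the
 * same way rb_read_data_buffer() does: start from the page timestamp,
 * add each event's delta, count data events, fail on unknown types.
 */
static int toy_walk(const unsigned char *data, size_t tail,
		    uint64_t page_ts, uint64_t *last_ts)
{
	uint64_t ts = page_ts;
	size_t off = 0;
	int events = 0;

	while (off + sizeof(struct toy_event) <= tail) {
		const struct toy_event *ev = (const void *)(data + off);

		switch (ev->type) {
		case TOY_TIME_EXTEND:
			ts += ev->delta;	/* clock jump, not a data event */
			break;
		case TOY_DATA:
			ts += ev->delta;
			events++;
			break;
		default:
			return -1;		/* corrupted stream */
		}
		off += sizeof(struct toy_event) + ev->len;
	}
	*last_ts = ts;
	return events;
}

int main(void)
{
	struct toy_event evs[] = {
		{ TOY_TIME_EXTEND, 1000, 0 },
		{ TOY_DATA,           5, 0 },
		{ TOY_DATA,           7, 0 },
	};
	uint64_t last;
	int n = toy_walk((const unsigned char *)evs, sizeof(evs), 100, &last);

	printf("%d data events, last ts %llu\n", n, (unsigned long long)last);
	return 0;
}

Running it prints "2 data events, last ts 1112", i.e. 100 + 1000 + 5 + 7, which is the same bookkeeping the kernel helper performs per sub-buffer.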
@@ -1757,8 +1899,6 @@ static void *rbm_next(struct seq_file *m, void *v, loff_t *pos)
	return rbm_start(m, pos);
}

-static int rb_meta_subbuf_idx(struct ring_buffer_meta *meta, void *subbuf);
-
static int rbm_show(struct seq_file *m, void *v)
{
	struct ring_buffer_per_cpu *cpu_buffer = m->private;
@@ -2011,6 +2151,8 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
	if (ret < 0)
		goto fail_free_reader;

+	rb_meta_validate_events(cpu_buffer);
+
	/* If the boot meta was valid then this has already been updated */
	meta = cpu_buffer->ring_meta;
	if (!meta || !meta->head_buffer ||
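
Note: the new call runs once per CPU, after the buffer pages have been wired to the boot-mapped range and before the persisted meta data is trusted. The policy is restore-or-reset: keep the previous-boot contents only if every page validates, otherwise clear the indexes so the buffer starts out empty. A minimal sketch of that shape in isolation, with hypothetical names (persisted_log, log_is_sane(), restore_log() are illustrative only, not kernel APIs):

#include <stdio.h>
#include <string.h>

/* Hypothetical structure persisted across reboots. */
struct persisted_log {
	unsigned int	head;		/* offset just past the newest byte */
	unsigned int	count;		/* number of records */
	char		data[256];
};

/* Cheap structural checks run before the contents are believed. */
static int log_is_sane(const struct persisted_log *log)
{
	return log->head <= sizeof(log->data) && log->count <= log->head;
}

/*
 * Restore-or-reset: keep the previous-boot contents only when the
 * checks pass; otherwise wipe the indexes and payload so no stale or
 * corrupt records leak through - the same policy the "invalid:" path
 * of rb_meta_validate_events() applies to the meta data, the reader
 * page and every sub-buffer commit.
 */
static void restore_log(struct persisted_log *log)
{
	if (log_is_sane(log))
		return;

	log->head = 0;
	log->count = 0;
	memset(log->data, 0, sizeof(log->data));
}

int main(void)
{
	struct persisted_log log = { .head = 9999, .count = 3 };	/* corrupt */

	restore_log(&log);
	printf("head=%u count=%u\n", log.head, log.count);	/* head=0 count=0 */
	return 0;
}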
@@ -3955,11 +4097,10 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
			 struct rb_event_info *info,
			 unsigned long tail)
{
-	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	u64 ts, delta;
	bool full = false;
-	int e;
+	int ret;

	bpage = info->tail_page->page;

@@ -3985,39 +4126,12 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
		goto out;

-	ts = bpage->time_stamp;
-
-	for (e = 0; e < tail; e += rb_event_length(event)) {
-
-		event = (struct ring_buffer_event *)(bpage->data + e);
-
-		switch (event->type_len) {
-
-		case RINGBUF_TYPE_TIME_EXTEND:
-			delta = rb_event_time_stamp(event);
-			ts += delta;
-			break;
-
-		case RINGBUF_TYPE_TIME_STAMP:
-			delta = rb_event_time_stamp(event);
-			delta = rb_fix_abs_ts(delta, ts);
-			if (delta < ts) {
-				buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
-						   cpu_buffer->cpu, ts, delta);
-			}
-			ts = delta;
-			break;
-
-		case RINGBUF_TYPE_PADDING:
-			if (event->time_delta == 1)
-				break;
-			fallthrough;
-		case RINGBUF_TYPE_DATA:
-			ts += event->time_delta;
-			break;
-
-		default:
-			RB_WARN_ON(cpu_buffer, 1);
+	ret = rb_read_data_buffer(bpage, tail, cpu_buffer->cpu, &ts, &delta);
+	if (ret < 0) {
+		if (delta < ts) {
+			buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
+					   cpu_buffer->cpu, ts, delta);
+			goto out;
		}
	}
	if ((full && ts > info->ts) ||
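
Note: this last hunk replaces check_buffer()'s open-coded event walk with the shared rb_read_data_buffer(), whose contract is to return the event count on success, -1 on failure, and leave the last good timestamp and the offending delta in the out-parameters so each caller can build its own diagnostic. A small hypothetical example of the same contract outside the kernel (walk_records() and its record format are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical walker with the same reporting shape as
 * rb_read_data_buffer(): the record count on success, -1 on failure,
 * with enough context in *last_ts / *bad_delta for the caller to
 * print a precise message (or, like rb_validate_buffer(), ignore it).
 */
static int walk_records(const uint64_t *deltas, int n, uint64_t start_ts,
			uint64_t *last_ts, uint64_t *bad_delta)
{
	uint64_t ts = start_ts;
	int i;

	*last_ts = 0;
	*bad_delta = 0;

	for (i = 0; i < n; i++) {
		if (deltas[i] == UINT64_MAX) {	/* stand-in for "corrupt" */
			*last_ts = ts;
			*bad_delta = deltas[i];
			return -1;
		}
		ts += deltas[i];
	}
	*last_ts = ts;
	return n;
}

int main(void)
{
	uint64_t deltas[] = { 10, 20, UINT64_MAX, 5 };
	uint64_t ts, bad;

	if (walk_records(deltas, 4, 100, &ts, &bad) < 0)
		fprintf(stderr, "corrupt record after ts %llu (delta %llu)\n",
			(unsigned long long)ts, (unsigned long long)bad);
	return 0;
}

Centralizing the walk this way lets check_buffer() keep its warning about time going backwards while rb_validate_buffer() only cares whether the page decodes at all.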