@@ -339,6 +339,7 @@ struct Peer {
339
339
msgs_sent_since_pong : usize ,
340
340
awaiting_pong_timer_tick_intervals : i8 ,
341
341
received_message_since_timer_tick : bool ,
342
+ sent_gossip_timestamp_filter : bool ,
342
343
}
343
344
344
345
impl Peer {
@@ -348,7 +349,11 @@ impl Peer {
348
349
/// announcements/updates for the given channel_id then we will send it when we get to that
349
350
/// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
350
351
/// sent the old versions, we should send the update, and so return true here.
351
- fn should_forward_channel_announcement ( & self , channel_id : u64 ) ->bool {
352
+ fn should_forward_channel_announcement ( & self , channel_id : u64 ) -> bool {
353
+ if self . their_features . as_ref ( ) . unwrap ( ) . supports_gossip_queries ( ) &&
354
+ !self . sent_gossip_timestamp_filter {
355
+ return false ;
356
+ }
352
357
match self . sync_status {
353
358
InitSyncTracker :: NoSyncRequested => true ,
354
359
InitSyncTracker :: ChannelsSyncing ( i) => i < channel_id,
@@ -358,6 +363,10 @@ impl Peer {
358
363
359
364
/// Similar to the above, but for node announcements indexed by node_id.
360
365
fn should_forward_node_announcement ( & self , node_id : PublicKey ) -> bool {
366
+ if self . their_features . as_ref ( ) . unwrap ( ) . supports_gossip_queries ( ) &&
367
+ !self . sent_gossip_timestamp_filter {
368
+ return false ;
369
+ }
361
370
match self . sync_status {
362
371
InitSyncTracker :: NoSyncRequested => true ,
363
372
InitSyncTracker :: ChannelsSyncing ( _) => false ,
@@ -619,6 +628,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
619
628
msgs_sent_since_pong : 0 ,
620
629
awaiting_pong_timer_tick_intervals : 0 ,
621
630
received_message_since_timer_tick : false ,
631
+ sent_gossip_timestamp_filter : false ,
622
632
} ) . is_some ( ) {
623
633
panic ! ( "PeerManager driver duplicated descriptors!" ) ;
624
634
} ;
@@ -665,6 +675,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
665
675
msgs_sent_since_pong : 0 ,
666
676
awaiting_pong_timer_tick_intervals : 0 ,
667
677
received_message_since_timer_tick : false ,
678
+ sent_gossip_timestamp_filter : false ,
668
679
} ) . is_some ( ) {
669
680
panic ! ( "PeerManager driver duplicated descriptors!" ) ;
670
681
} ;
@@ -1058,7 +1069,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
1058
1069
1059
1070
log_info ! ( self . logger, "Received peer Init message from {}: {}" , log_pubkey!( peer. their_node_id. unwrap( ) ) , msg. features) ;
1060
1071
1061
- if msg. features . initial_routing_sync ( ) {
1072
+ // For peers not supporting gossip queries, start sync now; otherwise, wait until we receive a filter.
1073
+ if msg. features . initial_routing_sync ( ) && !msg. features . supports_gossip_queries ( ) {
1062
1074
peer. sync_status = InitSyncTracker :: ChannelsSyncing ( 0 ) ;
1063
1075
}
1064
1076
if !msg. features . supports_static_remote_key ( ) {
@@ -1205,7 +1217,13 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
1205
1217
self . message_handler . route_handler . handle_reply_channel_range ( & peer. their_node_id . unwrap ( ) , msg) ?;
1206
1218
} ,
1207
1219
wire:: Message :: GossipTimestampFilter ( _msg) => {
1208
- // TODO: handle message
1220
+ // When supporting gossip messages, start initial gossip sync only after we receive
1221
+ // a GossipTimestampFilter
1222
+ if peer. their_features . as_ref ( ) . unwrap ( ) . supports_gossip_queries ( ) &&
1223
+ !peer. sent_gossip_timestamp_filter {
1224
+ peer. sent_gossip_timestamp_filter = true ;
1225
+ peer. sync_status = InitSyncTracker :: ChannelsSyncing ( 0 ) ;
1226
+ }
1209
1227
} ,
1210
1228
1211
1229
// Unknown messages:
@@ -1803,6 +1821,8 @@ mod tests {
1803
1821
assert_eq ! ( peer_b. read_event( & mut fd_b, & fd_a. outbound_data. lock( ) . unwrap( ) . split_off( 0 ) ) . unwrap( ) , false ) ;
1804
1822
peer_b. process_events ( ) ;
1805
1823
assert_eq ! ( peer_a. read_event( & mut fd_a, & fd_b. outbound_data. lock( ) . unwrap( ) . split_off( 0 ) ) . unwrap( ) , false ) ;
1824
+ peer_a. process_events ( ) ;
1825
+ assert_eq ! ( peer_b. read_event( & mut fd_b, & fd_a. outbound_data. lock( ) . unwrap( ) . split_off( 0 ) ) . unwrap( ) , false ) ;
1806
1826
( fd_a. clone ( ) , fd_b. clone ( ) )
1807
1827
}
1808
1828
@@ -1866,21 +1886,21 @@ mod tests {
1866
1886
let ( mut fd_a, mut fd_b) = establish_connection ( & peers[ 0 ] , & peers[ 1 ] ) ;
1867
1887
1868
1888
// Make each peer to read the messages that the other peer just wrote to them. Note that
1869
- // due to the max-messagse -before-ping limits this may take a few iterations to complete.
1889
+ // due to the max-message -before-ping limits this may take a few iterations to complete.
1870
1890
for _ in 0 ..150 /super :: BUFFER_DRAIN_MSGS_PER_TICK + 1 {
1871
- peers[ 0 ] . process_events ( ) ;
1872
- let b_read_data = fd_a. outbound_data . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
1873
- assert ! ( !b_read_data. is_empty( ) ) ;
1874
-
1875
- peers[ 1 ] . read_event ( & mut fd_b, & b_read_data) . unwrap ( ) ;
1876
1891
peers[ 1 ] . process_events ( ) ;
1877
-
1878
1892
let a_read_data = fd_b. outbound_data . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
1879
1893
assert ! ( !a_read_data. is_empty( ) ) ;
1894
+
1880
1895
peers[ 0 ] . read_event ( & mut fd_a, & a_read_data) . unwrap ( ) ;
1896
+ peers[ 0 ] . process_events ( ) ;
1881
1897
1882
- peers[ 1 ] . process_events ( ) ;
1883
- assert_eq ! ( fd_b. outbound_data. lock( ) . unwrap( ) . len( ) , 0 , "Until B receives data, it shouldn't send more messages" ) ;
1898
+ let b_read_data = fd_a. outbound_data . lock ( ) . unwrap ( ) . split_off ( 0 ) ;
1899
+ assert ! ( !b_read_data. is_empty( ) ) ;
1900
+ peers[ 1 ] . read_event ( & mut fd_b, & b_read_data) . unwrap ( ) ;
1901
+
1902
+ peers[ 0 ] . process_events ( ) ;
1903
+ assert_eq ! ( fd_a. outbound_data. lock( ) . unwrap( ) . len( ) , 0 , "Until A receives data, it shouldn't send more messages" ) ;
1884
1904
}
1885
1905
1886
1906
// Check that each peer has received the expected number of channel updates and channel
0 commit comments