@@ -27,7 +27,7 @@ use ln::wire;
 use ln::wire::Encode;
 use routing::gossip::{NetworkGraph, P2PGossipSync};
 use util::atomic_counter::AtomicCounter;
-use util::events::{MessageSendEvent, MessageSendEventsProvider};
+use util::events::{MessageSendEvent, MessageSendEventsProvider, OnionMessageProvider};
 use util::logger::Logger;
 
 use prelude::*;
@@ -81,6 +81,11 @@ impl Deref for IgnoringMessageHandler {
 	fn deref(&self) -> &Self { self }
 }
 
+// TODO: implement `OnionMessageProvider` for `IgnoringMessageHandler` instead
+impl OnionMessageProvider for () {
+	fn next_onion_messages_for_peer(&self, _peer_node_id: PublicKey, _max_messages: usize) -> Vec<msgs::OnionMessage> { Vec::new() }
+}
+
 // Implement Type for Infallible, note that it cannot be constructed, and thus you can never call a
 // method that takes self for it.
 impl wire::Type for Infallible {
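
Note (not part of the diff): the `()` impl above is a stopgap until `IgnoringMessageHandler` implements `OnionMessageProvider` itself, per the TODO. For context, a non-trivial provider would keep per-peer queues and hand back at most `max_messages` entries per call. The sketch below is illustrative only; `QueueingOnionMessenger` and its `queues` field are hypothetical, and it assumes the imports this file already has (`PublicKey`, `msgs`, `Mutex`, `HashMap`, `cmp`) plus `std::collections::VecDeque`.

// Hypothetical provider sketch, not part of this PR.
struct QueueingOnionMessenger {
	// Per-peer outbound queues; a Mutex gives interior mutability since the
	// trait method only takes &self.
	queues: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
}

impl OnionMessageProvider for QueueingOnionMessenger {
	fn next_onion_messages_for_peer(&self, peer_node_id: PublicKey, max_messages: usize) -> Vec<msgs::OnionMessage> {
		let mut queues = self.queues.lock().unwrap();
		match queues.get_mut(&peer_node_id) {
			// Drain at most `max_messages` so the caller's buffer limits are respected.
			Some(queue) => queue.drain(..cmp::min(max_messages, queue.len())).collect(),
			None => Vec::new(),
		}
	}
}
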
@@ -294,15 +299,23 @@ enum InitSyncTracker{
 /// forwarding gossip messages to peers altogether.
 const FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO: usize = 2;
 
+/// The ratio between buffer sizes at which we stop sending initial sync messages vs when we pause
+/// forwarding onion messages to peers altogether.
+const OM_BUFFER_LIMIT_RATIO: usize = 2;
+
 /// When the outbound buffer has this many messages, we'll stop reading bytes from the peer until
 /// we have fewer than this many messages in the outbound buffer again.
-/// We also use this as the target number of outbound gossip messages to keep in the write buffer,
-/// refilled as we send bytes.
+/// We also use this as the target number of outbound gossip and onion messages to keep in the write
+/// buffer, refilled as we send bytes.
 const OUTBOUND_BUFFER_LIMIT_READ_PAUSE: usize = 10;
 /// When the outbound buffer has this many messages, we'll simply skip relaying gossip messages to
 /// the peer.
 const OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO;
 
+/// When the outbound buffer has this many messages, we won't poll for new onion messages for this
+/// peer.
+const OUTBOUND_BUFFER_LIMIT_PAUSE_OMS: usize = OUTBOUND_BUFFER_LIMIT_READ_PAUSE * OM_BUFFER_LIMIT_RATIO;
+
 /// If we've sent a ping, and are still awaiting a response, we may need to churn our way through
 /// the socket receive buffer before receiving the ping.
 ///
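
Note (not part of the diff): with the values in this hunk, the new onion-message pause threshold lands at the same point as the existing gossip-drop threshold, since both work out to 10 * 2 = 20 messages. A quick illustrative check using only constants defined above:

#[test]
fn om_pause_threshold_matches_gossip_drop() {
	// 10 (OUTBOUND_BUFFER_LIMIT_READ_PAUSE) * 2 (OM_BUFFER_LIMIT_RATIO) = 20.
	assert_eq!(OUTBOUND_BUFFER_LIMIT_PAUSE_OMS, 20);
	// Same value as OUTBOUND_BUFFER_LIMIT_READ_PAUSE * FORWARD_INIT_SYNC_BUFFER_LIMIT_RATIO.
	assert_eq!(OUTBOUND_BUFFER_LIMIT_PAUSE_OMS, OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP);
}
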
@@ -378,6 +391,14 @@ impl Peer {
 			InitSyncTracker::NodesSyncing(pk) => pk < node_id,
 		}
 	}
+
+	/// Returns the number of onion messages we can fit in this peer's buffer.
+	fn onion_message_buffer_slots_available(&self) -> usize {
+		cmp::min(
+			OUTBOUND_BUFFER_LIMIT_PAUSE_OMS.saturating_sub(self.pending_outbound_buffer.len()),
+			(BUFFER_DRAIN_MSGS_PER_TICK * OM_BUFFER_LIMIT_RATIO).saturating_sub(self.msgs_sent_since_pong))
+	}
+
 	/// Returns whether this peer's buffer is full and we should drop gossip messages.
 	fn buffer_full_drop_gossip(&self) -> bool {
 		if self.pending_outbound_buffer.len() > OUTBOUND_BUFFER_LIMIT_DROP_GOSSIP
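
Note (not part of the diff): the slot count is the smaller of two saturating subtractions, one bounding the total number of buffered messages and one rate-limiting messages sent per ping interval. A worked example with concrete numbers, assuming OUTBOUND_BUFFER_LIMIT_PAUSE_OMS = 20 as above and, hypothetically, BUFFER_DRAIN_MSGS_PER_TICK = 32 (that constant is defined elsewhere in this file):

// Standalone sketch of the same arithmetic; constant values are inlined for illustration.
fn example_slots(pending_outbound: usize, msgs_sent_since_pong: usize) -> usize {
	core::cmp::min(
		20usize.saturating_sub(pending_outbound),            // buffer-size limit
		(32usize * 2).saturating_sub(msgs_sent_since_pong))  // per-tick rate limit
}

// example_slots(5, 50) == min(15, 14) == 14 onion messages may be polled.
// example_slots(25, 0) == min(0, 64) == 0: the buffer is over the limit, so
// polling for this peer is skipped until it drains.
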
@@ -432,6 +453,8 @@ pub struct PeerManager<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: De
 		L::Target: Logger,
 		CMH::Target: CustomMessageHandler {
 	message_handler: MessageHandler<CM, RM>,
+	onion_message_handler: (), // TODO: add `OnionMessageHandler` trait to
+	                           // `MessageHandler` and get rid of this
 	/// Connection state for each connected peer - we have an outer read-write lock which is taken
 	/// as read while we're doing processing for a peer and taken write when a peer is being added
 	/// or removed.
@@ -587,6 +610,7 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
 		PeerManager {
 			message_handler,
+			onion_message_handler: (),
 			peers: FairRwLock::new(HashMap::new()),
 			node_id_to_descriptor: Mutex::new(HashMap::new()),
 			event_processing_lock: Mutex::new(()),
@@ -797,8 +821,12 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 	/// ready to call `[write_buffer_space_avail`] again if a write call generated here isn't
 	/// sufficient!
 	///
+	/// If any bytes are written, [`process_events`] should be called afterwards.
+	// TODO: why?
+	///
 	/// [`send_data`]: SocketDescriptor::send_data
 	/// [`write_buffer_space_avail`]: PeerManager::write_buffer_space_avail
+	/// [`process_events`]: PeerManager::process_events
 	pub fn write_buffer_space_avail(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
 		let peers = self.peers.read().unwrap();
 		match peers.get(descriptor) {
@@ -1387,6 +1415,8 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 	/// You don't have to call this function explicitly if you are using [`lightning-net-tokio`]
 	/// or one of the other clients provided in our language bindings.
 	///
+	/// Note that this method should be called again if any bytes are written.
+	///
 	/// Note that if there are any other calls to this function waiting on lock(s) this may return
 	/// without doing any work. All available events that need handling will be handled before the
 	/// other calls return.
@@ -1637,6 +1667,22 @@ impl<Descriptor: SocketDescriptor, CM: Deref, RM: Deref, L: Deref, CMH: Deref> P
 
 			for (descriptor, peer_mutex) in peers.iter() {
 				self.do_attempt_write_data(&mut (*descriptor).clone(), &mut *peer_mutex.lock().unwrap());
+
+				// Only see if we have room for onion messages after we've written all channel messages, to
+				// ensure they take priority.
+				let (peer_node_id, om_buffer_slots_avail) = {
+					let peer = peer_mutex.lock().unwrap();
+					if let Some(peer_node_id) = peer.their_node_id {
+						(Some(peer_node_id.clone()), peer.onion_message_buffer_slots_available())
+					} else { (None, 0) }
+				};
+				if peer_node_id.is_some() && om_buffer_slots_avail > 0 {
+					for event in self.onion_message_handler.next_onion_messages_for_peer(
+						peer_node_id.unwrap(), om_buffer_slots_avail)
+					{
+						// TODO: forward onion message to peer
+					}
+				}
 			}
 		}
 		if !peers_to_disconnect.is_empty() {