@@ -466,6 +466,7 @@ enum BackgroundEvent {
	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
}

+#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
@@ -1476,6 +1477,54 @@ macro_rules! emit_channel_ready_event {
	}
}

+macro_rules! handle_monitor_update_completion {
+	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+		let mut updates = $chan.monitor_updating_restored(&$self.logger,
+			&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+			$self.best_block.read().unwrap().height());
+		let counterparty_node_id = $chan.get_counterparty_node_id();
+		let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
+			// We only send a channel_update in the case where we are just now sending a
+			// channel_ready and the channel is in a usable state. We may re-send a
+			// channel_update later through the announcement_signatures process for public
+			// channels, but there's no reason not to just inform our counterparty of our fees
+			// now.
+			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
+				Some(events::MessageSendEvent::SendChannelUpdate {
+					node_id: counterparty_node_id,
+					msg,
+				})
+			} else { None }
+		} else { None };
+
+		let update_actions = $peer_state.monitor_update_blocked_actions
+			.remove(&$chan.channel_id()).unwrap_or(Vec::new());
+
+		let htlc_forwards = $self.handle_channel_resumption(
+			&mut $peer_state.pending_msg_events, $chan, updates.raa,
+			updates.commitment_update, updates.order, updates.accepted_htlcs,
+			updates.funding_broadcastable, updates.channel_ready,
+			updates.announcement_sigs);
+		if let Some(upd) = channel_update {
+			$peer_state.pending_msg_events.push(upd);
+		}
+
+		let channel_id = $chan.channel_id();
+		core::mem::drop($peer_state_lock);
+
+		$self.handle_monitor_update_completion_actions(update_actions);
+
+		if let Some(forwards) = htlc_forwards {
+			$self.forward_htlcs(&mut [forwards][..]);
+		}
+		$self.finalize_claims(updates.finalized_claimed_htlcs);
+		for failure in updates.failed_htlcs.drain(..) {
+			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+		}
+	} }
+}
+
macro_rules! handle_new_monitor_update {
	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
@@ -1504,43 +1553,7 @@ macro_rules! handle_new_monitor_update
					.update_id == $update_id) &&
					$chan.get_latest_monitor_update_id() == $update_id
				{
-					let mut updates = $chan.monitor_updating_restored(&$self.logger,
-						&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
-						$self.best_block.read().unwrap().height());
-					let counterparty_node_id = $chan.get_counterparty_node_id();
-					let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
-						// We only send a channel_update in the case where we are just now sending a
-						// channel_ready and the channel is in a usable state. We may re-send a
-						// channel_update later through the announcement_signatures process for public
-						// channels, but there's no reason not to just inform our counterparty of our fees
-						// now.
-						if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
-							Some(events::MessageSendEvent::SendChannelUpdate {
-								node_id: counterparty_node_id,
-								msg,
-							})
-						} else { None }
-					} else { None };
-					let htlc_forwards = $self.handle_channel_resumption(
-						&mut $peer_state.pending_msg_events, $chan, updates.raa,
-						updates.commitment_update, updates.order, updates.accepted_htlcs,
-						updates.funding_broadcastable, updates.channel_ready,
-						updates.announcement_sigs);
-					if let Some(upd) = channel_update {
-						$peer_state.pending_msg_events.push(upd);
-					}
-
-					let channel_id = $chan.channel_id();
-					core::mem::drop($peer_state_lock);
-
-					if let Some(forwards) = htlc_forwards {
-						$self.forward_htlcs(&mut [forwards][..]);
-					}
-					$self.finalize_claims(updates.finalized_claimed_htlcs);
-					for failure in updates.failed_htlcs.drain(..) {
-						let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
-						$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
-					}
+					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
				}
				Ok(())
			},
@@ -4208,6 +4221,14 @@ where
		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
	-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+		log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+			log_bytes!(channel.channel_id()),
+			if raa.is_some() { "an" } else { "no" },
+			if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+			if funding_broadcastable.is_some() { "" } else { "not " },
+			if channel_ready.is_some() { "sending" } else { "without" },
+			if announcement_sigs.is_some() { "sending" } else { "without" });
+
		let mut htlc_forwards = None;

		let counterparty_node_id = channel.get_counterparty_node_id();
@@ -4266,65 +4287,36 @@ where
	fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);

-		let htlc_forwards;
-		let (mut pending_failures, finalized_claims, counterparty_node_id) = {
-			let counterparty_node_id = match counterparty_node_id {
-				Some(cp_id) => cp_id.clone(),
-				None => {
-					// TODO: Once we can rely on the counterparty_node_id from the
-					// monitor event, this and the id_to_peer map should be removed.
-					let id_to_peer = self.id_to_peer.lock().unwrap();
-					match id_to_peer.get(&funding_txo.to_channel_id()) {
-						Some(cp_id) => cp_id.clone(),
-						None => return,
-					}
-				}
-			};
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			let mut peer_state_lock;
-			let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-			if peer_state_mutex_opt.is_none() { return }
-			peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			let mut channel = {
-				match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
-					hash_map::Entry::Occupied(chan) => chan,
-					hash_map::Entry::Vacant(_) => return,
+		let counterparty_node_id = match counterparty_node_id {
+			Some(cp_id) => cp_id.clone(),
+			None => {
+				// TODO: Once we can rely on the counterparty_node_id from the
+				// monitor event, this and the id_to_peer map should be removed.
+				let id_to_peer = self.id_to_peer.lock().unwrap();
+				match id_to_peer.get(&funding_txo.to_channel_id()) {
+					Some(cp_id) => cp_id.clone(),
+					None => return,
				}
-			};
-			if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
-				return;
			}
-
-			let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height());
-			let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
-				// We only send a channel_update in the case where we are just now sending a
-				// channel_ready and the channel is in a usable state. We may re-send a
-				// channel_update later through the announcement_signatures process for public
-				// channels, but there's no reason not to just inform our counterparty of our fees
-				// now.
-				if let Ok(msg) = self.get_channel_update_for_unicast(channel.get()) {
-					Some(events::MessageSendEvent::SendChannelUpdate {
-						node_id: channel.get().get_counterparty_node_id(),
-						msg,
-					})
-				} else { None }
-			} else { None };
-			htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
-			if let Some(upd) = channel_update {
-				peer_state.pending_msg_events.push(upd);
+		};
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let mut peer_state_lock;
+		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		if peer_state_mutex_opt.is_none() { return }
+		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let mut channel = {
+			match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
+				hash_map::Entry::Occupied(chan) => chan,
+				hash_map::Entry::Vacant(_) => return,
			}
-
-			(updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
		};
-		if let Some(forwards) = htlc_forwards {
-			self.forward_htlcs(&mut [forwards][..]);
-		}
-		self.finalize_claims(finalized_claims);
-		for failure in pending_failures.drain(..) {
-			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
-			self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+		log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
+			highest_applied_update_id, channel.get().get_latest_monitor_update_id());
+		if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
+			return;
		}
+		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
	}

	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].