@@ -4588,7 +4588,7 @@ where
 		for htlc in sources.drain(..) {
 			if let Err((pk, err)) = self.claim_funds_from_hop(
 				htlc.prev_hop, payment_preimage,
-				|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+				|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }), false)
 			{
 				if let msgs::ErrorAction::IgnoreError = err.err.action {
 					// We got a temporary failure updating monitor, but will claim the
@@ -4618,7 +4618,7 @@ where
 	}
 
 	fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
-		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc, during_init: bool)
 	-> Result<(), (PublicKey, MsgHandleErrInternal)> {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
@@ -4648,14 +4648,26 @@ where
 							log_bytes!(chan_id), action);
 						peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 					}
-					let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
-						peer_state, per_peer_state, chan);
-					if let Err(e) = res {
-						// TODO: This is a *critical* error - we probably updated the outbound edge
-						// of the HTLC's monitor with a preimage. We should retry this monitor
-						// update over and over again until morale improves.
-						log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-						return Err((counterparty_node_id, e));
+					if !during_init {
+						let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+							peer_state, per_peer_state, chan);
+						if let Err(e) = res {
+							// TODO: This is a *critical* error - we probably updated the outbound edge
+							// of the HTLC's monitor with a preimage. We should retry this monitor
+							// update over and over again until morale improves.
+							log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+							return Err((counterparty_node_id, e));
+						}
+					} else {
+						// If we're running during init we cannot update a monitor directly -
+						// they probably haven't actually been loaded yet. Instead, push the
+						// monitor update as a background event.
+						self.pending_background_events.lock().unwrap().push(
+							BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+								counterparty_node_id,
+								funding_txo: prev_hop.outpoint,
+								update: monitor_update.clone(),
+							});
 					}
 				}
 				return Ok(());
@@ -4668,16 +4680,34 @@ where
 				payment_preimage,
 			}],
 		};
-		// We update the ChannelMonitor on the backward link, after
-		// receiving an `update_fulfill_htlc` from the forward link.
-		let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
-		if update_res != ChannelMonitorUpdateStatus::Completed {
-			// TODO: This needs to be handled somehow - if we receive a monitor update
-			// with a preimage we *must* somehow manage to propagate it to the upstream
-			// channel, or we must have an ability to receive the same event and try
-			// again on restart.
-			log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-				payment_preimage, update_res);
+
+		if !during_init {
+			// We update the ChannelMonitor on the backward link, after
+			// receiving an `update_fulfill_htlc` from the forward link.
+			let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
+			if update_res != ChannelMonitorUpdateStatus::Completed {
+				// TODO: This needs to be handled somehow - if we receive a monitor update
+				// with a preimage we *must* somehow manage to propagate it to the upstream
+				// channel, or we must have an ability to receive the same event and try
+				// again on restart.
+				log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+					payment_preimage, update_res);
+			}
+		} else {
+			// If we're running during init we cannot update a monitor directly - they probably
+			// haven't actually been loaded yet. Instead, push the monitor update as a background
+			// event.
+			// Note that while its safe to use `ClosingMonitorUpdateRegeneratedOnStartup` here (the
+			// channel is already closed) we need to ultimately handle the monitor update
+			// completion action only after we've completed the monitor update. This is the only
+			// way to guarantee this update *will* be regenerated on startup (otherwise if this was
+			// from a forwarded HTLC the downstream preimage may be deleted before we claim
+			// upstream). Thus, we need to transition to some new `BackgroundEvent` type which will
+			// complete the monitor update completion action from `completion_action`.
+			self.pending_background_events.lock().unwrap().push(
+				BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((
+					prev_hop.outpoint, preimage_update,
+				)));
 		}
 		// Note that we do process the completion action here. This totally could be a
 		// duplicate claim, but we have no way of knowing without interrogating the
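
Both `during_init` branches above apply the same deferral idea: while the `ChannelManager` is still being deserialized the relevant `ChannelMonitor`s may not be loaded, so instead of applying a monitor update directly the update is queued as a background event and replayed once startup completes. A minimal standalone sketch of that pattern is below; the `MonitorUpdate`, `QueuedEvent`, and `Manager` names are hypothetical stand-ins for illustration, not LDK's actual API.

```rust
use std::sync::Mutex;

// Hypothetical stand-ins for illustration; the real types live in rust-lightning
// (`ChannelMonitorUpdate`, `BackgroundEvent`, `ChannelManager`, ...).
#[derive(Clone, Debug)]
struct MonitorUpdate(u64);

#[derive(Debug)]
enum QueuedEvent {
    // An update discovered while the manager was still being deserialized.
    MonitorUpdateRegeneratedOnStartup { funding_txo: [u8; 32], update: MonitorUpdate },
}

struct Manager {
    pending_background_events: Mutex<Vec<QueuedEvent>>,
}

impl Manager {
    // `during_init` mirrors the new parameter in the diff: apply immediately in the
    // normal case, queue the update when the monitors may not be loaded yet.
    fn apply_or_queue(&self, funding_txo: [u8; 32], update: MonitorUpdate, during_init: bool) {
        if !during_init {
            // Normal path: hand the update to the chain monitor right away.
            println!("applying {:?} now", update);
        } else {
            // Init path: remember the update and replay it after construction.
            self.pending_background_events.lock().unwrap().push(
                QueuedEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update });
        }
    }

    // Called once startup is complete (in LDK this happens via background event processing).
    fn process_background_events(&self) {
        for event in self.pending_background_events.lock().unwrap().drain(..) {
            match event {
                QueuedEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update } => {
                    println!("applying deferred {:?} for channel funding {:?}", update, funding_txo);
                }
            }
        }
    }
}

fn main() {
    let mgr = Manager { pending_background_events: Mutex::new(Vec::new()) };
    mgr.apply_or_queue([0u8; 32], MonitorUpdate(1), /* during_init */ true);
    mgr.process_background_events();
}
```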
@@ -4692,9 +4722,10 @@ where
 		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
 	}
 
-	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32], during_init: bool) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+				debug_assert!(!during_init);
 				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
@@ -4717,7 +4748,7 @@ where
 								downstream_counterparty_and_funding_outpoint: None,
 							})
 						} else { None }
-					});
+					}, during_init);
 				if let Err((pk, err)) = res {
 					let result: Result<(), _> = Err(err);
 					let _ = handle_error!(self, result, pk);
@@ -5455,7 +5486,7 @@ where
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
+		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id, false);
 		Ok(())
 	}
 
@@ -5825,7 +5856,7 @@ where
 					MonitorEvent::HTLCEvent(htlc_update) => {
 						if let Some(preimage) = htlc_update.payment_preimage {
 							log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-							self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+							self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id(), false);
 						} else {
 							log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -8385,6 +8416,11 @@ where
 		// Note that we have to do the above replays before we push new monitor updates.
 		pending_background_events.append(&mut close_background_events);
 
+		// If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
+		// should ensure we try them again on the inbound edge. We put them here and do so after we
+		// have a fully-constructed `ChannelManager` at the end.
+		let mut pending_claims_to_replay = Vec::new();
+
 		{
 			// If we're tracking pending payments, ensure we haven't lost any by looking at the
 			// ChannelMonitor data for any channels for which we do not have authorative state
@@ -8395,7 +8431,8 @@ where
 			// We only rebuild the pending payments map if we were most recently serialized by
 			// 0.0.102+
 			for (_, monitor) in args.channel_monitors.iter() {
-				if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
+				let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
+				if counterparty_opt.is_none() {
 					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
 						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
 							if path.hops.is_empty() {
@@ -8489,6 +8526,30 @@ where
 						}
 					}
 				}
+
+				// Whether the downstream channel was closed or not, try to re-apply any payment
+				// preimages from it which may be needed in upstream channels for forwarded
+				// payments.
+				let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
+					.into_iter()
+					.filter_map(|(htlc_source, (htlc, preimage_opt))| {
+						if let HTLCSource::PreviousHopData(_) = htlc_source {
+							if let Some(payment_preimage) = preimage_opt {
+								Some((htlc_source, payment_preimage, htlc.amount_msat,
+									counterparty_opt.is_none(), // i.e. the downstream chan is closed
+									monitor.get_funding_txo().0.to_channel_id()))
+							} else { None }
+						} else {
+							// If it was an outbound payment, we've handled it above - if a preimage
+							// came in and we persisted the `ChannelManager` we either handled it and
+							// are good to go or the channel force-closed - we don't have to handle the
+							// channel still live case here.
+							None
+						}
+					});
+				for tuple in outbound_claimed_htlcs_iter {
+					pending_claims_to_replay.push(tuple);
+				}
 			}
 		}
 
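
The iterator chain added above re-derives replayable claims from each `ChannelMonitor`'s HTLC set: only forwarded (previous-hop) HTLCs that already have a preimage are kept, together with whether the downstream channel is closed. A rough standalone sketch of that filtering step follows; the `Source`, `Preimage`, and `collect_replay_claims` names are simplified stand-ins, not LDK's types.

```rust
// Simplified stand-ins for illustration; the real code uses LDK's `HTLCSource`,
// `PaymentPreimage` and `ChannelMonitor` types.
#[derive(Debug)]
enum Source { OutboundRoute, PreviousHop }

type Preimage = [u8; 32];

/// One replayable claim: (source, preimage, amount_msat, downstream_closed, downstream_chan_id).
type ReplayClaim = (Source, Preimage, u64, bool, [u8; 32]);

fn collect_replay_claims(
    monitor_htlcs: Vec<(Source, (u64, Option<Preimage>))>,
    downstream_closed: bool,
    channel_id: [u8; 32],
) -> Vec<ReplayClaim> {
    monitor_htlcs
        .into_iter()
        .filter_map(|(source, (amount_msat, preimage_opt))| {
            // Only forwarded HTLCs (previous-hop data) with a known preimage need to be
            // replayed on the upstream edge; our own outbound payments are handled elsewhere.
            match (source, preimage_opt) {
                (Source::PreviousHop, Some(preimage)) =>
                    Some((Source::PreviousHop, preimage, amount_msat, downstream_closed, channel_id)),
                _ => None,
            }
        })
        .collect()
}

fn main() {
    let claims = collect_replay_claims(
        vec![
            (Source::PreviousHop, (1_000, Some([7u8; 32]))),   // forwarded and claimable: kept
            (Source::OutboundRoute, (2_000, Some([9u8; 32]))), // our own payment: skipped
            (Source::PreviousHop, (3_000, None)),              // no preimage yet: skipped
        ],
        /* downstream_closed */ true,
        [1u8; 32],
    );
    assert_eq!(claims.len(), 1);
    println!("{} claim(s) to replay", claims.len());
}
```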
@@ -8740,6 +8801,11 @@ where
 			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
 
+		for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
+			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
+				downstream_closed, downstream_chan_id, true);
+		}
+
 		//TODO: Broadcast channel update for closed channels, but only after we've made a
 		//connection or two.
 