@@ -499,11 +499,26 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-	/// Handle a ChannelMonitorUpdate
+	/// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
+	/// [`Self::MonitorUpdateRegeneratedOnStartup`] as the non-closing variant needs a public key
+	/// to handle channel resumption, whereas if the channel has been force-closed we do not need
+	/// the counterparty node_id.
 	///
 	/// Note that any such events are lost on shutdown, so in general they must be updates which
 	/// are regenerated on startup.
-	MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+	ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+	/// Handle a ChannelMonitorUpdate which may or may not close the channel. In general this
+	/// should be used rather than [`Self::ClosingMonitorUpdateRegeneratedOnStartup`]; however, in
+	/// cases where the `counterparty_node_id` is not available because the channel was closed due
+	/// to a [`ChannelMonitor`] error, the other variant is acceptable.
+	///
+	/// Note that any such events are lost on shutdown, so in general they must be updates which
+	/// are regenerated on startup.
+	MonitorUpdateRegeneratedOnStartup {
+		counterparty_node_id: PublicKey,
+		funding_txo: OutPoint,
+		update: ChannelMonitorUpdate
+	},
 }
 
 #[derive(Debug)]
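The enum split above is easiest to see in isolation. Below is a minimal, self-contained Rust sketch of the same two-variant pattern; the `OutPoint`, `ChannelMonitorUpdate` and `PublicKey` names are placeholder stand-ins, not the real lightning/secp256k1 types, and the handler bodies only print what a real caller would apply and resume.

```rust
// Self-contained sketch of the two-variant split described above. The OutPoint,
// ChannelMonitorUpdate and PublicKey types here are stand-ins, not the real
// lightning/secp256k1 types.
#[derive(Debug)]
struct OutPoint;
#[derive(Debug)]
struct ChannelMonitorUpdate;
#[derive(Debug)]
struct PublicKey;

#[derive(Debug)]
enum BackgroundEvent {
	// The channel is already force-closed, so no counterparty is needed afterwards.
	ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
	// The channel may still be live, so keep the counterparty id around to resume it
	// once the update has been applied.
	MonitorUpdateRegeneratedOnStartup {
		counterparty_node_id: PublicKey,
		funding_txo: OutPoint,
		update: ChannelMonitorUpdate,
	},
}

fn handle(event: BackgroundEvent) {
	match event {
		BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
			// Apply the update; nothing to resume.
			println!("closing update for {:?}: {:?}", funding_txo, update);
		},
		BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
			// Apply the update, then resume the channel with this peer.
			println!("update for {:?} (peer {:?}): {:?}", funding_txo, counterparty_node_id, update);
		},
	}
}

fn main() {
	handle(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)));
	handle(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
		counterparty_node_id: PublicKey,
		funding_txo: OutPoint,
		update: ChannelMonitorUpdate,
	});
}
```

Keeping the counterparty key only on the non-closing variant avoids threading a `PublicKey` through paths where the channel is already gone and no resumption can happen.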
@@ -2193,7 +2208,7 @@ where
 				let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 			}
-		if let Some((funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -3774,7 +3789,12 @@ where
 
 		for event in background_events.drain(..) {
 			match event {
-				BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+				BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+					// The channel has already been closed, so no use bothering to care about the
+					// monitor update completing.
+					let _ = self.chain_monitor.update_channel(funding_txo, &update);
+				},
+				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update, .. } => {
 					// The channel has already been closed, so no use bothering to care about the
 					// monitor update completing.
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -5689,12 +5709,15 @@ where
 				// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 				// so we track the update internally and handle it when the user next calls
 				// timer_tick_occurred, guaranteeing we're running normally.
-				if let Some((funding_txo, update)) = failure.0.take() {
+				if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
 					assert_eq!(update.updates.len(), 1);
 					if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 						assert!(should_broadcast);
 					} else { unreachable!(); }
-					self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
+					self.pending_background_events.lock().unwrap().push(
+						BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+							counterparty_node_id, funding_txo, update
+						});
 				}
 				self.finish_force_close_channel(failure);
 			}
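The comment retained in the context above leans on a queue-then-drain pattern: force-close paths only enqueue the regenerated `ChannelMonitorUpdate`, and the user's next `timer_tick_occurred` call applies it once we are definitely out of initialization. Below is a minimal standalone sketch of that pattern, using stub types rather than LDK's actual API.

```rust
// Standalone sketch (not LDK's actual API) of the queue-then-drain pattern above:
// force-close paths only enqueue the regenerated monitor update, and a later
// timer_tick_occurred call drains and applies it once we are out of initialization.
use std::sync::Mutex;

#[derive(Debug)]
struct ChannelMonitorUpdate(u64); // stand-in carrying only an update id

struct Manager {
	pending_background_events: Mutex<Vec<ChannelMonitorUpdate>>,
}

impl Manager {
	fn force_close(&self, update: ChannelMonitorUpdate) {
		// Do not apply the update inline; just record it for later.
		self.pending_background_events.lock().unwrap().push(update);
	}

	fn timer_tick_occurred(&self) {
		// Called during normal operation, so it is safe to apply the queued updates now.
		let events: Vec<ChannelMonitorUpdate> =
			self.pending_background_events.lock().unwrap().drain(..).collect();
		for update in events {
			println!("applying queued monitor update {:?}", update);
		}
	}
}

fn main() {
	let mgr = Manager { pending_background_events: Mutex::new(Vec::new()) };
	mgr.force_close(ChannelMonitorUpdate(1));
	mgr.timer_tick_occurred();
}
```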
@@ -7767,8 +7790,10 @@ where
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
 						log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 					let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
-					if let Some(monitor_update) = monitor_update {
-						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
+					if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+						pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+							counterparty_node_id, funding_txo, update
+						});
 					}
 					failed_htlcs.append(&mut new_failed_htlcs);
 					channel_closures.push_back((events::Event::ChannelClosed {
@@ -7843,7 +7868,7 @@ where
 						update_id: CLOSED_CHANNEL_UPDATE_ID,
 						updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 					};
-					pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+					pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
 				}
 			}
 