
Commit 56aafbd

Return the counterparty node_id as a part of a force-shutdown result
In the coming commits we'll need the counterparty node_id when handling a background monitor update, as we may need to resume normal channel operation as a result. Thus, we go ahead and pipe it through from the shutdown end, as doing so keeps the codepaths consistent. Sadly, the monitor-originated shutdown case cannot require a counterparty node_id, as some versions of LDK did not store it in the ChannelMonitor.
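For context, a minimal sketch of the shape change this commit makes to the force-shutdown result. The type names mirror LDK's, but the definitions below are simplified stand-ins, not the real types; only the tuple shape matters here.

// Simplified stand-ins for LDK's types, for illustration only.
struct PublicKey;
struct OutPoint;
struct ChannelMonitorUpdate;
struct HTLCSource;
struct PaymentHash;

// Before: the closing monitor update carried no hint of which peer it was for.
type OldShutdownResult =
    (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);

// After: the counterparty node_id leads the tuple, so later background
// handling can resume normal channel operation for that peer.
type ShutdownResult =
    (Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);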
Parent: 9e542ec

2 files changed: +25 -10 lines changed

lightning/src/ln/channel.rs

Lines changed: 2 additions & 2 deletions
@@ -6228,7 +6228,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
     /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
     /// Also returns the list of payment_hashes for channels which we can safely fail backwards
     /// immediately (others we will have to allow to time out).
-    pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
+    pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
         // Note that we MUST only generate a monitor update that indicates force-closure - we're
         // called during initialization prior to the chain_monitor in the encompassing ChannelManager
         // being fully configured in some cases. Thus, its likely any monitor events we generate will
@@ -6257,7 +6257,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
         if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
             self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-            Some((funding_txo, ChannelMonitorUpdate {
+            Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
                 update_id: self.latest_monitor_update_id,
                 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
             }))
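To make the channel.rs change concrete, here is a hedged sketch of how the stored counterparty key ends up in the returned tuple. This is a toy struct, not LDK's actual Channel; its fields and the omitted should_broadcast handling are simplifying assumptions.

#[derive(Clone, Copy)]
struct PublicKey;
#[derive(Clone, Copy)]
struct OutPoint;
struct ChannelMonitorUpdate { update_id: u64 }

struct Channel {
    counterparty_node_id: PublicKey,
    funding_txo: OutPoint,
    latest_monitor_update_id: u64,
}

impl Channel {
    fn get_counterparty_node_id(&self) -> PublicKey { self.counterparty_node_id }

    // Mirrors the diff: the counterparty key now rides in front of the
    // funding outpoint and the force-closure monitor update.
    fn force_shutdown(&mut self) -> Option<(PublicKey, OutPoint, ChannelMonitorUpdate)> {
        Some((self.get_counterparty_node_id(), self.funding_txo,
            ChannelMonitorUpdate { update_id: self.latest_monitor_update_id }))
    }
}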

lightning/src/ln/channelmanager.rs

Lines changed: 23 additions & 8 deletions
@@ -359,7 +359,7 @@ pub enum FailureCode {
     IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
 }
 
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
+type ShutdownResult = (Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
 
 /// Error type returned across the peer_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -501,11 +501,19 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-    /// Handle a ChannelMonitorUpdate
+    /// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
+    /// [`Self::MonitorUpdateRegeneratedOnStartup`] as the non-closing variant needs a public key
+    /// to handle channel resumption, whereas if the channel has been force-closed we do not need
+    /// the counterparty node_id.
     ///
     /// Note that any such events are lost on shutdown, so in general they must be updates which
     /// are regenerated on startup.
-    MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+    ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+    /// Handle a ChannelMonitorUpdate.
+    ///
+    /// Note that any such events are lost on shutdown, so in general they must be updates which
+    /// are regenerated on startup.
+    MonitorUpdateRegeneratedOnStartup((PublicKey, OutPoint, ChannelMonitorUpdate)),
 }
 
 #[derive(Debug)]
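A compilable sketch of the two-variant split described in the doc comments above, using stand-in types rather than LDK's own; resume_channel is a hypothetical hook, included only to illustrate why the non-closing variant carries the key.

#[derive(Clone, Copy)]
struct PublicKey;
#[derive(Clone, Copy)]
struct OutPoint;
struct ChannelMonitorUpdate;

enum BackgroundEvent {
    // Closing update: the channel is gone, so no counterparty key is carried.
    ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
    // Non-closing update: carries the counterparty node_id so the channel can
    // be resumed once the update completes.
    MonitorUpdateRegeneratedOnStartup((PublicKey, OutPoint, ChannelMonitorUpdate)),
}

fn handle(event: BackgroundEvent) {
    match event {
        BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
            // Apply and forget; there is no channel left to resume.
            apply_update(funding_txo, &update);
        }
        BackgroundEvent::MonitorUpdateRegeneratedOnStartup((node_id, funding_txo, update)) => {
            apply_update(funding_txo, &update);
            // Hypothetical hook: with the node_id in hand, normal operation on
            // the peer's channel can be resumed after the update lands.
            resume_channel(node_id, funding_txo);
        }
    }
}

fn apply_update(_txo: OutPoint, _update: &ChannelMonitorUpdate) {}
fn resume_channel(_node_id: PublicKey, _txo: OutPoint) {}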
@@ -2195,7 +2203,7 @@ where
             let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
             self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
         }
-        if let Some((funding_txo, monitor_update)) = monitor_update_option {
+        if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
             // There isn't anything we can do if we get an update failure - we're already
             // force-closing. The monitor update on the required in-memory copy should broadcast
             // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -3776,7 +3784,12 @@ where
 
         for event in background_events.drain(..) {
             match event {
-                BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                    // The channel has already been closed, so no use bothering to care about the
+                    // monitor updating completing.
+                    let _ = self.chain_monitor.update_channel(funding_txo, &update);
+                },
+                BackgroundEvent::MonitorUpdateRegeneratedOnStartup((_, funding_txo, update)) => {
                     // The channel has already been closed, so no use bothering to care about the
                     // monitor updating completing.
                     let _ = self.chain_monitor.update_channel(funding_txo, &update);
@@ -5691,12 +5704,14 @@ where
             // Channel::force_shutdown tries to make us do) as we may still be in initialization,
             // so we track the update internally and handle it when the user next calls
             // timer_tick_occurred, guaranteeing we're running normally.
-            if let Some((funding_txo, update)) = failure.0.take() {
+            if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
                 assert_eq!(update.updates.len(), 1);
                 if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                     assert!(should_broadcast);
                 } else { unreachable!(); }
-                self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
+                self.pending_background_events.lock().unwrap().push(
+                    BackgroundEvent::MonitorUpdateRegeneratedOnStartup(
+                        (counterparty_node_id, funding_txo, update)));
             }
             self.finish_force_close_channel(failure);
         }
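The comment in this hunk describes deferring the update until timer_tick_occurred. A minimal sketch of that defer-then-drain pattern follows, with simplified types standing in for LDK's ChannelManager and its fields; it is not the real implementation.

use std::sync::Mutex;

#[derive(Clone, Copy)]
struct PublicKey;
#[derive(Clone, Copy)]
struct OutPoint;
struct ChannelMonitorUpdate;

enum BackgroundEvent {
    MonitorUpdateRegeneratedOnStartup((PublicKey, OutPoint, ChannelMonitorUpdate)),
}

struct Manager {
    pending_background_events: Mutex<Vec<BackgroundEvent>>,
}

impl Manager {
    // At failure time we may still be initializing, so only enqueue the update.
    fn on_force_close(&self, node_id: PublicKey, txo: OutPoint, update: ChannelMonitorUpdate) {
        self.pending_background_events.lock().unwrap().push(
            BackgroundEvent::MonitorUpdateRegeneratedOnStartup((node_id, txo, update)));
    }

    // Called once we're known to be running normally; apply everything queued.
    fn timer_tick_occurred(&self) {
        let events: Vec<_> = self.pending_background_events.lock().unwrap().drain(..).collect();
        for event in events {
            match event {
                BackgroundEvent::MonitorUpdateRegeneratedOnStartup((_node_id, txo, update)) => {
                    apply_update(txo, &update);
                }
            }
        }
    }
}

fn apply_update(_txo: OutPoint, _update: &ChannelMonitorUpdate) {}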
@@ -7845,7 +7860,7 @@ where
78457860
update_id: CLOSED_CHANNEL_UPDATE_ID,
78467861
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
78477862
};
7848-
pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
7863+
pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
78497864
}
78507865
}
78517866
