
Commit 34d5f2a

Return the counterparty node_id as a part of a force-shutdown res
In the coming commits we'll need the counterparty node_id when handling a background monitor update as we may need to resume normal channel operation as a result. Thus, we go ahead and pipe it through from the shutdown end, as it makes the codepaths consistent. Sadly, the monitor-originated shutdown case doesn't allow for a required counterparty node_id as some versions of LDK didn't have it present in the ChannelMonitor.
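For readers skimming the diff, here is a condensed sketch of the widened `ShutdownResult` alias and how a call site that does not need the peer ignores the new element. It is assembled from the hunks below and is not a standalone program, since `PublicKey`, `OutPoint`, `ChannelMonitorUpdate`, `HTLCSource` and `PaymentHash` are LDK/secp256k1 types assumed to be in scope:

// Condensed from the channel.rs hunk below: the counterparty's PublicKey now
// travels alongside the funding outpoint and the force-close monitor update.
pub(crate) type ShutdownResult = (
    Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, // was Option<(OutPoint, ChannelMonitorUpdate)>
    Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>,
);

// Call sites that have no use for the node_id (e.g. the force-close path in
// channelmanager.rs) simply discard it when destructuring:
//     if let Some((_, funding_txo, monitor_update)) = shutdown_res.0 { /* apply update */ }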
Parent: 3ce1a5e

2 files changed: +36 −11 lines


lightning/src/ln/channel.rs (2 additions, 2 deletions)

@@ -434,7 +434,7 @@ pub(super) struct ReestablishResponses {
 
 /// The return type of `force_shutdown`
 pub(crate) type ShutdownResult = (
-    Option<(OutPoint, ChannelMonitorUpdate)>,
+    Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
     Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
 );
 
@@ -6263,7 +6263,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
         if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
             self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-            Some((funding_txo, ChannelMonitorUpdate {
+            Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
                 update_id: self.latest_monitor_update_id,
                 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
             }))

lightning/src/ln/channelmanager.rs (34 additions, 9 deletions)

@@ -499,11 +499,26 @@ struct ClaimablePayments {
 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
 /// quite some time lag.
 enum BackgroundEvent {
-    /// Handle a ChannelMonitorUpdate
+    /// Handle a ChannelMonitorUpdate which closes the channel. This is only separated from
+    /// [`Self::MonitorUpdateRegeneratedOnStartup`] as the non-closing variant needs a public key
+    /// to handle channel resumption, whereas if the channel has been force-closed we do not need
+    /// the counterparty node_id.
     ///
     /// Note that any such events are lost on shutdown, so in general they must be updates which
    /// are regenerated on startup.
-    MonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+    ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
+    /// Handle a ChannelMonitorUpdate which may or may not close the channel. In general this
+    /// should be used rather than [`Self::ClosingMonitorUpdateRegeneratedOnStartup`], however in
+    /// cases where the `counterparty_node_id` is not available as the channel has closed from a
+    /// [`ChannelMonitor`] error the other variant is acceptable.
+    ///
+    /// Note that any such events are lost on shutdown, so in general they must be updates which
+    /// are regenerated on startup.
+    MonitorUpdateRegeneratedOnStartup {
+        counterparty_node_id: PublicKey,
+        funding_txo: OutPoint,
+        update: ChannelMonitorUpdate
+    },
 }
 
 #[derive(Debug)]

@@ -2193,7 +2208,7 @@ where
             let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
             self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
         }
-        if let Some((funding_txo, monitor_update)) = monitor_update_option {
+        if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
             // There isn't anything we can do if we get an update failure - we're already
             // force-closing. The monitor update on the required in-memory copy should broadcast
             // the latest local state, which is the best we can do anyway. Thus, it is safe to

@@ -3774,7 +3789,12 @@ where
 
         for event in background_events.drain(..) {
             match event {
-                BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => {
+                    // The channel has already been closed, so no use bothering to care about the
+                    // monitor updating completing.
+                    let _ = self.chain_monitor.update_channel(funding_txo, &update);
+                },
+                BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, update, .. } => {
                     // The channel has already been closed, so no use bothering to care about the
                     // monitor updating completing.
                     let _ = self.chain_monitor.update_channel(funding_txo, &update);

@@ -5689,12 +5709,15 @@ where
             // Channel::force_shutdown tries to make us do) as we may still be in initialization,
             // so we track the update internally and handle it when the user next calls
             // timer_tick_occurred, guaranteeing we're running normally.
-            if let Some((funding_txo, update)) = failure.0.take() {
+            if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
                 assert_eq!(update.updates.len(), 1);
                 if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
                     assert!(should_broadcast);
                 } else { unreachable!(); }
-                self.pending_background_events.lock().unwrap().push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((funding_txo, update)));
+                self.pending_background_events.lock().unwrap().push(
+                    BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                        counterparty_node_id, funding_txo, update
+                    });
             }
             self.finish_force_close_channel(failure);
         }

@@ -7767,8 +7790,10 @@ where
                 log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
                     log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
                 let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
-                if let Some(monitor_update) = monitor_update {
-                    pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(monitor_update));
+                if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+                    pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+                        counterparty_node_id, funding_txo, update
+                    });
                 }
                 failed_htlcs.append(&mut new_failed_htlcs);
                 channel_closures.push_back((events::Event::ChannelClosed {

@@ -7843,7 +7868,7 @@ where
                     update_id: CLOSED_CHANNEL_UPDATE_ID,
                     updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
                 };
-                pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
+                pending_background_events.push(BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update)));
             }
         }
 
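Pulling the channelmanager.rs hunks together, startup-regenerated monitor updates now come in two flavors. A condensed sketch follows (field and type names as in the diff above; not a standalone program, since the types are LDK's):

// Both variants are applied the same way when the background events are drained
// (chain_monitor.update_channel), but only the struct variant carries the
// counterparty_node_id that the coming commits will use to resume normal channel
// operation once the update completes.
enum BackgroundEvent {
    // No node_id available: the close originated from a ChannelMonitor, and some
    // LDK versions did not store the counterparty's key in the monitor.
    ClosingMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)),
    // Preferred variant: the node_id is known because the update was regenerated
    // from channel state (e.g. force-shutdown of a channel whose monitor is stale
    // at deserialization).
    MonitorUpdateRegeneratedOnStartup {
        counterparty_node_id: PublicKey,
        funding_txo: OutPoint,
        update: ChannelMonitorUpdate,
    },
}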