Commit 8ae8d9f

Support async ChannelMonitorUpdates to closed chans at startup
One of the largest gaps in our async persistence functionality has been preimage (claim) updates to closed channels. Here we finally implement support for this (for updates which are generated during startup).

Thanks to all the work we've built up over the past many commits, this is a fairly straightforward patch, removing the immediate-completion logic from `claim_mpp_part` and adding the required in-flight tracking logic to `apply_post_close_monitor_update`.

Like in the during-runtime case in the previous commit, we sadly can't use the `handle_new_monitor_update` macro wholesale as it handles the `Channel` resumption as well, which we don't do here.
1 parent 66d051d commit 8ae8d9f
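
For readers unfamiliar with the mechanism, the in-flight tracking described above can be illustrated with a small, self-contained Rust sketch. All names and types below (FundingOutpoint, ClosedChannelTracker, CompletionAction, etc.) are hypothetical stand-ins for illustration only, not LDK's actual API: an update against a closed channel is recorded as in-flight before it is handed to the (possibly asynchronous) persister, and the completion action it blocks is only released once the persister reports the update durable.

    use std::collections::HashMap;

    // Hypothetical stand-ins for LDK's real types, purely for illustration.
    #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
    struct FundingOutpoint(u64);

    #[derive(Clone, Debug)]
    struct MonitorUpdate { update_id: u64 }

    #[derive(Debug)]
    enum CompletionAction { EmitPaymentForwarded }

    #[derive(Default)]
    struct ClosedChannelTracker {
        // Updates handed to the persister but not yet confirmed durable.
        in_flight_updates: HashMap<FundingOutpoint, Vec<MonitorUpdate>>,
        // Completion actions blocked on the in-flight updates above.
        blocked_actions: HashMap<FundingOutpoint, Vec<CompletionAction>>,
    }

    impl ClosedChannelTracker {
        // Record an update as in-flight and park its completion action until
        // the (possibly asynchronous) persister signals completion.
        fn apply_update(&mut self, chan: FundingOutpoint, update: MonitorUpdate, action: CompletionAction) {
            self.in_flight_updates.entry(chan).or_default().push(update);
            self.blocked_actions.entry(chan).or_default().push(action);
        }

        // Called once persistence of `update_id` finishes; returns any actions
        // that were blocked and can now run (e.g. surfacing PaymentForwarded).
        fn update_completed(&mut self, chan: FundingOutpoint, update_id: u64) -> Vec<CompletionAction> {
            if let Some(updates) = self.in_flight_updates.get_mut(&chan) {
                updates.retain(|u| u.update_id != update_id);
                if updates.is_empty() {
                    return self.blocked_actions.remove(&chan).unwrap_or_default();
                }
            }
            Vec::new()
        }
    }

    fn main() {
        let mut tracker = ClosedChannelTracker::default();
        let chan = FundingOutpoint(1);
        tracker.apply_update(chan, MonitorUpdate { update_id: 7 }, CompletionAction::EmitPaymentForwarded);
        // Nothing is released while the update is still in flight.
        assert!(tracker.update_completed(chan, 6).is_empty());
        // Once the persister reports the update durable, the blocked action is released.
        assert_eq!(tracker.update_completed(chan, 7).len(), 1);
    }

In the real patch the analogous bookkeeping lives in `peer_state.in_flight_monitor_updates` and `peer_state.monitor_update_blocked_actions`, as the channelmanager.rs diff below shows.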

File tree

2 files changed (+44, -38 lines)

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 21 additions & 16 deletions
@@ -3294,10 +3294,6 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	if !close_chans_before_reload {
 		check_closed_broadcast(&nodes[1], 1, true);
 		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
-	} else {
-		// While we forwarded the payment a while ago, we don't want to process events too early or
-		// we'll run background tasks we wanted to test individually.
-		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
 	}
 
 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3308,24 +3304,33 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Make sure the B<->C channel is still alive and well by sending a payment over it.
 	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
 	reconnect_args.pending_responding_commitment_signed.1 = true;
-	if !close_chans_before_reload {
-		// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
-		// will consider the forwarded payment complete and allow the B<->C
-		// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
-		// be allowed, and needs fixing.
-		reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-	}
+	// The B<->C `ChannelMonitorUpdate` shouldn't be allowed to complete, which is the
+	// equivalent to the responding `commitment_signed` being a duplicate for node B, thus we
+	// need to set the `pending_responding_commitment_signed_dup` flag.
+	reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
 	reconnect_args.pending_raa.1 = true;
 
 	reconnect_nodes(reconnect_args);
+
+	// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
+	// `PaymentForwarded` event will finally be released.
 	let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
-	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
-	if !close_chans_before_reload {
-		// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
-		// channel will fly, removing the payment preimage from it.
-		check_added_monitors(&nodes[1], 1);
+
+	// If the A<->B channel was closed before we reload, we'll replay the claim against it on
+	// reload, causing the `PaymentForwarded` event to get replayed.
+	let evs = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
+	for ev in evs {
+		if let Event::PaymentForwarded { .. } = ev { }
+		else {
+			panic!();
+		}
 	}
+
+	// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
+	// will fly, removing the payment preimage from it.
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	send_payment(&nodes[1], &[&nodes[2]], 100_000);
 }

lightning/src/ln/channelmanager.rs

Lines changed: 23 additions & 22 deletions
@@ -3972,11 +3972,10 @@ where
 	}
 
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
 	fn apply_post_close_monitor_update(
 		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
 		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
 		// Note that there may be some post-close updates which need to be well-ordered with
 		// respect to the `update_id`, so we hold the `peer_state` lock here.
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3987,16 +3986,21 @@ where
 		match peer_state.channel_by_id.entry(channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let in_flight = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
 						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if in_flight { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed };
+					return;
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 				}
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
 	}
 
 	/// When a channel is removed, two things need to happen:
@@ -4025,7 +4029,7 @@ where
 		}
 		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
 		}
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6342,9 +6346,7 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7265,32 +7267,31 @@ where
 		let payment_hash = payment_preimage.into();
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-		if !during_init {
-			if let Some(action) = action_opt {
-				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-					chan_id, action);
-				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-			}
+		if let Some(action) = action_opt {
+			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+				chan_id, action);
+			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+		}
 
+		if !during_init {
 			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 		} else {
 			// If we're running during init we cannot update a monitor directly - they probably
 			// haven't actually been loaded yet. Instead, push the monitor update as a background
 			// event.
-			// TODO: Track this update as pending and only complete the completion action when it
-			// finishes.
+
+			let in_flight_updates = peer_state.in_flight_monitor_updates
+				.entry(prev_hop.funding_txo)
+				.or_insert_with(Vec::new);
+			in_flight_updates.push(preimage_update.clone());
+
 			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 				counterparty_node_id,
 				funding_txo: prev_hop.funding_txo,
 				channel_id: prev_hop.channel_id,
 				update: preimage_update,
 			};
 			self.pending_background_events.lock().unwrap().push(event);
-
-			mem::drop(peer_state);
-			mem::drop(per_peer_state);
-
-			self.handle_monitor_update_completion_actions(action_opt);
 		}
 	}

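To summarize the during-init branch above with a self-contained sketch (hypothetical names, not LDK's API): while the ChannelMonitors are still being loaded, a preimage update cannot be applied directly, so it is recorded as in-flight and queued as a background event, then replayed once startup processing runs.

    use std::collections::HashMap;

    // Hypothetical stand-ins, for illustration only.
    #[derive(Debug)]
    struct PreimageUpdate { channel_id: u64, update_id: u64 }

    #[derive(Default)]
    struct StartupState {
        // Updates generated before the ChannelMonitors were loaded; replayed
        // later as background events.
        pending_background_events: Vec<PreimageUpdate>,
        // Updates handed out but not yet confirmed persisted, keyed by channel.
        in_flight_monitor_updates: HashMap<u64, Vec<u64>>,
        background_events_processed_since_startup: bool,
    }

    impl StartupState {
        // During init we cannot touch the monitors directly, so track the update
        // as in-flight (keeping its completion action blocked) and defer the apply.
        fn claim_during_init(&mut self, update: PreimageUpdate) {
            self.in_flight_monitor_updates
                .entry(update.channel_id)
                .or_default()
                .push(update.update_id);
            self.pending_background_events.push(update);
        }

        // After startup, drain the queued events and actually apply each update.
        fn process_background_events<F: FnMut(&PreimageUpdate)>(&mut self, mut apply: F) {
            for update in self.pending_background_events.drain(..) {
                apply(&update);
            }
            self.background_events_processed_since_startup = true;
        }
    }

    fn main() {
        let mut state = StartupState::default();
        state.claim_during_init(PreimageUpdate { channel_id: 42, update_id: 1 });
        assert_eq!(state.in_flight_monitor_updates[&42], vec![1]);

        let mut applied = Vec::new();
        state.process_background_events(|u| applied.push(u.update_id));
        assert_eq!(applied, vec![1]);
        assert!(state.background_events_processed_since_startup);
    }

In the real code the queued `BackgroundEvent::MonitorUpdateRegeneratedOnStartup` is handled by the hunk around line 6346 above, which simply re-applies the update via `apply_post_close_monitor_update`.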