Commit 995de2d
Support async ChannelMonitorUpdates to closed chans at startup
One of the largest gaps in our async persistence functionality has been preimage (claim) updates to closed channels. Here we finally implement support for this, for updates which are generated during startup.

Thanks to all the work we've built up over the past many commits, this is a fairly straightforward patch: we remove the immediate-completion logic from `claim_mpp_part` and add the required in-flight tracking logic to `apply_post_close_monitor_update`.

Like in the during-runtime case in the previous commit, we sadly can't use the `handle_new_monitor_update` macro wholesale, as it also handles the `Channel` resumption, which we don't do here.
1 parent dc37d4d commit 995de2d
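
Before diving into the diff, the core pattern is: instead of running a claim's completion action immediately, the preimage `ChannelMonitorUpdate` for the closed channel is recorded as in-flight and the action is parked until the (possibly asynchronous) persistence completes. The sketch below is a minimal, self-contained illustration of that bookkeeping; `in_flight_monitor_updates` and `monitor_update_blocked_actions` mirror the fields touched in the diff, but the surrounding types and methods are illustrative stand-ins, not the real `ChannelManager`.

    use std::collections::HashMap;

    // Illustrative stand-ins -- not LDK types.
    type ChannelId = [u8; 32];
    type MonitorUpdateId = u64;

    #[derive(Debug)]
    enum CompletionAction {
        // e.g. surfacing a `PaymentForwarded` event once the preimage is durably persisted
        EmitPaymentForwarded { fee_msat: Option<u64> },
    }

    #[derive(Default)]
    struct ClosedChannelClaims {
        // Updates handed to the persister that have not yet completed, keyed by channel.
        in_flight_monitor_updates: HashMap<ChannelId, Vec<MonitorUpdateId>>,
        // Actions we must not run until the updates for that channel are durable.
        monitor_update_blocked_actions: HashMap<ChannelId, Vec<CompletionAction>>,
    }

    impl ClosedChannelClaims {
        // Previously the action would run immediately; now it is parked behind the in-flight update.
        fn start_claim(&mut self, chan: ChannelId, update: MonitorUpdateId, action: CompletionAction) {
            self.in_flight_monitor_updates.entry(chan).or_default().push(update);
            self.monitor_update_blocked_actions.entry(chan).or_default().push(action);
        }

        // Called when the (possibly async) persister reports the update durable; returns any
        // actions that are now unblocked.
        fn update_completed(&mut self, chan: ChannelId, update: MonitorUpdateId) -> Vec<CompletionAction> {
            if let Some(updates) = self.in_flight_monitor_updates.get_mut(&chan) {
                updates.retain(|id| *id != update);
                if updates.is_empty() {
                    self.in_flight_monitor_updates.remove(&chan);
                    return self.monitor_update_blocked_actions.remove(&chan).unwrap_or_default();
                }
            }
            Vec::new()
        }
    }

    fn main() {
        let mut claims = ClosedChannelClaims::default();
        let chan = [0u8; 32];
        claims.start_claim(chan, 1, CompletionAction::EmitPaymentForwarded { fee_msat: Some(1000) });
        // The action is only released once the monitor update completes.
        assert_eq!(claims.update_completed(chan, 1).len(), 1);
    }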

File tree: 2 files changed (+44, -38 lines)
lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 21 additions & 16 deletions
@@ -3294,10 +3294,6 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	if !close_chans_before_reload {
 		check_closed_broadcast(&nodes[1], 1, true);
 		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
-	} else {
-		// While we forwarded the payment a while ago, we don't want to process events too early or
-		// we'll run background tasks we wanted to test individually.
-		expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
 	}
 
 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3308,24 +3304,33 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Make sure the B<->C channel is still alive and well by sending a payment over it.
 	let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
 	reconnect_args.pending_responding_commitment_signed.1 = true;
-	if !close_chans_before_reload {
-		// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
-		// will consider the forwarded payment complete and allow the B<->C
-		// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
-		// be allowed, and needs fixing.
-		reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
-	}
+	// The B<->C `ChannelMonitorUpdate` shouldn't be allowed to complete, which is the
+	// equivalent to the responding `commitment_signed` being a duplicate for node B, thus we
+	// need to set the `pending_responding_commitment_signed_dup` flag.
+	reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
 	reconnect_args.pending_raa.1 = true;
 
 	reconnect_nodes(reconnect_args);
+
+	// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
+	// `PaymentForwarded` event will finally be released.
 	let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
-	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
-	if !close_chans_before_reload {
-		// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
-		// channel will fly, removing the payment preimage from it.
-		check_added_monitors(&nodes[1], 1);
+
+	// If the A<->B channel was closed before we reload, we'll replay the claim against it on
+	// reload, causing the `PaymentForwarded` event to get replayed.
+	let evs = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
+	for ev in evs {
+		if let Event::PaymentForwarded { .. } = ev { }
+		else {
+			panic!();
+		}
 	}
+
+	// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
+	// will fly, removing the payment preimage from it.
+	check_added_monitors(&nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	send_payment(&nodes[1], &[&nodes[2]], 100_000);
 }
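
The reworked test asserts that the `PaymentForwarded` event stays pending until the blocked A<->B `ChannelMonitorUpdate` is forced to complete. The toy model below captures that invariant in isolation; `TestNode`, `claim_with_async_persist`, and `force_monitor_updated` are made-up names for illustration, not the functional-test utilities used above.

    /// Minimal model of the invariant the test checks: an event gated on a monitor update must
    /// stay queued while persistence is pending and is only surfaced once the update completes.
    #[derive(Debug, PartialEq)]
    enum TestEvent { PaymentForwarded { fee_msat: Option<u64> } }

    #[derive(Default)]
    struct TestNode {
        pending_monitor_updates: usize,
        gated_events: Vec<TestEvent>,
        ready_events: Vec<TestEvent>,
    }

    impl TestNode {
        /// A claim persisted asynchronously: the resulting event is gated on the update.
        fn claim_with_async_persist(&mut self, ev: TestEvent) {
            self.pending_monitor_updates += 1;
            self.gated_events.push(ev);
        }

        /// Equivalent in spirit to forcing the blocked monitor update to complete in the test.
        fn force_monitor_updated(&mut self) {
            if self.pending_monitor_updates > 0 {
                self.pending_monitor_updates -= 1;
            }
            if self.pending_monitor_updates == 0 {
                self.ready_events.append(&mut self.gated_events);
            }
        }

        fn get_and_clear_pending_events(&mut self) -> Vec<TestEvent> {
            std::mem::take(&mut self.ready_events)
        }
    }

    fn main() {
        let mut node = TestNode::default();
        node.claim_with_async_persist(TestEvent::PaymentForwarded { fee_msat: Some(1000) });
        assert!(node.get_and_clear_pending_events().is_empty()); // held while the update is pending
        node.force_monitor_updated();
        assert_eq!(node.get_and_clear_pending_events().len(), 1); // released once persisted
    }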

lightning/src/ln/channelmanager.rs

Lines changed: 23 additions & 22 deletions
@@ -3974,11 +3974,10 @@ where
 	}
 
 	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
-	#[must_use]
 	fn apply_post_close_monitor_update(
 		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
 		monitor_update: ChannelMonitorUpdate,
-	) -> ChannelMonitorUpdateStatus {
+	) {
 		// Note that there may be some post-close updates which need to be well-ordered with
 		// respect to the `update_id`, so we hold the `peer_state` lock here.
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3989,16 +3988,21 @@ where
 		match peer_state.channel_by_id.entry(channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
-					let in_flight = handle_new_monitor_update!(self, funding_txo,
+					handle_new_monitor_update!(self, funding_txo,
 						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
-					return if in_flight { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed };
+					return;
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
 				}
 			},
 			hash_map::Entry::Vacant(_) => {},
 		}
-		self.chain_monitor.update_channel(funding_txo, &monitor_update)
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
+
+		handle_new_monitor_update!(
+			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
+			logger, channel_id, POST_CHANNEL_CLOSE
+		);
 	}
 
 	/// When a channel is removed, two things need to happen:
@@ -4027,7 +4031,7 @@ where
 		}
 		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
 			debug_assert!(false, "This should have been handled in `locked_close_channel`");
-			let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
+			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
 		}
 		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
 			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6378,9 +6382,7 @@ where
 					let _ = self.chain_monitor.update_channel(funding_txo, &update);
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
-					// The monitor update will be replayed on startup if it doesnt complete, so no
-					// use bothering to care about the monitor update completing.
-					let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
+					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7324,32 +7326,31 @@ where
 		let payment_hash = payment_preimage.into();
 		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
-		if !during_init {
-			if let Some(action) = action_opt {
-				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
-					chan_id, action);
-				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
-			}
+		if let Some(action) = action_opt {
+			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+				chan_id, action);
+			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+		}
 
+		if !during_init {
 			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 		} else {
 			// If we're running during init we cannot update a monitor directly - they probably
 			// haven't actually been loaded yet. Instead, push the monitor update as a background
 			// event.
-			// TODO: Track this update as pending and only complete the completion action when it
-			// finishes.
+
+			let in_flight_updates = peer_state.in_flight_monitor_updates
+				.entry(prev_hop.funding_txo)
+				.or_insert_with(Vec::new);
+			in_flight_updates.push(preimage_update.clone());
+
 			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 				counterparty_node_id,
 				funding_txo: prev_hop.funding_txo,
 				channel_id: prev_hop.channel_id,
 				update: preimage_update,
 			};
			self.pending_background_events.lock().unwrap().push(event);
-
-			mem::drop(peer_state);
-			mem::drop(per_peer_state);
-
-			self.handle_monitor_update_completion_actions(action_opt);
 		}
 	}
