Commit e938ed7

Support async ChannelMonitorUpdates to closed chans at runtime
One of the largest gaps in our async persistence functionality has been preimage (claim) updates to closed channels. Here we finally implement support for this (for updates at runtime). Thanks to all the work we've built up over the past many commits, this is a well-contained patch within `claim_mpp_part`, pushing the generated `ChannelMonitorUpdate`s through the same pipeline we use for open channels. Sadly, we can't use the `handle_new_monitor_update` macro wholesale, as it also handles `Channel` resumption, which we don't do here.
1 parent 3395938 commit e938ed7
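For readers new to the async-persistence flow this commit extends, the core discipline looks roughly like the following. This is a self-contained toy model, not LDK's real API: `ClosedChannelUpdater` and `CompletionAction` are hypothetical names, and `UpdateStatus` merely stands in for `ChannelMonitorUpdateStatus`. The point it illustrates is that when a persister reports a monitor write as in-progress, the completion action (releasing an event, or letting the upstream channel drop the preimage) must be parked until the write is confirmed durable.

```rust
// Toy model of the gating this commit implements -- NOT LDK's actual types.

#[derive(Debug)]
enum CompletionAction {
    ReleasePaymentForwardedEvent,
    AllowUpstreamPreimageRemoval,
}

enum UpdateStatus {
    Completed,
    InProgress,
}

#[derive(Default)]
struct ClosedChannelUpdater {
    // Actions that must not run until the monitor write is durable.
    blocked_actions: Vec<CompletionAction>,
}

impl ClosedChannelUpdater {
    // Apply a preimage update to a closed channel's monitor. If persistence
    // is async (`InProgress`), park the completion action instead of running it.
    fn apply_update(&mut self, status: UpdateStatus, action: CompletionAction) {
        match status {
            UpdateStatus::Completed => Self::run(action),
            UpdateStatus::InProgress => self.blocked_actions.push(action),
        }
    }

    // Called once the background persister reports the write as durable.
    fn update_completed(&mut self) {
        for action in self.blocked_actions.drain(..) {
            Self::run(action);
        }
    }

    fn run(action: CompletionAction) {
        println!("running completion action: {:?}", action);
    }
}

fn main() {
    let mut updater = ClosedChannelUpdater::default();
    // An async persister only has the write in flight, so the action parks...
    updater.apply_update(
        UpdateStatus::InProgress,
        CompletionAction::ReleasePaymentForwardedEvent,
    );
    // ...and runs only once durability is signalled.
    updater.update_completed();
}
```

The two tests added below exercise exactly this gating, using the test harness to stall and later complete the monitor write.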

File tree

2 files changed: +135 −49 lines changed

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 108 additions & 0 deletions
@@ -3713,3 +3713,111 @@ fn test_partial_claim_mon_update_compl_actions() {
 	send_payment(&nodes[2], &[&nodes[3]], 100_000);
 	assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash));
 }
+
+
+#[test]
+fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
+	// One of the last features we implemented for async persistence was the correct blocking of
+	// the RAA(s) which remove a preimage from an outbound channel for a forwarded payment until
+	// the preimage write has durably landed in the closed inbound channel.
+	// This tests that behavior.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	// First open channels, route a payment, and force-close the first hop.
+	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+	let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000);
+
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+	check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[0], true);
+
+	let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_commit_tx.len(), 1);
+
+	mine_transaction(&nodes[1], &as_commit_tx[0]);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[1], true);
+
+	// Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim
+	// the payment on C and give B the preimage for it.
+	nodes[2].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[2], 1);
+	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	check_added_monitors!(nodes[1], 1);
+	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+	// At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for
+	// channel A to hit disk. Until it does so, it shouldn't ever let the preimage disappear from
+	// channel B's `ChannelMonitor`.
+	assert!(get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+
+	// Once we complete the `ChannelMonitorUpdate` on channel A, and the `ChannelManager` processes
+	// background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate`
+	// will fly and we'll drop the preimage from channel B's `ChannelMonitor`. We'll also release
+	// the `Event::PaymentForwarded`.
+	check_added_monitors!(nodes[1], 0);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	check_added_monitors!(nodes[1], 1);
+	assert!(!get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
+}
+
+#[test]
+fn test_claim_to_closed_channel_blocks_claimed_event() {
+	// One of the last features we implemented for async persistence was the correct blocking of
+	// event(s) until the preimage for a claimed HTLC is durably on disk in a `ChannelMonitor` for
+	// a closed channel.
+	// This tests that behavior.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// First open a channel, route a payment, and force-close the channel.
+	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+
+	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+	nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+	check_added_monitors!(nodes[0], 1);
+	let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+	check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[0], true);
+
+	let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(as_commit_tx.len(), 1);
+
+	mine_transaction(&nodes[1], &as_commit_tx[0]);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_broadcast!(nodes[1], true);
+
+	// Now that B has a pending payment with the inbound HTLC on a closed channel, claim the
+	// payment, but don't let the `ChannelMonitorUpdate` complete. This should prevent the
+	// `Event::PaymentClaimed` from being generated.
+	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+	nodes[1].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	// Once we complete the `ChannelMonitorUpdate` the `Event::PaymentClaimed` will become
+	// available.
+	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
+	expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
+}
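The key assertion in `test_claim_to_closed_channel_blocks_forwarded_preimage_removal` above is worth spelling out as an invariant: the forwarding node must keep the preimage in the outbound (still-open) channel's monitor until the write to the closed inbound channel's monitor is durable. A minimal sketch of that invariant, using hypothetical names (`Forwarder`, `inbound_write_durable`) that do not exist in LDK:

```rust
use std::collections::HashSet;

// Hypothetical stand-in for a channel's `ChannelMonitor` preimage set.
#[derive(Default)]
struct MonitorPreimages(HashSet<[u8; 32]>);

#[derive(Default)]
struct Forwarder {
    inbound_closed: MonitorPreimages, // channel A: force-closed inbound edge
    outbound: MonitorPreimages,       // channel B: still-open outbound edge
    inbound_write_durable: bool,      // has channel A's preimage update hit disk?
}

impl Forwarder {
    // Learning the preimage writes it to both monitors immediately.
    fn learn_preimage(&mut self, preimage: [u8; 32]) {
        self.inbound_closed.0.insert(preimage);
        self.outbound.0.insert(preimage);
    }

    // The outbound monitor may only forget the preimage (via the RAA dance)
    // once the closed inbound channel's write is durable.
    fn try_remove_outbound_preimage(&mut self, preimage: &[u8; 32]) -> bool {
        if self.inbound_write_durable {
            self.outbound.0.remove(preimage)
        } else {
            false // blocked: keep the preimage until durability is confirmed
        }
    }
}

fn main() {
    let mut fwd = Forwarder::default();
    let preimage = [42u8; 32];
    fwd.learn_preimage(preimage);
    assert!(!fwd.try_remove_outbound_preimage(&preimage)); // still blocked
    fwd.inbound_write_durable = true;                      // write completed
    assert!(fwd.try_remove_outbound_preimage(&preimage));  // now allowed
}
```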

lightning/src/ln/channelmanager.rs

Lines changed: 27 additions & 49 deletions
@@ -7199,36 +7199,41 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 
 		let preimage_update = ChannelMonitorUpdate {
 			update_id,
-			counterparty_node_id: prev_hop.counterparty_node_id,
+			counterparty_node_id: Some(counterparty_node_id),
 			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
 				payment_preimage,
 				payment_info,
 			}],
 			channel_id: Some(prev_hop.channel_id),
 		};
 
-		// Note that the below is race-y - we set the `update_id` above and then drop the peer_state
-		// lock before applying the update in `apply_post_close_monitor_update` (or via the
-		// background events pipeline). During that time, some other update could be created and
-		// then applied, resulting in `ChannelMonitorUpdate`s being applied out of order and causing
-		// a panic.
+		// Note that we do process the completion action here. This totally could be a
+		// duplicate claim, but we have no way of knowing without interrogating the
+		// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+		// generally always allowed to be duplicative (and it's specifically noted in
+		// `PaymentForwarded`).
+		let (action_opt, raa_blocker_opt) = completion_action(None, false);
 
-		mem::drop(peer_state);
-		mem::drop(per_peer_state);
+		if let Some(raa_blocker) = raa_blocker_opt {
+			peer_state.actions_blocking_raa_monitor_updates
+				.entry(prev_hop.channel_id)
+				.or_default()
+				.push(raa_blocker);
+		}
+
+		// Given that we're in a bit of a weird edge case, it's worth hashing the preimage
+		// to include the `payment_hash` in the log metadata here.
+		let payment_hash = payment_preimage.into();
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
 
 		if !during_init {
-			// We update the ChannelMonitor on the backward link, after
-			// receiving an `update_fulfill_htlc` from the forward link.
-			let update_res = self.apply_post_close_monitor_update(counterparty_node_id, prev_hop.channel_id, prev_hop.funding_txo, preimage_update);
-			if update_res != ChannelMonitorUpdateStatus::Completed {
-				// TODO: This needs to be handled somehow - if we receive a monitor update
-				// with a preimage we *must* somehow manage to propagate it to the upstream
-				// channel, or we must have an ability to receive the same event and try
-				// again on restart.
-				log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id), None),
-					"Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-					payment_preimage, update_res);
+			if let Some(action) = action_opt {
+				log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+					chan_id, action);
+				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 			}
+
+			handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
 		} else {
 			// If we're running during init we cannot update a monitor directly - they probably
 			// haven't actually been loaded yet. Instead, push the monitor update as a background
@@ -7242,39 +7247,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 				update: preimage_update,
 			};
 			self.pending_background_events.lock().unwrap().push(event);
-		}
 
-		// Note that we do process the completion action here. This totally could be a
-		// duplicate claim, but we have no way of knowing without interrogating the
-		// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
-		// generally always allowed to be duplicative (and it's specifically noted in
-		// `PaymentForwarded`).
-		let (action_opt, raa_blocker_opt) = completion_action(None, false);
-
-		if let Some(raa_blocker) = raa_blocker_opt {
-			// TODO: Avoid always blocking the world for the write lock here.
-			let mut per_peer_state = self.per_peer_state.write().unwrap();
-			let peer_state_mutex = per_peer_state.entry(counterparty_node_id).or_insert_with(||
-				Mutex::new(PeerState {
-					channel_by_id: new_hash_map(),
-					inbound_channel_request_by_id: new_hash_map(),
-					latest_features: InitFeatures::empty(),
-					pending_msg_events: Vec::new(),
-					in_flight_monitor_updates: BTreeMap::new(),
-					monitor_update_blocked_actions: BTreeMap::new(),
-					actions_blocking_raa_monitor_updates: BTreeMap::new(),
-					closed_channel_monitor_update_ids: BTreeMap::new(),
-					is_connected: false,
-				}));
-			let mut peer_state = peer_state_mutex.lock().unwrap();
+			mem::drop(peer_state);
+			mem::drop(per_peer_state);
 
-			peer_state.actions_blocking_raa_monitor_updates
-				.entry(prev_hop.channel_id)
-				.or_default()
-				.push(raa_blocker);
+			self.handle_monitor_update_completion_actions(action_opt);
 		}
-
-		self.handle_monitor_update_completion_actions(action_opt);
 	}
 
 	fn finalize_claims(&self, sources: Vec<HTLCSource>) {
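The `during_init` branch above is the one path that still bypasses the live pipeline: at startup, monitors may not be loaded yet, so the update is queued as a background event instead of being applied directly. A rough sketch of that dispatch, using hypothetical types rather than LDK's (the real runtime path goes through the `handle_new_monitor_update!` macro with its `POST_CHANNEL_CLOSE` variant):

```rust
// Hypothetical types sketching the runtime-vs-init dispatch above.

struct MonitorUpdate {
    update_id: u64,
}

#[derive(Default)]
struct Manager {
    // Updates queued while monitors may not yet be loaded (startup).
    pending_background_events: Vec<MonitorUpdate>,
}

impl Manager {
    fn apply_preimage_update(&mut self, update: MonitorUpdate, during_init: bool) {
        if !during_init {
            // Runtime: hand the update straight to the live monitor-update
            // pipeline, the same path used for open channels.
            self.apply_now(update);
        } else {
            // Startup: monitors probably aren't loaded yet, so park the
            // update as a background event and replay it after init.
            self.pending_background_events.push(update);
        }
    }

    fn apply_now(&self, update: MonitorUpdate) {
        println!("applying monitor update {}", update.update_id);
    }
}
```

Keeping the two branches in one place is what lets the completion action and RAA blocker be registered identically whether the update is applied now or replayed later.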
