
Commit 8a418ba

Support async ChannelMonitorUpdates to closed chans at runtime
One of the largest gaps in our async persistence functionality has been preimage (claim) updates to closed channels. Here we finally implement support for this (for updates at runtime). Thanks to all the work we've built up over the past many commits, this is a well-contained patch within `claim_mpp_part`, pushing the generated `ChannelMonitorUpdate`s through the same pipeline we use for open channels. Sadly, we can't use the `handle_new_monitor_update` macro wholesale, as it also handles `Channel` resumption, which we don't do here.
1 parent a4740f4 commit 8a418ba
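Conceptually, the runtime path now treats a closed channel like any other channel with an async `ChannelMonitorUpdate` in flight: the preimage update is queued against the channel, and the completion actions (the `Event::PaymentForwarded`/`Event::PaymentClaimed` and the RAA release on the outbound edge) stay blocked until the persister reports the write durable. Below is a minimal, std-only sketch of that ordering guarantee; all types and method names here are hypothetical stand-ins, not LDK's API.

use std::collections::HashMap;

/// Hypothetical stand-in for a queued `ChannelMonitorUpdate` (not LDK's type).
#[derive(Debug)]
struct PendingUpdate { update_id: u64 }

/// Per-closed-channel state: updates still being persisted asynchronously plus
/// the completion actions (events, RAA releases) blocked behind them.
#[derive(Default)]
struct ClosedChannelState {
    in_flight_updates: Vec<PendingUpdate>,
    blocked_actions: Vec<String>,
}

#[derive(Default)]
struct MonitorUpdateTracker {
    channels: HashMap<u64, ClosedChannelState>,
}

impl MonitorUpdateTracker {
    /// Queue a preimage write for a closed channel; the caller's completion
    /// action is held until the persister confirms the write is durable.
    fn push_update(&mut self, chan: u64, update: PendingUpdate, action: String) {
        let state = self.channels.entry(chan).or_default();
        state.in_flight_updates.push(update);
        state.blocked_actions.push(action);
    }

    /// The persister reports `update_id` durable; actions are released only
    /// once the channel has no in-flight updates left.
    fn update_completed(&mut self, chan: u64, update_id: u64) -> Vec<String> {
        let state = self.channels.entry(chan).or_default();
        state.in_flight_updates.retain(|u| u.update_id != update_id);
        if state.in_flight_updates.is_empty() {
            std::mem::take(&mut state.blocked_actions)
        } else {
            Vec::new()
        }
    }
}

fn main() {
    let mut tracker = MonitorUpdateTracker::default();
    tracker.push_update(
        1,
        PendingUpdate { update_id: 7 },
        "emit PaymentForwarded / release RAA on outbound channel".to_string(),
    );
    // An unrelated completion does not unblock anything.
    assert!(tracker.update_completed(1, 99).is_empty());
    // Once the preimage write itself is durable, the blocked action is released.
    let released = tracker.update_completed(1, 7);
    assert_eq!(released.len(), 1);
    println!("released: {released:?}");
}

The tests added below exercise exactly this: the preimage must not be removed from the outbound channel's `ChannelMonitor`, and the claim/forward events must not fire, until the closed channel's update completes.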

2 files changed: +135 −50 lines


lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 107 additions & 0 deletions
@@ -3713,3 +3713,110 @@ fn test_partial_claim_mon_update_compl_actions() {
     send_payment(&nodes[2], &[&nodes[3]], 100_000);
     assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash));
 }
+
+
+#[test]
+fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
+    // One of the last features for async persistence we implemented was the correct blocking of
+    // RAA(s) which remove a preimage from an outbound channel for a forwarded payment until the
+    // preimage write makes it durably to the closed inbound channel.
+    // This tests that behavior.
+    let chanmon_cfgs = create_chanmon_cfgs(3);
+    let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+    let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+    let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+    // First open channels, route a payment, and force-close the first hop.
+    let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+    let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000);
+
+    let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+    nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+    check_added_monitors!(nodes[0], 1);
+    let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+    check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+    check_closed_broadcast!(nodes[0], true);
+
+    let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+    assert_eq!(as_commit_tx.len(), 1);
+
+    mine_transaction(&nodes[1], &as_commit_tx[0]);
+    check_added_monitors!(nodes[1], 1);
+    check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+    check_closed_broadcast!(nodes[1], true);
+
+    // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim
+    // the payment on C and give B the preimage for it.
+    nodes[2].node.claim_funds(payment_preimage);
+    check_added_monitors!(nodes[2], 1);
+    expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+    let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+    chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+    nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+    check_added_monitors!(nodes[1], 1);
+    commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+    // At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for
+    // channel A to hit disk. Until it does so, it shouldn't ever let the preimage disappear from
+    // channel B's `ChannelMonitor`.
+    assert!(get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+
+    // Once we complete the `ChannelMonitorUpdate` on channel A, and the `ChannelManager` processes
+    // background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate`
+    // will fly and we'll drop the preimage from channel B's `ChannelMonitor`. We'll also release
+    // the `Event::PaymentForwarded`.
+    check_added_monitors!(nodes[1], 0);
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+    nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+    check_added_monitors!(nodes[1], 1);
+    assert!(!get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+    expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
+}
+
+#[test]
+fn test_claim_to_closed_channel_blocks_claimed_event() {
+    // One of the last features for async persistence we implemented was the correct blocking of
+    // event(s) until the preimage for a claimed HTLC is durably on disk in a ChannelMonitor for a
+    // closed channel.
+    // This tests that behavior.
+    let chanmon_cfgs = create_chanmon_cfgs(2);
+    let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+    let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+    let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+    // First open channels, route a payment, and force-close the first hop.
+    let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+
+    let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+    nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+    check_added_monitors!(nodes[0], 1);
+    let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
+    check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+    check_closed_broadcast!(nodes[0], true);
+
+    let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+    assert_eq!(as_commit_tx.len(), 1);
+
+    mine_transaction(&nodes[1], &as_commit_tx[0]);
+    check_added_monitors!(nodes[1], 1);
+    check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+    check_closed_broadcast!(nodes[1], true);
+
+    // Now that B has a pending payment with the inbound HTLC on a closed channel, claim the
+    // payment on disk, but don't let the `ChannelMonitorUpdate` complete. This should prevent the
+    // `Event::PaymentClaimed` from being generated.
+    chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+    nodes[1].node.claim_funds(payment_preimage);
+    check_added_monitors!(nodes[1], 1);
+    assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+    // Once we complete the `ChannelMonitorUpdate` the `Event::PaymentClaimed` will become
+    // available.
+    nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
+    expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
+}

lightning/src/ln/channelmanager.rs

Lines changed: 28 additions & 50 deletions
@@ -7296,37 +7296,42 @@ where

         let mut preimage_update = ChannelMonitorUpdate {
             update_id: 0, // set in set_closed_chan_next_monitor_update_id
-            counterparty_node_id: prev_hop.counterparty_node_id,
+            counterparty_node_id: None,
             updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                 payment_preimage,
                 payment_info,
             }],
             channel_id: Some(prev_hop.channel_id),
         };
-
-        // Note that the below is race-y - we set the `update_id` here and then drop the peer_state
-        // lock before applying the update in `apply_post_close_monitor_update` (or via the
-        // background events pipeline). During that time, some other update could be created and
-        // then applied, resultin in `ChannelMonitorUpdate`s being applied out of order and causing
-        // a panic.
         Self::set_closed_chan_next_monitor_update_id(&mut *peer_state, prev_hop.channel_id, &mut preimage_update);

-        mem::drop(peer_state);
-        mem::drop(per_peer_state);
+        // Note that we do process the completion action here. This totally could be a
+        // duplicate claim, but we have no way of knowing without interrogating the
+        // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+        // generally always allowed to be duplicative (and it's specifically noted in
+        // `PaymentForwarded`).
+        let (action_opt, raa_blocker_opt) = completion_action(None, false);
+
+        if let Some(raa_blocker) = raa_blocker_opt {
+            peer_state.actions_blocking_raa_monitor_updates
+                .entry(prev_hop.channel_id)
+                .or_default()
+                .push(raa_blocker);
+        }
+
+        // Given the fact that we're in a bit of a weird edge case, it's worth hashing the preimage
+        // to include the `payment_hash` in the log metadata here.
+        let payment_hash = payment_preimage.into();
+        let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));

         if !during_init {
-            // We update the ChannelMonitor on the backward link, after
-            // receiving an `update_fulfill_htlc` from the forward link.
-            let update_res = self.apply_post_close_monitor_update(counterparty_node_id, prev_hop.channel_id, prev_hop.funding_txo, preimage_update);
-            if update_res != ChannelMonitorUpdateStatus::Completed {
-                // TODO: This needs to be handled somehow - if we receive a monitor update
-                // with a preimage we *must* somehow manage to propagate it to the upstream
-                // channel, or we must have an ability to receive the same event and try
-                // again on restart.
-                log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id), None),
-                    "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-                    payment_preimage, update_res);
+            if let Some(action) = action_opt {
+                log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
+                    chan_id, action);
+                peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
             }
+
+            handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
         } else {
             // If we're running during init we cannot update a monitor directly - they probably
             // haven't actually been loaded yet. Instead, push the monitor update as a background

@@ -7340,39 +7345,12 @@ where
                 update: preimage_update,
             };
             self.pending_background_events.lock().unwrap().push(event);
-        }

-        // Note that we do process the completion action here. This totally could be a
-        // duplicate claim, but we have no way of knowing without interrogating the
-        // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
-        // generally always allowed to be duplicative (and it's specifically noted in
-        // `PaymentForwarded`).
-        let (action_opt, raa_blocker_opt) = completion_action(None, false);
-
-        if let Some(raa_blocker) = raa_blocker_opt {
-            // TODO: Avoid always blocking the world for the write lock here.
-            let mut per_peer_state = self.per_peer_state.write().unwrap();
-            let peer_state_mutex = per_peer_state.entry(counterparty_node_id).or_insert_with(||
-                Mutex::new(PeerState {
-                    channel_by_id: new_hash_map(),
-                    inbound_channel_request_by_id: new_hash_map(),
-                    latest_features: InitFeatures::empty(),
-                    pending_msg_events: Vec::new(),
-                    in_flight_monitor_updates: BTreeMap::new(),
-                    monitor_update_blocked_actions: BTreeMap::new(),
-                    actions_blocking_raa_monitor_updates: BTreeMap::new(),
-                    closed_channel_monitor_update_ids: BTreeMap::new(),
-                    is_connected: false,
-                }));
-            let mut peer_state = peer_state_mutex.lock().unwrap();
+            mem::drop(peer_state);
+            mem::drop(per_peer_state);

-            peer_state.actions_blocking_raa_monitor_updates
-                .entry(prev_hop.channel_id)
-                .or_default()
-                .push(raa_blocker);
+            self.handle_monitor_update_completion_actions(action_opt);
         }
-
-        self.handle_monitor_update_completion_actions(action_opt);
     }

     fn finalize_claims(&self, sources: Vec<HTLCSource>) {
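For readers skimming the hunks above: at runtime the closed-channel update is now handed to the same in-flight accounting used for open channels (the `handle_new_monitor_update!` path), with its completion action tracked against it, while during init it is still deferred as a background event because monitors may not be loaded yet. A rough, self-contained model of that branch follows; the types are hypothetical stand-ins, not the real `ChannelManager` internals.

/// Hypothetical stand-in for a `ChannelMonitorUpdate` (not LDK's type).
#[derive(Debug)]
struct MonitorUpdate { update_id: u64 }

/// Hypothetical mirror of the manager's startup-replay queue.
#[derive(Debug)]
enum BackgroundEvent { MonitorUpdateRegeneratedOnStartup(MonitorUpdate) }

#[derive(Default, Debug)]
struct Pipeline {
    in_flight_updates: Vec<MonitorUpdate>,
    blocked_actions: Vec<&'static str>,
    background_events: Vec<BackgroundEvent>,
}

impl Pipeline {
    fn apply_closed_chan_update(
        &mut self, update: MonitorUpdate, action: Option<&'static str>, during_init: bool,
    ) {
        if !during_init {
            // Runtime: remember the completion action, then hand the update to the
            // normal async-persist pipeline so it is tracked as in flight.
            if let Some(action) = action {
                self.blocked_actions.push(action);
            }
            self.in_flight_updates.push(update);
        } else {
            // Init: monitors may not be loaded yet, so queue a background event
            // for the manager to replay once startup completes.
            self.background_events
                .push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup(update));
        }
    }
}

fn main() {
    let mut pipeline = Pipeline::default();
    pipeline.apply_closed_chan_update(MonitorUpdate { update_id: 1 }, Some("PaymentForwarded"), false);
    pipeline.apply_closed_chan_update(MonitorUpdate { update_id: 2 }, None, true);
    assert_eq!(pipeline.in_flight_updates[0].update_id, 1);
    println!("{pipeline:?}");
}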
