Commit 55fefe4
Add infra to block ChannelMonitorUpdates on forwarded claims
When we forward a payment and receive an `update_fulfill_htlc` message from the downstream channel, we immediately claim the HTLC on the upstream channel, before even doing a `commitment_signed` dance on the downstream channel. This implies that our `ChannelMonitorUpdate`s "go out" in the right order - first we ensure we'll get our money by writing the preimage down, then we write the update that resolves giving money on the downstream node. This is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are generated, but of course looking forward we want to support asynchronous updates, which may complete in any order.

Here we add infrastructure to handle downstream `ChannelMonitorUpdate`s which are blocked on an upstream preimage-containing one. We don't yet actually do the blocking, which will come in a future commit.
1 parent 70a0dff commit 55fefe4
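To make the ordering constraint concrete, below is a minimal, self-contained Rust sketch of the bookkeeping this commit introduces. The names `ChannelId`, `Blocker` and `BlockerBook` are hypothetical simplifications, not LDK's internal API; in the real code the map is the new `actions_blocking_raa_monitor_updates` field keyed by channel ID, and the blockers are `RAAMonitorUpdateBlockingAction`s. The point is only the lifecycle: a blocker is recorded when the upstream claim's monitor update is generated, and the downstream channel's RAA monitor update stays held back until that blocker is released.

```rust
use std::collections::BTreeMap;

/// Hypothetical stand-in for a channel id.
type ChannelId = [u8; 32];

/// Hypothetical stand-in for the blocker introduced by this commit: it names the
/// upstream (inbound) claim whose preimage-carrying monitor update must complete
/// before the downstream channel's RAA monitor update may go out.
#[derive(Clone, PartialEq, Eq, Debug)]
struct Blocker { upstream_channel_id: ChannelId, htlc_id: u64 }

#[derive(Default)]
struct BlockerBook {
    /// Per-blocked-channel list of outstanding blockers, analogous to the new
    /// `actions_blocking_raa_monitor_updates` map.
    blocked: BTreeMap<ChannelId, Vec<Blocker>>,
}

impl BlockerBook {
    /// Record that `downstream` must wait for an upstream claim to be persisted.
    fn block(&mut self, downstream: ChannelId, blocker: Blocker) {
        self.blocked.entry(downstream).or_default().push(blocker);
    }
    /// Called once the upstream preimage-carrying update has completed.
    fn release(&mut self, downstream: ChannelId, blocker: &Blocker) {
        if let Some(list) = self.blocked.get_mut(&downstream) {
            list.retain(|b| b != blocker);
        }
    }
    /// The downstream RAA monitor update may only proceed once nothing blocks it.
    fn can_update(&self, downstream: ChannelId) -> bool {
        self.blocked.get(&downstream).map_or(true, |list| list.is_empty())
    }
}

fn main() {
    let downstream = [2u8; 32];
    let blocker = Blocker { upstream_channel_id: [1u8; 32], htlc_id: 0 };

    let mut book = BlockerBook::default();
    book.block(downstream, blocker.clone());
    assert!(!book.can_update(downstream)); // downstream update held back
    book.release(downstream, &blocker);    // upstream preimage update completed
    assert!(book.can_update(downstream));  // safe to let the downstream update out
}
```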

File tree

1 file changed (+122, -26)

lightning/src/ln/channelmanager.rs

Lines changed: 122 additions & 26 deletions
@@ -525,12 +525,24 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// event can be generated.
 	PaymentClaimed { payment_hash: PaymentHash },
 	/// Indicates an [`events::Event`] should be surfaced to the user.
-	EmitEvent { event: events::Event },
+	EmitEventAndFreeOtherChannel {
+		event: events::Event,
+		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+	},
 }

 impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 	(0, PaymentClaimed) => { (0, payment_hash, required) },
-	(2, EmitEvent) => { (0, event, upgradable_required) },
+	(2, EmitEventAndFreeOtherChannel) => {
+		(0, event, upgradable_required),
+		// LDK prior to 0.0.115 did not have this field as the monitor update application order was
+		// required by clients. If we downgrade to something prior to 0.0.115 this may result in
+		// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
+		// support async monitor updates even in LDK 0.0.115 and once we do we'll require no
+		// downgrades to prior versions. Thus, while this would break on downgrade, we don't
+		// support it even without downgrade, so if it breaks its not on us ¯\_(ツ)_/¯.
+		(1, downstream_counterparty_and_funding_outpoint, option),
+	},
 );

 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -547,6 +559,29 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
 	};
 );

+#[derive(Clone, PartialEq, Eq, Debug)]
+pub(crate) enum RAAMonitorUpdateBlockingAction {
+	/// The inbound channel's channel_id
+	ForwardedPaymentOtherChannelClaim {
+		channel_id: [u8; 32],
+		htlc_id: u64,
+	},
+}
+
+impl RAAMonitorUpdateBlockingAction {
+	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
+		Self::ForwardedPaymentOtherChannelClaim {
+			channel_id: prev_hop.outpoint.to_channel_id(),
+			htlc_id: prev_hop.htlc_id,
+		}
+	}
+}
+
+impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
+	(0, ForwardedPaymentOtherChannelClaim) => { (0, channel_id, required), (2, htlc_id, required) }
+;);
+
+
 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// `temporary_channel_id` or `channel_id` -> `channel`.
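A note on why `RAAMonitorUpdateBlockingAction` derives `PartialEq`/`Eq`: the blocker recorded when the upstream claim is generated must later be removable by plain value equality (see the `retain(|iter| iter != blocker)` call further down), so it is identified solely by the inbound channel's ID and the HTLC ID. A rough, self-contained sketch of that identity, with `InboundHtlc` as a hypothetical stand-in for the fields `from_prev_hop_data` reads off `HTLCPreviousHopData`:

```rust
/// Hypothetical stand-in for the two fields of `HTLCPreviousHopData` that the
/// real `from_prev_hop_data` helper uses (funding outpoint -> channel id, htlc id).
struct InboundHtlc {
    channel_id: [u8; 32],
    htlc_id: u64,
}

/// Simplified mirror of `RAAMonitorUpdateBlockingAction`.
#[derive(Clone, PartialEq, Eq, Debug)]
enum BlockingAction {
    ForwardedPaymentOtherChannelClaim { channel_id: [u8; 32], htlc_id: u64 },
}

impl BlockingAction {
    fn from_inbound_htlc(htlc: &InboundHtlc) -> Self {
        Self::ForwardedPaymentOtherChannelClaim {
            channel_id: htlc.channel_id,
            htlc_id: htlc.htlc_id,
        }
    }
}

fn main() {
    let htlc = InboundHtlc { channel_id: [42u8; 32], htlc_id: 7 };
    // Recording and later completing the same claim yield equal blockers, which
    // is what lets the release path drop exactly this entry by value equality.
    assert_eq!(
        BlockingAction::from_inbound_htlc(&htlc),
        BlockingAction::from_inbound_htlc(&htlc)
    );
}
```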
@@ -575,6 +610,11 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
 	/// duplicates do not occur, so such channels should fail without a monitor update completing.
 	monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
+	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
+	/// will remove a preimage that needs to be durably in an upstream channel first), we put an
+	/// entry here to note that the channel with the key's ID is blocked on a set of actions.
+	actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
 	/// The peer is currently connected (i.e. we've seen a
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
@@ -4459,23 +4499,24 @@ where
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
+				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
 				let res = self.claim_funds_from_hop(hop_data, payment_preimage,
 					|htlc_claim_value_msat| {
 						if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
 							let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
 								Some(claimed_htlc_value - forwarded_htlc_value)
 							} else { None };

-							let prev_channel_id = Some(prev_outpoint.to_channel_id());
-							let next_channel_id = Some(next_channel_id);
-
-							Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
-								fee_earned_msat,
-								claim_from_onchain_tx: from_onchain,
-								prev_channel_id,
-								next_channel_id,
-								outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-							}})
+							Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+								event: events::Event::PaymentForwarded {
+									fee_earned_msat,
+									claim_from_onchain_tx: from_onchain,
+									prev_channel_id: Some(prev_outpoint.to_channel_id()),
+									next_channel_id: Some(next_channel_id),
+									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+								},
+								downstream_counterparty_and_funding_outpoint: None,
+							})
 						} else { None }
 					});
 				if let Err((pk, err)) = res {
@@ -4502,8 +4543,13 @@ where
 					}, None));
 				}
 			},
-			MonitorUpdateCompletionAction::EmitEvent { event } => {
+			MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+				event, downstream_counterparty_and_funding_outpoint
+			} => {
 				self.pending_events.lock().unwrap().push_back((event, None));
+				if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
+					self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+				}
 			},
 		}
 	}
@@ -5350,6 +5396,36 @@ where
 		}
 	}

+	fn raa_monitor_updates_held(&self,
+		actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+		channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+	) -> bool {
+		actions_blocking_raa_monitor_updates
+			.get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+			action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+				channel_funding_outpoint,
+				counterparty_node_id,
+			})
+		})
+	}
+
+	pub(crate) fn test_raa_monitor_updates_held(&self, counterparty_node_id: PublicKey,
+		channel_id: [u8; 32])
+	-> bool {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+			let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+			let peer_state = &mut *peer_state_lck;
+
+			if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
+				return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+					chan.get_funding_txo().unwrap(), counterparty_node_id);
+			}
+		}
+		false
+	}
+
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let (htlcs_to_fail, res) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
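`raa_monitor_updates_held` combines two conditions: the channel has outstanding blocking actions recorded against it, or a still-pending event carries a `ReleaseRAAChannelMonitorUpdate` completion action for the same channel. A simplified, self-contained sketch of that check follows; the types are hypothetical stand-ins, not the `ChannelManager` internals.

```rust
use std::collections::BTreeMap;

type ChannelId = [u8; 32];

/// Hypothetical stand-in for `RAAMonitorUpdateBlockingAction`.
#[derive(Clone, PartialEq, Eq, Debug)]
struct Blocker { upstream_channel_id: ChannelId, htlc_id: u64 }

/// Hypothetical stand-in for `EventCompletionAction::ReleaseRAAChannelMonitorUpdate`.
#[derive(Clone, PartialEq, Eq, Debug)]
struct ReleaseOnEvent { channel_id: ChannelId }

/// Mirrors the shape of `raa_monitor_updates_held`: the update stays blocked if
/// either the per-channel blocker list is non-empty or some pending event will
/// release this channel's monitor update once the user handles it.
fn raa_monitor_updates_held(
    blockers: &BTreeMap<ChannelId, Vec<Blocker>>,
    pending_event_actions: &[Option<ReleaseOnEvent>],
    channel_id: ChannelId,
) -> bool {
    blockers.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
        || pending_event_actions.iter().any(|action| {
            action == &Some(ReleaseOnEvent { channel_id })
        })
}

fn main() {
    let chan = [9u8; 32];
    let mut blockers: BTreeMap<ChannelId, Vec<Blocker>> = BTreeMap::new();
    assert!(!raa_monitor_updates_held(&blockers, &[], chan));

    // An outstanding blocker holds the update back.
    blockers.entry(chan).or_default().push(Blocker { upstream_channel_id: [1u8; 32], htlc_id: 0 });
    assert!(raa_monitor_updates_held(&blockers, &[], chan));

    // So does a pending event that will release this channel later.
    blockers.clear();
    let pending = [Some(ReleaseOnEvent { channel_id: chan })];
    assert!(raa_monitor_updates_held(&blockers, &pending, chan));
}
```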
@@ -6013,25 +6089,29 @@ where
 		self.pending_outbound_payments.clear_pending_payments()
 	}

-	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint) {
+	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 		let mut errors = Vec::new();
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
 				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
 				let peer_state = &mut *peer_state_lck;
-				if self.pending_events.lock().unwrap().iter()
-					.any(|(_ev, action_opt)| action_opt == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-						channel_funding_outpoint, counterparty_node_id
-					}))
-				{
-					// Check that, while holding the peer lock, we don't have another event
-					// blocking any monitor updates for this channel. If we do, let those
-					// events be the ones that ultimately release the monitor update(s).
-					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another event is pending",
+
+				if let Some(blocker) = &completed_blocker {
+					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+						.get_mut(&channel_funding_outpoint.to_channel_id())
+					{
+						blockers.retain(|iter| iter != blocker);
+					}
+				}
+
+				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+					channel_funding_outpoint, counterparty_node_id) {
+					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
 						log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
 					break;
 				}
+
 				if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
 					debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
 					if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
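The ordering inside the reworked `handle_monitor_update_release` matters: the blocker corresponding to the just-completed update is removed first, and only then is the held check re-run, so a channel with several outstanding blockers (or a pending release event) is not unblocked early. A simplified sketch of that sequencing, again using hypothetical stand-in types rather than the real `ChannelManager` state:

```rust
use std::collections::BTreeMap;

type ChannelId = [u8; 32];

#[derive(Clone, PartialEq, Eq, Debug)]
struct Blocker { upstream_channel_id: ChannelId, htlc_id: u64 }

/// Returns true if the channel's next blocked monitor update may now be released.
/// Mirrors the shape of the updated `handle_monitor_update_release`: drop the
/// completed blocker (if any) before re-checking whether anything still holds
/// the channel back.
fn try_release(
    blockers: &mut BTreeMap<ChannelId, Vec<Blocker>>,
    channel_id: ChannelId,
    completed_blocker: Option<&Blocker>,
) -> bool {
    if let Some(blocker) = completed_blocker {
        if let Some(list) = blockers.get_mut(&channel_id) {
            list.retain(|b| b != blocker);
        }
    }
    // Only release once no blockers remain for this channel.
    blockers.get(&channel_id).map_or(true, |list| list.is_empty())
}

fn main() {
    let chan = [3u8; 32];
    let a = Blocker { upstream_channel_id: [1u8; 32], htlc_id: 1 };
    let b = Blocker { upstream_channel_id: [2u8; 32], htlc_id: 2 };

    let mut blockers = BTreeMap::new();
    blockers.insert(chan, vec![a.clone(), b.clone()]);

    // One upstream claim completed, but another is still outstanding.
    assert!(!try_release(&mut blockers, chan, Some(&a)));
    // The second completion clears the last blocker and the update can go out.
    assert!(try_release(&mut blockers, chan, Some(&b)));
}
```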
@@ -6073,7 +6153,7 @@ where
 				EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 					channel_funding_outpoint, counterparty_node_id
 				} => {
-					self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint);
+					self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
 				}
 			}
 		}
@@ -6749,6 +6829,7 @@ where
 					latest_features: init_msg.features.clone(),
 					pending_msg_events: Vec::new(),
 					monitor_update_blocked_actions: BTreeMap::new(),
+					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					is_connected: true,
 				}));
 			},
@@ -7943,6 +8024,7 @@ where
 				latest_features: Readable::read(reader)?,
 				pending_msg_events: Vec::new(),
 				monitor_update_blocked_actions: BTreeMap::new(),
+				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: false,
 			};
 			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -8023,7 +8105,7 @@ where
 		let mut claimable_htlc_purposes = None;
 		let mut claimable_htlc_onion_fields = None;
 		let mut pending_claiming_payments = Some(HashMap::new());
-		let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+		let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
 		let mut events_override = None;
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
@@ -8348,7 +8430,21 @@ where
 	}

 	for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
-		if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+		if let Some(peer_state) = per_peer_state.get(&node_id) {
+			for (_, actions) in monitor_update_blocked_actions.iter() {
+				for action in actions.iter() {
+					if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+						downstream_counterparty_and_funding_outpoint:
+							Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+					} = action {
+						if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+							blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
+								.entry(blocked_channel_outpoint.to_channel_id())
+								.or_insert_with(Vec::new).push(blocking_action.clone());
+						}
+					}
+				}
+			}
 			peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
 		} else {
 			log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
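Since `actions_blocking_raa_monitor_updates` is not serialized itself, the read path rebuilds it by scanning the persisted completion actions: every `EmitEventAndFreeOtherChannel` that names a blocked channel has its blocking action re-inserted under that channel's ID. A rough, self-contained sketch of the rebuild, using simplified stand-ins for the persisted data:

```rust
use std::collections::BTreeMap;

type ChannelId = [u8; 32];

#[derive(Clone, PartialEq, Eq, Debug)]
struct Blocker { upstream_channel_id: ChannelId, htlc_id: u64 }

/// Simplified stand-in for a persisted `MonitorUpdateCompletionAction`.
enum CompletionAction {
    PaymentClaimed,
    /// `Some((blocked_channel_id, blocker))` plays the role of
    /// `downstream_counterparty_and_funding_outpoint` here.
    EmitEventAndFreeOtherChannel(Option<(ChannelId, Blocker)>),
}

/// Rebuilds the per-channel blocker index from the persisted actions, mirroring
/// the loop added to `ChannelManager` deserialization in this commit.
fn rebuild_blockers(actions: &[CompletionAction]) -> BTreeMap<ChannelId, Vec<Blocker>> {
    let mut blockers: BTreeMap<ChannelId, Vec<Blocker>> = BTreeMap::new();
    for action in actions {
        if let CompletionAction::EmitEventAndFreeOtherChannel(Some((blocked_channel, blocker))) = action {
            blockers.entry(*blocked_channel).or_insert_with(Vec::new).push(blocker.clone());
        }
    }
    blockers
}

fn main() {
    let blocked_channel = [5u8; 32];
    let actions = vec![
        CompletionAction::PaymentClaimed,
        CompletionAction::EmitEventAndFreeOtherChannel(Some((
            blocked_channel,
            Blocker { upstream_channel_id: [4u8; 32], htlc_id: 11 },
        ))),
        CompletionAction::EmitEventAndFreeOtherChannel(None),
    ];
    let blockers = rebuild_blockers(&actions);
    assert_eq!(blockers.get(&blocked_channel).map(|v| v.len()), Some(1));
}
```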
