
Commit fc643f5

Add infra to block ChannelMonitorUpdates on forwarded claims
When we forward a payment and receive an `update_fulfill_htlc` message from the downstream channel, we immediately claim the HTLC on the upstream channel, before even doing a `commitment_signed` dance on the downstream channel. This implies that our `ChannelMonitorUpdate`s "go out" in the right order - first we ensure we'll get our money by writing the preimage down, then we write the update that resolves giving money on the downstream node.

This is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are generated, but of course looking forward we want to support asynchronous updates, which may complete in any order.

Here we add infrastructure to handle downstream `ChannelMonitorUpdate`s which are blocked on an upstream preimage-containing one. We don't yet actually do the blocking, which will come in a future commit.
1 parent f5b6e11 commit fc643f5
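To make the ordering concrete, the sketch below models the bookkeeping this commit introduces. The names `Blocker` and `BlockingState` are hypothetical stand-ins for `RAAMonitorUpdateBlockingAction` and the per-peer `actions_blocking_raa_monitor_updates` map added in the diff; this is a simplified model of the idea, not LDK code. A downstream channel's RAA `ChannelMonitorUpdate` stays held until every blocker recorded against it (each one identifying an upstream preimage-carrying update) has been marked complete.

use std::collections::BTreeMap;

/// Hypothetical stand-in for `RAAMonitorUpdateBlockingAction`: identifies the upstream,
/// preimage-carrying monitor update that a downstream RAA update must wait for.
#[derive(Clone, PartialEq, Eq, Debug)]
struct Blocker {
    upstream_channel_id: [u8; 32],
    htlc_id: u64,
}

/// Hypothetical per-peer bookkeeping: downstream channel id -> blockers that must clear
/// before that channel's RAA `ChannelMonitorUpdate` may be released.
#[derive(Default)]
struct BlockingState {
    actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<Blocker>>,
}

impl BlockingState {
    /// Record that a downstream channel's RAA update is blocked on an upstream claim.
    fn block(&mut self, downstream_channel_id: [u8; 32], blocker: Blocker) {
        self.actions_blocking_raa_monitor_updates
            .entry(downstream_channel_id)
            .or_insert_with(Vec::new)
            .push(blocker);
    }

    /// Drop the matching blocker once the upstream preimage-carrying update has completed.
    fn release(&mut self, downstream_channel_id: [u8; 32], completed: &Blocker) {
        if let Some(blockers) = self.actions_blocking_raa_monitor_updates.get_mut(&downstream_channel_id) {
            blockers.retain(|b| b != completed);
        }
    }

    /// Whether the downstream channel's RAA monitor update must still be held back.
    fn raa_update_held(&self, downstream_channel_id: [u8; 32]) -> bool {
        self.actions_blocking_raa_monitor_updates
            .get(&downstream_channel_id)
            .map(|v| !v.is_empty())
            .unwrap_or(false)
    }
}

fn main() {
    let mut state = BlockingState::default();
    let downstream = [2u8; 32];
    let blocker = Blocker { upstream_channel_id: [1u8; 32], htlc_id: 7 };

    // Claiming the forwarded HTLC upstream: note the block before the downstream RAA update.
    state.block(downstream, blocker.clone());
    assert!(state.raa_update_held(downstream));

    // The upstream preimage update has persisted; the downstream RAA update may now proceed.
    state.release(downstream, &blocker);
    assert!(!state.raa_update_held(downstream));
}

In the actual change, the blocker is created via `RAAMonitorUpdateBlockingAction::from_prev_hop_data` when the upstream claim is generated and removed again in `handle_monitor_update_release`.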

File tree: 1 file changed (+122 −26)

lightning/src/ln/channelmanager.rs

@@ -520,12 +520,24 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// event can be generated.
 	PaymentClaimed { payment_hash: PaymentHash },
 	/// Indicates an [`events::Event`] should be surfaced to the user.
-	EmitEvent { event: events::Event },
+	EmitEventAndFreeOtherChannel {
+		event: events::Event,
+		downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>,
+	},
 }
 
 impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
 	(0, PaymentClaimed) => { (0, payment_hash, required) },
-	(2, EmitEvent) => { (0, event, upgradable_required) },
+	(2, EmitEventAndFreeOtherChannel) => {
+		(0, event, upgradable_required),
+		// LDK prior to 0.0.115 did not have this field as the monitor update application order was
+		// required by clients. If we downgrade to something prior to 0.0.115 this may result in
+		// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
+		// support async monitor updates even in LDK 0.0.115 and once we do we'll require no
+		// downgrades to prior versions. Thus, while this would break on downgrade, we don't
+		// support it even without downgrade, so if it breaks its not on us ¯\_(ツ)_/¯.
+		(1, downstream_counterparty_and_funding_outpoint, option),
+	},
 );
 
 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -542,6 +554,29 @@ impl_writeable_tlv_based_enum!(EventCompletionAction,
 	};
 );
 
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub(crate) enum RAAMonitorUpdateBlockingAction {
+	/// The inbound channel's channel_id
+	ForwardedPaymentOtherChannelClaim {
+		channel_id: [u8; 32],
+		htlc_id: u64,
+	},
+}
+
+impl RAAMonitorUpdateBlockingAction {
+	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
+		Self::ForwardedPaymentOtherChannelClaim {
+			channel_id: prev_hop.outpoint.to_channel_id(),
+			htlc_id: prev_hop.htlc_id,
+		}
+	}
+}
+
+impl_writeable_tlv_based_enum!(RAAMonitorUpdateBlockingAction,
+	(0, ForwardedPaymentOtherChannelClaim) => { (0, channel_id, required), (2, htlc_id, required) }
+;);
+
+
 /// State we hold per-peer.
 pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// `temporary_channel_id` or `channel_id` -> `channel`.
@@ -570,6 +605,11 @@ pub(super) struct PeerState<Signer: ChannelSigner> {
 	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
 	/// duplicates do not occur, so such channels should fail without a monitor update completing.
 	monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
+	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
+	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
+	/// will remove a preimage that needs to be durably in an upstream channel first), we put an
+	/// entry here to note that the channel with the key's ID is blocked on a set of actions.
+	actions_blocking_raa_monitor_updates: BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
 	/// The peer is currently connected (i.e. we've seen a
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
@@ -4468,23 +4508,24 @@ where
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
+				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
 				let res = self.claim_funds_from_hop(hop_data, payment_preimage,
 					|htlc_claim_value_msat| {
 						if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
 							let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
 								Some(claimed_htlc_value - forwarded_htlc_value)
 							} else { None };
 
-							let prev_channel_id = Some(prev_outpoint.to_channel_id());
-							let next_channel_id = Some(next_channel_id);
-
-							Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
-								fee_earned_msat,
-								claim_from_onchain_tx: from_onchain,
-								prev_channel_id,
-								next_channel_id,
-								outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-							}})
+							Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+								event: events::Event::PaymentForwarded {
+									fee_earned_msat,
+									claim_from_onchain_tx: from_onchain,
+									prev_channel_id: Some(prev_outpoint.to_channel_id()),
+									next_channel_id: Some(next_channel_id),
+									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+								},
+								downstream_counterparty_and_funding_outpoint: None,
+							})
 						} else { None }
 					});
 				if let Err((pk, err)) = res {
@@ -4511,8 +4552,13 @@ where
 					}, None));
 				}
 			},
-			MonitorUpdateCompletionAction::EmitEvent { event } => {
+			MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+				event, downstream_counterparty_and_funding_outpoint
+			} => {
 				self.pending_events.lock().unwrap().push_back((event, None));
+				if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint {
+					self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker));
+				}
 			},
 		}
 	}
@@ -5359,6 +5405,36 @@ where
 		}
 	}
 
+	fn raa_monitor_updates_held(&self,
+		actions_blocking_raa_monitor_updates: &BTreeMap<[u8; 32], Vec<RAAMonitorUpdateBlockingAction>>,
+		channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey
+	) -> bool {
+		actions_blocking_raa_monitor_updates
+			.get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false)
+		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
+			action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+				channel_funding_outpoint,
+				counterparty_node_id,
+			})
+		})
+	}
+
+	pub(crate) fn test_raa_monitor_updates_held(&self, counterparty_node_id: PublicKey,
+		channel_id: [u8; 32])
+	-> bool {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+			let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+			let peer_state = &mut *peer_state_lck;
+
+			if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
+				return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+					chan.get_funding_txo().unwrap(), counterparty_node_id);
+			}
+		}
+		false
+	}
+
 	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
 		let (htlcs_to_fail, res) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
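Beyond the per-channel blocker list, `raa_monitor_updates_held` above also keeps holding if a still-queued event's completion action would release an RAA monitor update for the same channel. Below is a simplified, hypothetical model of that two-condition check (not the LDK API; the names are illustrative only):

use std::collections::BTreeMap;

type ChannelId = [u8; 32];

/// Hypothetical stand-in for `EventCompletionAction::ReleaseRAAChannelMonitorUpdate`.
#[derive(PartialEq, Eq)]
struct ReleaseRaaUpdate {
    channel_id: ChannelId,
}

/// Held if explicit blockers remain for the channel, or if a pending event will release an
/// RAA monitor update for the same channel once it is handled.
fn raa_update_held(
    blockers: &BTreeMap<ChannelId, Vec<u64>>,
    pending_event_actions: &[Option<ReleaseRaaUpdate>],
    channel_id: ChannelId,
) -> bool {
    blockers.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
        || pending_event_actions.iter().any(|action| {
            matches!(action, Some(a) if a.channel_id == channel_id)
        })
}

fn main() {
    let blockers: BTreeMap<ChannelId, Vec<u64>> = BTreeMap::new();
    let pending = vec![Some(ReleaseRaaUpdate { channel_id: [9u8; 32] }), None];

    // No explicit blockers, but an unhandled event still pins this channel's RAA update.
    assert!(raa_update_held(&blockers, &pending, [9u8; 32]));
    assert!(!raa_update_held(&blockers, &pending, [1u8; 32]));
}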
@@ -6020,25 +6096,29 @@ where
 		self.pending_outbound_payments.clear_pending_payments()
 	}
 
-	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint) {
+	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
 		let mut errors = Vec::new();
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
 				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
 				let peer_state = &mut *peer_state_lck;
-				if self.pending_events.lock().unwrap().iter()
-					.any(|(_ev, action_opt)| action_opt == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
-						channel_funding_outpoint, counterparty_node_id
-					}))
-				{
-					// Check that, while holding the peer lock, we don't have another event
-					// blocking any monitor updates for this channel. If we do, let those
-					// events be the ones that ultimately release the monitor update(s).
-					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another event is pending",
+
+				if let Some(blocker) = &completed_blocker {
+					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
+						.get_mut(&channel_funding_outpoint.to_channel_id())
+					{
+						blockers.retain(|iter| iter != blocker);
+					}
+				}
+
+				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+					channel_funding_outpoint, counterparty_node_id) {
+					log_trace!(self.logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
 						log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
 					break;
 				}
+
 				if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
 					debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
 					if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
@@ -6080,7 +6160,7 @@ where
 					EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 						channel_funding_outpoint, counterparty_node_id
 					} => {
-						self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint);
+						self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None);
 					}
 				}
 			}
@@ -6731,6 +6811,7 @@ where
 					latest_features: init_msg.features.clone(),
 					pending_msg_events: Vec::new(),
 					monitor_update_blocked_actions: BTreeMap::new(),
+					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					is_connected: true,
 				}));
 			},
@@ -7876,6 +7957,7 @@ where
 				latest_features: Readable::read(reader)?,
 				pending_msg_events: Vec::new(),
 				monitor_update_blocked_actions: BTreeMap::new(),
+				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: false,
 			};
 			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
@@ -7959,7 +8041,7 @@ where
 	let mut claimable_htlc_purposes = None;
 	let mut claimable_htlc_onion_fields = None;
 	let mut pending_claiming_payments = Some(HashMap::new());
-	let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
+	let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
 	let mut events_override = None;
 	read_tlv_fields!(reader, {
 		(1, pending_outbound_payments_no_retry, option),
@@ -8284,7 +8366,21 @@ where
 	}
 
 	for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
-		if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
+		if let Some(peer_state) = per_peer_state.get(&node_id) {
+			for (_, actions) in monitor_update_blocked_actions.iter() {
+				for action in actions.iter() {
+					if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+						downstream_counterparty_and_funding_outpoint:
+							Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+					} = action {
+						if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+							blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
+								.entry(blocked_channel_outpoint.to_channel_id())
+								.or_insert_with(Vec::new).push(blocking_action.clone());
+						}
+					}
+				}
+			}
 			peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
 		} else {
 			log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
