Skip to content

Commit 5259cd5

Browse files
committed
Re-claim forwarded HTLCs on startup
Now that we let `commitment_signed` `ChannelMonitorUpdate`s from a downstream channel complete prior to the preimage `ChannelMonitorUpdate` on the upstream channel, we may not get a `update_fulfill_htlc` replay on startup. Thus, we have to ensure any payment preimages contained in that downstream update are re-claimed on startup. Here we do this during the existing walk of the `ChannelMonitor` preimages for closed channels.
1 parent 4454b42 commit 5259cd5

File tree

1 file changed

+91
-25
lines changed

1 file changed

+91
-25
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 91 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -4588,7 +4588,7 @@ where
45884588
for htlc in sources.drain(..) {
45894589
if let Err((pk, err)) = self.claim_funds_from_hop(
45904590
htlc.prev_hop, payment_preimage,
4591-
|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
4591+
|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }), false)
45924592
{
45934593
if let msgs::ErrorAction::IgnoreError = err.err.action {
45944594
// We got a temporary failure updating monitor, but will claim the
@@ -4618,7 +4618,7 @@ where
46184618
}
46194619

46204620
fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
4621-
prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
4621+
prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc, during_init: bool)
46224622
-> Result<(), (PublicKey, MsgHandleErrInternal)> {
46234623
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
46244624

@@ -4648,14 +4648,26 @@ where
46484648
log_bytes!(chan_id), action);
46494649
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
46504650
}
4651-
let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
4652-
peer_state, per_peer_state, chan);
4653-
if let Err(e) = res {
4654-
// TODO: This is a *critical* error - we probably updated the outbound edge
4655-
// of the HTLC's monitor with a preimage. We should retry this monitor
4656-
// update over and over again until morale improves.
4657-
log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
4658-
return Err((counterparty_node_id, e));
4651+
if !during_init {
4652+
let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
4653+
peer_state, per_peer_state, chan);
4654+
if let Err(e) = res {
4655+
// TODO: This is a *critical* error - we probably updated the outbound edge
4656+
// of the HTLC's monitor with a preimage. We should retry this monitor
4657+
// update over and over again until morale improves.
4658+
log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
4659+
return Err((counterparty_node_id, e));
4660+
}
4661+
} else {
4662+
// If we're running during init we cannot update a monitor directly -
4663+
// they probably haven't actually been loaded yet. Instead, push the
4664+
// monitor update as a background event.
4665+
self.pending_background_events.lock().unwrap().push(
4666+
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
4667+
counterparty_node_id,
4668+
funding_txo: prev_hop.outpoint,
4669+
update: monitor_update.clone(),
4670+
});
46594671
}
46604672
}
46614673
return Ok(());
@@ -4668,16 +4680,34 @@ where
46684680
payment_preimage,
46694681
}],
46704682
};
4671-
// We update the ChannelMonitor on the backward link, after
4672-
// receiving an `update_fulfill_htlc` from the forward link.
4673-
let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
4674-
if update_res != ChannelMonitorUpdateStatus::Completed {
4675-
// TODO: This needs to be handled somehow - if we receive a monitor update
4676-
// with a preimage we *must* somehow manage to propagate it to the upstream
4677-
// channel, or we must have an ability to receive the same event and try
4678-
// again on restart.
4679-
log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
4680-
payment_preimage, update_res);
4683+
4684+
if !during_init {
4685+
// We update the ChannelMonitor on the backward link, after
4686+
// receiving an `update_fulfill_htlc` from the forward link.
4687+
let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
4688+
if update_res != ChannelMonitorUpdateStatus::Completed {
4689+
// TODO: This needs to be handled somehow - if we receive a monitor update
4690+
// with a preimage we *must* somehow manage to propagate it to the upstream
4691+
// channel, or we must have an ability to receive the same event and try
4692+
// again on restart.
4693+
log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
4694+
payment_preimage, update_res);
4695+
}
4696+
} else {
4697+
// If we're running during init we cannot update a monitor directly - they probably
4698+
// haven't actually been loaded yet. Instead, push the monitor update as a background
4699+
// event.
4700+
// Note that while it's safe to use `ClosingMonitorUpdateRegeneratedOnStartup` here (the
4701+
// channel is already closed) we need to ultimately handle the monitor update
4702+
// completion action only after we've completed the monitor update. This is the only
4703+
// way to guarantee this update *will* be regenerated on startup (otherwise if this was
4704+
// from a forwarded HTLC the downstream preimage may be deleted before we claim
4705+
// upstream). Thus, we need to transition to some new `BackgroundEvent` type which will
4706+
// complete the monitor update completion action from `completion_action`.
4707+
self.pending_background_events.lock().unwrap().push(
4708+
BackgroundEvent::ClosingMonitorUpdateRegeneratedOnStartup((
4709+
prev_hop.outpoint, preimage_update,
4710+
)));
46814711
}
46824712
// Note that we do process the completion action here. This totally could be a
46834713
// duplicate claim, but we have no way of knowing without interrogating the
@@ -4692,9 +4722,10 @@ where
46924722
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
46934723
}
46944724

4695-
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
4725+
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32], during_init: bool) {
46964726
match source {
46974727
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
4728+
debug_assert!(!during_init);
46984729
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
46994730
},
47004731
HTLCSource::PreviousHopData(hop_data) => {
@@ -4717,7 +4748,7 @@ where
47174748
downstream_counterparty_and_funding_outpoint: None,
47184749
})
47194750
} else { None }
4720-
});
4751+
}, during_init);
47214752
if let Err((pk, err)) = res {
47224753
let result: Result<(), _> = Err(err);
47234754
let _ = handle_error!(self, result, pk);
@@ -5455,7 +5486,7 @@ where
54555486
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
54565487
}
54575488
};
5458-
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
5489+
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id, false);
54595490
Ok(())
54605491
}
54615492

@@ -5825,7 +5856,7 @@ where
58255856
MonitorEvent::HTLCEvent(htlc_update) => {
58265857
if let Some(preimage) = htlc_update.payment_preimage {
58275858
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
5828-
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
5859+
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id(), false);
58295860
} else {
58305861
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
58315862
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -8385,6 +8416,11 @@ where
83858416
// Note that we have to do the above replays before we push new monitor updates.
83868417
pending_background_events.append(&mut close_background_events);
83878418

8419+
// If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
8420+
// should ensure we try them again on the inbound edge. We put them here and do so after we
8421+
// have a fully-constructed `ChannelManager` at the end.
8422+
let mut pending_claims_to_replay = Vec::new();
8423+
83888424
{
83898425
// If we're tracking pending payments, ensure we haven't lost any by looking at the
83908426
// ChannelMonitor data for any channels for which we do not have authorative state
@@ -8395,7 +8431,8 @@ where
83958431
// We only rebuild the pending payments map if we were most recently serialized by
83968432
// 0.0.102+
83978433
for (_, monitor) in args.channel_monitors.iter() {
8398-
if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
8434+
let counterparty_opt = id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id());
8435+
if counterparty_opt.is_none() {
83998436
for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
84008437
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
84018438
if path.hops.is_empty() {
@@ -8489,6 +8526,30 @@ where
84898526
}
84908527
}
84918528
}
8529+
8530+
// Whether the downstream channel was closed or not, try to re-apply any payment
8531+
// preimages from it which may be needed in upstream channels for forwarded
8532+
// payments.
8533+
let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
8534+
.into_iter()
8535+
.filter_map(|(htlc_source, (htlc, preimage_opt))| {
8536+
if let HTLCSource::PreviousHopData(_) = htlc_source {
8537+
if let Some(payment_preimage) = preimage_opt {
8538+
Some((htlc_source, payment_preimage, htlc.amount_msat,
8539+
counterparty_opt.is_none(), // i.e. the downstream chan is closed
8540+
monitor.get_funding_txo().0.to_channel_id()))
8541+
} else { None }
8542+
} else {
8543+
// If it was an outbound payment, we've handled it above - if a preimage
8544+
// came in and we persisted the `ChannelManager` we either handled it and
8545+
// are good to go or the channel force-closed - we don't have to handle the
8546+
// channel still live case here.
8547+
None
8548+
}
8549+
});
8550+
for tuple in outbound_claimed_htlcs_iter {
8551+
pending_claims_to_replay.push(tuple);
8552+
}
84928553
}
84938554
}
84948555

@@ -8740,6 +8801,11 @@ where
87408801
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
87418802
}
87428803

8804+
for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
8805+
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
8806+
downstream_closed, downstream_chan_id, true);
8807+
}
8808+
87438809
//TODO: Broadcast channel update for closed channels, but only after we've made a
87448810
//connection or two.
87458811

0 commit comments

Comments
 (0)