Commit 5b729df
Handle claim result event generation in claim_funds_from_hop
Currently `claim_funds` and `claim_funds_internal` call `claim_funds_from_hop` and then surface an `Event` to the user informing them of the forwarded/claimed payment based on its result. In both places we assume that a claim "completed" even if a monitor update is being done async. Instead, here we push that event generation through a `MonitorUpdateCompletionAction` and a call to `handle_monitor_update_completion_actions`. This will allow us to hold the event(s) until async monitor updates complete in the future.
1 parent: 59ebb6f
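For reference, a minimal standalone sketch of the completion-action machinery the message describes. The enum variants (`PaymentClaimed`, `EmitEvent`) and the handler name `handle_monitor_update_completion_actions` are taken from the diff below; the stand-in types (`Manager`, the trimmed-down `Event` and `ClaimingPayment`) and the handler body are assumptions inferred from how the call sites use it, not the code from the tree.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct PaymentHash([u8; 32]);

enum Event {
	PaymentClaimed { payment_hash: PaymentHash, amount_msat: u64 },
	PaymentForwarded { fee_earned_msat: Option<u64> },
}

// Both variants appear verbatim in the diff below.
enum MonitorUpdateCompletionAction {
	PaymentClaimed { payment_hash: PaymentHash },
	EmitEvent { event: Event },
}

// Stand-in for the entry tracked in `pending_claimed_payments`.
struct ClaimingPayment { amount_msat: u64 }

// Stand-in for `ChannelManager`, reduced to the two fields the handler touches.
struct Manager {
	pending_claimed_payments: Mutex<HashMap<PaymentHash, ClaimingPayment>>,
	pending_events: Mutex<Vec<Event>>,
}

impl Manager {
	// Assumed handler body: resolve each action into the user-facing event it
	// was deferred for. Holding these until async monitor updates complete is
	// exactly what the commit message says this enables.
	fn handle_monitor_update_completion_actions<I: IntoIterator<Item = MonitorUpdateCompletionAction>>(&self, actions: I) {
		for action in actions {
			match action {
				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
					// Removing the pending-claim entry makes the event fire at
					// most once even if the action is processed twice.
					if let Some(ClaimingPayment { amount_msat }) =
						self.pending_claimed_payments.lock().unwrap().remove(&payment_hash)
					{
						self.pending_events.lock().unwrap().push(Event::PaymentClaimed { payment_hash, amount_msat });
					}
				},
				MonitorUpdateCompletionAction::EmitEvent { event } => {
					self.pending_events.lock().unwrap().push(event);
				},
			}
		}
	}
}

fn main() {
	let mgr = Manager {
		pending_claimed_payments: Mutex::new(HashMap::new()),
		pending_events: Mutex::new(Vec::new()),
	};
	let hash = PaymentHash([0x42; 32]);
	mgr.pending_claimed_payments.lock().unwrap().insert(hash, ClaimingPayment { amount_msat: 10_000 });
	// `Option<T>` implements `IntoIterator`, which is presumably why the call
	// sites in the diff can pass `completion_action(..)` (an `Option`) directly.
	mgr.handle_monitor_update_completion_actions(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash: hash }));
	assert_eq!(mgr.pending_events.lock().unwrap().len(), 1);
}
```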

File tree: 1 file changed (+43 −50)

lightning/src/ln/channelmanager.rs

```diff
@@ -4300,7 +4300,6 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 		let mut expected_amt_msat = None;
 		let mut valid_mpp = true;
 		let mut errs = Vec::new();
-		let mut claimed_any_htlcs = false;
 		let mut channel_state = Some(self.channel_state.lock().unwrap());
 		for htlc in sources.iter() {
 			let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
@@ -4350,13 +4349,14 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 		if valid_mpp {
 			for htlc in sources.drain(..) {
 				if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
-				match self.claim_funds_from_hop(channel_state.take().unwrap(), htlc.prev_hop, payment_preimage) {
+				match self.claim_funds_from_hop(channel_state.take().unwrap(), htlc.prev_hop, payment_preimage,
+					|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
+				{
 					ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
 						if let msgs::ErrorAction::IgnoreError = err.err.action {
 							// We got a temporary failure updating monitor, but will claim the
 							// HTLC when the monitor updating is restored (or on chain).
 							log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
-							claimed_any_htlcs = true;
 						} else { errs.push((pk, err)); }
 					},
 					ClaimFundsFromHop::PrevHopForceClosed => {
@@ -4371,7 +4371,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 						// available to be claimed. Thus, it does not make sense to set
 						// `claimed_any_htlcs`.
 					},
-					ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
+					ClaimFundsFromHop::Success(_) => {},
 				}
 			}
 		}
@@ -4385,14 +4385,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 				let receiver = HTLCDestination::FailedPayment { payment_hash };
 				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 			}
-		}
-
-		let ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id } =
-			self.claimable_payments.lock().unwrap().pending_claimed_payments.remove(&payment_hash).unwrap();
-		if claimed_any_htlcs {
-			self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
-				payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
-			});
+			self.claimable_payments.lock().unwrap().pending_claimed_payments.remove(&payment_hash);
 		}
 
 		// Now we can handle any errors which were generated.
@@ -4402,12 +4395,16 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 		}
 	}
 
-	fn claim_funds_from_hop(&self, mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
+	fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
+		mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
+		prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
+	-> ClaimFundsFromHop {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 
 		let chan_id = prev_hop.outpoint.to_channel_id();
 		let channel_state = &mut *channel_state_lock;
 		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+			let counterparty_node_id = chan.get().get_counterparty_node_id();
 			match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
 				Ok(msgs_monitor_option) => {
 					if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
@@ -4417,10 +4414,11 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 								log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
 									"Failed to update channel monitor with preimage {:?}: {:?}",
 									payment_preimage, e);
+								let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
+								mem::drop(channel_state_lock);
+								self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
 								return ClaimFundsFromHop::MonitorUpdateFail(
-									chan.get().get_counterparty_node_id(),
-									handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-									Some(htlc_value_msat)
+									counterparty_node_id, err, Some(htlc_value_msat)
 								);
 							}
 						}
@@ -4439,6 +4437,8 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 							}
 						});
 					}
+					mem::drop(channel_state_lock);
+					self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
 					return ClaimFundsFromHop::Success(htlc_value_msat);
 				} else {
 					return ClaimFundsFromHop::DuplicateClaim;
@@ -4453,11 +4453,12 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 								payment_preimage, e);
 						},
 					}
-					let counterparty_node_id = chan.get().get_counterparty_node_id();
 					let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
 					if drop {
 						chan.remove_entry();
 					}
+					mem::drop(channel_state_lock);
+					self.handle_monitor_update_completion_actions(completion_action(None));
 					return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
 				},
 			}
@@ -4479,6 +4480,13 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 				log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
 					payment_preimage, update_res);
 			}
+			mem::drop(channel_state_lock);
+			// Note that we do process the completion action here. This totally could be a
+			// duplicate claim, but we have no way of knowing without interrogating the
+			// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
+			// generally always allowed to be duplicative (and it's specifically noted in
+			// `PaymentForwarded`).
+			self.handle_monitor_update_completion_actions(completion_action(None));
 			return ClaimFundsFromHop::PrevHopForceClosed
 		}
 	}
@@ -4553,43 +4561,28 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
-				let res = self.claim_funds_from_hop(channel_state_lock, hop_data, payment_preimage);
-				let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
-				let htlc_claim_value_msat = match res {
-					ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
-					ClaimFundsFromHop::Success(amt) => Some(amt),
-					_ => None,
-				};
-				if let ClaimFundsFromHop::PrevHopForceClosed = res {
-					// Note that we do *not* set `claimed_htlc` to false here. In fact, this
-					// totally could be a duplicate claim, but we have no way of knowing
-					// without interrogating the `ChannelMonitor` we've provided the above
-					// update to. Instead, we simply document in `PaymentForwarded` that this
-					// can happen.
-				}
+				let res = self.claim_funds_from_hop(channel_state_lock, hop_data, payment_preimage,
+					|htlc_claim_value_msat| {
+						if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+							let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+								Some(claimed_htlc_value - forwarded_htlc_value)
+							} else { None };
+
+							let prev_channel_id = Some(prev_outpoint.to_channel_id());
+							let next_channel_id = Some(next_channel_id);
+
+							Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
+								fee_earned_msat,
+								claim_from_onchain_tx: from_onchain,
+								prev_channel_id,
+								next_channel_id,
+							}})
+						} else { None }
+					});
 				if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
 					let result: Result<(), _> = Err(err);
 					let _ = handle_error!(self, result, pk);
 				}
-
-				if claimed_htlc {
-					if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
-						let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
-							Some(claimed_htlc_value - forwarded_htlc_value)
-						} else { None };
-
-						let mut pending_events = self.pending_events.lock().unwrap();
-						let prev_channel_id = Some(prev_outpoint.to_channel_id());
-						let next_channel_id = Some(next_channel_id);
-
-						pending_events.push(events::Event::PaymentForwarded {
-							fee_earned_msat,
-							claim_from_onchain_tx: from_onchain,
-							prev_channel_id,
-							next_channel_id,
-						});
-					}
-				}
 			},
 		}
 	}
```
