Commit 8e5b498

Refactor incoming HTLC accept checks out from Channel::update_add_htlc
In the future, we plan to completely remove `decode_update_add_htlc_onion` and replace it with a batched variant. This refactor improves readability in its current form, contains no functional changes, and allows us to reuse the incoming HTLC acceptance checks in the batched variant.
1 parent 72d7ae3 commit 8e5b498
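
To illustrate the shape of the refactor, the sketch below shows the split this commit moves toward: a read-only acceptance check that reports a (reason, failure code) pair, run by the caller before the state-mutating update_add_htlc. This is a minimal, self-contained Rust sketch, not LDK code; the Channel and UpdateAddHtlc types, their fields, and the simplified dust check are illustrative stand-ins, and only the Result<(), (&'static str, u16)> error shape mirrors the new can_accept_incoming_htlc signature in the diff.

// Minimal sketch of the post-refactor call shape; every type below is a
// simplified stand-in for illustration, not a real LDK structure.

struct UpdateAddHtlc {
	amount_msat: u64,
}

struct Channel {
	local_shutdown_sent: bool,
	dust_exposure_msat: u64,
	max_dust_exposure_msat: u64,
}

impl Channel {
	// Read-only acceptance checks: nothing is mutated, and failures are
	// reported as (reason, BOLT-style failure code) so the caller can fail
	// the HTLC backwards instead of closing the channel.
	fn can_accept_incoming_htlc(&self, msg: &UpdateAddHtlc) -> Result<(), (&'static str, u16)> {
		if self.local_shutdown_sent {
			return Err(("Shutdown was already sent", 0x4000 | 8));
		}
		if self.dust_exposure_msat + msg.amount_msat > self.max_dust_exposure_msat {
			return Err(("Exceeded our dust exposure limit", 0x1000 | 7));
		}
		Ok(())
	}

	// State-mutating half: after the refactor it no longer needs the fee
	// estimator, logger, or status-rewriting closure.
	fn update_add_htlc(&mut self, msg: &UpdateAddHtlc, forward: bool) -> Result<(), String> {
		// Record the pending HTLC, remembering whether we plan to forward it
		// or fail it backwards once it is irrevocably committed.
		self.dust_exposure_msat += msg.amount_msat;
		let _ = forward;
		Ok(())
	}
}

fn main() {
	let mut chan = Channel {
		local_shutdown_sent: false,
		dust_exposure_msat: 4_000_000,
		max_dust_exposure_msat: 5_000_000,
	};
	let msg = UpdateAddHtlc { amount_msat: 2_000_000 };

	// The caller (ChannelManager in LDK) decides up front whether to forward
	// the HTLC or accept it and then fail it backwards.
	let forward = match chan.can_accept_incoming_htlc(&msg) {
		Ok(()) => true,
		Err((reason, code)) => {
			println!("will fail HTLC backwards: {} (code {:#x})", reason, code);
			false
		}
	};
	chan.update_add_htlc(&msg, forward).expect("channel not force-closed");
}

In the actual patch, ChannelManager::internal_update_add_htlc performs this check and converts any returned failure code into an update_fail_htlc or update_fail_malformed_htlc to send backwards, as shown in the channelmanager.rs hunk below.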

2 files changed (+120, -92 lines)


lightning/src/ln/channel.rs

Lines changed: 84 additions & 57 deletions
@@ -4105,20 +4105,12 @@ impl<SP: Deref> Channel<SP> where
 		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
 	}
 
-	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
-		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
-		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
-	) -> Result<(), ChannelError>
-	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
-		FE::Target: FeeEstimator, L::Target: Logger,
-	{
+	pub fn update_add_htlc(
+		&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
+	) -> Result<(), ChannelError> {
 		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
 			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
 		}
-		// We can't accept HTLCs sent after we've sent a shutdown.
-		if self.context.channel_state.is_local_shutdown_sent() {
-			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
-		}
 		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
 		if self.context.channel_state.is_remote_shutdown_sent() {
 			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
@@ -4137,7 +4129,6 @@ impl<SP: Deref> Channel<SP> where
 		}
 
 		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
 		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
 			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
 		}
@@ -4166,34 +4157,6 @@ impl<SP: Deref> Channel<SP> where
 			}
 		}
 
-		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
-		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-			(0, 0)
-		} else {
-			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
-			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
-				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
-		};
-		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
-		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		}
-
-		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
-		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		}
-
 		let pending_value_to_self_msat =
 			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
 		let pending_remote_value_msat =
@@ -4227,23 +4190,7 @@ impl<SP: Deref> Channel<SP> where
 		} else {
 			0
 		};
-		if !self.context.is_outbound() {
-			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
-			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
-			// side, only on the sender's. Note that with anchor outputs we are no longer as
-			// sensitive to fee spikes, so we need to account for them.
-			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
-			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-			}
-			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
-				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
-				// the HTLC, i.e. its status is already set to failing.
-				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		} else {
+		if self.context.is_outbound() {
 			// Check that they won't violate our local required channel reserve by adding this HTLC.
 			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
 			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
@@ -6130,6 +6077,86 @@ impl<SP: Deref> Channel<SP> where
 		})
 	}
 
+	pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
+		&self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
+	) -> Result<(), (&'static str, u16)>
+	where
+		F::Target: FeeEstimator,
+		L::Target: Logger
+	{
+		if self.context.channel_state.is_local_shutdown_sent() {
+			return Err(("Shutdown was already sent", 0x4000|8))
+		}
+
+		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			(0, 0)
+		} else {
+			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
+		};
+		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+				return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
+			}
+		}
+
+		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+				return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
+			}
+		}
+
+		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+		} else {
+			0
+		};
+
+		let mut removed_outbound_total_msat = 0;
+		for ref htlc in self.context.pending_outbound_htlcs.iter() {
+			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+				removed_outbound_total_msat += htlc.amount_msat;
+			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+				removed_outbound_total_msat += htlc.amount_msat;
+			}
+		}
+
+		let pending_value_to_self_msat =
+			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+		let pending_remote_value_msat =
+			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+
+		if !self.context.is_outbound() {
+			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
+			// side, only on the sender's. Note that with anchor outputs we are no longer as
+			// sensitive to fee spikes, so we need to account for them.
+			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+			}
+			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+				return Err(("Fee spike buffer violation", 0x1000|7));
+			}
+		}
+
+		Ok(())
+	}
+
 	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
 		self.context.cur_holder_commitment_transaction_number + 1
 	}

lightning/src/ln/channelmanager.rs

Lines changed: 36 additions & 35 deletions
@@ -6792,52 +6792,53 @@ where
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan_phase_entry) => {
 				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-					let pending_forward_info = match decoded_hop_res {
+					let mut pending_forward_info = match decoded_hop_res {
 						Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
 							self.construct_pending_htlc_status(
 								msg, counterparty_node_id, shared_secret, next_hop,
 								chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
 							),
 						Err(e) => PendingHTLCStatus::Fail(e)
 					};
-					let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+					let logger = WithChannelContext::from(&self.logger, &chan.context);
+					// If the update_add is completely bogus, the call will Err and we will close,
+					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
+					// want to reject the new HTLC and fail it backwards instead of forwarding.
+					if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
 						if msg.blinding_point.is_some() {
-							return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
-								msgs::UpdateFailMalformedHTLC {
-									channel_id: msg.channel_id,
-									htlc_id: msg.htlc_id,
-									sha256_of_onion: [0; 32],
-									failure_code: INVALID_ONION_BLINDING,
-								}
-							))
-						}
-						// If the update_add is completely bogus, the call will Err and we will close,
-						// but if we've sent a shutdown and they haven't acknowledged it yet, we just
-						// want to reject the new HTLC and fail it backwards instead of forwarding.
-						match pending_forward_info {
-							PendingHTLCStatus::Forward(PendingHTLCInfo {
-								ref incoming_shared_secret, ref routing, ..
-							}) => {
-								let reason = if routing.blinded_failure().is_some() {
-									HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
-								} else if (error_code & 0x1000) != 0 {
-									let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
-									HTLCFailReason::reason(real_code, error_data)
-								} else {
-									HTLCFailReason::from_failure_code(error_code)
-								}.get_encrypted_failure_packet(incoming_shared_secret, &None);
-								let msg = msgs::UpdateFailHTLC {
+							pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
+								msgs::UpdateFailMalformedHTLC {
 									channel_id: msg.channel_id,
 									htlc_id: msg.htlc_id,
-									reason
-								};
-								PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
-							},
-							_ => pending_forward_info
+									sha256_of_onion: [0; 32],
+									failure_code: INVALID_ONION_BLINDING,
+								}
+							))
+						} else {
+							match pending_forward_info {
+								PendingHTLCStatus::Forward(PendingHTLCInfo {
+									ref incoming_shared_secret, ref routing, ..
+								}) => {
+									let reason = if routing.blinded_failure().is_some() {
+										HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
+									} else if (error_code & 0x1000) != 0 {
+										let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+										HTLCFailReason::reason(real_code, error_data)
+									} else {
+										HTLCFailReason::from_failure_code(error_code)
+									}.get_encrypted_failure_packet(incoming_shared_secret, &None);
+									let msg = msgs::UpdateFailHTLC {
+										channel_id: msg.channel_id,
+										htlc_id: msg.htlc_id,
+										reason
+									};
+									pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
+								},
+								_ => {},
+							}
 						}
 					}
-					};
-					let logger = WithChannelContext::from(&self.logger, &chan.context);
-					try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &&logger), chan_phase_entry);
+					}
+					try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info), chan_phase_entry);
 				} else {
 					return try_chan_phase_entry!(self, Err(ChannelError::Close(
 						"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
