Skip to content

Commit f66ad14

Browse files
Remove unnecessary acquiring of the channel_state lock
1 parent 2f04e81 commit f66ad14

File tree

1 file changed

+16
-24
lines changed

1 file changed

+16
-24
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 16 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1891,7 +1891,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
18911891

18921892
for htlc_source in failed_htlcs.drain(..) {
18931893
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
1894-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
1894+
self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
18951895
}
18961896

18971897
let _ = handle_error!(self, result, *counterparty_node_id);
@@ -1949,7 +1949,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19491949
for htlc_source in failed_htlcs.drain(..) {
19501950
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
19511951
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
1952-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
1952+
self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
19531953
}
19541954
if let Some((funding_txo, monitor_update)) = monitor_update_option {
19551955
// There isn't anything we can do if we get an update failure - we're already
@@ -3493,7 +3493,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
34933493
}
34943494

34953495
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
3496-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
3496+
self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination);
34973497
}
34983498
self.forward_htlcs(&mut phantom_receives);
34993499

@@ -3717,7 +3717,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
37173717

37183718
for htlc_source in timed_out_mpp_htlcs.drain(..) {
37193719
let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
3720-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver );
3720+
self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver );
37213721
}
37223722

37233723
for (err, counterparty_node_id) in handle_errors.drain(..) {
@@ -3751,7 +3751,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
37513751
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
37523752
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
37533753
self.best_block.read().unwrap().height()));
3754-
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
3754+
self.fail_htlc_backwards_internal(
37553755
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
37563756
HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
37573757
HTLCDestination::FailedPayment { payment_hash: *payment_hash });
@@ -3823,10 +3823,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
38233823
},
38243824
hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
38253825
};
3826-
let channel_state = self.channel_state.lock().unwrap();
3827-
38283826
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
3829-
self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
3827+
self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
38303828
},
38313829
HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
38323830
let mut session_priv_bytes = [0; 32];
@@ -3874,12 +3872,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
38743872
}
38753873

38763874
/// Fails an HTLC backwards to the sender of it to us.
3877-
/// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
3878-
/// There are several callsites that do stupid things like loop over a list of payment_hashes
3879-
/// to fail and take the channel_state lock for each iteration (as we take ownership and may
3880-
/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
3881-
/// still-available channels.
3882-
fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
3875+
/// Note that while this function pushes events as well as FailHTLC's to fail htlcs for
3876+
/// designated channels, no assumptions are made that the channels are still available.
3877+
fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason,destination: HTLCDestination) {
38833878
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
38843879
//identify whether we sent it or not based on the (I presume) very different runtime
38853880
//between the branches here. We should make this async and move it into the forward HTLCs
@@ -3918,7 +3913,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
39183913
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
39193914
return;
39203915
}
3921-
mem::drop(channel_state_lock);
39223916
let mut retry = if let Some(payment_params_data) = payment_params {
39233917
let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
39243918
Some(RouteParameters {
@@ -4042,7 +4036,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
40424036
}
40434037
}
40444038
mem::drop(forward_htlcs);
4045-
mem::drop(channel_state_lock);
40464039
let mut pending_events = self.pending_events.lock().unwrap();
40474040
if let Some(time) = forward_event {
40484041
pending_events.push(events::Event::PendingHTLCsForwardable {
@@ -4142,7 +4135,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
41424135
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
41434136
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
41444137
self.best_block.read().unwrap().height()));
4145-
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
4138+
self.fail_htlc_backwards_internal(
41464139
HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
41474140
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
41484141
HTLCDestination::FailedPayment { payment_hash } );
@@ -4426,7 +4419,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
44264419
self.finalize_claims(finalized_claims);
44274420
for failure in pending_failures.drain(..) {
44284421
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
4429-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
4422+
self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
44304423
}
44314424
}
44324425

@@ -4786,7 +4779,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
47864779
};
47874780
for htlc_source in dropped_htlcs.drain(..) {
47884781
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
4789-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
4782+
self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
47904783
}
47914784

47924785
let _ = handle_error!(self, result, *counterparty_node_id);
@@ -4988,7 +4981,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
49884981
for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
49894982
let mut forward_event = None;
49904983
if !pending_forwards.is_empty() {
4991-
let mut channel_state = self.channel_state.lock().unwrap();
49924984
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
49934985
if forward_htlcs.is_empty() {
49944986
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
@@ -5075,7 +5067,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
50755067
{
50765068
for failure in pending_failures.drain(..) {
50775069
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
5078-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
5070+
self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
50795071
}
50805072
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
50815073
self.finalize_claims(finalized_claim_htlcs);
@@ -5232,7 +5224,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
52325224
} else {
52335225
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
52345226
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
5235-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
5227+
self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
52365228
}
52375229
},
52385230
MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
@@ -5971,7 +5963,7 @@ where
59715963
self.handle_init_event_channel_failures(failed_channels);
59725964

59735965
for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
5974-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
5966+
self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
59755967
}
59765968
}
59775969

@@ -7330,7 +7322,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
73307322
for htlc_source in failed_htlcs.drain(..) {
73317323
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
73327324
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
7333-
channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
7325+
channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
73347326
}
73357327

73367328
//TODO: Broadcast channel update for closed channels, but only after we've made a

0 commit comments

Comments
 (0)