Skip to content

Commit 0f357f0

Browse files
Remove unnecessary acquiring of the channel_state lock
1 parent 470112b commit 0f357f0

File tree

1 file changed

+16
-24
lines changed

1 file changed

+16
-24
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 16 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1894,7 +1894,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
18941894

18951895
for htlc_source in failed_htlcs.drain(..) {
18961896
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
1897-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
1897+
self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
18981898
}
18991899

19001900
let _ = handle_error!(self, result, *counterparty_node_id);
@@ -1952,7 +1952,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
19521952
for htlc_source in failed_htlcs.drain(..) {
19531953
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
19541954
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
1955-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
1955+
self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
19561956
}
19571957
if let Some((funding_txo, monitor_update)) = monitor_update_option {
19581958
// There isn't anything we can do if we get an update failure - we're already
@@ -3499,7 +3499,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
34993499
}
35003500

35013501
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
3502-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
3502+
self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination);
35033503
}
35043504
self.forward_htlcs(&mut phantom_receives);
35053505

@@ -3723,7 +3723,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
37233723

37243724
for htlc_source in timed_out_mpp_htlcs.drain(..) {
37253725
let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
3726-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver );
3726+
self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver );
37273727
}
37283728

37293729
for (err, counterparty_node_id) in handle_errors.drain(..) {
@@ -3757,7 +3757,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
37573757
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
37583758
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
37593759
self.best_block.read().unwrap().height()));
3760-
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
3760+
self.fail_htlc_backwards_internal(
37613761
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
37623762
HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
37633763
HTLCDestination::FailedPayment { payment_hash: *payment_hash });
@@ -3829,10 +3829,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
38293829
},
38303830
hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
38313831
};
3832-
let channel_state = self.channel_state.lock().unwrap();
3833-
38343832
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
3835-
self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
3833+
self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
38363834
},
38373835
HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
38383836
let mut session_priv_bytes = [0; 32];
@@ -3880,12 +3878,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
38803878
}
38813879

38823880
/// Fails an HTLC backwards to the sender of it to us.
3883-
/// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
3884-
/// There are several callsites that do stupid things like loop over a list of payment_hashes
3885-
/// to fail and take the channel_state lock for each iteration (as we take ownership and may
3886-
/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
3887-
/// still-available channels.
3888-
fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
3881+
/// Note that while this function pushes events as well as FailHTLC's to fail htlcs for
3882+
/// designated channels, no assumptions are made that the channels are still available.
3883+
fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason,destination: HTLCDestination) {
38893884
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
38903885
//identify whether we sent it or not based on the (I presume) very different runtime
38913886
//between the branches here. We should make this async and move it into the forward HTLCs
@@ -3924,7 +3919,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
39243919
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
39253920
return;
39263921
}
3927-
mem::drop(channel_state_lock);
39283922
let mut retry = if let Some(payment_params_data) = payment_params {
39293923
let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
39303924
Some(RouteParameters {
@@ -4048,7 +4042,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
40484042
}
40494043
}
40504044
mem::drop(forward_htlcs);
4051-
mem::drop(channel_state_lock);
40524045
let mut pending_events = self.pending_events.lock().unwrap();
40534046
if let Some(time) = forward_event {
40544047
pending_events.push(events::Event::PendingHTLCsForwardable {
@@ -4148,7 +4141,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
41484141
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
41494142
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
41504143
self.best_block.read().unwrap().height()));
4151-
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
4144+
self.fail_htlc_backwards_internal(
41524145
HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
41534146
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
41544147
HTLCDestination::FailedPayment { payment_hash } );
@@ -4432,7 +4425,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
44324425
self.finalize_claims(finalized_claims);
44334426
for failure in pending_failures.drain(..) {
44344427
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
4435-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
4428+
self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
44364429
}
44374430
}
44384431

@@ -4792,7 +4785,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
47924785
};
47934786
for htlc_source in dropped_htlcs.drain(..) {
47944787
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
4795-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
4788+
self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
47964789
}
47974790

47984791
let _ = handle_error!(self, result, *counterparty_node_id);
@@ -4994,7 +4987,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
49944987
for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
49954988
let mut forward_event = None;
49964989
if !pending_forwards.is_empty() {
4997-
let mut channel_state = self.channel_state.lock().unwrap();
49984990
let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
49994991
if forward_htlcs.is_empty() {
50004992
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
@@ -5081,7 +5073,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
50815073
{
50825074
for failure in pending_failures.drain(..) {
50835075
let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
5084-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
5076+
self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
50855077
}
50865078
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
50875079
self.finalize_claims(finalized_claim_htlcs);
@@ -5238,7 +5230,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
52385230
} else {
52395231
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
52405232
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
5241-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
5233+
self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
52425234
}
52435235
},
52445236
MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
@@ -5977,7 +5969,7 @@ where
59775969
self.handle_init_event_channel_failures(failed_channels);
59785970

59795971
for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
5980-
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
5972+
self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
59815973
}
59825974
}
59835975

@@ -7336,7 +7328,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
73367328
for htlc_source in failed_htlcs.drain(..) {
73377329
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
73387330
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
7339-
channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
7331+
channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
73407332
}
73417333

73427334
//TODO: Broadcast channel update for closed channels, but only after we've made a

0 commit comments

Comments (0)