Commit 47230b3

f: possible solution to the locking issue
1 parent b8750c7 commit 47230b3
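
The diff below removes the drop-and-relock dance around channel_state: instead of each helper taking the Mutex itself, the caller holds the lock once and passes &mut borrows of the individual ChannelHolder fields (pending_msg_events, forward_htlcs) down into the failure helpers. A minimal sketch of the pattern, assuming hypothetical stand-in types (Holder, Manager, and fail_backwards_unlocked are illustrative names, not the real rust-lightning API):

use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical stand-ins for ChannelHolder/ChannelManager; illustrative only.
struct Holder {
    pending_msg_events: Vec<String>,
    forward_htlcs: HashMap<u64, Vec<String>>,
}

struct Manager {
    state: Mutex<Holder>,
}

impl Manager {
    // The helper takes the two guarded fields as separate &mut borrows, so it
    // can run while the caller already holds the lock instead of calling
    // self.state.lock() again (which would deadlock on a non-reentrant Mutex).
    fn fail_backwards_unlocked(
        pending_msg_events: &mut Vec<String>,
        forward_htlcs: &mut HashMap<u64, Vec<String>>,
        scid: u64,
        msg: String,
    ) {
        forward_htlcs.entry(scid).or_insert_with(Vec::new).push(msg.clone());
        pending_msg_events.push(msg);
    }

    // Locked entry point, mirroring the fail_htlc_backwards_internal /
    // _unlocked split in the diff: take the lock once, then delegate.
    fn handle_message(&self) {
        let mut guard = self.state.lock().unwrap();
        // Reborrow through the guard once, then split into disjoint field
        // borrows; the borrow checker accepts two simultaneous &mut because
        // they reference different fields of the same struct.
        let holder = &mut *guard;
        Self::fail_backwards_unlocked(
            &mut holder.pending_msg_events,
            &mut holder.forward_htlcs,
            42,
            "fail htlc".to_owned(),
        );
    } // guard drops here; the lock was taken exactly once
}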

File tree

1 file changed

lightning/src/ln/channelmanager.rs

Lines changed: 26 additions & 39 deletions
@@ -1825,23 +1825,17 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	// Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
 	// failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
 	// be surfaced to the user.
-	fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) -> Result<(), MsgHandleErrInternal> {
+	fn fail_holding_cell_htlcs(&self, channel: &Channel<ChanSigner>, pending_msg_events: &mut Vec<events::MessageSendEvent>, forward_htlcs: &mut HashMap<u64, Vec<HTLCForwardInfo>>, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>) -> Result<(), MsgHandleErrInternal> {
 		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
 			match htlc_src {
 				HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
 					let (failure_code, onion_failure_data) =
-						match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
-							hash_map::Entry::Occupied(chan_entry) => {
-								if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
-									(0x1000|7, upd.encode_with_len())
-								} else {
-									(0x4000|10, Vec::new())
-								}
-							},
-							hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+						if let Ok(upd) = self.get_channel_update(channel) {
+							(0x1000|7, upd.encode_with_len())
+						} else {
+							(0x4000|10, Vec::new())
 						};
-					let channel_state = self.channel_state.lock().unwrap();
-					self.fail_htlc_backwards_internal(channel_state,
+					self.fail_htlc_backwards_internal_unlocked(pending_msg_events, forward_htlcs,
 						htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
 				},
 				HTLCSource::OutboundRoute { .. } => {
@@ -1868,14 +1862,21 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
 	/// still-available channels.
 	fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+		let channel_state = &mut *channel_state_lock;
+		let pending_msg_events = &mut channel_state.pending_msg_events;
+		let forward_htlcs = &mut channel_state.forward_htlcs;
+		self.fail_htlc_backwards_internal_unlocked(pending_msg_events, forward_htlcs, source, payment_hash, onion_error)
+	}
+
+	// Sometimes it's necessary to fail an HTLC backwards while a lock is still held.
+	fn fail_htlc_backwards_internal_unlocked(&self, pending_msg_events: &mut Vec<events::MessageSendEvent>, forward_htlcs: &mut HashMap<u64, Vec<HTLCForwardInfo>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
 		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
 		//identify whether we sent it or not based on the (I presume) very different runtime
 		//between the branches here. We should make this async and move it into the forward HTLCs
 		//timer handling.
 		match source {
 			HTLCSource::OutboundRoute { ref path, .. } => {
 				log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
-				mem::drop(channel_state_lock);
 				match &onion_error {
 					&HTLCFailReason::LightningError { ref err } => {
 						#[cfg(test)]
@@ -1886,7 +1887,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 						// process_onion_failure we should close that channel as it implies our
 						// next-hop is needlessly blaming us!
 						if let Some(update) = channel_update {
-							self.channel_state.lock().unwrap().pending_msg_events.push(
+							pending_msg_events.push(
 								events::MessageSendEvent::PaymentFailureNetworkUpdate {
 									update,
 								}
@@ -1943,18 +1944,17 @@
 				};
 
 				let mut forward_event = None;
-				if channel_state_lock.forward_htlcs.is_empty() {
+				if forward_htlcs.is_empty() {
 					forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
 				}
-				match channel_state_lock.forward_htlcs.entry(short_channel_id) {
+				match forward_htlcs.entry(short_channel_id) {
 					hash_map::Entry::Occupied(mut entry) => {
 						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
 					},
 					hash_map::Entry::Vacant(entry) => {
 						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
 					}
 				}
-				mem::drop(channel_state_lock);
 				if let Some(time) = forward_event {
 					let mut pending_events = self.pending_events.lock().unwrap();
 					pending_events.push(events::Event::PendingHTLCsForwardable {
@@ -2709,33 +2709,20 @@
 	}
 
 	fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
-		let (commitment_update, pending_forwards, mut pending_failures, closing_signed, monitor_update, htlcs_to_fail) = {
+		let (pending_forwards, mut pending_failures, short_channel_id) = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
+			let pending_msg_events = &mut channel_state.pending_msg_events;
+			let forward_htlcs = &mut channel_state.forward_htlcs;
 			match channel_state.by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
 					if chan.get().get_their_node_id() != *their_node_id {
 						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
 					}
-					try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan)
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		};
-
-		// Failing back holding cell HTLCs requires acquiring the channel state
-		// lock, hence why this processing can't be done above.
-		if let Err(e) = self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id) { return Err(e) };
-
-		// Updating the corresponding ChannelMonitor may result in returning early,
-		// hence why we couldn't do this processing when we originally acquired the
-		// lock above.
-		let short_channel_id = {
-			let mut channel_state_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_state_lock;
-			match channel_state.by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
 					let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+					let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail) =
+						try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+					if let Err(e) = self.fail_holding_cell_htlcs(chan.get(), pending_msg_events, forward_htlcs, htlcs_to_fail) { return Err(e) };
 					if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
 						if was_frozen_for_monitor {
 							assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
@@ -2745,18 +2732,18 @@
 						}
 					}
 					if let Some(updates) = commitment_update {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+						pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
 							node_id: their_node_id.clone(),
 							updates,
 						});
 					}
 					if let Some(msg) = closing_signed {
-						channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+						pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 							node_id: their_node_id.clone(),
 							msg,
 						});
 					}
-					chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel")
+					(pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 			}
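
For the work that must still happen outside the lock (the pending_forwards/pending_failures handling that follows the hunk above), internal_revoke_and_ack now gathers everything in a single critical section and returns it out of the lock scope, rather than dropping and re-taking channel_state. A minimal sketch of that shape, again with hypothetical names (State, Node, and process are illustrative, not the real API):

use std::sync::Mutex;

// Hypothetical stand-ins; illustrative only.
struct State {
    items: Vec<u32>,
}

struct Node {
    state: Mutex<State>,
}

impl Node {
    fn process(&self) {
        // Gather everything needed in one critical section and return it out
        // of the block: the guard drops when the block ends, so the follow-up
        // work below runs without the lock held and without re-locking.
        let drained: Vec<u32> = {
            let mut guard = self.state.lock().unwrap();
            guard.items.drain(..).collect()
        };
        for item in drained {
            // ...work that must not hold the lock (callbacks, other locks)...
            println!("processing {}", item);
        }
    }
}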
