
Commit 72f1db0

Add missing pending channel maps checks in ChannelManager
One of a series of follow-up commits to address some issues found in PR 2077, where we split channels into different maps and structs depending on the phase of their life.
Parent commit: df237ba
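For context, the commit message above refers to channels being split into separate per-peer maps by lifecycle phase. Below is a minimal sketch of that layout with simplified stand-in types; the real `PeerState<Signer>` in channelmanager.rs holds full channel structs and many more fields, so treat this only as an illustration of the maps touched by this commit.

// A minimal sketch only: stand-in types, not the actual LDK definitions.
use std::collections::HashMap;

struct Channel;            // stand-in for a funded `Channel<Signer>`
struct OutboundV1Channel;  // stand-in for an unfunded outbound V1 channel
struct InboundV1Channel;   // stand-in for an unfunded inbound V1 channel

struct PeerState {
	// Funded channels, keyed by their final channel_id.
	channel_by_id: HashMap<[u8; 32], Channel>,
	// Pending (unfunded) channels we opened, keyed by temporary_channel_id.
	outbound_v1_channel_by_id: HashMap<[u8; 32], OutboundV1Channel>,
	// Pending (unfunded) channels the counterparty opened towards us.
	inbound_v1_channel_by_id: HashMap<[u8; 32], InboundV1Channel>,
}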

2 files changed: +31 -38 lines

lightning/src/ln/channelmanager.rs

Lines changed: 31 additions & 31 deletions
@@ -698,6 +698,18 @@ impl <Signer: ChannelSigner> PeerState<Signer> {
 		self.outbound_v1_channel_by_id.contains_key(channel_id) ||
 			self.inbound_v1_channel_by_id.contains_key(channel_id)
 	}
+
+	/// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer
+	/// that is in one of our pending (unfunded) channel maps.
+	///
+	/// NOTE: Although V1 established channels will always have a `temporary_channel_id` if they're
+	/// in `(outbound/inbound)_v1_channel_by_id`, we use the more general `channel_id` as V2
+	/// established channels will have a fixed `channel_id` already after the `accept_channel2`
+	/// message is sent/received.
+	fn has_pending_channel(&self, channel_id: &[u8; 32]) -> bool {
+		self.outbound_v1_channel_by_id.contains_key(channel_id) ||
+			self.inbound_v1_channel_by_id.contains_key(channel_id)
+	}
 }
 
 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
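A hedged, self-contained illustration of how a caller might use such a helper to gate handling of a message that references an unfunded channel. The `PendingChannels` struct and `check_incoming` function below are hypothetical stand-ins, not code from this commit; only the body of `has_pending_channel` mirrors the hunk above.

// Hypothetical usage sketch; `PendingChannels` mirrors only the two unfunded maps.
use std::collections::HashMap;

struct PendingChannels {
	outbound_v1_channel_by_id: HashMap<[u8; 32], ()>,
	inbound_v1_channel_by_id: HashMap<[u8; 32], ()>,
}

impl PendingChannels {
	// Same check as the new `has_pending_channel` above, covering both pending maps.
	fn has_pending_channel(&self, channel_id: &[u8; 32]) -> bool {
		self.outbound_v1_channel_by_id.contains_key(channel_id) ||
			self.inbound_v1_channel_by_id.contains_key(channel_id)
	}
}

// Hypothetical caller: refuse to act on a message for a channel id we don't know.
fn check_incoming(state: &PendingChannels, channel_id: &[u8; 32]) -> Result<(), &'static str> {
	if state.has_pending_channel(channel_id) { Ok(()) } else { Err("unknown pending channel_id") }
}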
@@ -2246,6 +2258,7 @@ where
 		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
+			// Only `Channel`s in the channel_by_id map can be considered funded.
 			for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
 				let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
 					peer_state.latest_features.clone(), &self.fee_estimator);
@@ -2314,11 +2327,15 @@ where
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			let features = &peer_state.latest_features;
+			let chan_context_to_details = |context| {
+				ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
+			};
 			return peer_state.channel_by_id
 				.iter()
-				.map(|(_, channel)|
-					ChannelDetails::from_channel_context(&channel.context, best_block_height,
-					features.clone(), &self.fee_estimator))
+				.map(|(_, channel)| &channel.context)
+				.chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+				.chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
+				.map(chan_context_to_details)
 				.collect();
 		}
 		vec![]
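The change above factors detail construction into one closure and chains iterators over all three maps, so funded and pending (unfunded) channels are listed uniformly. A standalone sketch of the same chaining pattern with simplified types (none of the names below are LDK API):

use std::collections::HashMap;

struct Context { funding_sat: u64 }
struct Details { funding_sat: u64 }

fn list_all_channels(
	funded: &HashMap<u32, Context>,
	outbound_pending: &HashMap<u32, Context>,
	inbound_pending: &HashMap<u32, Context>,
) -> Vec<Details> {
	// One conversion closure, applied once after the three maps are chained.
	let to_details = |ctx: &Context| Details { funding_sat: ctx.funding_sat };
	funded.values()
		.chain(outbound_pending.values())
		.chain(inbound_pending.values())
		.map(to_details)
		.collect()
}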
@@ -7214,37 +7231,20 @@ where
 		log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
 
 		let per_peer_state = self.per_peer_state.read().unwrap();
-		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
-			peer_state.channel_by_id.retain(|_, chan| {
-				let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
-					if !chan.context.have_received_message() {
-						// If we created this (outbound) channel while we were disconnected from the
-						// peer we probably failed to send the open_channel message, which is now
-						// lost. We can't have had anything pending related to this channel, so we just
-						// drop it.
-						false
-					} else {
-						pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
-							node_id: chan.context.get_counterparty_node_id(),
-							msg: chan.get_channel_reestablish(&self.logger),
-						});
-						true
-					}
-				} else { true };
-				if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
-					if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
-						if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
-							pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
-								node_id: *counterparty_node_id,
-								msg, update_msg,
-							});
-						}
-					}
-				}
-				retain
+
+			// Since pending channel maps are cleared upon disconnecting a peer, and they're not persisted
+			// (so won't be recovered after a crash), we don't need to bother closing pending channels and
+			// clearing their maps here. Instead we can just queue channel_reestablish messages for
+			// channels in the channel_by_id map.
+			peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+				pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+					node_id: chan.context.get_counterparty_node_id(),
+					msg: chan.get_channel_reestablish(&self.logger),
+				});
 			});
 		}
 		//TODO: Also re-broadcast announcement_signatures
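The new comment leans on a lifecycle guarantee: unfunded channel maps are dropped on disconnect and never persisted. A simplified model of that behaviour, using hand-rolled stand-ins rather than LDK types, to show why only channel_by_id needs reestablish messages on reconnect (the method names here are illustrative only):

use std::collections::HashMap;

#[derive(Default)]
struct PeerStateModel {
	channel_by_id: HashMap<[u8; 32], String>,
	outbound_v1_channel_by_id: HashMap<[u8; 32], String>,
	inbound_v1_channel_by_id: HashMap<[u8; 32], String>,
	pending_msg_events: Vec<String>,
}

impl PeerStateModel {
	fn peer_disconnected(&mut self) {
		// Pending (unfunded) channels are simply dropped: nothing about them is
		// persisted, so there is nothing to reestablish later.
		self.outbound_v1_channel_by_id.clear();
		self.inbound_v1_channel_by_id.clear();
	}

	fn peer_reconnected(&mut self) {
		// Only funded channels survive a disconnect, so only they get a
		// channel_reestablish queued.
		for id in self.channel_by_id.keys() {
			self.pending_msg_events.push(format!("channel_reestablish for {:02x?}", id));
		}
	}
}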

lightning/src/ln/functional_test_utils.rs

Lines changed: 0 additions & 7 deletions
@@ -2836,13 +2836,6 @@ macro_rules! get_chan_reestablish_msgs {
 				panic!("Unexpected event")
 			}
 		}
-		for chan in $src_node.node.list_channels() {
-			if chan.is_public && chan.counterparty.node_id != $dst_node.node.get_our_node_id() {
-				if let Some(scid) = chan.short_channel_id {
-					assert!(announcements.remove(&scid));
-				}
-			}
-		}
 		assert!(announcements.is_empty());
 		res
 	}
