Commit 5d8acc2

f DRY update_id building
1 parent 6fca688 commit 5d8acc2

lightning/src/ln/channelmanager.rs

Lines changed: 14 additions & 37 deletions
@@ -3951,39 +3951,6 @@ where
         self.chain_monitor.update_channel(funding_txo, &monitor_update)
     }
 
-    fn set_closed_chan_next_monitor_update_id(
-        peer_state: &mut PeerState<SP>, channel_id: ChannelId, monitor_update: &mut ChannelMonitorUpdate,
-    ) {
-        match peer_state.closed_channel_monitor_update_ids.entry(channel_id) {
-            btree_map::Entry::Vacant(entry) => {
-                let is_closing_unupdated_monitor = monitor_update.update_id == 1
-                    && monitor_update.updates.len() == 1
-                    && matches!(&monitor_update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. });
-                // If the ChannelMonitorUpdate is closing a channel that never got past initial
-                // funding (to have any commitment updates), we'll skip inserting in
-                // `locked_close_channel`, allowing us to avoid keeping around the PeerState for
-                // that peer. In that specific case we expect no entry in the map here. In any
-                // other cases, this is a bug, but in production we go ahead and recover by
-                // inserting the update_id and hoping its right.
-                debug_assert!(is_closing_unupdated_monitor, "Expected closing monitor against an unused channel, got {:?}", monitor_update);
-                if !is_closing_unupdated_monitor {
-                    entry.insert(monitor_update.update_id);
-                }
-            },
-            btree_map::Entry::Occupied(entry) => {
-                // If we're running in a threaded environment its possible we generate updates for
-                // a channel that is closing, then apply some preimage update, then go back and
-                // apply the close monitor update here. In order to ensure the updates are still
-                // well-ordered, we have to use the `closed_channel_monitor_update_ids` map to
-                // override the `update_id`, taking care to handle old monitors where the
-                // `latest_update_id` is already `u64::MAX`.
-                let latest_update_id = entry.into_mut();
-                *latest_update_id = latest_update_id.saturating_add(1);
-                monitor_update.update_id = *latest_update_id;
-            }
-        }
-    }
-
     /// When a channel is removed, two things need to happen:
     /// (a) [`locked_close_channel`] must be called in the same `per_peer_state` lock as
     ///     the channel-closing action,
@@ -7198,8 +7165,19 @@ where
         let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above");
         let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
 
-        let mut preimage_update = ChannelMonitorUpdate {
-            update_id: 0, // set in set_closed_chan_next_monitor_update_id
+        let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
+            *latest_update_id = latest_update_id.saturating_add(1);
+            *latest_update_id
+        } else {
+            let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
+This should have been checked for availability on startup but somehow it is no longer available.
+This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
+            log_error!(self.logger, "{}", err);
+            panic!("{}", err);
+        };
+
+        let preimage_update = ChannelMonitorUpdate {
+            update_id,
             counterparty_node_id: prev_hop.counterparty_node_id,
             updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
                 payment_preimage,
@@ -7208,12 +7186,11 @@ where
             channel_id: Some(prev_hop.channel_id),
         };
 
-        // Note that the below is race-y - we set the `update_id` here and then drop the peer_state
+        // Note that the below is race-y - we set the `update_id` above and then drop the peer_state
         // lock before applying the update in `apply_post_close_monitor_update` (or via the
        // background events pipeline). During that time, some other update could be created and
         // then applied, resultin in `ChannelMonitorUpdate`s being applied out of order and causing
         // a panic.
-        Self::set_closed_chan_next_monitor_update_id(&mut *peer_state, prev_hop.channel_id, &mut preimage_update);
 
         mem::drop(peer_state);
         mem::drop(per_peer_state);
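The shape of the new update_id construction can be illustrated outside of LDK. Below is a minimal standalone sketch, not LDK's actual API: the helper name next_closed_chan_update_id is hypothetical, a bare std BTreeMap keyed by plain u64 values stands in for PeerState::closed_channel_monitor_update_ids and ChannelId, and a plain panic! replaces the log_error!/panic! pair in the hunk above.

// Standalone sketch (hypothetical names, simplified types; not LDK's API).
use std::collections::BTreeMap;

/// Returns the next monitor update id for a closed channel, mirroring the
/// lookup-bump-or-panic flow in the diff above.
fn next_closed_chan_update_id(ids: &mut BTreeMap<u64, u64>, chan_id: u64) -> u64 {
    if let Some(latest_update_id) = ids.get_mut(&chan_id) {
        // Bump the stored counter; saturating so a counter already at
        // u64::MAX cannot wrap back around to 0.
        *latest_update_id = latest_update_id.saturating_add(1);
        *latest_update_id
    } else {
        // Startup checks are expected to have seeded this entry; a miss here
        // would be a bug, so fail loudly rather than guess an id.
        panic!("missing latest ChannelMonitorUpdate ID for channel {}", chan_id);
    }
}

fn main() {
    let mut ids = BTreeMap::new();
    ids.insert(42u64, 7u64);
    assert_eq!(next_closed_chan_update_id(&mut ids, 42), 8);
    assert_eq!(next_closed_chan_update_id(&mut ids, 42), 9);
}

As in the removed helper, saturating_add keeps monitors whose latest_update_id is already u64::MAX from wrapping, which would otherwise reorder updates.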
