@@ -457,7 +457,7 @@ impl MsgHandleErrInternal {
#[inline]
fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
- let action = if let (Some(_), ..) = &shutdown_res {
+ let action = if shutdown_res.monitor_update.is_some() {
// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
// should disconnect our peer such that we force them to broadcast their latest
// commitment upon reconnecting.
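This hunk, like the rest of the diff, reads `ShutdownResult` through named fields rather than tuple positions. The struct definition itself lives in channel.rs and is not part of this excerpt; a rough sketch of the shape implied by the field accesses in the hunks below (types inferred from how the old tuple was destructured) might look like:

```rust
// Sketch only - not taken from this diff. Field names come from the accesses below
// (`monitor_update`, `dropped_outbound_htlcs`, `unbroadcasted_batch_funding_txid`);
// the element types are inferred from how the old tuple was destructured.
pub(crate) struct ShutdownResult {
	// Present when the channel was funded and a closing `ChannelMonitorUpdate` must be applied.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	// Outbound HTLCs to fail back: (source, payment hash, counterparty node id, channel id).
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	// Set if the channel belonged to a batch whose funding transaction was never broadcast.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}
```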
@@ -2602,7 +2602,7 @@ where
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
- let mut shutdown_result = None;
+ let shutdown_result;
loop {
let per_peer_state = self.per_peer_state.read().unwrap();

@@ -2617,10 +2617,11 @@ where
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let funding_txo_opt = chan.context.get_funding_txo();
let their_features = &peer_state.latest_features;
- let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
- let (shutdown_msg, mut monitor_update_opt, htlcs) =
+ let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
failed_htlcs = htlcs;
+ shutdown_result = local_shutdown_result;
+ debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());

// We can send the `shutdown` message before updating the `ChannelMonitor`
// here as we don't need the monitor update to complete until we send a
@@ -2648,7 +2649,6 @@ where
});
}
self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
- shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
}
}
break;
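The other half of this change is in channel.rs and is not shown here; from the call site in the hunk above, `Channel::get_shutdown` presumably now returns a fourth element carrying the `ShutdownResult`. A hedged sketch of that signature (parameter names follow the call site, the generic bounds and error type are assumptions):

```rust
// Assumed shape, inferred from the destructuring at the call site above. The
// `Option<ShutdownResult>` is expected to be `Some` exactly when the channel is
// fully shut down, matching the `debug_assert_eq!` added by this diff.
pub fn get_shutdown<SP: Deref>(
	&mut self, signer_provider: &SP, their_features: &InitFeatures,
	target_feerate_sats_per_1000_weight: Option<u32>,
	override_shutdown_script: Option<ShutdownScript>,
) -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
	where SP::Target: SignerProvider
{
	// ...
	unimplemented!()
}
```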
@@ -2739,30 +2739,29 @@ where
self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
}

- fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+ fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
#[cfg(debug_assertions)]
for (_, peer) in self.per_peer_state.read().unwrap().iter() {
debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}

- let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
- log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
- for htlc_source in failed_htlcs.drain(..) {
+ log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+ for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+ if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
// ignore the result here.
let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
}
let mut shutdown_results = Vec::new();
- if let Some(txid) = unbroadcasted_batch_funding_txid {
+ if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3907,7 +3906,7 @@ where
/// Return values are identical to [`Self::funding_transaction_generated`], respective to
/// each individual channel and transaction output.
///
- /// Do NOT broadcast the funding transaction yourself. This batch funding transcaction
+ /// Do NOT broadcast the funding transaction yourself. This batch funding transaction
/// will only be broadcast when we have safely received and persisted the counterparty's
/// signature for each channel.
///
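Given the broadcast semantics described in the doc comment above, a short usage sketch may help. Everything named here (`channel_manager`, the temporary channel ids, node ids, and `funding_tx`) is a placeholder for values obtained from earlier `create_channel` calls and the caller's wallet; none of it comes from this diff.

```rust
// Hypothetical call for two pending channels funded by one shared transaction.
// The caller must NOT broadcast `funding_tx`; LDK broadcasts it only after the
// counterparty's `funding_signed` has been received and persisted for every
// channel in the batch.
let batch: [(&ChannelId, &PublicKey); 2] = [
	(&temp_chan_id_a, &node_id_a),
	(&temp_chan_id_b, &node_id_b),
];
channel_manager.batch_funding_transaction_generated(&batch, funding_tx)?;
```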
@@ -3961,7 +3960,7 @@ where
btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
}
});
- for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() {
+ for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
result = result.and_then(|_| self.funding_transaction_generated_intern(
temporary_channel_id,
counterparty_node_id,
@@ -6453,22 +6452,20 @@ where
}

fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
- let mut shutdown_result = None;
- let unbroadcasted_batch_funding_txid;
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
debug_assert!(false);
MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
})?;
- let (tx, chan_option) = {
+ let (tx, chan_option, shutdown_result) = {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
- unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
- let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+ let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+ debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
if let Some(msg) = closing_signed {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
@@ -6481,8 +6478,8 @@ where
// also implies there are no pending HTLCs left on the channel, so we can
// fully delete it from tracking (the channel monitor is still around to
// watch for old state broadcasts)!
- (tx, Some(remove_channel_phase!(self, chan_phase_entry)))
- } else { (tx, None) }
+ (tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+ } else { (tx, None, shutdown_result) }
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6504,7 +6501,6 @@ where
});
}
self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
- shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
}
mem::drop(per_peer_state);
if let Some(shutdown_result) = shutdown_result {
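As with `get_shutdown`, the channel.rs side of the cooperative-close path is not in this excerpt. From the call sites above and in the `timer_tick_occurred` hunks below, both `Channel::closing_signed` and `Channel::maybe_propose_closing_signed` presumably now return an extra `Option<ShutdownResult>`. A hedged sketch of the former (the fee-estimator wrapper type and error type are assumptions):

```rust
// Assumed channel.rs-side signature, inferred from the call sites in this diff.
// `maybe_propose_closing_signed` (called from timer_tick_occurred below) is
// presumed to return the same triple.
pub fn closing_signed<F: Deref>(
	&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned,
) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
	where F::Target: FeeEstimator
{
	// ...
	unimplemented!()
}
```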
@@ -7237,15 +7233,18 @@ where
peer_state.channel_by_id.retain(|channel_id, phase| {
match phase {
ChannelPhase::Funded(chan) => {
- let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
- Ok((msg_opt, tx_opt)) => {
+ Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
if let Some(msg) = msg_opt {
has_update = true;
pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: chan.context.get_counterparty_node_id(), msg,
});
}
+ debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+ if let Some(shutdown_result) = shutdown_result_opt {
+ shutdown_results.push(shutdown_result);
+ }
if let Some(tx) = tx_opt {
// We're done with this channel. We got a closing_signed and sent back
// a closing_signed with a closing transaction to broadcast.
@@ -7260,7 +7259,6 @@ where
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transactions(&[&tx]);
update_maps_on_chan_removal!(self, &chan.context);
- shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
false
} else { true }
},
@@ -7301,7 +7299,7 @@ where
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
// timer_tick_occurred, guaranteeing we're running normally.
- if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+ if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
assert!(should_broadcast);
@@ -9966,16 +9964,16 @@ where
log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
}
- let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
- if batch_funding_txid.is_some() {
+ let mut shutdown_result = channel.context.force_shutdown(true);
+ if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
return Err(DecodeError::InvalidValue);
}
- if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+ if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id, funding_txo, update
});
}
- failed_htlcs.append(&mut new_failed_htlcs);
+ failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
channel_closures.push_back((events::Event::ChannelClosed {
channel_id: channel.context.channel_id(),
user_channel_id: channel.context.get_user_id(),