@@ -452,7 +452,7 @@ impl MsgHandleErrInternal {
 	#[inline]
 	fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
 		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
-		let action = if let (Some(_), ..) = &shutdown_res {
+		let action = if shutdown_res.monitor_update.is_some() {
 			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
 			// should disconnect our peer such that we force them to broadcast their latest
 			// commitment upon reconnecting.
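
For orientation: the tuple pattern `if let (Some(_), ..)` worked because `ShutdownResult` used to be a bare 3-tuple; this patch turns it into a struct with named fields. Below is a minimal sketch of the shape implied by the field accesses in the rest of this diff — the authoritative definition lives in the patch's channel.rs changes, and the visibility here is a guess:

```rust
// Sketch only: field names come from this diff, types are inferred from how
// each field is destructured at the call sites below.
pub(crate) struct ShutdownResult {
	// Taken apart as `(counterparty_node_id, funding_txo, update)`, i.e. the
	// closing `ChannelMonitorUpdate` (if the channel was funded) plus the
	// keys needed to apply it.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	// Drained in `finish_close_channel` as `(source, payment_hash,
	// counterparty_node_id, channel_id)`: outbound HTLCs to fail backwards.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	// `Some` when the channel was part of a batch whose funding transaction
	// was never broadcast, so the whole batch must be unwound.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}
```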
@@ -2564,7 +2564,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let mut shutdown_result = None;
+		let shutdown_result;
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();

@@ -2579,10 +2579,11 @@ where
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 						let funding_txo_opt = chan.context.get_funding_txo();
 						let their_features = &peer_state.latest_features;
-						let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (shutdown_msg, mut monitor_update_opt, htlcs) =
+						let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
 							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 						failed_htlcs = htlcs;
+						shutdown_result = local_shutdown_result;
+						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());

 						// We can send the `shutdown` message before updating the `ChannelMonitor`
 						// here as we don't need the monitor update to complete until we send a
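
The new `local_shutdown_result` binding implies `Channel::get_shutdown` now reports the shutdown outcome itself instead of the caller assembling a tuple later. A hedged sketch of the return shape at this call site (the exact types live in channel.rs; `Option<ShutdownResult>` is `Some` exactly when the channel has finished shutting down, per the `debug_assert_eq!` above):

```rust
// Assumed return type of `Channel::get_shutdown`, inferred from this call site.
type GetShutdownReturn = (
	msgs::Shutdown,                                       // `shutdown` message to send to the peer
	Option<ChannelMonitorUpdate>,                         // closing monitor update, if funded
	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>, // HTLCs to fail backwards
	Option<ShutdownResult>,                               // `Some` iff `chan.is_shutdown()`
);
```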
@@ -2610,7 +2611,6 @@ where
 									});
 								}
 								self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-								shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 							}
 						}
 						break;
@@ -2702,30 +2702,29 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}

-	fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
 		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 		#[cfg(debug_assertions)]
 		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}

-		let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
-		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", failed_htlcs.len());
-		for htlc_source in failed_htlcs.drain(..) {
+		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
 			// ignore the result here.
 			let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
 		}
 		let mut shutdown_results = Vec::new();
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
+		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
 			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
 			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
 			let per_peer_state = self.per_peer_state.read().unwrap();
@@ -6268,22 +6267,20 @@ where
 	}

 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
-		let mut shutdown_result = None;
-		let unbroadcasted_batch_funding_txid;
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
 				debug_assert!(false);
 				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
 			})?;
-		let (tx, chan_option) = {
+		let (tx, chan_option, shutdown_result) = {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_phase_entry) => {
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-						unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
 						if let Some(msg) = closing_signed {
 							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 								node_id: counterparty_node_id.clone(),
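
`closing_signed` now hands back a third element, so `internal_closing_signed` no longer needs the `unbroadcasted_batch_funding_txid` side-channel it previously threaded out of the match. A sketch of the presumed new signature — the generics and bounds are approximations, not the patch's actual text:

```rust
// Assumed signature, inferred from the call site above. Returns the optional
// `closing_signed` reply, the final closing `Transaction` once fee negotiation
// converges, and `Some(ShutdownResult)` iff the channel is now fully shut down.
pub fn closing_signed<F: Deref>(
	&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned,
) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
	where F::Target: FeeEstimator;
```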
@@ -6296,8 +6293,8 @@ where
 							// also implies there are no pending HTLCs left on the channel, so we can
 							// fully delete it from tracking (the channel monitor is still around to
 							// watch for old state broadcasts)!
-							(tx, Some(remove_channel_phase!(self, chan_phase_entry)))
-						} else { (tx, None) }
+							(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+						} else { (tx, None, shutdown_result) }
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6319,7 +6316,6 @@ where
 				});
 			}
 			self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-			shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 		}
 		mem::drop(per_peer_state);
 		if let Some(shutdown_result) = shutdown_result {
@@ -7049,15 +7045,18 @@ where
 				peer_state.channel_by_id.retain(|channel_id, phase| {
 					match phase {
 						ChannelPhase::Funded(chan) => {
-							let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
 							match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-								Ok((msg_opt, tx_opt)) => {
+								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
 									if let Some(msg) = msg_opt {
 										has_update = true;
 										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 											node_id: chan.context.get_counterparty_node_id(), msg,
 										});
 									}
+									debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
+									if let Some(shutdown_result) = shutdown_result_opt {
+										shutdown_results.push(shutdown_result);
+									}
 									if let Some(tx) = tx_opt {
 										// We're done with this channel. We got a closing_signed and sent back
 										// a closing_signed with a closing transaction to broadcast.
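
Same pattern in the timer path: rather than the caller pre-fetching `unbroadcasted_batch_funding_txid` and building a tuple when a channel closes, `maybe_propose_closing_signed` presumably returns the `ShutdownResult` itself and the loop just collects it. The inferred shape:

```rust
// Assumed return type of `maybe_propose_closing_signed`, inferred from the
// `Ok((msg_opt, tx_opt, shutdown_result_opt))` pattern above.
type MaybeProposeReturn = Result<
	(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>),
	ChannelError,
>;
```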
@@ -7072,7 +7071,6 @@ where
 										log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 										self.tx_broadcaster.broadcast_transactions(&[&tx]);
 										update_maps_on_chan_removal!(self, &chan.context);
-										shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid));
 										false
 									} else { true }
 								},
@@ -7113,7 +7111,7 @@ where
 			// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 			// so we track the update internally and handle it when the user next calls
 			// timer_tick_occurred, guaranteeing we're running normally.
-			if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+			if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
 				assert_eq!(update.updates.len(), 1);
 				if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 					assert!(should_broadcast);
@@ -9405,16 +9403,16 @@ where
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
 						&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
 				}
-				let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
-				if batch_funding_txid.is_some() {
+				let mut shutdown_result = channel.context.force_shutdown(true);
+				if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
 					return Err(DecodeError::InvalidValue);
 				}
-				if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+				if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
 					close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 						counterparty_node_id, funding_txo, update
 					});
 				}
-				failed_htlcs.append(&mut new_failed_htlcs);
+				failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
 				channel_closures.push_back((events::Event::ChannelClosed {
 					channel_id: channel.context.channel_id(),
 					user_channel_id: channel.context.get_user_id(),