@@ -7686,24 +7686,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
(htlc_forwards, decode_update_add_htlcs)
}

- fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+ fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock

- let counterparty_node_id = match counterparty_node_id {
- Some(cp_id) => cp_id.clone(),
- None => {
- // TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the outpoint_to_peer map should be removed.
- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
- match outpoint_to_peer.get(funding_txo) {
- Some(cp_id) => cp_id.clone(),
- None => return,
- }
- }
- };
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock;
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
if peer_state_mutex_opt.is_none() { return }
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
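Note: with the signature change above, the `outpoint_to_peer` fallback disappears because callers must now supply the counterparty. A minimal before/after sketch of the lookup, using hypothetical stand-in types rather than LDK's real ones:

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for LDK's types, for illustration only.
type OutPoint = ([u8; 32], u16);
type PublicKey = [u8; 33];
struct PeerState;

// Old shape: the counterparty may be unknown, so fall back to the
// outpoint_to_peer map and silently give up if neither source has it.
fn lookup_peer_old<'a>(
    per_peer_state: &'a HashMap<PublicKey, PeerState>,
    outpoint_to_peer: &HashMap<OutPoint, PublicKey>,
    funding_txo: &OutPoint,
    counterparty_node_id: Option<&PublicKey>,
) -> Option<&'a PeerState> {
    let cp_id = match counterparty_node_id {
        Some(cp_id) => *cp_id,
        // The `?` mirrors the old `None => return` bail-out.
        None => *outpoint_to_peer.get(funding_txo)?,
    };
    per_peer_state.get(&cp_id)
}

// New shape: callers must already know the counterparty, so the fallback
// map (and the lock protecting it) can be dropped.
fn lookup_peer_new<'a>(
    per_peer_state: &'a HashMap<PublicKey, PeerState>,
    counterparty_node_id: &PublicKey,
) -> Option<&'a PeerState> {
    per_peer_state.get(counterparty_node_id)
}

fn main() {
    let peers: HashMap<PublicKey, PeerState> = HashMap::new();
    assert!(lookup_peer_new(&peers, &[2u8; 33]).is_none());
}
```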
@@ -7714,7 +7702,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
pending.len()
} else { 0 };

- let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
+ let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
highest_applied_update_id, remaining_in_flight);

@@ -9466,67 +9454,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
- let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage,
htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
- false, counterparty_node_id, funding_outpoint, channel_id, None);
+ false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
- let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
- let counterparty_node_id_opt = match counterparty_node_id {
- Some(cp_id) => Some(cp_id),
- None => {
- // TODO: Once we can rely on the counterparty_node_id from the
- // monitor event, this and the outpoint_to_peer map should be removed.
- let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
- outpoint_to_peer.get(&funding_outpoint).cloned()
- }
- };
- if let Some(counterparty_node_id) = counterparty_node_id_opt {
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let pending_msg_events = &mut peer_state.pending_msg_events;
- if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
- let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
- reason
- } else {
- ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
- };
- let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
- let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
- failed_channels.push(shutdown_res);
- if let Some(funded_chan) = chan.as_funded() {
- if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
- let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
- pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: funded_chan.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage {
- channel_id: funded_chan.context.channel_id(),
- data: reason.to_string()
- })
- },
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
+ if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+ reason
+ } else {
+ ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
+ };
+ let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
+ let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
+ failed_channels.push(shutdown_res);
+ if let Some(funded_chan) = chan.as_funded() {
+ if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
+ let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+ pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
});
}
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::DisconnectPeer {
+ msg: Some(msgs::ErrorMessage {
+ channel_id,
+ data: reason.to_string()
+ })
+ },
+ });
}
}
}
},
MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
- self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
+ self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
},
}
}
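The rewritten arm drops the `Option` plumbing but keeps the occupied-entry removal pattern. A self-contained sketch of that pattern, with a hypothetical, pared-down channel type in place of LDK's:

```rust
use std::collections::{hash_map::Entry, HashMap};

// Hypothetical, simplified channel type standing in for LDK's.
struct Channel {
    id: u64,
}

// Mirrors the arm above: look the channel up via the entry API and remove it
// in one step, taking ownership so shutdown bookkeeping and outbound error
// messages can be queued without touching the map again.
fn force_close(channels: &mut HashMap<u64, Channel>, channel_id: u64) -> Option<Channel> {
    if let Entry::Occupied(chan_entry) = channels.entry(channel_id) {
        // `remove()` yields the owned channel, as `remove_channel_entry!` does.
        Some(chan_entry.remove())
    } else {
        None
    }
}

fn main() {
    let mut channels = HashMap::new();
    channels.insert(7, Channel { id: 7 });
    let closed = force_close(&mut channels, 7).expect("channel existed");
    assert_eq!(closed.id, 7);
    assert!(channels.is_empty());
}
```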
@@ -13756,26 +13733,26 @@ where
for (channel_id, monitor) in args.channel_monitors.iter() {
if !channel_id_set.contains(channel_id) {
let mut should_queue_fc_update = false;
- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
- // If the ChannelMonitor had any updates, we may need to update it further and
- // thus track it in `closed_channel_monitor_update_ids`. If the channel never
- // had any updates at all, there can't be any HTLCs pending which we need to
- // claim.
- // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
- // provide it with a closure update its `update_id` will be at 1.
- if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
- should_queue_fc_update = !monitor.no_further_updates_allowed();
- let mut latest_update_id = monitor.get_latest_update_id();
- if should_queue_fc_update {
- latest_update_id += 1;
- }
- per_peer_state.entry(counterparty_node_id)
- .or_insert_with(|| Mutex::new(empty_peer_state()))
- .lock().unwrap()
- .closed_channel_monitor_update_ids.entry(monitor.channel_id())
- .and_modify(|v| *v = cmp::max(latest_update_id, *v))
- .or_insert(latest_update_id);
+ let counterparty_node_id = monitor.get_counterparty_node_id();
+
+ // If the ChannelMonitor had any updates, we may need to update it further and
+ // thus track it in `closed_channel_monitor_update_ids`. If the channel never
+ // had any updates at all, there can't be any HTLCs pending which we need to
+ // claim.
+ // Note that a `ChannelMonitor` is created with `update_id` 0 and after we
+ // provide it with a closure update its `update_id` will be at 1.
+ if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
+ should_queue_fc_update = !monitor.no_further_updates_allowed();
+ let mut latest_update_id = monitor.get_latest_update_id();
+ if should_queue_fc_update {
+ latest_update_id += 1;
}
+ per_peer_state.entry(counterparty_node_id)
+ .or_insert_with(|| Mutex::new(empty_peer_state()))
+ .lock().unwrap()
+ .closed_channel_monitor_update_ids.entry(monitor.channel_id())
+ .and_modify(|v| *v = cmp::max(latest_update_id, *v))
+ .or_insert(latest_update_id);
}

if !should_queue_fc_update {
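The block above records a per-channel high-water mark for monitor `update_id`s at read time. A sketch of the same bookkeeping under simplified, hypothetical types:

```rust
use std::cmp;
use std::collections::HashMap;

// Remember the highest monitor update_id seen per closed channel, counting
// one extra update when a force-close update will still be queued.
fn track_closed_channel(
    closed_channel_monitor_update_ids: &mut HashMap<u64, u64>, // channel_id -> update_id
    channel_id: u64,
    latest_update_id: u64,
    should_queue_fc_update: bool,
) {
    let latest = if should_queue_fc_update {
        // The queued ChannelForceClosed update will bump the monitor by one.
        latest_update_id.saturating_add(1)
    } else {
        latest_update_id
    };
    closed_channel_monitor_update_ids
        .entry(channel_id)
        .and_modify(|v| *v = cmp::max(latest, *v))
        .or_insert(latest);
}

fn main() {
    let mut ids = HashMap::new();
    track_closed_channel(&mut ids, 1, 1, true);
    assert_eq!(ids[&1], 2);
    // A later, lower observation never decreases the stored high-water mark.
    track_closed_channel(&mut ids, 1, 1, false);
    assert_eq!(ids[&1], 2);
}
```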
@@ -13786,31 +13763,20 @@ where
let channel_id = monitor.channel_id();
log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
&channel_id);
- let mut monitor_update = ChannelMonitorUpdate {
+ let monitor_update = ChannelMonitorUpdate {
update_id: monitor.get_latest_update_id().saturating_add(1),
- counterparty_node_id: None,
+ counterparty_node_id: Some(counterparty_node_id),
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
channel_id: Some(monitor.channel_id()),
};
let funding_txo = monitor.get_funding_txo();
- if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
- let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id,
- funding_txo,
- channel_id,
- update: monitor_update,
- };
- close_background_events.push(update);
- } else {
- // This is a fairly old `ChannelMonitor` that hasn't seen an update to its
- // off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
- // `ChannelMonitorUpdate` will set the counterparty ID).
- // Thus, we assume that it has no pending HTLCs and we will not need to
- // generate a `ChannelMonitorUpdate` for it aside from this
- // `ChannelForceClosed` one.
- monitor_update.update_id = u64::MAX;
- close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
- }
+ let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id,
+ funding_txo,
+ channel_id,
+ update: monitor_update,
+ };
+ close_background_events.push(update);
}
}

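After this hunk only one startup path remains, since every monitor now reports its counterparty. A simplified, hypothetical mirror of that path (not LDK's real structs):

```rust
// With the counterparty always known, the legacy `update_id = u64::MAX`
// branch for monitors last updated before LDK 0.0.119 disappears.
struct MonitorUpdate {
    update_id: u64,
    counterparty_node_id: Option<[u8; 33]>,
}

enum BackgroundEvent {
    MonitorUpdateRegeneratedOnStartup {
        counterparty_node_id: [u8; 33],
        update: MonitorUpdate,
    },
}

fn regenerate_force_close(
    latest_update_id: u64,
    counterparty_node_id: [u8; 33],
) -> BackgroundEvent {
    BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
        counterparty_node_id,
        update: MonitorUpdate {
            // saturating_add avoids wrap-around for a monitor already at u64::MAX.
            update_id: latest_update_id.saturating_add(1),
            counterparty_node_id: Some(counterparty_node_id),
        },
    }
}

fn main() {
    let ev = regenerate_force_close(41, [3u8; 33]);
    match ev {
        BackgroundEvent::MonitorUpdateRegeneratedOnStartup { update, .. } => {
            assert_eq!(update.update_id, 42);
        }
    }
}
```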
@@ -14369,7 +14335,7 @@ where
// downstream chan is closed (because we don't have a
// channel_id -> peer map entry).
counterparty_opt.is_none(),
- counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
+ Some(monitor.get_counterparty_node_id()),
monitor.get_funding_txo(), monitor.channel_id()))
} else { None }
} else {
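This one-line change reflects `get_counterparty_node_id()` now returning a bare `PublicKey`, so the `.or(...)` fallback collapses to a plain `Some`. A tiny sketch with hypothetical signatures:

```rust
type PublicKey = [u8; 33];

// Before: two optional sources merged with Option::or.
fn peer_id_old(counterparty_opt: Option<&PublicKey>, from_monitor: Option<PublicKey>) -> Option<PublicKey> {
    counterparty_opt.cloned().or(from_monitor)
}

// After: the monitor alone is authoritative and always knows its peer.
fn peer_id_new(from_monitor: PublicKey) -> Option<PublicKey> {
    Some(from_monitor)
}

fn main() {
    let id: PublicKey = [9u8; 33];
    assert_eq!(peer_id_old(None, Some(id)), Some(id));
    assert_eq!(peer_id_new(id), Some(id));
}
```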
@@ -15055,8 +15021,8 @@ mod tests {
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

create_announced_chan_between_nodes(&nodes, 0, 1);
-
- // Since we do not send peer storage, we manually simulate receiving a dummy
+
+ // Since we do not send peer storage, we manually simulate receiving a dummy
// `PeerStorage` from the channel partner.
nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});
