@@ -904,6 +904,7 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
 	is_connected: bool,
+	peer_storage: Vec<u8>,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -2526,7 +2527,6 @@ where
 			entropy_source,
 			node_signer,
 			signer_provider,
-
 			logger,
 		}
 	}
@@ -6685,6 +6685,53 @@ where
 		}
 	}
 
+	fn internal_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::PeerStorageMessage) {
+		let per_peer_state = self.per_peer_state.write().unwrap();
+		let peer_state_mutex = match per_peer_state.get(counterparty_node_id) {
+			Some(peer_state_mutex) => peer_state_mutex,
+			None => return,
+		};
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
+
+		// Check if we have any channels with the peer (currently we only provide the service to peers we have a channel with).
+		if peer_state.total_channel_count() == 0 {
+			log_debug!(logger, "We do not have any channel with {}", log_pubkey!(counterparty_node_id));
+			return;
+		}
+
+		let mut funded_channels: Vec<&mut Channel<SP>> = peer_state.channel_by_id.values_mut()
+			.filter_map(|phase| {
+				if let ChannelPhase::Funded(channel) = phase {
+					Some(channel)
+				} else {
+					None
+				}
+			})
+			.collect();
+
+		let min_funded_chan = funded_channels
+			.iter_mut()
+			.min_by_key(|s| s.context.get_funding_txo().unwrap().get_txid())
+			.unwrap();
+
+		// Send a ChannelMonitor update.
+		let peer_storage_update = ChannelMonitorUpdate {
+			update_id: min_funded_chan.context.increment_and_fetch_monitor_update_id(),
+			counterparty_node_id: None,
+			updates: vec![ChannelMonitorUpdateStep::LatestPeerStorage {
+				data: msg.data.clone(),
+			}],
+			channel_id: Some(min_funded_chan.context.channel_id()),
+		};
+
+		// Update the store.
+		peer_state.peer_storage = msg.data.clone();
+
+		handle_new_monitor_update!(self, min_funded_chan.context.get_funding_txo().unwrap(), peer_storage_update, peer_state_lock, peer_state, per_peer_state, min_funded_chan);
+	}
+
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
 		let best_block = *self.best_block.read().unwrap();
 		let per_peer_state = self.per_peer_state.read().unwrap();
@@ -9069,6 +9116,8 @@ where
 	}
 
 	fn handle_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::PeerStorageMessage) {
+		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+		self.internal_peer_storage(counterparty_node_id, msg);
 	}
 
 	fn handle_your_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::YourPeerStorageMessage) {
@@ -9384,6 +9433,7 @@ where
 					monitor_update_blocked_actions: BTreeMap::new(),
 					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					is_connected: true,
+					peer_storage: Vec::new(),
 				}));
 			},
 			hash_map::Entry::Occupied(e) => {
@@ -9456,6 +9506,15 @@ where
 					},
 				}
 			}
+
+			let peer_storage = peer_state.peer_storage.clone();
+
+			pending_msg_events.push(events::MessageSendEvent::SendYourPeerStorageMessage {
+				node_id: counterparty_node_id.clone(),
+				msg: msgs::YourPeerStorageMessage {
+					data: peer_storage
+				},
+			});
 		}
 
 		return NotifyOption::SkipPersistHandleEvents;
@@ -10783,6 +10842,7 @@ where
 		let mut channel_closures = VecDeque::new();
 		let mut close_background_events = Vec::new();
 		let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
+		let mut peer_storage_dir: HashMap<PublicKey, Vec<u8>> = HashMap::new();
 		for _ in 0..channel_count {
 			let mut channel: Channel<SP> = Channel::read(reader, (
 				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
@@ -10792,6 +10852,9 @@ where
 			funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
+				// Load the peer storage blob from the ChannelMonitor into memory.
+				peer_storage_dir.insert(channel.context.get_counterparty_node_id(), monitor.get_peer_storage());
+
 				if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
 					channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
 					channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
@@ -10938,7 +11001,7 @@ where
 				claimable_htlcs_list.push((payment_hash, previous_hops));
 		}
 
-		let peer_state_from_chans = |channel_by_id| {
+		let peer_state_from_chans = |channel_by_id, peer_storage_blob| {
 			PeerState {
 				channel_by_id,
 				inbound_channel_request_by_id: new_hash_map(),
@@ -10948,6 +11011,7 @@ where
 				monitor_update_blocked_actions: BTreeMap::new(),
 				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: false,
+				peer_storage: peer_storage_blob,
 			}
 		};
 
@@ -10956,7 +11020,7 @@ where
 		for _ in 0..peer_count {
 			let peer_pubkey = Readable::read(reader)?;
 			let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
-			let mut peer_state = peer_state_from_chans(peer_chans);
+			let mut peer_state = peer_state_from_chans(peer_chans, peer_storage_dir.get(&peer_pubkey).cloned().unwrap_or_default());
 			peer_state.latest_features = Readable::read(reader)?;
 			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
 		}
@@ -11167,7 +11231,7 @@ where
 			// still open, we need to replay any monitor updates that are for closed channels,
 			// creating the necessary peer_state entries as we go.
 			let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
-				Mutex::new(peer_state_from_chans(new_hash_map()))
+				Mutex::new(peer_state_from_chans(new_hash_map(), peer_storage_dir.get(&counterparty_id).cloned().unwrap_or_default()))
 			});
 			let mut peer_state = peer_state_mutex.lock().unwrap();
 			handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
@@ -11617,7 +11681,6 @@ where
 			entropy_source: args.entropy_source,
 			node_signer: args.node_signer,
 			signer_provider: args.signer_provider,
-
 			logger: args.logger,
 			default_configuration: args.default_config,
 		};
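
For reference, a minimal, dependency-free sketch of the store-and-echo behavior the diff above adds. The names here (`PeerStorageStore`, the `[u8; 33]` key type, `on_peer_storage`, `on_peer_connected`) are illustrative stand-ins, not LDK API; in the real code the blob lives in `PeerState::peer_storage` and is additionally persisted through a `ChannelMonitorUpdateStep::LatestPeerStorage` monitor update, then reloaded from the ChannelMonitors on deserialization.

use std::collections::HashMap;

// Stand-in for bitcoin::secp256k1::PublicKey so the sketch is self-contained.
type PublicKey = [u8; 33];

#[derive(Default)]
struct PeerStorageStore {
	// Mirrors PeerState::peer_storage: the latest blob each peer asked us to hold.
	blobs: HashMap<PublicKey, Vec<u8>>,
}

impl PeerStorageStore {
	// Mirrors internal_peer_storage: serve only peers we have a channel with,
	// then record the blob (the commit also persists it via a monitor update).
	fn on_peer_storage(&mut self, peer: PublicKey, data: Vec<u8>, has_channel: bool) {
		if !has_channel {
			return; // matches the total_channel_count() == 0 early return
		}
		self.blobs.insert(peer, data);
	}

	// Mirrors the peer_connected path: echo the stored blob back on reconnection
	// as YourPeerStorageMessage { data }, empty if we hold nothing for this peer.
	fn on_peer_connected(&self, peer: &PublicKey) -> Vec<u8> {
		self.blobs.get(peer).cloned().unwrap_or_default()
	}
}

fn main() {
	let peer = [2u8; 33];
	let mut store = PeerStorageStore::default();
	store.on_peer_storage(peer, b"encrypted backup".to_vec(), true);
	assert_eq!(store.on_peer_connected(&peer), b"encrypted backup".to_vec());
}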