@@ -1380,6 +1380,8 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
 	pub is_connected: bool,
+	/// Holds the peer storage data for the channel partner on a per-peer basis.
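+	/// Populated from [`msgs::PeerStorage`] messages sent by the peer and echoed back
+	/// via [`msgs::PeerStorageRetrieval`] on reconnection.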
+	peer_storage: Vec<u8>,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -2848,6 +2850,13 @@ const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
 /// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
 const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
 
+/// The maximum allowed size for peer storage, in bytes.
+///
+/// This constant defines the upper limit on the amount of data that can be
+/// stored for each peer. It is set to 1024 bytes (1 KiB) to prevent excessive
+/// resource consumption.
+const MAX_PEER_STORAGE_SIZE: usize = 1024;
+
 /// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
 /// many peers we reject new (inbound) connections.
 const MAX_NO_CHANNEL_PEERS: usize = 250;
@@ -8221,9 +8230,75 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		}
 	}
 
-	fn internal_peer_storage_retrieval(&self, _counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) {}
+	fn internal_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) {
+		// TODO: Decrypt and check whether we have any stale or missing ChannelMonitors.
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex = match per_peer_state.get(&counterparty_node_id) {
+			Some(peer_state_mutex) => peer_state_mutex,
+			None => return,
+		};
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
 
-	fn internal_peer_storage(&self, _counterparty_node_id: PublicKey, _msg: msgs::PeerStorage) {}
+		log_debug!(logger, "Received unexpected peer_storage_retrieval from {}. This is unusual since we do not yet distribute peer storage. Sending a warning.", log_pubkey!(counterparty_node_id));
+		peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+			node_id: counterparty_node_id.clone(),
+			action: msgs::ErrorAction::SendWarningMessage {
+				msg: msgs::WarningMessage {
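+					// An all-zero channel_id indicates the warning applies to the
+					// connection as a whole rather than any specific channel (per BOLT #1).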
+					channel_id: ChannelId([0; 32]),
+					data: "Invalid peer_storage_retrieval message received.".to_owned()
+				},
+				log_level: Level::Trace,
+			}
+		});
+	}
+
+	fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex = match per_peer_state.get(&counterparty_node_id) {
+			Some(peer_state_mutex) => peer_state_mutex,
+			None => return,
+		};
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		// Check if we have any funded channels with this peer (we currently only provide
+		// the peer storage service to peers with which we have a funded channel).
+		if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
+			log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
+			peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+				node_id: counterparty_node_id.clone(),
+				action: msgs::ErrorAction::SendWarningMessage {
+					msg: msgs::WarningMessage {
+						channel_id: ChannelId([0; 32]),
+						data: "Ignoring peer_storage message, as peer storage is currently supported only for peers with an active funded channel.".to_owned()
+					},
+					log_level: Level::Trace,
+				}
+			});
+			return;
+		}
+
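+		// Note: this length check is compiled out in test builds, presumably so that
+		// functional tests can exercise storing larger payloads.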
+		#[cfg(not(test))]
+		if msg.data.len() > MAX_PEER_STORAGE_SIZE {
+			log_debug!(logger, "Sending a warning to peer {} and ignoring their peer storage request as it exceeds 1 KiB", log_pubkey!(counterparty_node_id));
+			peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+				node_id: counterparty_node_id.clone(),
+				action: msgs::ErrorAction::SendWarningMessage {
+					msg: msgs::WarningMessage {
+						channel_id: ChannelId([0; 32]),
+						data: format!("Only up to {} bytes of data are supported in peer storage.", MAX_PEER_STORAGE_SIZE)
+					},
+					log_level: Level::Trace,
+				}
+			});
+			return;
+		}
+
+		log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
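+		// Store the raw blob in memory; it is persisted with the ChannelManager via an
+		// odd TLV (type 19) in `write` below.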
+		peer_state.peer_storage = msg.data;
+	}
 
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
 		let best_block = *self.best_block.read().unwrap();
@@ -11728,6 +11803,7 @@ where
 					actions_blocking_raa_monitor_updates: BTreeMap::new(),
 					closed_channel_monitor_update_ids: BTreeMap::new(),
 					is_connected: true,
+					peer_storage: Vec::new(),
 				}));
 			},
 			hash_map::Entry::Occupied(e) => {
@@ -11757,6 +11833,15 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 
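+			// On reconnection, hand the peer back its stored blob so it can check for
+			// stale or missing channel state.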
+			if !peer_state.peer_storage.is_empty() {
+				pending_msg_events.push(events::MessageSendEvent::SendPeerStorageRetrieval {
+					node_id: counterparty_node_id.clone(),
+					msg: msgs::PeerStorageRetrieval {
+						data: peer_state.peer_storage.clone()
+					},
+				});
+			}
+
 			for (_, chan) in peer_state.channel_by_id.iter_mut() {
 				let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 				match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
@@ -12928,6 +13013,8 @@ where
 			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
 		}
 
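+		// Collect each serialized peer's storage blob; it is written below under odd
+		// TLV type 19, which older readers can safely ignore.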
+		let mut peer_storage_dir: Vec<(&PublicKey, &Vec<u8>)> = Vec::new();
+
 		(serializable_peer_count).write(writer)?;
 		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
 			// Peers which we have no channels to should be dropped once disconnected. As we
@@ -12937,6 +13024,8 @@ where
 			if !peer_state.ok_to_remove(false) {
 				peer_pubkey.write(writer)?;
 				peer_state.latest_features.write(writer)?;
+				peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage));
+
 				if !peer_state.monitor_update_blocked_actions.is_empty() {
 					monitor_update_blocked_actions_per_peer
 						.get_or_insert_with(Vec::new)
@@ -13058,6 +13147,7 @@ where
 			(14, decode_update_add_htlcs_opt, option),
 			(15, self.inbound_payment_id_secret, required),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});
 
 		Ok(())
@@ -13290,6 +13380,7 @@ where
 			monitor_update_blocked_actions: BTreeMap::new(),
 			actions_blocking_raa_monitor_updates: BTreeMap::new(),
 			closed_channel_monitor_update_ids: BTreeMap::new(),
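+			// Defaults to empty; filled in from TLV type 19 (if present) once all TLV
+			// fields have been read.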
+			peer_storage: Vec::new(),
 			is_connected: false,
 		}
 	};
@@ -13585,6 +13676,7 @@ where
 		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>> = None;
 		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
 		let mut inbound_payment_id_secret = None;
+		let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
 			(2, pending_intercepted_htlcs, option),
@@ -13601,8 +13693,10 @@ where
 			(14, decode_update_add_htlcs, option),
 			(15, inbound_payment_id_secret, option),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});
 		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
+		let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
 		if fake_scid_rand_bytes.is_none() {
 			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
 		}
@@ -13634,6 +13728,12 @@ where
 		}
 		let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
 
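+		// Restore each peer's storage blob; entries for peers we no longer track are
+		// silently dropped.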
+		for (peer_pubkey, peer_storage) in peer_storage_dir {
+			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
+				peer_state.get_mut().unwrap().peer_storage = peer_storage;
+			}
+		}
+
 		// Handle transitioning from the legacy TLV to the new one on upgrades.
 		if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates {
 			// We should never serialize an empty map.