@@ -1404,6 +1404,8 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
1404
1404
/// [`ChannelMessageHandler::peer_connected`] and no corresponding
1405
1405
/// [`ChannelMessageHandler::peer_disconnected`].
1406
1406
pub is_connected: bool,
1407
+ /// Holds the peer storage data for the channel partner on a per-peer basis.
1408
+ peer_storage: Vec<u8>,
1407
1409
}
1408
1410
1409
1411
impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -2872,6 +2874,13 @@ const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
2872
2874
/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
2873
2875
const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
2874
2876
2877
/// The maximum allowed size for peer storage, in bytes.
///
/// This constant defines the upper limit for the size of data
/// that can be stored for a peer. It is set to 1024 bytes (1 KiB)
/// to prevent excessive resource consumption.
const MAX_PEER_STORAGE_SIZE: usize = 1024;
2883
+
2875
2884
/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
2876
2885
/// many peers we reject new (inbound) connections.
2877
2886
const MAX_NO_CHANNEL_PEERS: usize = 250;
@@ -8245,11 +8254,50 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
8245
8254
}
8246
8255
}
8247
8256
8248
- fn internal_peer_storage_retrieval(&self, _counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) -> Result<(), MsgHandleErrInternal> {
8249
- Ok(())
8257
+ fn internal_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) -> Result<(), MsgHandleErrInternal> {
8258
+ // TODO: Decrypt and check if have any stale or missing ChannelMonitor.
8259
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
8260
+
8261
+ log_debug!(logger, "Received unexpected peer_storage_retrieval from {}. This is unusual since we do not yet distribute peer storage. Sending a warning.", log_pubkey!(counterparty_node_id));
8262
+
8263
+ Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
8264
+ "Invalid peer_storage_retrieval message received.".into(),
8265
+ ), ChannelId([0; 32])))
8250
8266
}
8251
8267
8252
- fn internal_peer_storage(&self, _counterparty_node_id: PublicKey, _msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
8268
+ fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
8269
+ let per_peer_state = self.per_peer_state.read().unwrap();
8270
+ let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
8271
+ .ok_or_else(|| {
8272
+ debug_assert!(false);
8273
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), ChannelId([0; 32]))
8274
+ })?;
8275
+
8276
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8277
+ let peer_state = &mut *peer_state_lock;
8278
+ let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
8279
+
8280
+ // Check if we have any channels with the peer (Currently we only provide the service to peers we have a channel with).
8281
+ if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
8282
+ log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
8283
+ return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
8284
+ "Ignoring peer_storage message, as peer storage is currently supported only for \
8285
+ peers with an active funded channel.".into(),
8286
+ ), ChannelId([0; 32])));
8287
+ }
8288
+
8289
+ #[cfg(not(test))]
8290
+ if msg.data.len() > MAX_PEER_STORAGE_SIZE {
8291
+ log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));
8292
+
8293
+ return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
8294
+ format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
8295
+ ), ChannelId([0; 32])));
8296
+ }
8297
+
8298
+ log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
8299
+ peer_state.peer_storage = msg.data;
8300
+
8253
8301
Ok(())
8254
8302
}
8255
8303
@@ -11758,6 +11806,7 @@ where
11758
11806
actions_blocking_raa_monitor_updates: BTreeMap::new(),
11759
11807
closed_channel_monitor_update_ids: BTreeMap::new(),
11760
11808
is_connected: true,
11809
+ peer_storage: Vec::new(),
11761
11810
}));
11762
11811
},
11763
11812
hash_map::Entry::Occupied(e) => {
@@ -11787,6 +11836,15 @@ where
11787
11836
let peer_state = &mut *peer_state_lock;
11788
11837
let pending_msg_events = &mut peer_state.pending_msg_events;
11789
11838
11839
+ if !peer_state.peer_storage.is_empty() {
11840
+ pending_msg_events.push(events::MessageSendEvent::SendPeerStorageRetrieval {
11841
+ node_id: counterparty_node_id.clone(),
11842
+ msg: msgs::PeerStorageRetrieval {
11843
+ data: peer_state.peer_storage.clone()
11844
+ },
11845
+ });
11846
+ }
11847
+
11790
11848
for (_, chan) in peer_state.channel_by_id.iter_mut() {
11791
11849
let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
11792
11850
match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
@@ -12995,6 +13053,8 @@ where
12995
13053
peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
12996
13054
}
12997
13055
13056
+ let mut peer_storage_dir: Vec<(&PublicKey, &Vec<u8>)> = Vec::new();
13057
+
12998
13058
(serializable_peer_count).write(writer)?;
12999
13059
for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
13000
13060
// Peers which we have no channels to should be dropped once disconnected. As we
@@ -13004,6 +13064,8 @@ where
13004
13064
if !peer_state.ok_to_remove(false) {
13005
13065
peer_pubkey.write(writer)?;
13006
13066
peer_state.latest_features.write(writer)?;
13067
+ peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage));
13068
+
13007
13069
if !peer_state.monitor_update_blocked_actions.is_empty() {
13008
13070
monitor_update_blocked_actions_per_peer
13009
13071
.get_or_insert_with(Vec::new)
@@ -13125,6 +13187,7 @@ where
13125
13187
(14, decode_update_add_htlcs_opt, option),
13126
13188
(15, self.inbound_payment_id_secret, required),
13127
13189
(17, in_flight_monitor_updates, required),
13190
+ (19, peer_storage_dir, optional_vec),
13128
13191
});
13129
13192
13130
13193
Ok(())
@@ -13357,6 +13420,7 @@ where
13357
13420
monitor_update_blocked_actions: BTreeMap::new(),
13358
13421
actions_blocking_raa_monitor_updates: BTreeMap::new(),
13359
13422
closed_channel_monitor_update_ids: BTreeMap::new(),
13423
+ peer_storage: Vec::new(),
13360
13424
is_connected: false,
13361
13425
}
13362
13426
};
@@ -13652,6 +13716,7 @@ where
13652
13716
let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>> = None;
13653
13717
let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
13654
13718
let mut inbound_payment_id_secret = None;
13719
+ let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
13655
13720
read_tlv_fields!(reader, {
13656
13721
(1, pending_outbound_payments_no_retry, option),
13657
13722
(2, pending_intercepted_htlcs, option),
@@ -13668,8 +13733,10 @@ where
13668
13733
(14, decode_update_add_htlcs, option),
13669
13734
(15, inbound_payment_id_secret, option),
13670
13735
(17, in_flight_monitor_updates, required),
13736
+ (19, peer_storage_dir, optional_vec),
13671
13737
});
13672
13738
let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
13739
+ let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
13673
13740
if fake_scid_rand_bytes.is_none() {
13674
13741
fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
13675
13742
}
@@ -13701,6 +13768,12 @@ where
13701
13768
}
13702
13769
let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
13703
13770
13771
+ for (peer_pubkey, peer_storage) in peer_storage_dir {
13772
+ if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
13773
+ peer_state.get_mut().unwrap().peer_storage = peer_storage;
13774
+ }
13775
+ }
13776
+
13704
13777
// Handle transitioning from the legacy TLV to the new one on upgrades.
13705
13778
if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates {
13706
13779
// We should never serialize an empty map.
0 commit comments