
Commit 068549d

Aditya Sharma (adi2011) authored and committed
Handle PeerStorage Message and its Persistence
This commit introduces the handling and persistence of PeerStorage messages on a per-peer basis. The peer storage is stored within the PeerState to simplify management, ensuring we do not need to remove it when there are no active channels with the peer.

Key changes include:
- Add PeerStorage to PeerState for persistent storage.
- Implement internal_peer_storage to manage PeerStorage and its updates.
- Add resend logic in peer_connected() to resend PeerStorage before sending the channel reestablish message upon reconnection.
- Update PeerState's write() and read() methods to support PeerStorage persistence.

1 parent 81e89d8 commit 068549d

File tree: 1 file changed, +76 -3 lines
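As the commit message describes, the new handling boils down to a few rules: peer storage is only accepted from peers with at least one funded channel, blobs are capped at 1024 bytes, each new peer_storage message overwrites the previously stored blob, and the stored blob is sent back when the peer reconnects. The following is a minimal standalone sketch of those rules, not LDK's actual API: PeerBlobStore, PeerEntry, store_blob and blob_to_resend are hypothetical names, and only MAX_PEER_STORAGE_SIZE mirrors a constant from the diff below.

    use std::collections::HashMap;

    /// Mirrors MAX_PEER_STORAGE_SIZE from the diff below.
    const MAX_PEER_STORAGE_SIZE: usize = 1024;

    /// Hypothetical per-peer state: the backed-up blob plus whether we
    /// currently have a funded channel with the peer.
    #[derive(Default)]
    struct PeerEntry {
        peer_storage: Vec<u8>,
        has_funded_channel: bool,
    }

    /// Hypothetical store keyed by the peer's serialized node pubkey.
    #[derive(Default)]
    struct PeerBlobStore {
        peers: HashMap<[u8; 33], PeerEntry>,
    }

    impl PeerBlobStore {
        /// Accept an incoming blob, applying the same checks as the
        /// internal_peer_storage handler in the diff: unknown peers, peers
        /// without a funded channel, and oversized blobs are rejected.
        fn store_blob(&mut self, node_id: [u8; 33], data: Vec<u8>) -> Result<(), String> {
            let entry = self.peers.get_mut(&node_id).ok_or_else(|| "no such peer".to_string())?;
            if !entry.has_funded_channel {
                return Err("peer storage is only offered to peers with a funded channel".into());
            }
            if data.len() > MAX_PEER_STORAGE_SIZE {
                return Err(format!("supports only data up to {} bytes", MAX_PEER_STORAGE_SIZE));
            }
            entry.peer_storage = data; // each update overwrites the previous blob
            Ok(())
        }

        /// On reconnection, return the stored blob (if any) so it can be sent
        /// back to the peer before any channel_reestablish messages.
        fn blob_to_resend(&self, node_id: &[u8; 33]) -> Option<Vec<u8>> {
            self.peers.get(node_id)
                .filter(|entry| !entry.peer_storage.is_empty())
                .map(|entry| entry.peer_storage.clone())
        }
    }

    fn main() {
        let mut store = PeerBlobStore::default();
        let peer = [2u8; 33];
        store.peers.insert(peer, PeerEntry { peer_storage: Vec::new(), has_funded_channel: true });

        store.store_blob(peer, vec![0u8; 512]).expect("within the 1 KiB cap");
        assert!(store.store_blob(peer, vec![0u8; 2048]).is_err()); // over the cap
        assert_eq!(store.blob_to_resend(&peer).map(|blob| blob.len()), Some(512));
    }

The real implementation keeps the blob inside PeerState (under the existing per-peer mutex) rather than in a separate map, which is why it survives for as long as the peer entry itself does.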

lightning/src/ln/channelmanager.rs

Lines changed: 76 additions & 3 deletions
@@ -1404,6 +1404,8 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
 	pub is_connected: bool,
+	/// Holds the peer storage data for the channel partner on a per-peer basis.
+	peer_storage: Vec<u8>,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
@@ -2872,6 +2874,13 @@ const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
 /// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
 const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
 
+/// The maximum allowed size for peer storage, in bytes.
+///
+/// This constant defines the upper limit for the size of data
+/// that can be stored for a peer. It is set to 1024 bytes (1 kilobyte)
+/// to prevent excessive resource consumption.
+const MAX_PEER_STORAGE_SIZE: usize = 1024;
+
 /// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
 /// many peers we reject new (inbound) connections.
 const MAX_NO_CHANNEL_PEERS: usize = 250;
@@ -8245,11 +8254,50 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		}
 	}
 
-	fn internal_peer_storage_retrieval(&self, _counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) -> Result<(), MsgHandleErrInternal> {
-		Ok(())
+	fn internal_peer_storage_retrieval(&self, counterparty_node_id: PublicKey, _msg: msgs::PeerStorageRetrieval) -> Result<(), MsgHandleErrInternal> {
+		// TODO: Decrypt and check if have any stale or missing ChannelMonitor.
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		log_debug!(logger, "Received unexpected peer_storage_retrieval from {}. This is unusual since we do not yet distribute peer storage. Sending a warning.", log_pubkey!(counterparty_node_id));
+
+		Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+			"Invalid peer_storage_retrieval message received.".into(),
+		), ChannelId([0; 32])))
 	}
 
-	fn internal_peer_storage(&self, _counterparty_node_id: PublicKey, _msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
+	fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
+			.ok_or_else(|| {
+				debug_assert!(false);
+				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), ChannelId([0; 32]))
+			})?;
+
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
+
+		// Check if we have any channels with the peer (Currently we only provide the service to peers we have a channel with).
+		if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
+			log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				"Ignoring peer_storage message, as peer storage is currently supported only for \
+				peers with an active funded channel.".into(),
+			), ChannelId([0; 32])));
+		}
+
+		#[cfg(not(test))]
+		if msg.data.len() > MAX_PEER_STORAGE_SIZE {
+			log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));
+
+			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
+				format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
+			), ChannelId([0; 32])));
+		}
+
+		log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
+		peer_state.peer_storage = msg.data;
+
 		Ok(())
 	}
 
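A few notes on the handlers above: rejected messages (an unfunded peer or an oversized blob) are surfaced as ChannelError::Warn through MsgHandleErrInternal::from_chan_no_close, so the counterparty only receives a warning and no channel is closed; an unknown peer gets an error message via send_err_msg_no_close, likewise without closing anything. The size check sits behind #[cfg(not(test))], so tests can exercise blobs larger than MAX_PEER_STORAGE_SIZE. internal_peer_storage_retrieval also only warns: this version stores blobs for its peers but does not yet distribute its own backup, so receiving a peer_storage_retrieval is unexpected.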
@@ -11758,6 +11806,7 @@ where
 				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				closed_channel_monitor_update_ids: BTreeMap::new(),
 				is_connected: true,
+				peer_storage: Vec::new(),
 			}));
 		},
 		hash_map::Entry::Occupied(e) => {
@@ -11787,6 +11836,15 @@ where
 		let peer_state = &mut *peer_state_lock;
 		let pending_msg_events = &mut peer_state.pending_msg_events;
 
+		if !peer_state.peer_storage.is_empty() {
+			pending_msg_events.push(events::MessageSendEvent::SendPeerStorageRetrieval {
+				node_id: counterparty_node_id.clone(),
+				msg: msgs::PeerStorageRetrieval {
+					data: peer_state.peer_storage.clone()
+				},
+			});
+		}
+
 		for (_, chan) in peer_state.channel_by_id.iter_mut() {
 			let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
 			match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
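Note that the SendPeerStorageRetrieval event is queued onto pending_msg_events before the per-channel loop below queues its handshake messages, so a reconnecting peer receives its backed-up blob ahead of any channel_reestablish, matching the ordering described in the commit message.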
@@ -12995,6 +13053,8 @@ where
 			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
 		}
 
+		let mut peer_storage_dir: Vec<(&PublicKey, &Vec<u8>)> = Vec::new();
+
 		(serializable_peer_count).write(writer)?;
 		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
 			// Peers which we have no channels to should be dropped once disconnected. As we
@@ -13004,6 +13064,8 @@ where
 			if !peer_state.ok_to_remove(false) {
 				peer_pubkey.write(writer)?;
 				peer_state.latest_features.write(writer)?;
+				peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage));
+
 				if !peer_state.monitor_update_blocked_actions.is_empty() {
 					monitor_update_blocked_actions_per_peer
 						.get_or_insert_with(Vec::new)
@@ -13125,6 +13187,7 @@ where
 			(14, decode_update_add_htlcs_opt, option),
 			(15, self.inbound_payment_id_secret, required),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});
 
 		Ok(())
@@ -13357,6 +13420,7 @@ where
 				monitor_update_blocked_actions: BTreeMap::new(),
 				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				closed_channel_monitor_update_ids: BTreeMap::new(),
+				peer_storage: Vec::new(),
 				is_connected: false,
 			}
 		};
@@ -13652,6 +13716,7 @@ where
 		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>> = None;
 		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
 		let mut inbound_payment_id_secret = None;
+		let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
 		read_tlv_fields!(reader, {
 			(1, pending_outbound_payments_no_retry, option),
 			(2, pending_intercepted_htlcs, option),
@@ -13668,8 +13733,10 @@ where
 			(14, decode_update_add_htlcs, option),
 			(15, inbound_payment_id_secret, option),
 			(17, in_flight_monitor_updates, required),
+			(19, peer_storage_dir, optional_vec),
 		});
 		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
+		let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
 		if fake_scid_rand_bytes.is_none() {
 			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
 		}
@@ -13701,6 +13768,12 @@ where
 		}
 		let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
 
+		for (peer_pubkey, peer_storage) in peer_storage_dir {
+			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
+				peer_state.get_mut().unwrap().peer_storage = peer_storage;
+			}
+		}
+
 		// Handle transitioning from the legacy TLV to the new one on upgrades.
 		if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates {
 			// We should never serialize an empty map.
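On the persistence side, the per-peer blobs are collected only for peers that are serialized at all (those that are not ok_to_remove) and written as TLV type 19; being an odd type read with optional_vec, the field should be ignored by older readers and simply defaults to an empty Vec per peer when absent, keeping serialization compatible in both directions. After the TLV fields are read, the loop above copies each blob back into the matching PeerState.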
