
Commit e53ae91

Aditya Sharma authored and committed
lightning: Handle the peer storage message, persist it, and send it back to the respective peer upon reconnection.
1 parent 31b8588 commit e53ae91
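
At a high level, this change has the node keep one small opaque blob per peer and hand it back when that peer reconnects. Below is a minimal, standalone sketch of that store-and-echo flow using plain std types; the names (PeerStorageStore, PeerEntry) are illustrative and not part of LDK's API, and the funded-channel and size checks done by the real handler are omitted here (they appear in the diff below).

use std::collections::HashMap;

/// Illustrative only: the blob a peer has asked us to keep for it.
#[derive(Default)]
struct PeerEntry {
	peer_storage: Vec<u8>,
}

/// Illustrative only: maps a peer's compressed public key to its entry.
#[derive(Default)]
struct PeerStorageStore {
	peers: HashMap<[u8; 33], PeerEntry>,
}

impl PeerStorageStore {
	/// On a peer storage message: remember the latest blob for that peer.
	fn handle_peer_storage(&mut self, peer: [u8; 33], data: &[u8]) {
		self.peers.entry(peer).or_default().peer_storage = data.to_vec();
	}

	/// On reconnection: if we hold a blob for this peer, return it so a
	/// "your peer storage" message can be sent back.
	fn blob_to_return_on_reconnect(&self, peer: &[u8; 33]) -> Option<Vec<u8>> {
		self.peers.get(peer)
			.filter(|e| !e.peer_storage.is_empty())
			.map(|e| e.peer_storage.clone())
	}
}

fn main() {
	let peer = [2u8; 33];
	let mut store = PeerStorageStore::default();
	store.handle_peer_storage(peer, b"encrypted channel backup");
	assert_eq!(store.blob_to_return_on_reconnect(&peer), Some(b"encrypted channel backup".to_vec()));
}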

File tree

1 file changed: +83 -2 lines changed


lightning/src/ln/channelmanager.rs

Lines changed: 83 additions & 2 deletions
@@ -1168,9 +1168,23 @@ pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
 	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
 	/// [`ChannelMessageHandler::peer_disconnected`].
 	pub is_connected: bool,
+	peer_storage: Vec<u8>,
 }
 
 impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
+	pub fn new(features: &InitFeatures) -> Self {
+		Self {
+			channel_by_id: new_hash_map(),
+			inbound_channel_request_by_id: new_hash_map(),
+			latest_features: features.clone(),
+			pending_msg_events: Vec::new(),
+			in_flight_monitor_updates: BTreeMap::new(),
+			monitor_update_blocked_actions: BTreeMap::new(),
+			actions_blocking_raa_monitor_updates: BTreeMap::new(),
+			is_connected: true,
+			peer_storage: Vec::new(),
+		}
+	}
 	/// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
 	/// If true is passed for `require_disconnected`, the function will return false if we haven't
 	/// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`.
@@ -2431,7 +2445,7 @@ where
 	entropy_source: ES,
 	node_signer: NS,
 	signer_provider: SP,
-
+	our_peer_storage: FairRwLock<OurPeerStorage>,
 	logger: L,
 }
 
@@ -3249,7 +3263,7 @@ where
 			entropy_source,
 			node_signer,
 			signer_provider,
-
+			our_peer_storage: FairRwLock::new(OurPeerStorage::new()),
 			logger,
 		}
 	}
@@ -7000,6 +7014,7 @@ where
 				monitor_update_blocked_actions: BTreeMap::new(),
 				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: false,
+				peer_storage: Vec::new(),
 			}));
 			let mut peer_state = peer_state_mutex.lock().unwrap();
 
@@ -7860,6 +7875,42 @@ where
 		}
 	}
 
+	fn internal_peer_storage(&self, counterparty_node_id: &PublicKey, msg: &msgs::PeerStorageMessage) {
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let peer_state_mutex = match per_peer_state.get(counterparty_node_id) {
+			Some(peer_state_mutex) => peer_state_mutex,
+			None => return,
+		};
+		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None);
+
+		// Check if we have any channels with the peer (currently we only provide the service to peers we have a channel with).
+		if !peer_state.channel_by_id.values().any(|phase| matches!(phase, ChannelPhase::Funded(_))) {
+			log_debug!(logger, "We do not have any channel with {}", log_pubkey!(counterparty_node_id));
+			return;
+		}
+
+		#[cfg(not(test))]
+		if msg.data.len() > 1024 {
+			log_debug!(logger, "We do not allow more than 1 KiB of data for each peer in peer storage. Sending warning to peer {}", log_pubkey!(counterparty_node_id));
+			peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+				node_id: counterparty_node_id.clone(),
+				action: msgs::ErrorAction::SendWarningMessage {
+					msg: msgs::WarningMessage {
+						channel_id: ChannelId([0; 32]),
+						data: "Supports only data up to 1 KiB in peer storage.".to_owned()
+					},
+					log_level: Level::Trace,
+				}
+			});
+			return;
+		}
+
+		log_trace!(logger, "Received Peer Storage from {}", log_pubkey!(counterparty_node_id));
+		peer_state.peer_storage = msg.data.clone();
+	}
+
 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
 		let best_block = *self.best_block.read().unwrap();
 		let per_peer_state = self.per_peer_state.read().unwrap();
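
The handler above accepts a blob only if two checks pass: the peer has at least one funded channel with us, and (outside of test builds, where the check is compiled out via #[cfg(not(test))]) the blob is at most 1 KiB; otherwise a warning message is queued instead. A compact, illustrative restatement of that decision as a pure function, not an LDK API:

/// Illustrative only: the three possible outcomes of internal_peer_storage above.
#[derive(Debug, PartialEq)]
enum PeerStorageDecision {
	Store,
	IgnoreNoFundedChannel,
	WarnTooLarge,
}

fn peer_storage_decision(has_funded_channel: bool, data_len: usize) -> PeerStorageDecision {
	if !has_funded_channel {
		// Storage is only offered to peers we have a funded channel with.
		PeerStorageDecision::IgnoreNoFundedChannel
	} else if data_len > 1024 {
		// Oversized blobs are rejected and a warning is sent to the peer.
		PeerStorageDecision::WarnTooLarge
	} else {
		PeerStorageDecision::Store
	}
}

fn main() {
	assert_eq!(peer_storage_decision(false, 100), PeerStorageDecision::IgnoreNoFundedChannel);
	assert_eq!(peer_storage_decision(true, 4096), PeerStorageDecision::WarnTooLarge);
	assert_eq!(peer_storage_decision(true, 512), PeerStorageDecision::Store);
}
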
@@ -10471,6 +10522,8 @@ where
 	}
 
 	fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: &msgs::PeerStorageMessage) {
+		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
+		self.internal_peer_storage(&counterparty_node_id, msg);
 	}
 
 	fn handle_your_peer_storage(&self, counterparty_node_id: PublicKey, msg: &msgs::YourPeerStorageMessage) {
@@ -10796,6 +10849,7 @@ where
 				monitor_update_blocked_actions: BTreeMap::new(),
 				actions_blocking_raa_monitor_updates: BTreeMap::new(),
 				is_connected: true,
+				peer_storage: Vec::new(),
 			}));
 		},
 		hash_map::Entry::Occupied(e) => {
@@ -10825,6 +10879,16 @@ where
 			let peer_state = &mut *peer_state_lock;
 			let pending_msg_events = &mut peer_state.pending_msg_events;
 
+			if !peer_state.peer_storage.is_empty() {
+				pending_msg_events.push(events::MessageSendEvent::SendYourPeerStorageMessage {
+					node_id: counterparty_node_id.clone(),
+					msg: msgs::YourPeerStorageMessage {
+						data: peer_state.peer_storage.clone()
+					},
+				});
+			}
+
+
 			for (_, phase) in peer_state.channel_by_id.iter_mut() {
 				match phase {
 					ChannelPhase::Funded(chan) => {
@@ -11910,6 +11974,12 @@ where
 				if !peer_state.ok_to_remove(false) {
 					peer_pubkey.write(writer)?;
 					peer_state.latest_features.write(writer)?;
+
+					(peer_state.peer_storage.len() as u64).write(writer)?;
+					for p in peer_state.peer_storage.iter() {
+						p.write(writer)?;
+					}
+
 					if !peer_state.monitor_update_blocked_actions.is_empty() {
 						monitor_update_blocked_actions_per_peer
 							.get_or_insert_with(Vec::new)
@@ -12423,6 +12493,7 @@ where
 			monitor_update_blocked_actions: BTreeMap::new(),
 			actions_blocking_raa_monitor_updates: BTreeMap::new(),
 			is_connected: false,
+			peer_storage: Vec::new(),
 		}
 	};
 
@@ -12433,6 +12504,15 @@ where
 			let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(new_hash_map());
 			let mut peer_state = peer_state_from_chans(peer_chans);
 			peer_state.latest_features = Readable::read(reader)?;
+
+			let peer_storage_count: u64 = Readable::read(reader)?;
+			let mut peer_storage: Vec<u8> = Vec::with_capacity(cmp::min(peer_storage_count as usize, MAX_ALLOC_SIZE / mem::size_of::<u8>()));
+			for i in 0..peer_storage_count {
+				let x = Readable::read(reader)?;
+				peer_storage.insert(i as usize, x);
+			}
+			peer_state.peer_storage = peer_storage;
+
 			per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
 		}
 
@@ -13087,6 +13167,7 @@ where
 
 			last_days_feerates: Mutex::new(VecDeque::new()),
 
+			our_peer_storage: FairRwLock::new(our_peer_storage),
 			logger: args.logger,
 			default_configuration: args.default_config,
 		};
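
The write and read hunks above persist peer_storage with a length-prefixed layout: a u64 element count followed by the bytes themselves, and the reader caps its up-front allocation so a corrupt or hostile count cannot force a huge Vec. Below is a standalone sketch of the same pattern using std::io rather than LDK's Writeable/Readable traits; MAX_ALLOC_SIZE and the big-endian count here are illustrative choices, not LDK's exact encoding.

use std::io::{self, Cursor, Read, Write};

const MAX_ALLOC_SIZE: usize = 64 * 1024; // illustrative cap, not LDK's constant

fn write_blob<W: Write>(w: &mut W, blob: &[u8]) -> io::Result<()> {
	w.write_all(&(blob.len() as u64).to_be_bytes())?; // count first...
	w.write_all(blob) // ...then the raw bytes
}

fn read_blob<R: Read>(r: &mut R) -> io::Result<Vec<u8>> {
	let mut count_bytes = [0u8; 8];
	r.read_exact(&mut count_bytes)?;
	let count = u64::from_be_bytes(count_bytes) as usize;
	// Pre-allocate at most MAX_ALLOC_SIZE, then grow while reading element by element.
	let mut blob = Vec::with_capacity(count.min(MAX_ALLOC_SIZE));
	let mut byte = [0u8; 1];
	for _ in 0..count {
		r.read_exact(&mut byte)?;
		blob.push(byte[0]);
	}
	Ok(blob)
}

fn main() -> io::Result<()> {
	let mut buf = Vec::new();
	write_blob(&mut buf, b"opaque peer-storage blob")?;
	let restored = read_blob(&mut Cursor::new(buf))?;
	assert_eq!(restored, b"opaque peer-storage blob".to_vec());
	Ok(())
}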
