@@ -29,13 +29,15 @@ use bitcoin::hash_types::{Txid, BlockHash};
 use crate::chain;
 use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, write_util};
 use crate::chain::transaction::{OutPoint, TransactionData};
 use crate::ln::types::ChannelId;
+use crate::ln::msgs;
 use crate::sign::ecdsa::EcdsaChannelSigner;
 use crate::events::{self, Event, EventHandler, ReplayEvent};
 use crate::util::logger::{Logger, WithContext};
 use crate::util::errors::APIError;
+use crate::util::ser::VecWriter;
 use crate::util::wakers::{Future, Notifier};
 use crate::ln::channel_state::ChannelDetails;
 use crate::ln::msgs::SendingOnlyMessageHandler;
@@ -46,6 +48,8 @@ use core::ops::Deref;
 use core::sync::atomic::{AtomicUsize, Ordering};
 use bitcoin::hashes::Hash;
 use bitcoin::secp256k1::PublicKey;
+use core::mem;
+use crate::ln::our_peer_storage::OurPeerStorage;

 /// `Persist` defines behavior for persisting channel monitors: this could mean
 /// writing once to disk, and/or uploading to one or more backup services.
@@ -166,8 +170,8 @@ pub trait Persist<ChannelSigner: EcdsaChannelSigner> {
 	fn archive_persisted_channel(&self, channel_funding_outpoint: OutPoint);
 }

-struct MonitorHolder<ChannelSigner: EcdsaChannelSigner> {
-	monitor: ChannelMonitor<ChannelSigner>,
+pub(crate) struct MonitorHolder<ChannelSigner: EcdsaChannelSigner> {
+	pub(crate) monitor: ChannelMonitor<ChannelSigner>,
 	/// The full set of pending monitor updates for this Channel.
 	///
 	/// Note that this lock must be held from [`ChannelMonitor::update_monitor`] through to
@@ -182,7 +186,7 @@ struct MonitorHolder<ChannelSigner: EcdsaChannelSigner> {
 	/// could cause users to have a full [`ChannelMonitor`] on disk as well as a
 	/// [`ChannelMonitorUpdate`] which was already applied. While this isn't an issue for the
 	/// LDK-provided update-based [`Persist`], it is somewhat surprising for users so we avoid it.
-	pending_monitor_updates: Mutex<Vec<u64>>,
+	pub(crate) pending_monitor_updates: Mutex<Vec<u64>>,
 }

 impl<ChannelSigner: EcdsaChannelSigner> MonitorHolder<ChannelSigner> {
@@ -196,8 +200,8 @@ impl<ChannelSigner: EcdsaChannelSigner> MonitorHolder<ChannelSigner> {
 /// Note that this holds a mutex in [`ChainMonitor`] and may block other events until it is
 /// released.
 pub struct LockedChannelMonitor<'a, ChannelSigner: EcdsaChannelSigner> {
-	lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
-	funding_txo: OutPoint,
+	pub(crate) lock: RwLockReadGuard<'a, HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
+	pub(crate) funding_txo: OutPoint,
 }

 impl<ChannelSigner: EcdsaChannelSigner> Deref for LockedChannelMonitor<'_, ChannelSigner> {
@@ -246,6 +250,8 @@ pub struct ChainMonitor<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F
 	/// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process).
 	event_notifier: Notifier,
 	pending_send_only_events: Mutex<Vec<MessageSendEvent>>,
+	our_peer_storage: Mutex<OurPeerStorage>,
+	our_peerstorage_encryption_key: [u8; 32],
 }

 impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref> ChainMonitor<ChannelSigner, C, T, F, L, P>
@@ -395,6 +401,27 @@ where C::Target: chain::Filter,
 	P::Target: Persist<ChannelSigner>,
 {
 	fn send_peer_storage(&self, their_node_id: PublicKey) {
+		let monitors: RwLockReadGuard<'_, hash_map::HashMap<OutPoint, MonitorHolder<ChannelSigner>, RandomState>> = self.monitors.read().unwrap();
+		let mut ser_channels: Vec<u8> = Vec::new();
+		log_debug!(self.logger, "Sending Peer Storage from chainmonitor");
+		ser_channels.extend_from_slice(&(monitors.len() as u64).to_be_bytes());
+		for (_, mon) in monitors.iter() {
+			let mut ser_chan = VecWriter(Vec::new());
+
+			match write_util(&mon.monitor.inner.lock().unwrap(), true, &mut ser_chan) {
+				Ok(_) => {
+					ser_channels.extend_from_slice(&(ser_chan.0.len() as u64).to_be_bytes());
+					ser_channels.extend(ser_chan.0.iter());
+				}
+				Err(_) => {
+					panic!("Can not write monitor for {}", mon.monitor.channel_id())
+				}
+			}
+		}
+		self.our_peer_storage.lock().unwrap().stub_channels(ser_channels);
+
+		self.pending_send_only_events.lock().unwrap().push(events::MessageSendEvent::SendPeerStorageMessage { node_id: their_node_id,
+			msg: msgs::PeerStorageMessage { data: self.our_peer_storage.lock().unwrap().encrypt_our_peer_storage(self.our_peerstorage_encryption_key) } })
 	}
 }

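For reference, the blob assembled in `send_peer_storage` above is framed as a big-endian u64 monitor count followed by one record per monitor, each a big-endian u64 length prefix and the bytes produced by `write_util`. The sketch below is a self-contained illustration of that framing and of parsing it back; the helper names are hypothetical and are not LDK APIs.

// Illustrative framing/parsing of the peer-storage blob built above.
// Layout: [count: u64 BE] then, per monitor, [len: u64 BE][len bytes].
fn frame_monitors(serialized_monitors: &[Vec<u8>]) -> Vec<u8> {
	let mut out = Vec::new();
	out.extend_from_slice(&(serialized_monitors.len() as u64).to_be_bytes());
	for mon in serialized_monitors {
		out.extend_from_slice(&(mon.len() as u64).to_be_bytes());
		out.extend_from_slice(mon);
	}
	out
}

// Inverse of the framing; returns None on truncated or malformed input.
fn parse_framed_monitors(mut data: &[u8]) -> Option<Vec<Vec<u8>>> {
	fn read_u64_be(buf: &mut &[u8]) -> Option<u64> {
		if buf.len() < 8 { return None; }
		let (head, rest) = buf.split_at(8);
		*buf = rest;
		Some(u64::from_be_bytes(head.try_into().unwrap()))
	}
	let count = read_u64_be(&mut data)?;
	let mut monitors = Vec::new();
	for _ in 0..count {
		let len = read_u64_be(&mut data)? as usize;
		if data.len() < len { return None; }
		monitors.push(data[..len].to_vec());
		data = &data[len..];
	}
	Some(monitors)
}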
@@ -406,7 +433,7 @@ where C::Target: chain::Filter,
 	/// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may
 	/// always need to fetch full blocks absent another means for determining which blocks contain
 	/// transactions relevant to the watched channels.
-	pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
+	pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P, our_peerstorage_encryption_key: [u8; 32]) -> Self {
 		Self {
 			monitors: RwLock::new(new_hash_map()),
 			chain_source,
@@ -418,6 +445,8 @@ where C::Target: chain::Filter,
 			highest_chain_height: AtomicUsize::new(0),
 			event_notifier: Notifier::new(),
 			pending_send_only_events: Mutex::new(Vec::new()),
+			our_peer_storage: Mutex::new(OurPeerStorage::new()),
+			our_peerstorage_encryption_key
 		}
 	}

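Callers of `ChainMonitor::new` must now supply the 32-byte symmetric key that is stored as `our_peerstorage_encryption_key` and later handed to `encrypt_our_peer_storage` when each `PeerStorageMessage` is built. Below is one possible way a caller might derive such a key from node seed material, assuming the external `sha2` crate; the domain tag and the use of plain SHA-256 are illustrative assumptions, not a scheme this PR prescribes.

use sha2::{Digest, Sha256};

// Hypothetical derivation of the 32-byte peer-storage encryption key passed
// to `ChainMonitor::new`. The tag and hash construction are assumptions made
// for this sketch only.
fn derive_peer_storage_key(node_seed: &[u8; 32]) -> [u8; 32] {
	let mut hasher = Sha256::new();
	hasher.update(b"peer-storage-encryption-key"); // illustrative domain-separation tag
	hasher.update(node_seed);
	hasher.finalize().into()
}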
@@ -685,6 +714,18 @@ where C::Target: chain::Filter,
 			});
 		}
 	}
+
+	/// Retrieves all node IDs associated with the monitors.
+	///
+	/// This function collects the counterparty node IDs from all monitors into a `HashSet`,
+	/// ensuring unique IDs are returned.
+	fn get_peer_node_ids(&self) -> HashSet<PublicKey> {
+		let mon = self.monitors.read().unwrap();
+		mon
+			.values()
+			.map(|monitor| monitor.monitor.get_counterparty_node_id().unwrap().clone())
+			.collect()
+	}
 }

 impl<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
@@ -753,6 +794,12 @@ where
 				header, height, &*self.broadcaster, &*self.fee_estimator, &self.logger
 			)
 		});
+
+		// Send peer storage every time a new block arrives.
+		for node_id in self.get_peer_node_ids() {
+			self.send_peer_storage(node_id);
+		}
+
 		// Assume we may have some new events and wake the event processor
 		self.event_notifier.notify();
 	}
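Because `get_peer_node_ids` collects counterparty ids into a `HashSet`, a peer with several channels receives a single peer-storage message per connected block rather than one per channel. A trivial stand-alone illustration of that dedup behaviour (not LDK code) follows.

use std::collections::HashSet;

// Collecting the per-channel counterparty ids into a HashSet dedups peers we
// have multiple channels with, so `send_peer_storage` runs once per peer for
// each connected block. The byte arrays stand in for serialized public keys.
fn unique_peers(counterparty_per_channel: &[[u8; 33]]) -> HashSet<[u8; 33]> {
	counterparty_per_channel.iter().copied().collect()
}

fn main() {
	let alice = [2u8; 33];
	let bob = [3u8; 33];
	// Two channels with alice and one with bob yield two unique peers.
	assert_eq!(unique_peers(&[alice, alice, bob]).len(), 2);
}

Note that the `.unwrap()` on `get_counterparty_node_id()` in `get_peer_node_ids` assumes every tracked monitor knows its counterparty's node id.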