@@ -2313,6 +2313,188 @@ where
2313
2313
logger: L,
2314
2314
}
2315
2315
2316
+
2317
+ /// This works as a mock [`ChannelMessageHandler`] it is used mainly when a user wants to run their node in
2318
+ /// offline mode i.e. This node won't communicate with any peer except sending a BogusChannelReestablish
2319
+ /// for all the [`StubChannelMonitors`] being tracked by the [`ChainMonitor`].
2320
+ ///
2321
+ /// [`FundRecoverer`] is parameterized by a number of components to achieve this.
2322
+ /// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
2323
+ /// channel
2324
+ /// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
2325
+ /// - [`Logger`] for logging operational information of varying degrees
2326
+ ///
2327
+ /// Additionally, it implements the following traits:
2328
+ /// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
2329
+ /// - [`MessageSendEventsProvider`] to similarly send such messages to peers
2330
+ ///
2331
+ pub struct FundRecoverer<SP: Deref, L:Deref, M: Deref>
2332
+ where
2333
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2334
+ SP::Target: SignerProvider,
2335
+ L::Target: Logger
2336
+ {
2337
+ default_configuration: UserConfig,
2338
+ chain_monitor: M,
2339
+ chain_hash: ChainHash,
2340
+ per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2341
+ logger: L,
2342
+ }
2343
+
2344
+ impl<SP:Deref, L:Deref, M:Deref> MessageSendEventsProvider for FundRecoverer<SP, L, M>
2345
+ where
2346
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2347
+ SP::Target: SignerProvider,
2348
+ L::Target: Logger
2349
+ {
2350
+ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
2351
+ let mut pending_events = Vec::new();
2352
+ let events = RefCell::new(Vec::new());
2353
+ let per_peer_state = self.per_peer_state.read().unwrap();
2354
+ for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
2355
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
2356
+ let peer_state = &mut *peer_state_lock;
2357
+ if peer_state.pending_msg_events.len() > 0 {
2358
+ pending_events.append(&mut peer_state.pending_msg_events);
2359
+ }
2360
+ }
2361
+ if !pending_events.is_empty() {
2362
+ events.replace(pending_events);
2363
+ }
2364
+ events.into_inner()
2365
+ }
2366
+ }
2367
+
2368
+ impl<SP:Deref, L: Deref, M:Deref> FundRecoverer<SP, L, M>
2369
+ where
2370
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2371
+ SP::Target: SignerProvider,
2372
+ L::Target: Logger
2373
+ {
2374
+ /// Creates a new instance of `FundRecoverer`.
2375
+ ///
2376
+ /// This function initializes a `FundRecoverer` with the provided `chain_monitor`,
2377
+ /// `logger`, configuration, and chain parameters. The `FundRecoverer` is set up with
2378
+ /// the default configuration and a chain hash derived from the genesis block of the
2379
+ /// specified network.
2380
+ pub fn new(chain_monitor: M, logger: L, config: UserConfig, params: ChainParameters) -> Self {
2381
+ return Self { default_configuration: config.clone(),
2382
+ chain_monitor,
2383
+ chain_hash: ChainHash::using_genesis_block(params.network),
2384
+ per_peer_state: FairRwLock::new(new_hash_map()),
2385
+ logger
2386
+ }
2387
+ }
2388
+ }
2389
+
2390
// A deliberately inert [`ChannelMessageHandler`]: every channel-protocol message is ignored
// (empty handler bodies below). The only active logic is in `peer_connected`, which queues a
// bogus `channel_reestablish` for each stub channel shared with the connecting peer.
impl<SP:Deref, L:Deref, M:Deref> ChannelMessageHandler for FundRecoverer<SP, L, M>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	SP::Target: SignerProvider,
	L::Target: Logger
{
	// --- No-op handlers: this node never participates in channel establishment, updates,
	// --- HTLC relay, splicing, or interactive tx construction, so all of these messages are
	// --- intentionally dropped.
	fn handle_open_channel(&self, _their_node_id: &PublicKey, _msg: &msgs::OpenChannel) {}
	fn handle_accept_channel(&self, _their_node_id: &PublicKey, _msg: &msgs::AcceptChannel) {}
	fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) {}
	fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) {}
	fn handle_channel_ready(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReady) {}
	fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) {}
	fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) {}
	fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) {}
	fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) {}
	fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) {}
	fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) {}
	fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) {}
	fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) {}
	fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) {}
	fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) {}
	fn handle_channel_update(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelUpdate) {}
	fn handle_open_channel_v2(&self, _their_node_id: &PublicKey, _msg: &msgs::OpenChannelV2) {}
	fn handle_accept_channel_v2(&self, _their_node_id: &PublicKey, _msg: &msgs::AcceptChannelV2) {}
	fn handle_stfu(&self, _their_node_id: &PublicKey, _msg: &msgs::Stfu) {}
	#[cfg(splicing)]
	fn handle_splice_init(&self, _their_node_id: &PublicKey, _msg: &msgs::SpliceInit) {}
	#[cfg(splicing)]
	fn handle_splice_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::SpliceAck) {}
	#[cfg(splicing)]
	fn handle_splice_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::SpliceLocked) {}
	fn handle_tx_add_input(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAddInput) {}
	fn handle_tx_add_output(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAddOutput) {}
	fn handle_tx_remove_input(&self, _their_node_id: &PublicKey, _msg: &msgs::TxRemoveInput) {}
	fn handle_tx_remove_output(&self, _their_node_id: &PublicKey, _msg: &msgs::TxRemoveOutput) {}
	fn handle_tx_complete(&self, _their_node_id: &PublicKey, _msg: &msgs::TxComplete) {}
	fn handle_tx_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::TxSignatures) {}
	fn handle_tx_init_rbf(&self, _their_node_id: &PublicKey, _msg: &msgs::TxInitRbf) {}
	fn handle_tx_ack_rbf(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAckRbf) {}
	fn handle_tx_abort(&self, _their_node_id: &PublicKey, _msg: &msgs::TxAbort) {}
	fn handle_peer_storage(&self, _their_node_id: &PublicKey, _msg: &msgs::PeerStorageMessage) {}
	fn handle_your_peer_storage(&self, _their_node_id: &PublicKey, _msg: &msgs::YourPeerStorageMessage) {}
	// NOTE(review): `peer_disconnected` is a no-op, so `is_connected` is never reset to
	// false. The `debug_assert!(!peer_state.is_connected, ...)` in `peer_connected` below
	// assumes it is — a reconnecting peer will trip it in debug builds. Confirm intended.
	fn peer_disconnected(&self, _their_node_id: &PublicKey) {}

	// Registers (or refreshes) the peer's state, then queues one bogus
	// `channel_reestablish` per stub channel shared with this peer.
	fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, _inbound: bool) -> Result<(), ()> {
		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), None, None);

		// Insert-or-update under the outer write lock; scoped so the write lock is released
		// before we re-acquire the map for reading below.
		{
			let mut peer_state_lock = self.per_peer_state.write().unwrap();
			match peer_state_lock.entry(counterparty_node_id.clone()) {
				hash_map::Entry::Vacant(e) => {
					// First time we see this peer: start with an empty channel/event state,
					// remembering only the features it advertised in `init`.
					e.insert(Mutex::new(PeerState {
						channel_by_id: new_hash_map(),
						inbound_channel_request_by_id: new_hash_map(),
						latest_features: init_msg.features.clone(),
						pending_msg_events: Vec::new(),
						in_flight_monitor_updates: BTreeMap::new(),
						monitor_update_blocked_actions: BTreeMap::new(),
						actions_blocking_raa_monitor_updates: BTreeMap::new(),
						is_connected: true,
					}));
				},
				hash_map::Entry::Occupied(e) => {
					// Known peer reconnecting: refresh its feature set, keep the rest.
					let mut peer_state = e.get().lock().unwrap();
					peer_state.latest_features = init_msg.features.clone();

					debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
					peer_state.is_connected = true;
				},
			}
		}
		log_debug!(logger, "Generating Bogus channel_reestablish events for all the stub channels with peer {}", log_pubkey!(counterparty_node_id));

		// For every stub channel the chain monitor tracks against this counterparty, queue a
		// `channel_reestablish` with zeroed commitment numbers and a dummy secret/point —
		// presumably to make the counterparty treat us as having lost data and force-close,
		// so funds can be recovered on-chain. TODO(review): confirm against the recovery flow.
		let per_peer_state = self.per_peer_state.read().unwrap();
		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			let pending_msg_events = &mut peer_state.pending_msg_events;
			for cid in self.chain_monitor.get_stub_cids_with_counterparty(*counterparty_node_id) {
				pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
					node_id: *counterparty_node_id,
					msg: msgs::ChannelReestablish {
						channel_id: cid,
						next_local_commitment_number: 0,
						next_remote_commitment_number: 0,
						// Dummy values: [1u8; 32] secret and the point derived from [2u8; 33].
						your_last_per_commitment_secret: [1u8; 32],
						my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
						next_funding_txid: None,
					},
				})
			}
		}
		Ok(())
	}

	// Incoming reestablish/error messages carry nothing actionable for a recovery-only node.
	fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) {}
	fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
	// Features advertised to peers are derived from the config given at construction.
	fn provided_node_features(&self) -> NodeFeatures {
		provided_node_features(&self.default_configuration)
	}
	fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
		provided_init_features(&self.default_configuration)
	}
	// Only the single chain this recoverer was constructed for.
	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
		Some(vec![self.chain_hash])
	}
}
2497
+
2316
2498
/// Chain-related parameters used to construct a new `ChannelManager`.
2317
2499
///
2318
2500
/// Typically, the block-specific parameters are derived from the best block hash for the network,
0 commit comments