Skip to content

Commit f9d76bb

Browse files
committed
Stop relying on ChannelMonitor persistence after manager read
When we discover we've only partially claimed an MPP HTLC during `ChannelManager` reading, we need to add the payment preimage to all other `ChannelMonitor`s that were a part of the payment. We previously did this with a direct call on the `ChannelMonitor`, requiring users write the full `ChannelMonitor` to disk to ensure that updated information made it. This adds quite a bit of delay during initial startup - fully resilvering each `ChannelMonitor` just to handle this one case is incredibly excessive. Over the past few commits we dropped the need to pass HTLCs directly to the `ChannelMonitor`s, using the background events to provide `ChannelMonitorUpdate`s instead. Thus, here we finally drop the requirement to resilver `ChannelMonitor`s on startup.
1 parent cf58c44 commit f9d76bb

File tree

5 files changed

+21
-11
lines changed

5 files changed

+21
-11
lines changed

fuzz/src/chanmon_consistency.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -767,7 +767,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
767767
chain_monitor.latest_monitors.lock().unwrap().insert(outpoint, prev_state);
768768
}
769769
let mut monitor_refs = new_hash_map();
770-
for (outpoint, monitor) in monitors.iter_mut() {
770+
for (outpoint, monitor) in monitors.iter() {
771771
monitor_refs.insert(*outpoint, monitor);
772772
}
773773

lightning/src/ln/channelmanager.rs

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1607,7 +1607,7 @@ where
16071607
/// let mut channel_monitors = read_channel_monitors();
16081608
/// let args = ChannelManagerReadArgs::new(
16091609
/// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
1610-
/// router, message_router, logger, default_config, channel_monitors.iter_mut().collect(),
1610+
/// router, message_router, logger, default_config, channel_monitors.iter().collect(),
16111611
/// );
16121612
/// let (block_hash, channel_manager) =
16131613
/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
@@ -12217,9 +12217,12 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
1221712217
/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
1221812218
/// same way you would handle a [`chain::Filter`] call using
1221912219
/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
12220-
/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
12221-
/// 5) Disconnect/connect blocks on the [`ChannelManager`].
12222-
/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
12220+
/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
12221+
/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
12222+
/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
12223+
/// This is important if you have replayed a nontrivial number of blocks in step (4), allowing
12224+
/// you to avoid having to replay the same blocks if you shut down quickly after startup. It is
12225+
/// otherwise not required.
1222312226
/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
1222412227
/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
1222512228
/// the next step.
@@ -12302,7 +12305,7 @@ where
1230212305
/// this struct.
1230312306
///
1230412307
/// This is not exported to bindings users because we have no HashMap bindings
12305-
pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
12308+
pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
1230612309
}
1230712310

1230812311
impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
@@ -12325,7 +12328,7 @@ where
1232512328
entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
1232612329
chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
1232712330
default_config: UserConfig,
12328-
mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
12331+
mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
1232912332
) -> Self {
1233012333
Self {
1233112334
entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,

lightning/src/ln/functional_test_utils.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -684,7 +684,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
684684
// them to ensure we can write and reload our ChannelManager.
685685
{
686686
let mut channel_monitors = new_hash_map();
687-
for monitor in deserialized_monitors.iter_mut() {
687+
for monitor in deserialized_monitors.iter() {
688688
channel_monitors.insert(monitor.get_funding_txo().0, monitor);
689689
}
690690

@@ -1126,7 +1126,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
11261126
let mut node_read = &chanman_encoded[..];
11271127
let (_, node_deserialized) = {
11281128
let mut channel_monitors = new_hash_map();
1129-
for monitor in monitors_read.iter_mut() {
1129+
for monitor in monitors_read.iter() {
11301130
assert!(channel_monitors.insert(monitor.get_funding_txo().0, monitor).is_none());
11311131
}
11321132
<(BlockHash, TestChannelManager<'b, 'c>)>::read(&mut node_read, ChannelManagerReadArgs {

lightning/src/ln/reload_tests.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -426,7 +426,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
426426
chain_monitor: nodes[0].chain_monitor,
427427
tx_broadcaster: nodes[0].tx_broadcaster,
428428
logger: &logger,
429-
channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
429+
channel_monitors: node_0_stale_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
430430
}) { } else {
431431
panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
432432
};
@@ -444,7 +444,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
444444
chain_monitor: nodes[0].chain_monitor,
445445
tx_broadcaster: nodes[0].tx_broadcaster,
446446
logger: &logger,
447-
channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
447+
channel_monitors: node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
448448
}).unwrap();
449449
nodes_0_deserialized = nodes_0_deserialized_tmp;
450450
assert!(nodes_0_read.is_empty());

pending_changelog/3322-b.txt

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
API Updates
2+
===========
3+
4+
As a part of adding robustness against several unlikely scenarios, redundant `PaymentClaimed`
5+
`Event`s will be generated more frequently on startup for payments received on LDK 0.1 and
6+
newer. A new `Event::PaymentClaimed::payment_id` field may be used to better differentiate
7+
between redundant payments.

0 commit comments

Comments
 (0)