Skip to content

Commit 9cdc9c1

Browse files
committed
Handle events immediately if we are running during block connection
During block connection, we cannot apply `ChannelMonitorUpdate`s if we're running during the startup sequence (i.e. before the user has called any methods outside of block connection). We previously handled this by simply always pushing any `ChannelMonitorUpdate`s generated during block connection into the `pending_background_events` queue. However, this results in `ChannelMonitorUpdate`s going through the queue when we could just push them immediately. Here we explicitly check `background_events_processed_since_startup` and use that to decide whether to push updates through the background queue instead.
1 parent 09db50f commit 9cdc9c1

File tree

2 files changed

+33
-12
lines changed

2 files changed

+33
-12
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9274,10 +9274,16 @@ where
92749274
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
92759275
assert!(should_broadcast);
92769276
} else { unreachable!(); }
9277-
self.pending_background_events.lock().unwrap().push(
9278-
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
9279-
counterparty_node_id, funding_txo, update, channel_id,
9280-
});
9277+
if self.background_events_processed_since_startup.load(Ordering::Acquire) {
9278+
let res = self.chain_monitor.update_channel(funding_txo, &update);
9279+
debug_assert_eq!(res, ChannelMonitorUpdateStatus::Completed,
9280+
"TODO: We don't currently handle failures here, this logic is removed in the next commit");
9281+
} else {
9282+
self.pending_background_events.lock().unwrap().push(
9283+
BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
9284+
counterparty_node_id, funding_txo, update, channel_id,
9285+
});
9286+
}
92819287
}
92829288
self.finish_close_channel(failure);
92839289
}

lightning/src/ln/reorg_tests.rs

Lines changed: 23 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -268,6 +268,9 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
268268
assert_eq!(nodes[1].node.list_channels()[0].confirmations, Some(10));
269269

270270
if !reorg_after_reload {
271+
// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
272+
// is a ChannelForceClosed on the right channel with should_broadcast set.
273+
*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
271274
if use_funding_unconfirmed {
272275
let relevant_txids = nodes[0].node.get_relevant_txids();
273276
assert_eq!(relevant_txids.len(), 1);
@@ -293,12 +296,17 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
293296
let relevant_txids = nodes[0].node.get_relevant_txids();
294297
assert_eq!(relevant_txids.len(), 0);
295298

299+
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
300+
assert_eq!(txn.len(), 1);
301+
296302
{
297303
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
298304
let peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
299305
assert_eq!(peer_state.channel_by_id.len(), 0);
300306
assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
301307
}
308+
309+
check_added_monitors!(nodes[0], 1);
302310
}
303311

304312
if reload_node {
@@ -310,10 +318,13 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
310318
let chan_0_monitor_serialized = get_monitor!(nodes[0], chan.2).encode();
311319

312320
reload_node!(nodes[0], *nodes[0].node.get_current_default_configuration(), &nodes_0_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
313-
assert!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
314321
}
315322

316323
if reorg_after_reload {
324+
// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
325+
// is a ChannelForceClosed on the right channel with should_broadcast set.
326+
*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
327+
317328
if use_funding_unconfirmed {
318329
let relevant_txids = nodes[0].node.get_relevant_txids();
319330
assert_eq!(relevant_txids.len(), 1);
@@ -345,12 +356,18 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
345356
assert_eq!(peer_state.channel_by_id.len(), 0);
346357
assert_eq!(nodes[0].node.short_to_chan_info.read().unwrap().len(), 0);
347358
}
359+
360+
if reload_node {
361+
// The update may come when we free background events if we just restarted, or in-line if
362+
// we were already running.
363+
nodes[0].node.test_process_background_events();
364+
}
365+
check_added_monitors!(nodes[0], 1);
366+
367+
let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
368+
assert_eq!(txn.len(), 1);
348369
}
349-
// With expect_channel_force_closed set the TestChainMonitor will enforce that the next update
350-
// is a ChannelForcClosed on the right channel with should_broadcast set.
351-
*nodes[0].chain_monitor.expect_channel_force_closed.lock().unwrap() = Some((chan.2, true));
352-
nodes[0].node.test_process_background_events(); // Required to free the pending background monitor update
353-
check_added_monitors!(nodes[0], 1);
370+
354371
let expected_err = "Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.";
355372
if reorg_after_reload || !reload_node {
356373
handle_announce_close_broadcast_events(&nodes, 0, 1, true, "Channel closed because of an exception: Funding transaction was un-confirmed. Locked at 6 confs, now have 0 confs.");
@@ -361,8 +378,6 @@ fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_
361378

362379
check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: expected_err.to_owned() },
363380
[nodes[1].node.get_our_node_id()], 100000);
364-
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
365-
nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
366381

367382
// Now check that we can create a new channel
368383
if reload_node && nodes[0].node.per_peer_state.read().unwrap().len() == 0 {

0 commit comments

Comments
 (0)