Skip to content

Commit 3016ed2

Browse files
committed
Update test_dup_htlc_onchain_fails_on_reload for new persist API
ChannelMonitors now require that they be re-persisted before MonitorEvents are provided to the ChannelManager, the exact thing that test_dup_htlc_onchain_fails_on_reload was testing for when it *didn't* happen. As such, test_dup_htlc_onchain_fails_on_reload is now testing that we behave correctly when the API guarantees are not met, something we don't need to do. Here, we adapt it to test the new API requirements through ChainMonitor's calls to the Persist trait instead.
1 parent 5c2ff2c commit 3016ed2

File tree

2 files changed

+62
-26
lines changed

2 files changed

+62
-26
lines changed

lightning/src/ln/functional_tests.rs

Lines changed: 54 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
//! claim outputs on-chain.
1313
1414
use chain;
15-
use chain::{Confirm, Listen, Watch};
15+
use chain::{Confirm, Listen, Watch, ChannelMonitorUpdateErr};
1616
use chain::channelmonitor;
1717
use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
1818
use chain::transaction::OutPoint;
@@ -4099,21 +4099,15 @@ fn test_no_txn_manager_serialize_deserialize() {
40994099
send_payment(&nodes[0], &[&nodes[1]], 1000000);
41004100
}
41014101

4102-
#[test]
4103-
fn test_dup_htlc_onchain_fails_on_reload() {
4102+
fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
41044103
// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
41054104
// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
41064105
// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
41074106
// the ChannelMonitor tells it to.
41084107
//
4109-
// If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
4110-
// ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
4111-
// PaymentPathFailed event appearing). However, because we may not serialize the relevant
4112-
// ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
4113-
// consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
4114-
// and de-duplicates ChannelMonitor events.
4115-
//
4116-
// This tests that explicit tracking behavior.
4108+
// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
4109+
// ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
4110+
// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
41174111
let chanmon_cfgs = create_chanmon_cfgs(2);
41184112
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
41194113
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4122,7 +4116,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
41224116
let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
41234117
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
41244118

4125-
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
4119+
let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
41264120

41274121
// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
41284122
// nodes[0].
@@ -4140,35 +4134,59 @@ fn test_dup_htlc_onchain_fails_on_reload() {
41404134
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
41414135
assert_eq!(node_txn.len(), 3);
41424136
assert_eq!(node_txn[0], node_txn[1]);
4137+
check_spends!(node_txn[1], funding_tx);
4138+
check_spends!(node_txn[2], node_txn[1]);
41434139

41444140
assert!(nodes[1].node.claim_funds(payment_preimage));
41454141
check_added_monitors!(nodes[1], 1);
41464142

41474143
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
4148-
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4144+
connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
41494145
check_closed_broadcast!(nodes[1], true);
41504146
check_added_monitors!(nodes[1], 1);
41514147
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
41524148
let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
41534149

41544150
header.prev_blockhash = nodes[0].best_block_hash();
4155-
connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
4151+
connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
41564152

4157-
// Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is
4158-
// fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events
4159-
// happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s).
4160-
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4161-
get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
4153+
// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
4154+
// returning TemporaryFailure. This should cause the claim event to never make its way to the
4155+
// ChannelManager.
4156+
chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
4157+
chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
41624158

41634159
header.prev_blockhash = nodes[0].best_block_hash();
4164-
let claim_block = Block { header, txdata: claim_txn};
4160+
let claim_block = Block { header, txdata: claim_txn };
41654161
connect_block(&nodes[0], &claim_block);
4166-
expect_payment_sent!(nodes[0], payment_preimage);
41674162

4168-
// ChannelManagers generally get re-serialized after any relevant event(s). Since we just
4169-
// connected a highly-relevant block, it likely gets serialized out now.
4163+
let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
4164+
let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
4165+
.get_mut(&funding_txo).unwrap().drain().collect();
4166+
assert_eq!(mon_updates.len(), 1);
4167+
assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
4168+
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4169+
4170+
// If we persist the ChannelManager here, we should get the PaymentSent event after
4171+
// deserialization.
41704172
let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
4171-
nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4173+
if !persist_manager_post_event {
4174+
nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4175+
}
4176+
4177+
// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
4178+
// payment sent event.
4179+
chanmon_cfgs[0].persister.set_update_ret(Ok(()));
4180+
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
4181+
get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
4182+
nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
4183+
expect_payment_sent!(nodes[0], payment_preimage);
4184+
4185+
// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
4186+
// twice.
4187+
if persist_manager_post_event {
4188+
nodes[0].node.write(&mut chan_manager_serialized).unwrap();
4189+
}
41724190

41734191
// Now reload nodes[0]...
41744192
persister = test_utils::TestPersister::new();
@@ -4200,6 +4218,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
42004218
check_added_monitors!(nodes[0], 1);
42014219
nodes[0].node = &nodes_0_deserialized;
42024220

4221+
if persist_manager_post_event {
4222+
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
4223+
} else {
4224+
expect_payment_sent!(nodes[0], payment_preimage);
4225+
}
4226+
42034227
// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
42044228
// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
42054229
// payment events should kick in, leaving us with no pending events here.
@@ -4208,6 +4232,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
42084232
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
42094233
}
42104234

4235+
#[test]
4236+
fn test_dup_htlc_onchain_fails_on_reload() {
4237+
do_test_dup_htlc_onchain_fails_on_reload(true);
4238+
do_test_dup_htlc_onchain_fails_on_reload(false);
4239+
}
4240+
42114241
#[test]
42124242
fn test_manager_serialize_deserialize_events() {
42134243
// This test makes sure the events field in ChannelManager survives de/serialization

lightning/src/util/test_utils.rs

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -163,13 +163,16 @@ pub struct TestPersister {
163163
/// If this is set to Some(), after the next return, we'll always return this until update_ret
164164
/// is changed:
165165
pub next_update_ret: Mutex<Option<Result<(), chain::ChannelMonitorUpdateErr>>>,
166-
166+
/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
167+
/// MonitorUpdateId here.
168+
pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
167169
}
168170
impl TestPersister {
169171
pub fn new() -> Self {
170172
Self {
171173
update_ret: Mutex::new(Ok(())),
172174
next_update_ret: Mutex::new(None),
175+
chain_sync_monitor_persistences: Mutex::new(HashMap::new()),
173176
}
174177
}
175178

@@ -190,11 +193,14 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
190193
ret
191194
}
192195

193-
fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
196+
fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
194197
let ret = self.update_ret.lock().unwrap().clone();
195198
if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
196199
*self.update_ret.lock().unwrap() = next_ret;
197200
}
201+
if update.is_none() {
202+
self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
203+
}
198204
ret
199205
}
200206
}

0 commit comments

Comments
 (0)