Commit 6c864b8

Avoid generating redundant claims after initial confirmation
These claims will never be valid, as a previous claim has already been confirmed on-chain. If that previous claim is later reorged out of the chain, the new check is bypassed and a fresh claim is generated. While this doesn't change much for our existing transaction-based claims, since broadcasting an already confirmed transaction acts as a NOP, it prevents us from yielding redundant event-based claims, which will be introduced as part of the anchors patchset.
1 parent: 7544030
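
In effect, the check added below asks: does some claim event still awaiting its confirmation threshold already spend every outpoint in this request? A minimal self-contained sketch of that predicate, with simplified stand-in types in place of the crate's real txid/outpoint/event definitions (the claim_already_confirmed helper is illustrative, not part of the codebase):

use std::collections::HashMap;

// Simplified stand-ins; the real handler keys `claimable_outpoints` by a
// bitcoin outpoint and stores (txid of the first claim, height) values.
type Txid = [u8; 32];
type OutPoint = (Txid, u32);

enum OnchainEvent {
    Claim { claim_request: Txid },
    Other,
}

/// True when a single pending claim event already spends every outpoint of
/// the request, i.e. regenerating the claim would be redundant.
fn claim_already_confirmed(
    request_outpoints: &[OutPoint],
    claimable_outpoints: &HashMap<OutPoint, (Txid, u32)>,
    pending_events: &[OnchainEvent],
) -> bool {
    pending_events.iter().any(|event| {
        if let OnchainEvent::Claim { claim_request } = event {
            // Every outpoint must map back to this exact claim transaction.
            request_outpoints.iter().all(|outpoint| {
                claimable_outpoints
                    .get(outpoint)
                    .map_or(false, |(first_claim_txid, _)| first_claim_txid == claim_request)
            })
        } else {
            false
        }
    })
}

As in the diff, a single claim event must account for all of the request's outpoints; otherwise regeneration proceeds as before.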

2 files changed (+36, -35)
lightning/src/chain/onchaintx.rs

Lines changed: 36 additions & 1 deletion
@@ -382,7 +382,42 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 	where F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
+		let request_outpoints = cached_request.outpoints();
+		if request_outpoints.is_empty() {
+			// Don't prune pending claiming request yet, we may have to resurrect HTLCs. Untractable
+			// packages cannot be aggregated and will never be split, so we cannot end up with an
+			// empty claim.
+			debug_assert!(cached_request.is_malleable());
+			return None;
+		}
+		// If we've seen transaction inclusion in the chain for our request, we don't need to
+		// continue generating more claims. We'll keep tracking the request to fully remove it once
+		// it reaches the confirmation threshold, or to generate a new claim if the transaction is
+		// reorged out.
+		if self.onchain_events_awaiting_threshold_conf.iter()
+			.any(|event_entry| if let OnchainEvent::Claim { claim_request } = event_entry.event {
+				for outpoint in &request_outpoints {
+					if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
+						if first_claim_txid_height.0 != claim_request {
+							// The request's outpoint spend does not correspond to the current claim event.
+							return false;
+						}
+						// There is a confirmed claim spending this request's outpoint.
+					} else {
+						// The request's outpoint spend does not exist yet.
+						return false;
+					}
+				}
+				// All of the request's outpoints were spent by the claim event.
+				true
+			} else {
+				// The onchain event is not a claim. Keep scanning until we find a claim event
+				// covering all of the request's outpoints.
+				false
+			})
+		{
+			return None;
+		}
 
 		// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
 		// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
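
The reorg path called out in the commit message falls out of the same model: when the block containing the claim is disconnected, the tracked OnchainEvent::Claim entry is dropped, the predicate above stops matching, and a fresh claim is generated on the next pass. A hedged sketch continuing the simplified types above (on_block_disconnected is illustrative; the real handler prunes these events by height in its block-disconnection handling):

fn on_block_disconnected(pending_events: &mut Vec<OnchainEvent>, reorged_claim: &Txid) {
    // Drop the tracked claim event. With it gone, `claim_already_confirmed`
    // returns false for this request again, so a new claim is generated.
    pending_events.retain(|event| match event {
        OnchainEvent::Claim { claim_request } => claim_request != reorged_claim,
        _ => true,
    });
}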

lightning/src/ln/functional_tests.rs

Lines changed: 0 additions & 34 deletions
@@ -2951,26 +2951,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
 	mine_transaction(&nodes[1], &timeout_tx);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_broadcast!(nodes[1], true);
-	{
-		// B will rebroadcast a fee-bumped timeout transaction here.
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		assert_eq!(node_txn.len(), 1);
-		check_spends!(node_txn[0], commitment_tx[0]);
-	}
 
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-	{
-		// B may rebroadcast its own holder commitment transaction here, as a safeguard against
-		// some incredibly unlikely partial-eclipse-attack scenarios. That said, because the
-		// original commitment_tx[0] (also spending chan_2.3) has reached ANTI_REORG_DELAY B really
-		// shouldn't broadcast anything here, and in some connect style scenarios we do not.
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
-		if node_txn.len() == 1 {
-			check_spends!(node_txn[0], chan_2.3);
-		} else {
-			assert_eq!(node_txn.len(), 0);
-		}
-	}
 
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
 	check_added_monitors!(nodes[1], 1);

@@ -8001,22 +7983,6 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
 	let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
 	connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
-	{
-		let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx
-
-		check_spends!(node_txn[0], revoked_local_txn[0]);
-		check_spends!(node_txn[1], revoked_local_txn[0]);
-		// Note that these are both bogus - they spend outputs already claimed in block 129:
-		if node_txn[0].input[0].previous_output == revoked_htlc_txn[0].input[0].previous_output {
-			assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
-		} else {
-			assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
-			assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
-		}
-
-		node_txn.clear();
-	};
 
 	// Few more blocks to confirm penalty txn
 	connect_blocks(&nodes[0], 4);
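
The assertions deleted above expected rebroadcasts that no longer happen: once the timeout transaction (respectively the penalty transactions) has confirmed, the handler now skips regenerating those claims entirely. A test wanting to pin down the new behavior could instead assert an empty broadcast queue, along these lines (illustrative, not part of the commit; harness names taken from this file):

// After timeout_tx has confirmed, no fee-bumped replacement is expected.
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert!(node_txn.is_empty());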
