
Avoid generating redundant claims after initial confirmation #1753

Merged
38 changes: 37 additions & 1 deletion lightning/src/chain/onchaintx.rs
@@ -430,7 +430,43 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
where F::Target: FeeEstimator,
L::Target: Logger,
{
if cached_request.outpoints().len() == 0 { return None } // But don't prune pending claiming request yet, we may have to resurrect HTLCs
let request_outpoints = cached_request.outpoints();
if request_outpoints.is_empty() {
// Don't prune pending claiming request yet, we may have to resurrect HTLCs. Untractable
// packages cannot be aggregated and will never be split, so we cannot end up with an
// empty claim.
debug_assert!(cached_request.is_malleable());
return None;
}
// If we've seen transaction inclusion in the chain for all outpoints in our request, we
// don't need to continue generating more claims. We'll keep tracking the request to fully
// remove it once it reaches the confirmation threshold, or to generate a new claim if the
// transaction is reorged out.
let mut all_inputs_have_confirmed_spend = true;
for outpoint in &request_outpoints {
if let Some(first_claim_txid_height) = self.claimable_outpoints.get(outpoint) {
Contributor:

btw, this variable name is a bit confusing, because it sounds like it's the height of the txid, but it's actually the (txid, height) tuple

Contributor:

that said, I don't have any better ideas ¯\_(ツ)_/¯

Collaborator:

It's copied from a number of other places, so if we want to change it, ISTM we should do it broadly through the file and in a followup.
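
A small aside on the naming point above: since the stored value is a (txid, height) tuple, destructuring it in the `if let` would give both fields explicit names instead of relying on `.0`. A minimal, hypothetical sketch (with &str stand-ins for the real outpoint and txid types, not the actual LDK code):

use std::collections::HashMap;

fn main() {
    // Stand-in for `claimable_outpoints`: outpoint -> (txid of the first claim, height seen).
    let mut claimable_outpoints: HashMap<&str, (&str, u32)> = HashMap::new();
    claimable_outpoints.insert("outpoint_a", ("first_claim_txid", 100_000));

    // Destructuring names both tuple fields explicitly.
    if let Some((first_claim_txid, height)) = claimable_outpoints.get("outpoint_a") {
        println!("first claim {} registered at height {}", first_claim_txid, height);
    }
}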

// We check for outpoint spends within claims individually rather than as a set
// since requests can have outpoints split off.
if !self.onchain_events_awaiting_threshold_conf.iter()
.any(|event_entry| if let OnchainEvent::Claim { claim_request } = event_entry.event {
first_claim_txid_height.0 == claim_request
} else {
// The onchain event is not a claim, keep seeking until we find one.
false
})
{
// Either we had no `OnchainEvent::Claim`, or we did but none matched the
// outpoint's registered spend.
all_inputs_have_confirmed_spend = false;
}
} else {
// The request's outpoint spend does not exist yet.
all_inputs_have_confirmed_spend = false;
}
}
if all_inputs_have_confirmed_spend {
return None;
}

// Compute new height timer to decide when we need to regenerate a new bumped version of the claim tx (if we
// didn't receive confirmation of it before, or not enough reorg-safe depth on top of it).
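For readers skimming the diff, here is a rough, self-contained sketch of the early-return check added in the hunk above, using simplified stand-in types (the `OutPoint`/`Txid` aliases, the two-variant `OnchainEvent` enum, and the free function are hypothetical simplifications for illustration, not the actual LDK types or API):

use std::collections::HashMap;

// Hypothetical stand-ins for the real LDK types, for illustration only.
type OutPoint = &'static str;
type Txid = &'static str;

enum OnchainEvent {
    Claim { claim_request: Txid },
    Other,
}

// Returns true when every outpoint in the request already has a registered spend whose
// txid matches a pending `Claim` event, i.e. the case in which the new code returns
// early instead of generating another (redundant) claim transaction.
fn all_inputs_have_confirmed_spend(
    request_outpoints: &[OutPoint],
    claimable_outpoints: &HashMap<OutPoint, (Txid, u32)>,
    onchain_events_awaiting_threshold_conf: &[OnchainEvent],
) -> bool {
    request_outpoints.iter().all(|outpoint| {
        match claimable_outpoints.get(outpoint) {
            // Outpoints are checked individually since they can be split off a request.
            Some((first_claim_txid, _height)) => onchain_events_awaiting_threshold_conf
                .iter()
                .any(|event| matches!(event, OnchainEvent::Claim { claim_request }
                    if claim_request == first_claim_txid)),
            // No spend registered for this outpoint yet.
            None => false,
        }
    })
}

fn main() {
    let claimable = HashMap::from([("outpoint_a", ("claim_tx_1", 100_000u32))]);
    let events = vec![OnchainEvent::Claim { claim_request: "claim_tx_1" }, OnchainEvent::Other];
    // All outpoints spent by a tracked claim: no new claim needs to be generated.
    assert!(all_inputs_have_confirmed_spend(&["outpoint_a"], &claimable, &events));
    // An outpoint with no registered spend keeps claim generation/bumping going.
    assert!(!all_inputs_have_confirmed_spend(&["outpoint_a", "outpoint_b"], &claimable, &events));
}

Note that the actual code keeps iterating with a mutable flag rather than an iterator combinator; the outcome of the check is the same.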
34 changes: 0 additions & 34 deletions lightning/src/ln/functional_tests.rs
@@ -2951,26 +2951,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) {
mine_transaction(&nodes[1], &timeout_tx);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
{
// B will rebroadcast a fee-bumped timeout transaction here.
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], commitment_tx[0]);
}

connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
{
// B may rebroadcast its own holder commitment transaction here, as a safeguard against
// some incredibly unlikely partial-eclipse-attack scenarios. That said, because the
// original commitment_tx[0] (also spending chan_2.3) has reached ANTI_REORG_DELAY B really
// shouldn't broadcast anything here, and in some connect style scenarios we do not.
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
if node_txn.len() == 1 {
check_spends!(node_txn[0], chan_2.3);
} else {
assert_eq!(node_txn.len(), 0);
}
}

expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
@@ -8001,22 +7983,6 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn });
let header_131 = BlockHeader { version: 0x20000000, prev_blockhash: header_130.block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_131, txdata: Vec::new() });
{
let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2); // 2 bumped penalty txn on revoked commitment tx

check_spends!(node_txn[0], revoked_local_txn[0]);
check_spends!(node_txn[1], revoked_local_txn[0]);
// Note that these are both bogus - they spend outputs already claimed in block 129:
if node_txn[0].input[0].previous_output == revoked_htlc_txn[0].input[0].previous_output {
assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
} else {
assert_eq!(node_txn[0].input[0].previous_output, revoked_htlc_txn[2].input[0].previous_output);
assert_eq!(node_txn[1].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
}

node_txn.clear();
};

// Few more blocks to confirm penalty txn
connect_blocks(&nodes[0], 4);