
Commit be91587

Clean up PackageTemplate::get_height_timer to consider type
`PackageTemplate::get_height_timer` is used to decide when to next bump our feerate on claims which need to make it on chain within some window. It does so by comparing the current height with some deadline and increasing the bump rate as the deadline approaches. However, the deadline used is the `counterparty_spendable_height`, which is the height at which the counterparty might be able to spend the same output, irrespective of why. This doesn't make sense for all output types. For example, outbound HTLCs are spendable by our counterparty immediately (by revealing the preimage), but we don't need to get our HTLC timeout claims confirmed immediately, as we actually have `MIN_CLTV_EXPIRY_DELTA` blocks before the inbound edge of a forwarded HTLC becomes claimable by our (other) counterparty.

Thus, here, we adapt `get_height_timer` to look at the type of output being claimed and adjust the rate at which we bump the fee according to the real deadline.
1 parent 303a0c9 commit be91587
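
For orientation, here is a minimal standalone sketch of the tightening schedule this commit builds on, using the bump-interval constants from the diff below (the high-frequency interval of 1 block is assumed, as that constant is not shown in these hunks). It is illustrative only, not the LDK code itself:

const LOW_FREQUENCY_BUMP_INTERVAL: u32 = 15;
const MIDDLE_FREQUENCY_BUMP_INTERVAL: u32 = 3;
// Assumed value; the actual constant lives in lightning/src/chain/package.rs.
const HIGH_FREQUENCY_BUMP_INTERVAL: u32 = 1;

// Standalone version of the `timer_for_target_conf` closure added in this commit:
// pick the next bump height for a claim that must confirm by `target_conf`.
fn timer_for_target_conf(current_height: u32, target_conf: u32) -> u32 {
    if target_conf <= current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL {
        current_height + HIGH_FREQUENCY_BUMP_INTERVAL
    } else if target_conf <= current_height + LOW_FREQUENCY_BUMP_INTERVAL {
        current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL
    } else {
        current_height + LOW_FREQUENCY_BUMP_INTERVAL
    }
}

fn main() {
    assert_eq!(timer_for_target_conf(1_000, 1_100), 1_015); // deadline far away: lazy 15-block bumps
    assert_eq!(timer_for_target_conf(1_000, 1_010), 1_003); // deadline within 15 blocks: 3-block bumps
    assert_eq!(timer_for_target_conf(1_000, 1_002), 1_001); // deadline imminent: bump every block
}

With a far-off deadline the claim is only re-bumped every 15 blocks; once the deadline falls within the low- or middle-frequency window, the interval shrinks to 3 blocks and then to every block.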

File tree

4 files changed: +94, -34 lines

lightning/src/chain/package.rs

Lines changed: 81 additions & 10 deletions
@@ -28,6 +28,7 @@ use crate::ln::types::PaymentPreimage;
 use crate::ln::chan_utils::{self, TxCreationKeys, HTLCOutputInCommitment};
 use crate::ln::features::ChannelTypeFeatures;
 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
+use crate::ln::channelmanager::MIN_CLTV_EXPIRY_DELTA;
 use crate::ln::msgs::DecodeError;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT, compute_feerate_sat_per_1000_weight, FEERATE_FLOOR_SATS_PER_KW};
 use crate::chain::transaction::MaybeSignedTransaction;
@@ -104,8 +105,13 @@ pub(crate) fn verify_channel_type_features(channel_type_features: &Option<Channe
 // number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
 pub(crate) const WEIGHT_REVOKED_OUTPUT: u64 = 1 + 1 + 73 + 1 + 1 + 1 + 77;
 
+#[cfg(not(test))]
 /// Height delay at which transactions are fee-bumped/rebroadcasted with a low priority.
 const LOW_FREQUENCY_BUMP_INTERVAL: u32 = 15;
+#[cfg(test)]
+/// Height delay at which transactions are fee-bumped/rebroadcasted with a low priority.
+pub(crate) const LOW_FREQUENCY_BUMP_INTERVAL: u32 = 15;
+
 /// Height delay at which transactions are fee-bumped/rebroadcasted with a middle priority.
 const MIDDLE_FREQUENCY_BUMP_INTERVAL: u32 = 3;
 /// Height delay at which transactions are fee-bumped/rebroadcasted with a high priority.
@@ -945,18 +951,83 @@ impl PackageTemplate {
 			return None;
 		} else { panic!("API Error: Package must not be inputs empty"); }
 	}
-	/// In LN, output claimed are time-sensitive, which means we have to spend them before reaching some timelock expiration. At in-channel
-	/// output detection, we generate a first version of a claim tx and associate to it a height timer. A height timer is an absolute block
-	/// height that once reached we should generate a new bumped "version" of the claim tx to be sure that we safely claim outputs before
-	/// that our counterparty can do so. If timelock expires soon, height timer is going to be scaled down in consequence to increase
-	/// frequency of the bump and so increase our bets of success.
+	/// Gets the next height at which we should fee-bump this package, assuming we can do so and
+	/// the package is last fee-bumped at `current_height`.
+	///
+	/// As the deadline with which to get a claim confirmed approaches, the rate at which the timer
+	/// ticks increases.
 	pub(crate) fn get_height_timer(&self, current_height: u32) -> u32 {
-		if self.soonest_conf_deadline <= current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL {
-			return current_height + HIGH_FREQUENCY_BUMP_INTERVAL
-		} else if self.soonest_conf_deadline - current_height <= LOW_FREQUENCY_BUMP_INTERVAL {
-			return current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL
+		let mut height_timer = current_height + LOW_FREQUENCY_BUMP_INTERVAL;
+		let timer_for_target_conf = |target_conf| -> u32 {
+			if target_conf <= current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL {
+				current_height + HIGH_FREQUENCY_BUMP_INTERVAL
+			} else if target_conf <= current_height + LOW_FREQUENCY_BUMP_INTERVAL {
+				current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL
+			} else {
+				current_height + LOW_FREQUENCY_BUMP_INTERVAL
+			}
+		};
+		for (_, input) in self.inputs.iter() {
+			match input {
+				PackageSolvingData::RevokedOutput(_) => {
+					// Revoked Outputs will become spendable by our counterparty at the height
+					// where the CSV expires, which is also our `soonest_conf_deadline`.
+					height_timer = cmp::min(
+						height_timer,
+						timer_for_target_conf(self.soonest_conf_deadline),
+					);
+				},
+				PackageSolvingData::RevokedHTLCOutput(_) => {
+					// Revoked HTLC Outputs may be spendable by our counterparty right now, but
+					// after they spend them they still have to wait for an additional CSV delta
+					// before they can claim the full funds. Thus, we leave the timer at
+					// `LOW_FREQUENCY_BUMP_INTERVAL` until the HTLC output is spent, creating a
+					// `RevokedOutput`.
+				},
+				PackageSolvingData::CounterpartyOfferedHTLCOutput(outp) => {
+					// Incoming HTLCs being claimed by preimage should be claimed by the time their
+					// CLTV unlocks.
+					height_timer = cmp::min(
+						height_timer,
+						timer_for_target_conf(outp.htlc.cltv_expiry),
+					);
+				},
+				PackageSolvingData::HolderHTLCOutput(outp) if outp.preimage.is_some() => {
+					// We have the same deadline here as for `CounterpartyOfferedHTLCOutput`. Note
+					// that `outp.cltv_expiry` is always 0 in this case, but
+					// `soonest_conf_deadline` holds the real HTLC expiry.
+					height_timer = cmp::min(
+						height_timer,
+						timer_for_target_conf(self.soonest_conf_deadline),
+					);
+				},
+				PackageSolvingData::CounterpartyReceivedHTLCOutput(outp) => {
+					// Outgoing HTLCs being claimed through their timeout should be claimed fast
+					// enough to allow us to claim before the CLTV lock expires on the inbound
+					// edge (assuming the HTLC was forwarded).
+					height_timer = cmp::min(
+						height_timer,
+						timer_for_target_conf(outp.htlc.cltv_expiry + MIN_CLTV_EXPIRY_DELTA as u32),
+					);
+				},
+				PackageSolvingData::HolderHTLCOutput(outp) => {
+					// We have the same deadline for holder timeout claims as for
+					// `CounterpartyReceivedHTLCOutput`
+					height_timer = cmp::min(
+						height_timer,
+						timer_for_target_conf(outp.cltv_expiry + MIN_CLTV_EXPIRY_DELTA as u32),
+					);
+				},
+				PackageSolvingData::HolderFundingOutput(_) => {
+					// We should apply a smart heuristic here based on the HTLCs in the commitment
+					// transaction, but we don't currently have that information available so
+					// instead we just bump once per block.
+					height_timer =
+						cmp::min(height_timer, current_height + HIGH_FREQUENCY_BUMP_INTERVAL);
+				},
+			}
 		}
-		current_height + LOW_FREQUENCY_BUMP_INTERVAL
+		height_timer
 	}
 
 	/// Returns value in satoshis to be included as package outgoing output amount and feerate
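
To see the effect of the `CounterpartyReceivedHTLCOutput` arm concretely, here is a small self-contained arithmetic sketch (heights are made up, and `MIN_CLTV_EXPIRY_DELTA` is assumed to be 72 blocks); an outgoing HTLC whose timeout has already passed no longer forces per-block bumping, because the real deadline is the inbound edge's expiry:

// Illustrative only; mirrors the new per-type deadline logic, not the actual LDK types.
const MIN_CLTV_EXPIRY_DELTA: u32 = 72; // assumed value
const LOW_FREQUENCY_BUMP_INTERVAL: u32 = 15;
const MIDDLE_FREQUENCY_BUMP_INTERVAL: u32 = 3;

fn main() {
    let current_height = 810u32;
    let htlc_cltv_expiry = 800u32; // outgoing HTLC already timed out

    // Old behaviour: the deadline (the HTLC expiry itself) is already behind us,
    // so the claim was being bumped at the high-frequency, every-block rate.
    assert!(htlc_cltv_expiry <= current_height + MIDDLE_FREQUENCY_BUMP_INTERVAL);

    // New behaviour: the deadline that matters is when the inbound edge of a
    // forwarded HTLC becomes claimable, MIN_CLTV_EXPIRY_DELTA blocks later.
    let real_deadline = htlc_cltv_expiry + MIN_CLTV_EXPIRY_DELTA; // 872
    assert!(real_deadline > current_height + LOW_FREQUENCY_BUMP_INTERVAL);

    // So the next fee bump can wait the full low-frequency interval.
    println!("next bump at height {}", current_height + LOW_FREQUENCY_BUMP_INTERVAL); // 825
}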

lightning/src/ln/functional_tests.rs

Lines changed: 2 additions & 2 deletions
@@ -2811,7 +2811,7 @@ fn claim_htlc_outputs_single_tx() {
 
 	// Filter out any non justice transactions.
 	node_txn.retain(|tx| tx.input[0].previous_output.txid == revoked_local_txn[0].compute_txid());
-	assert!(node_txn.len() > 3);
+	assert!(node_txn.len() >= 3);
 
 	assert_eq!(node_txn[0].input.len(), 1);
 	assert_eq!(node_txn[1].input.len(), 1);
@@ -7805,7 +7805,7 @@ fn test_bump_penalty_txn_on_remote_commitment() {
 	assert_ne!(feerate_preimage, 0);
 
 	// After exhaustion of height timer, new bumped claim txn should have been broadcast, check it
-	connect_blocks(&nodes[1], 1);
+	connect_blocks(&nodes[1], crate::chain::package::LOW_FREQUENCY_BUMP_INTERVAL);
 	{
 		let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 1);

lightning/src/ln/monitor_tests.rs

Lines changed: 8 additions & 19 deletions
@@ -2299,17 +2299,13 @@ fn do_test_restored_packages_retry(check_old_monitor_retries_after_upgrade: bool
 	}
 
 	// Connecting more blocks should result in the HTLC transactions being rebroadcast.
-	connect_blocks(&nodes[0], 6);
+	connect_blocks(&nodes[0], crate::chain::package::LOW_FREQUENCY_BUMP_INTERVAL);
 	if check_old_monitor_retries_after_upgrade {
 		check_added_monitors(&nodes[0], 1);
 	}
 	{
 		let txn = nodes[0].tx_broadcaster.txn_broadcast();
-		if !nodes[0].connect_style.borrow().skips_blocks() {
-			assert_eq!(txn.len(), 6);
-		} else {
-			assert!(txn.len() < 6);
-		}
+		assert_eq!(txn.len(), 1);
 		for tx in txn {
 			assert_eq!(tx.input.len(), htlc_timeout_tx.input.len());
 			assert_eq!(tx.output.len(), htlc_timeout_tx.output.len());
@@ -2423,9 +2419,9 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) {
 	connect_blocks(&nodes[0], 1);
 	check_htlc_retry(true, false);
 
-	// Connect one more block, expecting a retry with a fee bump. Unfortunately, we cannot bump HTLC
-	// transactions pre-anchors.
-	connect_blocks(&nodes[0], 1);
+	// Connect a few more blocks, expecting a retry with a fee bump. Unfortunately, we cannot bump
+	// HTLC transactions pre-anchors.
+	connect_blocks(&nodes[0], crate::chain::package::LOW_FREQUENCY_BUMP_INTERVAL);
 	check_htlc_retry(true, anchors);
 
 	// Trigger a call and we should have another retry, but without a bump.
@@ -2437,20 +2433,13 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) {
 	nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
 	check_htlc_retry(true, anchors);
 
-	// Connect one more block, expecting a retry with a fee bump. Unfortunately, we cannot bump HTLC
-	// transactions pre-anchors.
-	connect_blocks(&nodes[0], 1);
+	// Connect a few more blocks, expecting a retry with a fee bump. Unfortunately, we cannot bump
+	// HTLC transactions pre-anchors.
+	connect_blocks(&nodes[0], crate::chain::package::LOW_FREQUENCY_BUMP_INTERVAL);
 	let htlc_tx = check_htlc_retry(true, anchors).unwrap();
 
 	// Mine the HTLC transaction to ensure we don't retry claims while they're confirmed.
 	mine_transaction(&nodes[0], &htlc_tx);
-	// If we have a `ConnectStyle` that advertises the new block first without the transactions,
-	// we'll receive an extra bumped claim.
-	if nodes[0].connect_style.borrow().updates_best_block_first() {
-		nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value);
-		nodes[0].wallet_source.remove_utxo(bitcoin::OutPoint { txid: htlc_tx.compute_txid(), vout: 1 });
-		check_htlc_retry(true, anchors);
-	}
 	nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
 	check_htlc_retry(false, false);
 }

lightning/src/ln/reorg_tests.rs

Lines changed: 3 additions & 3 deletions
@@ -847,9 +847,9 @@ fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_c
 	{
 		let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
 		if nodes[0].connect_style.borrow().updates_best_block_first() {
-			// `commitment_a` and `htlc_timeout_a` are rebroadcast because the best block was
-			// updated prior to seeing `commitment_b`.
-			assert_eq!(txn.len(), if anchors { 2 } else { 3 });
+			// `commitment_a` is rebroadcast because the best block was updated prior to seeing
+			// `commitment_b`.
+			assert_eq!(txn.len(), 2);
 			check_spends!(txn.last().unwrap(), commitment_b);
 		} else {
 			assert_eq!(txn.len(), 1);
