Commit 963f002

Author: Antoine Riard
Add more comments about timelock assumptions and security model
Rename HTLC_FAIL_ANTI_REORG_DELAY to ANTI_REORG_DELAY because we are going to rely on it also to remove bump-candidate outpoints from the tracker after a claim gets enough depth. Rename HTLC_FAIL_TIMEOUT_BLOCKS to LATENCY_GRACE_PERIOD_BLOCKS because it conveys more meaningfully that we are doing a favor to our peer instead of ruthlessly enforcing the contract.

CLTV_EXPIRY_DELTA should be greater than LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS.

When we reach height + LATENCY_GRACE_PERIOD_BLOCKS and still have a pending unresolved outbound HTLC, we fail on-chain with our local commitment tx. At this point we expect our HTLC-timeout to get into the chain within a worst-case delay of CLTV_CLAIM_BUFFER. Once it is confirmed to a depth of ANTI_REORG_DELAY, we may safely fail backward the corresponding inbound HTLC.
1 parent 041b04c commit 963f002
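Plugging in the values set in this commit (CLTV_EXPIRY_DELTA = 6*12 = 72, LATENCY_GRACE_PERIOD_BLOCKS = 3, CLTV_CLAIM_BUFFER = 6, ANTI_REORG_DELAY = 6), the required budget is 3 + 6 + 6 + 3 = 18 blocks, well under the delta. A minimal sketch of the budget check, using only values that appear in the diff below:

```rust
// Constants as set in this commit; the names match the diff below.
const CLTV_EXPIRY_DELTA: u32 = 6 * 12;      // 72 blocks
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3; // grace we give (and take) around expiry
const CLTV_CLAIM_BUFFER: u32 = 6;           // worst-case blocks for our claim tx to confirm
const ANTI_REORG_DELAY: u32 = 6;            // confirmations before we act on an onchain event

fn main() {
    // grace for next-hop failure + on-chain claim + reorg safety + our own grace backward
    let budget = LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER
        + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS;
    assert!(CLTV_EXPIRY_DELTA > budget); // 72 > 18, so the delta leaves a wide margin
}
```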

File tree

3 files changed: +65 −52 lines changed


src/ln/channelmanager.rs

Lines changed: 12 additions & 11 deletions
```diff
@@ -28,7 +28,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::router::Route;
 use ln::msgs;
 use ln::onion_utils;
```
```diff
@@ -351,20 +351,21 @@ pub struct ChannelManager {
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?

-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
+// ie that if the next-hop peer fails the HTLC within LATENCY_GRACE_PERIOD_BLOCKS then we'll
+// still have CLTV_CLAIM_BUFFER left to time it out on-chain, then wait ANTI_REORG_DELAY to be
+// reorg-safe on the outbound HTLC, then fail the corresponding inbound HTLC backward, with us
+// seeing the last block of ANTI_REORG_DELAY at worst LATENCY_GRACE_PERIOD_BLOCKS late
+// relative to the network.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;

 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;

 macro_rules! secp_call {
     ( $res: expr, $err: expr ) => {
```
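The `#[deny(const_err)]` pattern used here turns the timelock requirement into a build-time check: `u32` subtraction cannot go negative, so if CLTV_EXPIRY_DELTA were smaller than the subtracted total, the constant would underflow and compilation would fail. A minimal standalone sketch of the same trick, with the diff's values inlined:

```rust
// Same pattern as CHECK_CLTV_EXPIRY_SANITY above: if the delta (72) were
// smaller than the sum of the budget components, this constant would
// underflow at compile time and rustc would reject the build.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK: u32 = 72 - 3 - 6 - 6 - 3; // delta - grace - claim buffer - reorg delay - grace = 54
```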
```diff
@@ -820,7 +821,7 @@ impl ChannelManager {
         let pending_forward_info = if next_hop_data.hmac == [0; 32] {
                 // OUR PAYMENT!
                 // final_expiry_too_soon
-                if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+                if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
                     return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
                 }
                 // final_incorrect_htlc_amount
```
```diff
@@ -912,8 +913,8 @@ impl ChannelManager {
                     break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
                 }
                 let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-                // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-                if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+                // We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail prior to going on chain CLTV_CLAIM_BUFFER blocks before expiration
+                if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
                     break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
                 }
                 if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
```
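With the constants in this commit (CLTV_CLAIM_BUFFER = 6, LATENCY_GRACE_PERIOD_BLOCKS = 3), the forwarding check above reduces to requiring more than 9 blocks of headroom. A hypothetical worked example of that condition (the helper name and values are illustrative, not from the codebase):

```rust
const CLTV_CLAIM_BUFFER: u32 = 6;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;

// Returns true when an incoming HTLC's expiry is too close to forward safely,
// mirroring the expiry_too_soon condition above (simplified, hypothetical helper).
fn expiry_too_soon(cltv_expiry: u32, cur_height: u32) -> bool {
    cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS
}

fn main() {
    assert!(expiry_too_soon(100_009, 100_000));  // only 9 blocks left: reject
    assert!(!expiry_too_soon(100_010, 100_000)); // 10 blocks left: acceptable
}
```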

src/ln/channelmonitor.rs

Lines changed: 32 additions & 20 deletions
```diff
@@ -303,12 +303,24 @@ const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
 pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
 /// Number of blocks by which point we expect our counterparty to have seen new blocks on the
 /// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
-/// copies of ChannelMonitors, including watchtowers).
-pub(crate) const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
-/// Number of blocks we wait on seeing a confirmed HTLC-Timeout or previous revoked commitment
-/// transaction before we fail corresponding inbound HTLCs. This prevents us from failing backwards
-/// and then getting a reorg resulting in us losing money.
-pub(crate) const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;
+/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
+/// at the CLTV expiration height, but giving a grace period to our peer may be profitable for us
+/// if they can provide a late preimage. Nevertheless, the grace period has to be accounted for in
+/// our CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of channel
+/// failures due to expiration, but increase the cost of funds being locked longer in case of failure.
+/// This delay also covers a low-power peer being slow to process blocks and thus being behind us
+/// on the accurate block height.
+/// In case of an onchain failure to be passed backward, we may see the last block of ANTI_REORG_DELAY
+/// at worst with this delay, so we are not only using this value as a mercy for them but also as a
+/// safeguard for ourselves, to fail backward with enough time.
+pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
+/// Number of blocks we wait on seeing an HTLC output being solved before we fail the corresponding
+/// inbound HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us
+/// losing money. We also use this delay to be sure we can remove our in-flight claim txn from the
+/// bump-candidates buffer. It may cause spurious generation of bumped claim txn, but that's all right
+/// given the outpoint is already solved by a previous claim tx. What we want to avoid is a reorg
+/// evicting our claim tx and us not keeping bumping another claim tx to solve the outpoint.
+pub(crate) const ANTI_REORG_DELAY: u32 = 6;

 #[derive(Clone, PartialEq)]
 enum Storage {
```
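The doc comments above describe the pattern the monitor relies on: trigger events are parked until they reach ANTI_REORG_DELAY confirmations, then acted on, and are dropped again if a reorg disconnects the relevant blocks. A simplified, hypothetical sketch of that bookkeeping (the real structure is `onchain_events_waiting_threshold_conf`, with richer event types; names and types here are illustrative):

```rust
use std::collections::HashMap;

const ANTI_REORG_DELAY: u32 = 6;

// Hypothetical stand-in for onchain_events_waiting_threshold_conf: events keyed
// by the height at which they become reorg-safe to act on.
struct ThresholdTracker {
    waiting: HashMap<u32, Vec<String>>,
}

impl ThresholdTracker {
    // A failure-trigger tx confirmed at `height`: act only once it has
    // ANTI_REORG_DELAY confirmations, i.e. at height + ANTI_REORG_DELAY - 1.
    fn trigger_confirmed(&mut self, height: u32, payment_hash: String) {
        self.waiting.entry(height + ANTI_REORG_DELAY - 1).or_default().push(payment_hash);
    }

    // On each connected block, fail backward everything that matured at this height.
    fn block_connected(&mut self, height: u32) {
        if let Some(events) = self.waiting.remove(&height) {
            for hash in events {
                println!("failing HTLC {} backward, now reorg-safe", hash);
            }
        }
    }

    // On disconnect, discard events that would have matured relative to this
    // height: their trigger tx may no longer be confirmed.
    fn block_disconnected(&mut self, height: u32) {
        self.waiting.remove(&(height + ANTI_REORG_DELAY - 1));
    }
}

fn main() {
    let mut tracker = ThresholdTracker { waiting: HashMap::new() };
    tracker.trigger_confirmed(100, "deadbeef".to_string());
    tracker.block_connected(105); // 100 + 6 - 1: six confirmations reached
}
```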
```diff
@@ -353,7 +365,7 @@ enum InputDescriptors {
 }

 /// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
-/// once they mature to enough confirmations (HTLC_FAIL_ANTI_REORG_DELAY)
+/// once they mature to enough confirmations (ANTI_REORG_DELAY)
 #[derive(Clone, PartialEq)]
 enum OnchainEvent {
     /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from
```
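The commit message names the two kinds of deferred action behind this enum: removing a claimed outpoint from the bump-candidates tracker, and failing an HTLC backward. A hypothetical, simplified shape for such an enum (the real variants carry more data; fields here are chosen purely for illustration):

```rust
// Hypothetical simplification of OnchainEvent; not the actual definition.
#[derive(Clone, PartialEq)]
enum OnchainEventSketch {
    /// An outpoint under claim by our own tx: once the claim has enough
    /// confirmations, drop the outpoint from the bump-candidates tracker.
    Claim { outpoint_txid: [u8; 32], vout: u32 },
    /// An HTLC to fail backward once its failure-trigger tx is reorg-safe.
    HTLCUpdate { payment_hash: [u8; 32] },
}
```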
```diff
@@ -1298,8 +1310,8 @@ impl ChannelMonitor {
             if let Some(ref outpoints) = self.remote_claimable_outpoints.get($txid) {
                 for &(ref htlc, ref source_option) in outpoints.iter() {
                     if let &Some(ref source) = source_option {
-                        log_info!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-                        match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                        log_info!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+                        match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
                             hash_map::Entry::Occupied(mut entry) => {
                                 let e = entry.get_mut();
                                 e.retain(|ref event| {
```
```diff
@@ -1396,7 +1408,7 @@ impl ChannelMonitor {
                     }
                 }
                 log_trace!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of remote commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
-                match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
                     hash_map::Entry::Occupied(mut entry) => {
                         let e = entry.get_mut();
                         e.retain(|ref event| {
```
```diff
@@ -1788,8 +1800,8 @@ impl ChannelMonitor {

         macro_rules! wait_threshold_conf {
             ($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
-                log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-                match self.onchain_events_waiting_threshold_conf.entry($height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height {})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+                match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
                     hash_map::Entry::Occupied(mut entry) => {
                         let e = entry.get_mut();
                         e.retain(|ref event| {
```
```diff
@@ -2022,7 +2034,7 @@ impl ChannelMonitor {
     }

     fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash) {
-        if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + HTLC_FAIL_ANTI_REORG_DELAY - 1)) {
+        if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
             //We may discard:
             //- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
             //- our claim tx on a commitment tx output
```
```diff
@@ -2059,16 +2071,16 @@ impl ChannelMonitor {
                     // from us until we've reached the point where we go on-chain with the
                     // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
                     // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
-                    // aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
+                    // aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
                     //     inbound_cltv == height + CLTV_CLAIM_BUFFER
-                    //     outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
-                    //     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+                    //     outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+                    //     LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
                     //     CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
-                    //     HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+                    //     LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
                     // The final, above, condition is checked for statically in channelmanager
                     // with CHECK_CLTV_EXPIRY_SANITY_2.
                     let htlc_outbound = $local_tx == htlc.offered;
-                    if ( htlc_outbound && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
+                    if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
                        (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
                         log_info!(self, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry);
                         return true;
```
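Substituting the commit's values into the final inequality of the comment chain shows why CHECK_CLTV_EXPIRY_SANITY_2 holds; a minimal sketch of the arithmetic:

```rust
fn main() {
    let (grace, claim_buffer, delta) = (3u32, 6u32, 72u32);
    // LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
    assert!(grace + 2 * claim_buffer <= delta); // 3 + 12 = 15 <= 72
}
```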
```diff
@@ -2206,8 +2218,8 @@ impl ChannelMonitor {
                 payment_preimage.0.copy_from_slice(&input.witness[1]);
                 htlc_updated.push((source, Some(payment_preimage), payment_hash));
             } else {
-                log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-                match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+                log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
+                match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
                     hash_map::Entry::Occupied(mut entry) => {
                         let e = entry.get_mut();
                         e.retain(|ref event| {
```
