
Commit a4a5a3b

Author: Antoine Riard (committed)
Add more comments about timelock assumptions and security model
Rename HTLC_FAIL_ANTI_REORG_DELAY to ANTI_REORG_DELAY because we are also going to rely on it to remove bump-candidate outpoints from the tracker once a claim gets enough depth. Rename HTLC_FAIL_TIMEOUT_BLOCKS to GRACE_PERIOD_BLOCKS because it conveys more meaningfully that we are doing our peer a favor rather than ruthlessly enforcing the contract. CLTV_EXPIRY_DELTA should be greater than GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY. When the chain reaches the HTLC expiry height + GRACE_PERIOD_BLOCKS and we still have a pending unsolved outbound HTLC, we fail it on-chain with our local commitment tx. At that point we expect to get into the chain within a worst-case delay of CLTV_CLAIM_BUFFER. Once our HTLC-timeout is confirmed with ANTI_REORG_DELAY we may safely fail the corresponding inbound HTLC backward.
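For illustration only, a minimal standalone sketch of the timelock budget described above, using the constant values introduced by this commit (GRACE_PERIOD_BLOCKS = 3, CLTV_CLAIM_BUFFER = 6, ANTI_REORG_DELAY = 6) and the CLTV_EXPIRY_DELTA of 6 * 12 from channelmanager.rs; the helper function is hypothetical and not part of the tree:

// Hedged sketch, not commit code: the per-hop delta must cover the grace period we grant
// the peer, the worst-case delay to confirm our HTLC-timeout, and the reorg-safety wait
// before failing the inbound HTLC backward.
const GRACE_PERIOD_BLOCKS: u32 = 3;
const CLTV_CLAIM_BUFFER: u32 = 6;
const ANTI_REORG_DELAY: u32 = 6;
const CLTV_EXPIRY_DELTA: u32 = 6 * 12;

// Hypothetical helper for illustration.
fn cltv_delta_is_safe() -> bool {
    CLTV_EXPIRY_DELTA > GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY
}

fn main() {
    assert!(cltv_delta_is_safe()); // 72 > 3 + 6 + 6
}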
1 parent 041b04c commit a4a5a3b

3 files changed: +59 additions, −52 deletions

src/ln/channelmanager.rs

Lines changed: 11 additions & 11 deletions
@@ -28,7 +28,7 @@ use secp256k1;
use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use ln::router::Route;
use ln::msgs;
use ln::onion_utils;
@@ -351,20 +351,20 @@ pub struct ChannelManager {
const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?

-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + GRACE_PERIOD_BLOCKS,
+// ie that if the next-hop peer fails the HTLC within GRACE_PERIOD_BLOCKS then we'll
+// still have CLTV_CLAIM_BUFFER left to time it out on-chain, then wait
+// ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC before failing the
+// corresponding inbound HTLC backward.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY;

// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
// ChannelMontior::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;

macro_rules! secp_call {
( $res: expr, $err: expr ) => {
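The two constants above double as compile-time guards: the const expressions are unsigned, so if CLTV_EXPIRY_DELTA is set too small the subtraction underflows and the crate fails to build instead of silently wrapping. A minimal standalone sketch of that pattern with the commit's values plugged in (the names mirror the diff; this is not the actual crate code):

const CLTV_EXPIRY_DELTA: u32 = 6 * 12;
const GRACE_PERIOD_BLOCKS: u32 = 3;
const CLTV_CLAIM_BUFFER: u32 = 6;
const ANTI_REORG_DELAY: u32 = 6;

// 72 - 3 - 6 - 6 = 57: fine. If CLTV_EXPIRY_DELTA dropped below 15, this u32 subtraction
// would underflow, the constant could not be evaluated, and compilation would fail.
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 =
    CLTV_EXPIRY_DELTA - GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY;

fn main() {
    println!("remaining budget: {} blocks", CHECK_CLTV_EXPIRY_SANITY);
}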
@@ -820,7 +820,7 @@ impl ChannelManager {
let pending_forward_info = if next_hop_data.hmac == [0; 32] {
// OUR PAYMENT!
// final_expiry_too_soon
-if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + GRACE_PERIOD_BLOCKS) as u64 {
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
}
// final_incorrect_htlc_amount
@@ -912,8 +912,8 @@ impl ChannelManager {
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-// We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+// We want to have at least GRACE_PERIOD_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
+if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
}
if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
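A standalone sketch of the two forwarding-expiry checks in this hunk, with hypothetical heights; the function and error names are illustrative stand-ins, not the real ChannelManager code paths:

const CLTV_CLAIM_BUFFER: u32 = 6;
const GRACE_PERIOD_BLOCKS: u32 = 3;
const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7;

// Hypothetical stand-in for the checks in decode_update_add_htlc_onion.
fn check_forward_expiry(cltv_expiry: u32, cur_height: u32) -> Result<(), &'static str> {
    if cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + GRACE_PERIOD_BLOCKS {
        // Not enough blocks left to grant the grace period and still claim on-chain in time.
        return Err("expiry_too_soon");
    }
    if cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY {
        // Refuse to lock funds unreasonably far into the future.
        return Err("expiry_too_far");
    }
    Ok(())
}

fn main() {
    assert_eq!(check_forward_expiry(100_009, 100_000), Err("expiry_too_soon")); // 100_009 <= 100_000 + 6 + 3
    assert!(check_forward_expiry(100_072, 100_000).is_ok());
}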

src/ln/channelmonitor.rs

Lines changed: 27 additions & 20 deletions
@@ -303,12 +303,19 @@ const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
-/// copies of ChannelMonitors, including watchtowers).
-pub(crate) const HTLC_FAIL_TIMEOUT_BLOCKS: u32 = 3;
-/// Number of blocks we wait on seeing a confirmed HTLC-Timeout or previous revoked commitment
-/// transaction before we fail corresponding inbound HTLCs. This prevents us from failing backwards
-/// and then getting a reorg resulting in us losing money.
-pub(crate) const HTLC_FAIL_ANTI_REORG_DELAY: u32 = 6;
+/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
+/// at the CLTV expiration height, but giving our peer a grace period may be profitable for us if
+/// they can still provide a late preimage. Nevertheless, the grace period has to be accounted for
+/// in our CLTV_EXPIRY_DELTA to be secure. Following this policy we may decrease the rate of channel
+/// failures due to expiration, at the cost of funds being locked longer in case of failure.
+pub(crate) const GRACE_PERIOD_BLOCKS: u32 = 3;
+/// Number of blocks we wait on seeing an HTLC output being solved before we fail the corresponding
+/// inbound HTLCs. This prevents us from failing backwards and then getting a reorg resulting in us
+/// losing money. We also use this delay to be sure we can remove our in-flight claim txn from the
+/// bump-candidates buffer. It may cause spurious generation of bumped claim txn, but that's all right
+/// given the outpoint is already solved by a previous claim tx. What we want to avoid is a reorg
+/// evicting our claim tx while we are no longer bumping another claim tx to solve the outpoint.
+pub(crate) const ANTI_REORG_DELAY: u32 = 6;

#[derive(Clone, PartialEq)]
enum Storage {
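Purely for illustration, the two heights these constants drive for one outbound HTLC (the struct and function below are hypothetical, not part of ChannelMonitor): we go on-chain once the grace period after expiry has elapsed, and we only fail the inbound HTLC backward once the claiming transaction has had ANTI_REORG_DELAY confirmations.

const GRACE_PERIOD_BLOCKS: u32 = 3;
const ANTI_REORG_DELAY: u32 = 6;

// Hypothetical illustration of the two key heights for an outbound HTLC.
struct OutboundHtlcTimeline {
    broadcast_height: u32,     // earliest height at which would_broadcast_at_height fires
    fail_backward_height: u32, // height at which the confirmed claim is considered reorg-safe
}

fn timeline(cltv_expiry: u32, claim_conf_height: u32) -> OutboundHtlcTimeline {
    OutboundHtlcTimeline {
        broadcast_height: cltv_expiry + GRACE_PERIOD_BLOCKS,
        fail_backward_height: claim_conf_height + ANTI_REORG_DELAY - 1,
    }
}

fn main() {
    let t = timeline(600_000, 600_004);
    assert_eq!(t.broadcast_height, 600_003);
    assert_eq!(t.fail_backward_height, 600_009);
}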
@@ -353,7 +360,7 @@ enum InputDescriptors {
}

/// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it
-/// once they mature to enough confirmations (HTLC_FAIL_ANTI_REORG_DELAY)
+/// once they mature to enough confirmations (ANTI_REORG_DELAY)
#[derive(Clone, PartialEq)]
enum OnchainEvent {
/// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from
@@ -1298,8 +1305,8 @@ impl ChannelMonitor {
if let Some(ref outpoints) = self.remote_claimable_outpoints.get($txid) {
for &(ref htlc, ref source_option) in outpoints.iter() {
if let &Some(ref source) = source_option {
-log_info!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+log_info!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
hash_map::Entry::Occupied(mut entry) => {
let e = entry.get_mut();
e.retain(|ref event| {
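The entry-or-insert pattern in this and the following hunks queues the failure under the height at which it becomes reorg-safe. A simplified, self-contained sketch of that bookkeeping (the Event type and helper are invented for illustration; the real map stores OnchainEvent values keyed the same way):

use std::collections::HashMap;
use std::collections::hash_map::Entry;

const ANTI_REORG_DELAY: u32 = 6;

// Stand-in for OnchainEvent::HTLCUpdate, for illustration only.
#[derive(Clone, PartialEq)]
enum Event {
    HtlcUpdate { payment_hash: [u8; 32] },
}

// Queue an HTLC failure to be acted on once the failure-trigger tx, confirmed at
// `conf_height`, has ANTI_REORG_DELAY confirmations.
fn queue_htlc_failure(events: &mut HashMap<u32, Vec<Event>>, conf_height: u32, payment_hash: [u8; 32]) {
    let target = conf_height + ANTI_REORG_DELAY - 1;
    match events.entry(target) {
        Entry::Occupied(mut entry) => entry.get_mut().push(Event::HtlcUpdate { payment_hash }),
        Entry::Vacant(entry) => { entry.insert(vec![Event::HtlcUpdate { payment_hash }]); },
    }
}

fn main() {
    let mut events = HashMap::new();
    queue_htlc_failure(&mut events, 600_000, [0u8; 32]);
    assert!(events.contains_key(&600_005)); // 600_000 + 6 - 1
}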
@@ -1396,7 +1403,7 @@ impl ChannelMonitor {
}
}
log_trace!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of remote commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
-match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
hash_map::Entry::Occupied(mut entry) => {
let e = entry.get_mut();
e.retain(|ref event| {
@@ -1788,8 +1795,8 @@ impl ChannelMonitor {

macro_rules! wait_threshold_conf {
($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
-log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-match self.onchain_events_waiting_threshold_conf.entry($height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
hash_map::Entry::Occupied(mut entry) => {
let e = entry.get_mut();
e.retain(|ref event| {
@@ -2022,7 +2029,7 @@ impl ChannelMonitor {
}

fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash) {
-if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + HTLC_FAIL_ANTI_REORG_DELAY - 1)) {
+if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
//We may discard:
//- htlc update there as failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
//- our claim tx on a commitment tx output
@@ -2059,16 +2066,16 @@ impl ChannelMonitor {
// from us until we've reached the point where we go on-chain with the
// corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
// least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
-// aka outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS == height - CLTV_CLAIM_BUFFER
+// aka outbound_cltv + GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
// inbound_cltv == height + CLTV_CLAIM_BUFFER
-// outbound_cltv + HTLC_FAIL_TIMEOUT_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
-// HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+// outbound_cltv + GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+// GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
// CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
-// HTLC_FAIL_TIMEOUT_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+// GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
// The final, above, condition is checked for statically in channelmanager
// with CHECK_CLTV_EXPIRY_SANITY_2.
let htlc_outbound = $local_tx == htlc.offered;
-if ( htlc_outbound && htlc.cltv_expiry + HTLC_FAIL_TIMEOUT_BLOCKS <= height) ||
+if ( htlc_outbound && htlc.cltv_expiry + GRACE_PERIOD_BLOCKS <= height) ||
(!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
log_info!(self, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry);
return true;
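Plugging the commit's values into the final inequality derived above: GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER = 3 + 12 = 15, comfortably below CLTV_EXPIRY_DELTA = 72. A tiny standalone check (the constant names mirror the tree, the snippet itself is illustrative):

const GRACE_PERIOD_BLOCKS: u32 = 3;
const CLTV_CLAIM_BUFFER: u32 = 6;
const CLTV_EXPIRY_DELTA: u32 = 6 * 12;

fn main() {
    // The condition enforced statically by CHECK_CLTV_EXPIRY_SANITY_2 in channelmanager.rs.
    assert!(GRACE_PERIOD_BLOCKS + 2 * CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA);
}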
@@ -2206,8 +2213,8 @@ impl ChannelMonitor {
payment_preimage.0.copy_from_slice(&input.witness[1]);
htlc_updated.push((source, Some(payment_preimage), payment_hash));
} else {
-log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + HTLC_FAIL_ANTI_REORG_DELAY - 1);
-match self.onchain_events_waiting_threshold_conf.entry(height + HTLC_FAIL_ANTI_REORG_DELAY - 1) {
+log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
+match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
hash_map::Entry::Occupied(mut entry) => {
let e = entry.get_mut();
e.retain(|ref event| {
