
Commit 29a780e

Delay broadcast of PackageTemplate packages until their locktime
This stores transaction templates temporarily until their locktime is reached, so that transactions are never broadcast (or RBF-bumped) before their locktime. For users broadcasting transactions (possibly indirectly) via Bitcoin Core RPC, this means no automated client-side rebroadcast is required to get transactions confirmed.
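To make the mechanism concrete, here is a minimal, self-contained sketch of the same bookkeeping pattern. Package and LocktimeQueue are illustrative names only, not LDK types; the real implementation lives in OnchainTxHandler (see the onchaintx.rs diff below). A BTreeMap keyed by locktime keeps entries ordered, so split_off can separate still-locked packages from ready ones in a single call.

use std::collections::BTreeMap;

// Hypothetical stand-in for LDK's PackageTemplate: anything carrying a locktime.
struct Package {
    locktime: u32,
    // inputs, outputs, fee information, etc. omitted
}

// Sketch (not LDK's API) of the delay-until-locktime bookkeeping added by this commit.
#[derive(Default)]
struct LocktimeQueue {
    // Keyed by locktime so entries stay sorted and can be split off cheaply.
    locktimed_packages: BTreeMap<u32, Vec<Package>>,
}

impl LocktimeQueue {
    // Mirrors the commit's check: a package whose locktime is more than one block
    // ahead of the current height is stored instead of being handed to the broadcaster.
    fn queue_or_pass_through(&mut self, pkg: Package, height: u32) -> Option<Package> {
        if pkg.locktime > height + 1 {
            self.locktimed_packages.entry(pkg.locktime).or_insert_with(Vec::new).push(pkg);
            None
        } else {
            Some(pkg)
        }
    }

    // On each connected block, release everything claimable up to and including height + 1.
    fn release_ready(&mut self, height: u32) -> Vec<Package> {
        // split_off leaves keys < height + 2 (the now-ready entries) in the map
        // and returns the still-locked remainder.
        let still_locked = self.locktimed_packages.split_off(&(height + 2));
        let ready = std::mem::replace(&mut self.locktimed_packages, still_locked);
        ready.into_iter().flat_map(|(_, pkgs)| pkgs).collect()
    }
}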
Parent: 79ffc5e

4 files changed: +311 −210 lines

fuzz/src/full_stack.rs
1 addition & 2 deletions (large diffs are not rendered by default)

lightning/src/chain/onchaintx.rs
55 additions & 2 deletions
@@ -33,6 +33,7 @@ use util::ser::{Readable, ReadableArgs, Writer, Writeable, VecWriter};
 use util::byte_utils;
 
 use prelude::*;
+use alloc::collections::BTreeMap;
 use std::collections::HashMap;
 use core::cmp;
 use core::ops::Deref;
@@ -165,8 +166,9 @@ pub struct OnchainTxHandler<ChannelSigner: Sign> {
 	#[cfg(not(test))]
 	claimable_outpoints: HashMap<BitcoinOutPoint, (Txid, u32)>,
 
-	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
+	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
 
+	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
 
 	pub(super) secp_ctx: Secp256k1<secp256k1::All>,
 }
@@ -206,6 +208,15 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 			claim_and_height.1.write(writer)?;
 		}
 
+		writer.write_all(&byte_utils::be64_to_array(self.locktimed_packages.len() as u64))?;
+		for (ref locktime, ref packages) in self.locktimed_packages.iter() {
+			locktime.write(writer)?;
+			writer.write_all(&byte_utils::be64_to_array(packages.len() as u64))?;
+			for ref package in packages.iter() {
+				package.write(writer)?;
+			}
+		}
+
 		writer.write_all(&byte_utils::be64_to_array(self.onchain_events_awaiting_threshold_conf.len() as u64))?;
 		for ref entry in self.onchain_events_awaiting_threshold_conf.iter() {
 			entry.txid.write(writer)?;
@@ -265,6 +276,19 @@ impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
 			let height = Readable::read(reader)?;
 			claimable_outpoints.insert(outpoint, (ancestor_claim_txid, height));
 		}
+
+		let locktimed_packages_len: u64 = Readable::read(reader)?;
+		let mut locktimed_packages = BTreeMap::new();
+		for _ in 0..locktimed_packages_len {
+			let locktime = Readable::read(reader)?;
+			let packages_len: u64 = Readable::read(reader)?;
+			let mut packages = Vec::with_capacity(cmp::min(packages_len as usize, MAX_ALLOC_SIZE / std::mem::size_of::<PackageTemplate>()));
+			for _ in 0..packages_len {
+				packages.push(Readable::read(reader)?);
+			}
+			locktimed_packages.insert(locktime, packages);
+		}
+
 		let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
 		let mut onchain_events_awaiting_threshold_conf = Vec::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
 		for _ in 0..waiting_threshold_conf_len {
@@ -302,6 +326,7 @@ impl<'a, K: KeysInterface> ReadableArgs<&'a K> for OnchainTxHandler<K::Signer> {
 			signer,
 			channel_transaction_parameters: channel_parameters,
 			claimable_outpoints,
+			locktimed_packages,
 			pending_claim_requests,
 			onchain_events_awaiting_threshold_conf,
 			secp_ctx,
@@ -321,6 +346,7 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 			channel_transaction_parameters: channel_parameters,
 			pending_claim_requests: HashMap::new(),
 			claimable_outpoints: HashMap::new(),
+			locktimed_packages: BTreeMap::new(),
 			onchain_events_awaiting_threshold_conf: Vec::new(),
 
 			secp_ctx,
@@ -378,7 +404,26 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 		// <= CLTV_SHARED_CLAIM_BUFFER) and they don't require an immediate nLockTime (aggregable).
 		for req in requests {
 			// Don't claim a outpoint twice that would be bad for privacy and may uselessly lock a CPFP input for a while
-			if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) { log_trace!(logger, "Bouncing off outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout); } else {
+			if let Some(_) = self.claimable_outpoints.get(req.outpoints()[0]) {
+				log_trace!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", req.outpoints()[0].txid, req.outpoints()[0].vout);
+			} else {
+				let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten()
+					.find(|locked_package| locked_package.outpoints() == req.outpoints());
+				if let Some(package) = timelocked_equivalent_package {
+					log_trace!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.",
						req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_timelock());
+					continue;
+				}
+
+				if req.package_timelock() > height + 1 {
+					log_debug!(logger, "Delaying claim of package until its timelock at {} (current height {}), the following outpoints are spent:", req.package_timelock(), height);
+					for outpoint in req.outpoints() {
+						log_debug!(logger, " Outpoint {}", outpoint);
+					}
+					self.locktimed_packages.entry(req.package_timelock()).or_insert(Vec::new()).push(req);
+					continue;
+				}
+
 				log_trace!(logger, "Test if outpoint can be aggregated with expiration {} against {}", req.timelock(), height + CLTV_SHARED_CLAIM_BUFFER);
 				if req.timelock() <= height + CLTV_SHARED_CLAIM_BUFFER || !req.aggregable() {
 					// Don't aggregate if outpoint package timelock is soon or marked as non-aggregable
@@ -394,6 +439,14 @@ impl<ChannelSigner: Sign> OnchainTxHandler<ChannelSigner> {
 			preprocessed_requests.push(req);
 		}
 
+		// Claim everything up to and including height + 1
+		let remaining_locked_packages = self.locktimed_packages.split_off(&(height + 2));
+		for (pop_height, mut entry) in self.locktimed_packages.iter_mut() {
+			log_trace!(logger, "Restoring delayed claim of package(s) at their timelock at {}.", pop_height);
+			preprocessed_requests.append(&mut entry);
+		}
+		self.locktimed_packages = remaining_locked_packages;
+
 		// Generate claim transactions and track them to bump if necessary at
 		// height timer expiration (i.e in how many blocks we're going to take action).
 		for mut req in preprocessed_requests {
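For persistence, the new map follows the handler's existing length-prefixed framing: a big-endian u64 entry count, then for each entry the locktime followed by a u64 package count and the packages themselves, with the read side capping its up-front Vec allocation via MAX_ALLOC_SIZE rather than trusting the serialized length. A rough, standalone illustration of that framing (using plain u32 payloads and a made-up MAX_ALLOC bound in place of PackageTemplate and LDK's Readable/Writeable traits) could look like this:

use std::collections::BTreeMap;
use std::io::{self, Read, Write};

const MAX_ALLOC: usize = 64 * 1024; // illustrative cap, not LDK's MAX_ALLOC_SIZE

// u64 map length, then per entry: u32 key, u64 vec length, elements (all big-endian).
fn write_map<W: Write>(w: &mut W, map: &BTreeMap<u32, Vec<u32>>) -> io::Result<()> {
    w.write_all(&(map.len() as u64).to_be_bytes())?;
    for (locktime, packages) in map {
        w.write_all(&locktime.to_be_bytes())?;
        w.write_all(&(packages.len() as u64).to_be_bytes())?;
        for pkg in packages {
            w.write_all(&pkg.to_be_bytes())?;
        }
    }
    Ok(())
}

fn read_map<R: Read>(r: &mut R) -> io::Result<BTreeMap<u32, Vec<u32>>> {
    let mut map = BTreeMap::new();
    let map_len = read_u64(r)?;
    for _ in 0..map_len {
        let locktime = read_u32(r)?;
        let vec_len = read_u64(r)?;
        // Bound the initial allocation instead of trusting the serialized length.
        let mut packages = Vec::with_capacity(std::cmp::min(
            vec_len as usize,
            MAX_ALLOC / std::mem::size_of::<u32>(),
        ));
        for _ in 0..vec_len {
            packages.push(read_u32(r)?);
        }
        map.insert(locktime, packages);
    }
    Ok(map)
}

fn read_u64<R: Read>(r: &mut R) -> io::Result<u64> {
    let mut buf = [0u8; 8];
    r.read_exact(&mut buf)?;
    Ok(u64::from_be_bytes(buf))
}

fn read_u32<R: Read>(r: &mut R) -> io::Result<u32> {
    let mut buf = [0u8; 4];
    r.read_exact(&mut buf)?;
    Ok(u32::from_be_bytes(buf))
}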
