
Commit 03b5da1

Broadcast final local txn via ChannelMonitorUpdate
1 parent 82d40ee commit 03b5da1

8 files changed: +201 lines, -116 lines

fuzz/src/chanmon_consistency.rs

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@ impl channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMon
 };
 let mut deserialized_monitor = <(Sha256d, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
 read(&mut Cursor::new(&map_entry.get().1), Arc::clone(&self.logger)).unwrap().1;
-deserialized_monitor.update_monitor(update.clone()).unwrap();
+deserialized_monitor.update_monitor(update.clone(), &&TestBroadcaster {}).unwrap();
 let mut ser = VecWriter(Vec::new());
 deserialized_monitor.write_for_disk(&mut ser).unwrap();
 map_entry.insert((update.update_id, ser.0));
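
update_monitor now takes a transaction broadcaster as a second argument, so that applying a force-close update can broadcast the latest local commitment transaction directly from the monitor. The fuzzer satisfies the new parameter with a throwaway TestBroadcaster; a minimal no-op broadcaster along those lines might look like the sketch below (the import paths and the empty-struct definition are assumptions for illustration, not part of this commit):

use bitcoin::blockdata::transaction::Transaction;
use lightning::chain::chaininterface::BroadcasterInterface;

// Hypothetical stand-in broadcaster for fuzzing: it satisfies update_monitor's
// new broadcaster parameter without ever talking to a Bitcoin node.
struct TestBroadcaster {}

impl BroadcasterInterface for TestBroadcaster {
    fn broadcast_transaction(&self, _tx: &Transaction) {
        // Deliberately a no-op; the fuzz target only needs the update to apply.
    }
}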

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 2 additions & 2 deletions
@@ -31,7 +31,7 @@ fn test_simple_monitor_permanent_update_fail() {
 
 *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
 if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
-check_added_monitors!(nodes[0], 1);
+check_added_monitors!(nodes[0], 2);
 
 let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
 assert_eq!(events_1.len(), 2);
@@ -120,7 +120,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 
 // ...and make sure we can force-close a frozen channel
 nodes[0].node.force_close_channel(&channel_id);
-check_added_monitors!(nodes[0], 0);
+check_added_monitors!(nodes[0], 1);
 check_closed_broadcast!(nodes[0], false);
 
 // TODO: Once we hit the chain with the failure transaction we should check that we get a

lightning/src/ln/channel.rs

Lines changed: 6 additions & 7 deletions
@@ -3774,7 +3774,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
 /// immediately (others we will have to allow to time out).
-pub fn force_shutdown(&mut self) -> (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>) {
+pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>) {
 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
 
 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
@@ -3797,12 +3797,11 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
 self.channel_state = ChannelState::ShutdownComplete as u32;
 self.update_time_counter += 1;
-if self.channel_monitor.is_some() {
-(self.channel_monitor.as_mut().unwrap().get_latest_local_commitment_txn(), dropped_outbound_htlcs)
-} else {
-// We aren't even signed funding yet, so can't broadcast anything
-(Vec::new(), dropped_outbound_htlcs)
-}
+self.latest_monitor_update_id += 1;
+(self.funding_txo.clone(), ChannelMonitorUpdate {
+update_id: self.latest_monitor_update_id,
+updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+}, dropped_outbound_htlcs)
 }
 }
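
force_shutdown no longer hands back raw transactions for the ChannelManager to broadcast; instead it returns a ChannelMonitorUpdate carrying the new ChannelForceClosed { should_broadcast } step, and the broadcast happens when that update is applied to the ChannelMonitor. The channelmonitor.rs side of this commit is not shown in this excerpt, but a rough sketch of how such a step could be acted on (the function name and signature are illustrative assumptions, and the import paths are a guess at the crate layout of the time) is:

use std::ops::Deref;
use lightning::chain::chaininterface::BroadcasterInterface;
use lightning::chain::keysinterface::ChannelKeys;
use lightning::ln::channelmonitor::ChannelMonitor;

// Illustrative only: what handling ChannelForceClosed inside the monitor might
// amount to. The real logic lives in channelmonitor.rs, omitted from this diff view.
fn handle_force_close_step<ChanSigner: ChannelKeys, B: Deref>(
    monitor: &mut ChannelMonitor<ChanSigner>, should_broadcast: bool, broadcaster: &B,
) where B::Target: BroadcasterInterface {
    if should_broadcast {
        // Hand the latest local commitment transaction (plus any HTLC
        // transactions we can currently sign) to the broadcaster.
        for tx in monitor.get_latest_local_commitment_txn() {
            broadcaster.broadcast_transaction(&tx);
        }
    }
    // When should_broadcast is false (the toxic-local-commitment path that calls
    // force_shutdown(false) in channelmanager.rs below), the monitor only records
    // that the channel is closed.
}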

lightning/src/ln/channelmanager.rs

Lines changed: 33 additions & 35 deletions
@@ -28,7 +28,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::features::{InitFeatures, NodeFeatures};
 use ln::router::Route;
 use ln::msgs;
@@ -152,7 +152,7 @@ pub struct PaymentHash(pub [u8;32]);
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 pub struct PaymentPreimage(pub [u8;32]);
 
-type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -502,8 +502,7 @@ macro_rules! break_chan_entry {
 if let Some(short_id) = chan.get_short_channel_id() {
 $channel_state.short_to_id.remove(&short_id);
 }
-break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
-},
+break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())) },
 Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
 }
 }
@@ -522,7 +521,7 @@ macro_rules! try_chan_entry {
 if let Some(short_id) = chan.get_short_channel_id() {
 $channel_state.short_to_id.remove(&short_id);
 }
-return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
 },
 Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
 log_error!($self, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
@@ -540,11 +539,7 @@ macro_rules! try_chan_entry {
 ChannelMonitorUpdateErr::TemporaryFailure => {},
 }
 }
-let mut shutdown_res = chan.force_shutdown();
-if shutdown_res.0.len() >= 1 {
-log_error!($self, "You have a toxic local commitment transaction {} avaible in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid());
-}
-shutdown_res.0.clear();
+let shutdown_res = chan.force_shutdown(false);
 return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
 }
 }
@@ -572,7 +567,7 @@ macro_rules! handle_monitor_err {
 // splitting hairs we'd prefer to claim payments that were to us, but we haven't
 // given up the preimage yet, so might as well just wait until the payment is
 // retried, avoiding the on-chain fees.
-let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
 res
 },
 ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -820,14 +815,17 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
 
 #[inline]
 fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
-let (local_txn, mut failed_htlcs) = shutdown_res;
-log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
+let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+log_trace!(self, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
 for htlc_source in failed_htlcs.drain(..) {
 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 }
-for tx in local_txn {
-log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
-self.tx_broadcaster.broadcast_transaction(&tx);
+if let Some(funding_txo) = funding_txo_option {
+// There isn't anything we can do if we get an update failure - we're already
+// force-closing. The monitor update on the required in-memory copy should broadcast
+// the latest local state, which is the best we can do anyway. Thus, it is safe to
+// ignore the result here.
+let _ = self.monitor.update_monitor(funding_txo, monitor_update);
 }
 }
 
@@ -849,7 +847,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
 }
 };
 log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..]));
-self.finish_force_close_channel(chan.force_shutdown());
+self.finish_force_close_channel(chan.force_shutdown(true));
 if let Ok(update) = self.get_channel_update(&chan) {
 let mut channel_state = self.channel_state.lock().unwrap();
 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1268,7 +1266,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
 Some(mut chan) => {
 (chan.get_outbound_funding_created(funding_txo)
 .map_err(|e| if let ChannelError::Close(msg) = e {
-MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
 } else { unreachable!(); })
 , chan)
 },
@@ -1288,7 +1286,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
 ChannelMonitorUpdateErr::PermanentFailure => {
 {
 let mut channel_state = self.channel_state.lock().unwrap();
-match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) {
+match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(true), None)), chan.get_their_node_id(), channel_state) {
 Err(_) => { return; },
 Ok(()) => unreachable!(),
 }
@@ -1518,7 +1516,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
 if let Some(short_id) = channel.get_short_channel_id() {
 channel_state.short_to_id.remove(&short_id);
 }
-Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok()))
+Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
 },
 ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
 };
@@ -2021,7 +2019,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref> ChannelMan
 // channel, not the temporary_channel_id. This is compatible with ourselves, but the
 // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
 // any messages referencing a previously-closed channel anyway.
-return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None));
 },
 ChannelMonitorUpdateErr::TemporaryFailure => {
 // There's no problem signing a counterparty's funding transaction if our monitor
@@ -2741,7 +2739,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
 // It looks like our counterparty went on-chain. We go ahead and
 // broadcast our latest local state as well here, just in case its
 // some kind of SPV attack, though we expect these to be dropped.
-failed_channels.push(channel.force_shutdown());
+failed_channels.push(channel.force_shutdown(true));
 if let Ok(update) = self.get_channel_update(&channel) {
 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 msg: update
@@ -2756,11 +2754,10 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
 if let Some(short_id) = channel.get_short_channel_id() {
 short_to_id.remove(&short_id);
 }
-failed_channels.push(channel.force_shutdown());
 // If would_broadcast_at_height() is true, the channel_monitor will broadcast
 // the latest local tx for us, so we should skip that here (it doesn't really
 // hurt anything, but does make tests a bit simpler).
-failed_channels.last_mut().unwrap().0 = Vec::new();
+failed_channels.push(channel.force_shutdown(false));
 if let Ok(update) = self.get_channel_update(&channel) {
 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 msg: update
@@ -2804,7 +2801,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
 if let Some(short_id) = v.get_short_channel_id() {
 short_to_id.remove(&short_id);
 }
-failed_channels.push(v.force_shutdown());
+failed_channels.push(v.force_shutdown(true));
 if let Ok(update) = self.get_channel_update(&v) {
 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 msg: update
@@ -2992,7 +2989,7 @@ impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K:
 if let Some(short_id) = chan.get_short_channel_id() {
 short_to_id.remove(&short_id);
 }
-failed_channels.push(chan.force_shutdown());
+failed_channels.push(chan.force_shutdown(true));
 if let Ok(update) = self.get_channel_update(&chan) {
 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 msg: update
@@ -3458,7 +3455,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 let latest_block_height: u32 = Readable::read(reader)?;
 let last_block_hash: Sha256dHash = Readable::read(reader)?;
 
-let mut closed_channels = Vec::new();
+let mut failed_htlcs = Vec::new();
 
 let channel_count: u64 = Readable::read(reader)?;
 let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
@@ -3477,9 +3474,9 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
 channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() ||
 channel.get_latest_monitor_update_id() != monitor.get_latest_update_id() {
-let mut force_close_res = channel.force_shutdown();
-force_close_res.0 = monitor.get_latest_local_commitment_txn();
-closed_channels.push(force_close_res);
+let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+failed_htlcs.append(&mut new_failed_htlcs);
+monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
 } else {
 if let Some(short_channel_id) = channel.get_short_channel_id() {
 short_to_id.insert(short_channel_id, channel.channel_id());
@@ -3493,7 +3490,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 
 for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
 if !funding_txo_set.contains(funding_txo) {
-closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
+monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
 }
 }
 
@@ -3563,12 +3560,13 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 default_configuration: args.default_config,
 };
 
-for close_res in closed_channels.drain(..) {
-channel_manager.finish_force_close_channel(close_res);
-//TODO: Broadcast channel update for closed channels, but only after we've made a
-//connection or two.
+for htlc_source in failed_htlcs.drain(..) {
+channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 }
 
+//TODO: Broadcast channel update for closed channels, but only after we've made a
+//connection or two.
+
 Ok((last_block_hash.clone(), channel_manager))
 }
 }
