
Commit 43c4ffe

Author: Antoine Riard

Add is_resolving_output in ChannelMonitor

Called in ChannelMonitor's block_connected, returning pending_htlc_updated upstream via ManyChannelMonitor to link HTLCs between monitors. Used by ChannelManager to fulfill/fail HTLCs backwards accordingly. If spurious HTLC updates are generated due to block re-scan and the HTLC is already LocalRemoved, discard them in Channel's get_update_*_htlc.

1 parent 6e1ccc7
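
[Editor's sketch] The ChannelMonitor/ManyChannelMonitor side of this commit is in files not shown in this excerpt. Below is a minimal sketch of the plumbing the commit message describes; PaymentHash, PaymentPreimage and HTLCSource are illustrative stand-ins, and only the shape of fetch_pending_htlc_updated is inferred from its call site in the channelmanager.rs diff below.

// Sketch only: each ChannelMonitor's block_connected queues the HTLCs it saw
// resolved on-chain; ChannelManager drains the queue after every block.
// Types here are stand-ins, not rust-lightning's actual definitions.
use std::sync::Mutex;

type PaymentHash = [u8; 32];
type PaymentPreimage = [u8; 32];
struct HTLCSource; // opaque handle to the upstream channel/HTLC

#[derive(Default)]
struct ManyChannelMonitor {
	// (payment_hash, preimage if fulfilled on-chain, upstream source if known)
	pending_htlc_updated: Mutex<Vec<(PaymentHash, Option<PaymentPreimage>, Option<HTLCSource>)>>,
}

impl ManyChannelMonitor {
	// Drain the queued resolutions so ChannelManager can fulfill/fail backwards.
	fn fetch_pending_htlc_updated(&self) -> Vec<(PaymentHash, Option<PaymentPreimage>, Option<HTLCSource>)> {
		self.pending_htlc_updated.lock().unwrap().drain(..).collect()
	}
}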

File tree: 4 files changed, +302 −19 lines


src/ln/channel.rs

Lines changed: 22 additions & 8 deletions
@@ -1140,10 +1140,17 @@ impl Channel {
 		for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
 			if htlc.htlc_id == htlc_id_arg {
 				assert_eq!(htlc.payment_hash, payment_hash_calc);
-				if let InboundHTLCState::Committed = htlc.state {
-				} else {
-					debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-					// Don't return in release mode here so that we can update channel_monitor
+				match htlc.state {
+					InboundHTLCState::Committed => {},
+					InboundHTLCState::LocalRemoved(_) => {
+						// TODO (ariard) We may have spurious HTLC update events from ChannelMonitor due to block re-scan.
+						// Should we discard them?
+						return Ok((None, None));
+					},
+					_ => {
+						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+						// Don't return in release mode here so that we can update channel_monitor
+					}
 				}
 				pending_idx = idx;
 				break;
@@ -1226,10 +1233,17 @@ impl Channel {
 		let mut pending_idx = std::usize::MAX;
 		for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
 			if htlc.htlc_id == htlc_id_arg {
-				if let InboundHTLCState::Committed = htlc.state {
-				} else {
-					debug_assert!(false, "Have an inbound HTLC we tried to fail before it was fully committed to");
-					return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
+				match htlc.state {
+					InboundHTLCState::Committed => {},
+					InboundHTLCState::LocalRemoved(_) => {
+						// TODO (ariard) We may have spurious HTLC update events from ChannelMonitor due to block re-scan.
+						// Should we discard them?
+						return Ok(None);
+					},
+					_ => {
+						debug_assert!(false, "Have an inbound HTLC we tried to fail before it was fully committed to");
+						// Don't return in release mode here so that we can update channel_monitor
+					}
 				}
 				pending_idx = idx;
 			}
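
[Editor's sketch] Both hunks apply the same fix: a monitor-driven HTLC update that arrives after the HTLC has already moved to LocalRemoved (for example because the same block was scanned twice) is now discarded with an early return instead of tripping the debug assertion. A stripped-down illustration of that pattern, using simplified stand-ins rather than rust-lightning's actual types:

// Illustration only: the enum and return type are simplified stand-ins.
enum InboundState {
	Committed,
	LocalRemoved, // removal already in flight: duplicate updates must be no-ops
	PendingAdd,
}

fn get_update_fulfill(state: &InboundState) -> Result<Option<&'static str>, ()> {
	match state {
		InboundState::Committed => Ok(Some("update_fulfill_htlc")),
		// Spurious update from a block re-scan: discard rather than assert.
		InboundState::LocalRemoved => Ok(None),
		_ => {
			debug_assert!(false, "HTLC claimed before it was fully committed");
			Ok(None)
		}
	}
}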

src/ln/channelmanager.rs

Lines changed: 111 additions & 7 deletions
@@ -624,13 +624,7 @@ impl ChannelManager {
 		for tx in local_txn {
 			self.tx_broadcaster.broadcast_transaction(&tx);
 		}
-		//TODO: We need to have a way where outbound HTLC claims can result in us claiming the
-		//now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
-		//TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
-		//may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
-		//timeouts are hit and our claims confirm).
-		//TODO: In any case, we need to make sure we remove any pending htlc tracking (via
-		//fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
+
 	}

 	/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
@@ -2661,6 +2655,17 @@ impl ChainListener for ChannelManager {
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
+		{
+			let mut channel_state = Some(self.channel_state.lock().unwrap());
+			for (_, payment_preimage, htlc_source) in self.monitor.fetch_pending_htlc_updated() {
+				if let Some(source) = htlc_source {
+					if let Some(preimage) = payment_preimage {
+						if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+						self.claim_funds_internal(channel_state.take().unwrap(), source, preimage);
+					}
+				}
+			}
+		}
 		self.latest_block_height.store(height as usize, Ordering::Release);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
 	}
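
[Editor's sketch] A detail worth noting in the hunk above: claim_funds_internal takes the channel_state MutexGuard by value (so the lock can be released before follow-up work), which is why the guard is wrapped in an Option, take()n out, and re-acquired on the next iteration if needed. A self-contained sketch of that guard-juggling pattern, with placeholder types:

use std::sync::{Mutex, MutexGuard};

struct State { claimed: u32 }

// Consumes the guard so the lock is released before any follow-up work,
// mirroring how claim_funds_internal takes channel_state by value.
fn claim_one(mut guard: MutexGuard<'_, State>) {
	guard.claimed += 1;
} // guard dropped here, unlocking

fn process_updates(state: &Mutex<State>, updates: &[bool]) {
	let mut guard = Some(state.lock().unwrap());
	for &should_claim in updates {
		if should_claim {
			// A previous iteration may have consumed the guard: re-take the lock.
			if guard.is_none() { guard = Some(state.lock().unwrap()); }
			claim_one(guard.take().unwrap());
		}
	}
}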
@@ -6064,6 +6069,105 @@ mod tests {
 		assert_eq!(nodes[1].node.list_channels().len(), 0);
 	}

+	#[test]
+	fn test_htlc_on_chain_success() {
+		// Test that in case of a unilateral close on-chain, we detect the state of the output thanks to
+		// ChainWatchInterface and pass the preimage backward accordingly. So here we test that ChannelManager is
+		// broadcasting the right event to the other nodes in the payment path.
+		// A --------------------> B ----------------------> C (preimage)
+		//    A's commitment tx        C's commitment tx
+		//            \                        \
+		//    B's preimage tx           C's HTLC Success tx
+
+		let nodes = create_network(3);
+
+		// Create some initial channels
+		let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+		let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+		// Rebalance the network a bit by relaying one payment through all the channels...
+		send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+		send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+
+		let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+
+		// Broadcast legit commitment tx from C on B's chain
+		// Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
+		let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+		nodes[2].node.claim_funds(payment_preimage);
+		{
+			let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap();
+			assert_eq!(added_monitors.len(), 1);
+			added_monitors.clear();
+		}
+		let events = nodes[2].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert!(update_fail_htlcs.is_empty());
+				assert!(!update_fulfill_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
+			},
+			_ => panic!("Unexpected event"),
+		};
+		nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+		let events = nodes[2].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+
+		// Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
+		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn}, 1);
+		{
+			let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+			assert_eq!(added_monitors.len(), 1);
+			added_monitors.clear();
+		}
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 2);
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		match events[1] {
+			MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+				assert!(update_add_htlcs.is_empty());
+				assert!(update_fail_htlcs.is_empty());
+				assert!(!update_fulfill_htlcs.is_empty());
+				assert!(update_fail_malformed_htlcs.is_empty());
+				assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+			},
+			_ => panic!("Unexpected event"),
+		};
+
+		// Broadcast legit commitment tx from A on B's chain
+		// Broadcast preimage tx by B on offered output from A's commitment tx on A's chain
+		let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+		let events = nodes[1].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+
+		// Verify that A's ChannelManager is able to extract preimage from preimage tx and pass it backward
+		nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn }, 1);
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+			_ => panic!("Unexpected event"),
+		}
+	}
+
 	#[test]
 	fn test_htlc_ignore_latest_remote_commitment() {
 		// Test that HTLC transactions spending the latest remote commitment transaction are simply
