
Commit 0016b80

Author: Antoine Riard

Add is_resolving_output in ChannelMonitor

Called in ChannelMonitor's block_connected, returning pending_htlc_updated upstream via ManyChannelMonitor to link HTLCs between monitors. Used by ChannelManager to fulfill/fail HTLCs backwards accordingly. If spurious HTLC updates are generated due to block re-scan and the HTLCs are already LocalRemoved, discard them in Channel's get_update_*_htlc.

1 parent 69196aa, commit 0016b80

File tree: 4 files changed (+302, −19 lines)

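The is_resolving_output hook itself lives in the ChannelMonitor code, whose hunk is not part of this excerpt (only two of the four changed files are shown below). As a rough, self-contained sketch of the mechanism's shape, assuming hypothetical type and field names that are not taken from the actual diff: when a confirmed transaction spends a tracked HTLC output, an HTLC-Success spend exposes the 32-byte payment preimage in its witness, while a timeout spend does not, so the monitor can queue either a fulfill or a fail for the upstream ChannelManager.

// Hypothetical sketch only: the real names in channelmonitor.rs may differ.
use std::collections::HashMap;

type Txid = [u8; 32];
type PaymentPreimage = [u8; 32];

struct TrackedHtlc { htlc_id: u64 }

struct MonitorSketch {
    // Commitment-tx outputs we watch: (txid, output index) -> HTLC metadata.
    tracked_outputs: HashMap<(Txid, u32), TrackedHtlc>,
    // Resolutions queued for ChannelManager, mirroring pending_htlc_updated:
    // Some(preimage) means fulfill backwards, None means fail backwards.
    pending_htlc_updated: Vec<(u64, Option<PaymentPreimage>)>,
}

impl MonitorSketch {
    // Called from block_connected for each input of each confirmed transaction.
    fn is_resolving_output(&mut self, spent: (Txid, u32), witness: &[Vec<u8>]) {
        if let Some(htlc) = self.tracked_outputs.get(&spent) {
            // An HTLC-Success witness carries the 32-byte payment preimage;
            // a timeout spend does not, so preimage stays None there.
            let preimage = witness.iter().find(|el| el.len() == 32).map(|el| {
                let mut p = [0u8; 32];
                p.copy_from_slice(el);
                p
            });
            self.pending_htlc_updated.push((htlc.htlc_id, preimage));
        }
    }
}

ManyChannelMonitor would drain that queue across all monitors; ChannelManager's block_connected then picks the entries up via fetch_pending_htlc_updated, as the src/ln/channelmanager.rs hunk below shows.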

src/ln/channel.rs

Lines changed: 22 additions & 8 deletions
@@ -1140,10 +1140,17 @@ impl Channel {
         for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
             if htlc.htlc_id == htlc_id_arg {
                 assert_eq!(htlc.payment_hash, payment_hash_calc);
-                if let InboundHTLCState::Committed = htlc.state {
-                } else {
-                    debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                    // Don't return in release mode here so that we can update channel_monitor
+                match htlc.state {
+                    InboundHTLCState::Committed => {},
+                    InboundHTLCState::LocalRemoved(_) => {
+                        // TODO (ariard): we may have spurious HTLC update events from ChannelMonitor due to block re-scan.
+                        // Should we discard them?
+                        return Ok((None, None));
+                    },
+                    _ => {
+                        debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+                        // Don't return in release mode here so that we can update channel_monitor
+                    }
                 }
                 pending_idx = idx;
                 break;

@@ -1226,10 +1233,17 @@ impl Channel {
         let mut pending_idx = std::usize::MAX;
         for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
             if htlc.htlc_id == htlc_id_arg {
-                if let InboundHTLCState::Committed = htlc.state {
-                } else {
-                    debug_assert!(false, "Have an inbound HTLC we tried to fail before it was fully committed to");
-                    return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
+                match htlc.state {
+                    InboundHTLCState::Committed => {},
+                    InboundHTLCState::LocalRemoved(_) => {
+                        // TODO (ariard): we may have spurious HTLC update events from ChannelMonitor due to block re-scan.
+                        // Should we discard them?
+                        return Ok(None);
+                    },
+                    _ => {
+                        debug_assert!(false, "Have an inbound HTLC we tried to fail before it was fully committed to");
+                        // Don't return in release mode here so that we can update channel_monitor
+                    }
                 }
                 pending_idx = idx;
             }
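The early return on LocalRemoved is the discard path from the commit message: after a block re-scan, ChannelMonitor can report the same on-chain resolution a second time, by which point the update_fulfill/update_fail for that HTLC has already been sent. Below is a stripped-down illustration of the check, with a simplified enum and a hypothetical helper (the real InboundHTLCState has more variants with payloads, and the real methods return the fulfill/fail messages rather than a bool):

// Simplified model, not the real state machine in channel.rs.
enum InboundHTLCState {
    Committed,          // irrevocably committed on both commitment txs
    LocalRemoved,       // we already sent update_fulfill/update_fail for it
    AwaitingCommitment, // stand-in for the not-yet-committed states
}

// Returns true when a fulfill/fail update should be generated, false when a
// duplicate monitor event should be silently dropped.
fn should_generate_update(state: &InboundHTLCState) -> bool {
    match state {
        InboundHTLCState::Committed => true,
        // Duplicate event from a re-scanned block: nothing left to do.
        InboundHTLCState::LocalRemoved => false,
        // Claiming or failing before full commitment is a logic error.
        InboundHTLCState::AwaitingCommitment => {
            debug_assert!(false, "HTLC resolved before it was fully committed");
            true // don't bail out in release mode, so channel_monitor still updates
        }
    }
}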

src/ln/channelmanager.rs

Lines changed: 111 additions & 7 deletions
@@ -624,13 +624,7 @@ impl ChannelManager {
         for tx in local_txn {
             self.tx_broadcaster.broadcast_transaction(&tx);
         }
-        //TODO: We need to have a way where outbound HTLC claims can result in us claiming the
-        //now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
-        //TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
-        //may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
-        //timeouts are hit and our claims confirm).
-        //TODO: In any case, we need to make sure we remove any pending htlc tracking (via
-        //fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
+
     }

     /// Force closes a channel, immediately broadcasting the latest local commitment transaction to

@@ -2665,6 +2659,17 @@ impl ChainListener for ChannelManager {
         for failure in failed_channels.drain(..) {
             self.finish_force_close_channel(failure);
         }
+        {
+            let mut channel_state = Some(self.channel_state.lock().unwrap());
+            for (_, payment_preimage, htlc_source) in self.monitor.fetch_pending_htlc_updated() {
+                if let Some(source) = htlc_source {
+                    if let Some(preimage) = payment_preimage {
+                        if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+                        self.claim_funds_internal(channel_state.take().unwrap(), source, preimage);
+                    }
+                }
+            }
+        }
         self.latest_block_height.store(height as usize, Ordering::Release);
         *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
     }
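In the new block_connected code above, channel_state is wrapped in an Option because claim_funds_internal takes the MutexGuard by value and may release the lock before returning (for instance to send messages), so the guard has to be re-acquired before the next claim. A self-contained sketch of the same lock-handoff pattern, with generic names in place of the actual API:

use std::sync::{Mutex, MutexGuard};

// Stand-in for claim_funds_internal: consumes the guard, releasing the lock.
fn consume(guard: MutexGuard<'_, Vec<u32>>, value: u32) {
    drop(guard); // e.g. unlock before doing I/O or sending messages
    let _ = value;
}

fn process(state: &Mutex<Vec<u32>>, updates: &[Option<u32>]) {
    let mut held: Option<MutexGuard<'_, Vec<u32>>> = Some(state.lock().unwrap());
    for update in updates {
        if let Some(value) = update {
            // Re-acquire the lock only if a previous iteration consumed it.
            if held.is_none() {
                held = Some(state.lock().unwrap());
            }
            consume(held.take().unwrap(), *value);
        }
    }
}

Taking the lock once up front and handing it off per update avoids relocking on every iteration while still tolerating updates that never need the channel state.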
@@ -6068,6 +6073,105 @@ mod tests {
         assert_eq!(nodes[1].node.list_channels().len(), 0);
     }

+    #[test]
+    fn test_htlc_on_chain_success() {
+        // Test that in case of a unilateral close onchain, we detect the state of output thanks to
+        // ChainWatchInterface and pass the preimage backward accordingly. So here we test that
+        // ChannelManager is broadcasting the right event to other nodes in the payment path.
+        // A --------------------> B ----------------------> C (preimage)
+        //    A's commitment tx        C's commitment tx
+        //            \                         \
+        //     B's preimage tx          C's HTLC-Success tx
+
+        let nodes = create_network(3);
+
+        // Create some initial channels
+        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+        let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+        // Rebalance the network a bit by relaying one payment through all the channels...
+        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+        send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+
+        let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+        let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+        // Broadcast legit commitment tx from C on B's chain
+        // Broadcast HTLC-Success transaction by C on received output from C's commitment tx on B's chain
+        let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+        nodes[2].node.claim_funds(payment_preimage);
+        {
+            let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap();
+            assert_eq!(added_monitors.len(), 1);
+            added_monitors.clear();
+        }
+        let events = nodes[2].node.get_and_clear_pending_msg_events();
+        assert_eq!(events.len(), 1);
+        match events[0] {
+            MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+                assert!(update_add_htlcs.is_empty());
+                assert!(update_fail_htlcs.is_empty());
+                assert!(!update_fulfill_htlcs.is_empty());
+                assert!(update_fail_malformed_htlcs.is_empty());
+                assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
+            },
+            _ => panic!("Unexpected event"),
+        };
+        nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
+        let events = nodes[2].node.get_and_clear_pending_msg_events();
+        assert_eq!(events.len(), 1);
+        match events[0] {
+            MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+            _ => panic!("Unexpected event"),
+        }
+        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+
+        // Verify that B's ChannelManager is able to extract preimage from HTLC-Success tx and pass it backward
+        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn }, 1);
+        {
+            let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+            assert_eq!(added_monitors.len(), 1);
+            added_monitors.clear();
+        }
+        let events = nodes[1].node.get_and_clear_pending_msg_events();
+        assert_eq!(events.len(), 2);
+        match events[0] {
+            MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+            _ => panic!("Unexpected event"),
+        }
+        match events[1] {
+            MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+                assert!(update_add_htlcs.is_empty());
+                assert!(update_fail_htlcs.is_empty());
+                assert!(!update_fulfill_htlcs.is_empty());
+                assert!(update_fail_malformed_htlcs.is_empty());
+                assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+            },
+            _ => panic!("Unexpected event"),
+        };
+
+        // Broadcast legit commitment tx from A on B's chain
+        // Broadcast preimage tx by B on offered output from A's commitment tx on A's chain
+        let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+        nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
+        let events = nodes[1].node.get_and_clear_pending_msg_events();
+        assert_eq!(events.len(), 1);
+        match events[0] {
+            MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+            _ => panic!("Unexpected event"),
+        }
+        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+
+        // Verify that A's ChannelManager is able to extract preimage from preimage tx and pass it backward
+        nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn }, 1);
+        let events = nodes[0].node.get_and_clear_pending_msg_events();
+        assert_eq!(events.len(), 1);
+        match events[0] {
+            MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+            _ => panic!("Unexpected event"),
+        }
+    }
+
     #[test]
     fn test_htlc_ignore_latest_remote_commitment() {
         // Test that HTLC transactions spending the latest remote commitment transaction are simply
