
Commit 55a392a

Handle monitor update failure during funding on the fundee side
This carries a surprising amount of complexity despite only being possible in the case where monitor updating failed during the processing of funding_created. Specifically, it requires rebroadcasting funding_locked once we successfully persist our monitor again. As an alternative, we could never send funding_signed when the monitor failed to persist, but this approach avoids needless delays during funding.
1 parent 650bb79 commit 55a392a
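Below is a minimal, self-contained sketch of the fundee-side flow described above, using hypothetical types and method names rather than the actual Channel/ChannelManager API: a temporary monitor-persistence failure while handling funding_created still lets us send funding_signed, but funding_locked is deferred and only sent once the monitor has been persisted.

#[derive(Debug, PartialEq)]
enum MonitorUpdateErr { TemporaryFailure, PermanentFailure }

#[derive(Default)]
struct FundeeChannel {
    // Set when the user reported a failure to persist our ChannelMonitor.
    monitor_update_failed: bool,
    // Set when funding confirmed while the monitor was still unpersisted,
    // meaning funding_locked must be sent later.
    monitor_pending_funding_locked: bool,
}

impl FundeeChannel {
    // Processing funding_created: even if the monitor write failed temporarily we can
    // still send funding_signed - no funds are at risk before we accept payments.
    fn on_funding_created(&mut self, persist: Result<(), MonitorUpdateErr>) -> bool {
        match persist {
            Ok(()) => true,
            Err(MonitorUpdateErr::TemporaryFailure) => {
                self.monitor_update_failed = true;
                true // send funding_signed anyway, avoiding a needless funding delay
            }
            Err(MonitorUpdateErr::PermanentFailure) => false, // give up on the channel
        }
    }

    // Funding transaction confirmed: hold funding_locked if the monitor isn't persisted yet.
    fn on_funding_confirmed(&mut self) -> Option<&'static str> {
        if self.monitor_update_failed {
            self.monitor_pending_funding_locked = true;
            None
        } else {
            Some("funding_locked")
        }
    }

    // The user reports the monitor was finally persisted: send the deferred funding_locked.
    fn monitor_updating_restored(&mut self) -> Option<&'static str> {
        self.monitor_update_failed = false;
        if self.monitor_pending_funding_locked {
            self.monitor_pending_funding_locked = false;
            Some("funding_locked")
        } else {
            None
        }
    }
}

fn main() {
    let mut chan = FundeeChannel::default();
    assert!(chan.on_funding_created(Err(MonitorUpdateErr::TemporaryFailure)));
    assert_eq!(chan.on_funding_confirmed(), None); // funding_locked deferred
    assert_eq!(chan.monitor_updating_restored(), Some("funding_locked"));
}

In the actual diff below, the deferral is the new monitor_pending_funding_locked flag in Channel, and the deferred message is returned as an extra Option<msgs::FundingLocked> from monitor_updating_restored.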

File tree

4 files changed, +143 -36 lines changed


src/ln/chanmon_update_fail_tests.rs

Lines changed: 47 additions & 7 deletions
@@ -1557,7 +1557,9 @@ fn monitor_update_claim_fail_no_response() {
 // Note that restore_between_fails with !fail_on_generate is useless
 // Also note that !fail_on_generate && !fail_on_signed is useless
 // Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
-fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool) {
+// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
+// restore_b_before_conf has no meaning if !confirm_a_first
+fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
     // Test that if the monitor update generated by funding_transaction_generated fails we continue
     // the channel setup happily after the update is restored.
     let mut nodes = create_network(2, &[None, None]);
@@ -1574,6 +1576,7 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
     nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
     check_added_monitors!(nodes[0], 1);

+    *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
     nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
     check_added_monitors!(nodes[1], 1);

@@ -1623,8 +1626,45 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:
         _ => panic!("Unexpected event"),
     };

-    let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
-    let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+    if confirm_a_first {
+        confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+        nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())).unwrap();
+    } else {
+        assert!(!restore_b_before_conf);
+        confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+    }
+
+    // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
+    nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+    nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+    reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+    assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+    if !restore_b_before_conf {
+        confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+        assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+    }
+
+    *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+    nodes[1].node.test_restore_channel_monitor();
+    check_added_monitors!(nodes[1], 1);
+
+    let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
+        nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())).unwrap();
+
+        confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+        let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
+        (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
+    } else {
+        if restore_b_before_conf {
+            confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+        }
+        let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+        (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
+    };
     for node in nodes.iter() {
         assert!(node.router.handle_channel_announcement(&announcement).unwrap());
         node.router.handle_channel_update(&as_update).unwrap();
@@ -1637,8 +1677,8 @@ fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails:

 #[test]
 fn during_funding_monitor_fail() {
-    do_during_funding_monitor_fail(false, false, true);
-    do_during_funding_monitor_fail(true, false, true);
-    do_during_funding_monitor_fail(true, true, true);
-    do_during_funding_monitor_fail(true, true, false);
+    do_during_funding_monitor_fail(false, false, true, true, true);
+    do_during_funding_monitor_fail(true, false, true, false, false);
+    do_during_funding_monitor_fail(true, true, true, true, false);
+    do_during_funding_monitor_fail(true, true, false, false, false);
 }

src/ln/channel.rs

Lines changed: 50 additions & 17 deletions
@@ -181,9 +181,9 @@ enum ChannelState {
     /// "disconnected" and no updates are allowed until after we've done a channel_reestablish
     /// dance.
     PeerDisconnected = (1 << 7),
-    /// Flag which is set on ChannelFunded and FundingSent indicating the user has told us they
-    /// failed to update our ChannelMonitor somewhere and we should pause sending any outbound
-    /// messages until they've managed to do so.
+    /// Flag which is set on ChannelFunded, FundingCreated, and FundingSent indicating the user has
+    /// told us they failed to update our ChannelMonitor somewhere and we should pause sending any
+    /// outbound messages until they've managed to do so.
     MonitorUpdateFailed = (1 << 8),
     /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
     /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
@@ -248,6 +248,7 @@ pub(super) struct Channel {
     /// send it first.
     resend_order: RAACommitmentOrder,

+    monitor_pending_funding_locked: bool,
     monitor_pending_revoke_and_ack: bool,
     monitor_pending_commitment_signed: bool,
     monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>,
@@ -457,6 +458,7 @@ impl Channel {

             resend_order: RAACommitmentOrder::CommitmentFirst,

+            monitor_pending_funding_locked: false,
             monitor_pending_revoke_and_ack: false,
             monitor_pending_commitment_signed: false,
             monitor_pending_forwards: Vec::new(),
@@ -672,6 +674,7 @@ impl Channel {

             resend_order: RAACommitmentOrder::CommitmentFirst,

+            monitor_pending_funding_locked: false,
             monitor_pending_revoke_and_ack: false,
             monitor_pending_commitment_signed: false,
             monitor_pending_forwards: Vec::new(),
@@ -1583,10 +1586,12 @@ impl Channel {
         } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
             self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
             self.channel_update_count += 1;
-        } else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 &&
-                // Note that funding_signed/funding_created will have decremented both by 1!
-                self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
-                self.cur_remote_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+        } else if (self.channel_state & (ChannelState::ChannelFunded as u32) != 0 &&
+                // Note that funding_signed/funding_created will have decremented both by 1!
+                self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
+                self.cur_remote_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1) ||
+                (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) ==
+                 (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32)) {
             if self.their_cur_commitment_point != Some(msg.next_per_commitment_point) {
                 return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point"));
             }
@@ -2349,11 +2354,29 @@ impl Channel {
     /// Indicates that the latest ChannelMonitor update has been committed by the client
     /// successfully and we should restore normal operation. Returns messages which should be sent
     /// to the remote side.
-    pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool) {
+    pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) {
         assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
         self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
+
         let needs_broadcast_safe = self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.channel_outbound;

+        // Because we will never generate a FundingBroadcastSafe event when we're in
+        // MonitorUpdateFailed, if we assume the user only broadcast the funding transaction when
+        // they received the FundingBroadcastSafe event, we can only ever hit
+        // monitor_pending_funding_locked when we're an inbound channel which failed to persist the
+        // monitor on funding_signed, and we even got the funding transaction confirmed before the
+        // monitor was persisted.
+        let funding_locked = if self.monitor_pending_funding_locked {
+            assert!(!self.channel_outbound, "Funding transaction broadcast without FundingBroadcastSafe!");
+            self.monitor_pending_funding_locked = false;
+            let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number);
+            let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret);
+            Some(msgs::FundingLocked {
+                channel_id: self.channel_id(),
+                next_per_commitment_point: next_per_commitment_point,
+            })
+        } else { None };
+
         let mut forwards = Vec::new();
         mem::swap(&mut forwards, &mut self.monitor_pending_forwards);
         let mut failures = Vec::new();
@@ -2362,7 +2385,7 @@
         if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
             self.monitor_pending_revoke_and_ack = false;
             self.monitor_pending_commitment_signed = false;
-            return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe);
+            return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe, funding_locked);
         }

         let raa = if self.monitor_pending_revoke_and_ack {
@@ -2380,7 +2403,7 @@
             if commitment_update.is_some() { "a" } else { "no" },
             if raa.is_some() { "an" } else { "no" },
             match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
-        (raa, commitment_update, order, forwards, failures, needs_broadcast_safe)
+        (raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked)
     }

     pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError> {
@@ -2490,7 +2513,9 @@
         } else { None };

         if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
-            if self.channel_state & ChannelState::OurFundingLocked as u32 == 0 {
+            // If we're waiting on a monitor update, we shouldn't re-send any funding_locked's.
+            if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 ||
+                    self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                 if msg.next_remote_commitment_number != 0 {
                     return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet"));
                 }
@@ -2981,12 +3006,17 @@
                 //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
                 //a protocol oversight, but I assume I'm just missing something.
                 if need_commitment_update {
-                    let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number);
-                    let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret);
-                    return Ok(Some(msgs::FundingLocked {
-                        channel_id: self.channel_id,
-                        next_per_commitment_point: next_per_commitment_point,
-                    }));
+                    if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
+                        let next_per_commitment_secret = self.build_local_commitment_secret(self.cur_local_commitment_transaction_number);
+                        let next_per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &next_per_commitment_secret);
+                        return Ok(Some(msgs::FundingLocked {
+                            channel_id: self.channel_id,
+                            next_per_commitment_point: next_per_commitment_point,
+                        }));
+                    } else {
+                        self.monitor_pending_funding_locked = true;
+                        return Ok(None);
+                    }
                 }
             }
         }
@@ -3705,6 +3735,7 @@ impl Writeable for Channel {
             RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
         }

+        self.monitor_pending_funding_locked.write(writer)?;
         self.monitor_pending_revoke_and_ack.write(writer)?;
         self.monitor_pending_commitment_signed.write(writer)?;

@@ -3872,6 +3903,7 @@ impl<R : ::std::io::Read> ReadableArgs<R, Arc<Logger>> for Channel {
             _ => return Err(DecodeError::InvalidValue),
         };

+        let monitor_pending_funding_locked = Readable::read(reader)?;
         let monitor_pending_revoke_and_ack = Readable::read(reader)?;
         let monitor_pending_commitment_signed = Readable::read(reader)?;

@@ -3968,6 +4000,7 @@

             resend_order,

+            monitor_pending_funding_locked,
             monitor_pending_revoke_and_ack,
             monitor_pending_commitment_signed,
             monitor_pending_forwards,

src/ln/channelmanager.rs

Lines changed: 32 additions & 4 deletions
@@ -1684,7 +1684,7 @@ impl ChannelManager {
                     ChannelMonitorUpdateErr::TemporaryFailure => true,
                 }
             } else {
-                let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe) = channel.monitor_updating_restored();
+                let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
                 if !pending_forwards.is_empty() {
                     htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
                 }
@@ -1722,6 +1722,19 @@
                         user_channel_id: channel.get_user_id(),
                     });
                 }
+                if let Some(msg) = funding_locked {
+                    pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+                        node_id: channel.get_their_node_id(),
+                        msg,
+                    });
+                    if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+                        pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+                            node_id: channel.get_their_node_id(),
+                            msg: announcement_sigs,
+                        });
+                    }
+                    short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+                }
                 true
             }
         } else { true }
@@ -1790,7 +1803,7 @@
     }

     fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
-        let ((funding_msg, monitor_update), chan) = {
+        let ((funding_msg, monitor_update), mut chan) = {
            let mut channel_lock = self.channel_state.lock().unwrap();
            let channel_state = channel_lock.borrow_parts();
            match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
@@ -1806,8 +1819,23 @@
        };
        // Because we have exclusive ownership of the channel here we can release the channel_state
        // lock before add_update_monitor
-        if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
-            unimplemented!();
+        if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+            match e {
+                ChannelMonitorUpdateErr::PermanentFailure => {
+                    // Note that we reply with the new channel_id in error messages if we gave up on the
+                    // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+                    // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+                    // any messages referencing a previously-closed channel anyway.
+                    return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+                },
+                ChannelMonitorUpdateErr::TemporaryFailure => {
+                    // There's no problem signing a counterparty's funding transaction if our monitor
+                    // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+                    // accepted payment from yet. We do, however, need to wait to send our funding_locked
+                    // until we have persisted our monitor.
+                    chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+                },
+            }
        }
        let mut channel_state_lock = self.channel_state.lock().unwrap();
        let channel_state = channel_state_lock.borrow_parts();

src/ln/functional_test_utils.rs

Lines changed: 14 additions & 8 deletions
@@ -221,31 +221,37 @@ pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, c
     tx
 }

-pub fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
-    confirm_transaction(&node_b.chain_monitor, &tx, tx.version);
-    node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingLocked, node_a.node.get_our_node_id())).unwrap();
+pub fn create_chan_between_nodes_with_value_confirm_first(node_recv: &Node, node_conf: &Node, tx: &Transaction) {
+    confirm_transaction(&node_conf.chain_monitor, &tx, tx.version);
+    node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id())).unwrap();
+}

+pub fn create_chan_between_nodes_with_value_confirm_second(node_recv: &Node, node_conf: &Node) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
     let channel_id;
-
-    confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
-    let events_6 = node_a.node.get_and_clear_pending_msg_events();
+    let events_6 = node_conf.node.get_and_clear_pending_msg_events();
     assert_eq!(events_6.len(), 2);
     ((match events_6[0] {
         MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
             channel_id = msg.channel_id.clone();
-            assert_eq!(*node_id, node_b.node.get_our_node_id());
+            assert_eq!(*node_id, node_recv.node.get_our_node_id());
             msg.clone()
         },
         _ => panic!("Unexpected event"),
     }, match events_6[1] {
         MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
-            assert_eq!(*node_id, node_b.node.get_our_node_id());
+            assert_eq!(*node_id, node_recv.node.get_our_node_id());
             msg.clone()
         },
         _ => panic!("Unexpected event"),
     }), channel_id)
 }

+pub fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+    create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
+    confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
+    create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
+}
+
 pub fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
     let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
     let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
