Commit 7c1b9cf

Require counterparty_node_id TLV for ChannelMonitor
New `ChannelMonitor`s created starting with v0.0.110 already have this field set, and those created earlier will have it set once a `ChannelMonitorUpdate` created in v0.0.116 or later has been applied. It would be extremely rare for a user to fall under neither condition: that would require a channel opened almost 3 years ago with no off-chain activity on it in the last 2 years. Nonetheless, a panic has been added on `ChannelMonitor` deserialization so that affected users have a clear path forward: run a v0.1.* release and send/route a payment over the channel, or close it, before upgrading to v0.2.0.
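
The mechanics, in miniature: the type-9 TLV is still read as optional for backwards compatibility, but a missing value now aborts deserialization with actionable guidance instead of propagating an `Option` through the API. Below is a minimal, self-contained sketch of that read-time check; the types are stand-ins, not LDK's actual TLV macros.

    // Stand-in for the read-side TLV state: the counterparty id is parsed as
    // optional (old monitors may lack TLV type 9), but its absence is now fatal.
    struct MonitorTlvs {
        counterparty_node_id: Option<[u8; 33]>,
    }

    fn finish_read(tlvs: MonitorTlvs, channel_id: &str) -> [u8; 33] {
        match tlvs.counterparty_node_id {
            Some(id) => id,
            // Mirrors the commit's panic: the message tells the user how to recover.
            None => panic!(
                "Found monitor for channel {} with no updates since v0.0.118. \
                 These monitors are no longer supported. To continue, run a v0.1 \
                 release, send/route a payment over the channel or close it.",
                channel_id
            ),
        }
    }

    fn main() {
        // A monitor written by any recent LDK version carries the field, so this succeeds.
        let tlvs = MonitorTlvs { counterparty_node_id: Some([2u8; 33]) };
        let _id = finish_read(tlvs, "deadbeef");
    }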
1 parent 5099dce

5 files changed: +88 −128 lines

lightning/src/chain/chainmonitor.rs

Lines changed: 2 additions & 2 deletions
@@ -246,7 +246,7 @@ pub struct ChainMonitor<ChannelSigner: EcdsaChannelSigner, C: Deref, T: Deref, F
 	persister: P,
 	/// "User-provided" (ie persistence-completion/-failed) [`MonitorEvent`]s. These came directly
 	/// from the user and not from a [`ChannelMonitor`].
-	pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>,
+	pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)>>,
 	/// The best block height seen, used as a proxy for the passage of time.
 	highest_chain_height: AtomicUsize,

@@ -874,7 +874,7 @@ where C::Target: chain::Filter,
 		}
 	}

-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
 			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();

lightning/src/chain/channelmonitor.rs

Lines changed: 18 additions & 24 deletions
@@ -1018,7 +1018,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: EcdsaChannelSigner> {
 	best_block: BestBlock,

 	/// The node_id of our counterparty
-	counterparty_node_id: Option<PublicKey>,
+	counterparty_node_id: PublicKey,

 	/// Initial counterparty commmitment data needed to recreate the commitment tx
 	/// in the persistence pipeline for third-party watchtowers. This will only be present on

@@ -1240,7 +1240,7 @@ impl<Signer: EcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> {
 		(3, self.htlcs_resolved_on_chain, required_vec),
 		(5, pending_monitor_events, required_vec),
 		(7, self.funding_spend_seen, required),
-		(9, self.counterparty_node_id, option),
+		(9, self.counterparty_node_id, required),
 		(11, self.confirmed_commitment_tx_counterparty_output, option),
 		(13, self.spendable_txids_confirmed, required_vec),
 		(15, self.counterparty_fulfilled_htlcs, required),

@@ -1336,7 +1336,7 @@ impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger {
 	}

 	pub(crate) fn from_impl<S: EcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>, payment_hash: Option<PaymentHash>) -> Self {
-		let peer_id = monitor_impl.counterparty_node_id;
+		let peer_id = Some(monitor_impl.counterparty_node_id);
 		let channel_id = Some(monitor_impl.channel_id());
 		WithChannelMonitor {
 			logger, peer_id, channel_id, payment_hash,

@@ -1459,7 +1459,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 			spendable_txids_confirmed: Vec::new(),

 			best_block,
-			counterparty_node_id: Some(counterparty_node_id),
+			counterparty_node_id,
 			initial_counterparty_commitment_info: None,
 			balances_empty_height: None,

@@ -1785,10 +1785,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitor<Signer> {
 	}

 	/// Gets the `node_id` of the counterparty for this channel.
-	///
-	/// Will be `None` for channels constructed on LDK versions prior to 0.0.110 and always `Some`
-	/// otherwise.
-	pub fn get_counterparty_node_id(&self) -> Option<PublicKey> {
+	pub fn get_counterparty_node_id(&self) -> PublicKey {
 		self.inner.lock().unwrap().counterparty_node_id
 	}

@@ -3197,12 +3194,8 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				log_funding_info!(self), self.latest_update_id, updates.update_id, updates.updates.len());
 		}

-		if updates.counterparty_node_id.is_some() {
-			if self.counterparty_node_id.is_none() {
-				self.counterparty_node_id = updates.counterparty_node_id;
-			} else {
-				debug_assert_eq!(self.counterparty_node_id, updates.counterparty_node_id);
-			}
+		if let Some(counterparty_node_id) = &updates.counterparty_node_id {
+			debug_assert_eq!(self.counterparty_node_id, *counterparty_node_id);
 		}

 		// ChannelMonitor updates may be applied after force close if we receive a preimage for a

@@ -3373,10 +3366,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				package_target_feerate_sat_per_1000_weight, commitment_tx, anchor_output_idx,
 			} => {
 				let channel_id = self.channel_id;
-				// unwrap safety: `ClaimEvent`s are only available for Anchor channels,
-				// introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
-				// since v0.0.110.
-				let counterparty_node_id = self.counterparty_node_id.unwrap();
+				let counterparty_node_id = self.counterparty_node_id;
 				let commitment_txid = commitment_tx.compute_txid();
 				debug_assert_eq!(self.current_holder_commitment_tx.txid, commitment_txid);
 				let pending_htlcs = self.current_holder_commitment_tx.non_dust_htlcs();

@@ -3407,10 +3397,7 @@ impl<Signer: EcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 				target_feerate_sat_per_1000_weight, htlcs, tx_lock_time,
 			} => {
 				let channel_id = self.channel_id;
-				// unwrap safety: `ClaimEvent`s are only available for Anchor channels,
-				// introduced with v0.0.116. counterparty_node_id is guaranteed to be `Some`
-				// since v0.0.110.
-				let counterparty_node_id = self.counterparty_node_id.unwrap();
+				let counterparty_node_id = self.counterparty_node_id;
 				let mut htlc_descriptors = Vec::with_capacity(htlcs.len());
 				for htlc in htlcs {
 					htlc_descriptors.push(HTLCDescriptor {

@@ -5138,6 +5125,13 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 				chan_utils::get_to_countersignatory_with_anchors_redeemscript(&payment_point).to_p2wsh();
 		}

+		let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint));
+		if counterparty_node_id.is_none() {
+			panic!("Found monitor for channel {} with no updates since v0.0.118. \
+				These monitors are no longer supported. \
+				To continue, run a v0.1 release, send/route a payment over the channel or close it.", channel_id);
+		}
+
 		Ok((best_block.block_hash, ChannelMonitor::from_impl(ChannelMonitorImpl {
 			latest_update_id,
 			commitment_transaction_number_obscure_factor,

@@ -5149,7 +5143,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP

 			channel_keys_id,
 			holder_revocation_basepoint,
-			channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)),
+			channel_id,
 			funding_info,
 			first_confirmed_funding_txo: first_confirmed_funding_txo.0.unwrap(),
 			current_counterparty_commitment_txid,

@@ -5193,7 +5187,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP
 			spendable_txids_confirmed: spendable_txids_confirmed.unwrap(),

 			best_block,
-			counterparty_node_id,
+			counterparty_node_id: counterparty_node_id.unwrap(),
 			initial_counterparty_commitment_info,
 			balances_empty_height,
 			failed_back_htlc_ids: new_hash_set(),
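
With `get_counterparty_node_id` now returning a bare `PublicKey`, call sites lose their `unwrap()`s and `Some`/`None` branches. Here is a self-contained sketch of the before/after shape of a caller, using a stand-in `Monitor` type rather than LDK's actual `ChannelMonitor`:

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct PublicKey([u8; 33]); // stand-in for bitcoin::secp256k1::PublicKey

    struct Monitor {
        // Previously Option<PublicKey>; deserialization now guarantees the field.
        counterparty_node_id: PublicKey,
    }

    impl Monitor {
        // The accessor is total: no unwrap() and no outpoint-based fallback needed.
        fn get_counterparty_node_id(&self) -> PublicKey {
            self.counterparty_node_id
        }
    }

    fn main() {
        let monitor = Monitor { counterparty_node_id: PublicKey([2u8; 33]) };
        // Old shape: `if let Some(id) = monitor.get_counterparty_node_id() { ... }`
        // New shape: use the value directly.
        let id = monitor.get_counterparty_node_id();
        assert_eq!(id, monitor.counterparty_node_id);
    }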

lightning/src/chain/mod.rs

Lines changed: 1 addition & 1 deletion
@@ -304,7 +304,7 @@ pub trait Watch<ChannelSigner: EcdsaChannelSigner> {
 	///
 	/// For details on asynchronous [`ChannelMonitor`] updating and returning
 	/// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>;
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)>;
 }

 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to

lightning/src/ln/channelmanager.rs

Lines changed: 66 additions & 100 deletions
@@ -7686,24 +7686,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		(htlc_forwards, decode_update_add_htlcs)
 	}

-	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
+	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: &PublicKey) {
 		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock

-		let counterparty_node_id = match counterparty_node_id {
-			Some(cp_id) => cp_id.clone(),
-			None => {
-				// TODO: Once we can rely on the counterparty_node_id from the
-				// monitor event, this and the outpoint_to_peer map should be removed.
-				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-				match outpoint_to_peer.get(funding_txo) {
-					Some(cp_id) => cp_id.clone(),
-					None => return,
-				}
-			}
-		};
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let mut peer_state_lock;
-		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
 		if peer_state_mutex_opt.is_none() { return }
 		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
 		let peer_state = &mut *peer_state_lock;

@@ -7714,7 +7702,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			pending.len()
 		} else { 0 };

-		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
+		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
 		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
 			highest_applied_update_id, remaining_in_flight);

@@ -9466,67 +9454,56 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 			for monitor_event in monitor_events.drain(..) {
 				match monitor_event {
 					MonitorEvent::HTLCEvent(htlc_update) => {
-						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
+						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), Some(htlc_update.payment_hash));
 						if let Some(preimage) = htlc_update.payment_preimage {
 							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
 							self.claim_funds_internal(htlc_update.source, preimage,
 								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
-								false, counterparty_node_id, funding_outpoint, channel_id, None);
+								false, Some(counterparty_node_id), funding_outpoint, channel_id, None);
 						} else {
 							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
-							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
+							let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
 						}
 					},
 					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
-						let counterparty_node_id_opt = match counterparty_node_id {
-							Some(cp_id) => Some(cp_id),
-							None => {
-								// TODO: Once we can rely on the counterparty_node_id from the
-								// monitor event, this and the outpoint_to_peer map should be removed.
-								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
-								outpoint_to_peer.get(&funding_outpoint).cloned()
-							}
-						};
-						if let Some(counterparty_node_id) = counterparty_node_id_opt {
-							let per_peer_state = self.per_peer_state.read().unwrap();
-							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
-								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
-								let peer_state = &mut *peer_state_lock;
-								let pending_msg_events = &mut peer_state.pending_msg_events;
-								if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
-									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
-										reason
-									} else {
-										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
-									};
-									let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
-									let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
-									failed_channels.push(shutdown_res);
-									if let Some(funded_chan) = chan.as_funded() {
-										if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
-											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
-											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
-												msg: update
-											});
-										}
-										pending_msg_events.push(events::MessageSendEvent::HandleError {
-											node_id: funded_chan.context.get_counterparty_node_id(),
-											action: msgs::ErrorAction::DisconnectPeer {
-												msg: Some(msgs::ErrorMessage {
-													channel_id: funded_chan.context.channel_id(),
-													data: reason.to_string()
-												})
-											},
+						let per_peer_state = self.per_peer_state.read().unwrap();
+						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+							let peer_state = &mut *peer_state_lock;
+							let pending_msg_events = &mut peer_state.pending_msg_events;
+							if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(channel_id) {
+								let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+									reason
+								} else {
+									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
+								};
+								let mut shutdown_res = chan_entry.get_mut().force_shutdown(false, reason.clone());
+								let chan = remove_channel_entry!(self, peer_state, chan_entry, shutdown_res);
+								failed_channels.push(shutdown_res);
+								if let Some(funded_chan) = chan.as_funded() {
+									if let Ok(update) = self.get_channel_update_for_broadcast(funded_chan) {
+										let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+										pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
+											msg: update
 										});
 									}
+									pending_msg_events.push(events::MessageSendEvent::HandleError {
+										node_id: counterparty_node_id,
+										action: msgs::ErrorAction::DisconnectPeer {
+											msg: Some(msgs::ErrorMessage {
+												channel_id,
+												data: reason.to_string()
+											})
+										},
+									});
 								}
 							}
 						}
 					},
 					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
-						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
+						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, &counterparty_node_id);
 					},
 				}
 			}

@@ -13756,26 +13733,26 @@ where
 		for (channel_id, monitor) in args.channel_monitors.iter() {
 			if !channel_id_set.contains(channel_id) {
 				let mut should_queue_fc_update = false;
-				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-					// If the ChannelMonitor had any updates, we may need to update it further and
-					// thus track it in `closed_channel_monitor_update_ids`. If the channel never
-					// had any updates at all, there can't be any HTLCs pending which we need to
-					// claim.
-					// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
-					// provide it with a closure update its `update_id` will be at 1.
-					if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
-						should_queue_fc_update = !monitor.no_further_updates_allowed();
-						let mut latest_update_id = monitor.get_latest_update_id();
-						if should_queue_fc_update {
-							latest_update_id += 1;
-						}
-						per_peer_state.entry(counterparty_node_id)
-							.or_insert_with(|| Mutex::new(empty_peer_state()))
-							.lock().unwrap()
-							.closed_channel_monitor_update_ids.entry(monitor.channel_id())
-							.and_modify(|v| *v = cmp::max(latest_update_id, *v))
-							.or_insert(latest_update_id);
+				let counterparty_node_id = monitor.get_counterparty_node_id();
+
+				// If the ChannelMonitor had any updates, we may need to update it further and
+				// thus track it in `closed_channel_monitor_update_ids`. If the channel never
+				// had any updates at all, there can't be any HTLCs pending which we need to
+				// claim.
+				// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
+				// provide it with a closure update its `update_id` will be at 1.
+				if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
+					should_queue_fc_update = !monitor.no_further_updates_allowed();
+					let mut latest_update_id = monitor.get_latest_update_id();
+					if should_queue_fc_update {
+						latest_update_id += 1;
 					}
+					per_peer_state.entry(counterparty_node_id)
+						.or_insert_with(|| Mutex::new(empty_peer_state()))
+						.lock().unwrap()
+						.closed_channel_monitor_update_ids.entry(monitor.channel_id())
+						.and_modify(|v| *v = cmp::max(latest_update_id, *v))
+						.or_insert(latest_update_id);
 				}

 				if !should_queue_fc_update {

@@ -13786,31 +13763,20 @@ where
 			let channel_id = monitor.channel_id();
 			log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
 				&channel_id);
-			let mut monitor_update = ChannelMonitorUpdate {
+			let monitor_update = ChannelMonitorUpdate {
 				update_id: monitor.get_latest_update_id().saturating_add(1),
-				counterparty_node_id: None,
+				counterparty_node_id: Some(counterparty_node_id),
 				updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
 				channel_id: Some(monitor.channel_id()),
 			};
 			let funding_txo = monitor.get_funding_txo();
-			if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
-				let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-					counterparty_node_id,
-					funding_txo,
-					channel_id,
-					update: monitor_update,
-				};
-				close_background_events.push(update);
-			} else {
-				// This is a fairly old `ChannelMonitor` that hasn't seen an update to its
-				// off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
-				// `ChannelMonitorUpdate` will set the counterparty ID).
-				// Thus, we assume that it has no pending HTLCs and we will not need to
-				// generate a `ChannelMonitorUpdate` for it aside from this
-				// `ChannelForceClosed` one.
-				monitor_update.update_id = u64::MAX;
-				close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, monitor_update)));
-			}
+			let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+				counterparty_node_id,
+				funding_txo,
+				channel_id,
+				update: monitor_update,
+			};
+			close_background_events.push(update);
 		}
 	}

@@ -14369,7 +14335,7 @@ where
 							// downstream chan is closed (because we don't have a
 							// channel_id -> peer map entry).
 							counterparty_opt.is_none(),
-							counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
+							Some(monitor.get_counterparty_node_id()),
 							monitor.get_funding_txo(), monitor.channel_id()))
 					} else { None }
 				} else {

@@ -15055,8 +15021,8 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

 		create_announced_chan_between_nodes(&nodes, 0, 1);
-
-		// Since we do not send peer storage, we manually simulate receiving a dummy
+
+		// Since we do not send peer storage, we manually simulate receiving a dummy
 		// `PeerStorage` from the channel partner.
 		nodes[0].node.handle_peer_storage(nodes[1].node.get_our_node_id(), msgs::PeerStorage{data: vec![0; 100]});
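
The `ChannelManager` side shows why the guarantee pays off: every `MonitorEvent` batch now arrives with a definite counterparty id, so the `outpoint_to_peer` fallback map and its TODOs can go. A self-contained sketch of the simplified event-dispatch shape (stand-in types; the real handler locks `per_peer_state` and per-peer mutexes as in the diff above):

    use std::collections::HashMap;

    type PublicKey = [u8; 33]; // stand-in
    type ChannelId = u64;      // stand-in

    enum MonitorEvent { Completed { monitor_update_id: u64 } }

    struct Manager {
        per_peer_state: HashMap<PublicKey, Vec<String>>,
    }

    impl Manager {
        // The id is a required parameter now, not Option plus a fallback lookup.
        fn process_events(&mut self, counterparty_node_id: PublicKey,
                          channel_id: ChannelId, events: Vec<MonitorEvent>) {
            // Direct lookup: no outpoint_to_peer map consulted on the miss path.
            let Some(peer_state) = self.per_peer_state.get_mut(&counterparty_node_id) else { return };
            for event in events {
                let MonitorEvent::Completed { monitor_update_id } = event;
                peer_state.push(format!("channel {channel_id}: update {monitor_update_id} done"));
            }
        }
    }

    fn main() {
        let peer = [7u8; 33];
        let mut mgr = Manager { per_peer_state: HashMap::from([(peer, Vec::new())]) };
        mgr.process_events(peer, 1, vec![MonitorEvent::Completed { monitor_update_id: 42 }]);
        assert_eq!(mgr.per_peer_state[&peer].len(), 1);
    }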

lightning/src/util/test_utils.rs

Lines changed: 1 addition & 1 deletion
@@ -500,7 +500,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {

 	fn release_pending_monitor_events(
 		&self,
-	) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> {
+	) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, PublicKey)> {
 		return self.chain_monitor.release_pending_monitor_events();
 	}
 }
