Commit 9802afa

Handle MonitorUpdateCompletionActions after monitor update sync
In a previous PR, we added a `MonitorUpdateCompletionAction` enum which describes actions to take after a `ChannelMonitorUpdate` persistence completes. At the time, it was only used to execute actions in-line; in the next commit we'll start (correctly) deferring the existing actions until after monitor updates complete.
1 parent ba07622 commit 9802afa
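
To make the deferral pattern concrete, here is a minimal, self-contained Rust sketch of the idea. It is not LDK's actual API: `Manager`, `ChannelState`, `CompletionAction`, and the string payload are hypothetical stand-ins (in LDK the queue lives in each peer's `monitor_update_blocked_actions` map and the actions carry payment-claim data).

use std::collections::HashMap;

// Hypothetical, simplified stand-ins for the types in this commit; the real
// `MonitorUpdateCompletionAction` is defined in `channelmanager.rs`.
#[derive(Debug)]
enum CompletionAction {
	EmitPaymentClaimed(String),
}

#[derive(Default)]
struct ChannelState {
	latest_monitor_update_id: u64,
	// Actions queued while a `ChannelMonitorUpdate` persistence is in flight.
	blocked_actions: Vec<CompletionAction>,
}

#[derive(Default)]
struct Manager {
	channels: HashMap<[u8; 32], ChannelState>,
}

impl Manager {
	// While persistence is pending, actions are queued rather than run in-line.
	fn queue_action(&mut self, channel_id: [u8; 32], action: CompletionAction) {
		self.channels.entry(channel_id).or_default().blocked_actions.push(action);
	}

	// Mirrors the shape of `channel_monitor_updated`: only once the completed
	// update is the channel's latest do we drain and run the queued actions.
	fn monitor_updated(&mut self, channel_id: [u8; 32], completed_update_id: u64) {
		let chan = match self.channels.get_mut(&channel_id) {
			Some(chan) => chan,
			None => return,
		};
		if completed_update_id != chan.latest_monitor_update_id {
			// An older update completed; a later one is still in flight.
			return;
		}
		for action in chan.blocked_actions.drain(..) {
			// In LDK this is `handle_monitor_update_completion_actions`.
			println!("running deferred action: {:?}", action);
		}
	}
}

fn main() {
	let mut mgr = Manager::default();
	let chan_id = [0u8; 32];
	mgr.channels.insert(chan_id, ChannelState { latest_monitor_update_id: 2, ..Default::default() });
	mgr.queue_action(chan_id, CompletionAction::EmitPaymentClaimed("payment 0xab..".into()));
	mgr.monitor_updated(chan_id, 1); // stale update: actions stay queued
	mgr.monitor_updated(chan_id, 2); // latest update: actions run now
}

The stale-update check is the important part: as in `channel_monitor_updated` in the diff below, a completion notification for anything other than the channel's latest `update_id` must not release the queued actions, because a later in-flight update still guards them.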

File tree: 1 file changed (+83, -91 lines)


lightning/src/ln/channelmanager.rs

Lines changed: 83 additions & 91 deletions
@@ -466,6 +466,7 @@ enum BackgroundEvent {
 	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
 }
 
+#[derive(Debug)]
 pub(crate) enum MonitorUpdateCompletionAction {
 	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
 	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
@@ -1476,6 +1477,54 @@ macro_rules! emit_channel_ready_event {
 	}
 }
 
+macro_rules! handle_monitor_update_completion {
+	($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr) => { {
+		let mut updates = $chan.monitor_updating_restored(&$self.logger,
+			&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
+			$self.best_block.read().unwrap().height());
+		let counterparty_node_id = $chan.get_counterparty_node_id();
+		let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
+			// We only send a channel_update in the case where we are just now sending a
+			// channel_ready and the channel is in a usable state. We may re-send a
+			// channel_update later through the announcement_signatures process for public
+			// channels, but there's no reason not to just inform our counterparty of our fees
+			// now.
+			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
+				Some(events::MessageSendEvent::SendChannelUpdate {
+					node_id: counterparty_node_id,
+					msg,
+				})
+			} else { None }
+		} else { None };
+
+		let update_actions = $peer_state.monitor_update_blocked_actions
+			.remove(&$chan.channel_id()).unwrap_or(Vec::new());
+
+		let htlc_forwards = $self.handle_channel_resumption(
+			&mut $peer_state.pending_msg_events, $chan, updates.raa,
+			updates.commitment_update, updates.order, updates.accepted_htlcs,
+			updates.funding_broadcastable, updates.channel_ready,
+			updates.announcement_sigs);
+		if let Some(upd) = channel_update {
+			$peer_state.pending_msg_events.push(upd);
+		}
+
+		let channel_id = $chan.channel_id();
+		core::mem::drop($peer_state_lock);
+
+		$self.handle_monitor_update_completion_actions(update_actions);
+
+		if let Some(forwards) = htlc_forwards {
+			$self.forward_htlcs(&mut [forwards][..]);
+		}
+		$self.finalize_claims(updates.finalized_claimed_htlcs);
+		for failure in updates.failed_htlcs.drain(..) {
+			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+		}
+	} }
+}
+
 macro_rules! handle_new_monitor_update {
 	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
 		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
@@ -1504,43 +1553,7 @@ macro_rules! handle_new_monitor_update {
 					.update_id == $update_id) &&
 					$chan.get_latest_monitor_update_id() == $update_id
 				{
-					let mut updates = $chan.monitor_updating_restored(&$self.logger,
-						&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
-						$self.best_block.read().unwrap().height());
-					let counterparty_node_id = $chan.get_counterparty_node_id();
-					let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
-						// We only send a channel_update in the case where we are just now sending a
-						// channel_ready and the channel is in a usable state. We may re-send a
-						// channel_update later through the announcement_signatures process for public
-						// channels, but there's no reason not to just inform our counterparty of our fees
-						// now.
-						if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
-							Some(events::MessageSendEvent::SendChannelUpdate {
-								node_id: counterparty_node_id,
-								msg,
-							})
-						} else { None }
-					} else { None };
-					let htlc_forwards = $self.handle_channel_resumption(
-						&mut $peer_state.pending_msg_events, $chan, updates.raa,
-						updates.commitment_update, updates.order, updates.accepted_htlcs,
-						updates.funding_broadcastable, updates.channel_ready,
-						updates.announcement_sigs);
-					if let Some(upd) = channel_update {
-						$peer_state.pending_msg_events.push(upd);
-					}
-
-					let channel_id = $chan.channel_id();
-					core::mem::drop($peer_state_lock);
-
-					if let Some(forwards) = htlc_forwards {
-						$self.forward_htlcs(&mut [forwards][..]);
-					}
-					$self.finalize_claims(updates.finalized_claimed_htlcs);
-					for failure in updates.failed_htlcs.drain(..) {
-						let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
-						$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
-					}
+					handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $chan);
 				}
 				Ok(())
 			},
@@ -4208,6 +4221,14 @@ where
 		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
 		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
 	-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
+		log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
+			log_bytes!(channel.channel_id()),
+			if raa.is_some() { "an" } else { "no" },
+			if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
+			if funding_broadcastable.is_some() { "" } else { "not " },
+			if channel_ready.is_some() { "sending" } else { "without" },
+			if announcement_sigs.is_some() { "sending" } else { "without" });
+
 		let mut htlc_forwards = None;
 
 		let counterparty_node_id = channel.get_counterparty_node_id();
@@ -4266,65 +4287,36 @@ where
 	fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
-		let htlc_forwards;
-		let (mut pending_failures, finalized_claims, counterparty_node_id) = {
-			let counterparty_node_id = match counterparty_node_id {
-				Some(cp_id) => cp_id.clone(),
-				None => {
-					// TODO: Once we can rely on the counterparty_node_id from the
-					// monitor event, this and the id_to_peer map should be removed.
-					let id_to_peer = self.id_to_peer.lock().unwrap();
-					match id_to_peer.get(&funding_txo.to_channel_id()) {
-						Some(cp_id) => cp_id.clone(),
-						None => return,
-					}
-				}
-			};
-			let per_peer_state = self.per_peer_state.read().unwrap();
-			let mut peer_state_lock;
-			let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
-			if peer_state_mutex_opt.is_none() { return }
-			peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
-			let peer_state = &mut *peer_state_lock;
-			let mut channel = {
-				match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
-					hash_map::Entry::Occupied(chan) => chan,
-					hash_map::Entry::Vacant(_) => return,
+		let counterparty_node_id = match counterparty_node_id {
+			Some(cp_id) => cp_id.clone(),
+			None => {
+				// TODO: Once we can rely on the counterparty_node_id from the
+				// monitor event, this and the id_to_peer map should be removed.
+				let id_to_peer = self.id_to_peer.lock().unwrap();
+				match id_to_peer.get(&funding_txo.to_channel_id()) {
+					Some(cp_id) => cp_id.clone(),
+					None => return,
 				}
-			};
-			if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
-				return;
 			}
-
-			let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height());
-			let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
-				// We only send a channel_update in the case where we are just now sending a
-				// channel_ready and the channel is in a usable state. We may re-send a
-				// channel_update later through the announcement_signatures process for public
-				// channels, but there's no reason not to just inform our counterparty of our fees
-				// now.
-				if let Ok(msg) = self.get_channel_update_for_unicast(channel.get()) {
-					Some(events::MessageSendEvent::SendChannelUpdate {
-						node_id: channel.get().get_counterparty_node_id(),
-						msg,
-					})
-				} else { None }
-			} else { None };
-			htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
-			if let Some(upd) = channel_update {
-				peer_state.pending_msg_events.push(upd);
+		};
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		let mut peer_state_lock;
+		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+		if peer_state_mutex_opt.is_none() { return }
+		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+		let peer_state = &mut *peer_state_lock;
+		let mut channel = {
+			match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
+				hash_map::Entry::Occupied(chan) => chan,
+				hash_map::Entry::Vacant(_) => return,
 			}
-
-			(updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
 		};
-		if let Some(forwards) = htlc_forwards {
-			self.forward_htlcs(&mut [forwards][..]);
-		}
-		self.finalize_claims(finalized_claims);
-		for failure in pending_failures.drain(..) {
-			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
-			self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
+		log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
+			highest_applied_update_id, channel.get().get_latest_monitor_update_id());
+		if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
+			return;
 		}
+		handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, channel.get_mut());
 	}
 
 	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
