Skip to content

Commit ba07622

Browse files
committed
Add a new monitor update result handling macro
Over the next few commits, this macro will replace the `handle_monitor_update_res` macro. It takes a different approach - instead of receiving the message(s) that need to be re-sent after the monitor update completes and pushing them back into the channel, we'll not get the messages from the channel at all until we're ready for them. This will unify our message sending into only actually fetching + sending messages in the common monitor-update-completed code, rather than both there *and* in the functions that call `Channel` when new messages are originated.
1 parent 34218cc commit ba07622

File tree

1 file changed: +75 additions, -0 deletions

lightning/src/ln/channelmanager.rs

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1476,6 +1476,81 @@ macro_rules! emit_channel_ready_event {
14761476
}
14771477
}
14781478

/// Handles the result (`ChannelMonitorUpdateStatus`) of a new channel monitor update.
///
/// Unlike the older `handle_monitor_update_res` approach, callers do not pass in the
/// messages to re-send after the update completes; instead, messages are only fetched
/// from the `Channel` once the update has completed, in the `Completed` arm below.
///
/// Arms:
/// * `InProgress` — the update is in flight; log and return `Ok(())`, holding messages
///   until completion.
/// * `PermanentFailure` — force-close the channel, remove it from tracking maps, run the
///   caller-supplied `$remove` expression, and return an `Err` describing the shutdown.
/// * `Completed` — if this update is the latest queued one, restore the channel's
///   in-flight state (RAA/commitment/`channel_ready`/announcement sigs), push any
///   resulting messages, then (after dropping the per-peer lock) forward HTLCs,
///   finalize claims, and fail backwards any failed HTLCs.
///
/// The second (entry-based) arm delegates to the first, using the `Entry` to remove the
/// channel on permanent failure.
macro_rules! handle_new_monitor_update {
	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
		// update_maps_on_chan_removal needs to be able to take id_to_peer, so make sure we can in
		// any case so that it won't deadlock.
		debug_assert!($self.id_to_peer.try_lock().is_ok());
		match $update_res {
			ChannelMonitorUpdateStatus::InProgress => {
				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
					log_bytes!($chan.channel_id()[..]));
				Ok(())
			},
			ChannelMonitorUpdateStatus::PermanentFailure => {
				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
					log_bytes!($chan.channel_id()[..]));
				update_maps_on_chan_removal!($self, $chan);
				// Build the error before `$remove` runs so we can still read the channel's
				// id/user-id and broadcast a final channel_update.
				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
					"ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
					$chan.get_user_id(), $chan.force_shutdown(false),
					$self.get_channel_update_for_broadcast(&$chan).ok()));
				$remove;
				res
			},
			ChannelMonitorUpdateStatus::Completed => {
				// Only restore the channel if this completion corresponds to the most
				// recent monitor update — both the next queued update and the latest
				// update id must match (an id of 0 means no update was queued at all).
				if ($update_id == 0 || $chan.get_next_monitor_update()
					.expect("We can't be processing a monitor update if it isn't queued")
					.update_id == $update_id) &&
					$chan.get_latest_monitor_update_id() == $update_id
				{
					let mut updates = $chan.monitor_updating_restored(&$self.logger,
						&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
						$self.best_block.read().unwrap().height());
					let counterparty_node_id = $chan.get_counterparty_node_id();
					let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
						// We only send a channel_update in the case where we are just now sending a
						// channel_ready and the channel is in a usable state. We may re-send a
						// channel_update later through the announcement_signatures process for public
						// channels, but there's no reason not to just inform our counterparty of our fees
						// now.
						if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
							Some(events::MessageSendEvent::SendChannelUpdate {
								node_id: counterparty_node_id,
								msg,
							})
						} else { None }
					} else { None };
					let htlc_forwards = $self.handle_channel_resumption(
						&mut $peer_state.pending_msg_events, $chan, updates.raa,
						updates.commitment_update, updates.order, updates.accepted_htlcs,
						updates.funding_broadcastable, updates.channel_ready,
						updates.announcement_sigs);
					if let Some(upd) = channel_update {
						$peer_state.pending_msg_events.push(upd);
					}

					// Capture the id before releasing the per-peer lock: the calls below
					// (forwarding, claims, backwards-failures) must not hold it.
					let channel_id = $chan.channel_id();
					core::mem::drop($peer_state_lock);

					if let Some(forwards) = htlc_forwards {
						$self.forward_htlcs(&mut [forwards][..]);
					}
					$self.finalize_claims(updates.finalized_claimed_htlcs);
					for failure in updates.failed_htlcs.drain(..) {
						let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
						$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
					}
				}
				Ok(())
			},
		}
	} };
	($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $chan_entry: expr) => {
		handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
	}
}
14791554
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
14801555
where
14811556
M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,

0 commit comments

Comments
 (0)