Commit 65737c6

Handling for sign_counterparty_commitment failing during normal op
If `sign_counterparty_commitment` fails (e.g. because the signer is temporarily disconnected), this really indicates that we should retry sending the message later, rather than force-closing the channel (which probably won't even work if the signer is missing). Here we add initial handling of `sign_counterparty_commitment` failing during normal channel operation, setting a new flag in `ChannelContext` which indicates we should retry sending the commitment update later. We don't yet add any ability to do that retry.
1 parent: eb44d99
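To make the intended control flow concrete, here is a minimal, self-contained sketch of the pattern (the `FlakySigner` and `ChannelState` types below are hypothetical stand-ins, not LDK's actual API):

// Hypothetical stand-ins for LDK's signer and ChannelContext, only to
// illustrate the retry-flag pattern this commit introduces.

struct FlakySigner {
	available: bool,
}

impl FlakySigner {
	// Mirrors how sign_counterparty_commitment may transiently fail: Err(())
	// means "no signature right now", not "the channel is broken".
	fn sign_counterparty_commitment(&self) -> Result<[u8; 64], ()> {
		if self.available { Ok([0u8; 64]) } else { Err(()) }
	}
}

struct ChannelState {
	signer_pending_commitment_update: bool,
}

impl ChannelState {
	// On signer failure, record that a commitment update is still owed and
	// return Err(()) so the caller omits the message instead of force-closing.
	fn get_commitment_update_for_send(&mut self, signer: &FlakySigner) -> Result<[u8; 64], ()> {
		match signer.sign_counterparty_commitment() {
			Ok(sig) => {
				self.signer_pending_commitment_update = false;
				Ok(sig)
			},
			Err(()) => {
				self.signer_pending_commitment_update = true;
				Err(())
			},
		}
	}
}

fn main() {
	let mut chan = ChannelState { signer_pending_commitment_update: false };

	// Signer is down: no message goes out, the channel stays open, and the
	// flag records that a retry is needed.
	let offline = FlakySigner { available: false };
	assert!(chan.get_commitment_update_for_send(&offline).is_err());
	assert!(chan.signer_pending_commitment_update);

	// Signer comes back: a later retry succeeds and clears the flag.
	let online = FlakySigner { available: true };
	assert!(chan.get_commitment_update_for_send(&online).is_ok());
	assert!(!chan.signer_pending_commitment_update);
}

The actual change threads the same idea through `get_last_commitment_update_for_send`, as shown in the diff below.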


lightning/src/ln/channel.rs

Lines changed: 38 additions & 13 deletions
@@ -700,6 +700,14 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
 
+	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
+	/// but our signer (initially) refused to give us a signature, we should retry at some point in
+	/// the future when the signer indicates it may have a signature for us.
+	///
+	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
+	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
+	signer_pending_commitment_update: bool,
+
 	// pending_update_fee is filled when sending and receiving update_fee.
 	//
 	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
@@ -3047,8 +3055,8 @@ impl<SP: Deref> Channel<SP> where
 			self.context.monitor_pending_revoke_and_ack = true;
 			if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
 				// If we were going to send a commitment_signed after the RAA, go ahead and do all
-				// the corresponding HTLC status updates so that get_last_commitment_update
-				// includes the right HTLCs.
+				// the corresponding HTLC status updates so that
+				// get_last_commitment_update_for_send includes the right HTLCs.
 				self.context.monitor_pending_commitment_signed = true;
 				let mut additional_update = self.build_commitment_no_status_check(logger);
 				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
@@ -3422,9 +3430,10 @@ impl<SP: Deref> Channel<SP> where
 			// cells) while we can't update the monitor, so we just return what we have.
 			if require_commitment {
 				self.context.monitor_pending_commitment_signed = true;
-				// When the monitor updating is restored we'll call get_last_commitment_update(),
-				// which does not update state, but we're definitely now awaiting a remote revoke
-				// before we can step forward any more, so set it here.
+				// When the monitor updating is restored we'll call
+				// get_last_commitment_update_for_send(), which does not update state, but we're
+				// definitely now awaiting a remote revoke before we can step forward any more, so
+				// set it here.
 				let mut additional_update = self.build_commitment_no_status_check(logger);
 				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
 				// strictly increasing by one, so decrement it here.
@@ -3726,9 +3735,11 @@ impl<SP: Deref> Channel<SP> where
 			Some(self.get_last_revoke_and_ack())
 		} else { None };
 		let commitment_update = if self.context.monitor_pending_commitment_signed {
-			self.mark_awaiting_response();
-			Some(self.get_last_commitment_update(logger))
+			self.get_last_commitment_update_for_send(logger).ok()
 		} else { None };
+		if commitment_update.is_some() {
+			self.mark_awaiting_response();
+		}
 
 		self.context.monitor_pending_revoke_and_ack = false;
 		self.context.monitor_pending_commitment_signed = false;
@@ -3789,7 +3800,8 @@ impl<SP: Deref> Channel<SP> where
 		}
 	}
 
-	fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
+	/// Gets the last commitment update for immediate sending to our peer.
+	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
 		let mut update_add_htlcs = Vec::new();
 		let mut update_fulfill_htlcs = Vec::new();
 		let mut update_fail_htlcs = Vec::new();
@@ -3848,10 +3860,17 @@ impl<SP: Deref> Channel<SP> where
 		log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
 			&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
 			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
-		msgs::CommitmentUpdate {
+		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
+			self.context.signer_pending_commitment_update = false;
+			update
+		} else {
+			self.context.signer_pending_commitment_update = true;
+			return Err(());
+		};
+		Ok(msgs::CommitmentUpdate {
 			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
-			commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
-		}
+			commitment_signed,
+		})
 	}
 
 	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
@@ -4031,7 +4050,7 @@ impl<SP: Deref> Channel<SP> where
 			Ok(ReestablishResponses {
 				channel_ready, shutdown_msg, announcement_sigs,
 				raa: required_revoke,
-				commitment_update: Some(self.get_last_commitment_update(logger)),
+				commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
 				order: self.context.resend_order.clone(),
 			})
 		}
@@ -5389,7 +5408,7 @@ impl<SP: Deref> Channel<SP> where
 				}
 
 				let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
-					.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
+					.map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
 				signature = res.0;
 				htlc_signatures = res.1;
 
@@ -5702,6 +5721,8 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 				monitor_pending_failures: Vec::new(),
 				monitor_pending_finalized_fulfills: Vec::new(),
 
+				signer_pending_commitment_update: false,
+
 				#[cfg(debug_assertions)]
 				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
 				#[cfg(debug_assertions)]
@@ -6349,6 +6370,8 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 				monitor_pending_failures: Vec::new(),
 				monitor_pending_finalized_fulfills: Vec::new(),
 
+				signer_pending_commitment_update: false,
+
 				#[cfg(debug_assertions)]
 				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
 				#[cfg(debug_assertions)]
@@ -7435,6 +7458,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			monitor_pending_failures,
 			monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
 
+			signer_pending_commitment_update: false,
+
 			pending_update_fee,
 			holding_cell_update_fee,
 			next_holder_htlc_id,
