Commit a27c1cb

Fix rustfmt'd short ChannelManager methods

In the previous commit we formatted a bunch of short methods. Here we clean up the default formatting that rustfmt applied by extracting code into variables.
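Every hunk below applies the same transform: where rustfmt's default output splits a long expression one call per line, the intermediate value or closure is first bound to a named local so the remaining expression fits within the width limit. A minimal, self-contained sketch of the pattern (the `Item` type and all names here are hypothetical, not from the diff):

```rust
struct Item {
	ready: bool,
	pending: bool,
}

impl Item {
	fn is_ready(&self) -> bool {
		self.ready
	}

	fn is_pending(&self) -> bool {
		self.pending
	}
}

fn main() {
	let items = vec![(0u32, Item { ready: false, pending: true })];

	// rustfmt's default layout for the long chain would be:
	//     !items
	//         .iter()
	//         .any(|(_, item)| item.is_ready() || item.is_pending())
	// Naming the predicate first shortens the chain enough to stay on one
	// line, the same trick the first hunk plays with
	// `chan_is_funded_or_outbound`.
	let is_ready_or_pending = |(_, item): &(u32, Item)| item.is_ready() || item.is_pending();
	let all_idle = !items.iter().any(is_ready_or_pending);
	println!("all_idle: {all_idle}");
}
```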
1 parent adc714a · commit a27c1cb

File tree

1 file changed (+74, −93)


lightning/src/ln/channelmanager.rs

Lines changed: 74 additions & 93 deletions
@@ -1517,10 +1517,10 @@ where
 				return false;
 			}
 		}
-		!self
-			.channel_by_id
-			.iter()
-			.any(|(_, channel)| channel.is_funded() || channel.funding().is_outbound())
+		let chan_is_funded_or_outbound = |(_, channel): (_, &Channel<SP>)| {
+			channel.is_funded() || channel.funding().is_outbound()
+		};
+		!self.channel_by_id.iter().any(chan_is_funded_or_outbound)
 			&& self.monitor_update_blocked_actions.is_empty()
 			&& self.closed_channel_monitor_update_ids.is_empty()
 	}
@@ -3322,17 +3322,14 @@ macro_rules! emit_funding_tx_broadcast_safe_event {
 macro_rules! emit_channel_pending_event {
 	($locked_events: expr, $channel: expr) => {
 		if $channel.context.should_emit_channel_pending_event() {
+			let funding_txo = $channel.funding.get_funding_txo().unwrap();
 			$locked_events.push_back((
 				events::Event::ChannelPending {
 					channel_id: $channel.context.channel_id(),
 					former_temporary_channel_id: $channel.context.temporary_channel_id(),
 					counterparty_node_id: $channel.context.get_counterparty_node_id(),
 					user_channel_id: $channel.context.get_user_id(),
-					funding_txo: $channel
-						.funding
-						.get_funding_txo()
-						.unwrap()
-						.into_bitcoin_outpoint(),
+					funding_txo: funding_txo.into_bitcoin_outpoint(),
 					channel_type: Some($channel.funding.get_channel_type().clone()),
 				},
 				None,
@@ -3807,8 +3804,8 @@ where
 		let mut outbound_scid_alias = 0;
 		let mut i = 0;
 		loop {
+			// fuzzing chacha20 doesn't use the key at all so we always get the same alias
 			if cfg!(fuzzing) {
-				// fuzzing chacha20 doesn't use the key at all so we always get the same alias
 				outbound_scid_alias += 1;
 			} else {
 				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(
@@ -3940,22 +3937,17 @@ where
 			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
-				res.extend(
-					peer_state
-						.channel_by_id
-						.iter()
-						// Only `Channels` in the `Channel::Funded` phase can be considered funded.
-						.filter(|(_, chan)| chan.is_funded())
-						.filter(f)
-						.map(|(_channel_id, channel)| {
-							ChannelDetails::from_channel(
-								channel,
-								best_block_height,
-								peer_state.latest_features.clone(),
-								&self.fee_estimator,
-							)
-						}),
-				);
+				// Only `Channels` in the `Channel::Funded` phase can be considered funded.
+				let filtered_chan_by_id =
+					peer_state.channel_by_id.iter().filter(|(_, chan)| chan.is_funded()).filter(f);
+				res.extend(filtered_chan_by_id.map(|(_channel_id, channel)| {
+					ChannelDetails::from_channel(
+						channel,
+						best_block_height,
+						peer_state.latest_features.clone(),
+						&self.fee_estimator,
+					)
+				}));
 			}
 		}
 		res
@@ -4022,12 +4014,8 @@ where
 					&self.fee_estimator,
 				)
 			};
-			return peer_state
-				.channel_by_id
-				.iter()
-				.map(|(_, chan)| (chan))
-				.map(channel_to_details)
-				.collect();
+			let chan_by_id = peer_state.channel_by_id.iter();
+			return chan_by_id.map(|(_, chan)| (chan)).map(channel_to_details).collect();
 		}
 		vec![]
 	}
@@ -8928,9 +8916,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	) -> Result<(), MsgHandleErrInternal> {
 		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
 			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_add_output(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_add_output(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
 				None => Err("tx_add_output"),
 			}
 		})
@@ -8941,9 +8932,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	) -> Result<(), MsgHandleErrInternal> {
 		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
 			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_remove_input(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_remove_input(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
 				None => Err("tx_remove_input"),
 			}
 		})
@@ -8954,9 +8948,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 	) -> Result<(), MsgHandleErrInternal> {
 		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
 			match channel.as_unfunded_v2_mut() {
-				Some(unfunded_channel) => Ok(unfunded_channel
-					.tx_remove_output(msg)
-					.into_msg_send_event(counterparty_node_id)),
+				Some(unfunded_channel) => {
+					let msg_send_event = unfunded_channel
+						.tx_remove_output(msg)
+						.into_msg_send_event(counterparty_node_id);
+					Ok(msg_send_event)
+				},
 				None => Err("tx_remove_output"),
 			}
 		})
@@ -9666,13 +9663,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
 		let num_forward_events = pending_events
 			.iter()
-			.filter(|(ev, _)| {
-				if let events::Event::PendingHTLCsForwardable { .. } = ev {
-					true
-				} else {
-					false
-				}
-			})
+			.filter(|(ev, _)| matches!(ev, events::Event::PendingHTLCsForwardable { .. }))
 			.count();
 		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
 		// events is done in batches and they are not removed until we're done processing each
@@ -10963,30 +10954,32 @@ where
 		payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
 		route_params_config: RouteParametersConfig,
 	) -> Result<(), Bolt12SemanticError> {
+		let create_pending_payment_fn = |invoice_request: &InvoiceRequest, nonce| {
+			let expiration = StaleExpiration::TimerTicks(1);
+			let retryable_invoice_request = RetryableInvoiceRequest {
+				invoice_request: invoice_request.clone(),
+				nonce,
+				needs_retry: true,
+			};
+			self.pending_outbound_payments
+				.add_new_awaiting_invoice(
+					payment_id,
+					expiration,
+					retry_strategy,
+					route_params_config,
+					Some(retryable_invoice_request),
+				)
+				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
+		};
+
 		self.pay_for_offer_intern(
 			offer,
 			quantity,
 			amount_msats,
 			payer_note,
 			payment_id,
 			None,
-			|invoice_request, nonce| {
-				let expiration = StaleExpiration::TimerTicks(1);
-				let retryable_invoice_request = RetryableInvoiceRequest {
-					invoice_request: invoice_request.clone(),
-					nonce,
-					needs_retry: true,
-				};
-				self.pending_outbound_payments
-					.add_new_awaiting_invoice(
-						payment_id,
-						expiration,
-						retry_strategy,
-						route_params_config,
-						Some(retryable_invoice_request),
-					)
-					.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
-			},
+			create_pending_payment_fn,
 		)
 	}

@@ -11290,9 +11283,8 @@ where
 	}
 
 	fn get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
-		self.per_peer_state
-			.read()
-			.unwrap()
+		let per_peer_state = self.per_peer_state.read().unwrap();
+		per_peer_state
 			.iter()
 			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
 			.filter(|(_, peer)| peer.is_connected)
@@ -12112,13 +12104,10 @@ where
 		self.do_chain_event(None, |channel| {
 			if let Some(funding_txo) = channel.funding.get_funding_txo() {
 				if funding_txo.txid == *txid {
-					channel
-						.funding_transaction_unconfirmed(&&WithChannelContext::from(
-							&self.logger,
-							&channel.context,
-							None,
-						))
-						.map(|()| (None, Vec::new(), None))
+					let chan_context =
+						WithChannelContext::from(&self.logger, &channel.context, None);
+					let res = channel.funding_transaction_unconfirmed(&&chan_context);
+					res.map(|()| (None, Vec::new(), None))
 				} else {
 					Ok((None, Vec::new(), None))
 				}
@@ -12445,13 +12434,13 @@ where
 	MR::Target: MessageRouter,
 	L::Target: Logger,
 {
-	fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
+	fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) {
 		// Note that we never need to persist the updated ChannelManager for an inbound
 		// open_channel message - pre-funded channels are never written so there should be no
 		// change to the contents.
 		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
-			let res =
-				self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
+			let msg = OpenChannelMessageRef::V1(message);
+			let res = self.internal_open_channel(&counterparty_node_id, msg);
 			let persist = match &res {
 				Err(e) if e.closes_channel() => {
 					debug_assert!(false, "We shouldn't close a new channel");
@@ -12960,16 +12949,10 @@ where
 		{
 			let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request;
 
-			if self
-				.flow
-				.enqueue_invoice_request(
-					invoice_request,
-					payment_id,
-					nonce,
-					self.get_peers_for_blinded_path(),
-				)
-				.is_err()
-			{
+			let peers = self.get_peers_for_blinded_path();
+			let enqueue_invreq_res =
+				self.flow.enqueue_invoice_request(invoice_request, payment_id, nonce, peers);
+			if enqueue_invreq_res.is_err() {
 				log_warn!(
 					self.logger,
 					"Retry failed for invoice request with payment_id {}",
@@ -14078,11 +14061,9 @@ impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
 	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
 		let len: u64 = Readable::read(reader)?;
 		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
-		let mut events: Self = VecDeque::with_capacity(cmp::min(
-			MAX_ALLOC_SIZE
-				/ mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
-			len,
-		) as usize);
+		let event_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>();
+		let mut events: Self =
+			VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize);
 		for _ in 0..len {
 			let ev_opt = MaybeReadable::read(reader)?;
 			let action = Readable::read(reader)?;
