Skip to content

Commit 70440a5

Browse files
committed
[bindings] Use consistent imports for MessageSendEvents traits
Our bindings generator is braindead with respect to the idents used in a trait definition - it treats them as if they were used where the trait is being used, instead of where the trait is defined. Thus, if the idents used in a trait definition are not also imported the same in the files where the traits are used, we will claim the idents are bogus. I spent some time trying to track the TypeResolvers globally through the entire conversion run so that we could use the original file's TypeResolver later when using the trait, but it is somewhat of a lifetime mess. While likely possible, import consistency is generally the case anyway, so unless it becomes more of an issue in the future, it likely makes the most sense to just keep imports consistent. This commit keeps imports consistent across trait definition files around `MessageSendEvent` and `MessageSendEventsProvider`.
1 parent ecf736a commit 70440a5

File tree

2 files changed

+9
-9
lines changed

2 files changed

+9
-9
lines changed

lightning/src/ln/msgs.rs

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -35,7 +35,7 @@ use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
3535
use std::{cmp, fmt};
3636
use std::io::Read;
3737

38-
use util::events;
38+
use util::events::MessageSendEventsProvider;
3939
use util::ser::{Readable, Writeable, Writer, FixedLengthReader, HighZeroBytesDroppedVarInt};
4040

4141
use ln::channelmanager::{PaymentPreimage, PaymentHash, PaymentSecret};
@@ -746,7 +746,7 @@ pub enum OptionalField<T> {
746746
///
747747
/// Messages MAY be called in parallel when they originate from different their_node_ids, however
748748
/// they MUST NOT be called in parallel when the two calls have the same their_node_id.
749-
pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Sync {
749+
pub trait ChannelMessageHandler : MessageSendEventsProvider + Send + Sync {
750750
//Channel init:
751751
/// Handle an incoming open_channel message from the given peer.
752752
fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &OpenChannel);
@@ -810,7 +810,7 @@ pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Syn
810810
/// For `gossip_queries` messages there are potential DoS vectors when handling
811811
/// inbound queries. Implementors using an on-disk network graph should be aware of
812812
/// repeated disk I/O for queries accessing different parts of the network graph.
813-
pub trait RoutingMessageHandler : Send + Sync + events::MessageSendEventsProvider {
813+
pub trait RoutingMessageHandler : Send + Sync + MessageSendEventsProvider {
814814
/// Handle an incoming node_announcement message, returning true if it should be forwarded on,
815815
/// false or returning an Err otherwise.
816816
fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError>;

lightning/src/routing/network_graph.rs

Lines changed: 6 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,7 @@ use ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, Reply
2929
use ln::msgs;
3030
use util::ser::{Writeable, Readable, Writer};
3131
use util::logger::Logger;
32-
use util::events;
32+
use util::events::{MessageSendEvent, MessageSendEventsProvider};
3333

3434
use std::{cmp, fmt};
3535
use std::sync::{RwLock, RwLockReadGuard};
@@ -64,7 +64,7 @@ pub struct NetGraphMsgHandler<C: Deref, L: Deref> where C::Target: chain::Access
6464
pub network_graph: RwLock<NetworkGraph>,
6565
chain_access: Option<C>,
6666
full_syncs_requested: AtomicUsize,
67-
pending_events: Mutex<Vec<events::MessageSendEvent>>,
67+
pending_events: Mutex<Vec<MessageSendEvent>>,
6868
logger: L,
6969
}
7070

@@ -244,7 +244,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
244244
let number_of_blocks = 0xffffffff;
245245
log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks);
246246
let mut pending_events = self.pending_events.lock().unwrap();
247-
pending_events.push(events::MessageSendEvent::SendChannelRangeQuery {
247+
pending_events.push(MessageSendEvent::SendChannelRangeQuery {
248248
node_id: their_node_id.clone(),
249249
msg: QueryChannelRange {
250250
chain_hash: self.network_graph.read().unwrap().genesis_hash,
@@ -279,7 +279,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
279279

280280
log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
281281
let mut pending_events = self.pending_events.lock().unwrap();
282-
pending_events.push(events::MessageSendEvent::SendShortIdsQuery {
282+
pending_events.push(MessageSendEvent::SendShortIdsQuery {
283283
node_id: their_node_id.clone(),
284284
msg: QueryShortChannelIds {
285285
chain_hash: msg.chain_hash,
@@ -327,12 +327,12 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
327327
}
328328
}
329329

330-
impl<C: Deref, L: Deref> events::MessageSendEventsProvider for NetGraphMsgHandler<C, L>
330+
impl<C: Deref, L: Deref> MessageSendEventsProvider for NetGraphMsgHandler<C, L>
331331
where
332332
C::Target: chain::Access,
333333
L::Target: Logger,
334334
{
335-
fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
335+
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
336336
let mut ret = Vec::new();
337337
let mut pending_events = self.pending_events.lock().unwrap();
338338
std::mem::swap(&mut ret, &mut pending_events);

0 commit comments

Comments (0)