Minor cleanups #164

Merged
src/ln/channel.rs: 3 changes (2 additions & 1 deletion)
```diff
@@ -893,10 +893,11 @@ impl Channel {
 
 	/// Gets the redeemscript for the funding transaction output (ie the funding transaction output
 	/// pays to get_funding_redeemscript().to_v0_p2wsh()).
+	/// Panics if called before accept_channel/new_from_req
 	pub fn get_funding_redeemscript(&self) -> Script {
 		let builder = Builder::new().push_opcode(opcodes::All::OP_PUSHNUM_2);
 		let our_funding_key = PublicKey::from_secret_key(&self.secp_ctx, &self.local_keys.funding_key).serialize();
-		let their_funding_key = self.their_funding_pubkey.unwrap().serialize();
+		let their_funding_key = self.their_funding_pubkey.expect("get_funding_redeemscript only allowed after accept_channel").serialize();
 		if our_funding_key[..] < their_funding_key[..] {
 			builder.push_slice(&our_funding_key)
 				.push_slice(&their_funding_key)
```
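For reference, the function this hunk hardens assembles the BOLT 3 funding script: a 2-of-2 `OP_CHECKMULTISIG` over the two funding pubkeys, with the lexicographically lesser compressed key pushed first so both peers derive the identical script (and thus the identical P2WSH funding output). A minimal sketch of the full construction, since the diff truncates the tail; the `OP_CHECKMULTISIG` opcode name, `into_script()`, and the import paths are assumptions about the contemporaneous rust-bitcoin `Builder` API, not lines from this PR:

```rust
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::script::{Builder, Script};

// Sketch only: mirrors the visible logic of get_funding_redeemscript and
// fills in the truncated tail under the stated API assumptions.
fn funding_redeemscript_sketch(our_key: [u8; 33], their_key: [u8; 33]) -> Script {
    let builder = Builder::new().push_opcode(opcodes::All::OP_PUSHNUM_2);
    // BOLT 3 orders the two funding pubkeys lexicographically.
    let builder = if our_key[..] < their_key[..] {
        builder.push_slice(&our_key).push_slice(&their_key)
    } else {
        builder.push_slice(&their_key).push_slice(&our_key)
    };
    builder.push_opcode(opcodes::All::OP_PUSHNUM_2)
        .push_opcode(opcodes::All::OP_CHECKMULTISIG)
        .into_script()
}
```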
src/ln/channelmonitor.rs: 7 changes (4 additions & 3 deletions)
```diff
@@ -46,10 +46,11 @@ pub enum ChannelMonitorUpdateErr {
 /// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
 /// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
 /// which we have revoked, allowing our counterparty to claim all funds in the channel!
-/// A call to add_update_monitor is needed to register outpoint and its txid with ChainWatchInterface
-/// after setting funding_txo in a ChannelMonitor
 pub trait ManyChannelMonitor: Send + Sync {
 	/// Adds or updates a monitor for the given `funding_txo`.
+	/// Implementor must also ensure that the funding_txo outpoint is registered with any relevant
+	/// ChainWatchInterfaces such that the provided monitor receives block_connected callbacks with
+	/// any spends of it.
 	fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>;
 }
```
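To make that requirement concrete, here is a hedged sketch of an implementor that performs the registration itself before storing the monitor. The struct, its fields, and the exact `install_watch_outpoint` signature are illustrative assumptions, not the crate's `SimpleManyChannelMonitor` internals:

```rust
// Hypothetical ManyChannelMonitor implementor honoring the doc comment
// above: register the funding outpoint with the chain watcher so that
// block_connected fires for any spend of it, then store the monitor.
struct MonitorStore {
    monitors: Mutex<HashMap<OutPoint, ChannelMonitor>>,
    chain_monitor: Arc<ChainWatchInterface>,
}

impl ManyChannelMonitor for MonitorStore {
    fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> {
        // Assumed era signature: install_watch_outpoint((txid, vout)).
        self.chain_monitor.install_watch_outpoint((funding_txo.txid, funding_txo.index as u32));
        self.monitors.lock().unwrap().insert(funding_txo, monitor);
        Ok(())
    }
}
```

Registering before the monitor is stored avoids a window in which a spend of the funding output could be mined without the monitor ever hearing about it.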

```diff
@@ -471,7 +472,7 @@ impl ChannelMonitor {
 	/// optional, without it this monitor cannot be used in an SPV client, but you may wish to
 	/// avoid this (or call unset_funding_info) on a monitor you wish to send to a watchtower as it
 	/// provides slightly better privacy.
-	/// It's the responsability of the caller to register outpoint and script with passing the former
+	/// It's the responsibility of the caller to register outpoint and script with passing the former
 	/// value as key to add_update_monitor.
 	pub(super) fn set_funding_info(&mut self, funding_info: (OutPoint, Script)) {
 		self.funding_txo = Some(funding_info);
```
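A short sketch of the calling sequence that comment prescribes; this is a fragment, and `chan_monitor`, `funding_txo`, `funding_script`, and `monitor_store` are illustrative names rather than identifiers from this PR:

```rust
// Caller's side: attach the funding info, then register the monitor under
// the same outpoint. Chain registration is the add_update_monitor
// implementor's job, per the ManyChannelMonitor docs above.
chan_monitor.set_funding_info((funding_txo, funding_script.clone()));
monitor_store.add_update_monitor(funding_txo, chan_monitor)?;
```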
src/ln/peer_handler.rs: 54 changes (40 additions & 14 deletions)
```diff
@@ -7,7 +7,7 @@ use util::byte_utils;
 use util::events::{EventsProvider,Event};
 use util::logger::Logger;
 
-use std::collections::{HashMap,LinkedList};
+use std::collections::{HashMap,hash_map,LinkedList};
 use std::sync::{Arc, Mutex};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::{cmp,error,mem,hash,fmt};
@@ -90,6 +90,18 @@ struct PeerHolder<Descriptor: SocketDescriptor> {
 	/// Only add to this set when noise completes:
 	node_id_to_descriptor: HashMap<PublicKey, Descriptor>,
 }
+struct MutPeerHolder<'a, Descriptor: SocketDescriptor + 'a> {
+	peers: &'a mut HashMap<Descriptor, Peer>,
+	node_id_to_descriptor: &'a mut HashMap<PublicKey, Descriptor>,
+}
+impl<Descriptor: SocketDescriptor> PeerHolder<Descriptor> {
+	fn borrow_parts(&mut self) -> MutPeerHolder<Descriptor> {
+		MutPeerHolder {
+			peers: &mut self.peers,
+			node_id_to_descriptor: &mut self.node_id_to_descriptor,
+		}
+	}
+}
 
 pub struct PeerManager<Descriptor: SocketDescriptor> {
 	message_handler: MessageHandler,
@@ -100,7 +112,6 @@ pub struct PeerManager<Descriptor: SocketDescriptor> {
 	logger: Arc<Logger>,
 }
 
-
 macro_rules! encode_msg {
 	($msg: expr, $msg_code: expr) => {
 		{
```
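The point of the new `MutPeerHolder` is to split one `&mut PeerHolder` into two independent `&mut` fields. In the hunks below, `do_read_event` mutates `node_id_to_descriptor` while it still holds a `&mut Peer` pulled out of `peers`; doing that through the `MutexGuard` directly trips the borrow checker, because each field access re-derefs the guard and the two derefs conflict. A standalone toy with simplified types (not crate code) showing why the split version compiles:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Holder {
    peers: HashMap<u32, String>,
    node_id_to_descriptor: HashMap<String, u32>,
}

struct MutHolder<'a> {
    peers: &'a mut HashMap<u32, String>,
    node_id_to_descriptor: &'a mut HashMap<String, u32>,
}

impl Holder {
    // One deref of the guard happens here; the result is two &mut borrows
    // the compiler can see are disjoint.
    fn borrow_parts(&mut self) -> MutHolder<'_> {
        MutHolder {
            peers: &mut self.peers,
            node_id_to_descriptor: &mut self.node_id_to_descriptor,
        }
    }
}

fn main() {
    let holder = Mutex::new(Holder { peers: HashMap::new(), node_id_to_descriptor: HashMap::new() });
    let mut guard = holder.lock().unwrap();
    guard.peers.insert(1, "node_a".to_string());

    let parts = guard.borrow_parts();
    if let Some(peer) = parts.peers.get_mut(&1) {
        // `peer` keeps parts.peers mutably borrowed, yet this insert
        // compiles. The equivalent through the guard (guard.peers.get_mut
        // followed by guard.node_id_to_descriptor.insert) is rejected.
        parts.node_id_to_descriptor.insert(peer.clone(), 1);
    }
}
```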
```diff
@@ -136,7 +147,12 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 	/// completed and we are sure the remote peer has the private key for the given node_id.
 	pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
 		let peers = self.peers.lock().unwrap();
-		peers.peers.values().filter_map(|p| p.their_node_id).collect()
+		peers.peers.values().filter_map(|p| {
+			if !p.channel_encryptor.is_ready_for_encryption() || p.their_global_features.is_none() {
+				return None;
+			}
+			p.their_node_id
+		}).collect()
 	}
 
 	/// Indicates a new outbound connection has been established to a node with the given node_id.
@@ -267,14 +283,14 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 
 	fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
 		let pause_read = {
-			let mut peers = self.peers.lock().unwrap();
-			let (should_insert_node_id, pause_read) = match peers.peers.get_mut(peer_descriptor) {
+			let mut peers_lock = self.peers.lock().unwrap();
+			let peers = peers_lock.borrow_parts();
+			let pause_read = match peers.peers.get_mut(peer_descriptor) {
 				None => panic!("Descriptor for read_event is not already known to PeerManager"),
 				Some(peer) => {
 					assert!(peer.pending_read_buffer.len() > 0);
 					assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos);
 
-					let mut insert_node_id = None;
 					let mut read_pos = 0;
 					while read_pos < data.len() {
 						{
```
```diff
@@ -353,6 +369,18 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 				}
 			}
 
+			macro_rules! insert_node_id {
+				() => {
+					match peers.node_id_to_descriptor.entry(peer.their_node_id.unwrap()) {
+						hash_map::Entry::Occupied(_) => {
+							peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
+							return Err(PeerHandleError{ no_connection_possible: false })
+						},
+						hash_map::Entry::Vacant(entry) => entry.insert(peer_descriptor.clone()),
+					};
+				}
+			}
+
 			let next_step = peer.channel_encryptor.get_noise_step();
 			match next_step {
 				NextNoiseStep::ActOne => {
```
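The macro replaces the old two-phase bookkeeping (set `insert_node_id`, then insert after the `match`): a single `Entry` lookup both detects a second connection claiming an already-registered node_id and records the descriptor otherwise, which is also why the first hunk adds `hash_map` to the imports. A toy version of the check-and-insert idiom, using only `std`:

```rust
use std::collections::{hash_map, HashMap};

// Refuse a duplicate registration or insert a new one with one map lookup.
fn register(map: &mut HashMap<&'static str, u32>, node_id: &'static str, descriptor: u32) -> Result<(), ()> {
    match map.entry(node_id) {
        hash_map::Entry::Occupied(_) => Err(()),
        hash_map::Entry::Vacant(entry) => {
            entry.insert(descriptor);
            Ok(())
        }
    }
}

fn main() {
    let mut connections = HashMap::new();
    assert!(register(&mut connections, "node_a", 1).is_ok());
    // A second descriptor claiming node_a is rejected.
    assert!(register(&mut connections, "node_a", 2).is_err());
}
```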
```diff
@@ -366,7 +394,7 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 						peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
 						peer.pending_read_is_header = true;
 
-						insert_node_id = Some(peer.their_node_id.unwrap());
+						insert_node_id!();
 						let mut local_features = msgs::LocalFeatures::new();
 						if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
 							self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
@@ -382,7 +410,7 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 						peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
 						peer.pending_read_is_header = true;
 						peer.their_node_id = Some(their_node_id);
-						insert_node_id = Some(peer.their_node_id.unwrap());
+						insert_node_id!();
 					},
 					NextNoiseStep::NoiseComplete => {
 						if peer.pending_read_is_header {
@@ -417,6 +445,9 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 							if msg.local_features.requires_unknown_bits() {
 								return Err(PeerHandleError{ no_connection_possible: true });
 							}
+							if peer.their_global_features.is_some() {
+								return Err(PeerHandleError{ no_connection_possible: false });
+							}
 							peer.their_global_features = Some(msg.global_features);
 							peer.their_local_features = Some(msg.local_features);
 
@@ -607,15 +638,10 @@ impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
 
 					Self::do_attempt_write_data(peer_descriptor, peer);
 
-					(insert_node_id /* should_insert_node_id */, peer.pending_outbound_buffer.len() > 10) // pause_read
+					peer.pending_outbound_buffer.len() > 10 // pause_read
 				}
 			};
 
-			match should_insert_node_id {
-				Some(node_id) => { peers.node_id_to_descriptor.insert(node_id, peer_descriptor.clone()); },
-				None => {}
-			};
-
 			pause_read
 		};
 
```