Ethereum 2.0 Network Specification Upgrade (#510)

Updates Lighthouse to the latest Eth2 networking specification.

- Sync rewrite (#496)
- Updates to the latest Eth2 networking spec (#495)
- libp2p updates and improvements
Age Manning 2019-08-29 13:23:28 +02:00 committed by GitHub
parent bcffe42712
commit 192380cb58
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 1414 additions and 1523 deletions

View File

@ -11,7 +11,7 @@ store = { path = "./store" }
client = { path = "client" } client = { path = "client" }
version = { path = "version" } version = { path = "version" }
clap = "2.32.0" clap = "2.32.0"
slog = { version = "^2.2.3" , features = ["max_level_trace"] } slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] }
slog-term = "^2.4.0" slog-term = "^2.4.0"
slog-async = "^2.3.0" slog-async = "^2.3.0"
ctrlc = { version = "3.1.1", features = ["termination"] } ctrlc = { version = "3.1.1", features = ["termination"] }

View File

@ -93,16 +93,9 @@ where
} }
do_state_catchup(&beacon_chain, &log); do_state_catchup(&beacon_chain, &log);
// Start the network service, libp2p and syncing threads
// TODO: Add beacon_chain reference to network parameters
let network_config = &client_config.network; let network_config = &client_config.network;
let network_logger = log.new(o!("Service" => "Network")); let (network, network_send) =
let (network, network_send) = NetworkService::new( NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?;
beacon_chain.clone(),
network_config,
executor,
network_logger,
)?;
// spawn the RPC server // spawn the RPC server
let rpc_exit_signal = if client_config.rpc.enabled { let rpc_exit_signal = if client_config.rpc.enabled {

View File

@ -34,7 +34,7 @@ pub fn run<T: BeaconChainTypes>(client: &Client<T>, executor: TaskExecutor, exit
// Panics if libp2p is poisoned. // Panics if libp2p is poisoned.
let connected_peer_count = libp2p.lock().swarm.connected_peers(); let connected_peer_count = libp2p.lock().swarm.connected_peers();
debug!(log, "libp2p"; "peer_count" => connected_peer_count); debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count);
if connected_peer_count <= WARN_PEER_COUNT { if connected_peer_count <= WARN_PEER_COUNT {
warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count);

View File

@ -7,8 +7,8 @@ edition = "2018"
[dependencies] [dependencies]
clap = "2.32.0" clap = "2.32.0"
#SigP repository #SigP repository
libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "61036890d574f5b46573952b20def2baafd6a6e9" }
enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "61036890d574f5b46573952b20def2baafd6a6e9", features = ["serde"] }
types = { path = "../../eth2/types" } types = { path = "../../eth2/types" }
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
@ -26,5 +26,6 @@ smallvec = "0.6.10"
fnv = "1.0.6" fnv = "1.0.6"
unsigned-varint = "0.2.2" unsigned-varint = "0.2.2"
bytes = "0.4.12" bytes = "0.4.12"
tokio-io-timeout = "0.3.1"
lazy_static = "1.3.0" lazy_static = "1.3.0"
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }

View File

@ -1,3 +1,4 @@
use crate::config::*;
use crate::discovery::Discovery; use crate::discovery::Discovery;
use crate::rpc::{RPCEvent, RPCMessage, RPC}; use crate::rpc::{RPCEvent, RPCMessage, RPC};
use crate::{error, NetworkConfig}; use crate::{error, NetworkConfig};
@ -15,7 +16,6 @@ use libp2p::{
NetworkBehaviour, PeerId, NetworkBehaviour, PeerId,
}; };
use slog::{debug, o, trace}; use slog::{debug, o, trace};
use ssz::{ssz_encode, Encode};
use std::num::NonZeroU32; use std::num::NonZeroU32;
use std::time::Duration; use std::time::Duration;
@ -91,7 +91,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubE
fn inject_event(&mut self, event: GossipsubEvent) { fn inject_event(&mut self, event: GossipsubEvent) {
match event { match event {
GossipsubEvent::Message(gs_msg) => { GossipsubEvent::Message(gs_msg) => {
trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg)); trace!(self.log, "Received GossipEvent");
let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data);
@ -192,10 +192,10 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
} }
/// Publishes a message on the pubsub (gossipsub) behaviour. /// Publishes a message on the pubsub (gossipsub) behaviour.
pub fn publish(&mut self, topics: Vec<Topic>, message: PubsubMessage) { pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) {
let message_bytes = ssz_encode(&message); let message_data = message.to_data();
for topic in topics { for topic in topics {
self.gossipsub.publish(topic, message_bytes.clone()); self.gossipsub.publish(topic, message_data.clone());
} }
} }
@ -224,13 +224,20 @@ pub enum BehaviourEvent {
}, },
} }
/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. /// Messages that are passed to and from the pubsub (Gossipsub) behaviour. These are encoded and
/// decoded upstream.
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage { pub enum PubsubMessage {
/// Gossipsub message providing notification of a new block. /// Gossipsub message providing notification of a new block.
Block(Vec<u8>), Block(Vec<u8>),
/// Gossipsub message providing notification of a new attestation. /// Gossipsub message providing notification of a new attestation.
Attestation(Vec<u8>), Attestation(Vec<u8>),
/// Gossipsub message providing notification of a voluntary exit.
VoluntaryExit(Vec<u8>),
/// Gossipsub message providing notification of a new proposer slashing.
ProposerSlashing(Vec<u8>),
/// Gossipsub message providing notification of a new attester slashing.
AttesterSlashing(Vec<u8>),
/// Gossipsub message from an unknown topic. /// Gossipsub message from an unknown topic.
Unknown(Vec<u8>), Unknown(Vec<u8>),
} }
@ -244,29 +251,33 @@ impl PubsubMessage {
*/ */
fn from_topics(topics: &Vec<TopicHash>, data: Vec<u8>) -> Self { fn from_topics(topics: &Vec<TopicHash>, data: Vec<u8>) -> Self {
for topic in topics { for topic in topics {
match topic.as_str() { // compare the prefix and postfix, then match on the topic
let topic_parts: Vec<&str> = topic.as_str().split('/').collect();
if topic_parts.len() == 4
&& topic_parts[1] == TOPIC_PREFIX
&& topic_parts[3] == TOPIC_ENCODING_POSTFIX
{
match topic_parts[2] {
BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data),
BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data),
VOLUNTARY_EXIT_TOPIC => return PubsubMessage::VoluntaryExit(data),
PROPOSER_SLASHING_TOPIC => return PubsubMessage::ProposerSlashing(data),
ATTESTER_SLASHING_TOPIC => return PubsubMessage::AttesterSlashing(data),
_ => {} _ => {}
} }
} }
}
PubsubMessage::Unknown(data) PubsubMessage::Unknown(data)
} }
}
impl Encode for PubsubMessage { fn to_data(self) -> Vec<u8> {
fn is_ssz_fixed_len() -> bool {
false
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
match self { match self {
PubsubMessage::Block(inner) PubsubMessage::Block(data)
| PubsubMessage::Attestation(inner) | PubsubMessage::Attestation(data)
| PubsubMessage::Unknown(inner) => { | PubsubMessage::VoluntaryExit(data)
// Encode the gossip as a Vec<u8>; | PubsubMessage::ProposerSlashing(data)
buf.append(&mut inner.as_ssz_bytes()); | PubsubMessage::AttesterSlashing(data)
} | PubsubMessage::Unknown(data) => data,
} }
} }
} }
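
Editor's note: the new `from_topics` logic accepts a topic only when it has the shape `/eth2/<name>/ssz` and dispatches on the middle segment. Below is a minimal, standalone sketch of that parsing rule; the constants mirror the diff, while `kind_from_topic` is an illustrative helper rather than anything in the codebase.

```rust
const TOPIC_PREFIX: &str = "eth2";
const TOPIC_ENCODING_POSTFIX: &str = "ssz";
const BEACON_BLOCK_TOPIC: &str = "beacon_block";
const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation";

fn kind_from_topic(topic: &str) -> Option<&'static str> {
    // Splitting "/eth2/beacon_block/ssz" on '/' yields ["", "eth2", "beacon_block", "ssz"].
    let parts: Vec<&str> = topic.split('/').collect();
    if parts.len() == 4 && parts[1] == TOPIC_PREFIX && parts[3] == TOPIC_ENCODING_POSTFIX {
        match parts[2] {
            BEACON_BLOCK_TOPIC => Some("block"),
            BEACON_ATTESTATION_TOPIC => Some("attestation"),
            _ => None,
        }
    } else {
        None
    }
}

fn main() {
    assert_eq!(kind_from_topic("/eth2/beacon_block/ssz"), Some("block"));
    // Anything not matching the prefix/postfix form falls through to `Unknown` in the real code.
    assert_eq!(kind_from_topic("eth2/beacon_block/ssz"), None);
}
```
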

View File

@ -6,9 +6,16 @@ use serde_derive::{Deserialize, Serialize};
use std::path::PathBuf; use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
/// The beacon node topic string to subscribe to. /// The gossipsub topic names.
// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX
// For example /eth2/beacon_block/ssz
pub const TOPIC_PREFIX: &str = "eth2";
pub const TOPIC_ENCODING_POSTFIX: &str = "ssz";
pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation";
pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit";
pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing";
pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing";
pub const SHARD_TOPIC_PREFIX: &str = "shard"; pub const SHARD_TOPIC_PREFIX: &str = "shard";
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
@ -63,10 +70,10 @@ impl Default for Config {
discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_address: "127.0.0.1".parse().expect("valid ip address"),
discovery_port: 9000, discovery_port: 9000,
max_peers: 10, max_peers: 10,
//TODO: Set realistic values for production // Note: The topics by default are sent as plain strings. Hashes are an optional
// Note: This defaults topics to plain strings. Not hashes // parameter.
gs_config: GossipsubConfigBuilder::new() gs_config: GossipsubConfigBuilder::new()
.max_transmit_size(1_000_000) .max_transmit_size(1_048_576)
.heartbeat_interval(Duration::from_secs(20)) .heartbeat_interval(Duration::from_secs(20))
.build(), .build(),
boot_nodes: vec![], boot_nodes: vec![],
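
Editor's note: the default gossipsub `max_transmit_size` moves from 1_000_000 to 1_048_576 bytes (exactly 1 MiB). The sketch below is illustrative only; the constant mirrors the diff, and `fits_on_gossipsub` is a hypothetical pre-publish guard, not part of the codebase.

```rust
/// Gossipsub's configured maximum message size after this change (1 MiB).
const GOSSIP_MAX_SIZE: usize = 1_048_576;

/// Hypothetical guard: messages larger than `max_transmit_size` would be rejected
/// by the gossipsub behaviour, so a caller could check before publishing.
fn fits_on_gossipsub(encoded: &[u8]) -> bool {
    encoded.len() <= GOSSIP_MAX_SIZE
}

fn main() {
    let small = vec![0u8; 1_000_000];
    let too_big = vec![0u8; 2 * 1024 * 1024];
    assert!(fits_on_gossipsub(&small));
    assert!(!fits_on_gossipsub(&too_big));
}
```
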

View File

@ -10,7 +10,7 @@ use libp2p::discv5::{Discv5, Discv5Event};
use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::enr::{Enr, EnrBuilder, NodeId};
use libp2p::multiaddr::Protocol; use libp2p::multiaddr::Protocol;
use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler};
use slog::{debug, info, o, warn}; use slog::{debug, info, warn};
use std::collections::HashSet; use std::collections::HashSet;
use std::fs::File; use std::fs::File;
use std::io::prelude::*; use std::io::prelude::*;
@ -64,7 +64,7 @@ impl<TSubstream> Discovery<TSubstream> {
config: &NetworkConfig, config: &NetworkConfig,
log: &slog::Logger, log: &slog::Logger,
) -> error::Result<Self> { ) -> error::Result<Self> {
let log = log.new(o!("Service" => "Libp2p-Discovery")); let log = log.clone();
// checks if current ENR matches that found on disk // checks if current ENR matches that found on disk
let local_enr = load_enr(local_key, config, &log)?; let local_enr = load_enr(local_key, config, &log)?;
@ -74,19 +74,19 @@ impl<TSubstream> Discovery<TSubstream> {
None => String::from(""), None => String::from(""),
}; };
info!(log, "Local ENR: {}", local_enr.to_base64()); info!(log, "ENR Initialised"; "ENR" => local_enr.to_base64(), "Seq" => local_enr.seq());
debug!(log, "Local Node Id: {}", local_enr.node_id()); debug!(log, "Discv5 Node ID Initialised"; "node_id" => format!("{}",local_enr.node_id()));
debug!(log, "Local ENR seq: {}", local_enr.seq());
let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address) let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address)
.map_err(|e| format!("Discv5 service failed: {:?}", e))?; .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?;
// Add bootnodes to routing table // Add bootnodes to routing table
for bootnode_enr in config.boot_nodes.clone() { for bootnode_enr in config.boot_nodes.clone() {
debug!( debug!(
log, log,
"Adding node to routing table: {}", "Adding node to routing table";
bootnode_enr.node_id() "Node ID" => format!("{}",
bootnode_enr.node_id())
); );
discovery.add_enr(bootnode_enr); discovery.add_enr(bootnode_enr);
} }
@ -133,7 +133,7 @@ impl<TSubstream> Discovery<TSubstream> {
fn find_peers(&mut self) { fn find_peers(&mut self) {
// pick a random NodeId // pick a random NodeId
let random_node = NodeId::random(); let random_node = NodeId::random();
debug!(self.log, "Searching for peers..."); debug!(self.log, "Searching for peers");
self.discovery.find_node(random_node); self.discovery.find_node(random_node);
// update the time until next discovery // update the time until next discovery
@ -217,7 +217,7 @@ where
} }
Ok(Async::NotReady) => break, Ok(Async::NotReady) => break,
Err(e) => { Err(e) => {
warn!(self.log, "Discovery peer search failed: {:?}", e); warn!(self.log, "Discovery peer search failed"; "Error" => format!("{:?}", e));
} }
} }
} }
@ -244,16 +244,16 @@ where
}); });
} }
Discv5Event::FindNodeResult { closer_peers, .. } => { Discv5Event::FindNodeResult { closer_peers, .. } => {
debug!(self.log, "Discv5 query found {} peers", closer_peers.len()); debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len());
if closer_peers.is_empty() { if closer_peers.is_empty() {
debug!(self.log, "Discv5 random query yielded empty results"); debug!(self.log, "Discovery random query found no peers");
} }
for peer_id in closer_peers { for peer_id in closer_peers {
// if we need more peers, attempt a connection // if we need more peers, attempt a connection
if self.connected_peers.len() < self.max_peers if self.connected_peers.len() < self.max_peers
&& self.connected_peers.get(&peer_id).is_none() && self.connected_peers.get(&peer_id).is_none()
{ {
debug!(self.log, "Discv5: Peer discovered"; "Peer"=> format!("{:?}", peer_id)); debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id));
return Async::Ready(NetworkBehaviourAction::DialPeer { return Async::Ready(NetworkBehaviourAction::DialPeer {
peer_id, peer_id,
}); });
@ -300,14 +300,12 @@ fn load_enr(
Ok(_) => { Ok(_) => {
match Enr::from_str(&enr_string) { match Enr::from_str(&enr_string) {
Ok(enr) => { Ok(enr) => {
debug!(log, "ENR found in file: {:?}", enr_f);
if enr.node_id() == local_enr.node_id() { if enr.node_id() == local_enr.node_id() {
if enr.ip() == config.discovery_address.into() if enr.ip() == config.discovery_address.into()
&& enr.tcp() == Some(config.libp2p_port) && enr.tcp() == Some(config.libp2p_port)
&& enr.udp() == Some(config.discovery_port) && enr.udp() == Some(config.discovery_port)
{ {
debug!(log, "ENR loaded from file"); debug!(log, "ENR loaded from file"; "file" => format!("{:?}", enr_f));
// the stored ENR has the same configuration, use it // the stored ENR has the same configuration, use it
return Ok(enr); return Ok(enr);
} }
@ -317,11 +315,11 @@ fn load_enr(
local_enr.set_seq(new_seq_no, local_key).map_err(|e| { local_enr.set_seq(new_seq_no, local_key).map_err(|e| {
format!("Could not update ENR sequence number: {:?}", e) format!("Could not update ENR sequence number: {:?}", e)
})?; })?;
debug!(log, "ENR sequence number increased to: {}", new_seq_no); debug!(log, "ENR sequence number increased"; "seq" => new_seq_no);
} }
} }
Err(e) => { Err(e) => {
warn!(log, "ENR from file could not be decoded: {:?}", e); warn!(log, "ENR from file could not be decoded"; "error" => format!("{:?}", e));
} }
} }
} }
@ -344,7 +342,7 @@ fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) {
Err(e) => { Err(e) => {
warn!( warn!(
log, log,
"Could not write ENR to file: {:?}{:?}. Error: {}", dir, ENR_FILENAME, e "Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => format!("{}", e)
); );
} }
} }
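
Editor's note: `load_enr` reuses the on-disk ENR only when its node ID and advertised IP/TCP/UDP match the current config, and otherwise bumps the sequence number so peers pick up the change. A simplified sketch of that decision follows; `StoredEnr` and `NetConfig` are stand-in types for illustration, not the crate's real `Enr`/`NetworkConfig`.

```rust
struct StoredEnr {
    node_id: u64,
    ip: [u8; 4],
    tcp: Option<u16>,
    udp: Option<u16>,
    seq: u64,
}

struct NetConfig {
    discovery_address: [u8; 4],
    libp2p_port: u16,
    discovery_port: u16,
}

/// Returns the sequence number the freshly built local ENR should use.
fn next_enr_seq(stored: Option<&StoredEnr>, local_node_id: u64, config: &NetConfig) -> u64 {
    match stored {
        Some(enr) if enr.node_id == local_node_id => {
            if enr.ip == config.discovery_address
                && enr.tcp == Some(config.libp2p_port)
                && enr.udp == Some(config.discovery_port)
            {
                // Same key and same socket details: keep the stored record as-is.
                enr.seq
            } else {
                // Same key, different address/ports: advertise an updated record.
                enr.seq + 1
            }
        }
        // No stored ENR, or one belonging to a different key: start a fresh record.
        _ => 1,
    }
}

fn main() {
    let config = NetConfig { discovery_address: [127, 0, 0, 1], libp2p_port: 9000, discovery_port: 9000 };
    let stored = StoredEnr { node_id: 7, ip: [127, 0, 0, 1], tcp: Some(9000), udp: Some(9000), seq: 3 };
    assert_eq!(next_enr_seq(Some(&stored), 7, &config), 3);
}
```
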

View File

@ -16,6 +16,7 @@ mod service;
pub use behaviour::PubsubMessage; pub use behaviour::PubsubMessage;
pub use config::{ pub use config::{
Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX,
TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX,
}; };
pub use libp2p::enr::Enr; pub use libp2p::enr::Enr;
pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::gossipsub::{Topic, TopicHash};

View File

@ -41,10 +41,8 @@ impl Encoder for SSZInboundCodec {
RPCErrorResponse::Success(resp) => { RPCErrorResponse::Success(resp) => {
match resp { match resp {
RPCResponse::Hello(res) => res.as_ssz_bytes(), RPCResponse::Hello(res) => res.as_ssz_bytes(),
RPCResponse::BeaconBlockRoots(res) => res.as_ssz_bytes(), RPCResponse::BeaconBlocks(res) => res, // already raw bytes
RPCResponse::BeaconBlockHeaders(res) => res.headers, // already raw bytes RPCResponse::RecentBeaconBlocks(res) => res, // already raw bytes
RPCResponse::BeaconBlockBodies(res) => res.block_bodies, // already raw bytes
RPCResponse::BeaconChainState(res) => res.as_ssz_bytes(),
} }
} }
RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(), RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(),
@ -72,52 +70,30 @@ impl Decoder for SSZInboundCodec {
match self.inner.decode(src).map_err(RPCError::from) { match self.inner.decode(src).map_err(RPCError::from) {
Ok(Some(packet)) => match self.protocol.message_name.as_str() { Ok(Some(packet)) => match self.protocol.message_name.as_str() {
"hello" => match self.protocol.version.as_str() { "hello" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( "1" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes(
&packet, &packet,
)?))), )?))),
_ => Err(RPCError::InvalidProtocol("Unknown HELLO version")), _ => unreachable!("Cannot negotiate an unknown version"),
}, },
"goodbye" => match self.protocol.version.as_str() { "goodbye" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(
&packet, &packet,
)?))), )?))),
_ => Err(RPCError::InvalidProtocol( _ => unreachable!("Cannot negotiate an unknown version"),
"Unknown GOODBYE version.as_str()",
)),
}, },
"beacon_block_roots" => match self.protocol.version.as_str() { "beacon_blocks" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCRequest::BeaconBlockRoots( "1" => Ok(Some(RPCRequest::BeaconBlocks(
BeaconBlockRootsRequest::from_ssz_bytes(&packet)?, BeaconBlocksRequest::from_ssz_bytes(&packet)?,
))), ))),
_ => Err(RPCError::InvalidProtocol( _ => unreachable!("Cannot negotiate an unknown version"),
"Unknown BEACON_BLOCK_ROOTS version.",
)),
}, },
"beacon_block_headers" => match self.protocol.version.as_str() { "recent_beacon_blocks" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCRequest::BeaconBlockHeaders( "1" => Ok(Some(RPCRequest::RecentBeaconBlocks(
BeaconBlockHeadersRequest::from_ssz_bytes(&packet)?, RecentBeaconBlocksRequest::from_ssz_bytes(&packet)?,
))), ))),
_ => Err(RPCError::InvalidProtocol( _ => unreachable!("Cannot negotiate an unknown version"),
"Unknown BEACON_BLOCK_HEADERS version.",
)),
}, },
"beacon_block_bodies" => match self.protocol.version.as_str() { _ => unreachable!("Cannot negotiate an unknown protocol"),
"1.0.0" => Ok(Some(RPCRequest::BeaconBlockBodies(
BeaconBlockBodiesRequest::from_ssz_bytes(&packet)?,
))),
_ => Err(RPCError::InvalidProtocol(
"Unknown BEACON_BLOCK_BODIES version.",
)),
},
"beacon_chain_state" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCRequest::BeaconChainState(
BeaconChainStateRequest::from_ssz_bytes(&packet)?,
))),
_ => Err(RPCError::InvalidProtocol(
"Unknown BEACON_CHAIN_STATE version.",
)),
},
_ => Err(RPCError::InvalidProtocol("Unknown message name.")),
}, },
Ok(None) => Ok(None), Ok(None) => Ok(None),
Err(e) => Err(e), Err(e) => Err(e),
@ -156,10 +132,8 @@ impl Encoder for SSZOutboundCodec {
let bytes = match item { let bytes = match item {
RPCRequest::Hello(req) => req.as_ssz_bytes(), RPCRequest::Hello(req) => req.as_ssz_bytes(),
RPCRequest::Goodbye(req) => req.as_ssz_bytes(), RPCRequest::Goodbye(req) => req.as_ssz_bytes(),
RPCRequest::BeaconBlockRoots(req) => req.as_ssz_bytes(), RPCRequest::BeaconBlocks(req) => req.as_ssz_bytes(),
RPCRequest::BeaconBlockHeaders(req) => req.as_ssz_bytes(), RPCRequest::RecentBeaconBlocks(req) => req.as_ssz_bytes(),
RPCRequest::BeaconBlockBodies(req) => req.as_ssz_bytes(),
RPCRequest::BeaconChainState(req) => req.as_ssz_bytes(),
}; };
// length-prefix // length-prefix
self.inner self.inner
@ -168,7 +142,11 @@ impl Encoder for SSZOutboundCodec {
} }
} }
// Decoder for outbound // Decoder for outbound streams
//
// The majority of the decoding has now been pushed upstream due to the changing specification.
// We prefer to decode blocks and attestations with extra knowledge about the chain to perform
// faster verification checks before decoding entire blocks/attestations.
impl Decoder for SSZOutboundCodec { impl Decoder for SSZOutboundCodec {
type Item = RPCResponse; type Item = RPCResponse;
type Error = RPCError; type Error = RPCError;
@ -177,53 +155,41 @@ impl Decoder for SSZOutboundCodec {
match self.inner.decode(src).map_err(RPCError::from) { match self.inner.decode(src).map_err(RPCError::from) {
Ok(Some(packet)) => match self.protocol.message_name.as_str() { Ok(Some(packet)) => match self.protocol.message_name.as_str() {
"hello" => match self.protocol.version.as_str() { "hello" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes(
&packet, &packet,
)?))), )?))),
_ => Err(RPCError::InvalidProtocol("Unknown HELLO version.")), _ => unreachable!("Cannot negotiate an unknown version"),
}, },
"goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")),
"beacon_block_roots" => match self.protocol.version.as_str() { "beacon_blocks" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCResponse::BeaconBlockRoots( "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))),
BeaconBlockRootsResponse::from_ssz_bytes(&packet)?, _ => unreachable!("Cannot negotiate an unknown version"),
))),
_ => Err(RPCError::InvalidProtocol(
"Unknown BEACON_BLOCK_ROOTS version.",
)),
}, },
"beacon_block_headers" => match self.protocol.version.as_str() { "recent_beacon_blocks" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCResponse::BeaconBlockHeaders( "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))),
BeaconBlockHeadersResponse { _ => unreachable!("Cannot negotiate an unknown version"),
headers: packet.to_vec(),
}, },
))), _ => unreachable!("Cannot negotiate an unknown protocol"),
_ => Err(RPCError::InvalidProtocol(
"Unknown BEACON_BLOCK_HEADERS version.",
)),
}, },
"beacon_block_bodies" => match self.protocol.version.as_str() { Ok(None) => {
"1.0.0" => Ok(Some(RPCResponse::BeaconBlockBodies( // the object sent could be a empty. We return the empty object if this is the case
BeaconBlockBodiesResponse { match self.protocol.message_name.as_str() {
block_bodies: packet.to_vec(), "hello" => match self.protocol.version.as_str() {
// this gets filled in the protocol handler "1" => Ok(None), // cannot have an empty HELLO message. The stream has terminated unexpectedly
block_roots: None, _ => unreachable!("Cannot negotiate an unknown version"),
}, },
))), "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")),
_ => Err(RPCError::InvalidProtocol( "beacon_blocks" => match self.protocol.version.as_str() {
"Unknown BEACON_BLOCK_BODIES version.", "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))),
)), _ => unreachable!("Cannot negotiate an unknown version"),
}, },
"beacon_chain_state" => match self.protocol.version.as_str() { "recent_beacon_blocks" => match self.protocol.version.as_str() {
"1.0.0" => Ok(Some(RPCResponse::BeaconChainState( "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))),
BeaconChainStateResponse::from_ssz_bytes(&packet)?, _ => unreachable!("Cannot negotiate an unknown version"),
))),
_ => Err(RPCError::InvalidProtocol(
"Unknown BEACON_CHAIN_STATE version.",
)),
}, },
_ => Err(RPCError::InvalidProtocol("Unknown method")), _ => unreachable!("Cannot negotiate an unknown protocol"),
}, }
Ok(None) => Ok(None), }
Err(e) => Err(e), Err(e) => Err(e),
} }
} }
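
Editor's note: the outbound decoder now hands `beacon_blocks` / `recent_beacon_blocks` payloads upstream as raw bytes and treats an empty frame as "zero blocks", while an empty `hello` means the stream ended before a handshake arrived; unknown names are unreachable because multistream-select only negotiates known protocols. The condensed sketch below captures that policy with simplified types; names are illustrative and the real decoder operates on negotiated protocol state.

```rust
#[derive(Debug, PartialEq)]
enum Decoded {
    Hello(Vec<u8>),
    BeaconBlocks(Vec<u8>),
    RecentBeaconBlocks(Vec<u8>),
    StreamEnded,
}

fn decode_outbound(message_name: &str, frame: Option<Vec<u8>>) -> Result<Decoded, String> {
    match (message_name, frame) {
        ("hello", Some(bytes)) => Ok(Decoded::Hello(bytes)),
        // Cannot have an empty HELLO message; the stream terminated unexpectedly.
        ("hello", None) => Ok(Decoded::StreamEnded),
        ("goodbye", _) => Err("GOODBYE doesn't have a response".into()),
        // Raw bytes are passed upstream; an empty frame means an empty block list.
        ("beacon_blocks", frame) => Ok(Decoded::BeaconBlocks(frame.unwrap_or_default())),
        ("recent_beacon_blocks", frame) => Ok(Decoded::RecentBeaconBlocks(frame.unwrap_or_default())),
        // Negotiation guarantees we only ever see known protocol names.
        _ => unreachable!("Cannot negotiate an unknown protocol"),
    }
}

fn main() {
    assert_eq!(decode_outbound("beacon_blocks", None).unwrap(), Decoded::BeaconBlocks(Vec::new()));
    assert_eq!(decode_outbound("hello", None).unwrap(), Decoded::StreamEnded);
}
```
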

View File

@ -1,4 +1,4 @@
use super::methods::{RPCErrorResponse, RPCResponse, RequestId}; use super::methods::RequestId;
use super::protocol::{RPCError, RPCProtocol, RPCRequest}; use super::protocol::{RPCError, RPCProtocol, RPCRequest};
use super::RPCEvent; use super::RPCEvent;
use crate::rpc::protocol::{InboundFramed, OutboundFramed}; use crate::rpc::protocol::{InboundFramed, OutboundFramed};
@ -13,8 +13,8 @@ use smallvec::SmallVec;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::{AsyncRead, AsyncWrite};
/// The time (in seconds) before a substream that is awaiting a response times out. /// The time (in seconds) before a substream that is awaiting a response from the user times out.
pub const RESPONSE_TIMEOUT: u64 = 9; pub const RESPONSE_TIMEOUT: u64 = 10;
/// Implementation of `ProtocolsHandler` for the RPC protocol. /// Implementation of `ProtocolsHandler` for the RPC protocol.
pub struct RPCHandler<TSubstream> pub struct RPCHandler<TSubstream>
@ -314,14 +314,14 @@ where
Ok(Async::Ready(response)) => { Ok(Async::Ready(response)) => {
if let Some(response) = response { if let Some(response) = response {
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
build_response(rpc_event, response), RPCEvent::Response(rpc_event.id(), response),
))); )));
} else { } else {
// stream closed early // stream closed early or nothing was sent
return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(
RPCEvent::Error( RPCEvent::Error(
rpc_event.id(), rpc_event.id(),
RPCError::Custom("Stream Closed Early".into()), RPCError::Custom("Stream closed early. Empty response".into()),
), ),
))); )));
} }
@ -365,31 +365,3 @@ where
Ok(Async::NotReady) Ok(Async::NotReady)
} }
} }
/// Given a response back from a peer and the request that sent it, construct a response to send
/// back to the user. This allows for some data manipulation of responses given requests.
fn build_response(rpc_event: RPCEvent, rpc_response: RPCErrorResponse) -> RPCEvent {
let id = rpc_event.id();
// handle the types of responses
match rpc_response {
RPCErrorResponse::Success(response) => {
match response {
// if the response is block roots, tag on the extra request data
RPCResponse::BeaconBlockBodies(mut resp) => {
if let RPCEvent::Request(_id, RPCRequest::BeaconBlockBodies(bodies_req)) =
rpc_event
{
resp.block_roots = Some(bodies_req.block_roots);
}
RPCEvent::Response(
id,
RPCErrorResponse::Success(RPCResponse::BeaconBlockBodies(resp)),
)
}
_ => RPCEvent::Response(id, RPCErrorResponse::Success(response)),
}
}
_ => RPCEvent::Response(id, rpc_response),
}
}

View File

@ -2,7 +2,7 @@
use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz::{impl_decode_via_from, impl_encode_via_from};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use types::{BeaconBlockBody, Epoch, EthSpec, Hash256, Slot}; use types::{Epoch, Hash256, Slot};
/* Request/Response data structures for RPC methods */ /* Request/Response data structures for RPC methods */
@ -13,23 +13,20 @@ pub type RequestId = usize;
/// The HELLO request/response handshake message. /// The HELLO request/response handshake message.
#[derive(Encode, Decode, Clone, Debug)] #[derive(Encode, Decode, Clone, Debug)]
pub struct HelloMessage { pub struct HelloMessage {
/// The network ID of the peer. /// The fork version of the chain we are broadcasting.
pub network_id: u8, pub fork_version: [u8; 4],
/// The chain id for the HELLO request. /// Latest finalized root.
pub chain_id: u64, pub finalized_root: Hash256,
/// The peers last finalized root. /// Latest finalized epoch.
pub latest_finalized_root: Hash256, pub finalized_epoch: Epoch,
/// The peers last finalized epoch. /// The latest block root.
pub latest_finalized_epoch: Epoch, pub head_root: Hash256,
/// The peers last block root. /// The slot associated with the latest block root.
pub best_root: Hash256, pub head_slot: Slot,
/// The peers last slot.
pub best_slot: Slot,
} }
/// The reason given for a `Goodbye` message. /// The reason given for a `Goodbye` message.
@ -74,108 +71,31 @@ impl_decode_via_from!(GoodbyeReason, u64);
/// Request a number of beacon block roots from a peer. /// Request a number of beacon block roots from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockRootsRequest { pub struct BeaconBlocksRequest {
/// The starting slot of the requested blocks. /// The hash tree root of a block on the requested chain.
pub start_slot: Slot, pub head_block_root: Hash256,
/// The starting slot to request blocks.
pub start_slot: u64,
/// The number of blocks from the start slot. /// The number of blocks from the start slot.
pub count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers pub count: u64,
}
/// Response containing a number of beacon block roots from a peer. /// The step increment to receive blocks.
#[derive(Encode, Decode, Clone, Debug, PartialEq)] ///
pub struct BeaconBlockRootsResponse { /// A value of 1 returns every block.
/// List of requested blocks and associated slots. /// A value of 2 returns every second block.
pub roots: Vec<BlockRootSlot>, /// A value of 3 returns every third block and so on.
} pub step: u64,
/// Contains a block root and associated slot.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BlockRootSlot {
/// The block root.
pub block_root: Hash256,
/// The block slot.
pub slot: Slot,
}
/// The response of a beacon block roots request.
impl BeaconBlockRootsResponse {
/// Returns `true` if each `self.roots.slot[i]` is higher than the preceding `i`.
pub fn slots_are_ascending(&self) -> bool {
for window in self.roots.windows(2) {
if window[0].slot >= window[1].slot {
return false;
}
}
true
}
}
/// Request a number of beacon block headers from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockHeadersRequest {
/// The starting header hash of the requested headers.
pub start_root: Hash256,
/// The starting slot of the requested headers.
pub start_slot: Slot,
/// The maximum number of headers than can be returned.
pub max_headers: u64,
/// The maximum number of slots to skip between blocks.
pub skip_slots: u64,
}
/// Response containing requested block headers.
#[derive(Clone, Debug, PartialEq)]
pub struct BeaconBlockHeadersResponse {
/// The list of ssz-encoded requested beacon block headers.
pub headers: Vec<u8>,
} }
/// Request a number of beacon block bodies from a peer. /// Request a number of beacon block bodies from a peer.
#[derive(Encode, Decode, Clone, Debug, PartialEq)] #[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconBlockBodiesRequest { pub struct RecentBeaconBlocksRequest {
/// The list of beacon block bodies being requested. /// The list of beacon block bodies being requested.
pub block_roots: Vec<Hash256>, pub block_roots: Vec<Hash256>,
} }
/// Response containing the list of requested beacon block bodies.
#[derive(Clone, Debug, PartialEq)]
pub struct BeaconBlockBodiesResponse {
/// The list of hashes that were sent in the request and match these roots response. None when
/// sending outbound.
pub block_roots: Option<Vec<Hash256>>,
/// The list of ssz-encoded beacon block bodies being requested.
pub block_bodies: Vec<u8>,
}
/// The decoded version of `BeaconBlockBodiesResponse` which is expected in `SimpleSync`.
pub struct DecodedBeaconBlockBodiesResponse<E: EthSpec> {
/// The list of hashes sent in the request to get this response.
pub block_roots: Vec<Hash256>,
/// The valid decoded block bodies.
pub block_bodies: Vec<BeaconBlockBody<E>>,
}
/// Request values for tree hashes which yield a blocks `state_root`.
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconChainStateRequest {
/// The tree hashes that a value is requested for.
pub hashes: Vec<Hash256>,
}
/// Request values for tree hashes which yield a blocks `state_root`.
// Note: TBD
#[derive(Encode, Decode, Clone, Debug, PartialEq)]
pub struct BeaconChainStateResponse {
/// The values corresponding the to the requested tree hashes.
pub values: bool, //TBD - stubbed with encodable bool
}
/* RPC Handling and Grouping */ /* RPC Handling and Grouping */
// Collection of enums and structs used by the Codecs to encode/decode RPC messages // Collection of enums and structs used by the Codecs to encode/decode RPC messages
@ -183,14 +103,10 @@ pub struct BeaconChainStateResponse {
pub enum RPCResponse { pub enum RPCResponse {
/// A HELLO message. /// A HELLO message.
Hello(HelloMessage), Hello(HelloMessage),
/// A response to a get BEACON_BLOCK_ROOTS request. /// A response to a get BEACON_BLOCKS request.
BeaconBlockRoots(BeaconBlockRootsResponse), BeaconBlocks(Vec<u8>),
/// A response to a get BEACON_BLOCK_HEADERS request. /// A response to a get RECENT_BEACON_BLOCKS request.
BeaconBlockHeaders(BeaconBlockHeadersResponse), RecentBeaconBlocks(Vec<u8>),
/// A response to a get BEACON_BLOCK_BODIES request.
BeaconBlockBodies(BeaconBlockBodiesResponse),
/// A response to a get BEACON_CHAIN_STATE request.
BeaconChainState(BeaconChainStateResponse),
} }
#[derive(Debug)] #[derive(Debug)]
@ -206,8 +122,8 @@ impl RPCErrorResponse {
pub fn as_u8(&self) -> u8 { pub fn as_u8(&self) -> u8 {
match self { match self {
RPCErrorResponse::Success(_) => 0, RPCErrorResponse::Success(_) => 0,
RPCErrorResponse::InvalidRequest(_) => 2, RPCErrorResponse::InvalidRequest(_) => 1,
RPCErrorResponse::ServerError(_) => 3, RPCErrorResponse::ServerError(_) => 2,
RPCErrorResponse::Unknown(_) => 255, RPCErrorResponse::Unknown(_) => 255,
} }
} }
@ -223,8 +139,8 @@ impl RPCErrorResponse {
/// Builds an RPCErrorResponse from a response code and an ErrorMessage /// Builds an RPCErrorResponse from a response code and an ErrorMessage
pub fn from_error(response_code: u8, err: ErrorMessage) -> Self { pub fn from_error(response_code: u8, err: ErrorMessage) -> Self {
match response_code { match response_code {
2 => RPCErrorResponse::InvalidRequest(err), 1 => RPCErrorResponse::InvalidRequest(err),
3 => RPCErrorResponse::ServerError(err), 2 => RPCErrorResponse::ServerError(err),
_ => RPCErrorResponse::Unknown(err), _ => RPCErrorResponse::Unknown(err),
} }
} }
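
Editor's note: the new `BeaconBlocksRequest` adds a `step` field (1 returns every block, 2 every second block, and so on). Below is a small sketch of one reading of the `count`/`step` semantics; field names mirror the diff, but the struct is a simplified stand-in without the SSZ derives or the `head_block_root` field.

```rust
struct BeaconBlocksRequest {
    start_slot: u64,
    count: u64,
    step: u64, // 1 = every block, 2 = every second block, ...
}

/// One interpretation: the slots a peer would respond with for a given request.
fn requested_slots(req: &BeaconBlocksRequest) -> Vec<u64> {
    (0..req.count).map(|i| req.start_slot + i * req.step).collect()
}

fn main() {
    let req = BeaconBlocksRequest { start_slot: 100, count: 4, step: 2 };
    // Requests slots 100, 102, 104, 106.
    assert_eq!(requested_slots(&req), vec![100, 102, 104, 106]);
}
```
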

View File

@ -16,13 +16,17 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tokio::prelude::*; use tokio::prelude::*;
use tokio::timer::timeout; use tokio::timer::timeout;
use tokio::util::FutureExt; use tokio::util::FutureExt;
use tokio_io_timeout::TimeoutStream;
/// The maximum bytes that can be sent across the RPC. /// The maximum bytes that can be sent across the RPC.
const MAX_RPC_SIZE: usize = 4_194_304; // 4M const MAX_RPC_SIZE: usize = 4_194_304; // 4M
/// The protocol prefix the RPC protocol id. /// The protocol prefix the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_node/rpc"; const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
/// The number of seconds to wait for a request once a protocol has been established before the stream is terminated. /// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte).
const REQUEST_TIMEOUT: u64 = 3; const TTFB_TIMEOUT: u64 = 5;
/// The number of seconds to wait for the first bytes of a request once a protocol has been
/// established before the stream is terminated.
const REQUEST_TIMEOUT: u64 = 15;
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct RPCProtocol; pub struct RPCProtocol;
@ -33,11 +37,10 @@ impl UpgradeInfo for RPCProtocol {
fn protocol_info(&self) -> Self::InfoIter { fn protocol_info(&self) -> Self::InfoIter {
vec![ vec![
ProtocolId::new("hello", "1.0.0", "ssz"), ProtocolId::new("hello", "1", "ssz"),
ProtocolId::new("goodbye", "1.0.0", "ssz"), ProtocolId::new("goodbye", "1", "ssz"),
ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), ProtocolId::new("beacon_blocks", "1", "ssz"),
ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), ProtocolId::new("recent_beacon_blocks", "1", "ssz"),
ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"),
] ]
} }
} }
@ -87,7 +90,7 @@ impl ProtocolName for ProtocolId {
// handler to respond to once ready. // handler to respond to once ready.
pub type InboundOutput<TSocket> = (RPCRequest, InboundFramed<TSocket>); pub type InboundOutput<TSocket> = (RPCRequest, InboundFramed<TSocket>);
pub type InboundFramed<TSocket> = Framed<upgrade::Negotiated<TSocket>, InboundCodec>; pub type InboundFramed<TSocket> = Framed<TimeoutStream<upgrade::Negotiated<TSocket>>, InboundCodec>;
type FnAndThen<TSocket> = fn( type FnAndThen<TSocket> = fn(
(Option<RPCRequest>, InboundFramed<TSocket>), (Option<RPCRequest>, InboundFramed<TSocket>),
) -> FutureResult<InboundOutput<TSocket>, RPCError>; ) -> FutureResult<InboundOutput<TSocket>, RPCError>;
@ -118,7 +121,9 @@ where
"ssz" | _ => { "ssz" | _ => {
let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE));
let codec = InboundCodec::SSZ(ssz_codec); let codec = InboundCodec::SSZ(ssz_codec);
Framed::new(socket, codec) let mut timed_socket = TimeoutStream::new(socket);
timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));
Framed::new(timed_socket, codec)
.into_future() .into_future()
.timeout(Duration::from_secs(REQUEST_TIMEOUT)) .timeout(Duration::from_secs(REQUEST_TIMEOUT))
.map_err(RPCError::from as FnMapErr<TSocket>) .map_err(RPCError::from as FnMapErr<TSocket>)
@ -144,10 +149,8 @@ where
pub enum RPCRequest { pub enum RPCRequest {
Hello(HelloMessage), Hello(HelloMessage),
Goodbye(GoodbyeReason), Goodbye(GoodbyeReason),
BeaconBlockRoots(BeaconBlockRootsRequest), BeaconBlocks(BeaconBlocksRequest),
BeaconBlockHeaders(BeaconBlockHeadersRequest), RecentBeaconBlocks(RecentBeaconBlocksRequest),
BeaconBlockBodies(BeaconBlockBodiesRequest),
BeaconChainState(BeaconChainStateRequest),
} }
impl UpgradeInfo for RPCRequest { impl UpgradeInfo for RPCRequest {
@ -165,22 +168,11 @@ impl RPCRequest {
pub fn supported_protocols(&self) -> Vec<ProtocolId> { pub fn supported_protocols(&self) -> Vec<ProtocolId> {
match self { match self {
// add more protocols when versions/encodings are supported // add more protocols when versions/encodings are supported
RPCRequest::Hello(_) => vec![ RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1", "ssz")],
ProtocolId::new("hello", "1.0.0", "ssz"), RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1", "ssz")],
ProtocolId::new("goodbye", "1.0.0", "ssz"), RPCRequest::BeaconBlocks(_) => vec![ProtocolId::new("beacon_blocks", "1", "ssz")],
], RPCRequest::RecentBeaconBlocks(_) => {
RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], vec![ProtocolId::new("recent_beacon_blocks", "1", "ssz")]
RPCRequest::BeaconBlockRoots(_) => {
vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")]
}
RPCRequest::BeaconBlockHeaders(_) => {
vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")]
}
RPCRequest::BeaconBlockBodies(_) => {
vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")]
}
RPCRequest::BeaconChainState(_) => {
vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")]
} }
} }
} }
@ -215,7 +207,8 @@ where
) -> Self::Future { ) -> Self::Future {
match protocol.encoding.as_str() { match protocol.encoding.as_str() {
"ssz" | _ => { "ssz" | _ => {
let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); let ssz_codec =
BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE));
let codec = OutboundCodec::SSZ(ssz_codec); let codec = OutboundCodec::SSZ(ssz_codec);
Framed::new(socket, codec).send(self) Framed::new(socket, codec).send(self)
} }
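
Editor's note: with the prefix change to `/eth2/beacon_chain/req` and versions shortened to "1", the advertised protocol ids follow the eth2 req/resp naming convention. The sketch below assumes the id is rendered as `<prefix>/<message_name>/<version>/<encoding>`; `render_protocol_id` is a hypothetical helper, not the crate's actual `ProtocolName` implementation.

```rust
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";

fn render_protocol_id(message_name: &str, version: &str, encoding: &str) -> String {
    format!("{}/{}/{}/{}", PROTOCOL_PREFIX, message_name, version, encoding)
}

fn main() {
    // The four protocols advertised by `RPCProtocol::protocol_info` in the diff.
    for name in &["hello", "goodbye", "beacon_blocks", "recent_beacon_blocks"] {
        println!("{}", render_protocol_id(name, "1", "ssz"));
    }
    assert_eq!(
        render_protocol_id("hello", "1", "ssz"),
        "/eth2/beacon_chain/req/hello/1/ssz"
    );
}
```
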

View File

@ -1,10 +1,10 @@
use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage}; use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage};
use crate::config::*;
use crate::error; use crate::error;
use crate::multiaddr::Protocol; use crate::multiaddr::Protocol;
use crate::rpc::RPCEvent; use crate::rpc::RPCEvent;
use crate::NetworkConfig; use crate::NetworkConfig;
use crate::{Topic, TopicHash}; use crate::{Topic, TopicHash};
use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC};
use futures::prelude::*; use futures::prelude::*;
use futures::Stream; use futures::Stream;
use libp2p::core::{ use libp2p::core::{
@ -40,13 +40,12 @@ pub struct Service {
impl Service { impl Service {
pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result<Self> { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result<Self> {
debug!(log, "Network-libp2p Service starting"); trace!(log, "Libp2p Service starting");
// load the private key from CLI flag, disk or generate a new one // load the private key from CLI flag, disk or generate a new one
let local_private_key = load_private_key(&config, &log); let local_private_key = load_private_key(&config, &log);
let local_peer_id = PeerId::from(local_private_key.public()); let local_peer_id = PeerId::from(local_private_key.public());
info!(log, "Local peer id: {:?}", local_peer_id); info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id));
let mut swarm = { let mut swarm = {
// Set up the transport - tcp/ws with secio and mplex/yamux // Set up the transport - tcp/ws with secio and mplex/yamux
@ -67,7 +66,7 @@ impl Service {
Ok(_) => { Ok(_) => {
let mut log_address = listen_multiaddr; let mut log_address = listen_multiaddr;
log_address.push(Protocol::P2p(local_peer_id.clone().into())); log_address.push(Protocol::P2p(local_peer_id.clone().into()));
info!(log, "Listening on: {}", log_address); info!(log, "Listening established"; "address" => format!("{}", log_address));
} }
Err(err) => { Err(err) => {
crit!( crit!(
@ -83,20 +82,34 @@ impl Service {
// attempt to connect to user-input libp2p nodes // attempt to connect to user-input libp2p nodes
for multiaddr in config.libp2p_nodes { for multiaddr in config.libp2p_nodes {
match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)),
Err(err) => debug!( Err(err) => debug!(
log, log,
"Could not connect to node: {} error: {:?}", multiaddr, err "Could not connect to peer"; "address" => format!("{}", multiaddr), "Error" => format!("{:?}", err)
), ),
}; };
} }
// subscribe to default gossipsub topics // subscribe to default gossipsub topics
let mut topics = vec![]; let mut topics = vec![];
//TODO: Handle multiple shard attestations. For now we simply use a separate topic for
// attestations /* Here we subscribe to all the required gossipsub topics required for interop.
topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); * The topic builder adds the required prefix and postfix to the hardcoded topics that we
topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); * must subscribe to.
*/
let topic_builder = |topic| {
Topic::new(format!(
"/{}/{}/{}",
TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX,
))
};
topics.push(topic_builder(BEACON_BLOCK_TOPIC));
topics.push(topic_builder(BEACON_ATTESTATION_TOPIC));
topics.push(topic_builder(VOLUNTARY_EXIT_TOPIC));
topics.push(topic_builder(PROPOSER_SLASHING_TOPIC));
topics.push(topic_builder(ATTESTER_SLASHING_TOPIC));
// Add any topics specified by the user
topics.append( topics.append(
&mut config &mut config
.topics .topics
@ -109,13 +122,13 @@ impl Service {
let mut subscribed_topics = vec![]; let mut subscribed_topics = vec![];
for topic in topics { for topic in topics {
if swarm.subscribe(topic.clone()) { if swarm.subscribe(topic.clone()) {
trace!(log, "Subscribed to topic: {:?}", topic); trace!(log, "Subscribed to topic"; "topic" => format!("{}", topic));
subscribed_topics.push(topic); subscribed_topics.push(topic);
} else { } else {
warn!(log, "Could not subscribe to topic: {:?}", topic) warn!(log, "Could not subscribe to topic"; "topic" => format!("{}", topic));
} }
} }
info!(log, "Subscribed to topics: {:?}", subscribed_topics); info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::<Vec<String>>()));
Ok(Service { Ok(Service {
local_peer_id, local_peer_id,
@ -140,7 +153,7 @@ impl Stream for Service {
topics, topics,
message, message,
} => { } => {
trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); trace!(self.log, "Gossipsub message received"; "service" => "Swarm");
return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage {
source, source,
topics, topics,
@ -255,7 +268,7 @@ fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
Err(e) => { Err(e) => {
warn!( warn!(
log, log,
"Could not write node key to file: {:?}. Error: {}", network_key_f, e "Could not write node key to file: {:?}. error: {}", network_key_f, e
); );
} }
} }
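
Editor's note: a tiny sketch of the default subscription set produced by the `topic_builder` closure in the diff, assuming the hardcoded constants shown in `config.rs`; purely illustrative.

```rust
fn main() {
    // Mirrors the `topic_builder` closure: each hardcoded name becomes /eth2/<name>/ssz.
    let topic_builder = |topic: &str| format!("/{}/{}/{}", "eth2", topic, "ssz");

    let defaults: Vec<String> = [
        "beacon_block",
        "beacon_attestation",
        "voluntary_exit",
        "proposer_slashing",
        "attester_slashing",
    ]
    .iter()
    .map(|&name| topic_builder(name))
    .collect();

    assert_eq!(defaults[0], "/eth2/beacon_block/ssz");
    println!("default subscriptions: {:?}", defaults);
}
```
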

View File

@ -1,8 +1,7 @@
use crate::error; use crate::error;
use crate::service::{NetworkMessage, OutgoingMessage}; use crate::service::NetworkMessage;
use crate::sync::SimpleSync; use crate::sync::SimpleSync;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::{ use eth2_libp2p::{
behaviour::PubsubMessage, behaviour::PubsubMessage,
rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId}, rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId},
@ -10,11 +9,11 @@ use eth2_libp2p::{
}; };
use futures::future::Future; use futures::future::Future;
use futures::stream::Stream; use futures::stream::Stream;
use slog::{debug, warn}; use slog::{debug, trace, warn};
use ssz::{Decode, DecodeError}; use ssz::{Decode, DecodeError};
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::{Attestation, BeaconBlock, BeaconBlockHeader}; use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, VoluntaryExit};
/// Handles messages received from the network and client and organises syncing. /// Handles messages received from the network and client and organises syncing.
pub struct MessageHandler<T: BeaconChainTypes> { pub struct MessageHandler<T: BeaconChainTypes> {
@ -22,8 +21,6 @@ pub struct MessageHandler<T: BeaconChainTypes> {
_chain: Arc<BeaconChain<T>>, _chain: Arc<BeaconChain<T>>,
/// The syncing framework. /// The syncing framework.
sync: SimpleSync<T>, sync: SimpleSync<T>,
/// The context required to send messages to, and process messages from peers.
network_context: NetworkContext,
/// The `MessageHandler` logger. /// The `MessageHandler` logger.
log: slog::Logger, log: slog::Logger,
} }
@ -49,23 +46,20 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
executor: &tokio::runtime::TaskExecutor, executor: &tokio::runtime::TaskExecutor,
log: slog::Logger, log: slog::Logger,
) -> error::Result<mpsc::UnboundedSender<HandlerMessage>> { ) -> error::Result<mpsc::UnboundedSender<HandlerMessage>> {
debug!(log, "Service starting"); trace!(log, "Service starting");
let (handler_send, handler_recv) = mpsc::unbounded_channel(); let (handler_send, handler_recv) = mpsc::unbounded_channel();
// Initialise sync and begin processing in thread // Initialise sync and begin processing in thread
// generate the Message handler let sync = SimpleSync::new(beacon_chain.clone(), network_send, &log);
let sync = SimpleSync::new(beacon_chain.clone(), &log);
// generate the Message handler
let mut handler = MessageHandler { let mut handler = MessageHandler {
_chain: beacon_chain.clone(), _chain: beacon_chain.clone(),
sync, sync,
network_context: NetworkContext::new(network_send, log.clone()),
log: log.clone(), log: log.clone(),
}; };
// spawn handler task // spawn handler task and move the message handler instance into the spawned thread
// TODO: Handle manual termination of thread
executor.spawn( executor.spawn(
handler_recv handler_recv
.for_each(move |msg| Ok(handler.handle_message(msg))) .for_each(move |msg| Ok(handler.handle_message(msg)))
@ -82,17 +76,17 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
match message { match message {
// we have initiated a connection to a peer // we have initiated a connection to a peer
HandlerMessage::PeerDialed(peer_id) => { HandlerMessage::PeerDialed(peer_id) => {
self.sync.on_connect(peer_id, &mut self.network_context); self.sync.on_connect(peer_id);
} }
// A peer has disconnected // A peer has disconnected
HandlerMessage::PeerDisconnected(peer_id) => { HandlerMessage::PeerDisconnected(peer_id) => {
self.sync.on_disconnect(peer_id); self.sync.on_disconnect(peer_id);
} }
// we have received an RPC message request/response // An RPC message request/response has been received
HandlerMessage::RPC(peer_id, rpc_event) => { HandlerMessage::RPC(peer_id, rpc_event) => {
self.handle_rpc_message(peer_id, rpc_event); self.handle_rpc_message(peer_id, rpc_event);
} }
// we have received an RPC message request/response // An RPC message request/response has been received
HandlerMessage::PubsubMessage(peer_id, gossip) => { HandlerMessage::PubsubMessage(peer_id, gossip) => {
self.handle_gossip(peer_id, gossip); self.handle_gossip(peer_id, gossip);
} }
@ -105,7 +99,7 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) {
match rpc_message { match rpc_message {
RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req), RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req),
RPCEvent::Response(_id, resp) => self.handle_rpc_response(peer_id, resp), RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp),
RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error), RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error),
} }
} }
@ -113,106 +107,81 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
/// A new RPC request has been received from the network. /// A new RPC request has been received from the network.
fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) {
match request { match request {
RPCRequest::Hello(hello_message) => self.sync.on_hello_request( RPCRequest::Hello(hello_message) => {
peer_id, self.sync
request_id, .on_hello_request(peer_id, request_id, hello_message)
hello_message,
&mut self.network_context,
),
RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason),
RPCRequest::BeaconBlockRoots(request) => self.sync.on_beacon_block_roots_request(
peer_id,
request_id,
request,
&mut self.network_context,
),
RPCRequest::BeaconBlockHeaders(request) => self.sync.on_beacon_block_headers_request(
peer_id,
request_id,
request,
&mut self.network_context,
),
RPCRequest::BeaconBlockBodies(request) => self.sync.on_beacon_block_bodies_request(
peer_id,
request_id,
request,
&mut self.network_context,
),
RPCRequest::BeaconChainState(_) => {
// We do not implement this endpoint, it is not required and will only likely be
// useful for light-client support in later phases.
warn!(self.log, "BeaconChainState RPC call is not supported.");
} }
RPCRequest::Goodbye(goodbye_reason) => {
debug!(
self.log, "PeerGoodbye";
"peer" => format!("{:?}", peer_id),
"reason" => format!("{:?}", goodbye_reason),
);
self.sync.on_disconnect(peer_id);
}
RPCRequest::BeaconBlocks(request) => self
.sync
.on_beacon_blocks_request(peer_id, request_id, request),
RPCRequest::RecentBeaconBlocks(request) => self
.sync
.on_recent_beacon_blocks_request(peer_id, request_id, request),
} }
} }
/// An RPC response has been received from the network. /// An RPC response has been received from the network.
// we match on id and ignore responses past the timeout. // we match on id and ignore responses past the timeout.
fn handle_rpc_response(&mut self, peer_id: PeerId, error_response: RPCErrorResponse) { fn handle_rpc_response(
&mut self,
peer_id: PeerId,
request_id: RequestId,
error_response: RPCErrorResponse,
) {
// an error could have occurred. // an error could have occurred.
// TODO: Handle Error gracefully
match error_response { match error_response {
RPCErrorResponse::InvalidRequest(error) => { RPCErrorResponse::InvalidRequest(error) => {
warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Invalid Request" => error.as_string()) warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string())
} }
RPCErrorResponse::ServerError(error) => { RPCErrorResponse::ServerError(error) => {
warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Server Error" => error.as_string()) warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string())
} }
RPCErrorResponse::Unknown(error) => { RPCErrorResponse::Unknown(error) => {
warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Unknown Error" => error.as_string()) warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string())
} }
RPCErrorResponse::Success(response) => { RPCErrorResponse::Success(response) => {
match response { match response {
RPCResponse::Hello(hello_message) => { RPCResponse::Hello(hello_message) => {
self.sync.on_hello_response( self.sync.on_hello_response(peer_id, hello_message);
}
RPCResponse::BeaconBlocks(response) => {
match self.decode_beacon_blocks(&response) {
Ok(beacon_blocks) => {
self.sync.on_beacon_blocks_response(
peer_id, peer_id,
hello_message, request_id,
&mut self.network_context, beacon_blocks,
); );
} }
RPCResponse::BeaconBlockRoots(response) => { Err(e) => {
self.sync.on_beacon_block_roots_response( // TODO: Down-vote Peer
warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e));
}
}
}
RPCResponse::RecentBeaconBlocks(response) => {
match self.decode_beacon_blocks(&response) {
Ok(beacon_blocks) => {
self.sync.on_recent_beacon_blocks_response(
peer_id, peer_id,
response, request_id,
&mut self.network_context, beacon_blocks,
); );
} }
RPCResponse::BeaconBlockHeaders(response) => { Err(e) => {
match self.decode_block_headers(response) { // TODO: Down-vote Peer
Ok(decoded_block_headers) => { warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e));
self.sync.on_beacon_block_headers_response(
peer_id,
decoded_block_headers,
&mut self.network_context,
);
}
Err(_e) => {
warn!(self.log, "Peer sent invalid block headers";"peer" => format!("{:?}", peer_id))
} }
} }
} }
RPCResponse::BeaconBlockBodies(response) => {
match self.decode_block_bodies(response) {
Ok(decoded_block_bodies) => {
self.sync.on_beacon_block_bodies_response(
peer_id,
decoded_block_bodies,
&mut self.network_context,
);
}
Err(_e) => {
warn!(self.log, "Peer sent invalid block bodies";"peer" => format!("{:?}", peer_id))
}
}
}
RPCResponse::BeaconChainState(_) => {
// We do not implement this endpoint, it is not required and will only likely be
// useful for light-client support in later phases.
//
// Theoretically, we shouldn't reach this code because we should never send a
// beacon state RPC request.
warn!(self.log, "BeaconChainState RPC call is not supported.");
}
} }
} }
} }
@ -221,43 +190,74 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
/// Handle various RPC errors /// Handle various RPC errors
fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) {
//TODO: Handle error correctly //TODO: Handle error correctly
warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "request_id" => format!("{}", request_id), "Error" => format!("{:?}", error));
} }
/// Handle RPC messages /// Handle RPC messages
fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) {
match gossip_message { match gossip_message {
PubsubMessage::Block(message) => match self.decode_gossip_block(message) { PubsubMessage::Block(message) => match self.decode_gossip_block(message) {
Err(e) => {
debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e));
}
Ok(block) => { Ok(block) => {
let _should_forward_on = let _should_forward_on = self.sync.on_block_gossip(peer_id, block);
self.sync }
.on_block_gossip(peer_id, block, &mut self.network_context); Err(e) => {
debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e));
} }
}, },
PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) {
Ok(attestation) => self.sync.on_attestation_gossip(peer_id, attestation),
Err(e) => { Err(e) => {
debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e));
}
Ok(attestation) => {
self.sync
.on_attestation_gossip(peer_id, attestation, &mut self.network_context)
} }
}, },
PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) {
Ok(_exit) => {
// TODO: Handle exits
debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) );
}
Err(e) => {
debug!(self.log, "Invalid gossiped exit"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e));
}
},
PubsubMessage::ProposerSlashing(message) => {
match self.decode_gossip_proposer_slashing(message) {
Ok(_slashing) => {
// TODO: Handle proposer slashings
debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) );
}
Err(e) => {
debug!(self.log, "Invalid gossiped proposer slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e));
}
}
}
PubsubMessage::AttesterSlashing(message) => {
match self.decode_gossip_attestation_slashing(message) {
Ok(_slashing) => {
// TODO: Handle attester slashings
debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) );
}
Err(e) => {
debug!(self.log, "Invalid gossiped attester slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e));
}
}
}
PubsubMessage::Unknown(message) => { PubsubMessage::Unknown(message) => {
// Received a message from an unknown topic. Ignore for now // Received a message from an unknown topic. Ignore for now
debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); debug!(self.log, "Unknown Gossip Message"; "peer_id" => format!("{}", peer_id), "Message" => format!("{:?}", message));
} }
} }
} }
/* Decoding of blocks and attestations from the network. /* Decoding of gossipsub objects from the network.
*
* The decoding is done in the message handler as it has access to a `BeaconChain` and can
* therefore apply more efficient logic in decoding and verification.
* *
* TODO: Apply efficient decoding/verification of these objects * TODO: Apply efficient decoding/verification of these objects
*/ */
/* Gossipsub Domain Decoding */
// Note: These are not generics as type-specific verification will need to be applied.
fn decode_gossip_block( fn decode_gossip_block(
&self, &self,
beacon_block: Vec<u8>, beacon_block: Vec<u8>,
@ -274,80 +274,39 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
Attestation::from_ssz_bytes(&beacon_block) Attestation::from_ssz_bytes(&beacon_block)
} }
/// Verifies and decodes the ssz-encoded block bodies received from peers. fn decode_gossip_exit(&self, voluntary_exit: Vec<u8>) -> Result<VoluntaryExit, DecodeError> {
fn decode_block_bodies( //TODO: Apply verification before decoding.
VoluntaryExit::from_ssz_bytes(&voluntary_exit)
}
fn decode_gossip_proposer_slashing(
&self, &self,
bodies_response: BeaconBlockBodiesResponse, proposer_slashing: Vec<u8>,
) -> Result<DecodedBeaconBlockBodiesResponse<T::EthSpec>, DecodeError> { ) -> Result<ProposerSlashing, DecodeError> {
//TODO: Apply verification before decoding.
ProposerSlashing::from_ssz_bytes(&proposer_slashing)
}
fn decode_gossip_attestation_slashing(
&self,
attester_slashing: Vec<u8>,
) -> Result<AttesterSlashing<T::EthSpec>, DecodeError> {
//TODO: Apply verification before decoding.
AttesterSlashing::from_ssz_bytes(&attester_slashing)
}
/* Req/Resp Domain Decoding */
/// Verifies and decodes an ssz-encoded list of `BeaconBlock`s. This list may contain empty
/// entries encoded with an SSZ NULL.
fn decode_beacon_blocks(
&self,
beacon_blocks: &[u8],
) -> Result<Vec<BeaconBlock<T::EthSpec>>, DecodeError> {
if beacon_blocks.is_empty() {
return Ok(Vec::new());
}
//TODO: Implement faster block verification before decoding entirely //TODO: Implement faster block verification before decoding entirely
let block_bodies = Vec::from_ssz_bytes(&bodies_response.block_bodies)?; Vec::from_ssz_bytes(&beacon_blocks)
Ok(DecodedBeaconBlockBodiesResponse {
block_roots: bodies_response
.block_roots
.expect("Responses must have associated roots"),
block_bodies,
})
}
/// Verifies and decodes the ssz-encoded block headers received from peers.
fn decode_block_headers(
&self,
headers_response: BeaconBlockHeadersResponse,
) -> Result<Vec<BeaconBlockHeader>, DecodeError> {
//TODO: Implement faster header verification before decoding entirely
Vec::from_ssz_bytes(&headers_response.headers)
}
}
// TODO: RPC Rewrite makes this struct fairly pointless
pub struct NetworkContext {
/// The network channel to relay messages to the Network service.
network_send: mpsc::UnboundedSender<NetworkMessage>,
/// The `MessageHandler` logger.
log: slog::Logger,
}
impl NetworkContext {
pub fn new(network_send: mpsc::UnboundedSender<NetworkMessage>, log: slog::Logger) -> Self {
Self { network_send, log }
}
pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason))
// TODO: disconnect peers.
}
pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) {
// Note: There is currently no use of keeping track of requests. However the functionality
// is left here for future revisions.
self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request));
}
//TODO: Handle Error responses
pub fn send_rpc_response(
&mut self,
peer_id: PeerId,
request_id: RequestId,
rpc_response: RPCResponse,
) {
self.send_rpc_event(
peer_id,
RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)),
);
}
fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) {
self.send(peer_id, OutgoingMessage::RPC(rpc_event))
}
fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) {
self.network_send
.try_send(NetworkMessage::Send(peer_id, outgoing_message))
.unwrap_or_else(|_| {
warn!(
self.log,
"Could not send RPC message to the network service"
)
});
} }
} }
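// Illustration only (not part of this diff): `NetworkContext` is a thin wrapper over the
// network channel, so a caller such as the sync layer would use it roughly as below. The
// `greet_peer` helper, the in-scope `HelloMessage` and the `RPCRequest::Hello` variant are
// assumptions made for this sketch.
fn greet_peer(ctx: &mut NetworkContext, peer_id: PeerId, hello: HelloMessage) {
    // Fire-and-forget: the context wraps the request in an RPCEvent and pushes it onto
    // the network channel.
    ctx.send_rpc_request(peer_id, RPCRequest::Hello(hello));
}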

View File

@ -21,8 +21,7 @@ pub struct Service<T: BeaconChainTypes> {
libp2p_port: u16, libp2p_port: u16,
_libp2p_exit: oneshot::Sender<()>, _libp2p_exit: oneshot::Sender<()>,
_network_send: mpsc::UnboundedSender<NetworkMessage>, _network_send: mpsc::UnboundedSender<NetworkMessage>,
_phantom: PhantomData<T>, //message_handler: MessageHandler, _phantom: PhantomData<T>,
//message_handler_send: Sender<HandlerMessage>
} }
impl<T: BeaconChainTypes + 'static> Service<T> { impl<T: BeaconChainTypes + 'static> Service<T> {
@ -43,17 +42,19 @@ impl<T: BeaconChainTypes + 'static> Service<T> {
message_handler_log, message_handler_log,
)?; )?;
let network_log = log.new(o!("Service" => "Network"));
// launch libp2p service // launch libp2p service
let libp2p_log = log.new(o!("Service" => "Libp2p")); let libp2p_service = Arc::new(Mutex::new(LibP2PService::new(
let libp2p_service = Arc::new(Mutex::new(LibP2PService::new(config.clone(), libp2p_log)?)); config.clone(),
network_log.clone(),
)?));
// TODO: Spawn thread to handle libp2p messages and pass to message handler thread.
let libp2p_exit = spawn_service( let libp2p_exit = spawn_service(
libp2p_service.clone(), libp2p_service.clone(),
network_recv, network_recv,
message_handler_send, message_handler_send,
executor, executor,
log, network_log,
)?; )?;
let network_service = Service { let network_service = Service {
libp2p_service, libp2p_service,
@ -166,7 +167,7 @@ fn network_service(
}, },
NetworkMessage::Publish { topics, message } => { NetworkMessage::Publish { topics, message } => {
debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics));
libp2p_service.lock().swarm.publish(topics, message); libp2p_service.lock().swarm.publish(&topics, message);
} }
}, },
Ok(Async::NotReady) => break, Ok(Async::NotReady) => break,
@ -190,13 +191,13 @@ fn network_service(
.map_err(|_| "Failed to send RPC to handler")?; .map_err(|_| "Failed to send RPC to handler")?;
} }
Libp2pEvent::PeerDialed(peer_id) => { Libp2pEvent::PeerDialed(peer_id) => {
debug!(log, "Peer Dialed: {:?}", peer_id); debug!(log, "Peer Dialed"; "PeerID" => format!("{:?}", peer_id));
message_handler_send message_handler_send
.try_send(HandlerMessage::PeerDialed(peer_id)) .try_send(HandlerMessage::PeerDialed(peer_id))
.map_err(|_| "Failed to send PeerDialed to handler")?; .map_err(|_| "Failed to send PeerDialed to handler")?;
} }
Libp2pEvent::PeerDisconnected(peer_id) => { Libp2pEvent::PeerDisconnected(peer_id) => {
debug!(log, "Peer Disconnected: {:?}", peer_id); debug!(log, "Peer Disconnected"; "PeerID" => format!("{:?}", peer_id));
message_handler_send message_handler_send
.try_send(HandlerMessage::PeerDisconnected(peer_id)) .try_send(HandlerMessage::PeerDisconnected(peer_id))
.map_err(|_| "Failed to send PeerDisconnected to handler")?; .map_err(|_| "Failed to send PeerDisconnected to handler")?;

View File

@ -1,307 +0,0 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::PeerId;
use slog::error;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tree_hash::TreeHash;
use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot};
/// Provides a queue for fully and partially built `BeaconBlock`s.
///
/// The queue is fundamentally a `Vec<PartialBeaconBlock>` where no two items have the same
/// `item.block_root`. This struct is backed by a `Vec`, not a `HashMap`, for the following two
/// reasons:
///
/// - When we receive a `BeaconBlockBody`, the only way we can find its matching
/// `BeaconBlockHeader` is to find a header such that `header.beacon_block_body ==
/// tree_hash_root(body)`. Therefore, if we used a `HashMap` we would need to use the root of
/// `BeaconBlockBody` as the key.
/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore
/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`.
pub struct ImportQueue<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>,
/// Partially imported blocks, keyed by the root of `BeaconBlockBody`.
partials: HashMap<Hash256, PartialBeaconBlock<T::EthSpec>>,
/// Time before a queue entry is considered stale.
pub stale_time: Duration,
/// Logging
log: slog::Logger,
}
impl<T: BeaconChainTypes> ImportQueue<T> {
/// Return a new, empty queue.
pub fn new(chain: Arc<BeaconChain<T>>, stale_time: Duration, log: slog::Logger) -> Self {
Self {
chain,
partials: HashMap::new(),
stale_time,
log,
}
}
/// Returns true if the `BlockRoot` is found in the `import_queue`.
pub fn contains_block_root(&self, block_root: Hash256) -> bool {
self.partials.contains_key(&block_root)
}
/// Attempts to complete the `BlockRoot` if it is found in the `import_queue`.
///
/// Returns an Enum with a `PartialBeaconBlockCompletion`.
/// Does not remove the `block_root` from the `import_queue`.
pub fn attempt_complete_block(
&self,
block_root: Hash256,
) -> PartialBeaconBlockCompletion<T::EthSpec> {
if let Some(partial) = self.partials.get(&block_root) {
partial.attempt_complete()
} else {
PartialBeaconBlockCompletion::MissingRoot
}
}
/// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial
/// if it exists.
pub fn remove(&mut self, block_root: Hash256) -> Option<PartialBeaconBlock<T::EthSpec>> {
self.partials.remove(&block_root)
}
/// Flushes all stale entries from the queue.
///
/// An entry is stale if it has an `inserted` time that is more than `self.stale_time` in the
/// past.
pub fn remove_stale(&mut self) {
let stale_time = self.stale_time;
self.partials
.retain(|_, partial| partial.inserted + stale_time > Instant::now())
}
/// Returns `true` if `self.chain` has not yet processed this block.
pub fn chain_has_not_seen_block(&self, block_root: &Hash256) -> bool {
self.chain
.is_new_block_root(&block_root)
.unwrap_or_else(|_| {
error!(self.log, "Unable to determine if block is new.");
true
})
}
/// Adds the `block_roots` to the partials queue.
///
/// If a `block_root` is not in the queue and has not been processed by the chain it is added
/// to the queue and its block root is included in the output.
pub fn enqueue_block_roots(
&mut self,
block_roots: &[BlockRootSlot],
sender: PeerId,
) -> Vec<BlockRootSlot> {
// TODO: This will currently not return a `BlockRootSlot` if this root exists but there is no header.
// It would be more robust if it did.
let new_block_root_slots: Vec<BlockRootSlot> = block_roots
.iter()
// Ignore any roots already stored in the queue.
.filter(|brs| !self.contains_block_root(brs.block_root))
// Ignore any roots already processed by the chain.
.filter(|brs| self.chain_has_not_seen_block(&brs.block_root))
.cloned()
.collect();
self.partials.extend(
new_block_root_slots
.iter()
.map(|brs| PartialBeaconBlock {
slot: brs.slot,
block_root: brs.block_root,
sender: sender.clone(),
header: None,
body: None,
inserted: Instant::now(),
})
.map(|partial| (partial.block_root, partial)),
);
new_block_root_slots
}
/// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots that
/// we should use to request `BeaconBlockBodies`.
///
/// If a `header` is not in the queue and has not been processed by the chain it is added to
/// the queue and its block root is included in the output.
///
/// If a `header` is already in the queue, but not yet processed by the chain the block root is
/// not included in the output and the `inserted` time for the partial record is set to
/// `Instant::now()`. Updating the `inserted` time stops the partial from becoming stale.
pub fn enqueue_headers(
&mut self,
headers: Vec<BeaconBlockHeader>,
sender: PeerId,
) -> Vec<Hash256> {
let mut required_bodies: Vec<Hash256> = vec![];
for header in headers {
let block_root = Hash256::from_slice(&header.canonical_root()[..]);
if self.chain_has_not_seen_block(&block_root)
&& !self.insert_header(block_root, header, sender.clone())
{
// The body for this header is not yet known; request it.
required_bodies.push(block_root);
}
}
required_bodies
}
/// If there is a matching `header` for this `body`, adds it to the queue.
///
/// If there is no `header` for the `body`, the body is simply discarded.
pub fn enqueue_bodies(
&mut self,
bodies: Vec<BeaconBlockBody<T::EthSpec>>,
sender: PeerId,
) -> Option<Hash256> {
let mut last_block_hash = None;
for body in bodies {
last_block_hash = self.insert_body(body, sender.clone());
}
last_block_hash
}
pub fn enqueue_full_blocks(&mut self, blocks: Vec<BeaconBlock<T::EthSpec>>, sender: PeerId) {
for block in blocks {
self.insert_full_block(block, sender.clone());
}
}
/// Inserts a header to the queue.
///
/// If the header already exists, the `inserted` time is set to `now` and no other
/// modifications are made.
/// Returns true if the `body` exists.
fn insert_header(
&mut self,
block_root: Hash256,
header: BeaconBlockHeader,
sender: PeerId,
) -> bool {
let mut exists = false;
self.partials
.entry(block_root)
.and_modify(|partial| {
partial.header = Some(header.clone());
partial.inserted = Instant::now();
if partial.body.is_some() {
exists = true;
}
})
.or_insert_with(|| PartialBeaconBlock {
slot: header.slot,
block_root,
header: Some(header),
body: None,
inserted: Instant::now(),
sender,
});
exists
}
/// Updates an existing partial with the `body`.
///
/// If the body already existed, the `inserted` time is set to `now`.
///
/// Returns the block hash of the inserted body
fn insert_body(
&mut self,
body: BeaconBlockBody<T::EthSpec>,
sender: PeerId,
) -> Option<Hash256> {
let body_root = Hash256::from_slice(&body.tree_hash_root()[..]);
let mut last_root = None;
self.partials.iter_mut().for_each(|(root, mut p)| {
if let Some(header) = &mut p.header {
if body_root == header.body_root {
p.inserted = Instant::now();
p.body = Some(body.clone());
p.sender = sender.clone();
last_root = Some(*root);
}
}
});
last_root
}
/// Updates an existing `partial` with the completed block, or adds a new (complete) partial.
///
/// If the partial already existed, the `inserted` time is set to `now`.
fn insert_full_block(&mut self, block: BeaconBlock<T::EthSpec>, sender: PeerId) {
let block_root = Hash256::from_slice(&block.canonical_root()[..]);
let partial = PartialBeaconBlock {
slot: block.slot,
block_root,
header: Some(block.block_header()),
body: Some(block.body),
inserted: Instant::now(),
sender,
};
self.partials
.entry(block_root)
.and_modify(|existing_partial| *existing_partial = partial.clone())
.or_insert(partial);
}
}
/// Individual components of a `BeaconBlock`, potentially all that are required to form a full
/// `BeaconBlock`.
#[derive(Clone, Debug)]
pub struct PartialBeaconBlock<E: EthSpec> {
pub slot: Slot,
/// `BeaconBlock` root.
pub block_root: Hash256,
pub header: Option<BeaconBlockHeader>,
pub body: Option<BeaconBlockBody<E>>,
/// The instant at which this record was created or last meaningfully modified. Used to
/// determine if an entry is stale and should be removed.
pub inserted: Instant,
/// The `PeerId` that last meaningfully contributed to this item.
pub sender: PeerId,
}
impl<E: EthSpec> PartialBeaconBlock<E> {
/// Attempts to build a block.
///
/// Does not consume the `PartialBeaconBlock`.
pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion<E> {
if self.header.is_none() {
PartialBeaconBlockCompletion::MissingHeader(self.slot)
} else if self.body.is_none() {
PartialBeaconBlockCompletion::MissingBody
} else {
PartialBeaconBlockCompletion::Complete(
self.header
.clone()
.unwrap()
.into_block(self.body.clone().unwrap()),
)
}
}
}
/// The result of attempting to complete a `PartialBeaconBlock` into a full `BeaconBlock`.
pub enum PartialBeaconBlockCompletion<E: EthSpec> {
/// The partial contains a valid BeaconBlock.
Complete(BeaconBlock<E>),
/// The partial does not exist.
MissingRoot,
/// The partial contains a `BeaconBlockRoot` but no `BeaconBlockHeader`.
MissingHeader(Slot),
/// The partial contains a `BeaconBlockRoot` and `BeaconBlockHeader` but no `BeaconBlockBody`.
MissingBody,
}
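// Illustration only (this file is removed by the commit): a caller holding an `ImportQueue`
// was expected to match on the completion result as sketched below; the handling in each
// arm is assumed.
match import_queue.attempt_complete_block(block_root) {
    PartialBeaconBlockCompletion::Complete(block) => { /* process the full block */ }
    PartialBeaconBlockCompletion::MissingHeader(slot) => { /* request the header at `slot` */ }
    PartialBeaconBlockCompletion::MissingBody => { /* request the matching body */ }
    PartialBeaconBlockCompletion::MissingRoot => { /* nothing queued for this root */ }
}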

View File

@ -0,0 +1,725 @@
use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE};
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::RequestId;
use eth2_libp2p::PeerId;
use slog::{debug, info, trace, warn, Logger};
use std::collections::{HashMap, HashSet};
use std::ops::{Add, Sub};
use std::sync::Arc;
use types::{BeaconBlock, EthSpec, Hash256, Slot};
const MAX_BLOCKS_PER_REQUEST: u64 = 10;
/// The number of slots ahead of our head that blocks may be imported before we switch into full sync mode.
const SLOT_IMPORT_TOLERANCE: usize = 10;
const PARENT_FAIL_TOLERANCE: usize = 3;
const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2;
#[derive(PartialEq)]
enum BlockRequestsState {
Queued,
Pending(RequestId),
Complete,
Failed,
}
struct BlockRequests<T: EthSpec> {
target_head_slot: Slot,
target_head_root: Hash256,
downloaded_blocks: Vec<BeaconBlock<T>>,
state: BlockRequestsState,
/// Specifies whether the current state is syncing forwards or backwards.
forward_sync: bool,
/// The current `start_slot` of the batched block request.
current_start_slot: Slot,
}
struct ParentRequests<T: EthSpec> {
downloaded_blocks: Vec<BeaconBlock<T>>,
failed_attempts: usize,
last_submitted_peer: PeerId, // to downvote the submitting peer.
state: BlockRequestsState,
}
impl<T: EthSpec> BlockRequests<T> {
// Gets the start slot for the next batch: the slot after the last downloaded batch,
// i.e. the window is stepped by MAX_BLOCKS_PER_REQUEST in the current sync direction.
fn update_start_slot(&mut self) {
if self.forward_sync {
self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST);
} else {
self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST);
}
self.state = BlockRequestsState::Queued;
}
}
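// Illustration only (not part of this diff): the stepping arithmetic above, with the
// starting slot assumed. A forward sync walks the window up by MAX_BLOCKS_PER_REQUEST;
// a backward sync walks it down.
let mut start = Slot::from(100u64);
start += Slot::from(MAX_BLOCKS_PER_REQUEST); // forward: 100 -> 110 -> 120 -> ...
assert_eq!(start, Slot::from(110u64));
start = Slot::from(100u64);
start -= Slot::from(MAX_BLOCKS_PER_REQUEST); // backward: 100 -> 90 -> 80 -> ...
assert_eq!(start, Slot::from(90u64));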
#[derive(PartialEq, Debug, Clone)]
enum ManagerState {
Syncing,
Regular,
Stalled,
}
pub(crate) enum ImportManagerOutcome {
Idle,
RequestBlocks {
peer_id: PeerId,
request_id: RequestId,
request: BeaconBlocksRequest,
},
/// Updates peer information by requesting another HELLO handshake.
Hello(PeerId),
RecentRequest(PeerId, RecentBeaconBlocksRequest),
DownvotePeer(PeerId),
}
pub struct ImportManager<T: BeaconChainTypes> {
/// A reference to the underlying beacon chain.
chain: Arc<BeaconChain<T>>,
state: ManagerState,
import_queue: HashMap<PeerId, BlockRequests<T::EthSpec>>,
parent_queue: Vec<ParentRequests<T::EthSpec>>,
full_peers: HashSet<PeerId>,
current_req_id: usize,
log: Logger,
}
impl<T: BeaconChainTypes> ImportManager<T> {
pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: &slog::Logger) -> Self {
ImportManager {
chain: beacon_chain.clone(),
state: ManagerState::Regular,
import_queue: HashMap::new(),
parent_queue: Vec::new(),
full_peers: HashSet::new(),
current_req_id: 0,
log: log.clone(),
}
}
pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) {
// TODO: Improve comments.
// Initially try to download blocks from our current head, then search backwards all the
// way to our finalized epoch until we match on a common chain; this has to be done
// sequentially to find the next slot to start the batch from.
let local = PeerSyncInfo::from(&self.chain);
// If a peer is within SLOT_IMPORT_TOLERANCE of our head slot, do not start a batch sync with it.
if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE {
trace!(self.log, "Ignoring full sync with peer";
"peer" => format!("{:?}", peer_id),
"peer_head_slot" => remote.head_slot,
"local_head_slot" => local.head_slot,
);
// remove the peer from the queue if it exists
self.import_queue.remove(&peer_id);
return;
}
if let Some(block_requests) = self.import_queue.get_mut(&peer_id) {
// update the target head slot
if remote.head_slot > block_requests.target_head_slot {
block_requests.target_head_slot = remote.head_slot;
}
} else {
let block_requests = BlockRequests {
target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called
target_head_root: remote.head_root,
downloaded_blocks: Vec::new(),
state: BlockRequestsState::Queued,
forward_sync: true,
current_start_slot: self.chain.best_slot(),
};
self.import_queue.insert(peer_id, block_requests);
}
}
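// Worked example of the tolerance check above (peer and chain values assumed):
// with SLOT_IMPORT_TOLERANCE = 10, a remote head at slot 105 and a local head at slot 100
// gives 105 - 100 = 5 < 10, so the peer is ignored for batch sync; a remote head at
// slot 150 (150 - 100 = 50 >= 10) would insert a `BlockRequests` entry starting from our
// best slot.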
pub fn beacon_blocks_response(
&mut self,
peer_id: PeerId,
request_id: RequestId,
mut blocks: Vec<BeaconBlock<T::EthSpec>>,
) {
// find the request
let block_requests = match self
.import_queue
.get_mut(&peer_id)
.filter(|r| r.state == BlockRequestsState::Pending(request_id))
{
Some(req) => req,
_ => {
// No pending request, invalid request_id or coding error
warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id);
return;
}
};
// If we are syncing up to a target head block, at least the target head block should be
// returned. If we are syncing back to our last finalized block the request should return
// at least the last block we received (last known block). In diagram form:
//
// unknown blocks requested blocks downloaded blocks
// |-------------------|------------------------|------------------------|
// ^finalized slot ^ requested start slot ^ last known block ^ remote head
if blocks.is_empty() {
debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id);
block_requests.update_start_slot();
return;
}
// verify the range of received blocks
// Note that the order of blocks is verified in block processing
let last_sent_slot = blocks[blocks.len() - 1].slot;
if block_requests.current_start_slot > blocks[0].slot
|| block_requests
.current_start_slot
.add(MAX_BLOCKS_PER_REQUEST)
< last_sent_slot
{
//TODO: Downvote peer - add a reason to failed
dbg!(&blocks);
warn!(self.log, "BeaconBlocks response returned out of range blocks";
"request_id" => request_id,
"response_initial_slot" => blocks[0].slot,
"requested_initial_slot" => block_requests.current_start_slot);
// consider this sync failed
block_requests.state = BlockRequestsState::Failed;
return;
}
// Determine if more blocks need to be downloaded. There are a few cases:
// - We have downloaded a batch from our head_slot, which has not reached the remote's head
// (target head). Therefore we need to download another sequential batch.
// - The latest batch includes blocks that are greater than or equal to the target_head slot,
// which means we have caught up to their head. We then check to see if the first
// block downloaded matches our head. If so, we are on the same chain and can process
// the blocks. If not we need to sync back further until we are on the same chain. So
// request more blocks.
// - We are syncing backwards (from our head slot) and need to check if we are on the same
// chain. If so, process the blocks, if not, request more blocks all the way up to
// our last finalized slot.
if block_requests.forward_sync {
// append blocks if syncing forward
block_requests.downloaded_blocks.append(&mut blocks);
} else {
// prepend blocks if syncing backwards
block_requests.downloaded_blocks.splice(..0, blocks);
}
// does the batch contain the target_head_slot
let last_element_index = block_requests.downloaded_blocks.len() - 1;
if block_requests.downloaded_blocks[last_element_index].slot
>= block_requests.target_head_slot
|| !block_requests.forward_sync
{
// if the batch is on our chain, this is complete and we can then process.
// Otherwise start backwards syncing until we reach a common chain.
let earliest_slot = block_requests.downloaded_blocks[0].slot;
//TODO: Decide which is faster. Reading block from db and comparing or calculating
//the hash tree root and comparing.
if Some(block_requests.downloaded_blocks[0].canonical_root())
== root_at_slot(&self.chain, earliest_slot)
{
block_requests.state = BlockRequestsState::Complete;
return;
}
// not on the same chain, request blocks backwards
let state = &self.chain.head().beacon_state;
let local_finalized_slot = state
.finalized_checkpoint
.epoch
.start_slot(T::EthSpec::slots_per_epoch());
// check that the request hasn't failed by having no common chain
if local_finalized_slot >= block_requests.current_start_slot {
warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id);
block_requests.state = BlockRequestsState::Failed;
return;
}
// if this is a forward sync, then we have reached the head without a common chain
// and we need to start syncing backwards.
if block_requests.forward_sync {
// Start a backwards sync by requesting earlier blocks
block_requests.forward_sync = false;
block_requests.current_start_slot = std::cmp::min(
self.chain.best_slot(),
block_requests.downloaded_blocks[0].slot,
);
}
}
// update the start slot and re-queue the batch
block_requests.update_start_slot();
}
pub fn recent_blocks_response(
&mut self,
peer_id: PeerId,
request_id: RequestId,
blocks: Vec<BeaconBlock<T::EthSpec>>,
) {
// find the request
let parent_request = match self
.parent_queue
.iter_mut()
.find(|request| request.state == BlockRequestsState::Pending(request_id))
{
Some(req) => req,
None => {
// No pending request, invalid request_id or coding error
warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id);
return;
}
};
// if an empty response is given, the peer didn't have the requested block, try again
if blocks.is_empty() {
parent_request.failed_attempts += 1;
parent_request.state = BlockRequestsState::Queued;
parent_request.last_submitted_peer = peer_id;
return;
}
// currently only support a single block lookup. Reject any response that has more than 1
// block
if blocks.len() != 1 {
//TODO: Potentially downvote the peer
debug!(self.log, "Peer sent more than 1 parent. Ignoring";
"peer_id" => format!("{:?}", peer_id),
"no_parents" => blocks.len()
);
return;
}
// queue for processing
parent_request.state = BlockRequestsState::Complete;
}
pub fn _inject_error(_peer_id: PeerId, _id: RequestId) {
//TODO: Remove block state from pending
}
pub fn peer_disconnect(&mut self, peer_id: &PeerId) {
self.import_queue.remove(peer_id);
self.full_peers.remove(peer_id);
self.update_state();
}
pub fn add_full_peer(&mut self, peer_id: PeerId) {
debug!(
self.log, "Fully synced peer added";
"peer" => format!("{:?}", peer_id),
);
self.full_peers.insert(peer_id);
self.update_state();
}
pub fn add_unknown_block(&mut self, block: BeaconBlock<T::EthSpec>, peer_id: PeerId) {
// if we are not in regular sync mode, ignore this block
if let ManagerState::Regular = self.state {
return;
}
// make sure this block is not already being searched for
// TODO: Potentially store a hashset of blocks for O(1) lookups
for parent_req in self.parent_queue.iter() {
if let Some(_) = parent_req
.downloaded_blocks
.iter()
.find(|d_block| d_block == &&block)
{
// we are already searching for this block, ignore it
return;
}
}
let req = ParentRequests {
downloaded_blocks: vec![block],
failed_attempts: 0,
last_submitted_peer: peer_id,
state: BlockRequestsState::Queued,
};
self.parent_queue.push(req);
}
pub(crate) fn poll(&mut self) -> ImportManagerOutcome {
loop {
// update the state of the manager
self.update_state();
// process potential block requests
if let Some(outcome) = self.process_potential_block_requests() {
return outcome;
}
// process any complete long-range batches
if let Some(outcome) = self.process_complete_batches() {
return outcome;
}
// process any parent block lookup-requests
if let Some(outcome) = self.process_parent_requests() {
return outcome;
}
// process any complete parent lookups
let (re_run, outcome) = self.process_complete_parent_requests();
if let Some(outcome) = outcome {
return outcome;
} else if !re_run {
break;
}
}
return ImportManagerOutcome::Idle;
}
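// Illustration only (not part of this diff): a driver on top of the manager would
// typically drain `poll()` until it returns `Idle`, dispatching each outcome onto the
// network; the arm bodies below are placeholders.
loop {
    match manager.poll() {
        ImportManagerOutcome::Idle => break,
        ImportManagerOutcome::Hello(peer_id) => { /* re-send a HELLO request */ }
        ImportManagerOutcome::RequestBlocks { peer_id, request_id, request } => {
            /* send a BeaconBlocks RPC request */
        }
        ImportManagerOutcome::RecentRequest(peer_id, request) => {
            /* send a RecentBeaconBlocks RPC request */
        }
        ImportManagerOutcome::DownvotePeer(peer_id) => { /* penalise the peer */ }
    }
}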
fn update_state(&mut self) {
let previous_state = self.state.clone();
self.state = {
if !self.import_queue.is_empty() {
ManagerState::Syncing
} else if !self.full_peers.is_empty() {
ManagerState::Regular
} else {
ManagerState::Stalled
}
};
if self.state != previous_state {
info!(self.log, "Syncing state updated";
"old_state" => format!("{:?}", previous_state),
"new_state" => format!("{:?}", self.state),
);
}
}
fn process_potential_block_requests(&mut self) -> Option<ImportManagerOutcome> {
// check if an outbound request is required
// A fixed number of outbound requests is already enforced at the libp2p RPC protocol
// layer, so it does not need to be managed here.
// If any requests are in the queued state, we submit a request.
// remove any failed batches
let debug_log = &self.log;
self.import_queue.retain(|peer_id, block_request| {
if let BlockRequestsState::Failed = block_request.state {
debug!(debug_log, "Block import from peer failed";
"peer_id" => format!("{:?}", peer_id),
"downloaded_blocks" => block_request.downloaded_blocks.len()
);
false
} else {
true
}
});
// process queued block requests
for (peer_id, block_requests) in self
.import_queue
.iter_mut()
.find(|(_peer_id, req)| req.state == BlockRequestsState::Queued)
{
let request_id = self.current_req_id;
block_requests.state = BlockRequestsState::Pending(request_id);
self.current_req_id += 1;
let request = BeaconBlocksRequest {
head_block_root: block_requests.target_head_root,
start_slot: block_requests.current_start_slot.as_u64(),
count: MAX_BLOCKS_PER_REQUEST,
step: 0,
};
return Some(ImportManagerOutcome::RequestBlocks {
peer_id: peer_id.clone(),
request,
request_id,
});
}
None
}
fn process_complete_batches(&mut self) -> Option<ImportManagerOutcome> {
let completed_batches = self
.import_queue
.iter()
.filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete)
.map(|(peer, _)| peer)
.cloned()
.collect::<Vec<PeerId>>();
for peer_id in completed_batches {
let block_requests = self.import_queue.remove(&peer_id).expect("key exists");
match self.process_blocks(block_requests.downloaded_blocks.clone()) {
Ok(()) => {
//TODO: Verify it's impossible to have empty downloaded_blocks
let last_element = block_requests.downloaded_blocks.len() - 1;
debug!(self.log, "Blocks processed successfully";
"peer" => format!("{:?}", peer_id),
"start_slot" => block_requests.downloaded_blocks[0].slot,
"end_slot" => block_requests.downloaded_blocks[last_element].slot,
"no_blocks" => last_element + 1,
);
// Re-HELLO to ensure we are up to the latest head
return Some(ImportManagerOutcome::Hello(peer_id));
}
Err(e) => {
let last_element = block_requests.downloaded_blocks.len() - 1;
warn!(self.log, "Block processing failed";
"peer" => format!("{:?}", peer_id),
"start_slot" => block_requests.downloaded_blocks[0].slot,
"end_slot" => block_requests.downloaded_blocks[last_element].slot,
"no_blocks" => last_element + 1,
"error" => format!("{:?}", e),
);
return Some(ImportManagerOutcome::DownvotePeer(peer_id));
}
}
}
None
}
fn process_parent_requests(&mut self) -> Option<ImportManagerOutcome> {
// remove any failed requests
let debug_log = &self.log;
self.parent_queue.retain(|parent_request| {
if parent_request.state == BlockRequestsState::Failed {
debug!(debug_log, "Parent import failed";
"block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()),
"ancestors_found" => parent_request.downloaded_blocks.len()
);
false
} else {
true
}
});
// check to make sure there are peers to search for the parent from
if self.full_peers.is_empty() {
return None;
}
// check if parents need to be searched for
for parent_request in self.parent_queue.iter_mut() {
if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE {
parent_request.state = BlockRequestsState::Failed;
continue;
} else if parent_request.state == BlockRequestsState::Queued {
// check the depth isn't too large
if parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE {
parent_request.state = BlockRequestsState::Failed;
continue;
}
parent_request.state = BlockRequestsState::Pending(self.current_req_id);
self.current_req_id += 1;
let last_element_index = parent_request.downloaded_blocks.len() - 1;
let parent_hash = parent_request.downloaded_blocks[last_element_index].parent_root;
let req = RecentBeaconBlocksRequest {
block_roots: vec![parent_hash],
};
// select a random fully synced peer to attempt to download the parent block
let peer_id = self.full_peers.iter().next().expect("List is not empty");
return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req));
}
}
None
}
fn process_complete_parent_requests(&mut self) -> (bool, Option<ImportManagerOutcome>) {
// flag to determine if there is more processing to drive or if the manager can be switched to
// an idle state
let mut re_run = false;
// Find any parent_requests ready to be processed
for completed_request in self
.parent_queue
.iter_mut()
.filter(|req| req.state == BlockRequestsState::Complete)
{
// verify the last added block is the parent of the last requested block
let last_index = completed_request.downloaded_blocks.len() - 1;
let expected_hash = completed_request.downloaded_blocks[last_index].parent_root;
// Note: the length must be greater than 1 so this cannot panic.
let block_hash = completed_request.downloaded_blocks[last_index - 1].canonical_root();
if block_hash != expected_hash {
// remove the head block
let _ = completed_request.downloaded_blocks.pop();
completed_request.state = BlockRequestsState::Queued;
//TODO: Potentially downvote the peer
let peer = completed_request.last_submitted_peer.clone();
debug!(self.log, "Peer sent invalid parent. Ignoring";
"peer_id" => format!("{:?}",peer),
"received_block" => format!("{}", block_hash),
"expected_parent" => format!("{}", expected_hash),
);
return (true, Some(ImportManagerOutcome::DownvotePeer(peer)));
}
// try and process the list of blocks up to the requested block
while !completed_request.downloaded_blocks.is_empty() {
let block = completed_request
.downloaded_blocks
.pop()
.expect("Block must exist exist");
match self.chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => {
// need to keep looking for parents
completed_request.downloaded_blocks.push(block);
completed_request.state = BlockRequestsState::Queued;
re_run = true;
break;
}
Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {}
Ok(outcome) => {
// it's a future slot or an invalid block, remove it and try again
completed_request.failed_attempts += 1;
trace!(
self.log, "Invalid parent block";
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", completed_request.last_submitted_peer),
);
completed_request.state = BlockRequestsState::Queued;
re_run = true;
return (
re_run,
Some(ImportManagerOutcome::DownvotePeer(
completed_request.last_submitted_peer.clone(),
)),
);
}
Err(e) => {
completed_request.failed_attempts += 1;
warn!(
self.log, "Parent processing error";
"error" => format!("{:?}", e)
);
completed_request.state = BlockRequestsState::Queued;
re_run = true;
return (
re_run,
Some(ImportManagerOutcome::DownvotePeer(
completed_request.last_submitted_peer.clone(),
)),
);
}
}
}
}
// remove any fully completed and processed parent chains
self.parent_queue.retain(|req| {
if req.state == BlockRequestsState::Complete {
false
} else {
true
}
});
(re_run, None)
}
fn process_blocks(&mut self, blocks: Vec<BeaconBlock<T::EthSpec>>) -> Result<(), String> {
for block in blocks {
let processing_result = self.chain.process_block(block.clone());
if let Ok(outcome) = processing_result {
match outcome {
BlockProcessingOutcome::Processed { block_root } => {
// The block was valid and we processed it successfully.
trace!(
self.log, "Imported block from network";
"slot" => block.slot,
"block_root" => format!("{}", block_root),
);
}
BlockProcessingOutcome::ParentUnknown { parent } => {
// blocks should be sequential and all parents should exist
trace!(
self.log, "ParentBlockUnknown";
"parent_root" => format!("{}", parent),
"baby_block_slot" => block.slot,
);
return Err(format!(
"Block at slot {} has an unknown parent.",
block.slot
));
}
BlockProcessingOutcome::FutureSlot {
present_slot,
block_slot,
} => {
if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot {
// The block is too far in the future, drop it.
trace!(
self.log, "FutureBlock";
"msg" => "block for future slot rejected, check your time",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
);
return Err(format!(
"Block at slot {} is too far in the future",
block.slot
));
} else {
// The block is in the future, but not too far.
trace!(
self.log, "QueuedFutureBlock";
"msg" => "queuing future block, check your time",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
);
}
}
BlockProcessingOutcome::FinalizedSlot => {
trace!(
self.log, "Finalized or earlier block processed";
"outcome" => format!("{:?}", outcome),
);
// block reached our finalized slot or was earlier, move to the next block
}
_ => {
trace!(
self.log, "InvalidBlock";
"msg" => "peer sent invalid block",
"outcome" => format!("{:?}", outcome),
);
return Err(format!("Invalid block at slot {}", block.slot));
}
}
} else {
trace!(
self.log, "BlockProcessingFailure";
"msg" => "unexpected condition in processing block.",
"outcome" => format!("{:?}", processing_result)
);
return Err(format!(
"Unexpected block processing error: {:?}",
processing_result
));
}
}
Ok(())
}
}
fn root_at_slot<T: BeaconChainTypes>(
chain: &Arc<BeaconChain<T>>,
target_slot: Slot,
) -> Option<Hash256> {
chain
.rev_iter_block_roots()
.find(|(_root, slot)| *slot == target_slot)
.map(|(root, _slot)| root)
}
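// Illustration only (names assumed): this helper backs the "is the batch on our chain?"
// check performed in `beacon_blocks_response` above.
let earliest_slot = downloaded_blocks[0].slot;
let on_our_chain =
    Some(downloaded_blocks[0].canonical_root()) == root_at_slot(&chain, earliest_slot);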

View File

@ -1,4 +1,4 @@
mod import_queue; mod manager;
/// Syncing for lighthouse. /// Syncing for lighthouse.
/// ///
/// Stores the various syncing methods for the beacon chain. /// Stores the various syncing methods for the beacon chain.

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2_libp2p::PubsubMessage; use eth2_libp2p::PubsubMessage;
use eth2_libp2p::Topic; use eth2_libp2p::Topic;
use eth2_libp2p::BEACON_ATTESTATION_TOPIC; use eth2_libp2p::{BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX};
use futures::Future; use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use network::NetworkMessage; use network::NetworkMessage;
@ -144,7 +144,11 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
); );
// valid attestation, propagate to the network // valid attestation, propagate to the network
let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); let topic_string = format!(
"/{}/{}/{}",
TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX
);
let topic = Topic::new(topic_string);
let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); let message = PubsubMessage::Attestation(attestation.as_ssz_bytes());
self.network_chan self.network_chan

View File

@ -1,6 +1,6 @@
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use eth2_libp2p::BEACON_BLOCK_TOPIC;
use eth2_libp2p::{PubsubMessage, Topic}; use eth2_libp2p::{PubsubMessage, Topic};
use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX};
use futures::Future; use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use network::NetworkMessage; use network::NetworkMessage;
@ -105,8 +105,12 @@ impl<T: BeaconChainTypes> BeaconBlockService for BeaconBlockServiceInstance<T> {
"block_root" => format!("{}", block_root), "block_root" => format!("{}", block_root),
); );
// get the network topic to send on // create the network topic to send on
let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); let topic_string = format!(
"/{}/{}/{}",
TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX
);
let topic = Topic::new(topic_string);
let message = PubsubMessage::Block(block.as_ssz_bytes()); let message = PubsubMessage::Block(block.as_ssz_bytes());
// Publish the block to the p2p network via gossipsub. // Publish the block to the p2p network via gossipsub.

View File

@ -9,7 +9,6 @@ use std::fs;
use std::path::PathBuf; use std::path::PathBuf;
pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const DEFAULT_DATA_DIR: &str = ".lighthouse";
pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml";
pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml"; pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml";
@ -228,13 +227,6 @@ fn main() {
_ => unreachable!("guarded by clap"), _ => unreachable!("guarded by clap"),
}; };
let drain = match matches.occurrences_of("verbosity") {
0 => drain.filter_level(Level::Info),
1 => drain.filter_level(Level::Debug),
2 => drain.filter_level(Level::Trace),
_ => drain.filter_level(Level::Trace),
};
let mut log = slog::Logger::root(drain.fuse(), o!()); let mut log = slog::Logger::root(drain.fuse(), o!());
warn!( warn!(