diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5efb73423..531c4615a 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -13,7 +13,7 @@ client = { path = "client" } version = { path = "version" } clap = "2.32.0" rand = "0.7" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" ctrlc = { version = "3.1.1", features = ["termination"] } diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 05c58cc8b..9b5a9cf42 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -27,3 +27,5 @@ clap = "2.32.0" dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" +reqwest = "0.9" +url = "1.2" diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs new file mode 100644 index 000000000..c94d9a51d --- /dev/null +++ b/beacon_node/client/src/bootstrapper.rs @@ -0,0 +1,210 @@ +use eth2_libp2p::{ + multiaddr::{Multiaddr, Protocol}, + Enr, +}; +use reqwest::{Error as HttpError, Url}; +use serde::Deserialize; +use std::borrow::Cow; +use std::net::Ipv4Addr; +use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; +use url::Host; + +#[derive(Debug)] +enum Error { + InvalidUrl, + HttpError(HttpError), +} + +impl From for Error { + fn from(e: HttpError) -> Error { + Error::HttpError(e) + } +} + +/// Used to load "bootstrap" information from the HTTP API of another Lighthouse beacon node. +/// +/// Bootstrapping information includes things like genesis and finalized states and blocks, and +/// libp2p connection details. +pub struct Bootstrapper { + url: Url, +} + +impl Bootstrapper { + /// Parses the given `server` as a URL, instantiating `Self`. + pub fn from_server_string(server: String) -> Result { + Ok(Self { + url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, + }) + } + + /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. + /// + /// The address is created by querying the HTTP server for its listening libp2p addresses. + /// Then, we find the first TCP port in those addresses and combine the port with the URL of + /// the server. + /// + /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of + /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of + /// `/ipv4/172.0.0.1/tcp/9000`. + pub fn best_effort_multiaddr(&self) -> Option { + let tcp_port = self.listen_port().ok()?; + + let mut multiaddr = Multiaddr::with_capacity(2); + + match self.url.host()? { + Host::Ipv4(addr) => multiaddr.push(Protocol::Ip4(addr)), + Host::Domain(s) => multiaddr.push(Protocol::Dns4(Cow::Borrowed(s))), + _ => return None, + }; + + multiaddr.push(Protocol::Tcp(tcp_port)); + + Some(multiaddr) + } + + /// Returns the IPv4 address of the server URL, unless it contains a FQDN. + pub fn server_ipv4_addr(&self) -> Option { + match self.url.host()? { + Host::Ipv4(addr) => Some(addr), + _ => None, + } + } + + /// Returns the servers ENR address. + pub fn enr(&self) -> Result { + get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) + } + + /// Returns the servers listening libp2p addresses. + pub fn listen_port(&self) -> Result { + get_listen_port(self.url.clone()).map_err(|e| format!("Unable to get listen port: {:?}", e)) + } + + /// Returns the genesis block and state. 
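// --- Illustrative sketch, not part of this diff: one way a caller might drive the new
// `Bootstrapper`. The server URL below is hypothetical, and the generic parameters elided
// by formatting elsewhere in this file (e.g. `Result<Self, String>`, `BeaconState<T>`) are
// assumed from the surrounding code.
fn bootstrap_example() -> Result<(), String> {
    let bootstrapper = Bootstrapper::from_server_string("http://127.0.0.1:5052".to_string())?;
    // Fetch the remote node's ENR via its HTTP API.
    let _enr = bootstrapper.enr()?;
    if let Some(addr) = bootstrapper.best_effort_multiaddr() {
        // e.g. /ip4/127.0.0.1/tcp/9000, combining the server host with its libp2p TCP port.
        println!("bootstrap peer: {}", addr);
    }
    Ok(())
}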
+ pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { + let genesis_slot = Slot::new(0); + + let block = get_block(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis block: {:?}", e))? + .beacon_block; + let state = get_state(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis state: {:?}", e))? + .beacon_state; + + Ok((state, block)) + } + + /// Returns the most recent finalized state and block. + pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { + let slots_per_epoch = get_slots_per_epoch(self.url.clone()) + .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; + let finalized_slot = get_finalized_slot(self.url.clone(), slots_per_epoch.as_u64()) + .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; + + let block = get_block(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized block: {:?}", e))? + .beacon_block; + let state = get_state(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized state: {:?}", e))? + .beacon_state; + + Ok((state, block)) + } +} + +fn get_slots_per_epoch(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("slots_per_epoch"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("latest_finalized_checkpoint"); + }) + .map_err(|_| Error::InvalidUrl)?; + + let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; + + Ok(checkpoint.epoch.start_slot(slots_per_epoch)) +} + +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} + +fn get_state(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("state"); + }) + .map_err(|_| Error::InvalidUrl)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct BlockResponse { + pub root: Hash256, + pub beacon_block: BeaconBlock, +} + +fn get_block(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("block"); + }) + .map_err(|_| Error::InvalidUrl)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_enr(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("network").push("enr"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_listen_port(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("network").push("listen_port"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? 
+ .json() + .map_err(Into::into) +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e2baf22d5..0bb30d0af 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -137,16 +137,9 @@ where } do_state_catchup(&beacon_chain, &log); - // Start the network service, libp2p and syncing threads - // TODO: Add beacon_chain reference to network parameters let network_config = &client_config.network; - let network_logger = log.new(o!("Service" => "Network")); - let (network, network_send) = NetworkService::new( - beacon_chain.clone(), - network_config, - executor, - network_logger, - )?; + let (network, network_send) = + NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; // spawn the RPC server let rpc_exit_signal = if client_config.rpc.enabled { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 78e50ac79..d705637cb 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -34,7 +34,7 @@ pub fn run(client: &Client, executor: TaskExecutor, exit // Panics if libp2p is poisoned. let connected_peer_count = libp2p.lock().swarm.connected_peers(); - debug!(log, "libp2p"; "peer_count" => connected_peer_count); + debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count); if connected_peer_count <= WARN_PEER_COUNT { warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 006b895a1..92c2c80d4 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" @@ -26,5 +26,6 @@ smallvec = "0.6.10" fnv = "1.0.6" unsigned-varint = "0.2.2" bytes = "0.4.12" +tokio-io-timeout = "0.3.1" lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 9158fe485..2c574e46a 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -1,3 +1,4 @@ +use crate::config::*; use crate::discovery::Discovery; use crate::rpc::{RPCEvent, RPCMessage, RPC}; use crate::{error, NetworkConfig}; @@ -15,7 +16,6 @@ use libp2p::{ NetworkBehaviour, PeerId, }; use slog::{debug, o, trace}; -use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; @@ -91,7 +91,7 @@ impl NetworkBehaviourEventProcess { - trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg)); + trace!(self.log, "Received GossipEvent"); let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); @@ -192,10 +192,10 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. 
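// --- Illustrative sketch, not part of this diff: publishing an already-ssz-encoded block
// with the new `publish` signature shown just below. `behaviour` and `block_ssz_bytes` are
// hypothetical bindings; the topic string follows the /eth2/<topic>/ssz convention added in
// config.rs further down.
//
// let topic = Topic::new("/eth2/beacon_block/ssz".to_string());
// behaviour.publish(&[topic], PubsubMessage::Block(block_ssz_bytes));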
- pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { - let message_bytes = ssz_encode(&message); + pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) { + let message_data = message.to_data(); for topic in topics { - self.gossipsub.publish(topic, message_bytes.clone()); + self.gossipsub.publish(topic, message_data.clone()); } } @@ -224,13 +224,20 @@ pub enum BehaviourEvent { }, } -/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. +/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. These are encoded and +/// decoded upstream. #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. Block(Vec), /// Gossipsub message providing notification of a new attestation. Attestation(Vec), + /// Gossipsub message providing notification of a voluntary exit. + VoluntaryExit(Vec), + /// Gossipsub message providing notification of a new proposer slashing. + ProposerSlashing(Vec), + /// Gossipsub message providing notification of a new attester slashing. + AttesterSlashing(Vec), /// Gossipsub message from an unknown topic. Unknown(Vec), } @@ -244,29 +251,33 @@ impl PubsubMessage { */ fn from_topics(topics: &Vec, data: Vec) -> Self { for topic in topics { - match topic.as_str() { - BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), - BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), - _ => {} + // compare the prefix and postfix, then match on the topic + let topic_parts: Vec<&str> = topic.as_str().split('/').collect(); + if topic_parts.len() == 4 + && topic_parts[1] == TOPIC_PREFIX + && topic_parts[3] == TOPIC_ENCODING_POSTFIX + { + match topic_parts[2] { + BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), + BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), + VOLUNTARY_EXIT_TOPIC => return PubsubMessage::VoluntaryExit(data), + PROPOSER_SLASHING_TOPIC => return PubsubMessage::ProposerSlashing(data), + ATTESTER_SLASHING_TOPIC => return PubsubMessage::AttesterSlashing(data), + _ => {} + } } } PubsubMessage::Unknown(data) } -} -impl Encode for PubsubMessage { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_append(&self, buf: &mut Vec) { + fn to_data(self) -> Vec { match self { - PubsubMessage::Block(inner) - | PubsubMessage::Attestation(inner) - | PubsubMessage::Unknown(inner) => { - // Encode the gossip as a Vec; - buf.append(&mut inner.as_ssz_bytes()); - } + PubsubMessage::Block(data) + | PubsubMessage::Attestation(data) + | PubsubMessage::VoluntaryExit(data) + | PubsubMessage::ProposerSlashing(data) + | PubsubMessage::AttesterSlashing(data) + | PubsubMessage::Unknown(data) => data, } } } diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index ddf14cc04..7cb501c1f 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -6,9 +6,16 @@ use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -/// The beacon node topic string to subscribe to. +/// The gossipsub topic names. 
+// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX +// For example /eth2/beacon_block/ssz +pub const TOPIC_PREFIX: &str = "eth2"; +pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; +pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; +pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; +pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SHARD_TOPIC_PREFIX: &str = "shard"; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -63,10 +70,10 @@ impl Default for Config { discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_port: 9000, max_peers: 10, - //TODO: Set realistic values for production - // Note: This defaults topics to plain strings. Not hashes + // Note: The topics by default are sent as plain strings. Hashes are an optional + // parameter. gs_config: GossipsubConfigBuilder::new() - .max_transmit_size(1_000_000) + .max_transmit_size(1_048_576) .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 87d5dd558..1bbbb16fe 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -10,7 +10,7 @@ use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::multiaddr::Protocol; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use slog::{debug, info, o, warn}; +use slog::{debug, info, warn}; use std::collections::HashSet; use std::fs::File; use std::io::prelude::*; @@ -64,7 +64,7 @@ impl Discovery { config: &NetworkConfig, log: &slog::Logger, ) -> error::Result { - let log = log.new(o!("Service" => "Libp2p-Discovery")); + let log = log.clone(); // checks if current ENR matches that found on disk let local_enr = load_enr(local_key, config, &log)?; @@ -74,19 +74,19 @@ impl Discovery { None => String::from(""), }; - info!(log, "Local ENR: {}", local_enr.to_base64()); - debug!(log, "Local Node Id: {}", local_enr.node_id()); - debug!(log, "Local ENR seq: {}", local_enr.seq()); + info!(log, "ENR Initialised"; "ENR" => local_enr.to_base64(), "Seq" => local_enr.seq()); + debug!(log, "Discv5 Node ID Initialised"; "node_id" => format!("{}",local_enr.node_id())); let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address) - .map_err(|e| format!("Discv5 service failed: {:?}", e))?; + .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?; // Add bootnodes to routing table for bootnode_enr in config.boot_nodes.clone() { debug!( log, - "Adding node to routing table: {}", - bootnode_enr.node_id() + "Adding node to routing table"; + "Node ID" => format!("{}", + bootnode_enr.node_id()) ); discovery.add_enr(bootnode_enr); } @@ -133,7 +133,7 @@ impl Discovery { fn find_peers(&mut self) { // pick a random NodeId let random_node = NodeId::random(); - debug!(self.log, "Searching for peers..."); + debug!(self.log, "Searching for peers"); self.discovery.find_node(random_node); // update the time until next discovery @@ -217,7 +217,7 @@ where } Ok(Async::NotReady) => break, Err(e) => { - warn!(self.log, "Discovery peer search failed: {:?}", e); + warn!(self.log, "Discovery peer search failed"; "Error" => format!("{:?}", e)); } } } @@ -244,16 +244,16 @@ where }); } Discv5Event::FindNodeResult { closer_peers, .. 
} => { - debug!(self.log, "Discv5 query found {} peers", closer_peers.len()); + debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len()); if closer_peers.is_empty() { - debug!(self.log, "Discv5 random query yielded empty results"); + debug!(self.log, "Discovery random query found no peers"); } for peer_id in closer_peers { // if we need more peers, attempt a connection if self.connected_peers.len() < self.max_peers && self.connected_peers.get(&peer_id).is_none() { - debug!(self.log, "Discv5: Peer discovered"; "Peer"=> format!("{:?}", peer_id)); + debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id)); return Async::Ready(NetworkBehaviourAction::DialPeer { peer_id, }); @@ -300,14 +300,12 @@ fn load_enr( Ok(_) => { match Enr::from_str(&enr_string) { Ok(enr) => { - debug!(log, "ENR found in file: {:?}", enr_f); - if enr.node_id() == local_enr.node_id() { if enr.ip() == config.discovery_address.into() && enr.tcp() == Some(config.libp2p_port) && enr.udp() == Some(config.discovery_port) { - debug!(log, "ENR loaded from file"); + debug!(log, "ENR loaded from file"; "File" => format!("{:?}", enr_f)); // the stored ENR has the same configuration, use it return Ok(enr); } @@ -317,11 +315,11 @@ fn load_enr( local_enr.set_seq(new_seq_no, local_key).map_err(|e| { format!("Could not update ENR sequence number: {:?}", e) })?; - debug!(log, "ENR sequence number increased to: {}", new_seq_no); + debug!(log, "ENR sequence number increased"; "Seq" => new_seq_no); } } Err(e) => { - warn!(log, "ENR from file could not be decoded: {:?}", e); + warn!(log, "ENR from file could not be decoded"; "Error" => format!("{:?}", e)); } } } @@ -344,7 +342,7 @@ fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) { Err(e) => { warn!( log, - "Could not write ENR to file: {:?}{:?}. 
Error: {}", dir, ENR_FILENAME, e + "Could not write ENR to file"; "File" => format!("{:?}{:?}",dir, ENR_FILENAME), "Error" => format!("{}", e) ); } } diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 4c84469ce..4b4935818 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -16,6 +16,7 @@ mod service; pub use behaviour::PubsubMessage; pub use config::{ Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, + TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX, }; pub use libp2p::enr::Enr; pub use libp2p::gossipsub::{Topic, TopicHash}; diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 8e2bdaa64..260a00346 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -41,10 +41,8 @@ impl Encoder for SSZInboundCodec { RPCErrorResponse::Success(resp) => { match resp { RPCResponse::Hello(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockRoots(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockHeaders(res) => res.headers, // already raw bytes - RPCResponse::BeaconBlockBodies(res) => res.block_bodies, // already raw bytes - RPCResponse::BeaconChainState(res) => res.as_ssz_bytes(), + RPCResponse::BeaconBlocks(res) => res, // already raw bytes + RPCResponse::RecentBeaconBlocks(res) => res, // already raw bytes } } RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(), @@ -72,52 +70,30 @@ impl Decoder for SSZInboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol( - "Unknown GOODBYE version.as_str()", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockRoots( - BeaconBlockRootsRequest::from_ssz_bytes(&packet)?, + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCRequest::BeaconBlocks( + BeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockHeaders( - BeaconBlockHeadersRequest::from_ssz_bytes(&packet)?, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCRequest::RecentBeaconBlocks( + RecentBeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockBodies( - BeaconBlockBodiesRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - 
"Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconChainState( - BeaconChainStateRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown message name.")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), @@ -156,10 +132,8 @@ impl Encoder for SSZOutboundCodec { let bytes = match item { RPCRequest::Hello(req) => req.as_ssz_bytes(), RPCRequest::Goodbye(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockRoots(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockHeaders(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockBodies(req) => req.as_ssz_bytes(), - RPCRequest::BeaconChainState(req) => req.as_ssz_bytes(), + RPCRequest::BeaconBlocks(req) => req.as_ssz_bytes(), + RPCRequest::RecentBeaconBlocks(req) => req.as_ssz_bytes(), }; // length-prefix self.inner @@ -168,7 +142,11 @@ impl Encoder for SSZOutboundCodec { } } -// Decoder for outbound +// Decoder for outbound streams +// +// The majority of the decoding has now been pushed upstream due to the changing specification. +// We prefer to decode blocks and attestations with extra knowledge about the chain to perform +// faster verification checks before decoding entire blocks/attestations. impl Decoder for SSZOutboundCodec { type Item = RPCResponse; type Error = RPCError; @@ -177,53 +155,41 @@ impl Decoder for SSZOutboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version.")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockRoots( - BeaconBlockRootsResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockHeaders( - BeaconBlockHeadersResponse { - headers: packet.to_vec(), - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockBodies( - BeaconBlockBodiesResponse { - block_bodies: packet.to_vec(), - // this gets filled in the protocol handler - block_roots: None, - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconChainState( - 
BeaconChainStateResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown method")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, - Ok(None) => Ok(None), + Ok(None) => { + // the object sent could be a empty. We return the empty object if this is the case + match self.protocol.message_name.as_str() { + "hello" => match self.protocol.version.as_str() { + "1" => Ok(None), // cannot have an empty HELLO message. The stream has terminated unexpectedly + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + _ => unreachable!("Cannot negotiate an unknown protocol"), + } + } Err(e) => Err(e), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index dbc32c5a4..07322875f 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -1,4 +1,4 @@ -use super::methods::{RPCErrorResponse, RPCResponse, RequestId}; +use super::methods::RequestId; use super::protocol::{RPCError, RPCProtocol, RPCRequest}; use super::RPCEvent; use crate::rpc::protocol::{InboundFramed, OutboundFramed}; @@ -13,8 +13,8 @@ use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; -/// The time (in seconds) before a substream that is awaiting a response times out. -pub const RESPONSE_TIMEOUT: u64 = 9; +/// The time (in seconds) before a substream that is awaiting a response from the user times out. +pub const RESPONSE_TIMEOUT: u64 = 10; /// Implementation of `ProtocolsHandler` for the RPC protocol. pub struct RPCHandler @@ -314,14 +314,14 @@ where Ok(Async::Ready(response)) => { if let Some(response) = response { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - build_response(rpc_event, response), + RPCEvent::Response(rpc_event.id(), response), ))); } else { - // stream closed early + // stream closed early or nothing was sent return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error( rpc_event.id(), - RPCError::Custom("Stream Closed Early".into()), + RPCError::Custom("Stream closed early. Empty response".into()), ), ))); } @@ -365,31 +365,3 @@ where Ok(Async::NotReady) } } - -/// Given a response back from a peer and the request that sent it, construct a response to send -/// back to the user. This allows for some data manipulation of responses given requests. 
-fn build_response(rpc_event: RPCEvent, rpc_response: RPCErrorResponse) -> RPCEvent { - let id = rpc_event.id(); - - // handle the types of responses - match rpc_response { - RPCErrorResponse::Success(response) => { - match response { - // if the response is block roots, tag on the extra request data - RPCResponse::BeaconBlockBodies(mut resp) => { - if let RPCEvent::Request(_id, RPCRequest::BeaconBlockBodies(bodies_req)) = - rpc_event - { - resp.block_roots = Some(bodies_req.block_roots); - } - RPCEvent::Response( - id, - RPCErrorResponse::Success(RPCResponse::BeaconBlockBodies(resp)), - ) - } - _ => RPCEvent::Response(id, RPCErrorResponse::Success(response)), - } - } - _ => RPCEvent::Response(id, rpc_response), - } -} diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 2e5a9a7ff..d912bcfa1 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -2,7 +2,7 @@ use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz_derive::{Decode, Encode}; -use types::{BeaconBlockBody, Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; /* Request/Response data structures for RPC methods */ @@ -13,23 +13,20 @@ pub type RequestId = usize; /// The HELLO request/response handshake message. #[derive(Encode, Decode, Clone, Debug)] pub struct HelloMessage { - /// The network ID of the peer. - pub network_id: u8, + /// The fork version of the chain we are broadcasting. + pub fork_version: [u8; 4], - /// The chain id for the HELLO request. - pub chain_id: u64, + /// Latest finalized root. + pub finalized_root: Hash256, - /// The peers last finalized root. - pub latest_finalized_root: Hash256, + /// Latest finalized epoch. + pub finalized_epoch: Epoch, - /// The peers last finalized epoch. - pub latest_finalized_epoch: Epoch, + /// The latest block root. + pub head_root: Hash256, - /// The peers last block root. - pub best_root: Hash256, - - /// The peers last slot. - pub best_slot: Slot, + /// The slot associated with the latest block root. + pub head_slot: Slot, } /// The reason given for a `Goodbye` message. @@ -74,108 +71,31 @@ impl_decode_via_from!(GoodbyeReason, u64); /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsRequest { - /// The starting slot of the requested blocks. - pub start_slot: Slot, +pub struct BeaconBlocksRequest { + /// The hash tree root of a block on the requested chain. + pub head_block_root: Hash256, + + /// The starting slot to request blocks. + pub start_slot: u64, /// The number of blocks from the start slot. - pub count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers -} + pub count: u64, -/// Response containing a number of beacon block roots from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsResponse { - /// List of requested blocks and associated slots. - pub roots: Vec, -} - -/// Contains a block root and associated slot. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BlockRootSlot { - /// The block root. - pub block_root: Hash256, - - /// The block slot. - pub slot: Slot, -} - -/// The response of a beacon block roots request. -impl BeaconBlockRootsResponse { - /// Returns `true` if each `self.roots.slot[i]` is higher than the preceding `i`. 
- pub fn slots_are_ascending(&self) -> bool { - for window in self.roots.windows(2) { - if window[0].slot >= window[1].slot { - return false; - } - } - - true - } -} - -/// Request a number of beacon block headers from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersRequest { - /// The starting header hash of the requested headers. - pub start_root: Hash256, - - /// The starting slot of the requested headers. - pub start_slot: Slot, - - /// The maximum number of headers than can be returned. - pub max_headers: u64, - - /// The maximum number of slots to skip between blocks. - pub skip_slots: u64, -} - -/// Response containing requested block headers. -#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersResponse { - /// The list of ssz-encoded requested beacon block headers. - pub headers: Vec, + /// The step increment to receive blocks. + /// + /// A value of 1 returns every block. + /// A value of 2 returns every second block. + /// A value of 3 returns every third block and so on. + pub step: u64, } /// Request a number of beacon block bodies from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesRequest { +pub struct RecentBeaconBlocksRequest { /// The list of beacon block bodies being requested. pub block_roots: Vec, } -/// Response containing the list of requested beacon block bodies. -#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesResponse { - /// The list of hashes that were sent in the request and match these roots response. None when - /// sending outbound. - pub block_roots: Option>, - /// The list of ssz-encoded beacon block bodies being requested. - pub block_bodies: Vec, -} - -/// The decoded version of `BeaconBlockBodiesResponse` which is expected in `SimpleSync`. -pub struct DecodedBeaconBlockBodiesResponse { - /// The list of hashes sent in the request to get this response. - pub block_roots: Vec, - /// The valid decoded block bodies. - pub block_bodies: Vec>, -} - -/// Request values for tree hashes which yield a blocks `state_root`. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateRequest { - /// The tree hashes that a value is requested for. - pub hashes: Vec, -} - -/// Request values for tree hashes which yield a blocks `state_root`. -// Note: TBD -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateResponse { - /// The values corresponding the to the requested tree hashes. - pub values: bool, //TBD - stubbed with encodable bool -} - /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -183,14 +103,10 @@ pub struct BeaconChainStateResponse { pub enum RPCResponse { /// A HELLO message. Hello(HelloMessage), - /// A response to a get BEACON_BLOCK_ROOTS request. - BeaconBlockRoots(BeaconBlockRootsResponse), - /// A response to a get BEACON_BLOCK_HEADERS request. - BeaconBlockHeaders(BeaconBlockHeadersResponse), - /// A response to a get BEACON_BLOCK_BODIES request. - BeaconBlockBodies(BeaconBlockBodiesResponse), - /// A response to a get BEACON_CHAIN_STATE request. - BeaconChainState(BeaconChainStateResponse), + /// A response to a get BEACON_BLOCKS request. + BeaconBlocks(Vec), + /// A response to a get RECENT_BEACON_BLOCKS request. 
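// --- Illustrative sketch, not part of this diff: the slots implied by the
// `start_slot`/`count`/`step` fields documented above. The helper name is hypothetical.
fn requested_slots(req: &BeaconBlocksRequest) -> Vec<u64> {
    // e.g. start_slot = 100, count = 4, step = 2  =>  [100, 102, 104, 106]
    (0..req.count).map(|i| req.start_slot + i * req.step).collect()
}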
+ RecentBeaconBlocks(Vec), } #[derive(Debug)] @@ -206,8 +122,8 @@ impl RPCErrorResponse { pub fn as_u8(&self) -> u8 { match self { RPCErrorResponse::Success(_) => 0, - RPCErrorResponse::InvalidRequest(_) => 2, - RPCErrorResponse::ServerError(_) => 3, + RPCErrorResponse::InvalidRequest(_) => 1, + RPCErrorResponse::ServerError(_) => 2, RPCErrorResponse::Unknown(_) => 255, } } @@ -223,8 +139,8 @@ impl RPCErrorResponse { /// Builds an RPCErrorResponse from a response code and an ErrorMessage pub fn from_error(response_code: u8, err: ErrorMessage) -> Self { match response_code { - 2 => RPCErrorResponse::InvalidRequest(err), - 3 => RPCErrorResponse::ServerError(err), + 1 => RPCErrorResponse::InvalidRequest(err), + 2 => RPCErrorResponse::ServerError(err), _ => RPCErrorResponse::Unknown(err), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index b606fc743..be1efdf5d 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -16,13 +16,17 @@ use tokio::io::{AsyncRead, AsyncWrite}; use tokio::prelude::*; use tokio::timer::timeout; use tokio::util::FutureExt; +use tokio_io_timeout::TimeoutStream; /// The maximum bytes that can be sent across the RPC. const MAX_RPC_SIZE: usize = 4_194_304; // 4M /// The protocol prefix the RPC protocol id. -const PROTOCOL_PREFIX: &str = "/eth2/beacon_node/rpc"; -/// The number of seconds to wait for a request once a protocol has been established before the stream is terminated. -const REQUEST_TIMEOUT: u64 = 3; +const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; +/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). +const TTFB_TIMEOUT: u64 = 5; +/// The number of seconds to wait for the first bytes of a request once a protocol has been +/// established before the stream is terminated. +const REQUEST_TIMEOUT: u64 = 15; #[derive(Debug, Clone)] pub struct RPCProtocol; @@ -33,11 +37,10 @@ impl UpgradeInfo for RPCProtocol { fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"), + ProtocolId::new("hello", "1", "ssz"), + ProtocolId::new("goodbye", "1", "ssz"), + ProtocolId::new("beacon_blocks", "1", "ssz"), + ProtocolId::new("recent_beacon_blocks", "1", "ssz"), ] } } @@ -87,7 +90,7 @@ impl ProtocolName for ProtocolId { // handler to respond to once ready. 
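// --- Illustrative note, not part of this diff: assuming `ProtocolId::new` joins
// PROTOCOL_PREFIX, the message name, the version and the encoding with '/', the four
// entries listed above would negotiate protocol ids of the form:
//
//     /eth2/beacon_chain/req/hello/1/ssz
//     /eth2/beacon_chain/req/beacon_blocks/1/ssz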
pub type InboundOutput = (RPCRequest, InboundFramed); -pub type InboundFramed = Framed, InboundCodec>; +pub type InboundFramed = Framed>, InboundCodec>; type FnAndThen = fn( (Option, InboundFramed), ) -> FutureResult, RPCError>; @@ -118,7 +121,9 @@ where "ssz" | _ => { let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = InboundCodec::SSZ(ssz_codec); - Framed::new(socket, codec) + let mut timed_socket = TimeoutStream::new(socket); + timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); + Framed::new(timed_socket, codec) .into_future() .timeout(Duration::from_secs(REQUEST_TIMEOUT)) .map_err(RPCError::from as FnMapErr) @@ -144,10 +149,8 @@ where pub enum RPCRequest { Hello(HelloMessage), Goodbye(GoodbyeReason), - BeaconBlockRoots(BeaconBlockRootsRequest), - BeaconBlockHeaders(BeaconBlockHeadersRequest), - BeaconBlockBodies(BeaconBlockBodiesRequest), - BeaconChainState(BeaconChainStateRequest), + BeaconBlocks(BeaconBlocksRequest), + RecentBeaconBlocks(RecentBeaconBlocksRequest), } impl UpgradeInfo for RPCRequest { @@ -165,22 +168,11 @@ impl RPCRequest { pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Hello(_) => vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ], - RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], - RPCRequest::BeaconBlockRoots(_) => { - vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockHeaders(_) => { - vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockBodies(_) => { - vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")] - } - RPCRequest::BeaconChainState(_) => { - vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")] + RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1", "ssz")], + RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1", "ssz")], + RPCRequest::BeaconBlocks(_) => vec![ProtocolId::new("beacon_blocks", "1", "ssz")], + RPCRequest::RecentBeaconBlocks(_) => { + vec![ProtocolId::new("recent_beacon_blocks", "1", "ssz")] } } } @@ -215,7 +207,8 @@ where ) -> Self::Future { match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); + let ssz_codec = + BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = OutboundCodec::SSZ(ssz_codec); Framed::new(socket, codec).send(self) } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index e208dbeca..d0324f6b8 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -1,10 +1,10 @@ use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage}; +use crate::config::*; use crate::error; use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -40,13 +40,12 @@ pub struct Service { impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { - debug!(log, "Network-libp2p Service starting"); + trace!(log, "Libp2p Service starting"); // load the private key from CLI flag, disk or generate a new one let local_private_key = load_private_key(&config, &log); - let local_peer_id = PeerId::from(local_private_key.public()); - 
info!(log, "Local peer id: {:?}", local_peer_id); + info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); let mut swarm = { // Set up the transport - tcp/ws with secio and mplex/yamux @@ -67,7 +66,7 @@ impl Service { Ok(_) => { let mut log_address = listen_multiaddr; log_address.push(Protocol::P2p(local_peer_id.clone().into())); - info!(log, "Listening on: {}", log_address); + info!(log, "Listening established"; "Address" => format!("{}", log_address)); } Err(err) => { crit!( @@ -83,20 +82,34 @@ impl Service { // attempt to connect to user-input libp2p nodes for multiaddr in config.libp2p_nodes { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), + Ok(()) => debug!(log, "Dialing libp2p peer"; "Address" => format!("{}", multiaddr)), Err(err) => debug!( log, - "Could not connect to node: {} error: {:?}", multiaddr, err + "Could not connect to peer"; "Address" => format!("{}", multiaddr), "Error" => format!("{:?}", err) ), }; } // subscribe to default gossipsub topics let mut topics = vec![]; - //TODO: Handle multiple shard attestations. For now we simply use a separate topic for - // attestations - topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); - topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); + + /* Here we subscribe to all the required gossipsub topics required for interop. + * The topic builder adds the required prefix and postfix to the hardcoded topics that we + * must subscribe to. + */ + let topic_builder = |topic| { + Topic::new(format!( + "/{}/{}/{}", + TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX, + )) + }; + topics.push(topic_builder(BEACON_BLOCK_TOPIC)); + topics.push(topic_builder(BEACON_ATTESTATION_TOPIC)); + topics.push(topic_builder(VOLUNTARY_EXIT_TOPIC)); + topics.push(topic_builder(PROPOSER_SLASHING_TOPIC)); + topics.push(topic_builder(ATTESTER_SLASHING_TOPIC)); + + // Add any topics specified by the user topics.append( &mut config .topics @@ -109,13 +122,13 @@ impl Service { let mut subscribed_topics = vec![]; for topic in topics { if swarm.subscribe(topic.clone()) { - trace!(log, "Subscribed to topic: {:?}", topic); + trace!(log, "Subscribed to topic"; "Topic" => format!("{}", topic)); subscribed_topics.push(topic); } else { - warn!(log, "Could not subscribe to topic: {:?}", topic) + warn!(log, "Could not subscribe to topic"; "Topic" => format!("{}", topic)); } } - info!(log, "Subscribed to topics: {:?}", subscribed_topics); + info!(log, "Subscribed to topics"; "Topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::>())); Ok(Service { local_peer_id, @@ -140,7 +153,7 @@ impl Stream for Service { topics, message, } => { - trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); + trace!(self.log, "Gossipsub message received"; "service" => "Swarm"); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { source, topics, diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 72a507ad7..c14fc970d 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,8 +1,7 @@ use crate::error; -use crate::service::{NetworkMessage, OutgoingMessage}; +use crate::service::NetworkMessage; use crate::sync::SimpleSync; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; use eth2_libp2p::{ behaviour::PubsubMessage, rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId}, @@ -10,11 
+9,11 @@ use eth2_libp2p::{ }; use futures::future::Future; use futures::stream::Stream; -use slog::{debug, warn}; +use slog::{debug, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{Attestation, BeaconBlock, BeaconBlockHeader}; +use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, VoluntaryExit}; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -22,8 +21,6 @@ pub struct MessageHandler { _chain: Arc>, /// The syncing framework. sync: SimpleSync, - /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, /// The `MessageHandler` logger. log: slog::Logger, } @@ -49,23 +46,20 @@ impl MessageHandler { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { - debug!(log, "Service starting"); + trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // Initialise sync and begin processing in thread - // generate the Message handler - let sync = SimpleSync::new(beacon_chain.clone(), &log); + let sync = SimpleSync::new(beacon_chain.clone(), network_send, &log); + // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, - network_context: NetworkContext::new(network_send, log.clone()), log: log.clone(), }; - // spawn handler task - // TODO: Handle manual termination of thread + // spawn handler task and move the message handler instance into the spawned thread executor.spawn( handler_recv .for_each(move |msg| Ok(handler.handle_message(msg))) @@ -82,17 +76,17 @@ impl MessageHandler { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { - self.sync.on_connect(peer_id, &mut self.network_context); + self.sync.on_connect(peer_id); } // A peer has disconnected HandlerMessage::PeerDisconnected(peer_id) => { self.sync.on_disconnect(peer_id); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::RPC(peer_id, rpc_event) => { self.handle_rpc_message(peer_id, rpc_event); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::PubsubMessage(peer_id, gossip) => { self.handle_gossip(peer_id, gossip); } @@ -105,7 +99,7 @@ impl MessageHandler { fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { match rpc_message { RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req), - RPCEvent::Response(_id, resp) => self.handle_rpc_response(peer_id, resp), + RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp), RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error), } } @@ -113,106 +107,81 @@ impl MessageHandler { /// A new RPC request has been received from the network. 
fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { match request { - RPCRequest::Hello(hello_message) => self.sync.on_hello_request( - peer_id, - request_id, - hello_message, - &mut self.network_context, - ), - RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason), - RPCRequest::BeaconBlockRoots(request) => self.sync.on_beacon_block_roots_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::BeaconBlockHeaders(request) => self.sync.on_beacon_block_headers_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::BeaconBlockBodies(request) => self.sync.on_beacon_block_bodies_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - warn!(self.log, "BeaconChainState RPC call is not supported."); + RPCRequest::Hello(hello_message) => { + self.sync + .on_hello_request(peer_id, request_id, hello_message) } + RPCRequest::Goodbye(goodbye_reason) => { + debug!( + self.log, "PeerGoodbye"; + "peer" => format!("{:?}", peer_id), + "reason" => format!("{:?}", goodbye_reason), + ); + self.sync.on_disconnect(peer_id); + } + RPCRequest::BeaconBlocks(request) => self + .sync + .on_beacon_blocks_request(peer_id, request_id, request), + RPCRequest::RecentBeaconBlocks(request) => self + .sync + .on_recent_beacon_blocks_request(peer_id, request_id, request), } } /// An RPC response has been received from the network. // we match on id and ignore responses past the timeout. - fn handle_rpc_response(&mut self, peer_id: PeerId, error_response: RPCErrorResponse) { + fn handle_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + error_response: RPCErrorResponse, + ) { // an error could have occurred. 
- // TODO: Handle Error gracefully match error_response { RPCErrorResponse::InvalidRequest(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Invalid Request" => error.as_string()) + warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::ServerError(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Server Error" => error.as_string()) + warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Unknown(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Unknown Error" => error.as_string()) + warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Success(response) => { match response { RPCResponse::Hello(hello_message) => { - self.sync.on_hello_response( - peer_id, - hello_message, - &mut self.network_context, - ); + self.sync.on_hello_response(peer_id, hello_message); } - RPCResponse::BeaconBlockRoots(response) => { - self.sync.on_beacon_block_roots_response( - peer_id, - response, - &mut self.network_context, - ); - } - RPCResponse::BeaconBlockHeaders(response) => { - match self.decode_block_headers(response) { - Ok(decoded_block_headers) => { - self.sync.on_beacon_block_headers_response( + RPCResponse::BeaconBlocks(response) => { + match self.decode_beacon_blocks(&response) { + Ok(beacon_blocks) => { + self.sync.on_beacon_blocks_response( peer_id, - decoded_block_headers, - &mut self.network_context, + request_id, + beacon_blocks, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block headers";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconBlockBodies(response) => { - match self.decode_block_bodies(response) { - Ok(decoded_block_bodies) => { - self.sync.on_beacon_block_bodies_response( + RPCResponse::RecentBeaconBlocks(response) => { + match self.decode_beacon_blocks(&response) { + Ok(beacon_blocks) => { + self.sync.on_recent_beacon_blocks_response( peer_id, - decoded_block_bodies, - &mut self.network_context, + request_id, + beacon_blocks, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block bodies";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - // - // Theoretically, we shouldn't reach this code because we should never send a - // beacon state RPC request. 
- warn!(self.log, "BeaconChainState RPC call is not supported."); - } } } } @@ -221,43 +190,74 @@ impl MessageHandler { /// Handle various RPC errors fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { //TODO: Handle error correctly - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "request_id" => format!("{}", request_id), "Error" => format!("{:?}", error)); } /// Handle RPC messages fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { - Err(e) => { - debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } Ok(block) => { - let _should_forward_on = - self.sync - .on_block_gossip(peer_id, block, &mut self.network_context); + let _should_forward_on = self.sync.on_block_gossip(peer_id, block); + } + Err(e) => { + debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { + Ok(attestation) => self.sync.on_attestation_gossip(peer_id, attestation), Err(e) => { - debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } - Ok(attestation) => { - self.sync - .on_attestation_gossip(peer_id, attestation, &mut self.network_context) + debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, + PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) { + Ok(_exit) => { + // TODO: Handle exits + debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped exit"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + }, + PubsubMessage::ProposerSlashing(message) => { + match self.decode_gossip_proposer_slashing(message) { + Ok(_slashing) => { + // TODO: Handle proposer slashings + debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped proposer slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } + PubsubMessage::AttesterSlashing(message) => { + match self.decode_gossip_attestation_slashing(message) { + Ok(_slashing) => { + // TODO: Handle attester slashings + debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped attester slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } PubsubMessage::Unknown(message) => { // Received a message from an unknown topic. Ignore for now - debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); + debug!(self.log, "Unknown Gossip Message"; "peer_id" => format!("{}", peer_id), "Message" => format!("{:?}", message)); } } } - /* Decoding of blocks and attestations from the network. + /* Decoding of gossipsub objects from the network. 
+ * + * The decoding is done in the message handler as it has access to to a `BeaconChain` and can + * therefore apply more efficient logic in decoding and verification. * * TODO: Apply efficient decoding/verification of these objects */ + /* Gossipsub Domain Decoding */ + // Note: These are not generics as type-specific verification will need to be applied. fn decode_gossip_block( &self, beacon_block: Vec, @@ -274,80 +274,39 @@ impl MessageHandler { Attestation::from_ssz_bytes(&beacon_block) } - /// Verifies and decodes the ssz-encoded block bodies received from peers. - fn decode_block_bodies( + fn decode_gossip_exit(&self, voluntary_exit: Vec) -> Result { + //TODO: Apply verification before decoding. + VoluntaryExit::from_ssz_bytes(&voluntary_exit) + } + + fn decode_gossip_proposer_slashing( &self, - bodies_response: BeaconBlockBodiesResponse, - ) -> Result, DecodeError> { + proposer_slashing: Vec, + ) -> Result { + //TODO: Apply verification before decoding. + ProposerSlashing::from_ssz_bytes(&proposer_slashing) + } + + fn decode_gossip_attestation_slashing( + &self, + attester_slashing: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + AttesterSlashing::from_ssz_bytes(&attester_slashing) + } + + /* Req/Resp Domain Decoding */ + + /// Verifies and decodes an ssz-encoded list of `BeaconBlock`s. This list may contain empty + /// entries encoded with an SSZ NULL. + fn decode_beacon_blocks( + &self, + beacon_blocks: &[u8], + ) -> Result>, DecodeError> { + if beacon_blocks.is_empty() { + return Ok(Vec::new()); + } //TODO: Implement faster block verification before decoding entirely - let block_bodies = Vec::from_ssz_bytes(&bodies_response.block_bodies)?; - Ok(DecodedBeaconBlockBodiesResponse { - block_roots: bodies_response - .block_roots - .expect("Responses must have associated roots"), - block_bodies, - }) - } - - /// Verifies and decodes the ssz-encoded block headers received from peers. - fn decode_block_headers( - &self, - headers_response: BeaconBlockHeadersResponse, - ) -> Result, DecodeError> { - //TODO: Implement faster header verification before decoding entirely - Vec::from_ssz_bytes(&headers_response.headers) - } -} - -// TODO: RPC Rewrite makes this struct fairly pointless -pub struct NetworkContext { - /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender, - /// The `MessageHandler` logger. - log: slog::Logger, -} - -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { - Self { network_send, log } - } - - pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) - // TODO: disconnect peers. - } - - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { - // Note: There is currently no use of keeping track of requests. However the functionality - // is left here for future revisions. 
- self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); - } - - //TODO: Handle Error responses - pub fn send_rpc_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - rpc_response: RPCResponse, - ) { - self.send_rpc_event( - peer_id, - RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), - ); - } - - fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.send(peer_id, OutgoingMessage::RPC(rpc_event)) - } - - fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { - self.network_send - .try_send(NetworkMessage::Send(peer_id, outgoing_message)) - .unwrap_or_else(|_| { - warn!( - self.log, - "Could not send RPC message to the network service" - ) - }); + Vec::from_ssz_bytes(&beacon_blocks) } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 152f4dc77..a8b3c74b6 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -21,8 +21,7 @@ pub struct Service { libp2p_port: u16, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, - _phantom: PhantomData, //message_handler: MessageHandler, - //message_handler_send: Sender + _phantom: PhantomData, } impl Service { @@ -43,17 +42,19 @@ impl Service { message_handler_log, )?; + let network_log = log.new(o!("Service" => "Network")); // launch libp2p service - let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = Arc::new(Mutex::new(LibP2PService::new(config.clone(), libp2p_log)?)); + let libp2p_service = Arc::new(Mutex::new(LibP2PService::new( + config.clone(), + network_log.clone(), + )?)); - // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. let libp2p_exit = spawn_service( libp2p_service.clone(), network_recv, message_handler_send, executor, - log, + network_log, )?; let network_service = Service { libp2p_service, @@ -166,7 +167,7 @@ fn network_service( }, NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.lock().swarm.publish(topics, message); + libp2p_service.lock().swarm.publish(&topics, message); } }, Ok(Async::NotReady) => break, @@ -190,13 +191,13 @@ fn network_service( .map_err(|_| "Failed to send RPC to handler")?; } Libp2pEvent::PeerDialed(peer_id) => { - debug!(log, "Peer Dialed: {:?}", peer_id); + debug!(log, "Peer Dialed"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDialed(peer_id)) .map_err(|_| "Failed to send PeerDialed to handler")?; } Libp2pEvent::PeerDisconnected(peer_id) => { - debug!(log, "Peer Disconnected: {:?}", peer_id); + debug!(log, "Peer Disconnected"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDisconnected(peer_id)) .map_err(|_| "Failed to send PeerDisconnected to handler")?; diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs deleted file mode 100644 index 5503ed64f..000000000 --- a/beacon_node/network/src/sync/import_queue.rs +++ /dev/null @@ -1,307 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::PeerId; -use slog::error; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use tree_hash::TreeHash; -use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot}; - -/// Provides a queue for fully and partially built `BeaconBlock`s. 
-/// -/// The queue is fundamentally a `Vec` where no two items have the same -/// `item.block_root`. This struct it backed by a `Vec` not a `HashMap` for the following two -/// reasons: -/// -/// - When we receive a `BeaconBlockBody`, the only way we can find it's matching -/// `BeaconBlockHeader` is to find a header such that `header.beacon_block_body == -/// tree_hash_root(body)`. Therefore, if we used a `HashMap` we would need to use the root of -/// `BeaconBlockBody` as the key. -/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore -/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`. -pub struct ImportQueue { - pub chain: Arc>, - /// Partially imported blocks, keyed by the root of `BeaconBlockBody`. - partials: HashMap>, - /// Time before a queue entry is considered state. - pub stale_time: Duration, - /// Logging - log: slog::Logger, -} - -impl ImportQueue { - /// Return a new, empty queue. - pub fn new(chain: Arc>, stale_time: Duration, log: slog::Logger) -> Self { - Self { - chain, - partials: HashMap::new(), - stale_time, - log, - } - } - - /// Returns true of the if the `BlockRoot` is found in the `import_queue`. - pub fn contains_block_root(&self, block_root: Hash256) -> bool { - self.partials.contains_key(&block_root) - } - - /// Attempts to complete the `BlockRoot` if it is found in the `import_queue`. - /// - /// Returns an Enum with a `PartialBeaconBlockCompletion`. - /// Does not remove the `block_root` from the `import_queue`. - pub fn attempt_complete_block( - &self, - block_root: Hash256, - ) -> PartialBeaconBlockCompletion { - if let Some(partial) = self.partials.get(&block_root) { - partial.attempt_complete() - } else { - PartialBeaconBlockCompletion::MissingRoot - } - } - - /// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial - /// if it exists. - pub fn remove(&mut self, block_root: Hash256) -> Option> { - self.partials.remove(&block_root) - } - - /// Flushes all stale entries from the queue. - /// - /// An entry is stale if it has as a `inserted` time that is more than `self.stale_time` in the - /// past. - pub fn remove_stale(&mut self) { - let stale_time = self.stale_time; - - self.partials - .retain(|_, partial| partial.inserted + stale_time > Instant::now()) - } - - /// Returns `true` if `self.chain` has not yet processed this block. - pub fn chain_has_not_seen_block(&self, block_root: &Hash256) -> bool { - self.chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - true - }) - } - - /// Adds the `block_roots` to the partials queue. - /// - /// If a `block_root` is not in the queue and has not been processed by the chain it is added - /// to the queue and it's block root is included in the output. - pub fn enqueue_block_roots( - &mut self, - block_roots: &[BlockRootSlot], - sender: PeerId, - ) -> Vec { - // TODO: This will currently not return a `BlockRootSlot` if this root exists but there is no header. - // It would be more robust if it did. - let new_block_root_slots: Vec = block_roots - .iter() - // Ignore any roots already stored in the queue. - .filter(|brs| !self.contains_block_root(brs.block_root)) - // Ignore any roots already processed by the chain. 
- .filter(|brs| self.chain_has_not_seen_block(&brs.block_root)) - .cloned() - .collect(); - - self.partials.extend( - new_block_root_slots - .iter() - .map(|brs| PartialBeaconBlock { - slot: brs.slot, - block_root: brs.block_root, - sender: sender.clone(), - header: None, - body: None, - inserted: Instant::now(), - }) - .map(|partial| (partial.block_root, partial)), - ); - - new_block_root_slots - } - - /// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for - /// which we should use to request `BeaconBlockBodies`. - /// - /// If a `header` is not in the queue and has not been processed by the chain it is added to - /// the queue and it's block root is included in the output. - /// - /// If a `header` is already in the queue, but not yet processed by the chain the block root is - /// not included in the output and the `inserted` time for the partial record is set to - /// `Instant::now()`. Updating the `inserted` time stops the partial from becoming stale. - pub fn enqueue_headers( - &mut self, - headers: Vec, - sender: PeerId, - ) -> Vec { - let mut required_bodies: Vec = vec![]; - - for header in headers { - let block_root = Hash256::from_slice(&header.canonical_root()[..]); - - if self.chain_has_not_seen_block(&block_root) - && !self.insert_header(block_root, header, sender.clone()) - { - // If a body is empty - required_bodies.push(block_root); - } - } - - required_bodies - } - - /// If there is a matching `header` for this `body`, adds it to the queue. - /// - /// If there is no `header` for the `body`, the body is simply discarded. - pub fn enqueue_bodies( - &mut self, - bodies: Vec>, - sender: PeerId, - ) -> Option { - let mut last_block_hash = None; - for body in bodies { - last_block_hash = self.insert_body(body, sender.clone()); - } - - last_block_hash - } - - pub fn enqueue_full_blocks(&mut self, blocks: Vec>, sender: PeerId) { - for block in blocks { - self.insert_full_block(block, sender.clone()); - } - } - - /// Inserts a header to the queue. - /// - /// If the header already exists, the `inserted` time is set to `now` and not other - /// modifications are made. - /// Returns true is `body` exists. - fn insert_header( - &mut self, - block_root: Hash256, - header: BeaconBlockHeader, - sender: PeerId, - ) -> bool { - let mut exists = false; - self.partials - .entry(block_root) - .and_modify(|partial| { - partial.header = Some(header.clone()); - partial.inserted = Instant::now(); - if partial.body.is_some() { - exists = true; - } - }) - .or_insert_with(|| PartialBeaconBlock { - slot: header.slot, - block_root, - header: Some(header), - body: None, - inserted: Instant::now(), - sender, - }); - exists - } - - /// Updates an existing partial with the `body`. - /// - /// If the body already existed, the `inserted` time is set to `now`. - /// - /// Returns the block hash of the inserted body - fn insert_body( - &mut self, - body: BeaconBlockBody, - sender: PeerId, - ) -> Option { - let body_root = Hash256::from_slice(&body.tree_hash_root()[..]); - let mut last_root = None; - - self.partials.iter_mut().for_each(|(root, mut p)| { - if let Some(header) = &mut p.header { - if body_root == header.body_root { - p.inserted = Instant::now(); - p.body = Some(body.clone()); - p.sender = sender.clone(); - last_root = Some(*root); - } - } - }); - - last_root - } - - /// Updates an existing `partial` with the completed block, or adds a new (complete) partial. - /// - /// If the partial already existed, the `inserted` time is set to `now`. 
- fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { - let block_root = Hash256::from_slice(&block.canonical_root()[..]); - - let partial = PartialBeaconBlock { - slot: block.slot, - block_root, - header: Some(block.block_header()), - body: Some(block.body), - inserted: Instant::now(), - sender, - }; - - self.partials - .entry(block_root) - .and_modify(|existing_partial| *existing_partial = partial.clone()) - .or_insert(partial); - } -} - -/// Individual components of a `BeaconBlock`, potentially all that are required to form a full -/// `BeaconBlock`. -#[derive(Clone, Debug)] -pub struct PartialBeaconBlock { - pub slot: Slot, - /// `BeaconBlock` root. - pub block_root: Hash256, - pub header: Option, - pub body: Option>, - /// The instant at which this record was created or last meaningfully modified. Used to - /// determine if an entry is stale and should be removed. - pub inserted: Instant, - /// The `PeerId` that last meaningfully contributed to this item. - pub sender: PeerId, -} - -impl PartialBeaconBlock { - /// Attempts to build a block. - /// - /// Does not comsume the `PartialBeaconBlock`. - pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion { - if self.header.is_none() { - PartialBeaconBlockCompletion::MissingHeader(self.slot) - } else if self.body.is_none() { - PartialBeaconBlockCompletion::MissingBody - } else { - PartialBeaconBlockCompletion::Complete( - self.header - .clone() - .unwrap() - .into_block(self.body.clone().unwrap()), - ) - } - } -} - -/// The result of trying to convert a `BeaconBlock` into a `PartialBeaconBlock`. -pub enum PartialBeaconBlockCompletion { - /// The partial contains a valid BeaconBlock. - Complete(BeaconBlock), - /// The partial does not exist. - MissingRoot, - /// The partial contains a `BeaconBlockRoot` but no `BeaconBlockHeader`. - MissingHeader(Slot), - /// The partial contains a `BeaconBlockRoot` and `BeaconBlockHeader` but no `BeaconBlockBody`. - MissingBody, -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs new file mode 100644 index 000000000..b81da0991 --- /dev/null +++ b/beacon_node/network/src/sync/manager.rs @@ -0,0 +1,725 @@ +use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::RequestId; +use eth2_libp2p::PeerId; +use slog::{debug, info, trace, warn, Logger}; +use std::collections::{HashMap, HashSet}; +use std::ops::{Add, Sub}; +use std::sync::Arc; +use types::{BeaconBlock, EthSpec, Hash256, Slot}; + +const MAX_BLOCKS_PER_REQUEST: u64 = 10; + +/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. +const SLOT_IMPORT_TOLERANCE: usize = 10; +const PARENT_FAIL_TOLERANCE: usize = 3; +const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; + +#[derive(PartialEq)] +enum BlockRequestsState { + Queued, + Pending(RequestId), + Complete, + Failed, +} + +struct BlockRequests { + target_head_slot: Slot, + target_head_root: Hash256, + downloaded_blocks: Vec>, + state: BlockRequestsState, + /// Specifies whether the current state is syncing forwards or backwards. + forward_sync: bool, + /// The current `start_slot` of the batched block request. + current_start_slot: Slot, +} + +struct ParentRequests { + downloaded_blocks: Vec>, + failed_attempts: usize, + last_submitted_peer: PeerId, // to downvote the submitting peer. 
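+ /// Whether this parent lookup is queued, pending a response, complete, or failed.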
+ state: BlockRequestsState, +} + +impl BlockRequests { + // gets the start slot for next batch + // last block slot downloaded plus 1 + fn update_start_slot(&mut self) { + if self.forward_sync { + self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); + } else { + self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); + } + self.state = BlockRequestsState::Queued; + } +} + +#[derive(PartialEq, Debug, Clone)] +enum ManagerState { + Syncing, + Regular, + Stalled, +} + +pub(crate) enum ImportManagerOutcome { + Idle, + RequestBlocks { + peer_id: PeerId, + request_id: RequestId, + request: BeaconBlocksRequest, + }, + /// Updates information with peer via requesting another HELLO handshake. + Hello(PeerId), + RecentRequest(PeerId, RecentBeaconBlocksRequest), + DownvotePeer(PeerId), +} + +pub struct ImportManager { + /// A reference to the underlying beacon chain. + chain: Arc>, + state: ManagerState, + import_queue: HashMap>, + parent_queue: Vec>, + full_peers: HashSet, + current_req_id: usize, + log: Logger, +} + +impl ImportManager { + pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + ImportManager { + chain: beacon_chain.clone(), + state: ManagerState::Regular, + import_queue: HashMap::new(), + parent_queue: Vec::new(), + full_peers: HashSet::new(), + current_req_id: 0, + log: log.clone(), + } + } + + pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { + // TODO: Improve comments. + // initially try to download blocks from our current head + // then backwards search all the way back to our finalized epoch until we match on a chain + // has to be done sequentially to find next slot to start the batch from + + let local = PeerSyncInfo::from(&self.chain); + + // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync + if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { + trace!(self.log, "Ignoring full sync with peer"; + "peer" => format!("{:?}", peer_id), + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local.head_slot, + ); + // remove the peer from the queue if it exists + self.import_queue.remove(&peer_id); + return; + } + + if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { + // update the target head slot + if remote.head_slot > block_requests.target_head_slot { + block_requests.target_head_slot = remote.head_slot; + } + } else { + let block_requests = BlockRequests { + target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called + target_head_root: remote.head_root, + downloaded_blocks: Vec::new(), + state: BlockRequestsState::Queued, + forward_sync: true, + current_start_slot: self.chain.best_slot(), + }; + self.import_queue.insert(peer_id, block_requests); + } + } + + pub fn beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + mut blocks: Vec>, + ) { + // find the request + let block_requests = match self + .import_queue + .get_mut(&peer_id) + .filter(|r| r.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, + _ => { + // No pending request, invalid request_id or coding error + warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id); + return; + } + }; + + // If we are syncing up to a target head block, at least the target head block should be + // returned. If we are syncing back to our last finalized block the request should return + // at least the last block we received (last known block). 
In diagram form: + // + // unknown blocks requested blocks downloaded blocks + // |-------------------|------------------------|------------------------| + // ^finalized slot ^ requested start slot ^ last known block ^ remote head + + if blocks.is_empty() { + debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); + block_requests.update_start_slot(); + return; + } + + // verify the range of received blocks + // Note that the order of blocks is verified in block processing + let last_sent_slot = blocks[blocks.len() - 1].slot; + if block_requests.current_start_slot > blocks[0].slot + || block_requests + .current_start_slot + .add(MAX_BLOCKS_PER_REQUEST) + < last_sent_slot + { + //TODO: Downvote peer - add a reason to failed + dbg!(&blocks); + warn!(self.log, "BeaconBlocks response returned out of range blocks"; + "request_id" => request_id, + "response_initial_slot" => blocks[0].slot, + "requested_initial_slot" => block_requests.current_start_slot); + // consider this sync failed + block_requests.state = BlockRequestsState::Failed; + return; + } + + // Determine if more blocks need to be downloaded. There are a few cases: + // - We have downloaded a batch from our head_slot, which has not reached the remotes head + // (target head). Therefore we need to download another sequential batch. + // - The latest batch includes blocks that greater than or equal to the target_head slot, + // which means we have caught up to their head. We then check to see if the first + // block downloaded matches our head. If so, we are on the same chain and can process + // the blocks. If not we need to sync back further until we are on the same chain. So + // request more blocks. + // - We are syncing backwards (from our head slot) and need to check if we are on the same + // chain. If so, process the blocks, if not, request more blocks all the way up to + // our last finalized slot. + + if block_requests.forward_sync { + // append blocks if syncing forward + block_requests.downloaded_blocks.append(&mut blocks); + } else { + // prepend blocks if syncing backwards + block_requests.downloaded_blocks.splice(..0, blocks); + } + + // does the batch contain the target_head_slot + let last_element_index = block_requests.downloaded_blocks.len() - 1; + if block_requests.downloaded_blocks[last_element_index].slot + >= block_requests.target_head_slot + || !block_requests.forward_sync + { + // if the batch is on our chain, this is complete and we can then process. + // Otherwise start backwards syncing until we reach a common chain. + let earliest_slot = block_requests.downloaded_blocks[0].slot; + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. 
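+ // `root_at_slot` (defined at the bottom of this file) walks our canonical block roots backwards to the root we hold at `earliest_slot`; a match means the downloaded batch builds on our chain.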
+ if Some(block_requests.downloaded_blocks[0].canonical_root()) + == root_at_slot(&self.chain, earliest_slot) + { + block_requests.state = BlockRequestsState::Complete; + return; + } + + // not on the same chain, request blocks backwards + let state = &self.chain.head().beacon_state; + let local_finalized_slot = state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.current_start_slot { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = BlockRequestsState::Failed; + return; + } + + // if this is a forward sync, then we have reached the head without a common chain + // and we need to start syncing backwards. + if block_requests.forward_sync { + // Start a backwards sync by requesting earlier blocks + block_requests.forward_sync = false; + block_requests.current_start_slot = std::cmp::min( + self.chain.best_slot(), + block_requests.downloaded_blocks[0].slot, + ); + } + } + + // update the start slot and re-queue the batch + block_requests.update_start_slot(); + } + + pub fn recent_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blocks: Vec>, + ) { + // find the request + let parent_request = match self + .parent_queue + .iter_mut() + .find(|request| request.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, + None => { + // No pending request, invalid request_id or coding error + warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id); + return; + } + }; + + // if an empty response is given, the peer didn't have the requested block, try again + if blocks.is_empty() { + parent_request.failed_attempts += 1; + parent_request.state = BlockRequestsState::Queued; + parent_request.last_submitted_peer = peer_id; + return; + } + + // currently only support a single block lookup. Reject any response that has more than 1 + // block + if blocks.len() != 1 { + //TODO: Potentially downvote the peer + debug!(self.log, "Peer sent more than 1 parent. 
Ignoring"; + "peer_id" => format!("{:?}", peer_id), + "no_parents" => blocks.len() + ); + return; + } + + // queue for processing + parent_request.state = BlockRequestsState::Complete; + } + + pub fn _inject_error(_peer_id: PeerId, _id: RequestId) { + //TODO: Remove block state from pending + } + + pub fn peer_disconnect(&mut self, peer_id: &PeerId) { + self.import_queue.remove(peer_id); + self.full_peers.remove(peer_id); + self.update_state(); + } + + pub fn add_full_peer(&mut self, peer_id: PeerId) { + debug!( + self.log, "Fully synced peer added"; + "peer" => format!("{:?}", peer_id), + ); + self.full_peers.insert(peer_id); + self.update_state(); + } + + pub fn add_unknown_block(&mut self, block: BeaconBlock, peer_id: PeerId) { + // if we are not in regular sync mode, ignore this block + if let ManagerState::Regular = self.state { + return; + } + + // make sure this block is not already being searched for + // TODO: Potentially store a hashset of blocks for O(1) lookups + for parent_req in self.parent_queue.iter() { + if let Some(_) = parent_req + .downloaded_blocks + .iter() + .find(|d_block| d_block == &&block) + { + // we are already searching for this block, ignore it + return; + } + } + + let req = ParentRequests { + downloaded_blocks: vec![block], + failed_attempts: 0, + last_submitted_peer: peer_id, + state: BlockRequestsState::Queued, + }; + + self.parent_queue.push(req); + } + + pub(crate) fn poll(&mut self) -> ImportManagerOutcome { + loop { + // update the state of the manager + self.update_state(); + + // process potential block requests + if let Some(outcome) = self.process_potential_block_requests() { + return outcome; + } + + // process any complete long-range batches + if let Some(outcome) = self.process_complete_batches() { + return outcome; + } + + // process any parent block lookup-requests + if let Some(outcome) = self.process_parent_requests() { + return outcome; + } + + // process any complete parent lookups + let (re_run, outcome) = self.process_complete_parent_requests(); + if let Some(outcome) = outcome { + return outcome; + } else if !re_run { + break; + } + } + + return ImportManagerOutcome::Idle; + } + + fn update_state(&mut self) { + let previous_state = self.state.clone(); + self.state = { + if !self.import_queue.is_empty() { + ManagerState::Syncing + } else if !self.full_peers.is_empty() { + ManagerState::Regular + } else { + ManagerState::Stalled + } + }; + if self.state != previous_state { + info!(self.log, "Syncing state updated"; + "old_state" => format!("{:?}", previous_state), + "new_state" => format!("{:?}", self.state), + ); + } + } + + fn process_potential_block_requests(&mut self) -> Option { + // check if an outbound request is required + // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p + // layer and not needed here. + // If any in queued state we submit a request. 
+ + // remove any failed batches + let debug_log = &self.log; + self.import_queue.retain(|peer_id, block_request| { + if let BlockRequestsState::Failed = block_request.state { + debug!(debug_log, "Block import from peer failed"; + "peer_id" => format!("{:?}", peer_id), + "downloaded_blocks" => block_request.downloaded_blocks.len() + ); + false + } else { + true + } + }); + + // process queued block requests + for (peer_id, block_requests) in self + .import_queue + .iter_mut() + .find(|(_peer_id, req)| req.state == BlockRequestsState::Queued) + { + let request_id = self.current_req_id; + block_requests.state = BlockRequestsState::Pending(request_id); + self.current_req_id += 1; + + let request = BeaconBlocksRequest { + head_block_root: block_requests.target_head_root, + start_slot: block_requests.current_start_slot.as_u64(), + count: MAX_BLOCKS_PER_REQUEST, + step: 0, + }; + return Some(ImportManagerOutcome::RequestBlocks { + peer_id: peer_id.clone(), + request, + request_id, + }); + } + + None + } + + fn process_complete_batches(&mut self) -> Option { + let completed_batches = self + .import_queue + .iter() + .filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete) + .map(|(peer, _)| peer) + .cloned() + .collect::>(); + for peer_id in completed_batches { + let block_requests = self.import_queue.remove(&peer_id).expect("key exists"); + match self.process_blocks(block_requests.downloaded_blocks.clone()) { + Ok(()) => { + //TODO: Verify it's impossible to have empty downloaded_blocks + let last_element = block_requests.downloaded_blocks.len() - 1; + debug!(self.log, "Blocks processed successfully"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + ); + // Re-HELLO to ensure we are up to the latest head + return Some(ImportManagerOutcome::Hello(peer_id)); + } + Err(e) => { + let last_element = block_requests.downloaded_blocks.len() - 1; + warn!(self.log, "Block processing failed"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + return Some(ImportManagerOutcome::DownvotePeer(peer_id)); + } + } + } + None + } + + fn process_parent_requests(&mut self) -> Option { + // remove any failed requests + let debug_log = &self.log; + self.parent_queue.retain(|parent_request| { + if parent_request.state == BlockRequestsState::Failed { + debug!(debug_log, "Parent import failed"; + "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), + "ancestors_found" => parent_request.downloaded_blocks.len() + ); + false + } else { + true + } + }); + + // check to make sure there are peers to search for the parent from + if self.full_peers.is_empty() { + return None; + } + + // check if parents need to be searched for + for parent_request in self.parent_queue.iter_mut() { + if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { + parent_request.state = BlockRequestsState::Failed; + continue; + } else if parent_request.state == BlockRequestsState::Queued { + // check the depth isn't too large + if parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + parent_request.state = BlockRequestsState::Failed; + continue; + } + + parent_request.state = BlockRequestsState::Pending(self.current_req_id); + 
self.current_req_id += 1; + let last_element_index = parent_request.downloaded_blocks.len() - 1; + let parent_hash = parent_request.downloaded_blocks[last_element_index].parent_root; + let req = RecentBeaconBlocksRequest { + block_roots: vec![parent_hash], + }; + + // select a random fully synced peer to attempt to download the parent block + let peer_id = self.full_peers.iter().next().expect("List is not empty"); + + return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); + } + } + + None + } + + fn process_complete_parent_requests(&mut self) -> (bool, Option) { + // flag to determine if there is more process to drive or if the manager can be switched to + // an idle state + let mut re_run = false; + + // Find any parent_requests ready to be processed + for completed_request in self + .parent_queue + .iter_mut() + .filter(|req| req.state == BlockRequestsState::Complete) + { + // verify the last added block is the parent of the last requested block + let last_index = completed_request.downloaded_blocks.len() - 1; + let expected_hash = completed_request.downloaded_blocks[last_index].parent_root; + // Note: the length must be greater than 1 so this cannot panic. + let block_hash = completed_request.downloaded_blocks[last_index - 1].canonical_root(); + if block_hash != expected_hash { + // remove the head block + let _ = completed_request.downloaded_blocks.pop(); + completed_request.state = BlockRequestsState::Queued; + //TODO: Potentially downvote the peer + let peer = completed_request.last_submitted_peer.clone(); + debug!(self.log, "Peer sent invalid parent. Ignoring"; + "peer_id" => format!("{:?}",peer), + "received_block" => format!("{}", block_hash), + "expected_parent" => format!("{}", expected_hash), + ); + return (true, Some(ImportManagerOutcome::DownvotePeer(peer))); + } + + // try and process the list of blocks up to the requested block + while !completed_request.downloaded_blocks.is_empty() { + let block = completed_request + .downloaded_blocks + .pop() + .expect("Block must exist exist"); + match self.chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { + // need to keep looking for parents + completed_request.downloaded_blocks.push(block); + completed_request.state = BlockRequestsState::Queued; + re_run = true; + break; + } + Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} + Ok(outcome) => { + // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts += 1; + trace!( + self.log, "Invalid parent block"; + "outcome" => format!("{:?}", outcome), + "peer" => format!("{:?}", completed_request.last_submitted_peer), + ); + completed_request.state = BlockRequestsState::Queued; + re_run = true; + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); + } + Err(e) => { + completed_request.failed_attempts += 1; + warn!( + self.log, "Parent processing error"; + "error" => format!("{:?}", e) + ); + completed_request.state = BlockRequestsState::Queued; + re_run = true; + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); + } + } + } + } + + // remove any full completed and processed parent chains + self.parent_queue.retain(|req| { + if req.state == BlockRequestsState::Complete { + false + } else { + true + } + }); + (re_run, None) + } + + fn process_blocks(&mut self, blocks: Vec>) -> Result<(), String> { + for block in blocks { + let 
processing_result = self.chain.process_block(block.clone()); + + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. + trace!( + self.log, "Imported block from network"; + "slot" => block.slot, + "block_root" => format!("{}", block_root), + ); + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + self.log, "ParentBlockUnknown"; + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!( + "Block at slot {} has an unknown parent.", + block.slot + )); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + trace!( + self.log, "FutureBlock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + return Err(format!( + "Block at slot {} is too far in the future", + block.slot + )); + } else { + // The block is in the future, but not too far. + trace!( + self.log, "QueuedFutureBlock"; + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } + } + BlockProcessingOutcome::FinalizedSlot => { + trace!( + self.log, "Finalized or earlier block processed"; + "outcome" => format!("{:?}", outcome), + ); + // block reached our finalized slot or was earlier, move to the next block + } + _ => { + trace!( + self.log, "InvalidBlock"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } + } + } else { + trace!( + self.log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!( + "Unexpected block processing error: {:?}", + processing_result + )); + } + } + Ok(()) + } +} + +fn root_at_slot( + chain: &Arc>, + target_slot: Slot, +) -> Option { + chain + .rev_iter_block_roots() + .find(|(_root, slot)| *slot == target_slot) + .map(|(root, _slot)| root) +} diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index fac1b46eb..b26d78c14 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -1,4 +1,4 @@ -mod import_queue; +mod manager; /// Syncing for lighthouse. /// /// Stores the various syncing methods for the beacon chain. 
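The new `ImportManager` above drives long-range sync in fixed-size batches: each `BeaconBlocksRequest` covers `MAX_BLOCKS_PER_REQUEST` slots starting at `current_start_slot`, the window slides forwards (or backwards while searching for a common chain), and responses outside the window mark the batch as failed. The following is a minimal sketch of that windowing, not part of the diff: it uses plain `u64` slots instead of the `Slot` type and a standalone `BatchWindow` struct invented here purely for illustration.

const MAX_BLOCKS_PER_REQUEST: u64 = 10;

struct BatchWindow {
    forward_sync: bool,
    current_start_slot: u64,
}

impl BatchWindow {
    // Move the request window one batch forwards or backwards, mirroring
    // `BlockRequests::update_start_slot` above.
    fn update_start_slot(&mut self) {
        if self.forward_sync {
            self.current_start_slot += MAX_BLOCKS_PER_REQUEST;
        } else {
            // The real code subtracts directly; saturating here just keeps the sketch safe.
            self.current_start_slot = self.current_start_slot.saturating_sub(MAX_BLOCKS_PER_REQUEST);
        }
    }

    // A response is in range when its first block is not before the window start and its
    // last block is not past the window end (the check made in `beacon_blocks_response`).
    fn response_in_range(&self, first_slot: u64, last_slot: u64) -> bool {
        self.current_start_slot <= first_slot
            && last_slot <= self.current_start_slot + MAX_BLOCKS_PER_REQUEST
    }
}

fn main() {
    let mut window = BatchWindow { forward_sync: true, current_start_slot: 100 };
    assert!(window.response_in_range(100, 109));
    window.update_start_slot();
    assert_eq!(window.current_start_slot, 110);
}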
diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 3cf43b7a0..573ac9dd1 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,52 +1,44 @@ -use super::import_queue::{ImportQueue, PartialBeaconBlockCompletion}; -use crate::message_handler::NetworkContext; +use super::manager::{ImportManager, ImportManagerOutcome}; +use crate::service::{NetworkMessage, OutgoingMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; +use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; -use slog::{debug, error, info, o, trace, warn}; +use slog::{debug, info, o, trace, warn}; use ssz::Encode; -use std::collections::HashMap; +use std::ops::Sub; use std::sync::Arc; -use std::time::Duration; use store::Store; -use types::{ - Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, -}; - -/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. -const SLOT_IMPORT_TOLERANCE: u64 = 100; - -/// The amount of seconds a block (or partial block) may exist in the import queue. -const QUEUE_STALE_SECS: u64 = 100; +use tokio::sync::mpsc; +use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. -const FUTURE_SLOT_TOLERANCE: u64 = 1; +pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; +/// The number of slots behind our head that we still treat a peer as a fully synced peer. +const FULL_PEER_TOLERANCE: u64 = 10; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; /// Keeps track of syncing information for known connected peers. #[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { - network_id: u8, - chain_id: u64, - latest_finalized_root: Hash256, - latest_finalized_epoch: Epoch, - best_root: Hash256, - best_slot: Slot, + fork_version: [u8; 4], + pub finalized_root: Hash256, + pub finalized_epoch: Epoch, + pub head_root: Hash256, + pub head_slot: Slot, } impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { - network_id: hello.network_id, - chain_id: hello.chain_id, - latest_finalized_root: hello.latest_finalized_root, - latest_finalized_epoch: hello.latest_finalized_epoch, - best_root: hello.best_root, - best_slot: hello.best_slot, + fork_version: hello.fork_version, + finalized_root: hello.finalized_root, + finalized_epoch: hello.finalized_epoch, + head_root: hello.head_root, + head_slot: hello.head_slot, } } } @@ -60,8 +52,8 @@ impl From<&Arc>> for PeerSyncInfo { /// The current syncing state. #[derive(PartialEq)] pub enum SyncState { - Idle, - Downloading, + _Idle, + _Downloading, _Stopped, } @@ -69,64 +61,41 @@ pub enum SyncState { pub struct SimpleSync { /// A reference to the underlying beacon chain. chain: Arc>, - /// A mapping of Peers to their respective PeerSyncInfo. - known_peers: HashMap, - /// A queue to allow importing of blocks - import_queue: ImportQueue, - /// The current state of the syncing protocol. - state: SyncState, + manager: ImportManager, + network: NetworkContext, log: slog::Logger, } impl SimpleSync { /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. 
- pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + pub fn new( + beacon_chain: Arc>, + network_send: mpsc::UnboundedSender, + log: &slog::Logger, + ) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); - let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS); - - let import_queue = - ImportQueue::new(beacon_chain.clone(), queue_item_stale_time, log.clone()); SimpleSync { chain: beacon_chain.clone(), - known_peers: HashMap::new(), - import_queue, - state: SyncState::Idle, + manager: ImportManager::new(beacon_chain, log), + network: NetworkContext::new(network_send, log.clone()), log: sync_logger, } } - /// Handle a `Goodbye` message from a peer. - /// - /// Removes the peer from `known_peers`. - pub fn on_goodbye(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - info!( - self.log, "PeerGoodbye"; - "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", reason), - ); - - self.known_peers.remove(&peer_id); - } - /// Handle a peer disconnect. /// - /// Removes the peer from `known_peers`. + /// Removes the peer from the manager. pub fn on_disconnect(&mut self, peer_id: PeerId) { - info!( - self.log, "Peer Disconnected"; - "peer" => format!("{:?}", peer_id), - ); - self.known_peers.remove(&peer_id); + self.manager.peer_disconnect(&peer_id); } /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. - pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); - - network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + pub fn on_connect(&mut self, peer_id: PeerId) { + self.network + .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. @@ -137,73 +106,64 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, ) { - debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. - network.send_rpc_response( + self.network.send_rpc_response( peer_id.clone(), request_id, RPCResponse::Hello(hello_message(&self.chain)), ); - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` response from a peer. - pub fn on_hello_response( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { - debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); + pub fn on_hello_response(&mut self, peer_id: PeerId, hello: HelloMessage) { + trace!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); // Process the hello message, without sending back another hello. - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` message, requesting new blocks if appropriate. /// /// Disconnects the peer if required. - fn process_hello( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { + fn process_hello(&mut self, peer_id: PeerId, hello: HelloMessage) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - if local.network_id != remote.network_id { - // The node is on a different network, disconnect them. 
- info!( + if local.fork_version != remote.fork_version { + // The node is on a different network/fork, disconnect them. + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "network_id" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch - && remote.latest_finalized_root != Hash256::zero() - && local.latest_finalized_root != Hash256::zero() - && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) - != Some(remote.latest_finalized_root)) + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch <= local.finalized_epoch + && remote.finalized_root != Hash256::zero() + && local.finalized_root != Hash256::zero() + && (self.root_at_slot(start_slot(remote.finalized_epoch)) + != Some(remote.finalized_root)) { // The remotes finalized epoch is less than or greater than ours, but the block root is // different to the one in our chain. // // Therefore, the node is on a different chain and we should not communicate with them. - info!( + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch < local.latest_finalized_epoch { + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch < local.finalized_epoch { // The node has a lower finalized epoch, their chain is not useful to us. There are two // cases where a node can have a lower finalized epoch: // @@ -226,16 +186,21 @@ impl SimpleSync { } else if self .chain .store - .exists::>(&remote.best_root) + .exists::>(&remote.head_root) .unwrap_or_else(|_| false) { - // If the node's best-block is already known to us, we have nothing to request. - debug!( - self.log, - "NaivePeer"; - "peer" => format!("{:?}", peer_id), - "reason" => "best block is known" - ); + // If the node's best-block is already known to us and they are close to our current + // head, treat them as a fully sync'd peer. + if self.chain.best_slot().sub(remote.head_slot).as_u64() < FULL_PEER_TOLERANCE { + self.manager.add_full_peer(peer_id); + self.process_sync(); + } else { + debug!( + self.log, + "Out of sync peer connected"; + "peer" => format!("{:?}", peer_id), + ); + } } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
// @@ -244,26 +209,83 @@ impl SimpleSync { debug!( self.log, "UsefulPeer"; "peer" => format!("{:?}", peer_id), - "local_finalized_epoch" => local.latest_finalized_epoch, - "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, + "local_finalized_epoch" => local.finalized_epoch, + "remote_latest_finalized_epoch" => remote.finalized_epoch, ); - let start_slot = local - .latest_finalized_epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let required_slots = remote.best_slot - start_slot; - - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - start_slot, - count: required_slots.as_u64(), - }, - network, - ); + self.manager.add_peer(peer_id, remote); + self.process_sync(); } } + fn process_sync(&mut self) { + loop { + match self.manager.poll() { + ImportManagerOutcome::Hello(peer_id) => { + trace!( + self.log, + "RPC Request"; + "method" => "HELLO", + "peer" => format!("{:?}", peer_id) + ); + self.network.send_rpc_request( + None, + peer_id, + RPCRequest::Hello(hello_message(&self.chain)), + ); + } + ImportManagerOutcome::RequestBlocks { + peer_id, + request_id, + request, + } => { + trace!( + self.log, + "RPC Request"; + "method" => "BeaconBlocks", + "id" => request_id, + "count" => request.count, + "peer" => format!("{:?}", peer_id) + ); + self.network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::BeaconBlocks(request), + ); + } + ImportManagerOutcome::RecentRequest(peer_id, req) => { + trace!( + self.log, + "RPC Request"; + "method" => "RecentBeaconBlocks", + "count" => req.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + self.network.send_rpc_request( + None, + peer_id.clone(), + RPCRequest::RecentBeaconBlocks(req), + ); + } + ImportManagerOutcome::DownvotePeer(peer_id) => { + trace!( + self.log, + "Peer downvoted"; + "peer" => format!("{:?}", peer_id) + ); + // TODO: Implement reputation + self.network + .disconnect(peer_id.clone(), GoodbyeReason::Fault); + } + ImportManagerOutcome::Idle => { + // nothing to do + return; + } + } + } + } + + //TODO: Move to beacon chain fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() @@ -271,213 +293,19 @@ impl SimpleSync { .map(|(root, _slot)| root) } - /// Handle a `BeaconBlockRoots` request from the peer. - pub fn on_beacon_block_roots_request( + /// Handle a `RecentBeaconBlocks` request from the peer. 
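+ /// Any requested root found in the local store is returned; missing roots are skipped and the response is the SSZ-encoded list of the blocks that were found.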
+ pub fn on_recent_beacon_blocks_request( &mut self, peer_id: PeerId, request_id: RequestId, - req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + request: RecentBeaconBlocksRequest, ) { - debug!( - self.log, - "BlockRootsRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.count, - "start_slot" => req.start_slot, - ); - - let mut roots: Vec = self - .chain - .rev_iter_block_roots() - .take_while(|(_root, slot)| req.start_slot <= *slot) - .filter(|(_root, slot)| *slot < req.start_slot + req.count) - .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) - .collect(); - - if roots.len() as u64 != req.count { - debug!( - self.log, - "BlockRootsRequest"; - "peer" => format!("{:?}", peer_id), - "msg" => "Failed to return all requested hashes", - "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), - "requested" => req.count, - "returned" => roots.len(), - ); - } - - roots.reverse(); - roots.dedup_by_key(|brs| brs.block_root); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots }), - ) - } - - /// Handle a `BeaconBlockRoots` response from the peer. - pub fn on_beacon_block_roots_response( - &mut self, - peer_id: PeerId, - res: BeaconBlockRootsResponse, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockRootsResponse"; - "peer" => format!("{:?}", peer_id), - "count" => res.roots.len(), - ); - - if res.roots.is_empty() { - warn!( - self.log, - "Peer returned empty block roots response"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - // The wire protocol specifies that slots must be in ascending order. - if !res.slots_are_ascending() { - warn!( - self.log, - "Peer returned block roots response with bad slot ordering"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - let new_roots = self - .import_queue - .enqueue_block_roots(&res.roots, peer_id.clone()); - - // No new roots means nothing to do. - // - // This check protects against future panics. - if new_roots.is_empty() { - return; - } - - // Determine the first (earliest) and last (latest) `BlockRootSlot` items. - // - // This logic relies upon slots to be in ascending order, which is enforced earlier. - let first = new_roots.first().expect("Non-empty list must have first"); - let last = new_roots.last().expect("Non-empty list must have last"); - - // Request all headers between the earliest and latest new `BlockRootSlot` items. - self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: first.block_root, - start_slot: first.slot, - max_headers: (last.slot - first.slot + 1).as_u64(), - skip_slots: 0, - }, - network, - ) - } - - /// Handle a `BeaconBlockHeaders` request from the peer. - pub fn on_beacon_block_headers_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockHeadersRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.max_headers, - ); - - let count = req.max_headers; - - // Collect the block roots. 
- let mut roots: Vec = self - .chain - .rev_iter_block_roots() - .take_while(|(_root, slot)| req.start_slot <= *slot) - .filter(|(_root, slot)| *slot < req.start_slot + count) - .map(|(root, _slot)| root) - .collect(); - - roots.reverse(); - roots.dedup(); - - let headers: Vec = roots - .into_iter() - .step_by(req.skip_slots as usize + 1) - .filter_map(|root| { - let block = self - .chain - .store - .get::>(&root) - .ok()?; - Some(block?.block_header()) - }) - .collect(); - - // ssz-encode the headers - let headers = headers.as_ssz_bytes(); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers }), - ) - } - - /// Handle a `BeaconBlockHeaders` response from the peer. - pub fn on_beacon_block_headers_response( - &mut self, - peer_id: PeerId, - headers: Vec, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockHeadersResponse"; - "peer" => format!("{:?}", peer_id), - "count" => headers.len(), - ); - - if headers.is_empty() { - warn!( - self.log, - "Peer returned empty block headers response. PeerId: {:?}", peer_id - ); - return; - } - - // Enqueue the headers, obtaining a list of the roots of the headers which were newly added - // to the queue. - let block_roots = self.import_queue.enqueue_headers(headers, peer_id.clone()); - - if !block_roots.is_empty() { - self.request_block_bodies(peer_id, BeaconBlockBodiesRequest { block_roots }, network); - } - } - - /// Handle a `BeaconBlockBodies` request from the peer. - pub fn on_beacon_block_bodies_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - let block_bodies: Vec> = req + let blocks: Vec> = request .block_roots .iter() .filter_map(|root| { if let Ok(Some(block)) = self.chain.store.get::>(root) { - Some(block.body) + Some(block) } else { debug!( self.log, @@ -495,55 +323,121 @@ impl SimpleSync { self.log, "BlockBodiesRequest"; "peer" => format!("{:?}", peer_id), - "requested" => req.block_roots.len(), - "returned" => block_bodies.len(), + "requested" => request.block_roots.len(), + "returned" => blocks.len(), ); - let bytes = block_bodies.as_ssz_bytes(); - - network.send_rpc_response( + self.network.send_rpc_response( peer_id, request_id, - RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { - block_bodies: bytes, - block_roots: None, - }), + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), ) } - /// Handle a `BeaconBlockBodies` response from the peer. - pub fn on_beacon_block_bodies_response( + /// Handle a `BeaconBlocks` request from the peer. + pub fn on_beacon_blocks_request( &mut self, peer_id: PeerId, - res: DecodedBeaconBlockBodiesResponse, - network: &mut NetworkContext, + request_id: RequestId, + req: BeaconBlocksRequest, ) { debug!( self.log, - "BlockBodiesResponse"; + "BeaconBlocksRequest"; "peer" => format!("{:?}", peer_id), - "count" => res.block_bodies.len(), + "count" => req.count, + "start_slot" => req.start_slot, ); - if !res.block_bodies.is_empty() { - // Import all blocks to queue - let last_root = self - .import_queue - .enqueue_bodies(res.block_bodies, peer_id.clone()); + //TODO: Optimize this + // Currently for skipped slots, the blocks returned could be less than the requested range. + // In the current implementation we read from the db then filter out out-of-range blocks. + // Improving the db schema to prevent this would be ideal. 
- // Attempt to process all received bodies by recursively processing the latest block - if let Some(root) = last_root { - if let Some(BlockProcessingOutcome::Processed { .. }) = - self.attempt_process_partial_block(peer_id, root, network, &"rpc") - { - // If processing is successful remove from `import_queue` - self.import_queue.remove(root); + let mut blocks: Vec> = self + .chain + .rev_iter_block_roots() + .filter(|(_root, slot)| { + req.start_slot <= slot.as_u64() && req.start_slot + req.count > slot.as_u64() + }) + .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) + .filter_map(|(root, _slot)| { + if let Ok(Some(block)) = self.chain.store.get::>(&root) { + Some(block) + } else { + warn!( + self.log, + "Block in the chain is not in the store"; + "request_root" => format!("{:}", root), + ); + + None } - } + }) + .filter(|block| block.slot >= req.start_slot) + .collect(); + + blocks.reverse(); + blocks.dedup_by_key(|brs| brs.slot); + + if blocks.len() as u64 != req.count { + debug!( + self.log, + "BeaconBlocksRequest response"; + "peer" => format!("{:?}", peer_id), + "msg" => "Failed to return all requested hashes", + "start_slot" => req.start_slot, + "current_slot" => self.chain.present_slot(), + "requested" => req.count, + "returned" => blocks.len(), + ); } - // Clear out old entries - self.import_queue.remove_stale(); + self.network.send_rpc_response( + peer_id, + request_id, + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), + ) + } + + /// Handle a `BeaconBlocks` response from the peer. + pub fn on_beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + beacon_blocks: Vec>, + ) { + debug!( + self.log, + "BeaconBlocksResponse"; + "peer" => format!("{:?}", peer_id), + "count" => beacon_blocks.len(), + ); + + self.manager + .beacon_blocks_response(peer_id, request_id, beacon_blocks); + + self.process_sync(); + } + + /// Handle a `RecentBeaconBlocks` response from the peer. + pub fn on_recent_beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + beacon_blocks: Vec>, + ) { + debug!( + self.log, + "BeaconBlocksResponse"; + "peer" => format!("{:?}", peer_id), + "count" => beacon_blocks.len(), + ); + + self.manager + .recent_blocks_response(peer_id, request_id, beacon_blocks); + + self.process_sync(); } /// Process a gossip message declaring a new block. @@ -551,65 +445,28 @@ impl SimpleSync { /// Attempts to apply to block to the beacon chain. May queue the block for later processing. /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. - pub fn on_block_gossip( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - network: &mut NetworkContext, - ) -> bool { - if let Some(outcome) = - self.process_block(peer_id.clone(), block.clone(), network, &"gossip") - { + pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { + if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { - BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, - BlockProcessingOutcome::ParentUnknown { parent } => { - // Add this block to the queue - self.import_queue - .enqueue_full_blocks(vec![block.clone()], peer_id.clone()); - debug!( - self.log, "RequestParentBlock"; - "parent_root" => format!("{}", parent), - "parent_slot" => block.slot - 1, - "peer" => format!("{:?}", peer_id), - ); - - // Request roots between parent and start of finality from peer. 
- let start_slot = self - .chain - .head() - .beacon_state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - // Request blocks between `latest_finalized_slot` and the `block` - start_slot, - count: block.slot.as_u64() - start_slot.as_u64(), - }, - network, - ); - - // Clean the stale entries from the queue. - self.import_queue.remove_stale(); - + BlockProcessingOutcome::Processed { .. } => { + trace!(self.log, "Gossipsub block processed"; + "peer_id" => format!("{:?}",peer_id)); + SHOULD_FORWARD_GOSSIP_BLOCK + } + BlockProcessingOutcome::ParentUnknown { parent: _ } => { + // Inform the sync manager to find parents for this block + trace!(self.log, "Unknown parent gossip"; + "peer_id" => format!("{:?}",peer_id)); + self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK } - BlockProcessingOutcome::FutureSlot { present_slot, block_slot, } if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => { - self.import_queue - .enqueue_full_blocks(vec![block], peer_id.clone()); - + //TODO: Decide the logic here SHOULD_FORWARD_GOSSIP_BLOCK } - // Note: known blocks are forwarded on the gossip network. - // - // We rely upon the lower layers (libp2p) to stop loops occurring from re-gossiped - // blocks. BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, } @@ -621,12 +478,7 @@ impl SimpleSync { /// Process a gossip message declaring a new attestation. /// /// Not currently implemented. - pub fn on_attestation_gossip( - &mut self, - _peer_id: PeerId, - msg: Attestation, - _network: &mut NetworkContext, - ) { + pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { match self.chain.process_attestation(msg) { Ok(outcome) => info!( self.log, @@ -640,289 +492,79 @@ impl SimpleSync { } } - /// Request some `BeaconBlockRoots` from the remote peer. - fn request_block_roots( - &mut self, - peer_id: PeerId, - req: BeaconBlockRootsRequest, - network: &mut NetworkContext, - ) { - // Potentially set state to sync. - if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { - debug!(self.log, "Entering downloading sync state."); - self.state = SyncState::Downloading; - } - - debug!( - self.log, - "RPCRequest(BeaconBlockRoots)"; - "count" => req.count, - "peer" => format!("{:?}", peer_id) - ); - - // TODO: handle count > max count. - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockRoots(req)); - } - - /// Request some `BeaconBlockHeaders` from the remote peer. - fn request_block_headers( - &mut self, - peer_id: PeerId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockHeaders)"; - "max_headers" => req.max_headers, - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req)); - } - - /// Request some `BeaconBlockBodies` from the remote peer. - fn request_block_bodies( - &mut self, - peer_id: PeerId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockBodies)"; - "count" => req.block_roots.len(), - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockBodies(req)); - } - - /// Returns `true` if `self.chain` has not yet processed this block. 
- pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { - !self - .chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - false - }) - } - /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { hello_message(&self.chain) } - - /// Helper function to attempt to process a partial block. - /// - /// If the block can be completed recursively call `process_block` - /// else request missing parts. - fn attempt_process_partial_block( - &mut self, - peer_id: PeerId, - block_root: Hash256, - network: &mut NetworkContext, - source: &str, - ) -> Option { - match self.import_queue.attempt_complete_block(block_root) { - PartialBeaconBlockCompletion::MissingBody => { - // Unable to complete the block because the block body is missing. - debug!( - self.log, "RequestParentBody"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block body from the peer. - self.request_block_bodies( - peer_id, - BeaconBlockBodiesRequest { - block_roots: vec![block_root], - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingHeader(slot) => { - // Unable to complete the block because the block header is missing. - debug!( - self.log, "RequestParentHeader"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block header from the peer. - self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: block_root, - start_slot: slot, - max_headers: 1, - skip_slots: 0, - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingRoot => { - // The `block_root` is not known to the queue. - debug!( - self.log, "MissingParentRoot"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Do nothing. - - None - } - PartialBeaconBlockCompletion::Complete(block) => { - // The block exists in the queue, attempt to process it - trace!( - self.log, "AttemptProcessParent"; - "source" => source, - "block_root" => format!("{}", block_root), - "parent_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - self.process_block(peer_id.clone(), block, network, source) - } - } - } - - /// Processes the `block` that was received from `peer_id`. - /// - /// If the block was submitted to the beacon chain without internal error, `Some(outcome)` is - /// returned, otherwise `None` is returned. Note: `Some(_)` does not necessarily indicate that - /// the block was successfully processed or valid. - /// - /// This function performs the following duties: - /// - /// - Attempting to import the block into the beacon chain. - /// - Logging - /// - Requesting unavailable blocks (e.g., if parent is unknown). - /// - Disconnecting faulty nodes. - /// - /// This function does not remove processed blocks from the import queue. - fn process_block( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - network: &mut NetworkContext, - source: &str, - ) -> Option { - let processing_result = self.chain.process_block(block.clone()); - - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. 
- debug!( - self.log, "Imported block from network"; - "source" => source, - "slot" => block.slot, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // The parent has not been processed - trace!( - self.log, "ParentBlockUnknown"; - "source" => source, - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - // If the parent is in the `import_queue` attempt to complete it then process it. - // All other cases leave `parent` in `import_queue` and return original outcome. - if let Some(BlockProcessingOutcome::Processed { .. }) = - self.attempt_process_partial_block(peer_id, parent, network, source) - { - // If processing parent is successful, re-process block and remove parent from queue - self.import_queue.remove(parent); - - // Attempt to process `block` again - match self.chain.process_block(block) { - Ok(outcome) => return Some(outcome), - Err(_) => return None, - } - } - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - warn!( - self.log, "FutureBlock"; - "source" => source, - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - network.disconnect(peer_id, GoodbyeReason::Fault); - } else { - // The block is in the future, but not too far. - debug!( - self.log, "QueuedFutureBlock"; - "source" => source, - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - } - } - _ => { - debug!( - self.log, "InvalidBlock"; - "source" => source, - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - } - } - - Some(outcome) - } else { - error!( - self.log, "BlockProcessingFailure"; - "source" => source, - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - - None - } - } } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { - let spec = &beacon_chain.spec; let state = &beacon_chain.head().beacon_state; HelloMessage { - network_id: spec.network_id, - //TODO: Correctly define the chain id - chain_id: spec.network_id as u64, - latest_finalized_root: state.finalized_checkpoint.root, - latest_finalized_epoch: state.finalized_checkpoint.epoch, - best_root: beacon_chain.head().beacon_block_root, - best_slot: state.slot, + fork_version: state.fork.current_version, + finalized_root: state.finalized_checkpoint.root, + finalized_epoch: state.finalized_checkpoint.epoch, + head_root: beacon_chain.head().beacon_block_root, + head_slot: state.slot, + } +} + +/// Wraps a Network Channel to employ various RPC/Sync related network functionality. +pub struct NetworkContext { + /// The network channel to relay messages to the Network service. + network_send: mpsc::UnboundedSender, + /// Logger for the `NetworkContext`. 
+ log: slog::Logger, +} + +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { + Self { network_send, log } + } + + pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) + // TODO: disconnect peers. + } + + pub fn send_rpc_request( + &mut self, + request_id: Option, + peer_id: PeerId, + rpc_request: RPCRequest, + ) { + // use 0 as the default request id, when an ID is not required. + let request_id = request_id.unwrap_or_else(|| 0); + self.send_rpc_event(peer_id, RPCEvent::Request(request_id, rpc_request)); + } + + //TODO: Handle Error responses + pub fn send_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + rpc_response: RPCResponse, + ) { + self.send_rpc_event( + peer_id, + RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), + ); + } + + fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + self.send(peer_id, OutgoingMessage::RPC(rpc_event)) + } + + fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { + self.network_send + .try_send(NetworkMessage::Send(peer_id, outgoing_message)) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send RPC message to the network service" + ) + }); } } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index f442e247d..dff3f8d70 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::Topic; -use eth2_libp2p::BEACON_ATTESTATION_TOPIC; +use eth2_libp2p::{BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -144,7 +144,11 @@ impl AttestationService for AttestationServiceInstance { ); // valid attestation, propagate to the network - let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); self.network_chan diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b1a67399e..92a543ef3 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::BEACON_BLOCK_TOPIC; use eth2_libp2p::{PubsubMessage, Topic}; +use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -105,8 +105,12 @@ impl BeaconBlockService for BeaconBlockServiceInstance { "block_root" => format!("{}", block_root), ); - // get the network topic to send on - let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub. 
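Editorial note: both RPC services above now publish to namespaced gossipsub topics of the form "/<prefix>/<topic>/<encoding>" instead of the bare topic name. The following is a minimal standalone sketch of that convention only; the concrete constant values ("eth2", "beacon_block", "ssz") are assumptions for illustration and live in eth2_libp2p, not in this diff.

// Sketch of the topic-string convention used by the RPC changes above.
// The constant values here are assumed for the example, not taken from this diff.
const TOPIC_PREFIX: &str = "eth2";
const BEACON_BLOCK_TOPIC: &str = "beacon_block";
const TOPIC_ENCODING_POSTFIX: &str = "ssz";

/// Builds the full gossipsub topic string, e.g. "/eth2/beacon_block/ssz".
fn full_topic_string(topic: &str) -> String {
    format!("/{}/{}/{}", TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX)
}

fn main() {
    // Under the assumed constants this prints "/eth2/beacon_block/ssz".
    println!("{}", full_topic_string(BEACON_BLOCK_TOPIC));
}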
diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs
index 243e4b716..797217af0 100644
--- a/beacon_node/src/main.rs
+++ b/beacon_node/src/main.rs
@@ -7,7 +7,6 @@ use env_logger::{Builder, Env};
 use slog::{crit, o, warn, Drain, Level};
 
 pub const DEFAULT_DATA_DIR: &str = ".lighthouse";
-
 pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml";
 pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
 pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml";
@@ -183,6 +182,6 @@ fn main() {
                 .takes_value(true),
         )
         /*
         * The "testnet" sub-command.
         *
         * Allows for creating a new datadir with testnet-specific configs.