From 40c0b70b22de83cb4fea86250397fa568d08dbc9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 24 Jul 2019 21:31:49 +1000 Subject: [PATCH 01/24] Add interop chain spec and rename chain_id --- beacon_node/http_server/src/api.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 10 +++---- beacon_node/rpc/src/beacon_node.rs | 2 +- beacon_node/src/main.rs | 3 +- beacon_node/src/run.rs | 18 +++++++++++- .../src/beacon_state/beacon_state_types.rs | 20 +++++++++++++ eth2/types/src/chain_spec.rs | 28 +++++++++++++++++-- protos/src/services.proto | 2 +- tests/ef_tests/eth2.0-spec-tests | 2 +- validator_client/src/main.rs | 9 ++++-- validator_client/src/service.rs | 6 ++-- 11 files changed, 83 insertions(+), 19 deletions(-) diff --git a/beacon_node/http_server/src/api.rs b/beacon_node/http_server/src/api.rs index a91080899..8cb023b02 100644 --- a/beacon_node/http_server/src/api.rs +++ b/beacon_node/http_server/src/api.rs @@ -64,7 +64,7 @@ fn handle_fork(req: &mut Request) -> IronResult(beacon_chain: &BeaconChain) -> HelloMes let state = &beacon_chain.head().beacon_state; HelloMessage { - //TODO: Correctly define the chain/network id - network_id: spec.chain_id, - chain_id: u64::from(spec.chain_id), - latest_finalized_root: state.finalized_checkpoint.root, - latest_finalized_epoch: state.finalized_checkpoint.epoch, + network_id: spec.network_id, + //TODO: Correctly define the chain id + chain_id: spec.network_id as u64, + latest_finalized_root: state.finalized_root, + latest_finalized_epoch: state.finalized_epoch, best_root: beacon_chain.head().beacon_block_root, best_slot: state.slot, } diff --git a/beacon_node/rpc/src/beacon_node.rs b/beacon_node/rpc/src/beacon_node.rs index 631601ac9..5d635c9d1 100644 --- a/beacon_node/rpc/src/beacon_node.rs +++ b/beacon_node/rpc/src/beacon_node.rs @@ -37,7 +37,7 @@ impl BeaconNodeService for BeaconNodeServiceInstance { node_info.set_fork(fork); node_info.set_genesis_time(genesis_time); node_info.set_genesis_slot(spec.genesis_slot.as_u64()); - node_info.set_chain_id(u32::from(spec.chain_id)); + node_info.set_network_id(u32::from(spec.network_id)); // send the node_info the requester let error_log = self.log.clone(); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index dd0c695b4..c61e0c6b6 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -136,6 +136,7 @@ fn main() { .help("Listen port for the HTTP server.") .takes_value(true), ) + /* Client related arguments */ .arg( Arg::with_name("api") .long("api") @@ -182,7 +183,7 @@ fn main() { from disk. A spec will be written to disk after this flag is used, so it is primarily used for creating eth2 spec files.") .takes_value(true) - .possible_values(&["mainnet", "minimal"]) + .possible_values(&["mainnet", "minimal", "interop"]) .default_value("minimal"), ) .arg( diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 010993988..c16d23e5f 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -13,7 +13,7 @@ use tokio::runtime::Builder; use tokio::runtime::Runtime; use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; -use types::{MainnetEthSpec, MinimalEthSpec}; +use types::{InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; /// Reads the configuration and initializes a `BeaconChain` with the required types and parameters. 
/// @@ -90,6 +90,22 @@ pub fn run_beacon_node( runtime, log, ), + ("disk", "interop") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), + ("memory", "interop") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), (db_type, spec) => { error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index 1dc34e195..dd6ca3272 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -200,3 +200,23 @@ impl EthSpec for MinimalEthSpec { } pub type MinimalBeaconState = BeaconState; + +/// Interop testnet spec +#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] +pub struct InteropEthSpec; + +impl EthSpec for InteropEthSpec { + type ShardCount = U8; + type SlotsPerHistoricalRoot = U64; + type LatestRandaoMixesLength = U64; + type LatestActiveIndexRootsLength = U64; + type LatestSlashedExitLength = U64; + type SlotsPerEpoch = U8; + type GenesisEpoch = U0; + + fn default_spec() -> ChainSpec { + ChainSpec::interop() + } +} + +pub type InteropBeaconState = BeaconState; diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 2128c6ef1..d6eaa123d 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -92,7 +92,7 @@ pub struct ChainSpec { domain_transfer: u32, pub boot_nodes: Vec, - pub chain_id: u8, + pub network_id: u8, } impl ChainSpec { @@ -190,7 +190,7 @@ impl ChainSpec { * Network specific */ boot_nodes: vec![], - chain_id: 1, // mainnet chain id + network_id: 1, // mainnet network id } } @@ -202,13 +202,35 @@ impl ChainSpec { pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. let boot_nodes = vec![]; + let genesis_slot = Slot::new(0); Self { target_committee_size: 4, shuffle_round_count: 10, min_genesis_active_validator_count: 64, max_epochs_per_crosslink: 4, - chain_id: 2, // lighthouse testnet chain id + min_attestation_inclusion_delay: 2, + genesis_slot, + network_id: 2, // lighthouse testnet network id + boot_nodes, + ..ChainSpec::mainnet() + } + } + + /// Interop testing spec + /// + /// This allows us to customize a chain spec for interop testing. 
+ pub fn interop() -> Self { + let genesis_slot = Slot::new(0); + let boot_nodes = vec![]; + + Self { + seconds_per_slot: 12, + target_committee_size: 4, + shuffle_round_count: 10, + min_attestation_inclusion_delay: 2, + genesis_slot, + network_id: 13, boot_nodes, ..ChainSpec::mainnet() } diff --git a/protos/src/services.proto b/protos/src/services.proto index bf23ff391..ba0462bbe 100644 --- a/protos/src/services.proto +++ b/protos/src/services.proto @@ -45,7 +45,7 @@ service AttestationService { message NodeInfoResponse { string version = 1; Fork fork = 2; - uint32 chain_id = 3; + uint32 network_id = 3; uint64 genesis_time = 4; uint64 genesis_slot = 5; } diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index aaa1673f5..d40578264 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit aaa1673f508103e11304833e0456e4149f880065 +Subproject commit d405782646190595927cc0a59f504f7b00a760f3 diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index bd3919b5a..756f82991 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -14,7 +14,7 @@ use protos::services_grpc::ValidatorServiceClient; use slog::{crit, error, info, o, Drain, Level}; use std::fs; use std::path::PathBuf; -use types::{Keypair, MainnetEthSpec, MinimalEthSpec}; +use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; pub const DEFAULT_SPEC: &str = "minimal"; pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; @@ -70,7 +70,7 @@ fn main() { .short("s") .help("The title of the spec constants for chain config.") .takes_value(true) - .possible_values(&["mainnet", "minimal"]) + .possible_values(&["mainnet", "minimal", "interop"]) .default_value("minimal"), ) .arg( @@ -214,6 +214,11 @@ fn main() { eth2_config, log.clone(), ), + "interop" => ValidatorService::::start::( + client_config, + eth2_config, + log.clone(), + ), other => { crit!(log, "Unknown spec constants"; "title" => other); return; diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 3f99efe36..c4ccbc204 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -107,12 +107,12 @@ impl Service Service node_info.version.clone(), "Chain ID" => node_info.chain_id, "Genesis time" => genesis_time); + info!(log,"Beacon node connected"; "Node Version" => node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time); let proto_fork = node_info.get_fork(); let mut previous_version: [u8; 4] = [0; 4]; From 15c4062761a3ae855bdc237d5edcdf9bf9c8ae44 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 24 Jul 2019 22:25:37 +1000 Subject: [PATCH 02/24] Add ability to connect to raw libp2p nodes --- beacon_node/eth2-libp2p/src/config.rs | 16 ++++++++++++++++ beacon_node/eth2-libp2p/src/discovery.rs | 6 +++--- beacon_node/eth2-libp2p/src/service.rs | 11 +++++++++++ beacon_node/src/main.rs | 21 ++++++++++++++------- 4 files changed, 44 insertions(+), 10 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 7391dba8a..d04eae14b 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -1,6 +1,7 @@ use clap::ArgMatches; use enr::Enr; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; +use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; @@ -39,6 +40,9 @@ pub struct Config { /// 
List of nodes to initially connect to. pub boot_nodes: Vec, + /// List of libp2p nodes to initially connect to. + pub libp2p_nodes: Vec, + /// Client version pub client_version: String, @@ -66,6 +70,7 @@ impl Default for Config { .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], + libp2p_nodes: vec![], client_version: version::version(), topics: Vec::new(), } @@ -118,6 +123,17 @@ impl Config { .collect::, _>>()?; } + if let Some(libp2p_addresses_str) = args.value_of("libp2p-addresses") { + self.libp2p_nodes = libp2p_addresses_str + .split(',') + .map(|multiaddr| { + multiaddr + .parse() + .map_err(|_| format!("Invalid Multiaddr: {}", multiaddr)) + }) + .collect::, _>>()?; + } + if let Some(discovery_address_str) = args.value_of("discovery-address") { self.discovery_address = discovery_address_str .parse() diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index c2f008756..96cf71846 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -37,6 +37,9 @@ pub struct Discovery { /// The target number of connected peers on the libp2p interface. max_peers: usize, + /// directory to save ENR to + enr_dir: String, + /// The delay between peer discovery searches. peer_discovery_delay: Delay, @@ -54,9 +57,6 @@ pub struct Discovery { /// Logger for the discovery behaviour. log: slog::Logger, - - /// directory to save ENR to - enr_dir: String, } impl Discovery { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 05ae9e473..5c7c0c7f1 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -76,6 +76,17 @@ impl Service { ), }; + // attempt to connect to user-input libp2p nodes + for multiaddr in config.libp2p_nodes { + match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { + Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), + Err(err) => debug!( + log, + "Could not connect to node: {} error: {:?}", multiaddr, err + ), + }; + } + // subscribe to default gossipsub topics let mut topics = vec![]; //TODO: Handle multiple shard attestations. For now we simply use a separate topic for diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index c61e0c6b6..9a1af2e08 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -56,6 +56,13 @@ fn main() { .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") .takes_value(true), ) + .arg( + Arg::with_name("port") + .long("port") + .value_name("Lighthouse Port") + .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") + .takes_value(true), + ) .arg( Arg::with_name("maxpeers") .long("maxpeers") @@ -70,13 +77,6 @@ fn main() { .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.") .takes_value(true), ) - .arg( - Arg::with_name("port") - .long("port") - .value_name("Lighthouse Port") - .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") - .takes_value(true), - ) .arg( Arg::with_name("discovery-port") .long("disc-port") @@ -91,6 +91,13 @@ fn main() { .help("The IP address to broadcast to other peers on how to reach this node.") .takes_value(true), ) + .arg( + Arg::with_name("libp2p-addresses") + .long("libp2p-addresses") + .value_name("MULTIADDR") + .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR.") + .takes_value(true), + ) /* * gRPC parameters. */ From 04ce9ec95e5d292d348fb88711187f786f1fc2eb Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 26 Jul 2019 14:43:42 +1000 Subject: [PATCH 03/24] Adds Identify protocol, clean up RPC protocol name handling --- beacon_node/eth2-libp2p/src/behaviour.rs | 101 ++++++++++++++------ beacon_node/eth2-libp2p/src/rpc/protocol.rs | 92 +++++++----------- 2 files changed, 107 insertions(+), 86 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 37e3419a3..33acd41e1 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -10,39 +10,44 @@ use libp2p::{ }, discv5::Discv5Event, gossipsub::{Gossipsub, GossipsubEvent}, + identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{o, trace, warn}; +use slog::{debug, o, trace, warn}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::num::NonZeroU32; use std::time::Duration; -use types::{Attestation, BeaconBlock, EthSpec}; +use types::{Attestation, BeaconBlock}; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] -pub struct Behaviour { +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] +pub struct Behaviour { /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, - /// The serenity RPC specified in the wire-0 protocol. - serenity_rpc: RPC, + /// The Eth2 RPC specified in the wire-0 protocol. + eth2_rpc: RPC, /// Keep regular connection to peers and disconnect if absent. + // TODO: Remove Libp2p ping in favour of discv5 ping. ping: Ping, - /// Kademlia for peer discovery. + // TODO: Using id for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + identify: Identify, + /// Discovery behaviour. discovery: Discovery, #[behaviour(ignore)] /// The events generated by this behaviour to be consumed in the swarm poll. - events: Vec>, + events: Vec, /// Logger for behaviour actions. 
#[behaviour(ignore)] log: slog::Logger, } -impl Behaviour { +impl Behaviour { pub fn new( local_key: &Keypair, net_conf: &NetworkConfig, @@ -50,17 +55,25 @@ impl Behaviour { ) -> error::Result { let local_peer_id = local_key.public().clone().into_peer_id(); let behaviour_log = log.new(o!()); + let ping_config = PingConfig::new() .with_timeout(Duration::from_secs(30)) .with_interval(Duration::from_secs(20)) .with_max_failures(NonZeroU32::new(2).expect("2 != 0")) .with_keep_alive(false); + let identify = Identify::new( + "lighthouse/libp2p".into(), + version::version(), + local_key.public(), + ); + Ok(Behaviour { - serenity_rpc: RPC::new(log), + eth2_rpc: RPC::new(log), gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()), discovery: Discovery::new(local_key, net_conf, log)?, ping: Ping::new(ping_config), + identify, events: Vec::new(), log: behaviour_log, }) @@ -68,8 +81,8 @@ impl Behaviour { } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: GossipsubEvent) { match event { @@ -101,8 +114,8 @@ impl NetworkBehaviourEventProces } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: RPCMessage) { match event { @@ -119,19 +132,19 @@ impl NetworkBehaviourEventProces } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: PingEvent) { // not interested in ping responses at the moment. } } -impl Behaviour { +impl Behaviour { /// Consumes the events list when polled. fn poll( &mut self, - ) -> Async>> { + ) -> Async> { if !self.events.is_empty() { return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); } @@ -140,8 +153,36 @@ impl Behaviour { } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: IdentifyEvent) { + match event { + IdentifyEvent::Identified { + peer_id, mut info, .. + } => { + if info.listen_addrs.len() > 20 { + debug!( + self.log, + "More than 20 addresses have been identified, truncating" + ); + info.listen_addrs.truncate(20); + } + debug!(self.log, "Identified Peer"; "Peer" => format!("{}", peer_id), + "Protocol Version" => info.protocol_version, + "Agent Version" => info.agent_version, + "Listening Addresses" => format!("{:?}", info.listen_addrs), + "Protocols" => format!("{:?}", info.protocols) + ); + } + IdentifyEvent::Error { .. } => {} + IdentifyEvent::SendBack { .. } => {} + } + } +} + +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: Discv5Event) { // discv5 has no events to inject @@ -149,7 +190,7 @@ impl NetworkBehaviourEventProces } /// Implements the combined behaviour for the libp2p service. -impl Behaviour { +impl Behaviour { /* Pubsub behaviour functions */ /// Subscribes to a gossipsub topic. @@ -158,7 +199,7 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. - pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { + pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { let message_bytes = ssz_encode(&message); for topic in topics { self.gossipsub.publish(topic, message_bytes.clone()); @@ -169,7 +210,7 @@ impl Behaviour { /// Sends an RPC Request/Response via the RPC protocol. 
pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.serenity_rpc.send_rpc(peer_id, rpc_event); + self.eth2_rpc.send_rpc(peer_id, rpc_event); } /* Discovery / Peer management functions */ @@ -179,28 +220,28 @@ impl Behaviour { } /// The types of events than can be obtained from polling the behaviour. -pub enum BehaviourEvent { +pub enum BehaviourEvent { RPC(PeerId, RPCEvent), PeerDialed(PeerId), PeerDisconnected(PeerId), GossipMessage { source: PeerId, topics: Vec, - message: Box>, + message: Box, }, } /// Messages that are passed to and from the pubsub (Gossipsub) behaviour. #[derive(Debug, Clone, PartialEq)] -pub enum PubsubMessage { +pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - Block(BeaconBlock), + Block(BeaconBlock), /// Gossipsub message providing notification of a new attestation. - Attestation(Attestation), + Attestation(Attestation), } //TODO: Correctly encode/decode enums. Prefixing with integer for now. -impl Encode for PubsubMessage { +impl Encode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } @@ -229,7 +270,7 @@ impl Encode for PubsubMessage { } } -impl Decode for PubsubMessage { +impl Decode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index 8729de3a7..b606fc743 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -8,7 +8,7 @@ use futures::{ future::{self, FutureResult}, sink, stream, Sink, Stream, }; -use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; use std::io; use std::time::Duration; use tokio::codec::Framed; @@ -28,24 +28,22 @@ const REQUEST_TIMEOUT: u64 = 3; pub struct RPCProtocol; impl UpgradeInfo for RPCProtocol { - type Info = RawProtocolId; + type Info = ProtocolId; type InfoIter = Vec; fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new("hello", "1.0.0", "ssz").into(), - ProtocolId::new("goodbye", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into(), + ProtocolId::new("hello", "1.0.0", "ssz"), + ProtocolId::new("goodbye", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"), ] } } -/// The raw protocol id sent over the wire. -type RawProtocolId = Vec; - /// Tracks the types in a protocol id. +#[derive(Clone)] pub struct ProtocolId { /// The rpc message type/name. pub message_name: String, @@ -55,44 +53,31 @@ pub struct ProtocolId { /// The encoding of the RPC. pub encoding: String, + + /// The protocol id that is formed from the above fields. + protocol_id: String, } /// An RPC protocol ID. 
impl ProtocolId { pub fn new(message_name: &str, version: &str, encoding: &str) -> Self { + let protocol_id = format!( + "{}/{}/{}/{}", + PROTOCOL_PREFIX, message_name, version, encoding + ); + ProtocolId { message_name: message_name.into(), version: version.into(), encoding: encoding.into(), + protocol_id, } } - - /// Converts a raw RPC protocol id string into an `RPCProtocolId` - pub fn from_bytes(bytes: &[u8]) -> Result { - let protocol_string = String::from_utf8(bytes.to_vec()) - .map_err(|_| RPCError::InvalidProtocol("Invalid protocol Id"))?; - let protocol_list: Vec<&str> = protocol_string.as_str().split('/').take(7).collect(); - - if protocol_list.len() != 7 { - return Err(RPCError::InvalidProtocol("Not enough '/'")); - } - - Ok(ProtocolId { - message_name: protocol_list[4].into(), - version: protocol_list[5].into(), - encoding: protocol_list[6].into(), - }) - } } -impl Into for ProtocolId { - fn into(self) -> RawProtocolId { - format!( - "{}/{}/{}/{}", - PROTOCOL_PREFIX, self.message_name, self.version, self.encoding - ) - .as_bytes() - .to_vec() +impl ProtocolName for ProtocolId { + fn protocol_name(&self) -> &[u8] { + self.protocol_id.as_bytes() } } @@ -127,16 +112,11 @@ where fn upgrade_inbound( self, socket: upgrade::Negotiated, - protocol: RawProtocolId, + protocol: ProtocolId, ) -> Self::Future { - // TODO: Verify this - let protocol_id = - ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols"); - - match protocol_id.encoding.as_str() { + match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = - BaseInboundCodec::new(SSZInboundCodec::new(protocol_id, MAX_RPC_SIZE)); + let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = InboundCodec::SSZ(ssz_codec); Framed::new(socket, codec) .into_future() @@ -171,7 +151,7 @@ pub enum RPCRequest { } impl UpgradeInfo for RPCRequest { - type Info = RawProtocolId; + type Info = ProtocolId; type InfoIter = Vec; // add further protocols as we support more encodings/versions @@ -182,22 +162,25 @@ impl UpgradeInfo for RPCRequest { /// Implements the encoding per supported protocol for RPCRequest. 
impl RPCRequest { - pub fn supported_protocols(&self) -> Vec { + pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1.0.0", "ssz").into()], - RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz").into()], + RPCRequest::Hello(_) => vec![ + ProtocolId::new("hello", "1.0.0", "ssz"), + ProtocolId::new("goodbye", "1.0.0", "ssz"), + ], + RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], RPCRequest::BeaconBlockRoots(_) => { - vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")] } RPCRequest::BeaconBlockHeaders(_) => { - vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")] } RPCRequest::BeaconBlockBodies(_) => { - vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")] } RPCRequest::BeaconChainState(_) => { - vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")] } } } @@ -230,12 +213,9 @@ where socket: upgrade::Negotiated, protocol: Self::Info, ) -> Self::Future { - let protocol_id = - ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols"); - - match protocol_id.encoding.as_str() { + match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol_id, 4096)); + let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); let codec = OutboundCodec::SSZ(ssz_codec); Framed::new(socket, codec).send(self) } From 0613bc16fc54f5d02434ec7540500ba255ab5dc9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 6 Aug 2019 15:09:47 +1000 Subject: [PATCH 04/24] Update to latest libp2p, gossipsub improvements --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- beacon_node/eth2-libp2p/src/behaviour.rs | 6 ++---- beacon_node/eth2-libp2p/src/config.rs | 8 ++++++-- beacon_node/eth2-libp2p/src/discovery.rs | 6 ++---- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 7 ++++--- beacon_node/eth2-libp2p/src/rpc/mod.rs | 6 +++--- beacon_node/eth2-libp2p/src/service.rs | 20 +++++++++++++------- beacon_node/rpc/src/attestation.rs | 4 ++-- beacon_node/rpc/src/beacon_block.rs | 4 ++-- beacon_node/src/main.rs | 15 +++++++++++---- 11 files changed, 48 insertions(+), 34 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 405c72cc4..f5fe8a877 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 33acd41e1..fcb147949 100644 --- 
a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -4,14 +4,12 @@ use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; use futures::prelude::*; use libp2p::{ - core::{ - identity::Keypair, - swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, - }, + core::identity::Keypair, discv5::Discv5Event, gossipsub::{Gossipsub, GossipsubEvent}, identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, + swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index d04eae14b..44d07795b 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -64,9 +64,9 @@ impl Default for Config { discovery_port: 9000, max_peers: 10, //TODO: Set realistic values for production + // Note: This defaults topics to plain strings. Not hashes gs_config: GossipsubConfigBuilder::new() - .max_gossip_size(4_000_000) - .inactivity_timeout(Duration::from_secs(90)) + .max_transmit_size(1_000_000) .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], @@ -134,6 +134,10 @@ impl Config { .collect::, _>>()?; } + if let Some(topics_str) = args.value_of("topics") { + self.topics = topics_str.split(',').map(|s| s.into()).collect(); + } + if let Some(discovery_address_str) = args.value_of("discovery-address") { self.discovery_address = discovery_address_str .parse() diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 96cf71846..4c1794945 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -4,13 +4,11 @@ use crate::{error, NetworkConfig}; /// Currently using discv5 for peer discovery. 
/// use futures::prelude::*; -use libp2p::core::swarm::{ - ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, -}; -use libp2p::core::{identity::Keypair, Multiaddr, PeerId, ProtocolsHandler}; +use libp2p::core::{identity::Keypair, ConnectedPoint, Multiaddr, PeerId}; use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::multiaddr::Protocol; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; use slog::{debug, info, o, warn}; use std::collections::HashSet; use std::fs::File; diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 7a3b2e632..ca6ac3760 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -13,7 +13,7 @@ pub use behaviour::PubsubMessage; pub use config::{ Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX, }; -pub use libp2p::floodsub::{Topic, TopicBuilder, TopicHash}; +pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; pub use libp2p::{ diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 4e796f6fb..76e04d24e 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -5,10 +5,10 @@ use crate::rpc::protocol::{InboundFramed, OutboundFramed}; use core::marker::PhantomData; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::core::protocols_handler::{ +use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::protocols_handler::{ KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, }; -use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -273,7 +273,8 @@ where Self::Error, > { if let Some(err) = self.pending_error.take() { - return Err(err); + dbg!(&err); + //return Err(err); } // return any events that need to be reported diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 88060e602..5593660ff 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -6,9 +6,9 @@ use futures::prelude::*; use handler::RPCHandler; -use libp2p::core::protocols_handler::ProtocolsHandler; -use libp2p::core::swarm::{ - ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, +use libp2p::core::ConnectedPoint; +use libp2p::swarm::{ + protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, }; use libp2p::{Multiaddr, PeerId}; pub use methods::{ErrorMessage, HelloMessage, RPCErrorResponse, RPCResponse, RequestId}; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 5c7c0c7f1..5a2fc8d8b 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -3,7 +3,7 @@ use crate::error; use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; -use crate::{TopicBuilder, TopicHash}; +use crate::{Topic, TopicHash}; use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC}; use futures::prelude::*; use futures::Stream; @@ -90,15 +90,21 @@ impl Service { // subscribe to default gossipsub topics let mut topics = vec![]; //TODO: Handle multiple shard attestations. 
For now we simply use a separate topic for - //attestations - topics.push(BEACON_ATTESTATION_TOPIC.to_string()); - topics.push(BEACON_PUBSUB_TOPIC.to_string()); - topics.append(&mut config.topics.clone()); + // attestations + topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); + topics.push(Topic::new(BEACON_PUBSUB_TOPIC.into())); + topics.append( + &mut config + .topics + .iter() + .cloned() + .map(|s| Topic::new(s)) + .collect(), + ); let mut subscribed_topics = vec![]; for topic in topics { - let t = TopicBuilder::new(topic.clone()).build(); - if swarm.subscribe(t) { + if swarm.subscribe(topic.clone()) { trace!(log, "Subscribed to topic: {:?}", topic); subscribed_topics.push(topic); } else { diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 5ea8368fd..cbbe4de6e 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; -use eth2_libp2p::TopicBuilder; +use eth2_libp2p::Topic; use eth2_libp2p::BEACON_ATTESTATION_TOPIC; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -140,7 +140,7 @@ impl AttestationService for AttestationServiceInstance { ); // valid attestation, propagate to the network - let topic = TopicBuilder::new(BEACON_ATTESTATION_TOPIC).build(); + let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); let message = PubsubMessage::Attestation(attestation); self.network_chan diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b42bbb208..2a8ae2c6b 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::BEACON_PUBSUB_TOPIC; -use eth2_libp2p::{PubsubMessage, TopicBuilder}; +use eth2_libp2p::{PubsubMessage, Topic}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -106,7 +106,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { ); // get the network topic to send on - let topic = TopicBuilder::new(BEACON_PUBSUB_TOPIC).build(); + let topic = Topic::new(BEACON_PUBSUB_TOPIC.into()); let message = PubsubMessage::Block(block); // Publish the block to the p2p network via gossipsub. diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 9a1af2e08..c85eeedac 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -52,14 +52,14 @@ fn main() { .arg( Arg::with_name("listen-address") .long("listen-address") - .value_name("Address") + .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") .takes_value(true), ) .arg( Arg::with_name("port") .long("port") - .value_name("Lighthouse Port") + .value_name("PORT") .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") .takes_value(true), ) @@ -80,17 +80,24 @@ fn main() { .arg( Arg::with_name("discovery-port") .long("disc-port") - .value_name("DiscoveryPort") + .value_name("PORT") .help("The discovery UDP port.") .takes_value(true), ) .arg( Arg::with_name("discovery-address") .long("discovery-address") - .value_name("Address") + .value_name("ADDRESS") .help("The IP address to broadcast to other peers on how to reach this node.") .takes_value(true), ) + .arg( + Arg::with_name("topics") + .long("topics") + .value_name("STRING") + .help("One or more comma-delimited gossipsub topic strings to subscribe to.") + .takes_value(true), + ) .arg( Arg::with_name("libp2p-addresses") .long("libp2p-addresses") From 107bbdcccd66d4fa4125bc6f5b3f4fec3353032f Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 6 Aug 2019 17:54:38 +1000 Subject: [PATCH 05/24] Updates to latest interop branch. - Shifts decoding of objects into message handler. - Updates to latest interop gossipsub. - Adds interop spec constant. --- beacon_node/eth2-libp2p/Cargo.toml | 4 +- beacon_node/eth2-libp2p/src/behaviour.rs | 74 ++++++------- beacon_node/eth2-libp2p/src/config.rs | 2 +- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 17 ++- beacon_node/eth2-libp2p/src/rpc/mod.rs | 12 +-- beacon_node/eth2-libp2p/src/service.rs | 23 ++-- beacon_node/http_server/src/lib.rs | 2 +- beacon_node/network/src/message_handler.rs | 101 ++++++++++++------ beacon_node/network/src/service.rs | 33 +++--- beacon_node/network/src/sync/simple_sync.rs | 38 +++---- beacon_node/rpc/src/attestation.rs | 8 +- beacon_node/rpc/src/beacon_block.rs | 12 +-- beacon_node/rpc/src/lib.rs | 2 +- .../src/beacon_state/beacon_state_types.rs | 24 ++++- validator_client/src/main.rs | 2 +- 16 files changed, 199 insertions(+), 157 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index f5fe8a877..0ea182bc6 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index fcb147949..fc224e91a 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -2,6 +2,7 @@ use crate::discovery::Discovery; use crate::rpc::{RPCEvent, RPCMessage, RPC}; use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; +use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use libp2p::{ core::identity::Keypair, @@ -13,11 +14,10 @@ use libp2p::{ tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{debug, o, trace, warn}; -use ssz::{ssz_encode, Decode, DecodeError, Encode}; +use slog::{debug, o, trace}; +use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; -use types::{Attestation, BeaconBlock}; /// 
Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core @@ -87,23 +87,12 @@ impl NetworkBehaviourEventProcess { trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg)); - let pubsub_message = match PubsubMessage::from_ssz_bytes(&gs_msg.data) { - //TODO: Punish peer on error - Err(e) => { - warn!( - self.log, - "Received undecodable message from Peer {:?} error", gs_msg.source; - "error" => format!("{:?}", e) - ); - return; - } - Ok(msg) => msg, - }; + let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); self.events.push(BehaviourEvent::GossipMessage { source: gs_msg.source, topics: gs_msg.topics, - message: Box::new(pubsub_message), + message: msg, }); } GossipsubEvent::Subscribed { .. } => {} @@ -225,7 +214,7 @@ pub enum BehaviourEvent { GossipMessage { source: PeerId, topics: Vec, - message: Box, + message: PubsubMessage, }, } @@ -233,41 +222,50 @@ pub enum BehaviourEvent { #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - Block(BeaconBlock), + Block(Vec), /// Gossipsub message providing notification of a new attestation. - Attestation(Attestation), + Attestation(Vec), + /// Gossipsub message from an unknown topic. + Unknown(Vec), +} + +impl PubsubMessage { + /* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will + * need to be modified. + * + * Also note that a message can be associated with many topics. As soon as one of the topics is + * known we match. If none of the topics are known we return an unknown state. + */ + fn from_topics(topics: &Vec, data: Vec) -> Self { + for topic in topics { + match topic.as_str() { + BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), + BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), + _ => {} + } + } + PubsubMessage::Unknown(data) + } } -//TODO: Correctly encode/decode enums. Prefixing with integer for now. impl Encode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len(); - - let mut encoder = ssz::SszEncoder::container(buf, offset); - match self { - PubsubMessage::Block(block_gossip) => { - encoder.append(&0_u32); - + PubsubMessage::Block(inner) + | PubsubMessage::Attestation(inner) + | PubsubMessage::Unknown(inner) => { // Encode the gossip as a Vec; - encoder.append(&block_gossip.as_ssz_bytes()); - } - PubsubMessage::Attestation(attestation_gossip) => { - encoder.append(&1_u32); - - // Encode the gossip as a Vec; - encoder.append(&attestation_gossip.as_ssz_bytes()); + buf.append(&mut inner.as_ssz_bytes()); } } - - encoder.finalize(); } } +/* impl Decode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false @@ -295,7 +293,9 @@ impl Decode for PubsubMessage { } } } +*/ +/* #[cfg(test)] mod test { use super::*; @@ -313,4 +313,6 @@ mod test { assert_eq!(original, decoded); } + } +*/ diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 44d07795b..ddf14cc04 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -7,7 +7,7 @@ use std::path::PathBuf; use std::time::Duration; /// The beacon node topic string to subscribe to. 
-pub const BEACON_PUBSUB_TOPIC: &str = "beacon_block"; +pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; pub const SHARD_TOPIC_PREFIX: &str = "shard"; diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index ca6ac3760..54a4f2a99 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -11,7 +11,7 @@ mod service; pub use behaviour::PubsubMessage; pub use config::{ - Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX, + Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, }; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 76e04d24e..355cc52ee 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -12,16 +12,14 @@ use libp2p::swarm::protocols_handler::{ use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; -use types::EthSpec; /// The time (in seconds) before a substream that is awaiting a response times out. pub const RESPONSE_TIMEOUT: u64 = 9; /// Implementation of `ProtocolsHandler` for the RPC protocol. -pub struct RPCHandler +pub struct RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, @@ -56,8 +54,8 @@ where /// After the given duration has elapsed, an inactive connection will shutdown. inactive_timeout: Duration, - /// Phantom EthSpec. - _phantom: PhantomData, + /// Marker to pin the generic stream. + _phantom: PhantomData, } /// An outbound substream is waiting a response from the user. @@ -90,10 +88,9 @@ where }, } -impl RPCHandler +impl RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { pub fn new( listen_protocol: SubstreamProtocol, @@ -145,20 +142,18 @@ where } } -impl Default for RPCHandler +impl Default for RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { fn default() -> Self { RPCHandler::new(SubstreamProtocol::new(RPCProtocol), Duration::from_secs(30)) } } -impl ProtocolsHandler for RPCHandler +impl ProtocolsHandler for RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { type InEvent = RPCEvent; type OutEvent = RPCEvent; diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 5593660ff..756a62e71 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -16,7 +16,6 @@ pub use protocol::{RPCError, RPCProtocol, RPCRequest}; use slog::o; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; -use types::EthSpec; pub(crate) mod codec; mod handler; @@ -50,16 +49,16 @@ impl RPCEvent { /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. -pub struct RPC { +pub struct RPC { /// Queue of events to processed. events: Vec>, /// Pins the generic substream. - marker: PhantomData<(TSubstream, E)>, + marker: PhantomData<(TSubstream)>, /// Slog logger for RPC behaviour. 
_log: slog::Logger, } -impl RPC { +impl RPC { pub fn new(log: &slog::Logger) -> Self { let log = log.new(o!("Service" => "Libp2p-RPC")); RPC { @@ -80,12 +79,11 @@ impl RPC { } } -impl NetworkBehaviour for RPC +impl NetworkBehaviour for RPC where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { - type ProtocolsHandler = RPCHandler; + type ProtocolsHandler = RPCHandler; type OutEvent = RPCMessage; fn new_handler(&mut self) -> Self::ProtocolsHandler { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 5a2fc8d8b..316aa0579 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -4,7 +4,7 @@ use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC}; +use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -21,25 +21,24 @@ use std::fs::File; use std::io::prelude::*; use std::io::{Error, ErrorKind}; use std::time::Duration; -use types::EthSpec; type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour, E>; +type Libp2pBehaviour = Behaviour>; const NETWORK_KEY_FILENAME: &str = "key"; /// The configuration and state of the libp2p components for the beacon node. -pub struct Service { +pub struct Service { /// The libp2p Swarm handler. //TODO: Make this private - pub swarm: Swarm>, + pub swarm: Swarm, /// This node's PeerId. _local_peer_id: PeerId, /// The libp2p logger handle. pub log: slog::Logger, } -impl Service { +impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { debug!(log, "Network-libp2p Service starting"); @@ -92,7 +91,7 @@ impl Service { //TODO: Handle multiple shard attestations. For now we simply use a separate topic for // attestations topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); - topics.push(Topic::new(BEACON_PUBSUB_TOPIC.into())); + topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); topics.append( &mut config .topics @@ -121,8 +120,8 @@ impl Service { } } -impl Stream for Service { - type Item = Libp2pEvent; +impl Stream for Service { + type Item = Libp2pEvent; type Error = crate::error::Error; fn poll(&mut self) -> Poll, Self::Error> { @@ -136,7 +135,7 @@ impl Stream for Service { topics, message, } => { - trace!(self.log, "Pubsub message received: {:?}", message); + trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { source, topics, @@ -196,7 +195,7 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) } /// Events that can be obtained from polling the Libp2p Service. -pub enum Libp2pEvent { +pub enum Libp2pEvent { /// An RPC response request has been received on the swarm. RPC(PeerId, RPCEvent), /// Initiated the connection to a new peer. 
@@ -207,7 +206,7 @@ pub enum Libp2pEvent { PubsubMessage { source: PeerId, topics: Vec, - message: Box>, + message: PubsubMessage, }, } diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs index b20e43de8..f1d006a5b 100644 --- a/beacon_node/http_server/src/lib.rs +++ b/beacon_node/http_server/src/lib.rs @@ -76,7 +76,7 @@ pub fn create_iron_http_server( pub fn start_service( config: &HttpServerConfig, executor: &TaskExecutor, - _network_chan: mpsc::UnboundedSender>, + _network_chan: mpsc::UnboundedSender, beacon_chain: Arc>, db_path: PathBuf, metrics_registry: Registry, diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index eaddce533..72a507ad7 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -14,7 +14,7 @@ use slog::{debug, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{BeaconBlockHeader, EthSpec}; +use types::{Attestation, BeaconBlock, BeaconBlockHeader}; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -23,14 +23,14 @@ pub struct MessageHandler { /// The syncing framework. sync: SimpleSync, /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, + network_context: NetworkContext, /// The `MessageHandler` logger. log: slog::Logger, } /// Types of messages the handler can receive. #[derive(Debug)] -pub enum HandlerMessage { +pub enum HandlerMessage { /// We have initiated a connection to a new peer. PeerDialed(PeerId), /// Peer has disconnected, @@ -38,17 +38,17 @@ pub enum HandlerMessage { /// An RPC response/request has been received. RPC(PeerId, RPCEvent), /// A gossip message has been received. - PubsubMessage(PeerId, Box>), + PubsubMessage(PeerId, PubsubMessage), } impl MessageHandler { /// Initializes and runs the MessageHandler. pub fn spawn( beacon_chain: Arc>, - network_send: mpsc::UnboundedSender>, + network_send: mpsc::UnboundedSender, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, - ) -> error::Result>> { + ) -> error::Result> { debug!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -78,7 +78,7 @@ impl MessageHandler { } /// Handle all messages incoming from the network service. 
- fn handle_message(&mut self, message: HandlerMessage) { + fn handle_message(&mut self, message: HandlerMessage) { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { @@ -94,7 +94,7 @@ impl MessageHandler { } // we have received an RPC message request/response HandlerMessage::PubsubMessage(peer_id, gossip) => { - self.handle_gossip(peer_id, *gossip); + self.handle_gossip(peer_id, gossip); } } } @@ -218,6 +218,62 @@ impl MessageHandler { } } + /// Handle various RPC errors + fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { + //TODO: Handle error correctly + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + } + + /// Handle RPC messages + fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { + match gossip_message { + PubsubMessage::Block(message) => match self.decode_gossip_block(message) { + Err(e) => { + debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + Ok(block) => { + let _should_forward_on = + self.sync + .on_block_gossip(peer_id, block, &mut self.network_context); + } + }, + PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { + Err(e) => { + debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + Ok(attestation) => { + self.sync + .on_attestation_gossip(peer_id, attestation, &mut self.network_context) + } + }, + PubsubMessage::Unknown(message) => { + // Received a message from an unknown topic. Ignore for now + debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); + } + } + } + + /* Decoding of blocks and attestations from the network. + * + * TODO: Apply efficient decoding/verification of these objects + */ + + fn decode_gossip_block( + &self, + beacon_block: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + BeaconBlock::from_ssz_bytes(&beacon_block) + } + + fn decode_gossip_attestation( + &self, + beacon_block: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + Attestation::from_ssz_bytes(&beacon_block) + } + /// Verifies and decodes the ssz-encoded block bodies received from peers. fn decode_block_bodies( &self, @@ -241,39 +297,18 @@ impl MessageHandler { //TODO: Implement faster header verification before decoding entirely Vec::from_ssz_bytes(&headers_response.headers) } - - /// Handle various RPC errors - fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { - //TODO: Handle error correctly - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); - } - - /// Handle RPC messages - fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { - match gossip_message { - PubsubMessage::Block(message) => { - let _should_forward_on = - self.sync - .on_block_gossip(peer_id, message, &mut self.network_context); - } - PubsubMessage::Attestation(message) => { - self.sync - .on_attestation_gossip(peer_id, message, &mut self.network_context) - } - } - } } // TODO: RPC Rewrite makes this struct fairly pointless -pub struct NetworkContext { +pub struct NetworkContext { /// The network channel to relay messages to the Network service. 
- network_send: mpsc::UnboundedSender>, + network_send: mpsc::UnboundedSender, /// The `MessageHandler` logger. log: slog::Logger, } -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { Self { network_send, log } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 7a21f7f28..e5ca2a917 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -14,13 +14,12 @@ use slog::{debug, info, o, trace}; use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; -use types::EthSpec; /// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { - libp2p_service: Arc>>, + libp2p_service: Arc>, _libp2p_exit: oneshot::Sender<()>, - _network_send: mpsc::UnboundedSender>, + _network_send: mpsc::UnboundedSender, _phantom: PhantomData, //message_handler: MessageHandler, //message_handler_send: Sender } @@ -31,9 +30,9 @@ impl Service { config: &NetworkConfig, executor: &TaskExecutor, log: slog::Logger, - ) -> error::Result<(Arc, mpsc::UnboundedSender>)> { + ) -> error::Result<(Arc, mpsc::UnboundedSender)> { // build the network channel - let (network_send, network_recv) = mpsc::unbounded_channel::>(); + let (network_send, network_recv) = mpsc::unbounded_channel::(); // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); let message_handler_send = MessageHandler::spawn( @@ -65,15 +64,15 @@ impl Service { Ok((Arc::new(network_service), network_send)) } - pub fn libp2p_service(&self) -> Arc>> { + pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } } -fn spawn_service( - libp2p_service: Arc>>, - network_recv: mpsc::UnboundedReceiver>, - message_handler_send: mpsc::UnboundedSender>, +fn spawn_service( + libp2p_service: Arc>, + network_recv: mpsc::UnboundedReceiver, + message_handler_send: mpsc::UnboundedSender, executor: &TaskExecutor, log: slog::Logger, ) -> error::Result> { @@ -99,10 +98,10 @@ fn spawn_service( } //TODO: Potentially handle channel errors -fn network_service( - libp2p_service: Arc>>, - mut network_recv: mpsc::UnboundedReceiver>, - mut message_handler_send: mpsc::UnboundedSender>, +fn network_service( + libp2p_service: Arc>, + mut network_recv: mpsc::UnboundedReceiver, + mut message_handler_send: mpsc::UnboundedSender, log: slog::Logger, ) -> impl futures::Future { futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> { @@ -119,7 +118,7 @@ fn network_service( }, NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.lock().swarm.publish(topics, *message); + libp2p_service.lock().swarm.publish(topics, message); } }, Ok(Async::NotReady) => break, @@ -176,14 +175,14 @@ fn network_service( /// Types of messages that the network service can receive. #[derive(Debug)] -pub enum NetworkMessage { +pub enum NetworkMessage { /// Send a message to libp2p service. //TODO: Define typing for messages across the wire Send(PeerId, OutgoingMessage), /// Publish a message to pubsub mechanism. 
Publish { topics: Vec, - message: Box>, + message: PubsubMessage, }, } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 9a9d15503..40a1881dd 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -123,7 +123,7 @@ impl SimpleSync { /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. - pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { + pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); @@ -137,7 +137,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); @@ -156,7 +156,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); @@ -171,7 +171,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); @@ -278,7 +278,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -323,7 +323,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, res: BeaconBlockRootsResponse, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -387,7 +387,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -440,7 +440,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, headers: Vec, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -472,7 +472,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { let block_bodies: Vec> = req .block_roots @@ -518,7 +518,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, res: DecodedBeaconBlockBodiesResponse, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -557,7 +557,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, + network: &mut NetworkContext, ) -> bool { if let Some(outcome) = self.process_block(peer_id.clone(), block.clone(), network, &"gossip") @@ -627,7 +627,7 @@ impl SimpleSync { &mut self, _peer_id: PeerId, msg: Attestation, - _network: &mut NetworkContext, + _network: &mut NetworkContext, ) { match self.chain.process_attestation(msg) { Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"), @@ -642,7 +642,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { // Potentially set state to sync. 
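On the producer side of this change, the rpc services further below hand the network channel an already ssz-encoded payload. A sketch of that flow, assuming the import paths (`network::service::NetworkMessage` in particular) and a hypothetical helper name; the topic and message construction follow the attestation diff below.

    // Push an ssz-encoded attestation onto the gossip network via the de-generified channel.
    // `publish_attestation_bytes` is a hypothetical helper; import paths are assumed.
    use eth2_libp2p::{PubsubMessage, Topic, BEACON_ATTESTATION_TOPIC};
    use network::service::NetworkMessage;
    use tokio::sync::mpsc;

    fn publish_attestation_bytes(
        network_chan: &mut mpsc::UnboundedSender<NetworkMessage>,
        ssz_bytes: Vec<u8>,
    ) -> Result<(), String> {
        let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into());
        // The message now carries raw ssz bytes; decoding happens in the message handler.
        let message = PubsubMessage::Attestation(ssz_bytes);
        network_chan
            .try_send(NetworkMessage::Publish {
                topics: vec![topic],
                message,
            })
            .map_err(|e| format!("Failed to publish attestation: {:?}", e))
    }
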
if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { @@ -666,7 +666,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -683,7 +683,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -719,7 +719,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block_root: Hash256, - network: &mut NetworkContext, + network: &mut NetworkContext, source: &str, ) -> Option { match self.import_queue.attempt_complete_block(block_root) { @@ -812,7 +812,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, + network: &mut NetworkContext, source: &str, ) -> Option { let processing_result = self.chain.process_block(block.clone()); @@ -917,8 +917,8 @@ fn hello_message(beacon_chain: &BeaconChain) -> HelloMes network_id: spec.network_id, //TODO: Correctly define the chain id chain_id: spec.network_id as u64, - latest_finalized_root: state.finalized_root, - latest_finalized_epoch: state.finalized_epoch, + latest_finalized_root: state.finalized_checkpoint.root, + latest_finalized_epoch: state.finalized_checkpoint.epoch, best_root: beacon_chain.head().beacon_block_root, best_slot: state.slot, } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index cbbe4de6e..3de3639d8 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -11,7 +11,7 @@ use protos::services::{ }; use protos::services_grpc::AttestationService; use slog::{error, info, trace, warn}; -use ssz::{ssz_encode, Decode}; +use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; use types::Attestation; @@ -19,7 +19,7 @@ use types::Attestation; #[derive(Clone)] pub struct AttestationServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender>, + pub network_chan: mpsc::UnboundedSender, pub log: slog::Logger, } @@ -141,12 +141,12 @@ impl AttestationService for AttestationServiceInstance { // valid attestation, propagate to the network let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); - let message = PubsubMessage::Attestation(attestation); + let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); self.network_chan .try_send(NetworkMessage::Publish { topics: vec![topic], - message: Box::new(message), + message: message, }) .unwrap_or_else(|e| { error!( diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index 2a8ae2c6b..b1a67399e 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,5 +1,5 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::BEACON_PUBSUB_TOPIC; +use eth2_libp2p::BEACON_BLOCK_TOPIC; use eth2_libp2p::{PubsubMessage, Topic}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -11,7 +11,7 @@ use protos::services::{ use protos::services_grpc::BeaconBlockService; use slog::Logger; use slog::{error, info, trace, warn}; -use ssz::{ssz_encode, Decode}; +use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; use types::{BeaconBlock, Signature, Slot}; @@ -19,7 +19,7 @@ use types::{BeaconBlock, Signature, Slot}; #[derive(Clone)] pub struct BeaconBlockServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender>, + pub 
network_chan: mpsc::UnboundedSender, pub log: Logger, } @@ -106,14 +106,14 @@ impl BeaconBlockService for BeaconBlockServiceInstance { ); // get the network topic to send on - let topic = Topic::new(BEACON_PUBSUB_TOPIC.into()); - let message = PubsubMessage::Block(block); + let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); + let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub. self.network_chan .try_send(NetworkMessage::Publish { topics: vec![topic], - message: Box::new(message), + message: message, }) .unwrap_or_else(|e| { error!( diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index de9039505..eef009292 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -25,7 +25,7 @@ use tokio::sync::mpsc; pub fn start_server( config: &RPCConfig, executor: &TaskExecutor, - network_chan: mpsc::UnboundedSender>, + network_chan: mpsc::UnboundedSender, beacon_chain: Arc>, log: &slog::Logger, ) -> exit_future::Signal { diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index dd6ca3272..0e76942dd 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -207,12 +207,26 @@ pub struct InteropEthSpec; impl EthSpec for InteropEthSpec { type ShardCount = U8; - type SlotsPerHistoricalRoot = U64; - type LatestRandaoMixesLength = U64; - type LatestActiveIndexRootsLength = U64; - type LatestSlashedExitLength = U64; type SlotsPerEpoch = U8; - type GenesisEpoch = U0; + type SlotsPerHistoricalRoot = U64; + type SlotsPerEth1VotingPeriod = U16; + type EpochsPerHistoricalVector = U64; + type EpochsPerSlashingsVector = U64; + type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch + + params_from_eth_spec!(MainnetEthSpec { + JustificationBitsLength, + MaxValidatorsPerCommittee, + GenesisEpoch, + HistoricalRootsLimit, + ValidatorRegistryLimit, + MaxProposerSlashings, + MaxAttesterSlashings, + MaxAttestations, + MaxDeposits, + MaxVoluntaryExits, + MaxTransfers + }); fn default_spec() -> ChainSpec { ChainSpec::interop() diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 756f82991..76acb2f1a 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -214,7 +214,7 @@ fn main() { eth2_config, log.clone(), ), - "interop" => ValidatorService::::start::( + "interop" => ValidatorService::::start( client_config, eth2_config, log.clone(), From 907a4e5a4b7f8e4a70a2c790d6f85daa48fbd45e Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 14:54:08 +1000 Subject: [PATCH 06/24] Configuration updates allow for verbosity CLI flag and spec constants --- beacon_node/client/src/config.rs | 12 +++++- beacon_node/src/main.rs | 61 ++++++++++++++++++++----------- eth2/utils/eth2_config/src/lib.rs | 7 ++++ validator_client/eth2_config.toml | 47 ------------------------ validator_client/src/main.rs | 59 +++++++++++++++++++----------- 5 files changed, 94 insertions(+), 92 deletions(-) delete mode 100644 validator_client/eth2_config.toml diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1a27de406..176625d77 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,3 +1,4 @@ +use crate::Eth2Config; use clap::ArgMatches; use http_server::HttpServerConfig; use network::NetworkConfig; @@ -56,8 +57,6 @@ impl Default for Config { log_file: PathBuf::from(""), db_type: 
"disk".to_string(), db_name: "chain_db".to_string(), - // Note: there are no default bootnodes specified. - // Once bootnodes are established, add them here. network: NetworkConfig::new(), rpc: rpc::RPCConfig::default(), http: HttpServerConfig::default(), @@ -129,6 +128,15 @@ impl Config { self.data_dir = PathBuf::from(dir); }; + if let Some(default_spec) = args.value_of("default-spec") { + match default_spec { + "mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants, + "minimal" => self.spec_constants = Eth2Config::minimal().spec_constants, + "interop" => self.spec_constants = Eth2Config::interop().spec_constants, + _ => {} // not supported + } + } + if let Some(dir) = args.value_of("db") { self.db_type = dir.to_string(); }; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index c85eeedac..be57c6c9d 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -193,12 +193,9 @@ fn main() { .long("default-spec") .value_name("TITLE") .short("default-spec") - .help("Specifies the default eth2 spec to be used. Overridden by any spec loaded - from disk. A spec will be written to disk after this flag is used, so it is - primarily used for creating eth2 spec files.") + .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") .takes_value(true) .possible_values(&["mainnet", "minimal", "interop"]) - .default_value("minimal"), ) .arg( Arg::with_name("recent-genesis") @@ -217,7 +214,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("info"), + .default_value("trace"), ) .arg( Arg::with_name("verbosity") @@ -316,26 +313,42 @@ fn main() { let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); - // Attempt to load the `Eth2Config` from file. + // Initialise the `Eth2Config`. // - // If the file doesn't exist, create a default one depending on the CLI flags. - let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = match matches.value_of("default-spec") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - _ => unreachable!(), // Guarded by slog. - }; - if let Err(e) = write_to_file(eth2_config_path, &default) { + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. 
+ let cli_config = match matches.value_of("default-spec") { + Some("mainnet") => Some(Eth2Config::mainnet()), + Some("minimal") => Some(Eth2Config::minimal()), + Some("interop") => Some(Eth2Config::interop()), + _ => None, + }; + // if cli is specified, write the new config + let mut eth2_config = { + if let Some(cli_config) = cli_config { + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); return; } - default - } - Err(e) => { - crit!(log, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e)); - return; + cli_config + } else { + // config not specified, read from disk + match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + // set default to minimal + let eth2_config = Eth2Config::minimal(); + if let Err(e) = write_to_file(eth2_config_path, ð2_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + eth2_config + } + Err(e) => { + crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + } } }; @@ -348,6 +361,12 @@ fn main() { } }; + // check to ensure the spec constants between the client and eth2_config match + if eth2_config.spec_constants != client_config.spec_constants { + crit!(log, "Specification constants do not match."; "Client Config" => format!("{}", client_config.spec_constants), "Eth2 Config" => format!("{}", eth2_config.spec_constants)); + return; + } + // Start the node using a `tokio` executor. match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} diff --git a/eth2/utils/eth2_config/src/lib.rs b/eth2/utils/eth2_config/src/lib.rs index 17cbc4211..794a27e4e 100644 --- a/eth2/utils/eth2_config/src/lib.rs +++ b/eth2/utils/eth2_config/src/lib.rs @@ -37,6 +37,13 @@ impl Eth2Config { spec: ChainSpec::minimal(), } } + + pub fn interop() -> Self { + Self { + spec_constants: "interop".to_string(), + spec: ChainSpec::interop(), + } + } } impl Eth2Config { diff --git a/validator_client/eth2_config.toml b/validator_client/eth2_config.toml deleted file mode 100644 index 1e0781378..000000000 --- a/validator_client/eth2_config.toml +++ /dev/null @@ -1,47 +0,0 @@ -spec_constants = "minimal" - -[spec] -target_committee_size = 4 -max_indices_per_attestation = 4096 -min_per_epoch_churn_limit = 4 -churn_limit_quotient = 65536 -base_rewards_per_epoch = 5 -shuffle_round_count = 10 -deposit_contract_tree_depth = 32 -min_deposit_amount = 1000000000 -max_effective_balance = 32000000000 -ejection_balance = 16000000000 -effective_balance_increment = 1000000000 -genesis_slot = 0 -zero_hash = "0x0000000000000000000000000000000000000000000000000000000000000000" -bls_withdrawal_prefix_byte = "0x00" -genesis_time = 4294967295 -seconds_per_slot = 6 -min_attestation_inclusion_delay = 2 -min_seed_lookahead = 1 -activation_exit_delay = 4 -slots_per_eth1_voting_period = 16 -slots_per_historical_root = 8192 -min_validator_withdrawability_delay = 256 -persistent_committee_period = 2048 -max_crosslink_epochs = 64 -min_epochs_to_inactivity_penalty = 4 -base_reward_quotient = 32 -whistleblowing_reward_quotient = 512 -proposer_reward_quotient = 8 -inactivity_penalty_quotient = 33554432 -min_slashing_penalty_quotient = 32 -max_proposer_slashings = 16 -max_attester_slashings = 1 -max_attestations = 128 -max_deposits = 16 -max_voluntary_exits = 16 -max_transfers = 0 -domain_beacon_proposer = 0 -domain_randao = 1 -domain_attestation = 2 
-domain_deposit = 3 -domain_voluntary_exit = 4 -domain_transfer = 5 -boot_nodes = ["/ip4/127.0.0.1/tcp/9000"] -chain_id = 2 diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 76acb2f1a..0782df323 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -64,14 +64,13 @@ fn main() { .takes_value(true), ) .arg( - Arg::with_name("spec-constants") - .long("spec-constants") + Arg::with_name("default-spec") + .long("default-spec") .value_name("TITLE") - .short("s") - .help("The title of the spec constants for chain config.") + .short("default-spec") + .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") .takes_value(true) .possible_values(&["mainnet", "minimal", "interop"]) - .default_value("minimal"), ) .arg( Arg::with_name("debug-level") @@ -126,7 +125,7 @@ fn main() { let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - // Attempt to lead the `ClientConfig` from disk. + // Attempt to load the `ClientConfig` from disk. // // If file doesn't exist, create a new, default one. let mut client_config = match read_from_file::( @@ -164,26 +163,42 @@ fn main() { .and_then(|s| Some(PathBuf::from(s))) .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME)); - // Attempt to load the `Eth2Config` from file. + // Initialise the `Eth2Config`. // - // If the file doesn't exist, create a default one depending on the CLI flags. - let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = match matches.value_of("spec-constants") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - _ => unreachable!(), // Guarded by slog. - }; - if let Err(e) = write_to_file(eth2_config_path, &default) { + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. 
+ let cli_config = match matches.value_of("default-spec") { + Some("mainnet") => Some(Eth2Config::mainnet()), + Some("minimal") => Some(Eth2Config::minimal()), + Some("interop") => Some(Eth2Config::interop()), + _ => None, + }; + // if cli is specified, write the new config + let mut eth2_config = { + if let Some(cli_config) = cli_config { + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); return; } - default - } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; + cli_config + } else { + // config not specified, read from disk + match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + // set default to minimal + let eth2_config = Eth2Config::minimal(); + if let Err(e) = write_to_file(eth2_config_path, ð2_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + eth2_config + } + Err(e) => { + crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + } } }; From dba7bfc4e14d6bd57a7617d5464dfaa1d0f46581 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 15:17:21 +1000 Subject: [PATCH 07/24] Update submodules to master --- tests/ef_tests/eth2.0-spec-tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index d40578264..aaa1673f5 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit d405782646190595927cc0a59f504f7b00a760f3 +Subproject commit aaa1673f508103e11304833e0456e4149f880065 From b3e0aad7bfa3a3ebfd69f61163b18048438924e8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 15:55:09 +1000 Subject: [PATCH 08/24] Correct minimal chainspec modifications --- eth2/types/src/chain_spec.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index d6eaa123d..9dec626d4 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -202,15 +202,12 @@ impl ChainSpec { pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. let boot_nodes = vec![]; - let genesis_slot = Slot::new(0); Self { target_committee_size: 4, shuffle_round_count: 10, min_genesis_active_validator_count: 64, max_epochs_per_crosslink: 4, - min_attestation_inclusion_delay: 2, - genesis_slot, network_id: 2, // lighthouse testnet network id boot_nodes, ..ChainSpec::mainnet() @@ -221,15 +218,12 @@ impl ChainSpec { /// /// This allows us to customize a chain spec for interop testing. 
pub fn interop() -> Self { - let genesis_slot = Slot::new(0); let boot_nodes = vec![]; Self { seconds_per_slot: 12, target_committee_size: 4, shuffle_round_count: 10, - min_attestation_inclusion_delay: 2, - genesis_slot, network_id: 13, boot_nodes, ..ChainSpec::mainnet() From 107f32642f2b82db7becce53bce7638f635834fa Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 16:33:21 +1000 Subject: [PATCH 09/24] Duplication of validator polls are no longer fatal --- validator_client/src/service.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index c4ccbc204..3ddb96e4c 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -23,7 +23,7 @@ use protos::services_grpc::{ AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient, ValidatorServiceClient, }; -use slog::{error, info, warn}; +use slog::{crit, error, info, warn}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::marker::PhantomData; use std::sync::Arc; @@ -37,7 +37,7 @@ use types::{ChainSpec, Epoch, EthSpec, Fork, Slot}; /// A fixed amount of time after a slot to perform operations. This gives the node time to complete /// per-slot processes. -const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(200); +const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); /// The validator service. This is the main thread that executes and maintains validator /// duties. @@ -106,7 +106,7 @@ impl Service Service self.current_slot, - "The Timer should poll a new slot" - ); + // this is a non-fatal error. If the slot clock repeats, the node could + // have been slow to process the previous slot and is now duplicating tasks. + // We ignore duplicated but raise a critical error. + if current_slot <= self.current_slot { + crit!( + self.log, + "The validator tried to duplicate a slot. Likely missed the previous slot" + ); + return Err("Duplicate slot".into()); + } self.current_slot = current_slot; info!(self.log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64()); Ok(()) From 3210489a36892260799acfc2094b7d17e33c619a Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 9 Aug 2019 13:23:47 +1000 Subject: [PATCH 10/24] Apply PR suggestions --- beacon_node/eth2-libp2p/src/behaviour.rs | 58 ++-------------------- beacon_node/eth2-libp2p/src/rpc/handler.rs | 5 +- beacon_node/src/main.rs | 41 +++++++++------ validator_client/src/main.rs | 39 ++++++++++----- 4 files changed, 61 insertions(+), 82 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index fc224e91a..b87f8a061 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -19,6 +19,8 @@ use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; +const MAX_IDENTIFY_ADDRESSES: usize = 20; + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
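The `minimal()` and `interop()` constructors above list only the fields that differ and fill the rest with struct-update syntax from `ChainSpec::mainnet()`. An illustrative sketch of the same pattern, which would sit alongside them in `chain_spec.rs`; the constructor name and network id are hypothetical.

    impl ChainSpec {
        /// Illustrative only: a further customised spec built in the same style as
        /// `minimal()` and `interop()` above. The name and network_id are hypothetical.
        pub fn local_testnet() -> Self {
            Self {
                seconds_per_slot: 12,
                target_committee_size: 4,
                shuffle_round_count: 10,
                network_id: 99, // hypothetical id for a private testnet
                boot_nodes: vec![],
                ..ChainSpec::mainnet()
            }
        }
    }
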
@@ -148,12 +150,12 @@ impl NetworkBehaviourEventProcess { - if info.listen_addrs.len() > 20 { + if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( self.log, "More than 20 addresses have been identified, truncating" ); - info.listen_addrs.truncate(20); + info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); } debug!(self.log, "Identified Peer"; "Peer" => format!("{}", peer_id), "Protocol Version" => info.protocol_version, @@ -264,55 +266,3 @@ impl Encode for PubsubMessage { } } } - -/* -impl Decode for PubsubMessage { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = ssz::SszDecoderBuilder::new(&bytes); - - builder.register_type::()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - - let id: u32 = decoder.decode_next()?; - let body: Vec = decoder.decode_next()?; - - match id { - 0 => Ok(PubsubMessage::Block(BeaconBlock::from_ssz_bytes(&body)?)), - 1 => Ok(PubsubMessage::Attestation(Attestation::from_ssz_bytes( - &body, - )?)), - _ => Err(DecodeError::BytesInvalid( - "Invalid PubsubMessage id".to_string(), - )), - } - } -} -*/ - -/* -#[cfg(test)] -mod test { - use super::*; - use types::*; - - #[test] - fn ssz_encoding() { - let original = PubsubMessage::Block(BeaconBlock::::empty( - &MainnetEthSpec::default_spec(), - )); - - let encoded = ssz_encode(&original); - - let decoded = PubsubMessage::from_ssz_bytes(&encoded).unwrap(); - - assert_eq!(original, decoded); - } - -} -*/ diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 355cc52ee..dbc32c5a4 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -268,8 +268,11 @@ where Self::Error, > { if let Some(err) = self.pending_error.take() { + // Returning an error here will result in dropping any peer that doesn't support any of + // the RPC protocols. For our immediate purposes we permit this and simply log that an + // upgrade was not supported. + // TODO: Add a logger to the handler for trace output. dbg!(&err); - //return Err(err); } // return any events that need to be reported diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index be57c6c9d..b34259f5a 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -4,7 +4,7 @@ use clap::{App, Arg}; use client::{ClientConfig, Eth2Config}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; -use slog::{crit, o, Drain, Level}; +use slog::{crit, o, warn, Drain, Level}; use std::fs; use std::path::PathBuf; @@ -323,19 +323,36 @@ fn main() { Some("interop") => Some(Eth2Config::interop()), _ => None, }; - // if cli is specified, write the new config + // if a CLI flag is specified, write the new config if it doesn't exist, + // otherwise notify the user that the file will not be written. 
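Back in the identify handler at the start of this patch, the hardcoded limit is replaced by `MAX_IDENTIFY_ADDRESSES`. A minimal sketch of that guard, with a hypothetical helper name:

    // Cap the identify listen addresses at MAX_IDENTIFY_ADDRESSES, as the behaviour above does.
    const MAX_IDENTIFY_ADDRESSES: usize = 20;

    fn cap_listen_addrs<T>(mut listen_addrs: Vec<T>) -> Vec<T> {
        if listen_addrs.len() > MAX_IDENTIFY_ADDRESSES {
            listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES);
        }
        listen_addrs
    }
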
+ let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { + Ok(config) => config, + Err(e) => { + crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); + return; + } + }; + let mut eth2_config = { if let Some(cli_config) = cli_config { - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; + if eth2_config_from_file.is_none() { + // write to file if one doesn't exist + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + } else { + warn!( + log, + "Eth2Config file exists. Configuration file is ignored, using default" + ); } cli_config } else { - // config not specified, read from disk - match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { + // CLI config not specified, read from disk + match eth2_config_from_file { + Some(config) => config, + None => { // set default to minimal let eth2_config = Eth2Config::minimal(); if let Err(e) = write_to_file(eth2_config_path, ð2_config) { @@ -344,10 +361,6 @@ fn main() { } eth2_config } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; - } } } }; @@ -363,7 +376,7 @@ fn main() { // check to ensure the spec constants between the client and eth2_config match if eth2_config.spec_constants != client_config.spec_constants { - crit!(log, "Specification constants do not match."; "Client Config" => format!("{}", client_config.spec_constants), "Eth2 Config" => format!("{}", eth2_config.spec_constants)); + crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants)); return; } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 0782df323..83a874df7 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -11,7 +11,7 @@ use crate::service::Service as ValidatorService; use clap::{App, Arg}; use eth2_config::{read_from_file, write_to_file, Eth2Config}; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, Drain, Level}; +use slog::{crit, error, info, o, warn, Drain, Level}; use std::fs; use std::path::PathBuf; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; @@ -173,19 +173,36 @@ fn main() { Some("interop") => Some(Eth2Config::interop()), _ => None, }; - // if cli is specified, write the new config + // if a CLI flag is specified, write the new config if it doesn't exist, + // otherwise notify the user that the file will not be written. + let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { + Ok(config) => config, + Err(e) => { + crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); + return; + } + }; + let mut eth2_config = { if let Some(cli_config) = cli_config { - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; + if eth2_config_from_file.is_none() { + // write to file if one doesn't exist + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + } else { + warn!( + log, + "Eth2Config file exists. 
Configuration file is ignored, using default" + ); } cli_config } else { - // config not specified, read from disk - match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { + // CLI config not specified, read from disk + match eth2_config_from_file { + Some(config) => config, + None => { // set default to minimal let eth2_config = Eth2Config::minimal(); if let Err(e) = write_to_file(eth2_config_path, ð2_config) { @@ -194,10 +211,6 @@ fn main() { } eth2_config } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; - } } } }; From ce5061603250b10f2e18a1090c5751f028460c32 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 11:31:36 +1000 Subject: [PATCH 11/24] Improve logging --- beacon_node/client/src/lib.rs | 11 ++------ beacon_node/client/src/notifier.rs | 2 +- beacon_node/eth2-libp2p/src/discovery.rs | 36 +++++++++++------------- beacon_node/eth2-libp2p/src/service.rs | 19 ++++++------- beacon_node/network/src/service.rs | 17 +++++------ 5 files changed, 38 insertions(+), 47 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 65ba071fa..4b64c1070 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -100,16 +100,9 @@ where } do_state_catchup(&beacon_chain, &log); - // Start the network service, libp2p and syncing threads - // TODO: Add beacon_chain reference to network parameters let network_config = &client_config.network; - let network_logger = log.new(o!("Service" => "Network")); - let (network, network_send) = NetworkService::new( - beacon_chain.clone(), - network_config, - executor, - network_logger, - )?; + let (network, network_send) = + NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; // spawn the RPC server let rpc_exit_signal = if client_config.rpc.enabled { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1c7cf3867..a763196c9 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -38,7 +38,7 @@ pub fn run( // Panics if libp2p is poisoned. 
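The logging changes in this patch converge on slog's structured form: a short static message plus `"key" => value` pairs rather than values interpolated into the message string. A sketch of a logger set up and used in that style, assuming the `slog`, `slog-term` and `slog-async` crates already present in this workspace:

    // Structured slog usage in the "message; key => value" style adopted here.
    use slog::{debug, info, o, Drain};

    fn demo_logger() -> slog::Logger {
        let decorator = slog_term::TermDecorator::new().build();
        let drain = slog_term::FullFormat::new(decorator).build().fuse();
        let drain = slog_async::Async::new(drain).build().fuse();
        slog::Logger::root(drain, o!("Service" => "Network"))
    }

    fn demo_logging() {
        let log = demo_logger();
        let connected_peer_count = 3;
        debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count);
        info!(log, "Listening established"; "Address" => "/ip4/127.0.0.1/tcp/9000");
    }
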
let connected_peer_count = libp2p.lock().swarm.connected_peers(); - debug!(log, "libp2p"; "peer_count" => connected_peer_count); + debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count); if connected_peer_count <= WARN_PEER_COUNT { warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4c1794945..3e34b9b03 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -9,7 +9,7 @@ use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::multiaddr::Protocol; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use slog::{debug, info, o, warn}; +use slog::{debug, info, warn}; use std::collections::HashSet; use std::fs::File; use std::io::prelude::*; @@ -63,7 +63,7 @@ impl Discovery { config: &NetworkConfig, log: &slog::Logger, ) -> error::Result { - let log = log.new(o!("Service" => "Libp2p-Discovery")); + let log = log.clone(); // checks if current ENR matches that found on disk let local_enr = load_enr(local_key, config, &log)?; @@ -73,19 +73,19 @@ impl Discovery { None => String::from(""), }; - info!(log, "Local ENR: {}", local_enr.to_base64()); - debug!(log, "Local Node Id: {}", local_enr.node_id()); - debug!(log, "Local ENR seq: {}", local_enr.seq()); + info!(log, "ENR Initialised"; "ENR" => local_enr.to_base64(), "Seq" => local_enr.seq()); + debug!(log, "Discv5 Node ID Initialised"; "node_id" => format!("{}",local_enr.node_id())); let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address) - .map_err(|e| format!("Discv5 service failed: {:?}", e))?; + .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?; // Add bootnodes to routing table for bootnode_enr in config.boot_nodes.clone() { debug!( log, - "Adding node to routing table: {}", - bootnode_enr.node_id() + "Adding node to routing table"; + "Node ID" => format!("{}", + bootnode_enr.node_id()) ); discovery.add_enr(bootnode_enr); } @@ -123,7 +123,7 @@ impl Discovery { fn find_peers(&mut self) { // pick a random NodeId let random_node = NodeId::random(); - debug!(self.log, "Searching for peers..."); + debug!(self.log, "Searching for peers"); self.discovery.find_node(random_node); // update the time until next discovery @@ -201,7 +201,7 @@ where } Ok(Async::NotReady) => break, Err(e) => { - warn!(self.log, "Discovery peer search failed: {:?}", e); + warn!(self.log, "Discovery peer search failed"; "Error" => format!("{:?}", e)); } } } @@ -227,16 +227,16 @@ where }); } Discv5Event::FindNodeResult { closer_peers, .. 
} => { - debug!(self.log, "Discv5 query found {} peers", closer_peers.len()); + debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len()); if closer_peers.is_empty() { - debug!(self.log, "Discv5 random query yielded empty results"); + debug!(self.log, "Discovery random query found no peers"); } for peer_id in closer_peers { // if we need more peers, attempt a connection if self.connected_peers.len() < self.max_peers && self.connected_peers.get(&peer_id).is_none() { - debug!(self.log, "Discv5: Peer discovered"; "Peer"=> format!("{:?}", peer_id)); + debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id)); return Async::Ready(NetworkBehaviourAction::DialPeer { peer_id, }); @@ -283,14 +283,12 @@ fn load_enr( Ok(_) => { match Enr::from_str(&enr_string) { Ok(enr) => { - debug!(log, "ENR found in file: {:?}", enr_f); - if enr.node_id() == local_enr.node_id() { if enr.ip() == config.discovery_address.into() && enr.tcp() == Some(config.libp2p_port) && enr.udp() == Some(config.discovery_port) { - debug!(log, "ENR loaded from file"); + debug!(log, "ENR loaded from file"; "File" => format!("{:?}", enr_f)); // the stored ENR has the same configuration, use it return Ok(enr); } @@ -300,11 +298,11 @@ fn load_enr( local_enr.set_seq(new_seq_no, local_key).map_err(|e| { format!("Could not update ENR sequence number: {:?}", e) })?; - debug!(log, "ENR sequence number increased to: {}", new_seq_no); + debug!(log, "ENR sequence number increased"; "Seq" => new_seq_no); } } Err(e) => { - warn!(log, "ENR from file could not be decoded: {:?}", e); + warn!(log, "ENR from file could not be decoded"; "Error" => format!("{:?}", e)); } } } @@ -327,7 +325,7 @@ fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) { Err(e) => { warn!( log, - "Could not write ENR to file: {:?}{:?}. 
Error: {}", dir, ENR_FILENAME, e + "Could not write ENR to file"; "File" => format!("{:?}{:?}",dir, ENR_FILENAME), "Error" => format!("{}", e) ); } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa0579..e0867e87f 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -40,13 +40,12 @@ pub struct Service { impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { - debug!(log, "Network-libp2p Service starting"); + trace!(log, "Libp2p Service starting"); // load the private key from CLI flag, disk or generate a new one let local_private_key = load_private_key(&config, &log); - let local_peer_id = PeerId::from(local_private_key.public()); - info!(log, "Local peer id: {:?}", local_peer_id); + info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); let mut swarm = { // Set up the transport - tcp/ws with secio and mplex/yamux @@ -67,21 +66,21 @@ impl Service { Ok(_) => { let mut log_address = listen_multiaddr; log_address.push(Protocol::P2p(local_peer_id.clone().into())); - info!(log, "Listening on: {}", log_address); + info!(log, "Listening established"; "Address" => format!("{}", log_address)); } Err(err) => warn!( log, - "Cannot listen on: {} because: {:?}", listen_multiaddr, err + "Failed to listen on address"; "Address" => format!("{}", listen_multiaddr), "Error" => format!("{:?}", err) ), }; // attempt to connect to user-input libp2p nodes for multiaddr in config.libp2p_nodes { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), + Ok(()) => debug!(log, "Dialing libp2p peer"; "Address" => format!("{}", multiaddr)), Err(err) => debug!( log, - "Could not connect to node: {} error: {:?}", multiaddr, err + "Could not connect to peer"; "Address" => format!("{}", multiaddr), "Error" => format!("{:?}", err) ), }; } @@ -104,13 +103,13 @@ impl Service { let mut subscribed_topics = vec![]; for topic in topics { if swarm.subscribe(topic.clone()) { - trace!(log, "Subscribed to topic: {:?}", topic); + trace!(log, "Subscribed to topic"; "Topic" => format!("{}", topic)); subscribed_topics.push(topic); } else { - warn!(log, "Could not subscribe to topic: {:?}", topic) + warn!(log, "Could not subscribe to topic"; "Topic" => format!("{}", topic)); } } - info!(log, "Subscribed to topics: {:?}", subscribed_topics); + info!(log, "Subscribed to topics"; "Topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::>())); Ok(Service { _local_peer_id: local_peer_id, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e5ca2a917..df0404cfa 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -20,8 +20,7 @@ pub struct Service { libp2p_service: Arc>, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, - _phantom: PhantomData, //message_handler: MessageHandler, - //message_handler_send: Sender + _phantom: PhantomData, } impl Service { @@ -42,17 +41,19 @@ impl Service { message_handler_log, )?; + let network_log = log.new(o!("Service" => "Network")); // launch libp2p service - let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = Arc::new(Mutex::new(LibP2PService::new(config.clone(), libp2p_log)?)); + let libp2p_service = Arc::new(Mutex::new(LibP2PService::new( + config.clone(), + network_log.clone(), + )?)); - // TODO: Spawn thread to handle libp2p messages and 
pass to message handler thread. let libp2p_exit = spawn_service( libp2p_service.clone(), network_recv, message_handler_send, executor, - log, + network_log, )?; let network_service = Service { libp2p_service, @@ -142,13 +143,13 @@ fn network_service( .map_err(|_| "Failed to send RPC to handler")?; } Libp2pEvent::PeerDialed(peer_id) => { - debug!(log, "Peer Dialed: {:?}", peer_id); + debug!(log, "Peer Dialed"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDialed(peer_id)) .map_err(|_| "Failed to send PeerDialed to handler")?; } Libp2pEvent::PeerDisconnected(peer_id) => { - debug!(log, "Peer Disconnected: {:?}", peer_id); + debug!(log, "Peer Disconnected"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDisconnected(peer_id)) .map_err(|_| "Failed to send PeerDisconnected to handler")?; From d83fa670681f96d705da89300e7c4ad126049bff Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 12:06:46 +1000 Subject: [PATCH 12/24] Subscribe to all required gossipsub topics --- beacon_node/eth2-libp2p/src/config.rs | 13 +++++++++---- beacon_node/eth2-libp2p/src/lib.rs | 4 +--- beacon_node/eth2-libp2p/src/service.rs | 24 +++++++++++++++++++----- 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index ddf14cc04..d7648ec3f 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -6,9 +6,14 @@ use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -/// The beacon node topic string to subscribe to. +/// The gossipsub topic names. +pub const TOPIC_PREFIX: &str = "eth2"; +pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; +pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; +pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; +pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SHARD_TOPIC_PREFIX: &str = "shard"; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -63,10 +68,10 @@ impl Default for Config { discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_port: 9000, max_peers: 10, - //TODO: Set realistic values for production - // Note: This defaults topics to plain strings. Not hashes + // Note: The topics by default are sent as plain strings. Hashes are an optional + // parameter. 
gs_config: GossipsubConfigBuilder::new() - .max_transmit_size(1_000_000) + .max_transmit_size(1_048_576) .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 54a4f2a99..7c3a93d61 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -10,9 +10,7 @@ pub mod rpc; mod service; pub use behaviour::PubsubMessage; -pub use config::{ - Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, -}; +pub use config::{Config as NetworkConfig, *}; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index e0867e87f..98718445b 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -1,10 +1,10 @@ use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage}; +use crate::config::*; use crate::error; use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -87,10 +87,24 @@ impl Service { // subscribe to default gossipsub topics let mut topics = vec![]; - //TODO: Handle multiple shard attestations. For now we simply use a separate topic for - // attestations - topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); - topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); + + /* Here we subscribe to all the required gossipsub topics required for interop. + * The topic builder adds the required prefix and postfix to the hardcoded topics that we + * must subscribe to. + */ + let topic_builder = |topic| { + Topic::new(format!( + "/{}/{}/{}", + TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX, + )) + }; + topics.push(topic_builder(BEACON_BLOCK_TOPIC)); + topics.push(topic_builder(BEACON_ATTESTATION_TOPIC)); + topics.push(topic_builder(VOLUNTARY_EXIT_TOPIC)); + topics.push(topic_builder(PROPOSER_SLASHING_TOPIC)); + topics.push(topic_builder(ATTESTER_SLASHING_TOPIC)); + + // Add any topics specified by the user topics.append( &mut config .topics From 80f15f5d700693520ed7ca722e1cd9b0227147c2 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 12:38:54 +1000 Subject: [PATCH 13/24] Correct gossipsub message encoding. Add extended topics --- beacon_node/eth2-libp2p/src/behaviour.rs | 54 ++++++++++++++---------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b87f8a061..749d2e5b4 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -1,8 +1,8 @@ +use crate::config::*; use crate::discovery::Discovery; use crate::rpc::{RPCEvent, RPCMessage, RPC}; use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use libp2p::{ core::identity::Keypair, @@ -15,7 +15,6 @@ use libp2p::{ NetworkBehaviour, PeerId, }; use slog::{debug, o, trace}; -use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; @@ -189,9 +188,9 @@ impl Behaviour { /// Publishes a message on the pubsub (gossipsub) behaviour. 
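The `topic_builder` in the service diff above produces the full gossip topic strings, for example "/eth2/beacon_block/ssz". A small sketch listing the interop topics it subscribes to, using the constants exported from `eth2-libp2p`; the function name is hypothetical.

    // The full gossip topic strings built by `topic_builder` above, e.g. "/eth2/beacon_block/ssz".
    use eth2_libp2p::{
        ATTESTER_SLASHING_TOPIC, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC,
        PROPOSER_SLASHING_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX, VOLUNTARY_EXIT_TOPIC,
    };

    fn interop_topic_strings() -> Vec<String> {
        let topic_builder =
            |topic: &str| format!("/{}/{}/{}", TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX);
        vec![
            topic_builder(BEACON_BLOCK_TOPIC),
            topic_builder(BEACON_ATTESTATION_TOPIC),
            topic_builder(VOLUNTARY_EXIT_TOPIC),
            topic_builder(PROPOSER_SLASHING_TOPIC),
            topic_builder(ATTESTER_SLASHING_TOPIC),
        ]
    }
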
pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { - let message_bytes = ssz_encode(&message); + let message_data = message.to_data(); for topic in topics { - self.gossipsub.publish(topic, message_bytes.clone()); + self.gossipsub.publish(topic, message_data.clone()); } } @@ -220,13 +219,20 @@ pub enum BehaviourEvent { }, } -/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. +/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. These are encoded and +/// decoded upstream. #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. Block(Vec), /// Gossipsub message providing notification of a new attestation. Attestation(Vec), + /// Gossipsub message providing notification of a voluntary exit. + VoluntaryExit(Vec), + /// Gossipsub message providing notification of a new proposer slashing. + ProposerSlashing(Vec), + /// Gossipsub message providing notification of a new attester slashing. + AttesterSlashing(Vec), /// Gossipsub message from an unknown topic. Unknown(Vec), } @@ -240,29 +246,33 @@ impl PubsubMessage { */ fn from_topics(topics: &Vec, data: Vec) -> Self { for topic in topics { - match topic.as_str() { - BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), - BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), - _ => {} + // compare the prefix and postfix, then match on the topic + let topic_parts: Vec<&str> = topic.as_str().split('/').collect(); + if topic_parts.len() == 4 + && topic_parts[1] == TOPIC_PREFIX + && topic_parts[3] == TOPIC_ENCODING_POSTFIX + { + match topic_parts[2] { + BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), + BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), + VOLUNTARY_EXIT_TOPIC => return PubsubMessage::VoluntaryExit(data), + PROPOSER_SLASHING_TOPIC => return PubsubMessage::ProposerSlashing(data), + ATTESTER_SLASHING_TOPIC => return PubsubMessage::AttesterSlashing(data), + _ => {} + } } } PubsubMessage::Unknown(data) } -} -impl Encode for PubsubMessage { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_append(&self, buf: &mut Vec) { + fn to_data(self) -> Vec { match self { - PubsubMessage::Block(inner) - | PubsubMessage::Attestation(inner) - | PubsubMessage::Unknown(inner) => { - // Encode the gossip as a Vec; - buf.append(&mut inner.as_ssz_bytes()); - } + PubsubMessage::Block(data) + | PubsubMessage::Attestation(data) + | PubsubMessage::VoluntaryExit(data) + | PubsubMessage::ProposerSlashing(data) + | PubsubMessage::AttesterSlashing(data) + | PubsubMessage::Unknown(data) => data, } } } From 5a74239ebcf0473120cdfc1acb4bf31fcc338f24 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 14:58:33 +1000 Subject: [PATCH 14/24] Add decoding/encoding for extended gossip topics. 
Correct logging CLI --- beacon_node/Cargo.toml | 4 +- beacon_node/eth2-libp2p/src/config.rs | 2 + beacon_node/network/src/message_handler.rs | 86 ++++++++++++++++++---- beacon_node/src/main.rs | 17 +---- 4 files changed, 79 insertions(+), 30 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9124047e4..cba73b8a4 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -11,7 +11,7 @@ store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" ctrlc = { version = "3.1.1", features = ["termination"] } @@ -22,3 +22,5 @@ exit-future = "0.1.3" env_logger = "0.6.1" dirs = "2.0.1" logging = { path = "../eth2/utils/logging" } +slog-scope = "4.1.2" +slog-stdlog = "3.0.5" diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index d7648ec3f..7cb501c1f 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -7,6 +7,8 @@ use std::path::PathBuf; use std::time::Duration; /// The gossipsub topic names. +// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX +// For example /eth2/beacon_block/ssz pub const TOPIC_PREFIX: &str = "eth2"; pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 72a507ad7..b86dcb969 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -10,11 +10,13 @@ use eth2_libp2p::{ }; use futures::future::Future; use futures::stream::Stream; -use slog::{debug, warn}; +use slog::{debug, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{Attestation, BeaconBlock, BeaconBlockHeader}; +use types::{ + Attestation, AttesterSlashing, BeaconBlock, BeaconBlockHeader, ProposerSlashing, VoluntaryExit, +}; /// Handles messages received from the network and client and organises syncing. 
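On the receiving side, `PubsubMessage::from_topics` in the behaviour diff above recovers the object kind by splitting the "/eth2/<name>/ssz" topic string. A small sketch of that parsing, with a hypothetical helper name and the config constants imported as in the previous patch:

    // Mirror of `PubsubMessage::from_topics`: a leading '/' yields an empty first element,
    // so a well-formed topic splits into exactly four parts.
    use eth2_libp2p::{TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX};

    fn topic_kind(topic: &str) -> Option<&str> {
        let parts: Vec<&str> = topic.split('/').collect();
        if parts.len() == 4 && parts[1] == TOPIC_PREFIX && parts[3] == TOPIC_ENCODING_POSTFIX {
            Some(parts[2]) // e.g. "beacon_block" or "beacon_attestation"
        } else {
            None
        }
    }
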
pub struct MessageHandler { @@ -49,7 +51,7 @@ impl MessageHandler { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { - debug!(log, "Service starting"); + trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -65,7 +67,6 @@ impl MessageHandler { }; // spawn handler task - // TODO: Handle manual termination of thread executor.spawn( handler_recv .for_each(move |msg| Ok(handler.handle_message(msg))) @@ -221,43 +222,79 @@ impl MessageHandler { /// Handle various RPC errors fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { //TODO: Handle error correctly - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "request_id" => format!("{}", request_id), "Error" => format!("{:?}", error)); } /// Handle RPC messages fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { - Err(e) => { - debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } Ok(block) => { let _should_forward_on = self.sync .on_block_gossip(peer_id, block, &mut self.network_context); } + Err(e) => { + debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Err(e) => { - debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } Ok(attestation) => { self.sync .on_attestation_gossip(peer_id, attestation, &mut self.network_context) } + Err(e) => { + debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } }, + PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) { + Ok(_exit) => { + // TODO: Handle exits + debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped exit"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + }, + PubsubMessage::ProposerSlashing(message) => { + match self.decode_gossip_proposer_slashing(message) { + Ok(_slashing) => { + // TODO: Handle proposer slashings + debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped proposer slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } + PubsubMessage::AttesterSlashing(message) => { + match self.decode_gossip_attestation_slashing(message) { + Ok(_slashing) => { + // TODO: Handle attester slashings + debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped attester slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } PubsubMessage::Unknown(message) => { // Received a message from an unknown topic. 
Ignore for now - debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); + debug!(self.log, "Unknown Gossip Message"; "peer_id" => format!("{}", peer_id), "Message" => format!("{:?}", message)); } } } - /* Decoding of blocks and attestations from the network. + /* Decoding of gossipsub objects from the network. + * + * The decoding is done in the message handler as it has access to to a `BeaconChain` and can + * therefore apply more efficient logic in decoding and verification. * * TODO: Apply efficient decoding/verification of these objects */ + /* Gossipsub Domain Decoding */ + // Note: These are not generics as type-specific verification will need to be applied. fn decode_gossip_block( &self, beacon_block: Vec, @@ -274,6 +311,29 @@ impl MessageHandler { Attestation::from_ssz_bytes(&beacon_block) } + fn decode_gossip_exit(&self, voluntary_exit: Vec) -> Result { + //TODO: Apply verification before decoding. + VoluntaryExit::from_ssz_bytes(&voluntary_exit) + } + + fn decode_gossip_proposer_slashing( + &self, + proposer_slashing: Vec, + ) -> Result { + //TODO: Apply verification before decoding. + ProposerSlashing::from_ssz_bytes(&proposer_slashing) + } + + fn decode_gossip_attestation_slashing( + &self, + attester_slashing: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + AttesterSlashing::from_ssz_bytes(&attester_slashing) + } + + /* Req/Resp Domain Decoding */ + /// Verifies and decodes the ssz-encoded block bodies received from peers. fn decode_block_bodies( &self, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index b34259f5a..086ccc5be 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -9,7 +9,6 @@ use std::fs; use std::path::PathBuf; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; - pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml"; @@ -214,14 +213,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("trace"), - ) - .arg( - Arg::with_name("verbosity") - .short("v") - .multiple(true) - .help("Sets the verbosity level") - .takes_value(true), + .default_value("info"), ) .get_matches(); @@ -241,13 +233,6 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let drain = match matches.occurrences_of("verbosity") { - 0 => drain.filter_level(Level::Info), - 1 => drain.filter_level(Level::Debug), - 2 => drain.filter_level(Level::Trace), - _ => drain.filter_level(Level::Trace), - }; - let mut log = slog::Logger::root(drain.fuse(), o!()); let data_dir = match matches From ec73dfe90b0568fcbc22a775b2e2bf509fde6370 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 17:46:39 +1000 Subject: [PATCH 15/24] Starting of req/resp overhaul --- beacon_node/eth2-libp2p/Cargo.toml | 1 + beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 114 ++++---------- beacon_node/eth2-libp2p/src/rpc/methods.rs | 153 +++++-------------- beacon_node/eth2-libp2p/src/rpc/protocol.rs | 55 +++---- 4 files changed, 96 insertions(+), 227 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 794b09712..55081aed5 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -26,3 +26,4 @@ smallvec = "0.6.10" fnv = "1.0.6" unsigned-varint = "0.2.2" bytes = 
"0.4.12" +tokio-io-timeout = "0.3.1" diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 8e2bdaa64..f7262118d 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -41,10 +41,8 @@ impl Encoder for SSZInboundCodec { RPCErrorResponse::Success(resp) => { match resp { RPCResponse::Hello(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockRoots(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockHeaders(res) => res.headers, // already raw bytes - RPCResponse::BeaconBlockBodies(res) => res.block_bodies, // already raw bytes - RPCResponse::BeaconChainState(res) => res.as_ssz_bytes(), + RPCResponse::BeaconBlocks(res) => res, // already raw bytes + RPCResponse::RecentBeaconBlocks(res) => res, // already raw bytes } } RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(), @@ -72,52 +70,30 @@ impl Decoder for SSZInboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol( - "Unknown GOODBYE version.as_str()", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockRoots( - BeaconBlockRootsRequest::from_ssz_bytes(&packet)?, + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCRequest::BeaconBlocks( + BeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockHeaders( - BeaconBlockHeadersRequest::from_ssz_bytes(&packet)?, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCRequest::RecentBeaconBlocks( + RecentBeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockBodies( - BeaconBlockBodiesRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconChainState( - BeaconChainStateRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown message name.")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), @@ -156,10 +132,8 @@ impl Encoder for SSZOutboundCodec { let bytes = match item { RPCRequest::Hello(req) => 
req.as_ssz_bytes(), RPCRequest::Goodbye(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockRoots(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockHeaders(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockBodies(req) => req.as_ssz_bytes(), - RPCRequest::BeaconChainState(req) => req.as_ssz_bytes(), + RPCRequest::BeaconBlocks(req) => req.as_ssz_bytes(), + RPCRequest::RecentBeaconBlocks(req) => req.as_ssz_bytes(), }; // length-prefix self.inner @@ -168,7 +142,11 @@ impl Encoder for SSZOutboundCodec { } } -// Decoder for outbound +// Decoder for outbound streams +// +// The majority of the decoding has now been pushed upstream due to the changing specification. +// We prefer to decode blocks and attestations with extra knowledge about the chain to perform +// faster verification checks before decoding entire blocks/attestations. impl Decoder for SSZOutboundCodec { type Item = RPCResponse; type Error = RPCError; @@ -177,51 +155,21 @@ impl Decoder for SSZOutboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version.")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockRoots( - BeaconBlockRootsResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockHeaders( - BeaconBlockHeadersResponse { - headers: packet.to_vec(), - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockBodies( - BeaconBlockBodiesResponse { - block_bodies: packet.to_vec(), - // this gets filled in the protocol handler - block_roots: None, - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconChainState( - BeaconChainStateResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown method")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 2e5a9a7ff..8fef1a75a 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -2,7 +2,7 @@ use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz_derive::{Decode, 
Encode}; -use types::{BeaconBlockBody, Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; /* Request/Response data structures for RPC methods */ @@ -13,23 +13,20 @@ pub type RequestId = usize; /// The HELLO request/response handshake message. #[derive(Encode, Decode, Clone, Debug)] pub struct HelloMessage { - /// The network ID of the peer. - pub network_id: u8, + /// The fork version of the chain we are broadcasting. + pub fork_version: [u8; 4], - /// The chain id for the HELLO request. - pub chain_id: u64, + /// Latest finalized root. + pub finalized_root: Hash256, - /// The peers last finalized root. - pub latest_finalized_root: Hash256, + /// Latest finalized epoch. + pub finalized_epoch: Epoch, - /// The peers last finalized epoch. - pub latest_finalized_epoch: Epoch, + /// The latest block root. + pub head_root: Hash256, - /// The peers last block root. - pub best_root: Hash256, - - /// The peers last slot. - pub best_slot: Slot, + /// The slot associated with the latest block root. + pub head_slot: Slot, } /// The reason given for a `Goodbye` message. @@ -74,108 +71,42 @@ impl_decode_via_from!(GoodbyeReason, u64); /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsRequest { - /// The starting slot of the requested blocks. - pub start_slot: Slot, +pub struct BeaconBlocksRequest { + /// The hash tree root of a block on the requested chain. + pub head_block_root: Hash256, + + /// The starting slot to request blocks. + pub start_slot: u64, /// The number of blocks from the start slot. - pub count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers + pub count: u64, + + /// The step increment to receive blocks. + /// + /// A value of 1 returns every block. + /// A value of 2 returns every second block. + /// A value of 3 returns every third block and so on. + pub step: u64, } +// TODO: Currently handle encoding/decoding of blocks in the message handler. Leave this struct +// here in case encoding/decoding of ssz requires an object. +/* /// Response containing a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsResponse { +pub struct BeaconBlocksResponse { /// List of requested blocks and associated slots. - pub roots: Vec, -} - -/// Contains a block root and associated slot. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BlockRootSlot { - /// The block root. - pub block_root: Hash256, - - /// The block slot. - pub slot: Slot, -} - -/// The response of a beacon block roots request. -impl BeaconBlockRootsResponse { - /// Returns `true` if each `self.roots.slot[i]` is higher than the preceding `i`. - pub fn slots_are_ascending(&self) -> bool { - for window in self.roots.windows(2) { - if window[0].slot >= window[1].slot { - return false; - } - } - - true - } -} - -/// Request a number of beacon block headers from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersRequest { - /// The starting header hash of the requested headers. - pub start_root: Hash256, - - /// The starting slot of the requested headers. - pub start_slot: Slot, - - /// The maximum number of headers than can be returned. - pub max_headers: u64, - - /// The maximum number of slots to skip between blocks. - pub skip_slots: u64, -} - -/// Response containing requested block headers. 
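// Illustrative sketch, separate from the diff hunks above: one plausible reading of the
// `start_slot`/`count`/`step` fields documented in the new `BeaconBlocksRequest`, with plain
// u64s standing in for `Slot`. A request covers `count` slots spaced `step` apart, so
// `step = 1` asks for every block and `step = 2` for every second block.
fn requested_slots(start_slot: u64, count: u64, step: u64) -> Vec<u64> {
    assert!(step > 0, "a zero step would never advance past start_slot");
    (0..count).map(|i| start_slot + i * step).collect()
}

fn main() {
    // start_slot = 64, count = 4, step = 2 selects slots 64, 66, 68 and 70.
    assert_eq!(requested_slots(64, 4, 2), vec![64, 66, 68, 70]);
}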
-#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersResponse { - /// The list of ssz-encoded requested beacon block headers. - pub headers: Vec, + pub beacon_blocks: Vec, } +*/ /// Request a number of beacon block bodies from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesRequest { +pub struct RecentBeaconBlocksRequest { /// The list of beacon block bodies being requested. pub block_roots: Vec, } -/// Response containing the list of requested beacon block bodies. -#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesResponse { - /// The list of hashes that were sent in the request and match these roots response. None when - /// sending outbound. - pub block_roots: Option>, - /// The list of ssz-encoded beacon block bodies being requested. - pub block_bodies: Vec, -} - -/// The decoded version of `BeaconBlockBodiesResponse` which is expected in `SimpleSync`. -pub struct DecodedBeaconBlockBodiesResponse { - /// The list of hashes sent in the request to get this response. - pub block_roots: Vec, - /// The valid decoded block bodies. - pub block_bodies: Vec>, -} - -/// Request values for tree hashes which yield a blocks `state_root`. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateRequest { - /// The tree hashes that a value is requested for. - pub hashes: Vec, -} - -/// Request values for tree hashes which yield a blocks `state_root`. -// Note: TBD -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateResponse { - /// The values corresponding the to the requested tree hashes. - pub values: bool, //TBD - stubbed with encodable bool -} - /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -183,14 +114,10 @@ pub struct BeaconChainStateResponse { pub enum RPCResponse { /// A HELLO message. Hello(HelloMessage), - /// A response to a get BEACON_BLOCK_ROOTS request. - BeaconBlockRoots(BeaconBlockRootsResponse), - /// A response to a get BEACON_BLOCK_HEADERS request. - BeaconBlockHeaders(BeaconBlockHeadersResponse), - /// A response to a get BEACON_BLOCK_BODIES request. - BeaconBlockBodies(BeaconBlockBodiesResponse), - /// A response to a get BEACON_CHAIN_STATE request. - BeaconChainState(BeaconChainStateResponse), + /// A response to a get BEACON_BLOCKS request. + BeaconBlocks(Vec), + /// A response to a get RECENT_BEACON_BLOCKS request. 
+ RecentBeaconBlocks(Vec), } #[derive(Debug)] @@ -206,8 +133,8 @@ impl RPCErrorResponse { pub fn as_u8(&self) -> u8 { match self { RPCErrorResponse::Success(_) => 0, - RPCErrorResponse::InvalidRequest(_) => 2, - RPCErrorResponse::ServerError(_) => 3, + RPCErrorResponse::InvalidRequest(_) => 1, + RPCErrorResponse::ServerError(_) => 2, RPCErrorResponse::Unknown(_) => 255, } } @@ -223,8 +150,8 @@ impl RPCErrorResponse { /// Builds an RPCErrorResponse from a response code and an ErrorMessage pub fn from_error(response_code: u8, err: ErrorMessage) -> Self { match response_code { - 2 => RPCErrorResponse::InvalidRequest(err), - 3 => RPCErrorResponse::ServerError(err), + 1 => RPCErrorResponse::InvalidRequest(err), + 2 => RPCErrorResponse::ServerError(err), _ => RPCErrorResponse::Unknown(err), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index b606fc743..be1efdf5d 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -16,13 +16,17 @@ use tokio::io::{AsyncRead, AsyncWrite}; use tokio::prelude::*; use tokio::timer::timeout; use tokio::util::FutureExt; +use tokio_io_timeout::TimeoutStream; /// The maximum bytes that can be sent across the RPC. const MAX_RPC_SIZE: usize = 4_194_304; // 4M /// The protocol prefix the RPC protocol id. -const PROTOCOL_PREFIX: &str = "/eth2/beacon_node/rpc"; -/// The number of seconds to wait for a request once a protocol has been established before the stream is terminated. -const REQUEST_TIMEOUT: u64 = 3; +const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; +/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). +const TTFB_TIMEOUT: u64 = 5; +/// The number of seconds to wait for the first bytes of a request once a protocol has been +/// established before the stream is terminated. +const REQUEST_TIMEOUT: u64 = 15; #[derive(Debug, Clone)] pub struct RPCProtocol; @@ -33,11 +37,10 @@ impl UpgradeInfo for RPCProtocol { fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"), + ProtocolId::new("hello", "1", "ssz"), + ProtocolId::new("goodbye", "1", "ssz"), + ProtocolId::new("beacon_blocks", "1", "ssz"), + ProtocolId::new("recent_beacon_blocks", "1", "ssz"), ] } } @@ -87,7 +90,7 @@ impl ProtocolName for ProtocolId { // handler to respond to once ready. 
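// Illustrative sketch, separate from the diff hunks above, assuming the negotiated protocol id
// simply joins the prefix with the message name, version and encoding (the `ProtocolName` impl
// body is elided in this hunk). Under that assumption, the ids advertised after this change
// would look like "/eth2/beacon_chain/req/hello/1/ssz" in place of the old
// "/eth2/beacon_node/rpc"-prefixed, "1.0.0"-versioned ids.
fn protocol_id(message_name: &str, version: &str, encoding: &str) -> String {
    const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
    format!("{}/{}/{}/{}", PROTOCOL_PREFIX, message_name, version, encoding)
}

fn main() {
    assert_eq!(
        protocol_id("hello", "1", "ssz"),
        "/eth2/beacon_chain/req/hello/1/ssz"
    );
}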
pub type InboundOutput = (RPCRequest, InboundFramed); -pub type InboundFramed = Framed, InboundCodec>; +pub type InboundFramed = Framed>, InboundCodec>; type FnAndThen = fn( (Option, InboundFramed), ) -> FutureResult, RPCError>; @@ -118,7 +121,9 @@ where "ssz" | _ => { let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = InboundCodec::SSZ(ssz_codec); - Framed::new(socket, codec) + let mut timed_socket = TimeoutStream::new(socket); + timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); + Framed::new(timed_socket, codec) .into_future() .timeout(Duration::from_secs(REQUEST_TIMEOUT)) .map_err(RPCError::from as FnMapErr) @@ -144,10 +149,8 @@ where pub enum RPCRequest { Hello(HelloMessage), Goodbye(GoodbyeReason), - BeaconBlockRoots(BeaconBlockRootsRequest), - BeaconBlockHeaders(BeaconBlockHeadersRequest), - BeaconBlockBodies(BeaconBlockBodiesRequest), - BeaconChainState(BeaconChainStateRequest), + BeaconBlocks(BeaconBlocksRequest), + RecentBeaconBlocks(RecentBeaconBlocksRequest), } impl UpgradeInfo for RPCRequest { @@ -165,22 +168,11 @@ impl RPCRequest { pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Hello(_) => vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ], - RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], - RPCRequest::BeaconBlockRoots(_) => { - vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockHeaders(_) => { - vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockBodies(_) => { - vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")] - } - RPCRequest::BeaconChainState(_) => { - vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")] + RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1", "ssz")], + RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1", "ssz")], + RPCRequest::BeaconBlocks(_) => vec![ProtocolId::new("beacon_blocks", "1", "ssz")], + RPCRequest::RecentBeaconBlocks(_) => { + vec![ProtocolId::new("recent_beacon_blocks", "1", "ssz")] } } } @@ -215,7 +207,8 @@ where ) -> Self::Future { match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); + let ssz_codec = + BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = OutboundCodec::SSZ(ssz_codec); Framed::new(socket, codec).send(self) } From 66419d00eadc4068243364c93d651e473954f34c Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 9 Aug 2019 10:39:32 +1000 Subject: [PATCH 16/24] Remove redundant slog dependencies --- beacon_node/Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index cba73b8a4..32b7e9211 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -22,5 +22,3 @@ exit-future = "0.1.3" env_logger = "0.6.1" dirs = "2.0.1" logging = { path = "../eth2/utils/logging" } -slog-scope = "4.1.2" -slog-stdlog = "3.0.5" From 5d4d2f35e1dc59b2566af5b547a1b6132b940454 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 12 Aug 2019 22:07:59 +1000 Subject: [PATCH 17/24] Initial sync re-write. 
WIP --- beacon_node/eth2-libp2p/src/rpc/handler.rs | 36 +- beacon_node/network/src/message_handler.rs | 121 +++---- beacon_node/network/src/sync/manager.rs | 283 ++++++++++++++++ beacon_node/network/src/sync/simple_sync.rs | 349 +++++--------------- 4 files changed, 405 insertions(+), 384 deletions(-) create mode 100644 beacon_node/network/src/sync/manager.rs diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index dbc32c5a4..a69cd0cda 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -1,4 +1,4 @@ -use super::methods::{RPCErrorResponse, RPCResponse, RequestId}; +use super::methods::RequestId; use super::protocol::{RPCError, RPCProtocol, RPCRequest}; use super::RPCEvent; use crate::rpc::protocol::{InboundFramed, OutboundFramed}; @@ -13,8 +13,8 @@ use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; -/// The time (in seconds) before a substream that is awaiting a response times out. -pub const RESPONSE_TIMEOUT: u64 = 9; +/// The time (in seconds) before a substream that is awaiting a response from the user times out. +pub const RESPONSE_TIMEOUT: u64 = 10; /// Implementation of `ProtocolsHandler` for the RPC protocol. pub struct RPCHandler @@ -314,7 +314,7 @@ where Ok(Async::Ready(response)) => { if let Some(response) = response { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - build_response(rpc_event, response), + RPCEvent::Response(rpc_event.id(), response), ))); } else { // stream closed early @@ -365,31 +365,3 @@ where Ok(Async::NotReady) } } - -/// Given a response back from a peer and the request that sent it, construct a response to send -/// back to the user. This allows for some data manipulation of responses given requests. -fn build_response(rpc_event: RPCEvent, rpc_response: RPCErrorResponse) -> RPCEvent { - let id = rpc_event.id(); - - // handle the types of responses - match rpc_response { - RPCErrorResponse::Success(response) => { - match response { - // if the response is block roots, tag on the extra request data - RPCResponse::BeaconBlockBodies(mut resp) => { - if let RPCEvent::Request(_id, RPCRequest::BeaconBlockBodies(bodies_req)) = - rpc_event - { - resp.block_roots = Some(bodies_req.block_roots); - } - RPCEvent::Response( - id, - RPCErrorResponse::Success(RPCResponse::BeaconBlockBodies(resp)), - ) - } - _ => RPCEvent::Response(id, RPCErrorResponse::Success(response)), - } - } - _ => RPCEvent::Response(id, rpc_response), - } -} diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index b86dcb969..6a9a40369 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -14,9 +14,7 @@ use slog::{debug, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconBlockHeader, ProposerSlashing, VoluntaryExit, -}; +use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, VoluntaryExit}; /// Handles messages received from the network and client and organises syncing. 
pub struct MessageHandler { @@ -56,9 +54,9 @@ impl MessageHandler { let (handler_send, handler_recv) = mpsc::unbounded_channel(); // Initialise sync and begin processing in thread - // generate the Message handler let sync = SimpleSync::new(beacon_chain.clone(), &log); + // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, @@ -66,7 +64,7 @@ impl MessageHandler { log: log.clone(), }; - // spawn handler task + // spawn handler task and move the message handler instance into the spawned thread executor.spawn( handler_recv .for_each(move |msg| Ok(handler.handle_message(msg))) @@ -89,11 +87,11 @@ impl MessageHandler { HandlerMessage::PeerDisconnected(peer_id) => { self.sync.on_disconnect(peer_id); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::RPC(peer_id, rpc_event) => { self.handle_rpc_message(peer_id, rpc_event); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::PubsubMessage(peer_id, gossip) => { self.handle_gossip(peer_id, gossip); } @@ -106,7 +104,7 @@ impl MessageHandler { fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { match rpc_message { RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req), - RPCEvent::Response(_id, resp) => self.handle_rpc_response(peer_id, resp), + RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp), RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error), } } @@ -121,46 +119,39 @@ impl MessageHandler { &mut self.network_context, ), RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason), - RPCRequest::BeaconBlockRoots(request) => self.sync.on_beacon_block_roots_request( + RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( peer_id, request_id, request, &mut self.network_context, ), - RPCRequest::BeaconBlockHeaders(request) => self.sync.on_beacon_block_headers_request( + RPCRequest::RecentBeaconBlocks(request) => self.sync.on_recent_beacon_blocks_request( peer_id, request_id, request, &mut self.network_context, ), - RPCRequest::BeaconBlockBodies(request) => self.sync.on_beacon_block_bodies_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - warn!(self.log, "BeaconChainState RPC call is not supported."); - } } } /// An RPC response has been received from the network. // we match on id and ignore responses past the timeout. - fn handle_rpc_response(&mut self, peer_id: PeerId, error_response: RPCErrorResponse) { + fn handle_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + error_response: RPCErrorResponse, + ) { // an error could have occurred. 
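// Illustrative sketch, separate from the diff hunks above: the `RequestId` now threaded through
// `handle_rpc_response` lets a reply be matched to the request that produced it. With stand-in
// types (a `String` peer id and a `usize` request id), the bookkeeping reduces to a map keyed
// by both values.
use std::collections::HashMap;

#[derive(Default)]
struct OutstandingRequests {
    pending: HashMap<(String, usize), &'static str>,
}

impl OutstandingRequests {
    fn on_request_sent(&mut self, peer: &str, id: usize, kind: &'static str) {
        self.pending.insert((peer.to_string(), id), kind);
    }

    // Returns the request this response answers, or None for unsolicited or duplicate replies.
    fn on_response(&mut self, peer: &str, id: usize) -> Option<&'static str> {
        self.pending.remove(&(peer.to_string(), id))
    }
}

fn main() {
    let mut outstanding = OutstandingRequests::default();
    outstanding.on_request_sent("peer-a", 1, "beacon_blocks");
    assert_eq!(outstanding.on_response("peer-a", 1), Some("beacon_blocks"));
    assert_eq!(outstanding.on_response("peer-a", 1), None);
}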
- // TODO: Handle Error gracefully match error_response { RPCErrorResponse::InvalidRequest(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Invalid Request" => error.as_string()) + warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::ServerError(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Server Error" => error.as_string()) + warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Unknown(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Unknown Error" => error.as_string()) + warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Success(response) => { match response { @@ -171,49 +162,37 @@ impl MessageHandler { &mut self.network_context, ); } - RPCResponse::BeaconBlockRoots(response) => { - self.sync.on_beacon_block_roots_response( - peer_id, - response, - &mut self.network_context, - ); - } - RPCResponse::BeaconBlockHeaders(response) => { - match self.decode_block_headers(response) { - Ok(decoded_block_headers) => { - self.sync.on_beacon_block_headers_response( + RPCResponse::BeaconBlocks(response) => { + match self.decode_beacon_blocks(response) { + Ok(beacon_blocks) => { + self.sync.on_beacon_blocks_response( peer_id, - decoded_block_headers, + beacon_blocks, &mut self.network_context, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block headers";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconBlockBodies(response) => { - match self.decode_block_bodies(response) { - Ok(decoded_block_bodies) => { - self.sync.on_beacon_block_bodies_response( + RPCResponse::RecentBeaconBlocks(response) => { + match self.decode_beacon_blocks(response) { + Ok(beacon_blocks) => { + self.sync.on_recent_beacon_blocks_response( + request_id, peer_id, - decoded_block_bodies, + beacon_blocks, &mut self.network_context, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block bodies";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - // - // Theoretically, we shouldn't reach this code because we should never send a - // beacon state RPC request. - warn!(self.log, "BeaconChainState RPC call is not supported."); - } } } } @@ -334,36 +313,22 @@ impl MessageHandler { /* Req/Resp Domain Decoding */ - /// Verifies and decodes the ssz-encoded block bodies received from peers. - fn decode_block_bodies( + /// Verifies and decodes an ssz-encoded list of `BeaconBlock`s. This list may contain empty + /// entries encoded with an SSZ NULL. 
+ fn decode_beacon_blocks( &self, - bodies_response: BeaconBlockBodiesResponse, - ) -> Result, DecodeError> { + beacon_blocks: &[u8], + ) -> Result>, DecodeError> { //TODO: Implement faster block verification before decoding entirely - let block_bodies = Vec::from_ssz_bytes(&bodies_response.block_bodies)?; - Ok(DecodedBeaconBlockBodiesResponse { - block_roots: bodies_response - .block_roots - .expect("Responses must have associated roots"), - block_bodies, - }) - } - - /// Verifies and decodes the ssz-encoded block headers received from peers. - fn decode_block_headers( - &self, - headers_response: BeaconBlockHeadersResponse, - ) -> Result, DecodeError> { - //TODO: Implement faster header verification before decoding entirely - Vec::from_ssz_bytes(&headers_response.headers) + Vec::from_ssz_bytes(&beacon_blocks) } } -// TODO: RPC Rewrite makes this struct fairly pointless +/// Wraps a Network Channel to employ various RPC/Sync related network functionality. pub struct NetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender, - /// The `MessageHandler` logger. + /// Logger for the `NetworkContext`. log: slog::Logger, } @@ -388,7 +353,7 @@ impl NetworkContext { &mut self, peer_id: PeerId, request_id: RequestId, - rpc_response: RPCResponse, + rpc_response: RPCErrorResponse, ) { self.send_rpc_event( peer_id, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs new file mode 100644 index 000000000..52c1a72c6 --- /dev/null +++ b/beacon_node/network/src/sync/manager.rs @@ -0,0 +1,283 @@ + +const MAXIMUM_BLOCKS_PER_REQUEST: usize = 10; +const SIMULTANEOUS_REQUESTS: usize = 10; +use super::simple_sync::FUTURE_SLOT_TOLERANCE; + +struct Chunk { + id: usize, + start_slot: Slot, + end_slot: Slot, + } + + +struct CompletedChunk { + peer_id: PeerId, + chunk: Chunk, + blocks: Vec, +} + +struct ProcessedChunk { + peer_id: PeerId, + chunk: Chunk, +} + +#[derive(PartialEq)] +pub enum SyncState { + Idle, + Downloading, + ColdSync { + max_wanted_slot: Slot, + max_wanted_hash: Hash256, + } +} + +pub enum SyncManagerState { + RequestBlocks(peer_id, BeaconBlockRequest), + Stalled, + Idle, +} + +pub struct PeerSyncInfo { + peer_id: PeerId, + fork_version: [u8,4], + finalized_root: Hash256, + finalized_epoch: Epoch, + head_root: Hash256, + head_slot: Slot, + requested_slot_skip: Option<(Slot, usize)>, +} + +pub(crate) struct SyncManager { + /// A reference to the underlying beacon chain. + chain: Arc>, + /// A mapping of Peers to their respective PeerSyncInfo. + available_peers: HashMap, + wanted_chunks: Vec, + pending_chunks: HashMap, + completed_chunks: Vec, + processed_chunks: Vec, // ordered + multi_peer_sections: HashMap + + current_requests: usize, + latest_wanted_slot: Option, + sync_status: SyncStatus, + to_process_chunk_id: usize, + log: Logger, + +} + +impl SyncManager { + /// Adds a sync-able peer and determines which blocks to download given the current state of + /// the chain, known peers and currently requested blocks. 
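// Illustrative sketch, separate from the diff hunks above: the core decision the
// work-in-progress `add_sync_peer` makes below, reduced to plain integers. A peer whose
// *finalized* slot is ahead of our head slot triggers the long-range "cold" sync path; a peer
// that is only ahead on head slot falls through to the regular path. `slots_per_epoch = 8` is
// just an example value here.
fn requires_cold_sync(
    remote_finalized_epoch: u64,
    slots_per_epoch: u64,
    local_head_slot: u64,
) -> bool {
    let remote_finalized_slot = remote_finalized_epoch * slots_per_epoch;
    remote_finalized_slot > local_head_slot
}

fn main() {
    // Remote finalized at epoch 10 (slot 80) while our head is at slot 30: cold sync.
    assert!(requires_cold_sync(10, 8, 30));
    // Remote finalized at epoch 2 (slot 16) is behind our head at slot 30: no cold sync.
    assert!(!requires_cold_sync(2, 8, 30));
}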
+ fn add_sync_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo, network &mut NetworkContext) { + + let local = PeerSyncInfo::from(&self.chain); + let remote_finalized_slot = remote.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let local_finalized_slot = local.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + + // cold sync + if remote_finalized_slot > local.head_slot { + if let SyncState::Idle || SyncState::Downloading = self.sync_state { + info!(self.log, "Cold Sync Started", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); + self.sync_state = SyncState::ColdSync{Slot::from(0), remote.finalized_hash} + } + + if let SyncState::ColdSync{max_wanted_slot, max_wanted_hjash } = self.sync_state { + + // We don't assume that our current head is the canonical chain. So we request blocks from + // our last finalized slot to ensure we are on the finalized chain. + if max_wanted_slot < remote_finalized_slot { + let remaining_blocks = remote_finalized_slot - max_wanted_slot; + for chunk in (0..remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) { + self.wanted_chunks.push( + Chunk { + id: self.current_chunk_id, + previous_chunk: self.curent_chunk_id.saturating_sub(1), + start_slot: chunk*MAXIMUM_BLOCKS_PER_REQUEST + self.last_wanted_slot, + end_slot: (section+1)*MAXIMUM_BLOCKS_PER_REQUEST +self.last_wanted_slot, + }) + self.current_chunk_id +=1; + } + + // add any extra partial chunks + self.pending_section.push( Section { + start_slot: (remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) + 1, + end_slot: remote_finalized_slot, + }) + self.current_chunk_id +=1; + + info!(self.log, "Cold Sync Updated", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); + + self.sync_state = SyncState::ColdSync{remote_finalized_slot, remote.finalized_hash} + } + } + + else { // hot sync + if remote_head_slot > self.chain.head().beacon_state.slot { + if let SyncState::Idle = self.sync_state { + self.sync_state = SyncState::Downloading + info!(self.log, "Sync Started", "start_slot" => local.head_slot, "latest_known_head" => remote.head_slot.as_u64()); + } + self.latest_known_slot = remote_head_slot; + //TODO Build requests. 
+ } + } + + available_peers.push(remote); + + } + + pub fn add_blocks(&mut self, chunk_id: RequestId, peer_id: PeerId, blocks: Vec) { + + if SyncState::ColdSync{max_wanted_slot, max_wanted_hash} = self.sync_state { + + let chunk = match self.pending_chunks.remove(&peer_id) { + Some(chunks) => { + match chunks.find(|chunk| chunk.id == chunk_id) { + Some(chunk) => chunk, + None => { + warn!(self.log, "Received blocks for an unknown chunk"; + "peer"=> peer_id); + return; + } + } + }, + None => { + warn!(self.log, "Received blocks without a request"; + "peer"=> peer_id); + return; + } + }; + + // add to completed + self.current_requests -= 1; + self.completed_chunks.push(CompletedChunk(peer_id, Chunk)); + } + } + + pub fn inject_error(id: RequestId, peer_id) { + if let SyncState::ColdSync{ _max_wanted_slot, _max_wanted_hash } { + match self.pending_chunks.get(&peer_id) { + Some(chunks) => { + if let Some(pos) = chunks.iter().position(|c| c.id == id) { + chunks.remove(pos); + } + }, + None => { + debug!(self.log, + "Received an error for an unknown request"; + "request_id" => id, + "peer" => peer_id + ); + } + } + } + } + + pub fn poll(&mut self) -> SyncManagerState { + + // if cold sync + if let SyncState::ColdSync(waiting_slot, max_wanted_slot, max_wanted_hash) = self.sync_state { + + // Try to process completed chunks + for completed_chunk in self.completed_chunks { + let chunk = completed_chunk.1; + let last_chunk_id = { + let no_processed_chunks = self.processed_chunks.len(); + if elements == 0 { 0 } else { self.processed_chunks[no_processed_chunks].id } + }; + if chunk.id == last_chunk_id + 1 { + // try and process the chunk + for block in chunk.blocks { + let processing_result = self.chain.process_block(block.clone()); + + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutCome::Processed { block_root} => { + // block successfully processed + }, + BlockProcessingOutcome::BlockIsAlreadyKnown => { + warn!( + self.log, "Block Already Known"; + "source" => source, + "sync" => "Cold Sync", + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + "peer" => format!("{:?}", chunk.0), + ); + }, + _ => { + // An error has occurred + // This could be due to the previous chunk or the current chunk. + // Re-issue both. + warn!( + self.log, "Faulty Chunk"; + "source" => source, + "sync" => "Cold Sync", + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + "peer" => format!("{:?}", chunk.0), + "outcome" => format!("{:?}", outcome), + ); + + // re-issue both chunks + // if both are the same peer. Downgrade the peer. 
+ let past_chunk = self.processed_chunks.pop() + self.wanted_chunks.insert(0, chunk.clone()); + self.wanted_chunks.insert(0, past_chunk.clone()); + if chunk.0 == past_chunk.peer_id { + // downgrade peer + return SyncManagerState::DowngradePeer(chunk.0); + } + break; + } + } + } + } + // chunk successfully processed + debug!(self.log, + "Chunk Processed"; + "id" => chunk.id + "start_slot" => chunk.start_slot, + "end_slot" => chunk.end_slot, + ); + self.processed_chunks.push(chunk); + } + } + + // chunks completed, update the state + self.sync_state = SyncState::ColdSync{waiting_slot, max_wanted_slot, max_wanted_hash}; + + // Remove stales + + // Spawn requests + if self.current_requests <= SIMULTANEOUS_REQUESTS { + if !self.wanted_chunks.is_empty() { + let chunk = self.wanted_chunks.remove(0); + for n in (0..self.peers.len()).rev() { + let peer = self.peers.swap_remove(n); + let peer_finalized_slot = peer.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + if peer_finalized_slot >= chunk.end_slot { + *self.pending.chunks.entry(&peer_id).or_insert_with(|| Vec::new).push(chunk); + self.active_peers.push(peer); + self.current_requests +=1; + let block_request = BeaconBlockRequest { + head_block_root, + start_slot: chunk.start_slot, + count: chunk.end_slot - chunk.start_slot + step: 1 + } + return SyncManagerState::BlockRequest(peer, block_request); + } + } + // no peers for this chunk + self.wanted_chunks.push(chunk); + return SyncManagerState::Stalled + } + } + } + + // if hot sync + return SyncManagerState::Idle + + } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index c3271888a..e3d3d7cef 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -17,7 +17,7 @@ use types::{ /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: u64 = 100; -/// The amount of seconds a block (or partial block) may exist in the import queue. +/// The amount of seconds a block may exist in the import queue. const QUEUE_STALE_SECS: u64 = 100; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. @@ -30,23 +30,23 @@ const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; /// Keeps track of syncing information for known connected peers. #[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { - network_id: u8, - chain_id: u64, - latest_finalized_root: Hash256, - latest_finalized_epoch: Epoch, - best_root: Hash256, - best_slot: Slot, + fork_version: [u8,4], + finalized_root: Hash256, + finalized_epoch: Epoch, + head_root: Hash256, + head_slot: Slot, + requested_slot_skip: Option<(Slot, usize)>, } impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { - network_id: hello.network_id, - chain_id: hello.chain_id, - latest_finalized_root: hello.latest_finalized_root, - latest_finalized_epoch: hello.latest_finalized_epoch, - best_root: hello.best_root, - best_slot: hello.best_slot, + fork_version: hello.fork_version, + finalized_root: hello.finalized_root, + finalized_epoch: hello.finalized_epoch, + head_root: hello.head_root, + head_slot: hello.head_slot, + requested_slot_skip: None, } } } @@ -71,8 +71,6 @@ pub struct SimpleSync { chain: Arc>, /// A mapping of Peers to their respective PeerSyncInfo. known_peers: HashMap, - /// A queue to allow importing of blocks - import_queue: ImportQueue, /// The current state of the syncing protocol. 
state: SyncState, log: slog::Logger, @@ -178,8 +176,8 @@ impl SimpleSync { let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - if local.network_id != remote.network_id { - // The node is on a different network, disconnect them. + if local.fork_version != remote.fork_version { + // The node is on a different network/fork, disconnect them. info!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), @@ -187,9 +185,9 @@ impl SimpleSync { ); network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch - && remote.latest_finalized_root != Hash256::zero() - && local.latest_finalized_root != Hash256::zero() + } else if remote.finalized_epoch <= local.finalized_epoch + && remote.finalized_root != Hash256::zero() + && local.finalized_root != Hash256::zero() && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) != Some(remote.latest_finalized_root)) { @@ -248,22 +246,37 @@ impl SimpleSync { "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, ); - let start_slot = local - .latest_finalized_epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let required_slots = remote.best_slot - start_slot; - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - start_slot, - count: required_slots.as_u64(), - }, - network, - ); + self.process_sync(); } } + self.proess_sync(&mut self) { + loop { + match self.sync_manager.poll() { + SyncManagerState::RequestBlocks(peer_id, req) { + debug!( + self.log, + "RPCRequest(BeaconBlockBodies)"; + "count" => req.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); + }, + SyncManagerState::Stalled { + // need more peers to continue sync + warn!(self.log, "No useable peers for sync"); + break; + }, + SyncManagerState::Idle { + // nothing to do + break; + } + } + } + } + + fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots(target_slot) @@ -272,213 +285,27 @@ impl SimpleSync { .map(|(root, _slot)| root) } - /// Handle a `BeaconBlockRoots` request from the peer. - pub fn on_beacon_block_roots_request( + /// Handle a `BeaconBlocks` request from the peer. + pub fn on_beacon_blocks_request( &mut self, peer_id: PeerId, request_id: RequestId, - req: BeaconBlockRootsRequest, + req: BeaconBlocksRequest, network: &mut NetworkContext, ) { let state = &self.chain.head().beacon_state; debug!( self.log, - "BlockRootsRequest"; + "BeaconBlocksRequest"; "peer" => format!("{:?}", peer_id), "count" => req.count, "start_slot" => req.start_slot, ); - let mut roots: Vec = self - .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) - .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) - .collect(); - - if roots.len() as u64 != req.count { - debug!( - self.log, - "BlockRootsRequest"; - "peer" => format!("{:?}", peer_id), - "msg" => "Failed to return all requested hashes", - "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), - "requested" => req.count, - "returned" => roots.len(), - ); - } - - roots.reverse(); - roots.dedup_by_key(|brs| brs.block_root); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots }), - ) - } - - /// Handle a `BeaconBlockRoots` response from the peer. 
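// Illustrative sketch, separate from the diff hunks above: one plausible reading of the slot
// filtering the work-in-progress `on_beacon_blocks_request` handler is driving at, with plain
// u64s in place of `Slot` and of the chain's reverse block-root iterator. Roots whose slot
// falls in the half-open range [start_slot, start_slot + count) are kept and returned
// oldest-first.
fn select_block_roots(roots_newest_first: &[(u64, u64)], start_slot: u64, count: u64) -> Vec<u64> {
    // Each element is a (root, slot) pair, as the reverse iterator would yield them.
    let mut selected: Vec<u64> = roots_newest_first
        .iter()
        .filter(|(_root, slot)| *slot >= start_slot && *slot < start_slot + count)
        .map(|(root, _slot)| *root)
        .collect();
    // The iterator walks backwards through the chain, so flip to ascending slot order.
    selected.reverse();
    selected
}

fn main() {
    let roots_newest_first = [(5, 14), (4, 13), (3, 12), (2, 11), (1, 10)];
    // start_slot = 11, count = 3 keeps slots 11, 12 and 13, i.e. roots 2, 3 and 4.
    assert_eq!(select_block_roots(&roots_newest_first, 11, 3), vec![2, 3, 4]);
}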
- pub fn on_beacon_block_roots_response( - &mut self, - peer_id: PeerId, - res: BeaconBlockRootsResponse, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockRootsResponse"; - "peer" => format!("{:?}", peer_id), - "count" => res.roots.len(), - ); - - if res.roots.is_empty() { - warn!( - self.log, - "Peer returned empty block roots response"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - // The wire protocol specifies that slots must be in ascending order. - if !res.slots_are_ascending() { - warn!( - self.log, - "Peer returned block roots response with bad slot ordering"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - let new_roots = self - .import_queue - .enqueue_block_roots(&res.roots, peer_id.clone()); - - // No new roots means nothing to do. - // - // This check protects against future panics. - if new_roots.is_empty() { - return; - } - - // Determine the first (earliest) and last (latest) `BlockRootSlot` items. - // - // This logic relies upon slots to be in ascending order, which is enforced earlier. - let first = new_roots.first().expect("Non-empty list must have first"); - let last = new_roots.last().expect("Non-empty list must have last"); - - // Request all headers between the earliest and latest new `BlockRootSlot` items. - self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: first.block_root, - start_slot: first.slot, - max_headers: (last.slot - first.slot + 1).as_u64(), - skip_slots: 0, - }, - network, - ) - } - - /// Handle a `BeaconBlockHeaders` request from the peer. - pub fn on_beacon_block_headers_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - let state = &self.chain.head().beacon_state; - - debug!( - self.log, - "BlockHeadersRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.max_headers, - ); - - let count = req.max_headers; - - // Collect the block roots. - let mut roots: Vec = self - .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) - .map(|(root, _slot)| root) - .collect(); - - roots.reverse(); - roots.dedup(); - - let headers: Vec = roots - .into_iter() - .step_by(req.skip_slots as usize + 1) - .filter_map(|root| { - let block = self - .chain - .store - .get::>(&root) - .ok()?; - Some(block?.block_header()) - }) - .collect(); - - // ssz-encode the headers - let headers = headers.as_ssz_bytes(); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers }), - ) - } - - /// Handle a `BeaconBlockHeaders` response from the peer. - pub fn on_beacon_block_headers_response( - &mut self, - peer_id: PeerId, - headers: Vec, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockHeadersResponse"; - "peer" => format!("{:?}", peer_id), - "count" => headers.len(), - ); - - if headers.is_empty() { - warn!( - self.log, - "Peer returned empty block headers response. PeerId: {:?}", peer_id - ); - return; - } - - // Enqueue the headers, obtaining a list of the roots of the headers which were newly added - // to the queue. - let block_roots = self.import_queue.enqueue_headers(headers, peer_id.clone()); - - if !block_roots.is_empty() { - self.request_block_bodies(peer_id, BeaconBlockBodiesRequest { block_roots }, network); - } - } - - /// Handle a `BeaconBlockBodies` request from the peer. 
- pub fn on_beacon_block_bodies_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - let block_bodies: Vec> = req - .block_roots - .iter() - .filter_map(|root| { + let blocks = Vec> = self + .chain.rev_iter_block_roots().filter(|(_root, slot) req.start_slot <= slot && req.start_slot + req.count >= slot).take_while(|(_root, slot) req.start_slot <= *slot) + .filter_map(|root, slot| { if let Ok(Some(block)) = self.chain.store.get::>(root) { Some(block.body) } else { @@ -494,59 +321,49 @@ impl SimpleSync { }) .collect(); - debug!( - self.log, - "BlockBodiesRequest"; - "peer" => format!("{:?}", peer_id), - "requested" => req.block_roots.len(), - "returned" => block_bodies.len(), - ); + roots.reverse(); + roots.dedup_by_key(|brs| brs.block_root); - let bytes = block_bodies.as_ssz_bytes(); + if roots.len() as u64 != req.count { + debug!( + self.log, + "BeaconBlocksRequest"; + "peer" => format!("{:?}", peer_id), + "msg" => "Failed to return all requested hashes", + "start_slot" => req.start_slot, + "current_slot" => self.chain.present_slot(), + "requested" => req.count, + "returned" => roots.len(), + ); + } network.send_rpc_response( peer_id, request_id, - RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { - block_bodies: bytes, - block_roots: None, - }), + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), ) } - /// Handle a `BeaconBlockBodies` response from the peer. - pub fn on_beacon_block_bodies_response( + + /// Handle a `BeaconBlocks` response from the peer. + pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, - res: DecodedBeaconBlockBodiesResponse, + res: Vec>, network: &mut NetworkContext, ) { debug!( self.log, - "BlockBodiesResponse"; + "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), "count" => res.block_bodies.len(), ); - if !res.block_bodies.is_empty() { - // Import all blocks to queue - let last_root = self - .import_queue - .enqueue_bodies(res.block_bodies, peer_id.clone()); - - // Attempt to process all received bodies by recursively processing the latest block - if let Some(root) = last_root { - if let Some(BlockProcessingOutcome::Processed { .. }) = - self.attempt_process_partial_block(peer_id, root, network, &"rpc") - { - // If processing is successful remove from `import_queue` - self.import_queue.remove(root); - } - } + if !res.is_empty() { + self.sync_manager.add_blocks(peer_id, blocks); } - // Clear out old entries - self.import_queue.remove_stale(); + self.process_sync(); } /// Process a gossip message declaring a new block. @@ -679,22 +496,6 @@ impl SimpleSync { network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req)); } - /// Request some `BeaconBlockBodies` from the remote peer. - fn request_block_bodies( - &mut self, - peer_id: PeerId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockBodies)"; - "count" => req.block_roots.len(), - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockBodies(req)); - } /// Returns `true` if `self.chain` has not yet processed this block. pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { From c259d6c00637e6372cc75afd1c6cd2debe009424 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 18 Aug 2019 03:36:13 +1000 Subject: [PATCH 18/24] First draft sync re-write. 
WIP --- beacon_node/network/src/message_handler.rs | 10 +- beacon_node/network/src/sync/import_queue.rs | 307 ------- beacon_node/network/src/sync/manager.rs | 810 +++++++++++++------ beacon_node/network/src/sync/simple_sync.rs | 409 ++-------- 4 files changed, 661 insertions(+), 875 deletions(-) delete mode 100644 beacon_node/network/src/sync/import_queue.rs diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 6a9a40369..fd10c5aea 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -118,7 +118,14 @@ impl MessageHandler { hello_message, &mut self.network_context, ), - RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason), + RPCRequest::Goodbye(goodbye_reason) => { + debug!( + self.log, "PeerGoodbye"; + "peer" => format!("{:?}", peer_id), + "reason" => format!("{:?}", reason), + ); + self.sync.on_disconnect(peer_id), + }, RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( peer_id, request_id, @@ -167,6 +174,7 @@ impl MessageHandler { Ok(beacon_blocks) => { self.sync.on_beacon_blocks_response( peer_id, + request_id, beacon_blocks, &mut self.network_context, ); diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs deleted file mode 100644 index 5503ed64f..000000000 --- a/beacon_node/network/src/sync/import_queue.rs +++ /dev/null @@ -1,307 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::PeerId; -use slog::error; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use tree_hash::TreeHash; -use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot}; - -/// Provides a queue for fully and partially built `BeaconBlock`s. -/// -/// The queue is fundamentally a `Vec` where no two items have the same -/// `item.block_root`. This struct it backed by a `Vec` not a `HashMap` for the following two -/// reasons: -/// -/// - When we receive a `BeaconBlockBody`, the only way we can find it's matching -/// `BeaconBlockHeader` is to find a header such that `header.beacon_block_body == -/// tree_hash_root(body)`. Therefore, if we used a `HashMap` we would need to use the root of -/// `BeaconBlockBody` as the key. -/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore -/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`. -pub struct ImportQueue { - pub chain: Arc>, - /// Partially imported blocks, keyed by the root of `BeaconBlockBody`. - partials: HashMap>, - /// Time before a queue entry is considered state. - pub stale_time: Duration, - /// Logging - log: slog::Logger, -} - -impl ImportQueue { - /// Return a new, empty queue. - pub fn new(chain: Arc>, stale_time: Duration, log: slog::Logger) -> Self { - Self { - chain, - partials: HashMap::new(), - stale_time, - log, - } - } - - /// Returns true of the if the `BlockRoot` is found in the `import_queue`. - pub fn contains_block_root(&self, block_root: Hash256) -> bool { - self.partials.contains_key(&block_root) - } - - /// Attempts to complete the `BlockRoot` if it is found in the `import_queue`. - /// - /// Returns an Enum with a `PartialBeaconBlockCompletion`. - /// Does not remove the `block_root` from the `import_queue`. 
- pub fn attempt_complete_block( - &self, - block_root: Hash256, - ) -> PartialBeaconBlockCompletion { - if let Some(partial) = self.partials.get(&block_root) { - partial.attempt_complete() - } else { - PartialBeaconBlockCompletion::MissingRoot - } - } - - /// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial - /// if it exists. - pub fn remove(&mut self, block_root: Hash256) -> Option> { - self.partials.remove(&block_root) - } - - /// Flushes all stale entries from the queue. - /// - /// An entry is stale if it has as a `inserted` time that is more than `self.stale_time` in the - /// past. - pub fn remove_stale(&mut self) { - let stale_time = self.stale_time; - - self.partials - .retain(|_, partial| partial.inserted + stale_time > Instant::now()) - } - - /// Returns `true` if `self.chain` has not yet processed this block. - pub fn chain_has_not_seen_block(&self, block_root: &Hash256) -> bool { - self.chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - true - }) - } - - /// Adds the `block_roots` to the partials queue. - /// - /// If a `block_root` is not in the queue and has not been processed by the chain it is added - /// to the queue and it's block root is included in the output. - pub fn enqueue_block_roots( - &mut self, - block_roots: &[BlockRootSlot], - sender: PeerId, - ) -> Vec { - // TODO: This will currently not return a `BlockRootSlot` if this root exists but there is no header. - // It would be more robust if it did. - let new_block_root_slots: Vec = block_roots - .iter() - // Ignore any roots already stored in the queue. - .filter(|brs| !self.contains_block_root(brs.block_root)) - // Ignore any roots already processed by the chain. - .filter(|brs| self.chain_has_not_seen_block(&brs.block_root)) - .cloned() - .collect(); - - self.partials.extend( - new_block_root_slots - .iter() - .map(|brs| PartialBeaconBlock { - slot: brs.slot, - block_root: brs.block_root, - sender: sender.clone(), - header: None, - body: None, - inserted: Instant::now(), - }) - .map(|partial| (partial.block_root, partial)), - ); - - new_block_root_slots - } - - /// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for - /// which we should use to request `BeaconBlockBodies`. - /// - /// If a `header` is not in the queue and has not been processed by the chain it is added to - /// the queue and it's block root is included in the output. - /// - /// If a `header` is already in the queue, but not yet processed by the chain the block root is - /// not included in the output and the `inserted` time for the partial record is set to - /// `Instant::now()`. Updating the `inserted` time stops the partial from becoming stale. - pub fn enqueue_headers( - &mut self, - headers: Vec, - sender: PeerId, - ) -> Vec { - let mut required_bodies: Vec = vec![]; - - for header in headers { - let block_root = Hash256::from_slice(&header.canonical_root()[..]); - - if self.chain_has_not_seen_block(&block_root) - && !self.insert_header(block_root, header, sender.clone()) - { - // If a body is empty - required_bodies.push(block_root); - } - } - - required_bodies - } - - /// If there is a matching `header` for this `body`, adds it to the queue. - /// - /// If there is no `header` for the `body`, the body is simply discarded. 
- pub fn enqueue_bodies( - &mut self, - bodies: Vec>, - sender: PeerId, - ) -> Option { - let mut last_block_hash = None; - for body in bodies { - last_block_hash = self.insert_body(body, sender.clone()); - } - - last_block_hash - } - - pub fn enqueue_full_blocks(&mut self, blocks: Vec>, sender: PeerId) { - for block in blocks { - self.insert_full_block(block, sender.clone()); - } - } - - /// Inserts a header to the queue. - /// - /// If the header already exists, the `inserted` time is set to `now` and not other - /// modifications are made. - /// Returns true is `body` exists. - fn insert_header( - &mut self, - block_root: Hash256, - header: BeaconBlockHeader, - sender: PeerId, - ) -> bool { - let mut exists = false; - self.partials - .entry(block_root) - .and_modify(|partial| { - partial.header = Some(header.clone()); - partial.inserted = Instant::now(); - if partial.body.is_some() { - exists = true; - } - }) - .or_insert_with(|| PartialBeaconBlock { - slot: header.slot, - block_root, - header: Some(header), - body: None, - inserted: Instant::now(), - sender, - }); - exists - } - - /// Updates an existing partial with the `body`. - /// - /// If the body already existed, the `inserted` time is set to `now`. - /// - /// Returns the block hash of the inserted body - fn insert_body( - &mut self, - body: BeaconBlockBody, - sender: PeerId, - ) -> Option { - let body_root = Hash256::from_slice(&body.tree_hash_root()[..]); - let mut last_root = None; - - self.partials.iter_mut().for_each(|(root, mut p)| { - if let Some(header) = &mut p.header { - if body_root == header.body_root { - p.inserted = Instant::now(); - p.body = Some(body.clone()); - p.sender = sender.clone(); - last_root = Some(*root); - } - } - }); - - last_root - } - - /// Updates an existing `partial` with the completed block, or adds a new (complete) partial. - /// - /// If the partial already existed, the `inserted` time is set to `now`. - fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { - let block_root = Hash256::from_slice(&block.canonical_root()[..]); - - let partial = PartialBeaconBlock { - slot: block.slot, - block_root, - header: Some(block.block_header()), - body: Some(block.body), - inserted: Instant::now(), - sender, - }; - - self.partials - .entry(block_root) - .and_modify(|existing_partial| *existing_partial = partial.clone()) - .or_insert(partial); - } -} - -/// Individual components of a `BeaconBlock`, potentially all that are required to form a full -/// `BeaconBlock`. -#[derive(Clone, Debug)] -pub struct PartialBeaconBlock { - pub slot: Slot, - /// `BeaconBlock` root. - pub block_root: Hash256, - pub header: Option, - pub body: Option>, - /// The instant at which this record was created or last meaningfully modified. Used to - /// determine if an entry is stale and should be removed. - pub inserted: Instant, - /// The `PeerId` that last meaningfully contributed to this item. - pub sender: PeerId, -} - -impl PartialBeaconBlock { - /// Attempts to build a block. - /// - /// Does not comsume the `PartialBeaconBlock`. - pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion { - if self.header.is_none() { - PartialBeaconBlockCompletion::MissingHeader(self.slot) - } else if self.body.is_none() { - PartialBeaconBlockCompletion::MissingBody - } else { - PartialBeaconBlockCompletion::Complete( - self.header - .clone() - .unwrap() - .into_block(self.body.clone().unwrap()), - ) - } - } -} - -/// The result of trying to convert a `BeaconBlock` into a `PartialBeaconBlock`. 
-pub enum PartialBeaconBlockCompletion { - /// The partial contains a valid BeaconBlock. - Complete(BeaconBlock), - /// The partial does not exist. - MissingRoot, - /// The partial contains a `BeaconBlockRoot` but no `BeaconBlockHeader`. - MissingHeader(Slot), - /// The partial contains a `BeaconBlockRoot` and `BeaconBlockHeader` but no `BeaconBlockBody`. - MissingBody, -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 52c1a72c6..a4ce544ec 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,283 +1,639 @@ +const MAX_BLOCKS_PER_REQUEST: usize = 10; -const MAXIMUM_BLOCKS_PER_REQUEST: usize = 10; -const SIMULTANEOUS_REQUESTS: usize = 10; -use super::simple_sync::FUTURE_SLOT_TOLERANCE; +/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. +const SLOT_IMPORT_TOLERANCE: u64 = 10; -struct Chunk { - id: usize, - start_slot: Slot, - end_slot: Slot, - } +const PARENT_FAIL_TOLERANCE: usize = 3; +const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE*2; - -struct CompletedChunk { - peer_id: PeerId, - chunk: Chunk, - blocks: Vec, +enum BlockRequestsState { + QueuedForward, + QueuedBackward, + Pending(RequestId), + Complete, } -struct ProcessedChunk { - peer_id: PeerId, - chunk: Chunk, +struct BlockRequests { + target_head_slot: Slot + target_head_root: Hash256, + downloaded_blocks: Vec, + state: State, } -#[derive(PartialEq)] -pub enum SyncState { - Idle, - Downloading, - ColdSync { - max_wanted_slot: Slot, - max_wanted_hash: Hash256, +struct ParentRequests { + downloaded_blocks: Vec, + attempts: usize, + last_submitted_peer: PeerId, // to downvote the submitting peer. + state: BlockRequestsState, +} + +impl BlockRequests { + + // gets the start slot for next batch + // last block slot downloaded plus 1 + fn next_start_slot(&self) -> Option { + if !self.downloaded_blocks.is_empty() { + match self.state { + BlockRequestsState::QueuedForward => { + let last_element_index = self.downloaded_blocks.len() -1; + Some(downloaded_blocks[last_element_index].slot.add(1)) + } + BlockRequestsState::QueuedBackward => { + let earliest_known_slot = self.downloaded_blocks[0].slot; + Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) + } + } + } + else { + None + } } } -pub enum SyncManagerState { - RequestBlocks(peer_id, BeaconBlockRequest), +enum ManagerState { + Syncing, + Regular, Stalled, - Idle, } -pub struct PeerSyncInfo { - peer_id: PeerId, - fork_version: [u8,4], - finalized_root: Hash256, - finalized_epoch: Epoch, - head_root: Hash256, - head_slot: Slot, - requested_slot_skip: Option<(Slot, usize)>, +enum ImportManagerOutcome { + Idle, + RequestBlocks{ + peer_id: PeerId, + request_id: RequestId, + request: BeaconBlocksRequest, + }, + RecentRequest(PeerId, RecentBeaconBlocksRequest), + DownvotePeer(PeerId), } -pub(crate) struct SyncManager { + +pub struct ImportManager { /// A reference to the underlying beacon chain. chain: Arc>, - /// A mapping of Peers to their respective PeerSyncInfo. 
- available_peers: HashMap, - wanted_chunks: Vec, - pending_chunks: HashMap, - completed_chunks: Vec, - processed_chunks: Vec, // ordered - multi_peer_sections: HashMap - - current_requests: usize, - latest_wanted_slot: Option, - sync_status: SyncStatus, - to_process_chunk_id: usize, + state: MangerState, + import_queue: HashMap, + parent_queue: Vec, + full_peers: Hashset, + current_req_id: usize, log: Logger, - } -impl SyncManager { - /// Adds a sync-able peer and determines which blocks to download given the current state of - /// the chain, known peers and currently requested blocks. - fn add_sync_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo, network &mut NetworkContext) { +impl ImportManager { + pub fn add_peer(&mut self, peer_id, remote: PeerSyncInfo) { + // TODO: Improve comments. + // initially try to download blocks from our current head + // then backwards search all the way back to our finalized epoch until we match on a chain + // has to be done sequentially to find next slot to start the batch from + let local = PeerSyncInfo::from(&self.chain); - let remote_finalized_slot = remote.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let local_finalized_slot = local.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - // cold sync - if remote_finalized_slot > local.head_slot { - if let SyncState::Idle || SyncState::Downloading = self.sync_state { - info!(self.log, "Cold Sync Started", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); - self.sync_state = SyncState::ColdSync{Slot::from(0), remote.finalized_hash} - } - - if let SyncState::ColdSync{max_wanted_slot, max_wanted_hjash } = self.sync_state { - - // We don't assume that our current head is the canonical chain. So we request blocks from - // our last finalized slot to ensure we are on the finalized chain. 
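            // A minimal sketch of the batching arithmetic used by the chunking code below,
            // assuming a hypothetical helper and plain u64 slots (illustration only, not
            // part of this patch): the span between our last finalized slot and the
            // remote's finalized slot is split into fixed-size block requests.
            fn batch_start_slots(start_slot: u64, end_slot: u64, batch_size: u64) -> Vec<u64> {
                // Each element is the first slot of one batch; the final batch may be
                // shorter than `batch_size`.
                (start_slot..end_slot).step_by(batch_size as usize).collect()
            }
            // e.g. batch_start_slots(0, 25, 10) == vec![0, 10, 20], i.e. batches covering
            // [0, 10), [10, 20) and a short final batch [20, 25).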
- if max_wanted_slot < remote_finalized_slot { - let remaining_blocks = remote_finalized_slot - max_wanted_slot; - for chunk in (0..remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) { - self.wanted_chunks.push( - Chunk { - id: self.current_chunk_id, - previous_chunk: self.curent_chunk_id.saturating_sub(1), - start_slot: chunk*MAXIMUM_BLOCKS_PER_REQUEST + self.last_wanted_slot, - end_slot: (section+1)*MAXIMUM_BLOCKS_PER_REQUEST +self.last_wanted_slot, - }) - self.current_chunk_id +=1; - } - - // add any extra partial chunks - self.pending_section.push( Section { - start_slot: (remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) + 1, - end_slot: remote_finalized_slot, - }) - self.current_chunk_id +=1; - - info!(self.log, "Cold Sync Updated", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); - - self.sync_state = SyncState::ColdSync{remote_finalized_slot, remote.finalized_hash} - } + // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync + if remote.head_slot.sub(local.head_slot) < SLOT_IMPORT_TOLERANCE { + trace!(self.log, "Ignoring full sync with peer"; + "peer" => peer_id, + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local.head_slot, + ); + // remove the peer from the queue if it exists + self.import_queue.remove(&peer_id); + return; } - else { // hot sync - if remote_head_slot > self.chain.head().beacon_state.slot { - if let SyncState::Idle = self.sync_state { - self.sync_state = SyncState::Downloading - info!(self.log, "Sync Started", "start_slot" => local.head_slot, "latest_known_head" => remote.head_slot.as_u64()); + if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { + // update the target head slot + if remote.head_slot > requested_block.target_head_slot { + block_requests.target_head_slot = remote.head_slot; } - self.latest_known_slot = remote_head_slot; - //TODO Build requests. + } else { + let block_requests = BlockRequests { + target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called + target_head_root: remote.head_root, + downloaded_blocks: Vec::new(), + state: RequestedBlockState::Queued } + self.import_queue.insert(peer_id, block_requests); } - available_peers.push(remote); - } - pub fn add_blocks(&mut self, chunk_id: RequestId, peer_id: PeerId, blocks: Vec) { - - if SyncState::ColdSync{max_wanted_slot, max_wanted_hash} = self.sync_state { - - let chunk = match self.pending_chunks.remove(&peer_id) { - Some(chunks) => { - match chunks.find(|chunk| chunk.id == chunk_id) { - Some(chunk) => chunk, - None => { - warn!(self.log, "Received blocks for an unknown chunk"; - "peer"=> peer_id); - return; - } - } - }, - None => { - warn!(self.log, "Received blocks without a request"; - "peer"=> peer_id); + pub fn beacon_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { + + // find the request + let block_requests = match self.import_queue.get_mut(&peer_id) { + Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + None => { + // No pending request, invalid request_id or coding error + warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id); return; - } - }; + } + }; - // add to completed - self.current_requests -= 1; - self.completed_chunks.push(CompletedChunk(peer_id, Chunk)); + // The response should contain at least one block. + // + // If we are syncing up to a target head block, at least the target head block should be + // returned. 
If we are syncing back to our last finalized block the request should return + // at least the last block we received (last known block). In diagram form: + // + // unknown blocks requested blocks downloaded blocks + // |-------------------|------------------------|------------------------| + // ^finalized slot ^ requested start slot ^ last known block ^ remote head + + if blocks.is_empty() { + warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); + block_requests.state = RequestedBlockState::Failed; + return; + } + + // Add the newly downloaded blocks to the current list of downloaded blocks. This also + // determines if we are syncing forward or backward. + let syncing_forwards = { + if block_requests.blocks.is_empty() { + block_requests.blocks.push(blocks); + true + } + else if block_requests.blocks[0].slot < blocks[0].slot { // syncing forwards + // verify the peer hasn't sent overlapping blocks - ensuring the strictly + // increasing blocks in a batch will be verified during the processing + if block_requests.next_slot() > blocks[0].slot { + warn!(self.log, "BeaconBlocks response returned duplicate blocks", "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_slot()); + block_requests.state = RequestedBlockState::Failed; + return; + } + + block_requests.blocks.push(blocks); + true + } + else { false } + }; + + + // Determine if more blocks need to be downloaded. There are a few cases: + // - We have downloaded a batch from our head_slot, which has not reached the remotes head + // (target head). Therefore we need to download another sequential batch. + // - The latest batch includes blocks that greater than or equal to the target_head slot, + // which means we have caught up to their head. We then check to see if the first + // block downloaded matches our head. If so, we are on the same chain and can process + // the blocks. If not we need to sync back further until we are on the same chain. So + // request more blocks. + // - We are syncing backwards (from our head slot) and need to check if we are on the same + // chain. If so, process the blocks, if not, request more blocks all the way up to + // our last finalized slot. + + if syncing_forwards { + // does the batch contain the target_head_slot + let last_element_index = block_requests.blocks.len()-1; + if block_requests[last_element_index].slot >= block_requests.target_slot { + // if the batch is on our chain, this is complete and we can then process. + // Otherwise start backwards syncing until we reach a common chain. + let earliest_slot = block_requests_blocks[0].slot + if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { + block_requests.state = RequestedBlockState::Complete; + return; + } + + // not on the same chain, request blocks backwards + // binary search, request half the distance between the earliest block and our + // finalized slot + let state = &beacon_chain.head().beacon_state; + let local_finalized_slot = state.finalized_checkpoint.epoch; //TODO: Convert to slot + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.blocks[0] { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = RequestedBlockState::Failed; + return; + } + + // Start a backwards sync by requesting earlier blocks + // There can be duplication in downloaded blocks here if there are a large number + // of skip slots. 
In all cases we at least re-download the earliest known block. + // It is unlikely that a backwards sync in required, so we accept this duplication + // for now. + block_requests.state = RequestedBlockState::QueuedBackward; + } + else { + // batch doesn't contain the head slot, request the next batch + block_requests.state = RequestedBlockState::QueuedForward; + } + } + else { + // syncing backwards + // if the batch is on our chain, this is complete and we can then process. + // Otherwise continue backwards + let earliest_slot = block_requests_blocks[0].slot + if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { + block_requests.state = RequestedBlockState::Complete; + return; + } + block_requests.state = RequestedBlockState::QueuedBackward; + } } - pub fn inject_error(id: RequestId, peer_id) { - if let SyncState::ColdSync{ _max_wanted_slot, _max_wanted_hash } { - match self.pending_chunks.get(&peer_id) { - Some(chunks) => { - if let Some(pos) = chunks.iter().position(|c| c.id == id) { - chunks.remove(pos); - } - }, - None => { - debug!(self.log, - "Received an error for an unknown request"; - "request_id" => id, - "peer" => peer_id - ); + pub fn recent_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { + + // find the request + let parent_request = match self.parent_queue.get_mut(&peer_id) { + Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + None => { + // No pending request, invalid request_id or coding error + warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id); + return; + } + }; + + // if an empty response is given, the peer didn't have the requested block, try again + if blocks.is_empty() { + parent_request.attempts += 1; + parent_request.state = RequestedBlockState::QueuedForward; + parent_request.last_submitted_peer = peer_id; + return; + } + + // currently only support a single block lookup. Reject any response that has more than 1 + // block + if blocks.len() != 1 { + //TODO: Potentially downvote the peer + debug!(self.log, "Peer sent more than 1 parent. 
Ignoring"; + "peer_id" => peer_id, + "no_parents" => blocks.len() + ); + return; + } + + + // queue for processing + parent_request.state = RequestedBlockState::Complete; + } + + + pub fn inject_error(peer_id: PeerId, id: RequestId) { + //TODO: Remove block state from pending + } + + pub fn peer_disconnect(peer_id: PeerId) { + self.import_queue.remove(&peer_id); + self.full_peers.remove(&peer_id); + self.update_state(); + } + + pub fn add_full_peer(peer_id: PeerId) { + debug!( + self.log, "Fully synced peer added"; + "peer" => format!("{:?}", peer_id), + ); + self.full_peers.insert(peer_id); + self.update_state(); + } + + pub fn add_unknown_block(&mut self,block: BeaconBlock) { + // if we are not in regular sync mode, ignore this block + if self.state == ManagerState::Regular { + return; + } + + // make sure this block is not already being searched for + // TODO: Potentially store a hashset of blocks for O(1) lookups + for parent_req in self.parent_queue.iter() { + if let Some(_) = parent_req.downloaded_blocks.iter().find(|d_block| d_block == block) { + // we are already searching for this block, ignore it + return; + } + } + + let req = ParentRequests { + downloaded_blocks: vec![block], + failed_attempts: 0, + state: RequestedBlockState::QueuedBackward + } + + self.parent_queue.push(req); + } + + pub fn poll() -> ImportManagerOutcome { + + loop { + // update the state of the manager + self.update_state(); + + // process potential block requests + if let Some(outcome) = self.process_potential_block_requests() { + return outcome; + } + + // process any complete long-range batches + if let Some(outcome) = self.process_complete_batches() { + return outcome; + } + + // process any parent block lookup-requests + if let Some(outcome) = self.process_parent_requests() { + return outcome; + } + + // process any complete parent lookups + if let (re_run, outcome) = self.process_complete_parent_requests() { + if let Some(outcome) = outcome { + return outcome; + } + else if !re_run { + break; } } } + + return ImportManagerOutcome::Idle; + } - pub fn poll(&mut self) -> SyncManagerState { - // if cold sync - if let SyncState::ColdSync(waiting_slot, max_wanted_slot, max_wanted_hash) = self.sync_state { + fn update_state(&mut self) { + let previous_state = self.state; + self.state = { + if !self.import_queue.is_empty() { + ManagerState::Syncing + } + else if !self.full_peers.is_empty() { + ManagerState::Regualar + } + else { + ManagerState::Stalled } + }; + if self.state != previous_state { + info!(self.log, "Syncing state updated", + "old_state" => format!("{:?}", previous_state) + "new_state" => format!("{:?}", self.state) + ); + } + } - // Try to process completed chunks - for completed_chunk in self.completed_chunks { - let chunk = completed_chunk.1; - let last_chunk_id = { - let no_processed_chunks = self.processed_chunks.len(); - if elements == 0 { 0 } else { self.processed_chunks[no_processed_chunks].id } - }; - if chunk.id == last_chunk_id + 1 { - // try and process the chunk - for block in chunk.blocks { - let processing_result = self.chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutCome::Processed { block_root} => { - // block successfully processed - }, - BlockProcessingOutcome::BlockIsAlreadyKnown => { - warn!( - self.log, "Block Already Known"; - "source" => source, - "sync" => "Cold Sync", - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", chunk.0), - ); - }, - _ => { - // 
An error has occurred - // This could be due to the previous chunk or the current chunk. - // Re-issue both. - warn!( - self.log, "Faulty Chunk"; - "source" => source, - "sync" => "Cold Sync", - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", chunk.0), - "outcome" => format!("{:?}", outcome), - ); - // re-issue both chunks - // if both are the same peer. Downgrade the peer. - let past_chunk = self.processed_chunks.pop() - self.wanted_chunks.insert(0, chunk.clone()); - self.wanted_chunks.insert(0, past_chunk.clone()); - if chunk.0 == past_chunk.peer_id { - // downgrade peer - return SyncManagerState::DowngradePeer(chunk.0); - } - break; - } - } - } - } - // chunk successfully processed - debug!(self.log, - "Chunk Processed"; - "id" => chunk.id - "start_slot" => chunk.start_slot, - "end_slot" => chunk.end_slot, + fn process_potential_block_requests(&mut self) -> Option { + // check if an outbound request is required + // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p + // layer and not needed here. + // If any in queued state we submit a request. + + + // remove any failed batches + self.import_queue.retain(|peer_id, block_request| { + if block_request.state == RequestedBlockState::Failed { + debug!(self.log, "Block import from peer failed", + "peer_id" => peer_id, + "downloaded_blocks" => block_request.downloaded.blocks.len() ); - self.processed_chunks.push(chunk); - } + false } + else { true } + }); - // chunks completed, update the state - self.sync_state = SyncState::ColdSync{waiting_slot, max_wanted_slot, max_wanted_hash}; - // Remove stales + for (peer_id, block_requests) in self.import_queue.iter_mut() { + if let Some(request) = requests.iter().find(|req| req.state == RequestedBlockState::QueuedForward || req.state == RequestedBlockState::QueuedBackward) { - // Spawn requests - if self.current_requests <= SIMULTANEOUS_REQUESTS { - if !self.wanted_chunks.is_empty() { - let chunk = self.wanted_chunks.remove(0); - for n in (0..self.peers.len()).rev() { - let peer = self.peers.swap_remove(n); - let peer_finalized_slot = peer.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - if peer_finalized_slot >= chunk.end_slot { - *self.pending.chunks.entry(&peer_id).or_insert_with(|| Vec::new).push(chunk); - self.active_peers.push(peer); - self.current_requests +=1; - let block_request = BeaconBlockRequest { - head_block_root, - start_slot: chunk.start_slot, - count: chunk.end_slot - chunk.start_slot - step: 1 - } - return SyncManagerState::BlockRequest(peer, block_request); - } - } - // no peers for this chunk - self.wanted_chunks.push(chunk); - return SyncManagerState::Stalled + let request.state = RequestedBlockState::Pending(self.current_req_id); + self.current_req_id +=1; + + let req = BeaconBlocksRequest { + head_block_root: request.target_root, + start_slot: request.next_start_slot().unwrap_or_else(|| self.chain.head().slot), + count: MAX_BLOCKS_PER_REQUEST, + step: 0 } + return Some(ImportManagerOutCome::RequestBlocks{ peer_id, req }); } } - // if hot sync - return SyncManagerState::Idle + None + } + + fn process_complete_batches(&mut self) -> Option { + + let completed_batches = self.import_queue.iter().filter(|_peer, block_requests| block_requests.state == RequestedState::Complete).map(|peer, _| peer).collect::>(); + for peer_id in completed_batches { + let block_requests = self.import_queue.remove(&peer_id).unwrap("key exists"); + match 
self.process_blocks(block_requests.downloaded_blocks) { + Ok(()) => { + //TODO: Verify it's impossible to have empty downloaded_blocks + last_element = block_requests.downloaded_blocks.len() -1 + debug!(self.log, "Blocks processed successfully"; + "peer" => peer_id, + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + ); + // Re-HELLO to ensure we are up to the latest head + return Some(ImportManagerOutcome::Hello(peer_id)); + } + Err(e) => { + last_element = block_requests.downloaded_blocks.len() -1 + warn!(self.log, "Block processing failed"; + "peer" => peer_id, + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + return Some(ImportManagerOutcome::DownvotePeer(peer_id)); + } + } + } + None + } + + + fn process_parent_requests(&mut self) -> Option { + + // remove any failed requests + self.parent_queue.retain(|parent_request| { + if parent_request.state == RequestedBlockState::Failed { + debug!(self.log, "Parent import failed", + "block" => parent_request.downloaded_blocks[0].hash, + "siblings found" => parent_request.len() + ); + false + } + else { true } + }); + + // check to make sure there are peers to search for the parent from + if self.full_peers.is_empty() { + return; + } + + // check if parents need to be searched for + for parent_request in self.parent_queue.iter_mut() { + if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { + parent_request.state == BlockRequestsState::Failed + continue; + } + else if parent_request.state == BlockRequestsState::QueuedForward { + parent_request.state = BlockRequestsState::Pending(self.current_req_id); + self.current_req_id +=1; + let parent_hash = + let req = RecentBeaconBlocksRequest { + block_roots: vec![parent_hash], + }; + + // select a random fully synced peer to attempt to download the parent block + let peer_id = self.full_peers.iter().next().expect("List is not empty"); + + return Some(ImportManagerOutcome::RecentRequest(peer_id, req); + } + } + + None + } + + + fn process_complete_parent_requests(&mut self) => (bool, Option) { + + // flag to determine if there is more process to drive or if the manager can be switched to + // an idle state + let mut re_run = false; + + // verify the last added block is the parent of the last requested block + let last_index = parent_requests.downloaded_blocks.len() -1; + let expected_hash = parent_requests.downloaded_blocks[last_index].parent ; + let block_hash = parent_requests.downloaded_blocks[0].tree_hash_root(); + if block_hash != expected_hash { + //TODO: Potentially downvote the peer + debug!(self.log, "Peer sent invalid parent. 
Ignoring"; + "peer_id" => peer_id, + "received_block" => block_hash, + "expected_parent" => expected_hash, + ); + return; + } + + // Find any parent_requests ready to be processed + for completed_request in self.parent_queue.iter_mut().filter(|req| req.state == BlockRequestsState::Complete) { + // try and process the list of blocks up to the requested block + while !completed_request.downloaded_blocks.is_empty() { + let block = completed_request.downloaded_blocks.pop(); + match self.chain_process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent } => { + // need to keep looking for parents + completed_request.downloaded_blocks.push(block); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + break; + } + Ok(BlockProcessingOutcome::Processed { _ } => { } + Ok(outcome) => { // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts +=1; + trace!( + self.log, "Invalid parent block"; + "outcome" => format!("{:?}", outcome); + "peer" => format!("{:?}", completed_request.last_submitted_peer), + ); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + } + Err(e) => { + completed_request.failed_attempts +=1; + warn!( + self.log, "Parent processing error"; + "error" => format!("{:?}", e); + ); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + } + } + } + } + + // remove any full completed and processed parent chains + self.parent_queue.retain(|req| if req.state == BlockRequestsState::Complete { false } else { true }); + (re_run, None) } + + + fn process_blocks( + &mut self, + blocks: Vec>, + ) -> Result<(), String> { + + for block in blocks { + let processing_result = self.chain.process_block(block.clone()); + + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. + trace!( + self.log, "Imported block from network"; + "source" => source, + "slot" => block.slot, + "block_root" => format!("{}", block_root), + "peer" => format!("{:?}", peer_id), + ); + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + self.log, "ParentBlockUnknown"; + "source" => source, + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!("Block at slot {} has an unknown parent.", block.slot)); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + trace!( + self.log, "FutureBlock"; + "source" => source, + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + return Err(format!("Block at slot {} is too far in the future", block.slot)); + } else { + // The block is in the future, but not too far. 
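                            // Context: FUTURE_SLOT_TOLERANCE (defined in simple_sync.rs) bounds how
                            // many slots ahead of the local slot clock a block may be before it is
                            // treated as a far-future block; e.g. with a tolerance of 1, a block two
                            // or more slots ahead of `present_slot` falls outside the tolerance.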
+ trace!( + self.log, "QueuedFutureBlock"; + "source" => source, + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + } + } + _ => { + trace!( + self.log, "InvalidBlock"; + "source" => source, + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + "peer" => format!("{:?}", peer_id), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } + } + Ok(()) + } else { + trace!( + self.log, "BlockProcessingFailure"; + "source" => source, + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!("Unexpected block processing error: {:?}", processing_result)); + } + } + } +} diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 6e5cada23..a7f5ced40 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -14,11 +14,6 @@ use types::{ Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, }; -/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. -const SLOT_IMPORT_TOLERANCE: u64 = 100; - -/// The amount of seconds a block may exist in the import queue. -const QUEUE_STALE_SECS: u64 = 100; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. @@ -35,9 +30,11 @@ pub struct PeerSyncInfo { finalized_epoch: Epoch, head_root: Hash256, head_slot: Slot, - requested_slot_skip: Option<(Slot, usize)>, } + + + impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { @@ -69,10 +66,7 @@ pub enum SyncState { pub struct SimpleSync { /// A reference to the underlying beacon chain. chain: Arc>, - /// A mapping of Peers to their respective PeerSyncInfo. - known_peers: HashMap, - /// The current state of the syncing protocol. - state: SyncState, + manager: ImportManager, log: slog::Logger, } @@ -81,49 +75,24 @@ impl SimpleSync { pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); - let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS); - - let import_queue = - ImportQueue::new(beacon_chain.clone(), queue_item_stale_time, log.clone()); SimpleSync { chain: beacon_chain.clone(), - known_peers: HashMap::new(), - import_queue, - state: SyncState::Idle, + manager: ImportManager::new(), log: sync_logger, } } - /// Handle a `Goodbye` message from a peer. - /// - /// Removes the peer from `known_peers`. - pub fn on_goodbye(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - info!( - self.log, "PeerGoodbye"; - "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", reason), - ); - - self.known_peers.remove(&peer_id); - } - /// Handle a peer disconnect. /// - /// Removes the peer from `known_peers`. + /// Removes the peer from the manager. pub fn on_disconnect(&mut self, peer_id: PeerId) { - info!( - self.log, "Peer Disconnected"; - "peer" => format!("{:?}", peer_id), - ); - self.known_peers.remove(&peer_id); + self.manager.peer_disconnect(&peer_id); } /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. 
pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); - network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); } @@ -137,7 +106,7 @@ impl SimpleSync { hello: HelloMessage, network: &mut NetworkContext, ) { - debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. network.send_rpc_response( @@ -156,7 +125,7 @@ impl SimpleSync { hello: HelloMessage, network: &mut NetworkContext, ) { - debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); // Process the hello message, without sending back another hello. self.process_hello(peer_id, hello, network); @@ -178,7 +147,7 @@ impl SimpleSync { if local.fork_version != remote.fork_version { // The node is on a different network/fork, disconnect them. - info!( + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "network_id" @@ -195,7 +164,7 @@ impl SimpleSync { // different to the one in our chain. // // Therefore, the node is on a different chain and we should not communicate with them. - info!( + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" @@ -227,13 +196,10 @@ impl SimpleSync { .exists::>(&remote.best_root) .unwrap_or_else(|_| false) { - // If the node's best-block is already known to us, we have nothing to request. - debug!( - self.log, - "NaivePeer"; - "peer" => format!("{:?}", peer_id), - "reason" => "best block is known" - ); + // If the node's best-block is already known to us and they are close to our current + // head, treat them as a fully sync'd peer. + self.import_manager.add_full_peer(peer_id); + self.process_sync(); } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
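            // The remote peer passed the handshake checks but has a head block we have
            // not seen, so it is handed to the ImportManager, which batch-downloads
            // BeaconBlocks from roughly our head slot up to the peer's head (unless the
            // peer's head is within SLOT_IMPORT_TOLERANCE of ours, in which case batch
            // sync is skipped; see ImportManager::add_peer).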
// @@ -246,43 +212,60 @@ impl SimpleSync { "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, ); - + self.import_manager.add_peer(peer_id, remote); self.process_sync(); } } self.proess_sync(&mut self) { loop { - match self.sync_manager.poll() { - SyncManagerState::RequestBlocks(peer_id, req) { - debug!( + match self.import_manager.poll() { + ImportManagerOutcome::RequestBlocks(peer_id, req) { + trace!( self.log, - "RPCRequest(BeaconBlockBodies)"; - "count" => req.block_roots.len(), + "RPC Request"; + "method" => "BeaconBlocks", + "count" => req.count, "peer" => format!("{:?}", peer_id) ); network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); }, - SyncManagerState::Stalled { - // need more peers to continue sync - warn!(self.log, "No useable peers for sync"); - break; + ImportManagerOutcome::RecentRequest(peer_id, req) { + trace!( + self.log, + "RPC Request"; + "method" => "RecentBeaconBlocks", + "count" => req.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + }, + ImportManagerOutcome::DownvotePeer(peer_id) { + trace!( + self.log, + "Peer downvoted"; + "peer" => format!("{:?}", peer_id) + ); + // TODO: Implement reputation + network.disconnect(peer_id.clone(), GoodbyeReason::Fault); }, SyncManagerState::Idle { // nothing to do - break; + return; } } } } + /* fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) } + */ /// Handle a `BeaconBlocks` request from the peer. pub fn on_beacon_blocks_request( @@ -346,8 +329,8 @@ impl SimpleSync { pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, + request_id: RequestId, res: Vec>, - network: &mut NetworkContext, ) { debug!( self.log, @@ -356,9 +339,26 @@ impl SimpleSync { "count" => res.block_bodies.len(), ); - if !res.is_empty() { - self.sync_manager.add_blocks(peer_id, blocks); - } + self.import_manager.beacon_blocks_response(peer_id, request_id, blocks); + + self.process_sync(); + } + + /// Handle a `RecentBeaconBlocks` response from the peer. + pub fn on_recent_beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + res: Vec>, + ) { + debug!( + self.log, + "BeaconBlocksResponse"; + "peer" => format!("{:?}", peer_id), + "count" => res.block_bodies.len(), + ); + + self.import_manager.recent_blocks_response(peer_id, request_id, blocks); self.process_sync(); } @@ -372,7 +372,6 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, ) -> bool { if let Some(outcome) = self.process_block(peer_id.clone(), block.clone(), network, &"gossip") @@ -380,53 +379,17 @@ impl SimpleSync { match outcome { BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, BlockProcessingOutcome::ParentUnknown { parent } => { - // Add this block to the queue - self.import_queue - .enqueue_full_blocks(vec![block.clone()], peer_id.clone()); - debug!( - self.log, "RequestParentBlock"; - "parent_root" => format!("{}", parent), - "parent_slot" => block.slot - 1, - "peer" => format!("{:?}", peer_id), - ); - - // Request roots between parent and start of finality from peer. 
- let start_slot = self - .chain - .head() - .beacon_state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - // Request blocks between `latest_finalized_slot` and the `block` - start_slot, - count: block.slot.as_u64() - start_slot.as_u64(), - }, - network, - ); - - // Clean the stale entries from the queue. - self.import_queue.remove_stale(); - + // Inform the sync manager to find parents for this block + self.import_manager.add_unknown_block(block.clone()); SHOULD_FORWARD_GOSSIP_BLOCK } - BlockProcessingOutcome::FutureSlot { present_slot, block_slot, } if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => { - self.import_queue - .enqueue_full_blocks(vec![block], peer_id.clone()); - + //TODO: Decide the logic here SHOULD_FORWARD_GOSSIP_BLOCK } - // Note: known blocks are forwarded on the gossip network. - // - // We rely upon the lower layers (libp2p) to stop loops occurring from re-gossiped - // blocks. BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, } @@ -457,48 +420,8 @@ impl SimpleSync { } } - /// Request some `BeaconBlockRoots` from the remote peer. - fn request_block_roots( - &mut self, - peer_id: PeerId, - req: BeaconBlockRootsRequest, - network: &mut NetworkContext, - ) { - // Potentially set state to sync. - if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { - debug!(self.log, "Entering downloading sync state."); - self.state = SyncState::Downloading; - } - - debug!( - self.log, - "RPCRequest(BeaconBlockRoots)"; - "count" => req.count, - "peer" => format!("{:?}", peer_id) - ); - - // TODO: handle count > max count. - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockRoots(req)); - } - - /// Request some `BeaconBlockHeaders` from the remote peer. - fn request_block_headers( - &mut self, - peer_id: PeerId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockHeaders)"; - "max_headers" => req.max_headers, - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req)); - } - +/* /// Returns `true` if `self.chain` has not yet processed this block. pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { !self @@ -509,207 +432,13 @@ impl SimpleSync { false }) } + */ /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { hello_message(&self.chain) } - /// Helper function to attempt to process a partial block. - /// - /// If the block can be completed recursively call `process_block` - /// else request missing parts. - fn attempt_process_partial_block( - &mut self, - peer_id: PeerId, - block_root: Hash256, - network: &mut NetworkContext, - source: &str, - ) -> Option { - match self.import_queue.attempt_complete_block(block_root) { - PartialBeaconBlockCompletion::MissingBody => { - // Unable to complete the block because the block body is missing. - debug!( - self.log, "RequestParentBody"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block body from the peer. 
- self.request_block_bodies( - peer_id, - BeaconBlockBodiesRequest { - block_roots: vec![block_root], - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingHeader(slot) => { - // Unable to complete the block because the block header is missing. - debug!( - self.log, "RequestParentHeader"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block header from the peer. - self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: block_root, - start_slot: slot, - max_headers: 1, - skip_slots: 0, - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingRoot => { - // The `block_root` is not known to the queue. - debug!( - self.log, "MissingParentRoot"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Do nothing. - - None - } - PartialBeaconBlockCompletion::Complete(block) => { - // The block exists in the queue, attempt to process it - trace!( - self.log, "AttemptProcessParent"; - "source" => source, - "block_root" => format!("{}", block_root), - "parent_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - self.process_block(peer_id.clone(), block, network, source) - } - } - } - - /// Processes the `block` that was received from `peer_id`. - /// - /// If the block was submitted to the beacon chain without internal error, `Some(outcome)` is - /// returned, otherwise `None` is returned. Note: `Some(_)` does not necessarily indicate that - /// the block was successfully processed or valid. - /// - /// This function performs the following duties: - /// - /// - Attempting to import the block into the beacon chain. - /// - Logging - /// - Requesting unavailable blocks (e.g., if parent is unknown). - /// - Disconnecting faulty nodes. - /// - /// This function does not remove processed blocks from the import queue. - fn process_block( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - network: &mut NetworkContext, - source: &str, - ) -> Option { - let processing_result = self.chain.process_block(block.clone()); - - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. - debug!( - self.log, "Imported block from network"; - "source" => source, - "slot" => block.slot, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // The parent has not been processed - trace!( - self.log, "ParentBlockUnknown"; - "source" => source, - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - // If the parent is in the `import_queue` attempt to complete it then process it. - // All other cases leave `parent` in `import_queue` and return original outcome. - if let Some(BlockProcessingOutcome::Processed { .. 
}) = - self.attempt_process_partial_block(peer_id, parent, network, source) - { - // If processing parent is successful, re-process block and remove parent from queue - self.import_queue.remove(parent); - - // Attempt to process `block` again - match self.chain.process_block(block) { - Ok(outcome) => return Some(outcome), - Err(_) => return None, - } - } - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - warn!( - self.log, "FutureBlock"; - "source" => source, - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - network.disconnect(peer_id, GoodbyeReason::Fault); - } else { - // The block is in the future, but not too far. - debug!( - self.log, "QueuedFutureBlock"; - "source" => source, - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - } - } - _ => { - debug!( - self.log, "InvalidBlock"; - "source" => source, - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - } - } - - Some(outcome) - } else { - error!( - self.log, "BlockProcessingFailure"; - "source" => source, - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - - None - } - } } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. From 0c3fdcd57c2ffb08d36a2af4f038fe0ac92cc7d8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 15:53:53 +1000 Subject: [PATCH 19/24] Bootstrap (#501) * Renamed fork_choice::process_attestation_from_block * Processing attestation in fork choice * Retrieving state from store and checking signature * Looser check on beacon state validity. * Cleaned up get_attestation_state * Expanded fork choice api to provide latest validator message. * Checking if the an attestation contains a latest message * Correct process_attestation error handling. * Copy paste error in comment fixed. * Tidy ancestor iterators * Getting attestation slot via helper method * Refactored attestation creation in test utils * Revert "Refactored attestation creation in test utils" This reverts commit 4d277fe4239a7194758b18fb5c00dfe0b8231306. * Integration tests for free attestation processing * Implicit conflicts resolved. * formatting * Do first pass on Grants code * Add another attestation processing test * Tidy attestation processing * Remove old code fragment * Add non-compiling half finished changes * Simplify, fix bugs, add tests for chain iters * Remove attestation processing from op pool * Fix bug with fork choice, tidy * Fix overly restrictive check in fork choice. * Ensure committee cache is build during attn proc * Ignore unknown blocks at fork choice * Various minor fixes * Make fork choice write lock in to read lock * Remove unused method * Tidy comments * Fix attestation prod. target roots change * Fix compile error in store iters * Reject any attestation prior to finalization * Begin metrics refactor * Move beacon_chain to new metrics structure. 
* Make metrics not panic if already defined * Use global prometheus gather at rest api * Unify common metric fns into a crate * Add heavy metering to block processing * Remove hypen from prometheus metric name * Add more beacon chain metrics * Add beacon chain persistence metric * Prune op pool on finalization * Add extra prom beacon chain metrics * Prefix BeaconChain metrics with "beacon_" * Add more store metrics * Add basic metrics to libp2p * Add metrics to HTTP server * Remove old `http_server` crate * Update metrics names to be more like standard * Fix broken beacon chain metrics, add slot clock metrics * Add lighthouse_metrics gather fn * Remove http args * Fix wrong state given to op pool prune * Make prom metric names more consistent * Add more metrics, tidy existing metrics * Fix store block read metrics * Tidy attestation metrics * Fix minor PR comments * Fix minor PR comments * Remove duplicated attestation finalization check * Remove awkward `let` statement * Add first attempts at HTTP bootstrap * Add beacon_block methods to rest api * Fix serde for block.body.grafitti * Allow travis failures on beta (see desc) There's a non-backward compatible change in `cargo fmt`. Stable and beta do not agree. * Add network routes to API * Fix rustc warnings * Add best_slot method * Add --bootstrap arg to beacon node * Get bootstrapper working for ENR address * Store intermediate states during block processing * Allow bootstrapper to scrape libp2p address * Update bootstrapper libp2p address finding * Add comments * Tidy API to be more consistent with recent decisions * Address some review comments * Make BeaconChainTypes Send + Sync + 'static * Add `/network/listen_port` API endpoint * Abandon starting the node if libp2p doesn't start * Update bootstrapper for API changes * Remove unnecessary trait bounds --- beacon_node/beacon_chain/src/beacon_chain.rs | 27 ++- beacon_node/beacon_chain/src/test_utils.rs | 4 +- beacon_node/client/Cargo.toml | 3 + beacon_node/client/src/beacon_chain_types.rs | 17 +- beacon_node/client/src/bootstrapper.rs | 210 +++++++++++++++++++ beacon_node/client/src/config.rs | 40 +++- beacon_node/client/src/lib.rs | 7 +- beacon_node/client/src/notifier.rs | 6 +- beacon_node/eth2-libp2p/src/behaviour.rs | 4 + beacon_node/eth2-libp2p/src/discovery.rs | 9 + beacon_node/eth2-libp2p/src/lib.rs | 3 +- beacon_node/eth2-libp2p/src/service.rs | 19 +- beacon_node/network/src/service.rs | 50 ++++- beacon_node/rest_api/Cargo.toml | 2 + beacon_node/rest_api/src/beacon.rs | 150 ++++++++++++- beacon_node/rest_api/src/helpers.rs | 28 ++- beacon_node/rest_api/src/lib.rs | 30 ++- beacon_node/rest_api/src/network.rs | 108 ++++++++++ beacon_node/rest_api/src/spec.rs | 27 +++ beacon_node/src/main.rs | 15 ++ beacon_node/src/run.rs | 9 +- eth2/types/src/beacon_block_body.rs | 7 +- eth2/types/src/utils/serde_utils.rs | 16 +- 23 files changed, 742 insertions(+), 49 deletions(-) create mode 100644 beacon_node/client/src/bootstrapper.rs create mode 100644 beacon_node/rest_api/src/network.rs create mode 100644 beacon_node/rest_api/src/spec.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e8e2f49dd..5feefd841 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,7 +77,7 @@ pub enum AttestationProcessingOutcome { Invalid(AttestationValidationError), } -pub trait BeaconChainTypes { +pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: 
slot_clock::SlotClock; type LmdGhost: LmdGhost; @@ -870,9 +870,16 @@ impl BeaconChain { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); + // Keep a list of any states that were "skipped" (block-less) in between the parent state + // slot and the block slot. These will need to be stored in the database. + let mut intermediate_states = vec![]; + // Transition the parent state to the block slot. let mut state: BeaconState = parent_state; - for _ in state.slot.as_u64()..block.slot.as_u64() { + for i in state.slot.as_u64()..block.slot.as_u64() { + if i > 0 { + intermediate_states.push(state.clone()); + } per_slot_processing(&mut state, &self.spec)?; } @@ -911,6 +918,22 @@ impl BeaconChain { let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); + // Store all the states between the parent block state and this blocks slot before storing + // the final state. + for (i, intermediate_state) in intermediate_states.iter().enumerate() { + // To avoid doing an unnecessary tree hash, use the following (slot + 1) state's + // state_roots field to find the root. + let following_state = match intermediate_states.get(i + 1) { + Some(following_state) => following_state, + None => &state, + }; + let intermediate_state_root = + following_state.get_state_root(intermediate_state.slot)?; + + self.store + .put(&intermediate_state_root, intermediate_state)?; + } + // Store the block and state. self.store.put(&block_root, &block)?; self.store.put(&state_root, &state)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 298c637db..09f4749ea 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -54,7 +54,7 @@ where impl BeaconChainTypes for CommonTypes where - L: LmdGhost, + L: LmdGhost + 'static, E: EthSpec, { type Store = MemoryStore; @@ -69,7 +69,7 @@ where /// Used for testing. pub struct BeaconChainHarness where - L: LmdGhost, + L: LmdGhost + 'static, E: EthSpec, { pub chain: BeaconChain>, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index b13f175a9..9b5a9cf42 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } +eth2-libp2p = { path = "../eth2-libp2p" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } prometheus = "^0.6" @@ -26,3 +27,5 @@ clap = "2.32.0" dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" +reqwest = "0.9" +url = "1.2" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index 0b86c9583..5168c067a 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,3 +1,4 @@ +use crate::bootstrapper::Bootstrapper; use crate::error::Result; use crate::{config::GenesisState, ClientConfig}; use beacon_chain::{ @@ -35,7 +36,11 @@ pub struct ClientType { _phantom_u: PhantomData, } -impl BeaconChainTypes for ClientType { +impl BeaconChainTypes for ClientType +where + S: Store + 'static, + E: EthSpec, +{ type Store = S; type SlotClock = SystemTimeSlotClock; type LmdGhost = ThreadSafeReducedTree; @@ -74,6 +79,16 @@ where serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? 
} + GenesisState::HttpBootstrap { server } => { + let bootstrapper = Bootstrapper::from_server_string(server.to_string()) + .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; + + let (state, _block) = bootstrapper + .genesis() + .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; + + state + } }; let mut genesis_block = BeaconBlock::empty(&spec); diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs new file mode 100644 index 000000000..c94d9a51d --- /dev/null +++ b/beacon_node/client/src/bootstrapper.rs @@ -0,0 +1,210 @@ +use eth2_libp2p::{ + multiaddr::{Multiaddr, Protocol}, + Enr, +}; +use reqwest::{Error as HttpError, Url}; +use serde::Deserialize; +use std::borrow::Cow; +use std::net::Ipv4Addr; +use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; +use url::Host; + +#[derive(Debug)] +enum Error { + InvalidUrl, + HttpError(HttpError), +} + +impl From for Error { + fn from(e: HttpError) -> Error { + Error::HttpError(e) + } +} + +/// Used to load "bootstrap" information from the HTTP API of another Lighthouse beacon node. +/// +/// Bootstrapping information includes things like genesis and finalized states and blocks, and +/// libp2p connection details. +pub struct Bootstrapper { + url: Url, +} + +impl Bootstrapper { + /// Parses the given `server` as a URL, instantiating `Self`. + pub fn from_server_string(server: String) -> Result { + Ok(Self { + url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, + }) + } + + /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. + /// + /// The address is created by querying the HTTP server for its listening libp2p addresses. + /// Then, we find the first TCP port in those addresses and combine the port with the URL of + /// the server. + /// + /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of + /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of + /// `/ipv4/172.0.0.1/tcp/9000`. + pub fn best_effort_multiaddr(&self) -> Option { + let tcp_port = self.listen_port().ok()?; + + let mut multiaddr = Multiaddr::with_capacity(2); + + match self.url.host()? { + Host::Ipv4(addr) => multiaddr.push(Protocol::Ip4(addr)), + Host::Domain(s) => multiaddr.push(Protocol::Dns4(Cow::Borrowed(s))), + _ => return None, + }; + + multiaddr.push(Protocol::Tcp(tcp_port)); + + Some(multiaddr) + } + + /// Returns the IPv4 address of the server URL, unless it contains a FQDN. + pub fn server_ipv4_addr(&self) -> Option { + match self.url.host()? { + Host::Ipv4(addr) => Some(addr), + _ => None, + } + } + + /// Returns the servers ENR address. + pub fn enr(&self) -> Result { + get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) + } + + /// Returns the servers listening libp2p addresses. + pub fn listen_port(&self) -> Result { + get_listen_port(self.url.clone()).map_err(|e| format!("Unable to get listen port: {:?}", e)) + } + + /// Returns the genesis block and state. + pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { + let genesis_slot = Slot::new(0); + + let block = get_block(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis block: {:?}", e))? + .beacon_block; + let state = get_state(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis state: {:?}", e))? 
+ .beacon_state; + + Ok((state, block)) + } + + /// Returns the most recent finalized state and block. + pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { + let slots_per_epoch = get_slots_per_epoch(self.url.clone()) + .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; + let finalized_slot = get_finalized_slot(self.url.clone(), slots_per_epoch.as_u64()) + .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; + + let block = get_block(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized block: {:?}", e))? + .beacon_block; + let state = get_state(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized state: {:?}", e))? + .beacon_state; + + Ok((state, block)) + } +} + +fn get_slots_per_epoch(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("slots_per_epoch"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("latest_finalized_checkpoint"); + }) + .map_err(|_| Error::InvalidUrl)?; + + let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; + + Ok(checkpoint.epoch.start_slot(slots_per_epoch)) +} + +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} + +fn get_state(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("state"); + }) + .map_err(|_| Error::InvalidUrl)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct BlockResponse { + pub root: Hash256, + pub beacon_block: BeaconBlock, +} + +fn get_block(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("block"); + }) + .map_err(|_| Error::InvalidUrl)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_enr(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("network").push("enr"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_listen_port(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("network").push("listen_port"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index fcc2cc7da..ea8186dbc 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,8 +1,8 @@ -use crate::Eth2Config; +use crate::{Bootstrapper, Eth2Config}; use clap::ArgMatches; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, Drain}; +use slog::{info, o, warn, Drain}; use std::fs::{self, OpenOptions}; use std::path::PathBuf; use std::sync::Mutex; @@ -46,6 +46,8 @@ pub enum GenesisState { }, /// Load a YAML-encoded genesis state from a file. 
Yaml { file: PathBuf }, + /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. + HttpBootstrap { server: String }, } impl Default for Config { @@ -147,6 +149,40 @@ impl Config { self.update_logger(log)?; }; + // If the `--bootstrap` flag is provided, overwrite the default configuration. + if let Some(server) = args.value_of("bootstrap") { + do_bootstrapping(self, server.to_string(), &log)?; + } + Ok(()) } } + +/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and +/// adding them to the `config`. +fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { + // Set the genesis state source. + config.genesis_state = GenesisState::HttpBootstrap { + server: server.to_string(), + }; + + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + config.network.boot_nodes.push(bootstrapper.enr()?); + + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() { + info!( + log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) + ); + config.network.libp2p_nodes.push(server_multiaddr); + } else { + warn!( + log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); + } + + Ok(()) +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 5c37ac3e9..6405e05e7 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,6 +1,7 @@ extern crate slog; mod beacon_chain_types; +mod bootstrapper; mod config; pub mod error; @@ -21,7 +22,8 @@ use tokio::timer::Interval; pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; -pub use config::Config as ClientConfig; +pub use bootstrapper::Bootstrapper; +pub use config::{Config as ClientConfig, GenesisState}; pub use eth2_config::Eth2Config; /// Main beacon node client service. This provides the connection and initialisation of the clients @@ -47,7 +49,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone, { /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( @@ -121,6 +123,7 @@ where &client_config.rest_api, executor, beacon_chain.clone(), + network.clone(), client_config.db_path().expect("unable to read datadir"), &log, ) { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1c7cf3867..78e50ac79 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -17,11 +17,7 @@ pub const WARN_PEER_COUNT: usize = 1; /// durations. /// /// Presently unused, but remains for future use. 
-pub fn run( - client: &Client, - executor: TaskExecutor, - exit: Exit, -) { +pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) { // notification heartbeat let interval = Interval::new( Instant::now(), diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b87f8a061..9158fe485 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -78,6 +78,10 @@ impl Behaviour { log: behaviour_log, }) } + + pub fn discovery(&self) -> &Discovery { + &self.discovery + } } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index ca98db324..87d5dd558 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -103,6 +103,10 @@ impl Discovery { }) } + pub fn local_enr(&self) -> &Enr { + self.discovery.local_enr() + } + /// Manually search for peers. This restarts the discovery round, sparking multiple rapid /// queries. pub fn discover_peers(&mut self) { @@ -120,6 +124,11 @@ impl Discovery { self.connected_peers.len() } + /// The current number of connected libp2p peers. + pub fn connected_peer_set(&self) -> &HashSet { + &self.connected_peers + } + /// Search for new peers using the underlying discovery mechanism. fn find_peers(&mut self) { // pick a random NodeId diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 33d5ba9ed..4c84469ce 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -17,12 +17,13 @@ pub use behaviour::PubsubMessage; pub use config::{ Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, }; +pub use libp2p::enr::Enr; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; pub use libp2p::{ gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, - PeerId, + PeerId, Swarm, }; pub use rpc::RPCEvent; pub use service::Libp2pEvent; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa0579..e208dbeca 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -16,7 +16,7 @@ use libp2p::core::{ upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; use libp2p::{core, secio, PeerId, Swarm, Transport}; -use slog::{debug, info, trace, warn}; +use slog::{crit, debug, info, trace, warn}; use std::fs::File; use std::io::prelude::*; use std::io::{Error, ErrorKind}; @@ -33,7 +33,7 @@ pub struct Service { //TODO: Make this private pub swarm: Swarm, /// This node's PeerId. - _local_peer_id: PeerId, + pub local_peer_id: PeerId, /// The libp2p logger handle. 
pub log: slog::Logger, } @@ -69,10 +69,15 @@ impl Service { log_address.push(Protocol::P2p(local_peer_id.clone().into())); info!(log, "Listening on: {}", log_address); } - Err(err) => warn!( - log, - "Cannot listen on: {} because: {:?}", listen_multiaddr, err - ), + Err(err) => { + crit!( + log, + "Unable to listen on libp2p address"; + "error" => format!("{:?}", err), + "listen_multiaddr" => format!("{}", listen_multiaddr), + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } }; // attempt to connect to user-input libp2p nodes @@ -113,7 +118,7 @@ impl Service { info!(log, "Subscribed to topics: {:?}", subscribed_topics); Ok(Service { - _local_peer_id: local_peer_id, + local_peer_id, swarm, log, }) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e5ca2a917..152f4dc77 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -5,7 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use core::marker::PhantomData; use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Topic; -use eth2_libp2p::{Libp2pEvent, PeerId}; +use eth2_libp2p::{Enr, Libp2pEvent, Multiaddr, PeerId, Swarm}; use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; @@ -18,6 +18,7 @@ use tokio::sync::{mpsc, oneshot}; /// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { libp2p_service: Arc>, + libp2p_port: u16, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, _phantom: PhantomData, //message_handler: MessageHandler, @@ -56,6 +57,7 @@ impl Service { )?; let network_service = Service { libp2p_service, + libp2p_port: config.libp2p_port, _libp2p_exit: libp2p_exit, _network_send: network_send.clone(), _phantom: PhantomData, @@ -64,6 +66,52 @@ impl Service { Ok((Arc::new(network_service), network_send)) } + /// Returns the local ENR from the underlying Discv5 behaviour that external peers may connect + /// to. + pub fn local_enr(&self) -> Enr { + self.libp2p_service + .lock() + .swarm + .discovery() + .local_enr() + .clone() + } + + /// Returns the local libp2p PeerID. + pub fn local_peer_id(&self) -> PeerId { + self.libp2p_service.lock().local_peer_id.clone() + } + + /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on. + pub fn listen_multiaddrs(&self) -> Vec { + Swarm::listeners(&self.libp2p_service.lock().swarm) + .cloned() + .collect() + } + + /// Returns the libp2p port that this node has been configured to listen using. + pub fn listen_port(&self) -> u16 { + self.libp2p_port + } + + /// Returns the number of libp2p connected peers. + pub fn connected_peers(&self) -> usize { + self.libp2p_service.lock().swarm.connected_peers() + } + + /// Returns the set of `PeerId` that are connected via libp2p. + pub fn connected_peer_set(&self) -> Vec { + self.libp2p_service + .lock() + .swarm + .discovery() + .connected_peer_set() + .iter() + .cloned() + .collect() + } + + /// Provides a reference to the underlying libp2p service. 
pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index c7026014c..cac196d9c 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -7,6 +7,8 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] beacon_chain = { path = "../beacon_chain" } +network = { path = "../network" } +eth2-libp2p = { path = "../eth2-libp2p" } store = { path = "../store" } version = { path = "../version" } serde = { version = "1.0", features = ["derive"] } diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index cef23abe8..1c66a2819 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -2,9 +2,114 @@ use super::{success_response, ApiResult}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; +use serde::Serialize; use std::sync::Arc; use store::Store; -use types::BeaconState; +use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; + +#[derive(Serialize)] +pub struct HeadResponse { + pub slot: Slot, + pub block_root: Hash256, + pub state_root: Hash256, +} + +/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. +pub fn get_head(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let head = HeadResponse { + slot: beacon_chain.head().beacon_state.slot, + block_root: beacon_chain.head().beacon_block_root, + state_root: beacon_chain.head().beacon_state_root, + }; + + let json: String = serde_json::to_string(&head) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + +#[derive(Serialize)] +#[serde(bound = "T: EthSpec")] +pub struct BlockResponse { + pub root: Hash256, + pub beacon_block: BeaconBlock, +} + +/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. +pub fn get_block(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let query_params = ["root", "slot"]; + let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; + + let block_root = match (key.as_ref(), value) { + ("slot", value) => { + let target = parse_slot(&value)?; + + block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })? + } + ("root", value) => parse_root(&value)?, + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), + }; + + let block = beacon_chain + .store + .get::>(&block_root)? + .ok_or_else(|| { + ApiError::NotFound(format!( + "Unable to find BeaconBlock for root {}", + block_root + )) + })?; + + let response = BlockResponse { + root: block_root, + beacon_block: block, + }; + + let json: String = serde_json::to_string(&response).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e)) + })?; + + Ok(success_response(Body::from(json))) +} + +/// HTTP handler to return a `BeaconBlock` root at a given `slot`. 
+pub fn get_block_root(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; + let target = parse_slot(&slot_string)?; + + let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })?; + + let json: String = serde_json::to_string(&root) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + +#[derive(Serialize)] +#[serde(bound = "T: EthSpec")] +pub struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} /// HTTP handler to return a `BeaconState` at a given `root` or `slot`. /// @@ -19,26 +124,34 @@ pub fn get_state(req: Request) -> ApiResult let query_params = ["root", "slot"]; let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; - let state: BeaconState = match (key.as_ref(), value) { + let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, ("root", value) => { let root = &parse_root(&value)?; - beacon_chain + let state = beacon_chain .store .get(root)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))? + .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?; + + (*root, state) } - _ => unreachable!("Guarded by UrlQuery::from_request()"), + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; - let json: String = serde_json::to_string(&state) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconState: {:?}", e)))?; + let response = StateResponse { + root, + beacon_state: state, + }; + + let json: String = serde_json::to_string(&response).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e)) + })?; Ok(success_response(Body::from(json))) } -/// HTTP handler to return a `BeaconState` root at a given or `slot`. +/// HTTP handler to return a `BeaconState` root at a given `slot`. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. @@ -58,3 +171,24 @@ pub fn get_state_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } + +/// HTTP handler to return the highest finalized slot. +pub fn get_latest_finalized_checkpoint( + req: Request, +) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let checkpoint = beacon_chain + .head() + .beacon_state + .finalized_checkpoint + .clone(); + + let json: String = serde_json::to_string(&checkpoint) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 2a429076c..5365086df 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -31,22 +31,40 @@ pub fn parse_root(string: &str) -> Result { } } -/// Returns a `BeaconState` in the canonical chain of `beacon_chain` at the given `slot`, if -/// possible. +/// Returns the root of the `BeaconBlock` in the canonical chain of `beacon_chain` at the given +/// `slot`, if possible. 
+/// +/// May return a root for a previous slot, in the case of skip slots. +pub fn block_root_at_slot( + beacon_chain: &BeaconChain, + target: Slot, +) -> Option { + beacon_chain + .rev_iter_block_roots() + .take_while(|(_root, slot)| *slot >= target) + .find(|(_root, slot)| *slot == target) + .map(|(root, _slot)| root) +} + +/// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given +/// `slot`, if possible. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. pub fn state_at_slot( beacon_chain: &BeaconChain, slot: Slot, -) -> Result, ApiError> { +) -> Result<(Hash256, BeaconState), ApiError> { let head_state = &beacon_chain.head().beacon_state; if head_state.slot == slot { // The request slot is the same as the best block (head) slot. // I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary. - Ok(beacon_chain.head().beacon_state.clone()) + Ok(( + beacon_chain.head().beacon_state_root, + beacon_chain.head().beacon_state.clone(), + )) } else { let root = state_root_at_slot(beacon_chain, slot)?; @@ -55,7 +73,7 @@ pub fn state_at_slot( .get(&root)? .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?; - Ok(state) + Ok((root, state)) } } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 57019deea..964dd7998 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,15 +1,18 @@ #[macro_use] extern crate lazy_static; +extern crate network as client_network; mod beacon; mod config; mod helpers; mod metrics; +mod network; mod node; +mod spec; mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; -pub use config::Config as ApiConfig; +use client_network::Service as NetworkService; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; @@ -20,6 +23,9 @@ use std::sync::Arc; use tokio::runtime::TaskExecutor; use url_query::UrlQuery; +pub use beacon::{BlockResponse, HeadResponse, StateResponse}; +pub use config::Config as ApiConfig; + #[derive(PartialEq, Debug)] pub enum ApiError { MethodNotAllowed(String), @@ -67,10 +73,11 @@ impl From for ApiError { } } -pub fn start_server( +pub fn start_server( config: &ApiConfig, executor: &TaskExecutor, beacon_chain: Arc>, + network_service: Arc>, db_path: PathBuf, log: &slog::Logger, ) -> Result { @@ -98,6 +105,7 @@ pub fn start_server( let log = server_log.clone(); let beacon_chain = server_bc.clone(); let db_path = db_path.clone(); + let network_service = network_service.clone(); // Create a simple handler for the router, inject our stateful objects into the request. service_fn_ok(move |mut req| { @@ -108,16 +116,34 @@ pub fn start_server( req.extensions_mut() .insert::>>(beacon_chain.clone()); req.extensions_mut().insert::(db_path.clone()); + req.extensions_mut() + .insert::>>(network_service.clone()); let path = req.uri().path().to_string(); // Route the request to the correct handler. 
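The routes wired up below are plain HTTP GET endpoints returning JSON, so they can be consumed with the same `reqwest` pattern the new `Bootstrapper` uses. A minimal client-side sketch (the base URL, the port, and the `Head` struct are illustrative assumptions, not part of this patch); the `match` that follows then routes each of these paths to its handler:

    // Illustrative client sketch only; assumes a node serving this REST API at
    // `base`, e.g. "http://127.0.0.1:5052/" (the port here is hypothetical).
    use reqwest::Url;
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Head {
        slot: u64,
        block_root: String,
        state_root: String,
    }

    fn fetch_head(base: &str) -> Result<Head, Box<dyn std::error::Error>> {
        let url = Url::parse(base)?.join("beacon/head")?;
        // Mirrors the Bootstrapper: GET, fail on non-2xx, then deserialize JSON.
        Ok(reqwest::get(url)?.error_for_status()?.json()?)
    }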
             let result = match (req.method(), path.as_ref()) {
+            (&Method::GET, "/beacon/head") => beacon::get_head::(req),
+            (&Method::GET, "/beacon/block") => beacon::get_block::(req),
+            (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req),
+            (&Method::GET, "/beacon/latest_finalized_checkpoint") => {
+                beacon::get_latest_finalized_checkpoint::(req)
+            }
             (&Method::GET, "/beacon/state") => beacon::get_state::(req),
             (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req),
             (&Method::GET, "/metrics") => metrics::get_prometheus::(req),
+            (&Method::GET, "/network/enr") => network::get_enr::(req),
+            (&Method::GET, "/network/peer_count") => network::get_peer_count::(req),
+            (&Method::GET, "/network/peer_id") => network::get_peer_id::(req),
+            (&Method::GET, "/network/peers") => network::get_peer_list::(req),
+            (&Method::GET, "/network/listen_port") => network::get_listen_port::(req),
+            (&Method::GET, "/network/listen_addresses") => {
+                network::get_listen_addresses::(req)
+            }
             (&Method::GET, "/node/version") => node::get_version(req),
             (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req),
+            (&Method::GET, "/spec") => spec::get_spec::(req),
+            (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req),
             _ => Err(ApiError::MethodNotAllowed(path.clone())),
         };
 
diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs
new file mode 100644
index 000000000..a3e4c5ee7
--- /dev/null
+++ b/beacon_node/rest_api/src/network.rs
@@ -0,0 +1,108 @@
+use crate::{success_response, ApiError, ApiResult, NetworkService};
+use beacon_chain::BeaconChainTypes;
+use eth2_libp2p::{Enr, Multiaddr, PeerId};
+use hyper::{Body, Request};
+use std::sync::Arc;
+
+/// HTTP handler to return the list of libp2p multiaddr the client is listening on.
+///
+/// Returns a list of `Multiaddr`, serialized according to their `serde` impl.
+pub fn get_listen_addresses(req: Request) -> ApiResult {
+    let network = req
+        .extensions()
+        .get::>>()
+        .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
+
+    let multiaddresses: Vec = network.listen_multiaddrs();
+
+    Ok(success_response(Body::from(
+        serde_json::to_string(&multiaddresses)
+            .map_err(|e| ApiError::ServerError(format!("Unable to serialize multiaddrs: {:?}", e)))?,
+    )))
+}
+
+/// HTTP handler to return the libp2p TCP port that the client is listening on.
+///
+/// Returns the port as a plain number, serialized with `serde`.
+pub fn get_listen_port(req: Request) -> ApiResult {
+    let network = req
+        .extensions()
+        .get::>>()
+        .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
+
+    Ok(success_response(Body::from(
+        serde_json::to_string(&network.listen_port())
+            .map_err(|e| ApiError::ServerError(format!("Unable to serialize port: {:?}", e)))?,
+    )))
+}
+
+/// HTTP handler to return the Discv5 ENR from the client's libp2p service.
+///
+/// ENR is encoded as a base64 string.
+pub fn get_enr(req: Request) -> ApiResult {
+    let network = req
+        .extensions()
+        .get::>>()
+        .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
+
+    let enr: Enr = network.local_enr();
+
+    Ok(success_response(Body::from(
+        serde_json::to_string(&enr.to_base64())
+            .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?,
+    )))
+}
+
+/// HTTP handler to return the `PeerId` from the client's libp2p service.
+///
+/// PeerId is encoded as a base58 string.
+pub fn get_peer_id(req: Request) -> ApiResult {
+    let network = req
+        .extensions()
+        .get::>>()
+        .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
+
+    let peer_id: PeerId = network.local_peer_id();
+
+    Ok(success_response(Body::from(
+        serde_json::to_string(&peer_id.to_base58())
+            .map_err(|e| ApiError::ServerError(format!("Unable to serialize PeerId: {:?}", e)))?,
+    )))
+}
+
+/// HTTP handler to return the number of peers connected to the client's libp2p service.
+pub fn get_peer_count(req: Request) -> ApiResult {
+    let network = req
+        .extensions()
+        .get::>>()
+        .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
+
+    let connected_peers: usize = network.connected_peers();
+
+    Ok(success_response(Body::from(
+        serde_json::to_string(&connected_peers)
+            .map_err(|e| ApiError::ServerError(format!("Unable to serialize peer count: {:?}", e)))?,
+    )))
+}
+
+/// HTTP handler to return the list of peers connected to the client's libp2p service.
+///
+/// Peers are presented as a list of `PeerId::to_string()`.
+pub fn get_peer_list(req: Request) -> ApiResult {
+    let network = req
+        .extensions()
+        .get::>>()
+        .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
+
+    let connected_peers: Vec = network
+        .connected_peer_set()
+        .iter()
+        .map(PeerId::to_string)
+        .collect();
+
+    Ok(success_response(Body::from(
+        serde_json::to_string(&connected_peers).map_err(|e| {
+            ApiError::ServerError(format!("Unable to serialize Vec: {:?}", e))
+        })?,
+    )))
+}
diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs
new file mode 100644
index 000000000..d0c8e4368
--- /dev/null
+++ b/beacon_node/rest_api/src/spec.rs
@@ -0,0 +1,27 @@
+use super::{success_response, ApiResult};
+use crate::ApiError;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use hyper::{Body, Request};
+use std::sync::Arc;
+use types::EthSpec;
+
+/// HTTP handler to return the full spec object.
+pub fn get_spec(req: Request) -> ApiResult {
+    let beacon_chain = req
+        .extensions()
+        .get::>>()
+        .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
+
+    let json: String = serde_json::to_string(&beacon_chain.spec)
+        .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?;
+
+    Ok(success_response(Body::from(json)))
+}
+
+/// HTTP handler to return the spec's `slots_per_epoch` value.
+pub fn get_slots_per_epoch(_req: Request) -> ApiResult {
+    let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch())
+        .map_err(|e| ApiError::ServerError(format!("Unable to serialize slots_per_epoch: {:?}", e)))?;
+
+    Ok(success_response(Body::from(json)))
+}
diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs
index 9a52f2638..04366baa7 100644
--- a/beacon_node/src/main.rs
+++ b/beacon_node/src/main.rs
@@ -200,6 +200,16 @@ fn main() {
             .help("Sets the verbosity level")
             .takes_value(true),
         )
+        /*
+         * Bootstrap.
+         */
+        .arg(
+            Arg::with_name("bootstrap")
+                .long("bootstrap")
+                .value_name("HTTP_SERVER")
+                .help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.")
+                .takes_value(true)
+        )
         .get_matches();
 
     // build the initial logger
@@ -227,6 +237,11 @@ fn main() {
 
     let mut log = slog::Logger::root(drain.fuse(), o!());
 
+    warn!(
+        log,
+        "Ethereum 2.0 is pre-release. This software is experimental."
+ ); + let data_dir = match matches .value_of("datadir") .and_then(|v| Some(PathBuf::from(v))) diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index c16d23e5f..f88cb7460 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -4,7 +4,7 @@ use client::{ }; use futures::sync::oneshot; use futures::Future; -use slog::{error, info, warn}; +use slog::{error, info}; use std::cell::RefCell; use std::path::Path; use std::path::PathBuf; @@ -42,11 +42,6 @@ pub fn run_beacon_node( let other_client_config = client_config.clone(); - warn!( - log, - "Ethereum 2.0 is pre-release. This software is experimental." - ); - info!( log, "BeaconNode init"; @@ -123,7 +118,7 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone, T::Store: OpenDatabase, { let store = T::Store::open_database(&db_path)?; diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 64dc229ed..c1f66b816 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::utils::graffiti_from_hex_str; +use crate::utils::{graffiti_from_hex_str, graffiti_to_hex_str}; use crate::*; use serde_derive::{Deserialize, Serialize}; @@ -16,7 +16,10 @@ use tree_hash_derive::TreeHash; pub struct BeaconBlockBody { pub randao_reveal: Signature, pub eth1_data: Eth1Data, - #[serde(deserialize_with = "graffiti_from_hex_str")] + #[serde( + serialize_with = "graffiti_to_hex_str", + deserialize_with = "graffiti_from_hex_str" + )] pub graffiti: [u8; 32], pub proposer_slashings: VariableList, pub attester_slashings: VariableList, T::MaxAttesterSlashings>, diff --git a/eth2/types/src/utils/serde_utils.rs b/eth2/types/src/utils/serde_utils.rs index 4b46fc0dc..a9b27d75b 100644 --- a/eth2/types/src/utils/serde_utils.rs +++ b/eth2/types/src/utils/serde_utils.rs @@ -46,8 +46,20 @@ where Ok(array) } -// #[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. -pub fn fork_to_hex_str(bytes: &[u8; 4], serializer: S) -> Result +pub fn fork_to_hex_str(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + +pub fn graffiti_to_hex_str( + bytes: &[u8; GRAFFITI_BYTES_LEN], + serializer: S, +) -> Result where S: Serializer, { From b078385362293fda872ee4dc62d0e1f8888005a8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sat, 24 Aug 2019 01:09:29 +1000 Subject: [PATCH 20/24] Improved syncing compilation issues --- beacon_node/network/src/message_handler.rs | 118 +--- beacon_node/network/src/sync/manager.rs | 696 +++++++++++--------- beacon_node/network/src/sync/mod.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 336 ++++++---- 4 files changed, 622 insertions(+), 530 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index fd10c5aea..7a1a4ad31 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -22,8 +22,6 @@ pub struct MessageHandler { _chain: Arc>, /// The syncing framework. sync: SimpleSync, - /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, /// The `MessageHandler` logger. 
log: slog::Logger, } @@ -52,15 +50,13 @@ impl MessageHandler { trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // Initialise sync and begin processing in thread - let sync = SimpleSync::new(beacon_chain.clone(), &log); + let sync = SimpleSync::new(beacon_chain.clone(), network_send, &log); // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, - network_context: NetworkContext::new(network_send, log.clone()), log: log.clone(), }; @@ -81,7 +77,7 @@ impl MessageHandler { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { - self.sync.on_connect(peer_id, &mut self.network_context); + self.sync.on_connect(peer_id); } // A peer has disconnected HandlerMessage::PeerDisconnected(peer_id) => { @@ -112,32 +108,24 @@ impl MessageHandler { /// A new RPC request has been received from the network. fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { match request { - RPCRequest::Hello(hello_message) => self.sync.on_hello_request( - peer_id, - request_id, - hello_message, - &mut self.network_context, - ), + RPCRequest::Hello(hello_message) => { + self.sync + .on_hello_request(peer_id, request_id, hello_message) + } RPCRequest::Goodbye(goodbye_reason) => { debug!( self.log, "PeerGoodbye"; "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", reason), + "reason" => format!("{:?}", goodbye_reason), ); - self.sync.on_disconnect(peer_id), - }, - RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::RecentBeaconBlocks(request) => self.sync.on_recent_beacon_blocks_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), + self.sync.on_disconnect(peer_id); + } + RPCRequest::BeaconBlocks(request) => self + .sync + .on_beacon_blocks_request(peer_id, request_id, request), + RPCRequest::RecentBeaconBlocks(request) => self + .sync + .on_recent_beacon_blocks_request(peer_id, request_id, request), } } @@ -163,20 +151,15 @@ impl MessageHandler { RPCErrorResponse::Success(response) => { match response { RPCResponse::Hello(hello_message) => { - self.sync.on_hello_response( - peer_id, - hello_message, - &mut self.network_context, - ); + self.sync.on_hello_response(peer_id, hello_message); } RPCResponse::BeaconBlocks(response) => { - match self.decode_beacon_blocks(response) { + match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { self.sync.on_beacon_blocks_response( peer_id, request_id, beacon_blocks, - &mut self.network_context, ); } Err(e) => { @@ -186,13 +169,12 @@ impl MessageHandler { } } RPCResponse::RecentBeaconBlocks(response) => { - match self.decode_beacon_blocks(response) { + match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { self.sync.on_recent_beacon_blocks_response( - request_id, peer_id, + request_id, beacon_blocks, - &mut self.network_context, ); } Err(e) => { @@ -217,19 +199,14 @@ impl MessageHandler { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { Ok(block) => { - let _should_forward_on = - self.sync - .on_block_gossip(peer_id, block, &mut self.network_context); + let _should_forward_on = self.sync.on_block_gossip(peer_id, block); } Err(e) => { debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::Attestation(message) 
=> match self.decode_gossip_attestation(message) { - Ok(attestation) => { - self.sync - .on_attestation_gossip(peer_id, attestation, &mut self.network_context) - } + Ok(attestation) => self.sync.on_attestation_gossip(peer_id, attestation), Err(e) => { debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } @@ -331,56 +308,3 @@ impl MessageHandler { Vec::from_ssz_bytes(&beacon_blocks) } } - -/// Wraps a Network Channel to employ various RPC/Sync related network functionality. -pub struct NetworkContext { - /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender, - /// Logger for the `NetworkContext`. - log: slog::Logger, -} - -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { - Self { network_send, log } - } - - pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) - // TODO: disconnect peers. - } - - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { - // Note: There is currently no use of keeping track of requests. However the functionality - // is left here for future revisions. - self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); - } - - //TODO: Handle Error responses - pub fn send_rpc_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - rpc_response: RPCErrorResponse, - ) { - self.send_rpc_event( - peer_id, - RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), - ); - } - - fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.send(peer_id, OutgoingMessage::RPC(rpc_event)) - } - - fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { - self.network_send - .try_send(NetworkMessage::Send(peer_id, outgoing_message)) - .unwrap_or_else(|_| { - warn!( - self.log, - "Could not send RPC message to the network service" - ) - }); - } -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a4ce544ec..f5c669455 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,129 +1,164 @@ -const MAX_BLOCKS_PER_REQUEST: usize = 10; +use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::RequestId; +use eth2_libp2p::PeerId; +use slog::{debug, info, trace, warn, Logger}; +use std::collections::{HashMap, HashSet}; +use std::ops::{Add, Sub}; +use std::sync::Arc; +use types::{BeaconBlock, EthSpec, Hash256, Slot}; + +const MAX_BLOCKS_PER_REQUEST: u64 = 10; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. 
-const SLOT_IMPORT_TOLERANCE: u64 = 10; +const SLOT_IMPORT_TOLERANCE: usize = 10; const PARENT_FAIL_TOLERANCE: usize = 3; -const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE*2; +const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +#[derive(PartialEq)] enum BlockRequestsState { QueuedForward, QueuedBackward, Pending(RequestId), Complete, + Failed, } -struct BlockRequests { - target_head_slot: Slot +struct BlockRequests { + target_head_slot: Slot, target_head_root: Hash256, - downloaded_blocks: Vec, - state: State, + downloaded_blocks: Vec>, + state: BlockRequestsState, } -struct ParentRequests { - downloaded_blocks: Vec, - attempts: usize, +struct ParentRequests { + downloaded_blocks: Vec>, + failed_attempts: usize, last_submitted_peer: PeerId, // to downvote the submitting peer. state: BlockRequestsState, } -impl BlockRequests { - +impl BlockRequests { // gets the start slot for next batch // last block slot downloaded plus 1 fn next_start_slot(&self) -> Option { if !self.downloaded_blocks.is_empty() { match self.state { BlockRequestsState::QueuedForward => { - let last_element_index = self.downloaded_blocks.len() -1; - Some(downloaded_blocks[last_element_index].slot.add(1)) + let last_element_index = self.downloaded_blocks.len() - 1; + Some(self.downloaded_blocks[last_element_index].slot.add(1)) } BlockRequestsState::QueuedBackward => { let earliest_known_slot = self.downloaded_blocks[0].slot; Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) } + _ => { + // pending/complete/failed + None + } } - } - else { + } else { None } } } +#[derive(PartialEq, Debug, Clone)] enum ManagerState { Syncing, Regular, Stalled, } -enum ImportManagerOutcome { +pub(crate) enum ImportManagerOutcome { Idle, - RequestBlocks{ + RequestBlocks { peer_id: PeerId, request_id: RequestId, request: BeaconBlocksRequest, }, + /// Updates information with peer via requesting another HELLO handshake. + Hello(PeerId), RecentRequest(PeerId, RecentBeaconBlocksRequest), DownvotePeer(PeerId), } - -pub struct ImportManager { +pub struct ImportManager { /// A reference to the underlying beacon chain. chain: Arc>, - state: MangerState, - import_queue: HashMap, - parent_queue: Vec, - full_peers: Hashset, + state: ManagerState, + import_queue: HashMap>, + parent_queue: Vec>, + full_peers: HashSet, current_req_id: usize, log: Logger, } -impl ImportManager { +impl ImportManager { + pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + ImportManager { + chain: beacon_chain.clone(), + state: ManagerState::Regular, + import_queue: HashMap::new(), + parent_queue: Vec::new(), + full_peers: HashSet::new(), + current_req_id: 0, + log: log.clone(), + } + } - pub fn add_peer(&mut self, peer_id, remote: PeerSyncInfo) { + pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { // TODO: Improve comments. 
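The batch arithmetic in `next_start_slot` above is easiest to see with concrete numbers; a small self-contained illustration using plain `u64` slots rather than the `Slot` type (names are illustrative only):

    // With MAX_BLOCKS_PER_REQUEST = 10 and an earliest downloaded slot of 100,
    // the next backward batch starts at 100 + 1 - 10 = 91 and covers slots
    // 91..=100, so it always re-downloads the earliest known block.
    fn backward_batch_start(earliest_known_slot: u64, max_blocks_per_request: u64) -> u64 {
        earliest_known_slot + 1 - max_blocks_per_request
    }

    fn forward_batch_start(last_downloaded_slot: u64) -> u64 {
        // Forward batches simply resume one slot past the last block we hold.
        last_downloaded_slot + 1
    }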
// initially try to download blocks from our current head // then backwards search all the way back to our finalized epoch until we match on a chain // has to be done sequentially to find next slot to start the batch from - + let local = PeerSyncInfo::from(&self.chain); // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync - if remote.head_slot.sub(local.head_slot) < SLOT_IMPORT_TOLERANCE { + if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; - "peer" => peer_id, - "peer_head_slot" => remote.head_slot, - "local_head_slot" => local.head_slot, - ); + "peer" => format!("{:?}", peer_id), + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local.head_slot, + ); // remove the peer from the queue if it exists - self.import_queue.remove(&peer_id); + self.import_queue.remove(&peer_id); return; } if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { // update the target head slot - if remote.head_slot > requested_block.target_head_slot { + if remote.head_slot > block_requests.target_head_slot { block_requests.target_head_slot = remote.head_slot; } - } else { + } else { let block_requests = BlockRequests { target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, downloaded_blocks: Vec::new(), - state: RequestedBlockState::Queued - } + state: BlockRequestsState::QueuedForward, + }; self.import_queue.insert(peer_id, block_requests); } - } - pub fn beacon_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { - + pub fn beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + mut blocks: Vec>, + ) { // find the request - let block_requests = match self.import_queue.get_mut(&peer_id) { - Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, - None => { + let block_requests = match self + .import_queue + .get_mut(&peer_id) + .filter(|r| r.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, + _ => { // No pending request, invalid request_id or coding error warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id); return; @@ -142,100 +177,115 @@ impl ImportManager { if blocks.is_empty() { warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.state = RequestedBlockState::Failed; + block_requests.state = BlockRequestsState::Failed; return; } // Add the newly downloaded blocks to the current list of downloaded blocks. This also // determines if we are syncing forward or backward. 
let syncing_forwards = { - if block_requests.blocks.is_empty() { - block_requests.blocks.push(blocks); + if block_requests.downloaded_blocks.is_empty() { + block_requests.downloaded_blocks.append(&mut blocks); true - } - else if block_requests.blocks[0].slot < blocks[0].slot { // syncing forwards - // verify the peer hasn't sent overlapping blocks - ensuring the strictly - // increasing blocks in a batch will be verified during the processing - if block_requests.next_slot() > blocks[0].slot { - warn!(self.log, "BeaconBlocks response returned duplicate blocks", "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_slot()); - block_requests.state = RequestedBlockState::Failed; - return; - } - - block_requests.blocks.push(blocks); - true + } else if block_requests.downloaded_blocks[0].slot < blocks[0].slot { + // syncing forwards + // verify the peer hasn't sent overlapping blocks - ensuring the strictly + // increasing blocks in a batch will be verified during the processing + if block_requests.next_start_slot() > Some(blocks[0].slot) { + warn!(self.log, "BeaconBlocks response returned duplicate blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_start_slot()); + block_requests.state = BlockRequestsState::Failed; + return; } - else { false } + + block_requests.downloaded_blocks.append(&mut blocks); + true + } else { + false + } }; - // Determine if more blocks need to be downloaded. There are a few cases: // - We have downloaded a batch from our head_slot, which has not reached the remotes head // (target head). Therefore we need to download another sequential batch. // - The latest batch includes blocks that greater than or equal to the target_head slot, - // which means we have caught up to their head. We then check to see if the first + // which means we have caught up to their head. We then check to see if the first // block downloaded matches our head. If so, we are on the same chain and can process // the blocks. If not we need to sync back further until we are on the same chain. So // request more blocks. // - We are syncing backwards (from our head slot) and need to check if we are on the same // chain. If so, process the blocks, if not, request more blocks all the way up to // our last finalized slot. - + if syncing_forwards { // does the batch contain the target_head_slot - let last_element_index = block_requests.blocks.len()-1; - if block_requests[last_element_index].slot >= block_requests.target_slot { + let last_element_index = block_requests.downloaded_blocks.len() - 1; + if block_requests.downloaded_blocks[last_element_index].slot + >= block_requests.target_head_slot + { // if the batch is on our chain, this is complete and we can then process. // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests_blocks[0].slot - if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { - block_requests.state = RequestedBlockState::Complete; + let earliest_slot = block_requests.downloaded_blocks[0].slot; + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. 
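The `root_at_slot` helper called just below is not shown in this hunk; a plausible sketch of its shape, mirroring the `block_root_at_slot` helper added to the REST API earlier in this series (the exact signature is an assumption):

    fn root_at_slot<T: BeaconChainTypes>(
        chain: &Arc<BeaconChain<T>>,
        target_slot: Slot,
    ) -> Option<Hash256> {
        // Walk block roots back from the head to the target slot; for skip
        // slots this yields the root of the most recent earlier block, which
        // matches the documented behaviour of the REST helper it mirrors.
        chain
            .rev_iter_block_roots()
            .take_while(|(_root, slot)| *slot >= target_slot)
            .find(|(_root, slot)| *slot == target_slot)
            .map(|(root, _slot)| root)
    }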
+ if Some(block_requests.downloaded_blocks[0].canonical_root()) + == root_at_slot(self.chain, earliest_slot) + { + block_requests.state = BlockRequestsState::Complete; return; } // not on the same chain, request blocks backwards - // binary search, request half the distance between the earliest block and our - // finalized slot - let state = &beacon_chain.head().beacon_state; - let local_finalized_slot = state.finalized_checkpoint.epoch; //TODO: Convert to slot - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.blocks[0] { + let state = &self.chain.head().beacon_state; + let local_finalized_slot = state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.downloaded_blocks[0].slot { warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = RequestedBlockState::Failed; + block_requests.state = BlockRequestsState::Failed; return; } - // Start a backwards sync by requesting earlier blocks + // Start a backwards sync by requesting earlier blocks // There can be duplication in downloaded blocks here if there are a large number // of skip slots. In all cases we at least re-download the earliest known block. // It is unlikely that a backwards sync in required, so we accept this duplication // for now. - block_requests.state = RequestedBlockState::QueuedBackward; + block_requests.state = BlockRequestsState::QueuedBackward; + } else { + // batch doesn't contain the head slot, request the next batch + block_requests.state = BlockRequestsState::QueuedForward; } - else { - // batch doesn't contain the head slot, request the next batch - block_requests.state = RequestedBlockState::QueuedForward; - } - } - else { + } else { // syncing backwards // if the batch is on our chain, this is complete and we can then process. 
// Otherwise continue backwards - let earliest_slot = block_requests_blocks[0].slot - if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { - block_requests.state = RequestedBlockState::Complete; + let earliest_slot = block_requests.downloaded_blocks[0].slot; + if Some(block_requests.downloaded_blocks[0].canonical_root()) + == root_at_slot(self.chain, earliest_slot) + { + block_requests.state = BlockRequestsState::Complete; return; } - block_requests.state = RequestedBlockState::QueuedBackward; - + block_requests.state = BlockRequestsState::QueuedBackward; } } - pub fn recent_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { - + pub fn recent_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blocks: Vec>, + ) { // find the request - let parent_request = match self.parent_queue.get_mut(&peer_id) { - Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + let parent_request = match self + .parent_queue + .iter_mut() + .find(|request| request.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, None => { // No pending request, invalid request_id or coding error warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id); @@ -245,8 +295,8 @@ impl ImportManager { // if an empty response is given, the peer didn't have the requested block, try again if blocks.is_empty() { - parent_request.attempts += 1; - parent_request.state = RequestedBlockState::QueuedForward; + parent_request.failed_attempts += 1; + parent_request.state = BlockRequestsState::QueuedForward; parent_request.last_submitted_peer = peer_id; return; } @@ -256,29 +306,27 @@ impl ImportManager { if blocks.len() != 1 { //TODO: Potentially downvote the peer debug!(self.log, "Peer sent more than 1 parent. 
Ignoring"; - "peer_id" => peer_id, - "no_parents" => blocks.len() - ); + "peer_id" => format!("{:?}", peer_id), + "no_parents" => blocks.len() + ); return; } - // queue for processing - parent_request.state = RequestedBlockState::Complete; + parent_request.state = BlockRequestsState::Complete; } - pub fn inject_error(peer_id: PeerId, id: RequestId) { //TODO: Remove block state from pending } - pub fn peer_disconnect(peer_id: PeerId) { - self.import_queue.remove(&peer_id); - self.full_peers.remove(&peer_id); + pub fn peer_disconnect(&mut self, peer_id: &PeerId) { + self.import_queue.remove(peer_id); + self.full_peers.remove(peer_id); self.update_state(); } - pub fn add_full_peer(peer_id: PeerId) { + pub fn add_full_peer(&mut self, peer_id: PeerId) { debug!( self.log, "Fully synced peer added"; "peer" => format!("{:?}", peer_id), @@ -287,32 +335,36 @@ impl ImportManager { self.update_state(); } - pub fn add_unknown_block(&mut self,block: BeaconBlock) { + pub fn add_unknown_block(&mut self, block: BeaconBlock, peer_id: PeerId) { // if we are not in regular sync mode, ignore this block - if self.state == ManagerState::Regular { + if let ManagerState::Regular = self.state { return; } // make sure this block is not already being searched for // TODO: Potentially store a hashset of blocks for O(1) lookups for parent_req in self.parent_queue.iter() { - if let Some(_) = parent_req.downloaded_blocks.iter().find(|d_block| d_block == block) { + if let Some(_) = parent_req + .downloaded_blocks + .iter() + .find(|d_block| d_block == &&block) + { // we are already searching for this block, ignore it return; } } - let req = ParentRequests { + let req = ParentRequests { downloaded_blocks: vec![block], failed_attempts: 0, - state: RequestedBlockState::QueuedBackward - } + last_submitted_peer: peer_id, + state: BlockRequestsState::QueuedBackward, + }; self.parent_queue.push(req); } - pub fn poll() -> ImportManagerOutcome { - + pub fn poll(&mut self) -> ImportManagerOutcome { loop { // update the state of the manager self.update_state(); @@ -336,304 +388,340 @@ impl ImportManager { if let (re_run, outcome) = self.process_complete_parent_requests() { if let Some(outcome) = outcome { return outcome; - } - else if !re_run { + } else if !re_run { break; } } } - - return ImportManagerOutcome::Idle; + return ImportManagerOutcome::Idle; } - fn update_state(&mut self) { - let previous_state = self.state; + let previous_state = self.state.clone(); self.state = { if !self.import_queue.is_empty() { ManagerState::Syncing + } else if !self.full_peers.is_empty() { + ManagerState::Regular + } else { + ManagerState::Stalled } - else if !self.full_peers.is_empty() { - ManagerState::Regualar - } - else { - ManagerState::Stalled } }; if self.state != previous_state { - info!(self.log, "Syncing state updated", - "old_state" => format!("{:?}", previous_state) - "new_state" => format!("{:?}", self.state) - ); + info!(self.log, "Syncing state updated"; + "old_state" => format!("{:?}", previous_state), + "new_state" => format!("{:?}", self.state), + ); } } - - - fn process_potential_block_requests(&mut self) -> Option { + fn process_potential_block_requests(&mut self) -> Option { // check if an outbound request is required // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p // layer and not needed here. - // If any in queued state we submit a request. - + // If any in queued state we submit a request. 
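Everything below is driven from `poll()` above: a caller loops until `Idle` and turns each outcome into a network action. A sketch of that calling pattern (the closure parameters stand in for whatever RPC plumbing the caller owns; they are illustrative and not part of this patch):

    fn drive_import_manager<T: BeaconChainTypes>(
        manager: &mut ImportManager<T>,
        mut send_beacon_blocks_request: impl FnMut(PeerId, RequestId, BeaconBlocksRequest),
        mut send_recent_blocks_request: impl FnMut(PeerId, RecentBeaconBlocksRequest),
        mut send_hello: impl FnMut(PeerId),
        mut downvote_peer: impl FnMut(PeerId),
    ) {
        loop {
            match manager.poll() {
                // Nothing left to drive this tick.
                ImportManagerOutcome::Idle => break,
                ImportManagerOutcome::RequestBlocks {
                    peer_id,
                    request_id,
                    request,
                } => send_beacon_blocks_request(peer_id, request_id, request),
                ImportManagerOutcome::RecentRequest(peer_id, request) => {
                    send_recent_blocks_request(peer_id, request)
                }
                // A completed batch triggers a fresh HELLO to learn the peer's new head.
                ImportManagerOutcome::Hello(peer_id) => send_hello(peer_id),
                ImportManagerOutcome::DownvotePeer(peer_id) => downvote_peer(peer_id),
            }
        }
    }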
// remove any failed batches self.import_queue.retain(|peer_id, block_request| { - if block_request.state == RequestedBlockState::Failed { - debug!(self.log, "Block import from peer failed", - "peer_id" => peer_id, - "downloaded_blocks" => block_request.downloaded.blocks.len() - ); + if let BlockRequestsState::Failed = block_request.state { + debug!(self.log, "Block import from peer failed"; + "peer_id" => format!("{:?}", peer_id), + "downloaded_blocks" => block_request.downloaded_blocks.len() + ); false + } else { + true } - else { true } }); + // process queued block requests + for (peer_id, block_requests) in self.import_queue.iter_mut().find(|(_peer_id, req)| { + req.state == BlockRequestsState::QueuedForward + || req.state == BlockRequestsState::QueuedBackward + }) { + let request_id = self.current_req_id; + block_requests.state = BlockRequestsState::Pending(request_id); + self.current_req_id += 1; - for (peer_id, block_requests) in self.import_queue.iter_mut() { - if let Some(request) = requests.iter().find(|req| req.state == RequestedBlockState::QueuedForward || req.state == RequestedBlockState::QueuedBackward) { - - let request.state = RequestedBlockState::Pending(self.current_req_id); - self.current_req_id +=1; - - let req = BeaconBlocksRequest { - head_block_root: request.target_root, - start_slot: request.next_start_slot().unwrap_or_else(|| self.chain.head().slot), - count: MAX_BLOCKS_PER_REQUEST, - step: 0 - } - return Some(ImportManagerOutCome::RequestBlocks{ peer_id, req }); - } + let request = BeaconBlocksRequest { + head_block_root: block_requests.target_head_root, + start_slot: block_requests + .next_start_slot() + .unwrap_or_else(|| self.chain.best_slot()) + .as_u64(), + count: MAX_BLOCKS_PER_REQUEST, + step: 0, + }; + return Some(ImportManagerOutcome::RequestBlocks { + peer_id: peer_id.clone(), + request, + request_id, + }); } None } fn process_complete_batches(&mut self) -> Option { - - let completed_batches = self.import_queue.iter().filter(|_peer, block_requests| block_requests.state == RequestedState::Complete).map(|peer, _| peer).collect::>(); + let completed_batches = self + .import_queue + .iter() + .filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete) + .map(|(peer, _)| peer) + .cloned() + .collect::>(); for peer_id in completed_batches { - let block_requests = self.import_queue.remove(&peer_id).unwrap("key exists"); - match self.process_blocks(block_requests.downloaded_blocks) { - Ok(()) => { - //TODO: Verify it's impossible to have empty downloaded_blocks - last_element = block_requests.downloaded_blocks.len() -1 - debug!(self.log, "Blocks processed successfully"; - "peer" => peer_id, - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - ); - // Re-HELLO to ensure we are up to the latest head - return Some(ImportManagerOutcome::Hello(peer_id)); - } - Err(e) => { - last_element = block_requests.downloaded_blocks.len() -1 - warn!(self.log, "Block processing failed"; - "peer" => peer_id, - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - "error" => format!("{:?}", e), - ); - return Some(ImportManagerOutcome::DownvotePeer(peer_id)); - } + let block_requests = self.import_queue.remove(&peer_id).expect("key exists"); + match self.process_blocks(block_requests.downloaded_blocks.clone()) { + Ok(()) => { + //TODO: 
Verify it's impossible to have empty downloaded_blocks + let last_element = block_requests.downloaded_blocks.len() - 1; + debug!(self.log, "Blocks processed successfully"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + ); + // Re-HELLO to ensure we are up to the latest head + return Some(ImportManagerOutcome::Hello(peer_id)); } + Err(e) => { + let last_element = block_requests.downloaded_blocks.len() - 1; + warn!(self.log, "Block processing failed"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + return Some(ImportManagerOutcome::DownvotePeer(peer_id)); + } + } } None } - fn process_parent_requests(&mut self) -> Option { - // remove any failed requests self.parent_queue.retain(|parent_request| { - if parent_request.state == RequestedBlockState::Failed { - debug!(self.log, "Parent import failed", - "block" => parent_request.downloaded_blocks[0].hash, - "siblings found" => parent_request.len() - ); + if parent_request.state == BlockRequestsState::Failed { + debug!(self.log, "Parent import failed"; + "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), + "ancestors_found" => parent_request.downloaded_blocks.len() + ); false + } else { + true } - else { true } }); // check to make sure there are peers to search for the parent from if self.full_peers.is_empty() { - return; + return None; } // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { - parent_request.state == BlockRequestsState::Failed - continue; - } - else if parent_request.state == BlockRequestsState::QueuedForward { + parent_request.state == BlockRequestsState::Failed; + continue; + } else if parent_request.state == BlockRequestsState::QueuedForward { parent_request.state = BlockRequestsState::Pending(self.current_req_id); - self.current_req_id +=1; - let parent_hash = + self.current_req_id += 1; + let last_element_index = parent_request.downloaded_blocks.len() - 1; + let parent_hash = parent_request.downloaded_blocks[last_element_index].parent_root; let req = RecentBeaconBlocksRequest { block_roots: vec![parent_hash], }; // select a random fully synced peer to attempt to download the parent block - let peer_id = self.full_peers.iter().next().expect("List is not empty"); + let peer_id = self.full_peers.iter().next().expect("List is not empty"); - return Some(ImportManagerOutcome::RecentRequest(peer_id, req); + return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); } } None - } - - - fn process_complete_parent_requests(&mut self) => (bool, Option) { + } + fn process_complete_parent_requests(&mut self) -> (bool, Option) { // flag to determine if there is more process to drive or if the manager can be switched to // an idle state - let mut re_run = false; - - // verify the last added block is the parent of the last requested block - let last_index = parent_requests.downloaded_blocks.len() -1; - let expected_hash = parent_requests.downloaded_blocks[last_index].parent ; - let block_hash = parent_requests.downloaded_blocks[0].tree_hash_root(); - if block_hash != expected_hash { - //TODO: Potentially downvote the peer - debug!(self.log, "Peer sent 
invalid parent. Ignoring"; - "peer_id" => peer_id, - "received_block" => block_hash, - "expected_parent" => expected_hash, - ); - return; - } + let mut re_run = false; // Find any parent_requests ready to be processed - for completed_request in self.parent_queue.iter_mut().filter(|req| req.state == BlockRequestsState::Complete) { + for completed_request in self + .parent_queue + .iter_mut() + .filter(|req| req.state == BlockRequestsState::Complete) + { + // verify the last added block is the parent of the last requested block + let last_index = completed_request.downloaded_blocks.len() - 1; + let expected_hash = completed_request.downloaded_blocks[last_index].parent_root; + // Note: the length must be greater than 1 so this cannot panic. + let block_hash = completed_request.downloaded_blocks[last_index - 1].canonical_root(); + if block_hash != expected_hash { + // remove the head block + let _ = completed_request.downloaded_blocks.pop(); + completed_request.state = BlockRequestsState::QueuedForward; + //TODO: Potentially downvote the peer + let peer = completed_request.last_submitted_peer.clone(); + debug!(self.log, "Peer sent invalid parent. Ignoring"; + "peer_id" => format!("{:?}",peer), + "received_block" => format!("{}", block_hash), + "expected_parent" => format!("{}", expected_hash), + ); + return (true, Some(ImportManagerOutcome::DownvotePeer(peer))); + } + // try and process the list of blocks up to the requested block while !completed_request.downloaded_blocks.is_empty() { - let block = completed_request.downloaded_blocks.pop(); - match self.chain_process_block(block.clone()) { - Ok(BlockProcessingOutcome::ParentUnknown { parent } => { + let block = completed_request + .downloaded_blocks + .pop() + .expect("Block must exist exist"); + match self.chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { // need to keep looking for parents completed_request.downloaded_blocks.push(block); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; break; } - Ok(BlockProcessingOutcome::Processed { _ } => { } - Ok(outcome) => { // it's a future slot or an invalid block, remove it and try again - completed_request.failed_attempts +=1; + Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} + Ok(outcome) => { + // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts += 1; trace!( self.log, "Invalid parent block"; - "outcome" => format!("{:?}", outcome); + "outcome" => format!("{:?}", outcome), "peer" => format!("{:?}", completed_request.last_submitted_peer), ); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; - return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); } - Err(e) => { - completed_request.failed_attempts +=1; + Err(e) => { + completed_request.failed_attempts += 1; warn!( self.log, "Parent processing error"; - "error" => format!("{:?}", e); + "error" => format!("{:?}", e) ); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; - return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); - } + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); } + } } } // remove any full completed and processed parent chains - self.parent_queue.retain(|req| 
if req.state == BlockRequestsState::Complete { false } else { true }); + self.parent_queue.retain(|req| { + if req.state == BlockRequestsState::Complete { + false + } else { + true + } + }); (re_run, None) - } - - fn process_blocks( - &mut self, - blocks: Vec>, - ) -> Result<(), String> { - + fn process_blocks(&mut self, blocks: Vec>) -> Result<(), String> { for block in blocks { - let processing_result = self.chain.process_block(block.clone()); + let processing_result = self.chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. - trace!( - self.log, "Imported block from network"; - "source" => source, - "slot" => block.slot, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // blocks should be sequential and all parents should exist - trace!( - self.log, "ParentBlockUnknown"; - "source" => source, - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - ); - return Err(format!("Block at slot {} has an unknown parent.", block.slot)); - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. trace!( - self.log, "FutureBlock"; - "source" => source, - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - return Err(format!("Block at slot {} is too far in the future", block.slot)); - } else { - // The block is in the future, but not too far. - trace!( - self.log, "QueuedFutureBlock"; - "source" => source, - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), + self.log, "Imported block from network"; + "slot" => block.slot, + "block_root" => format!("{}", block_root), ); } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + self.log, "ParentBlockUnknown"; + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!( + "Block at slot {} has an unknown parent.", + block.slot + )); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + trace!( + self.log, "FutureBlock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + return Err(format!( + "Block at slot {} is too far in the future", + block.slot + )); + } else { + // The block is in the future, but not too far. 
+ trace!( + self.log, "QueuedFutureBlock"; + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } + } + _ => { + trace!( + self.log, "InvalidBlock"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } } - _ => { - trace!( - self.log, "InvalidBlock"; - "source" => source, - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - return Err(format!("Invalid block at slot {}", block.slot)); - } + } else { + trace!( + self.log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!( + "Unexpected block processing error: {:?}", + processing_result + )); } - Ok(()) - } else { - trace!( - self.log, "BlockProcessingFailure"; - "source" => source, - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - return Err(format!("Unexpected block processing error: {:?}", processing_result)); } - } + Ok(()) } } + +fn root_at_slot( + chain: Arc>, + target_slot: Slot, +) -> Option { + chain + .rev_iter_block_roots() + .find(|(_root, slot)| *slot == target_slot) + .map(|(root, _slot)| root) +} diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index fac1b46eb..b26d78c14 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -1,4 +1,4 @@ -mod import_queue; +mod manager; /// Syncing for lighthouse. /// /// Stores the various syncing methods for the beacon chain. diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index a7f5ced40..deadf214d 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,8 +1,9 @@ -use super::import_queue::{ImportQueue, PartialBeaconBlockCompletion}; -use crate::message_handler::NetworkContext; +use super::manager::{ImportManager, ImportManagerOutcome}; +use crate::service::{NetworkMessage, OutgoingMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, error, info, o, trace, warn}; use ssz::Encode; @@ -10,14 +11,14 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use store::Store; +use tokio::sync::mpsc; use types::{ Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, }; - /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. -const FUTURE_SLOT_TOLERANCE: u64 = 1; +pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -25,16 +26,13 @@ const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; /// Keeps track of syncing information for known connected peers. 
#[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { - fork_version: [u8,4], - finalized_root: Hash256, - finalized_epoch: Epoch, - head_root: Hash256, - head_slot: Slot, + fork_version: [u8; 4], + pub finalized_root: Hash256, + pub finalized_epoch: Epoch, + pub head_root: Hash256, + pub head_slot: Slot, } - - - impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { @@ -43,7 +41,6 @@ impl From for PeerSyncInfo { finalized_epoch: hello.finalized_epoch, head_root: hello.head_root, head_slot: hello.head_slot, - requested_slot_skip: None, } } } @@ -66,18 +63,24 @@ pub enum SyncState { pub struct SimpleSync { /// A reference to the underlying beacon chain. chain: Arc>, - manager: ImportManager, + manager: ImportManager, + network: NetworkContext, log: slog::Logger, } impl SimpleSync { /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. - pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + pub fn new( + beacon_chain: Arc>, + network_send: mpsc::UnboundedSender, + log: &slog::Logger, + ) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); SimpleSync { chain: beacon_chain.clone(), - manager: ImportManager::new(), + manager: ImportManager::new(beacon_chain, log), + network: NetworkContext::new(network_send, log.clone()), log: sync_logger, } } @@ -92,8 +95,9 @@ impl SimpleSync { /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. - pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + pub fn on_connect(&mut self, peer_id: PeerId) { + self.network + .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. @@ -104,42 +108,31 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, ) { trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. - network.send_rpc_response( + self.network.send_rpc_response( peer_id.clone(), request_id, RPCResponse::Hello(hello_message(&self.chain)), ); - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` response from a peer. - pub fn on_hello_response( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { + pub fn on_hello_response(&mut self, peer_id: PeerId, hello: HelloMessage) { trace!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); // Process the hello message, without sending back another hello. - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` message, requesting new blocks if appropriate. /// /// Disconnects the peer if required. 
- fn process_hello( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { + fn process_hello(&mut self, peer_id: PeerId, hello: HelloMessage) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); @@ -153,12 +146,13 @@ impl SimpleSync { "reason" => "network_id" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); } else if remote.finalized_epoch <= local.finalized_epoch && remote.finalized_root != Hash256::zero() && local.finalized_root != Hash256::zero() - && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) - != Some(remote.latest_finalized_root)) + && (self.root_at_slot(start_slot(remote.finalized_epoch)) + != Some(remote.finalized_root)) { // The remotes finalized epoch is less than or greater than ours, but the block root is // different to the one in our chain. @@ -169,8 +163,9 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch < local.latest_finalized_epoch { + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch < local.finalized_epoch { // The node has a lower finalized epoch, their chain is not useful to us. There are two // cases where a node can have a lower finalized epoch: // @@ -193,12 +188,12 @@ impl SimpleSync { } else if self .chain .store - .exists::>(&remote.best_root) + .exists::>(&remote.head_root) .unwrap_or_else(|_| false) { // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. - self.import_manager.add_full_peer(peer_id); + self.manager.add_full_peer(peer_id); self.process_sync(); } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
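The decision ladder in `process_hello` above can be hard to follow inside the diff. The following stand-alone sketch condenses it over plain value types; `PeerAction`, `SyncInfo`, `classify_peer` and the field layout are illustrative stand-ins rather than the patch's API, and the lower-finalized-epoch branch (whose body is elided in the hunk) is summarised by the `NoSyncNeeded` variant.

// Hypothetical condensation of the `process_hello` decision ladder above.
#[derive(Debug, PartialEq)]
enum PeerAction {
    DisconnectIrrelevantNetwork,
    DisconnectDifferentFinalizedChain,
    NoSyncNeeded,
    StartBatchSync,
}

#[derive(Clone, Copy)]
struct SyncInfo {
    network_id: u8,
    finalized_epoch: u64,
    finalized_root: [u8; 32],
    head_known: bool, // do we already have the peer's head block?
}

fn classify_peer(local: SyncInfo, remote: SyncInfo, finalized_root_on_our_chain: bool) -> PeerAction {
    if local.network_id != remote.network_id {
        // Different network: disconnect.
        PeerAction::DisconnectIrrelevantNetwork
    } else if remote.finalized_epoch <= local.finalized_epoch
        && remote.finalized_root != [0u8; 32]
        && local.finalized_root != [0u8; 32]
        && !finalized_root_on_our_chain
    {
        // History diverges at or before our finalized epoch: disconnect.
        PeerAction::DisconnectDifferentFinalizedChain
    } else if remote.finalized_epoch < local.finalized_epoch || remote.head_known {
        // The peer is behind us, or we already have its head block.
        PeerAction::NoSyncNeeded
    } else {
        // The peer may hold blocks we are missing: hand it to the import manager.
        PeerAction::StartBatchSync
    }
}

fn main() {
    let local = SyncInfo { network_id: 2, finalized_epoch: 10, finalized_root: [1; 32], head_known: false };
    let behind = SyncInfo { finalized_epoch: 8, ..local };
    assert_eq!(classify_peer(local, behind, true), PeerAction::NoSyncNeeded);
}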
@@ -208,29 +203,45 @@ impl SimpleSync { debug!( self.log, "UsefulPeer"; "peer" => format!("{:?}", peer_id), - "local_finalized_epoch" => local.latest_finalized_epoch, - "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, + "local_finalized_epoch" => local.finalized_epoch, + "remote_latest_finalized_epoch" => remote.finalized_epoch, ); - self.import_manager.add_peer(peer_id, remote); + self.manager.add_peer(peer_id, remote); self.process_sync(); } } - self.proess_sync(&mut self) { + fn process_sync(&mut self) { loop { - match self.import_manager.poll() { - ImportManagerOutcome::RequestBlocks(peer_id, req) { + match self.manager.poll() { + ImportManagerOutcome::Hello(peer_id) => { + trace!( + self.log, + "RPC Request"; + "method" => "HELLO", + "peer" => format!("{:?}", peer_id) + ); + self.network + .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + } + ImportManagerOutcome::RequestBlocks { + peer_id, + request_id, + request, + } => { trace!( self.log, "RPC Request"; "method" => "BeaconBlocks", - "count" => req.count, + "id" => request_id, + "count" => request.count, "peer" => format!("{:?}", peer_id) ); - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); - }, - ImportManagerOutcome::RecentRequest(peer_id, req) { + self.network + .send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(request)); + } + ImportManagerOutcome::RecentRequest(peer_id, req) => { trace!( self.log, "RPC Request"; @@ -238,18 +249,20 @@ impl SimpleSync { "count" => req.block_roots.len(), "peer" => format!("{:?}", peer_id) ); - network.send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); - }, - ImportManagerOutcome::DownvotePeer(peer_id) { + self.network + .send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + } + ImportManagerOutcome::DownvotePeer(peer_id) => { trace!( self.log, "Peer downvoted"; "peer" => format!("{:?}", peer_id) ); // TODO: Implement reputation - network.disconnect(peer_id.clone(), GoodbyeReason::Fault); - }, - SyncManagerState::Idle { + self.network + .disconnect(peer_id.clone(), GoodbyeReason::Fault); + } + ImportManagerOutcome::Idle => { // nothing to do return; } @@ -257,37 +270,26 @@ impl SimpleSync { } } - - /* fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) } - */ - /// Handle a `BeaconBlocks` request from the peer. - pub fn on_beacon_blocks_request( + /// Handle a `RecentBeaconBlocks` request from the peer. 
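`process_sync` above is essentially a drain loop: it keeps polling the import manager and translating each outcome into a network action until the manager reports it is idle. A minimal stand-alone sketch of that pattern follows; the `Outcome` enum and `poll_manager` closure are made-up stand-ins for `ImportManagerOutcome` and `ImportManager::poll`.

// Illustrative drain loop; the enum and the closure are stand-ins, not the patch's API.
enum Outcome {
    SendHello(u64),          // peer id
    RequestBlocks(u64, u64), // (peer id, request id)
    Downvote(u64),
    Idle,
}

fn process_sync(mut poll_manager: impl FnMut() -> Outcome) {
    loop {
        match poll_manager() {
            Outcome::SendHello(peer) => println!("send HELLO to peer {}", peer),
            Outcome::RequestBlocks(peer, id) => {
                println!("send BeaconBlocks request {} to peer {}", id, peer)
            }
            Outcome::Downvote(peer) => println!("disconnect peer {}", peer),
            // Nothing left to drive; hand control back to the message handler.
            Outcome::Idle => return,
        }
    }
}

fn main() {
    // Outcomes are popped from the back: hello, request, downvote, then idle.
    let mut script = vec![
        Outcome::Idle,
        Outcome::Downvote(9),
        Outcome::RequestBlocks(7, 1),
        Outcome::SendHello(7),
    ];
    process_sync(move || script.pop().expect("script ends with Idle"));
}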
+ pub fn on_recent_beacon_blocks_request( &mut self, peer_id: PeerId, request_id: RequestId, - req: BeaconBlocksRequest, - network: &mut NetworkContext, + request: RecentBeaconBlocksRequest, ) { - debug!( - self.log, - "BeaconBlocksRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.count, - "start_slot" => req.start_slot, - ); - - let blocks = Vec> = self - .chain.rev_iter_block_roots().filter(|(_root, slot) req.start_slot <= slot && req.start_slot + req.count >= slot).take_while(|(_root, slot) req.start_slot <= *slot) - .filter_map(|root, slot| { + let blocks: Vec> = request + .block_roots + .iter() + .filter_map(|root| { if let Ok(Some(block)) = self.chain.store.get::>(root) { - Some(block.body) + Some(block) } else { debug!( self.log, @@ -301,10 +303,63 @@ impl SimpleSync { }) .collect(); - roots.reverse(); - roots.dedup_by_key(|brs| brs.block_root); + debug!( + self.log, + "BlockBodiesRequest"; + "peer" => format!("{:?}", peer_id), + "requested" => request.block_roots.len(), + "returned" => blocks.len(), + ); - if roots.len() as u64 != req.count { + self.network.send_rpc_response( + peer_id, + request_id, + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), + ) + } + + /// Handle a `BeaconBlocks` request from the peer. + pub fn on_beacon_blocks_request( + &mut self, + peer_id: PeerId, + request_id: RequestId, + req: BeaconBlocksRequest, + ) { + debug!( + self.log, + "BeaconBlocksRequest"; + "peer" => format!("{:?}", peer_id), + "count" => req.count, + "start_slot" => req.start_slot, + ); + + let mut blocks: Vec> = self + .chain + .rev_iter_block_roots() + .filter(|(_root, slot)| { + req.start_slot <= slot.as_u64() && req.start_slot + req.count >= slot.as_u64() + }) + .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) + .filter_map(|(root, _slot)| { + if let Ok(Some(block)) = self.chain.store.get::>(&root) { + Some(block) + } else { + debug!( + self.log, + "Peer requested unknown block"; + "peer" => format!("{:?}", peer_id), + "request_root" => format!("{:}", root), + ); + + None + } + }) + .collect(); + + blocks.reverse(); + blocks.dedup_by_key(|brs| brs.slot); + + if blocks.len() as u64 != req.count { debug!( self.log, "BeaconBlocksRequest"; @@ -313,33 +368,33 @@ impl SimpleSync { "start_slot" => req.start_slot, "current_slot" => self.chain.present_slot(), "requested" => req.count, - "returned" => roots.len(), + "returned" => blocks.len(), ); } - network.send_rpc_response( + self.network.send_rpc_response( peer_id, request_id, RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), ) } - /// Handle a `BeaconBlocks` response from the peer. 
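`on_beacon_blocks_request` above walks block roots from the head backwards, keeps only the slots inside the requested window, and reverses the result so blocks come back in ascending slot order. The small sketch below shows the same windowing over plain `(root, slot)` pairs; the numeric roots and the `select_window` helper are hypothetical.

// Stand-alone illustration of the slot-window selection used above.
// `roots_rev` imitates `rev_iter_block_roots()`: (root, slot) pairs walking
// backwards from the head; u64 roots are used here only for brevity.
fn select_window(roots_rev: &[(u64, u64)], start_slot: u64, count: u64) -> Vec<(u64, u64)> {
    let mut selected: Vec<(u64, u64)> = roots_rev
        .iter()
        .copied()
        // keep only slots inside the requested window
        .filter(|(_root, slot)| *slot >= start_slot && *slot < start_slot + count)
        // the iterator is descending, so stop once we drop below the window
        .take_while(|(_root, slot)| *slot >= start_slot)
        .collect();
    // return blocks in ascending slot order
    selected.reverse();
    selected
}

fn main() {
    let head_backwards = [(0x50, 5), (0x40, 4), (0x30, 3), (0x20, 2), (0x10, 1)];
    assert_eq!(select_window(&head_backwards, 2, 3), vec![(0x20, 2), (0x30, 3), (0x40, 4)]);
}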
pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, request_id: RequestId, - res: Vec>, + beacon_blocks: Vec>, ) { debug!( self.log, "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), - "count" => res.block_bodies.len(), + "count" => beacon_blocks.len(), ); - self.import_manager.beacon_blocks_response(peer_id, request_id, blocks); + self.manager + .beacon_blocks_response(peer_id, request_id, beacon_blocks); self.process_sync(); } @@ -349,16 +404,17 @@ impl SimpleSync { &mut self, peer_id: PeerId, request_id: RequestId, - res: Vec>, + beacon_blocks: Vec>, ) { debug!( self.log, "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), - "count" => res.block_bodies.len(), + "count" => beacon_blocks.len(), ); - self.import_manager.recent_blocks_response(peer_id, request_id, blocks); + self.manager + .recent_blocks_response(peer_id, request_id, beacon_blocks); self.process_sync(); } @@ -368,19 +424,13 @@ impl SimpleSync { /// Attempts to apply to block to the beacon chain. May queue the block for later processing. /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. - pub fn on_block_gossip( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - ) -> bool { - if let Some(outcome) = - self.process_block(peer_id.clone(), block.clone(), network, &"gossip") - { + pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { + if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, - BlockProcessingOutcome::ParentUnknown { parent } => { + BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block - self.import_manager.add_unknown_block(block.clone()); + self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::FutureSlot { @@ -401,12 +451,7 @@ impl SimpleSync { /// Process a gossip message declaring a new attestation. /// /// Not currently implemented. - pub fn on_attestation_gossip( - &mut self, - _peer_id: PeerId, - msg: Attestation, - _network: &mut NetworkContext, - ) { + pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { match self.chain.process_attestation(msg) { Ok(outcome) => info!( self.log, @@ -420,39 +465,74 @@ impl SimpleSync { } } - -/* - /// Returns `true` if `self.chain` has not yet processed this block. - pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { - !self - .chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - false - }) - } - */ - /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { hello_message(&self.chain) } - } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. 
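`on_block_gossip` above returns a bool telling the message handler whether to re-gossip the block it just received. One plausible reduction of that decision (the future-slot arm is truncated in the hunk, so it is omitted here) is the tiny helper below; `Outcome` and `should_forward` are hypothetical names, not the real `BlockProcessingOutcome` API.

// Hypothetical reduction of the gossip-forwarding decision above.
enum Outcome {
    Processed,
    ParentUnknown,
    Invalid,
}

/// True if a freshly received gossip block should be forwarded to peers.
fn should_forward(outcome: Outcome) -> bool {
    match outcome {
        // Valid now, or potentially valid once its ancestry is fetched
        // (a parent lookup is handed to the import manager separately).
        Outcome::Processed | Outcome::ParentUnknown => true,
        Outcome::Invalid => false,
    }
}

fn main() {
    assert!(should_forward(Outcome::Processed));
    assert!(should_forward(Outcome::ParentUnknown));
    assert!(!should_forward(Outcome::Invalid));
}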
fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { - let spec = &beacon_chain.spec; let state = &beacon_chain.head().beacon_state; HelloMessage { - network_id: spec.network_id, - //TODO: Correctly define the chain id - chain_id: spec.network_id as u64, - latest_finalized_root: state.finalized_checkpoint.root, - latest_finalized_epoch: state.finalized_checkpoint.epoch, - best_root: beacon_chain.head().beacon_block_root, - best_slot: state.slot, + fork_version: state.fork.current_version, + finalized_root: state.finalized_checkpoint.root, + finalized_epoch: state.finalized_checkpoint.epoch, + head_root: beacon_chain.head().beacon_block_root, + head_slot: state.slot, + } +} + +/// Wraps a Network Channel to employ various RPC/Sync related network functionality. +pub struct NetworkContext { + /// The network channel to relay messages to the Network service. + network_send: mpsc::UnboundedSender, + /// Logger for the `NetworkContext`. + log: slog::Logger, +} + +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { + Self { network_send, log } + } + + pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) + // TODO: disconnect peers. + } + + pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { + // Note: There is currently no use of keeping track of requests. However the functionality + // is left here for future revisions. + self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); + } + + //TODO: Handle Error responses + pub fn send_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + rpc_response: RPCResponse, + ) { + self.send_rpc_event( + peer_id, + RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), + ); + } + + fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + self.send(peer_id, OutgoingMessage::RPC(rpc_event)) + } + + fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { + self.network_send + .try_send(NetworkMessage::Send(peer_id, outgoing_message)) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send RPC message to the network service" + ) + }); } } From 0d56df474a6df70353a89970329f3c08068eef23 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 00:27:47 +1000 Subject: [PATCH 21/24] Main batch sync debugging --- beacon_node/client/src/lib.rs | 6 +- beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 20 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 4 +- beacon_node/network/src/message_handler.rs | 6 +- beacon_node/network/src/sync/manager.rs | 240 +++++++++---------- beacon_node/network/src/sync/simple_sync.rs | 99 +++++--- 6 files changed, 219 insertions(+), 156 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 4b64c1070..7e6449a98 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -41,7 +41,7 @@ pub struct Client { /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// Signal to terminate the API - pub api_exit_signal: Option, + // pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, /// Marker to pin the beacon chain generics. 
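The `NetworkContext` introduced at the end of the previous patch is only a thin wrapper that turns RPC calls into messages on a channel owned by the network service. The self-contained sketch below models that shape with `std::sync::mpsc` and made-up message types in place of the tokio channel and the real `NetworkMessage`/`RPCEvent` types.

// Minimal, hypothetical model of the NetworkContext pattern: callers ask for
// "send this RPC", the wrapper forwards it over a channel, and the network
// service owns the receiving end.
use std::sync::mpsc;

#[derive(Debug)]
enum OutgoingMessage {
    Rpc { peer: String, request: String },
}

struct NetworkContext {
    network_send: mpsc::Sender<OutgoingMessage>,
}

impl NetworkContext {
    fn new(network_send: mpsc::Sender<OutgoingMessage>) -> Self {
        Self { network_send }
    }

    fn send_rpc_request(&self, peer: &str, request: &str) {
        // If the network service has shut down, log and drop the message,
        // mirroring the `try_send(..).unwrap_or_else(..)` fallback above.
        let msg = OutgoingMessage::Rpc {
            peer: peer.to_string(),
            request: request.to_string(),
        };
        if self.network_send.send(msg).is_err() {
            eprintln!("Could not send RPC message to the network service");
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let ctx = NetworkContext::new(tx);
    ctx.send_rpc_request("peer-1", "HELLO");
    println!("network service received: {:?}", rx.recv().expect("sender still alive"));
}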
@@ -134,6 +134,7 @@ where None }; + /* // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -151,6 +152,7 @@ where } else { None }; + */ let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { @@ -184,7 +186,7 @@ where http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), - api_exit_signal, + //api_exit_signal, log, network, phantom: PhantomData, diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index f7262118d..260a00346 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -171,7 +171,25 @@ impl Decoder for SSZOutboundCodec { }, _ => unreachable!("Cannot negotiate an unknown protocol"), }, - Ok(None) => Ok(None), + Ok(None) => { + // the object sent could be a empty. We return the empty object if this is the case + match self.protocol.message_name.as_str() { + "hello" => match self.protocol.version.as_str() { + "1" => Ok(None), // cannot have an empty HELLO message. The stream has terminated unexpectedly + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + _ => unreachable!("Cannot negotiate an unknown protocol"), + } + } Err(e) => Err(e), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index a69cd0cda..07322875f 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -317,11 +317,11 @@ where RPCEvent::Response(rpc_event.id(), response), ))); } else { - // stream closed early + // stream closed early or nothing was sent return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error( rpc_event.id(), - RPCError::Custom("Stream Closed Early".into()), + RPCError::Custom("Stream closed early. 
Empty response".into()), ), ))); } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 7a1a4ad31..c14fc970d 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,8 +1,7 @@ use crate::error; -use crate::service::{NetworkMessage, OutgoingMessage}; +use crate::service::NetworkMessage; use crate::sync::SimpleSync; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; use eth2_libp2p::{ behaviour::PubsubMessage, rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId}, @@ -304,6 +303,9 @@ impl MessageHandler { &self, beacon_blocks: &[u8], ) -> Result>, DecodeError> { + if beacon_blocks.is_empty() { + return Ok(Vec::new()); + } //TODO: Implement faster block verification before decoding entirely Vec::from_ssz_bytes(&beacon_blocks) } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f5c669455..b81da0991 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -13,14 +13,12 @@ const MAX_BLOCKS_PER_REQUEST: u64 = 10; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: usize = 10; - const PARENT_FAIL_TOLERANCE: usize = 3; const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; #[derive(PartialEq)] enum BlockRequestsState { - QueuedForward, - QueuedBackward, + Queued, Pending(RequestId), Complete, Failed, @@ -31,6 +29,10 @@ struct BlockRequests { target_head_root: Hash256, downloaded_blocks: Vec>, state: BlockRequestsState, + /// Specifies whether the current state is syncing forwards or backwards. + forward_sync: bool, + /// The current `start_slot` of the batched block request. + current_start_slot: Slot, } struct ParentRequests { @@ -43,25 +45,13 @@ struct ParentRequests { impl BlockRequests { // gets the start slot for next batch // last block slot downloaded plus 1 - fn next_start_slot(&self) -> Option { - if !self.downloaded_blocks.is_empty() { - match self.state { - BlockRequestsState::QueuedForward => { - let last_element_index = self.downloaded_blocks.len() - 1; - Some(self.downloaded_blocks[last_element_index].slot.add(1)) - } - BlockRequestsState::QueuedBackward => { - let earliest_known_slot = self.downloaded_blocks[0].slot; - Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) - } - _ => { - // pending/complete/failed - None - } - } + fn update_start_slot(&mut self) { + if self.forward_sync { + self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); } else { - None + self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); } + self.state = BlockRequestsState::Queued; } } @@ -117,7 +107,7 @@ impl ImportManager { let local = PeerSyncInfo::from(&self.chain); - // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync + // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; "peer" => format!("{:?}", peer_id), @@ -139,7 +129,9 @@ impl ImportManager { target_head_slot: remote.head_slot, // this should be larger than the current head. 
It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, downloaded_blocks: Vec::new(), - state: BlockRequestsState::QueuedForward, + state: BlockRequestsState::Queued, + forward_sync: true, + current_start_slot: self.chain.best_slot(), }; self.import_queue.insert(peer_id, block_requests); } @@ -165,8 +157,6 @@ impl ImportManager { } }; - // The response should contain at least one block. - // // If we are syncing up to a target head block, at least the target head block should be // returned. If we are syncing back to our last finalized block the request should return // at least the last block we received (last known block). In diagram form: @@ -176,33 +166,30 @@ impl ImportManager { // ^finalized slot ^ requested start slot ^ last known block ^ remote head if blocks.is_empty() { - warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; + debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); + block_requests.update_start_slot(); return; } - // Add the newly downloaded blocks to the current list of downloaded blocks. This also - // determines if we are syncing forward or backward. - let syncing_forwards = { - if block_requests.downloaded_blocks.is_empty() { - block_requests.downloaded_blocks.append(&mut blocks); - true - } else if block_requests.downloaded_blocks[0].slot < blocks[0].slot { - // syncing forwards - // verify the peer hasn't sent overlapping blocks - ensuring the strictly - // increasing blocks in a batch will be verified during the processing - if block_requests.next_start_slot() > Some(blocks[0].slot) { - warn!(self.log, "BeaconBlocks response returned duplicate blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_start_slot()); - block_requests.state = BlockRequestsState::Failed; - return; - } - - block_requests.downloaded_blocks.append(&mut blocks); - true - } else { - false - } - }; + // verify the range of received blocks + // Note that the order of blocks is verified in block processing + let last_sent_slot = blocks[blocks.len() - 1].slot; + if block_requests.current_start_slot > blocks[0].slot + || block_requests + .current_start_slot + .add(MAX_BLOCKS_PER_REQUEST) + < last_sent_slot + { + //TODO: Downvote peer - add a reason to failed + dbg!(&blocks); + warn!(self.log, "BeaconBlocks response returned out of range blocks"; + "request_id" => request_id, + "response_initial_slot" => blocks[0].slot, + "requested_initial_slot" => block_requests.current_start_slot); + // consider this sync failed + block_requests.state = BlockRequestsState::Failed; + return; + } // Determine if more blocks need to be downloaded. There are a few cases: // - We have downloaded a batch from our head_slot, which has not reached the remotes head @@ -216,61 +203,60 @@ impl ImportManager { // chain. If so, process the blocks, if not, request more blocks all the way up to // our last finalized slot. - if syncing_forwards { - // does the batch contain the target_head_slot - let last_element_index = block_requests.downloaded_blocks.len() - 1; - if block_requests.downloaded_blocks[last_element_index].slot - >= block_requests.target_head_slot - { - // if the batch is on our chain, this is complete and we can then process. - // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests.downloaded_blocks[0].slot; - //TODO: Decide which is faster. 
Reading block from db and comparing or calculating - //the hash tree root and comparing. - if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(self.chain, earliest_slot) - { - block_requests.state = BlockRequestsState::Complete; - return; - } - - // not on the same chain, request blocks backwards - let state = &self.chain.head().beacon_state; - let local_finalized_slot = state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.downloaded_blocks[0].slot { - warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; - return; - } - - // Start a backwards sync by requesting earlier blocks - // There can be duplication in downloaded blocks here if there are a large number - // of skip slots. In all cases we at least re-download the earliest known block. - // It is unlikely that a backwards sync in required, so we accept this duplication - // for now. - block_requests.state = BlockRequestsState::QueuedBackward; - } else { - // batch doesn't contain the head slot, request the next batch - block_requests.state = BlockRequestsState::QueuedForward; - } + if block_requests.forward_sync { + // append blocks if syncing forward + block_requests.downloaded_blocks.append(&mut blocks); } else { - // syncing backwards + // prepend blocks if syncing backwards + block_requests.downloaded_blocks.splice(..0, blocks); + } + + // does the batch contain the target_head_slot + let last_element_index = block_requests.downloaded_blocks.len() - 1; + if block_requests.downloaded_blocks[last_element_index].slot + >= block_requests.target_head_slot + || !block_requests.forward_sync + { // if the batch is on our chain, this is complete and we can then process. - // Otherwise continue backwards + // Otherwise start backwards syncing until we reach a common chain. let earliest_slot = block_requests.downloaded_blocks[0].slot; + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(self.chain, earliest_slot) + == root_at_slot(&self.chain, earliest_slot) { block_requests.state = BlockRequestsState::Complete; return; } - block_requests.state = BlockRequestsState::QueuedBackward; + + // not on the same chain, request blocks backwards + let state = &self.chain.head().beacon_state; + let local_finalized_slot = state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.current_start_slot { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = BlockRequestsState::Failed; + return; + } + + // if this is a forward sync, then we have reached the head without a common chain + // and we need to start syncing backwards. 
+ if block_requests.forward_sync { + // Start a backwards sync by requesting earlier blocks + block_requests.forward_sync = false; + block_requests.current_start_slot = std::cmp::min( + self.chain.best_slot(), + block_requests.downloaded_blocks[0].slot, + ); + } } + + // update the start slot and re-queue the batch + block_requests.update_start_slot(); } pub fn recent_blocks_response( @@ -296,7 +282,7 @@ impl ImportManager { // if an empty response is given, the peer didn't have the requested block, try again if blocks.is_empty() { parent_request.failed_attempts += 1; - parent_request.state = BlockRequestsState::QueuedForward; + parent_request.state = BlockRequestsState::Queued; parent_request.last_submitted_peer = peer_id; return; } @@ -316,7 +302,7 @@ impl ImportManager { parent_request.state = BlockRequestsState::Complete; } - pub fn inject_error(peer_id: PeerId, id: RequestId) { + pub fn _inject_error(_peer_id: PeerId, _id: RequestId) { //TODO: Remove block state from pending } @@ -358,13 +344,13 @@ impl ImportManager { downloaded_blocks: vec![block], failed_attempts: 0, last_submitted_peer: peer_id, - state: BlockRequestsState::QueuedBackward, + state: BlockRequestsState::Queued, }; self.parent_queue.push(req); } - pub fn poll(&mut self) -> ImportManagerOutcome { + pub(crate) fn poll(&mut self) -> ImportManagerOutcome { loop { // update the state of the manager self.update_state(); @@ -385,12 +371,11 @@ impl ImportManager { } // process any complete parent lookups - if let (re_run, outcome) = self.process_complete_parent_requests() { - if let Some(outcome) = outcome { - return outcome; - } else if !re_run { - break; - } + let (re_run, outcome) = self.process_complete_parent_requests(); + if let Some(outcome) = outcome { + return outcome; + } else if !re_run { + break; } } @@ -423,9 +408,10 @@ impl ImportManager { // If any in queued state we submit a request. 
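The batched sync above keeps a `current_start_slot` and slides it by `MAX_BLOCKS_PER_REQUEST`: forwards while chasing the peer's head, backwards while searching for a common ancestor, re-queueing the batch after each slide, and a response is rejected when its slots fall outside the window that was asked for. The sketch below restates that bookkeeping with a bare struct; `Window` and `response_in_range` are illustrative names, not the patch's types.

// Illustrative model of the batch window used above.
const MAX_BLOCKS_PER_REQUEST: u64 = 10;

struct Window {
    current_start_slot: u64,
    forward_sync: bool,
}

impl Window {
    // Mirror of `update_start_slot`: slide the window one batch in the current direction.
    fn update_start_slot(&mut self) {
        if self.forward_sync {
            self.current_start_slot += MAX_BLOCKS_PER_REQUEST;
        } else {
            self.current_start_slot -= MAX_BLOCKS_PER_REQUEST;
        }
    }

    // Mirror of the range check on a response: the first returned slot must not
    // precede the window start and the last must not exceed the window end.
    fn response_in_range(&self, first_slot: u64, last_slot: u64) -> bool {
        first_slot >= self.current_start_slot
            && last_slot <= self.current_start_slot + MAX_BLOCKS_PER_REQUEST
    }
}

fn main() {
    let mut w = Window { current_start_slot: 100, forward_sync: true };
    assert!(w.response_in_range(100, 109));
    assert!(!w.response_in_range(95, 109)); // overlaps slots below the window
    w.update_start_slot();
    assert_eq!(w.current_start_slot, 110);
}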
// remove any failed batches + let debug_log = &self.log; self.import_queue.retain(|peer_id, block_request| { if let BlockRequestsState::Failed = block_request.state { - debug!(self.log, "Block import from peer failed"; + debug!(debug_log, "Block import from peer failed"; "peer_id" => format!("{:?}", peer_id), "downloaded_blocks" => block_request.downloaded_blocks.len() ); @@ -436,20 +422,18 @@ impl ImportManager { }); // process queued block requests - for (peer_id, block_requests) in self.import_queue.iter_mut().find(|(_peer_id, req)| { - req.state == BlockRequestsState::QueuedForward - || req.state == BlockRequestsState::QueuedBackward - }) { + for (peer_id, block_requests) in self + .import_queue + .iter_mut() + .find(|(_peer_id, req)| req.state == BlockRequestsState::Queued) + { let request_id = self.current_req_id; block_requests.state = BlockRequestsState::Pending(request_id); self.current_req_id += 1; let request = BeaconBlocksRequest { head_block_root: block_requests.target_head_root, - start_slot: block_requests - .next_start_slot() - .unwrap_or_else(|| self.chain.best_slot()) - .as_u64(), + start_slot: block_requests.current_start_slot.as_u64(), count: MAX_BLOCKS_PER_REQUEST, step: 0, }; @@ -504,9 +488,10 @@ impl ImportManager { fn process_parent_requests(&mut self) -> Option { // remove any failed requests + let debug_log = &self.log; self.parent_queue.retain(|parent_request| { if parent_request.state == BlockRequestsState::Failed { - debug!(self.log, "Parent import failed"; + debug!(debug_log, "Parent import failed"; "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), "ancestors_found" => parent_request.downloaded_blocks.len() ); @@ -524,9 +509,15 @@ impl ImportManager { // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { - parent_request.state == BlockRequestsState::Failed; + parent_request.state = BlockRequestsState::Failed; continue; - } else if parent_request.state == BlockRequestsState::QueuedForward { + } else if parent_request.state == BlockRequestsState::Queued { + // check the depth isn't too large + if parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + parent_request.state = BlockRequestsState::Failed; + continue; + } + parent_request.state = BlockRequestsState::Pending(self.current_req_id); self.current_req_id += 1; let last_element_index = parent_request.downloaded_blocks.len() - 1; @@ -564,7 +555,7 @@ impl ImportManager { if block_hash != expected_hash { // remove the head block let _ = completed_request.downloaded_blocks.pop(); - completed_request.state = BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; //TODO: Potentially downvote the peer let peer = completed_request.last_submitted_peer.clone(); debug!(self.log, "Peer sent invalid parent. 
Ignoring"; @@ -585,7 +576,7 @@ impl ImportManager { Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { // need to keep looking for parents completed_request.downloaded_blocks.push(block); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; break; } @@ -598,7 +589,7 @@ impl ImportManager { "outcome" => format!("{:?}", outcome), "peer" => format!("{:?}", completed_request.last_submitted_peer), ); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; return ( re_run, @@ -613,7 +604,7 @@ impl ImportManager { self.log, "Parent processing error"; "error" => format!("{:?}", e) ); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; return ( re_run, @@ -691,6 +682,13 @@ impl ImportManager { ); } } + BlockProcessingOutcome::FinalizedSlot => { + trace!( + self.log, "Finalized or earlier block processed"; + "outcome" => format!("{:?}", outcome), + ); + // block reached our finalized slot or was earlier, move to the next block + } _ => { trace!( self.log, "InvalidBlock"; @@ -717,7 +715,7 @@ impl ImportManager { } fn root_at_slot( - chain: Arc>, + chain: &Arc>, target_slot: Slot, ) -> Option { chain diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index deadf214d..924b2de9b 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -2,24 +2,22 @@ use super::manager::{ImportManager, ImportManagerOutcome}; use crate::service::{NetworkMessage, OutgoingMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; -use slog::{debug, error, info, o, trace, warn}; +use slog::{debug, info, o, trace, warn}; use ssz::Encode; -use std::collections::HashMap; +use std::ops::Sub; use std::sync::Arc; -use std::time::Duration; use store::Store; use tokio::sync::mpsc; -use types::{ - Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, -}; +use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; +/// The number of slots behind our head that we still treat a peer as a fully synced peer. +const FULL_PEER_TOLERANCE: u64 = 10; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -54,8 +52,8 @@ impl From<&Arc>> for PeerSyncInfo { /// The current syncing state. #[derive(PartialEq)] pub enum SyncState { - Idle, - Downloading, + _Idle, + _Downloading, _Stopped, } @@ -97,7 +95,7 @@ impl SimpleSync { /// Sends a `Hello` message to the peer. pub fn on_connect(&mut self, peer_id: PeerId) { self.network - .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. @@ -193,8 +191,16 @@ impl SimpleSync { { // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. 
- self.manager.add_full_peer(peer_id); - self.process_sync(); + if self.chain.best_slot().sub(remote.head_slot).as_u64() < FULL_PEER_TOLERANCE { + self.manager.add_full_peer(peer_id); + self.process_sync(); + } else { + debug!( + self.log, + "Out of sync peer connected"; + "peer" => format!("{:?}", peer_id), + ); + } } else { // The remote node has an equal or great finalized epoch and we don't know it's head. // @@ -222,8 +228,11 @@ impl SimpleSync { "method" => "HELLO", "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + self.network.send_rpc_request( + None, + peer_id, + RPCRequest::Hello(hello_message(&self.chain)), + ); } ImportManagerOutcome::RequestBlocks { peer_id, @@ -238,8 +247,11 @@ impl SimpleSync { "count" => request.count, "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(request)); + self.network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::BeaconBlocks(request), + ); } ImportManagerOutcome::RecentRequest(peer_id, req) => { trace!( @@ -249,8 +261,11 @@ impl SimpleSync { "count" => req.block_roots.len(), "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + self.network.send_rpc_request( + None, + peer_id.clone(), + RPCRequest::RecentBeaconBlocks(req), + ); } ImportManagerOutcome::DownvotePeer(peer_id) => { trace!( @@ -270,6 +285,7 @@ impl SimpleSync { } } + //TODO: Move to beacon chain fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() @@ -333,36 +349,58 @@ impl SimpleSync { "start_slot" => req.start_slot, ); + //TODO: Optimize this + // Currently for skipped slots, the blocks returned could be less than the requested range. + // In the current implementation we read from the db then filter out out-of-range blocks. + // Improving the db schema to prevent this would be ideal. 
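`FULL_PEER_TOLERANCE` above simply bounds how far behind our head a peer whose head block we already hold may be while still being treated as fully synced; anything further back is logged as an out-of-sync peer instead. Conceptually it is a slot-distance comparison, as in this hypothetical helper:

// Hypothetical restatement of the FULL_PEER_TOLERANCE check above.
const FULL_PEER_TOLERANCE: u64 = 10;

/// True if a peer whose head we already possess is close enough to our own
/// head to be treated as fully synced rather than ignored as out of sync.
fn is_full_peer(local_head_slot: u64, remote_head_slot: u64) -> bool {
    local_head_slot.saturating_sub(remote_head_slot) < FULL_PEER_TOLERANCE
}

fn main() {
    assert!(is_full_peer(1000, 995));  // 5 slots behind: full peer
    assert!(!is_full_peer(1000, 980)); // 20 slots behind: out of sync
}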
+ let mut blocks: Vec> = self .chain .rev_iter_block_roots() .filter(|(_root, slot)| { - req.start_slot <= slot.as_u64() && req.start_slot + req.count >= slot.as_u64() + req.start_slot <= slot.as_u64() && req.start_slot + req.count > slot.as_u64() }) .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) .filter_map(|(root, _slot)| { if let Ok(Some(block)) = self.chain.store.get::>(&root) { Some(block) } else { - debug!( + warn!( self.log, - "Peer requested unknown block"; - "peer" => format!("{:?}", peer_id), + "Block in the chain is not in the store"; "request_root" => format!("{:}", root), ); None } }) + .filter(|block| block.slot >= req.start_slot) .collect(); + // TODO: Again find a more elegant way to include genesis if needed + // if the genesis is requested, add it in + if req.start_slot == 0 { + if let Ok(Some(genesis)) = self + .chain + .store + .get::>(&self.chain.genesis_block_root) + { + blocks.push(genesis); + } else { + warn!( + self.log, + "Requested genesis, which is not in the chain store"; + ); + } + } + blocks.reverse(); blocks.dedup_by_key(|brs| brs.slot); if blocks.len() as u64 != req.count { debug!( self.log, - "BeaconBlocksRequest"; + "BeaconBlocksRequest response"; "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, @@ -498,14 +536,19 @@ impl NetworkContext { } pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) + self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) // TODO: disconnect peers. } - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { - // Note: There is currently no use of keeping track of requests. However the functionality - // is left here for future revisions. - self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); + pub fn send_rpc_request( + &mut self, + request_id: Option, + peer_id: PeerId, + rpc_request: RPCRequest, + ) { + // use 0 as the default request id, when an ID is not required. + let request_id = request_id.unwrap_or_else(|| 0); + self.send_rpc_event(peer_id, RPCEvent::Request(request_id, rpc_request)); } //TODO: Handle Error responses From 7ee080db6021b2fb4b47056ce0a666020b71b3d9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 08:25:54 +1000 Subject: [PATCH 22/24] Updated syncing algorithm --- beacon_node/client/src/lib.rs | 6 ++---- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- beacon_node/eth2-libp2p/src/behaviour.rs | 3 +-- beacon_node/eth2-libp2p/src/service.rs | 2 +- beacon_node/network/src/service.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 8 +++++++- beacon_node/rpc/src/attestation.rs | 8 ++++++-- beacon_node/rpc/src/beacon_block.rs | 10 +++++++--- 8 files changed, 27 insertions(+), 16 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7e6449a98..4b64c1070 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -41,7 +41,7 @@ pub struct Client { /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// Signal to terminate the API - // pub api_exit_signal: Option, + pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, /// Marker to pin the beacon chain generics. 
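Two details in the `on_beacon_blocks_request` rework just above are worth spelling out: the reverse root iterator yields one entry per slot, so a block's root repeats across the empty slots that follow it, and loading those roots therefore produces adjacent duplicates sharing a slot, which is what the `dedup_by_key(|brs| brs.slot)` call collapses; the genesis block is then appended separately when slot 0 is requested (the TODO above notes this special case is awkward). A toy illustration of the dedup step, with hypothetical slots and roots:

// Toy illustration (hypothetical data) of collapsing skip-slot repeats.
#[derive(Debug, PartialEq)]
struct Block {
    slot: u64,
    root: u64, // stand-in for a Hash256
}

fn main() {
    // Blocks loaded from a per-slot reverse walk over slots 4..=1, where slots
    // 3 and 2 were skipped and therefore repeat the block at slot 1.
    let mut blocks = vec![
        Block { slot: 4, root: 0xd4 },
        Block { slot: 1, root: 0xa1 }, // stood in for skipped slot 3
        Block { slot: 1, root: 0xa1 }, // stood in for skipped slot 2
        Block { slot: 1, root: 0xa1 },
    ];

    blocks.reverse();
    blocks.dedup_by_key(|b| b.slot);

    assert_eq!(
        blocks,
        vec![Block { slot: 1, root: 0xa1 }, Block { slot: 4, root: 0xd4 }]
    );
}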
@@ -134,7 +134,6 @@ where None }; - /* // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -152,7 +151,6 @@ where } else { None }; - */ let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { @@ -186,7 +184,7 @@ where http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), - //api_exit_signal, + api_exit_signal, log, network, phantom: PhantomData, diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 55081aed5..a379bcead 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a56865a4077ac54767136b4bee627c9734720a6b" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a56865a4077ac54767136b4bee627c9734720a6b", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b4822de4c..29725e0ce 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -16,7 +16,6 @@ use libp2p::{ NetworkBehaviour, PeerId, }; use slog::{debug, o, trace}; -use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; @@ -189,7 +188,7 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. 
- pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { + pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) { let message_data = message.to_data(); for topic in topics { self.gossipsub.publish(topic, message_data.clone()); diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 98718445b..9945b1586 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -148,7 +148,7 @@ impl Stream for Service { topics, message, } => { - trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); + trace!(self.log, "Gossipsub message received"; "service" => "Swarm"); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { source, topics, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index df0404cfa..4800a7efb 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -119,7 +119,7 @@ fn network_service( }, NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.lock().swarm.publish(topics, message); + libp2p_service.lock().swarm.publish(&topics, message); } }, Ok(Async::NotReady) => break, diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 924b2de9b..bee9310d3 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -465,9 +465,15 @@ impl SimpleSync { pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { - BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, + BlockProcessingOutcome::Processed { .. 
} => { + trace!(self.log, "Gossipsub block processed"; + "peer_id" => format!("{:?}",peer_id)); + SHOULD_FORWARD_GOSSIP_BLOCK + } BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block + trace!(self.log, "Unknown parent gossip"; + "peer_id" => format!("{:?}",peer_id)); self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index f442e247d..dff3f8d70 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::Topic; -use eth2_libp2p::BEACON_ATTESTATION_TOPIC; +use eth2_libp2p::{BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -144,7 +144,11 @@ impl AttestationService for AttestationServiceInstance { ); // valid attestation, propagate to the network - let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); self.network_chan diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b1a67399e..92a543ef3 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::BEACON_BLOCK_TOPIC; use eth2_libp2p::{PubsubMessage, Topic}; +use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -105,8 +105,12 @@ impl BeaconBlockService for BeaconBlockServiceInstance { "block_root" => format!("{}", block_root), ); - // get the network topic to send on - let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub. 
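For context on the BeaconBlocks handling changed earlier in this series: the request is now treated as the half-open range start_slot <= slot < start_slot + count, the genesis block is appended explicitly when slot 0 is requested (the TODO above notes the pipeline can otherwise miss it), and the response is returned in ascending slot order with skip-slot duplicates removed. A rough sketch of that selection logic, with made-up types standing in for the chain's reverse root iterator and block store:

    type Root = [u8; 32];

    // `roots_desc` stands in for chain.rev_iter_block_roots(): (root, slot)
    // pairs walking head-to-genesis, with a root repeated for skipped slots.
    fn select_blocks(
        roots_desc: &[(Root, u64)],
        genesis_root: Root,
        start_slot: u64,
        count: u64,
    ) -> Vec<(Root, u64)> {
        let mut blocks: Vec<(Root, u64)> = roots_desc
            .iter()
            .copied()
            // half-open range: start_slot <= slot < start_slot + count
            .filter(|(_root, slot)| *slot >= start_slot && *slot < start_slot + count)
            .collect();

        // mirror the patch's special case: add genesis back when slot 0 is requested
        if start_slot == 0 {
            blocks.push((genesis_root, 0));
        }

        blocks.reverse();                           // respond in ascending slot order
        blocks.dedup_by_key(|(root, _slot)| *root); // skipped slots repeat a root
        blocks
    }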
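Earlier in the series, send_rpc_request also gained an optional request id so the updated sync code can correlate responses with the requests it issued; callers that never expect a tracked response (for example Goodbye) pass None and fall back to id 0. A tiny sketch of that defaulting, with RequestId assumed here to be a plain integer alias:

    type RequestId = usize;

    // Mirrors the fallback in NetworkContext::send_rpc_request: untracked
    // requests use 0 as the placeholder id.
    fn effective_request_id(request_id: Option<RequestId>) -> RequestId {
        request_id.unwrap_or(0)
    }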
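Patch 22 also changes how the RPC services name gossipsub topics: rather than using the bare topic constant, they build the full path /{prefix}/{topic}/{encoding}, and the behaviour's publish now borrows a slice of topics (&[Topic]) instead of taking the Vec by value. A minimal sketch of the topic construction; the constant values below are assumptions and should be checked against the eth2-libp2p topic definitions:

    // Assumed values; the real constants live in eth2-libp2p.
    const TOPIC_PREFIX: &str = "eth2";
    const TOPIC_ENCODING_POSTFIX: &str = "ssz";
    const BEACON_BLOCK_TOPIC: &str = "beacon_block";

    /// Builds the full gossipsub topic path, e.g. "/eth2/beacon_block/ssz".
    fn full_topic(topic: &str) -> String {
        format!("/{}/{}/{}", TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX)
    }

Taking the topics by reference lets the network service hand the list it received in a NetworkMessage::Publish straight to the swarm without giving up ownership.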
From 9cdcc7d198b9e0e48e89870ef050958e8bb94abd Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 10:02:54 +1000 Subject: [PATCH 23/24] Update to latest libp2p --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 7517c2980..92c2c80d4 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a56865a4077ac54767136b4bee627c9734720a6b" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a56865a4077ac54767136b4bee627c9734720a6b", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" From 1bea1755c46d17fff9c6cea56e55691bf5cfa1b9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 10:13:17 +1000 Subject: [PATCH 24/24] Remove redundant code --- beacon_node/eth2-libp2p/src/rpc/methods.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 8fef1a75a..d912bcfa1 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -89,17 +89,6 @@ pub struct BeaconBlocksRequest { pub step: u64, } -// TODO: Currently handle encoding/decoding of blocks in the message handler. Leave this struct -// here in case encoding/decoding of ssz requires an object. -/* -/// Response containing a number of beacon block roots from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlocksResponse { - /// List of requested blocks and associated slots. - pub beacon_blocks: Vec, -} -*/ - /// Request a number of beacon block bodies from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct RecentBeaconBlocksRequest {