Merge branch 'master' into attestation-processing
commit cd11eb15a5
@@ -308,7 +308,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
 /// It is important to note that the `beacon_state` returned may not match the present slot. It
 /// is the state as it was when the head block was received, which could be some slots prior to
 /// now.
-pub fn head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>> {
+pub fn head<'a>(&'a self) -> RwLockReadGuard<'a, CheckPoint<T::EthSpec>> {
 self.canonical_head.read()
 }
 
@@ -1,3 +1,4 @@
+use crate::Eth2Config;
 use clap::ArgMatches;
 use http_server::HttpServerConfig;
 use network::NetworkConfig;
@@ -25,7 +26,7 @@ pub struct Config {
 pub network: network::NetworkConfig,
 pub rpc: rpc::RPCConfig,
 pub http: HttpServerConfig,
-pub rest_api: rest_api::APIConfig,
+pub rest_api: rest_api::ApiConfig,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -56,12 +57,10 @@ impl Default for Config {
 log_file: PathBuf::from(""),
 db_type: "disk".to_string(),
 db_name: "chain_db".to_string(),
-// Note: there are no default bootnodes specified.
-// Once bootnodes are established, add them here.
 network: NetworkConfig::new(),
 rpc: rpc::RPCConfig::default(),
 http: HttpServerConfig::default(),
-rest_api: rest_api::APIConfig::default(),
+rest_api: rest_api::ApiConfig::default(),
 spec_constants: TESTNET_SPEC_CONSTANTS.into(),
 genesis_state: GenesisState::RecentGenesis {
 validator_count: TESTNET_VALIDATOR_COUNT,
@@ -129,6 +128,15 @@ impl Config {
 self.data_dir = PathBuf::from(dir);
 };
 
+if let Some(default_spec) = args.value_of("default-spec") {
+match default_spec {
+"mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants,
+"minimal" => self.spec_constants = Eth2Config::minimal().spec_constants,
+"interop" => self.spec_constants = Eth2Config::interop().spec_constants,
+_ => {} // not supported
+}
+}
+
 if let Some(dir) = args.value_of("db") {
 self.db_type = dir.to_string();
 };
@@ -7,8 +7,8 @@ edition = "2018"
 [dependencies]
 clap = "2.32.0"
 #SigP repository
-libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b" }
+libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" }
-enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b", features = ["serde"] }
+enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] }
 types = { path = "../../eth2/types" }
 serde = "1.0"
 serde_derive = "1.0"
@@ -2,47 +2,52 @@ use crate::discovery::Discovery;
 use crate::rpc::{RPCEvent, RPCMessage, RPC};
 use crate::{error, NetworkConfig};
 use crate::{Topic, TopicHash};
+use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC};
 use futures::prelude::*;
 use libp2p::{
-core::{
+core::identity::Keypair,
-identity::Keypair,
-swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess},
-},
 discv5::Discv5Event,
 gossipsub::{Gossipsub, GossipsubEvent},
+identify::{Identify, IdentifyEvent},
 ping::{Ping, PingConfig, PingEvent},
+swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess},
 tokio_io::{AsyncRead, AsyncWrite},
 NetworkBehaviour, PeerId,
 };
-use slog::{o, trace, warn};
+use slog::{debug, o, trace};
-use ssz::{ssz_encode, Decode, DecodeError, Encode};
+use ssz::{ssz_encode, Encode};
 use std::num::NonZeroU32;
 use std::time::Duration;
-use types::{Attestation, BeaconBlock, EthSpec};
+const MAX_IDENTIFY_ADDRESSES: usize = 20;
 
 /// Builds the network behaviour that manages the core protocols of eth2.
 /// This core behaviour is managed by `Behaviour` which adds peer management to all core
 /// behaviours.
 #[derive(NetworkBehaviour)]
-#[behaviour(out_event = "BehaviourEvent<E>", poll_method = "poll")]
+#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")]
-pub struct Behaviour<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> {
+pub struct Behaviour<TSubstream: AsyncRead + AsyncWrite> {
 /// The routing pub-sub mechanism for eth2.
 gossipsub: Gossipsub<TSubstream>,
-/// The serenity RPC specified in the wire-0 protocol.
+/// The Eth2 RPC specified in the wire-0 protocol.
-serenity_rpc: RPC<TSubstream, E>,
+eth2_rpc: RPC<TSubstream>,
 /// Keep regular connection to peers and disconnect if absent.
+// TODO: Remove Libp2p ping in favour of discv5 ping.
 ping: Ping<TSubstream>,
-/// Kademlia for peer discovery.
+// TODO: Using id for initial interop. This will be removed by mainnet.
+/// Provides IP addresses and peer information.
+identify: Identify<TSubstream>,
+/// Discovery behaviour.
 discovery: Discovery<TSubstream>,
 #[behaviour(ignore)]
 /// The events generated by this behaviour to be consumed in the swarm poll.
-events: Vec<BehaviourEvent<E>>,
+events: Vec<BehaviourEvent>,
 /// Logger for behaviour actions.
 #[behaviour(ignore)]
 log: slog::Logger,
 }
 
-impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
+impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
 pub fn new(
 local_key: &Keypair,
 net_conf: &NetworkConfig,
@@ -50,17 +55,25 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
 ) -> error::Result<Self> {
 let local_peer_id = local_key.public().clone().into_peer_id();
 let behaviour_log = log.new(o!());
 
 let ping_config = PingConfig::new()
 .with_timeout(Duration::from_secs(30))
 .with_interval(Duration::from_secs(20))
 .with_max_failures(NonZeroU32::new(2).expect("2 != 0"))
 .with_keep_alive(false);
+
+let identify = Identify::new(
+"lighthouse/libp2p".into(),
+version::version(),
+local_key.public(),
+);
+
 Ok(Behaviour {
-serenity_rpc: RPC::new(log),
+eth2_rpc: RPC::new(log),
 gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()),
 discovery: Discovery::new(local_key, net_conf, log)?,
 ping: Ping::new(ping_config),
+identify,
 events: Vec::new(),
 log: behaviour_log,
 })
@@ -68,31 +81,20 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
 }
 
 // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour
-impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<GossipsubEvent>
+impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubEvent>
-for Behaviour<TSubstream, E>
+for Behaviour<TSubstream>
 {
 fn inject_event(&mut self, event: GossipsubEvent) {
 match event {
 GossipsubEvent::Message(gs_msg) => {
 trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg));
 
-let pubsub_message = match PubsubMessage::from_ssz_bytes(&gs_msg.data) {
+let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data);
-//TODO: Punish peer on error
-Err(e) => {
-warn!(
-self.log,
-"Received undecodable message from Peer {:?} error", gs_msg.source;
-"error" => format!("{:?}", e)
-);
-return;
-}
-Ok(msg) => msg,
-};
 
 self.events.push(BehaviourEvent::GossipMessage {
 source: gs_msg.source,
 topics: gs_msg.topics,
-message: Box::new(pubsub_message),
+message: msg,
 });
 }
 GossipsubEvent::Subscribed { .. } => {}
@@ -101,8 +103,8 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProces
 }
 }
 
-impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<RPCMessage>
+impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<RPCMessage>
-for Behaviour<TSubstream, E>
+for Behaviour<TSubstream>
 {
 fn inject_event(&mut self, event: RPCMessage) {
 match event {
@@ -119,19 +121,19 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProces
 }
 }
 
-impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<PingEvent>
+impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<PingEvent>
-for Behaviour<TSubstream, E>
+for Behaviour<TSubstream>
 {
 fn inject_event(&mut self, _event: PingEvent) {
 // not interested in ping responses at the moment.
 }
 }
 
-impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
+impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
 /// Consumes the events list when polled.
 fn poll<TBehaviourIn>(
 &mut self,
-) -> Async<NetworkBehaviourAction<TBehaviourIn, BehaviourEvent<E>>> {
+) -> Async<NetworkBehaviourAction<TBehaviourIn, BehaviourEvent>> {
 if !self.events.is_empty() {
 return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0)));
 }
@@ -140,8 +142,36 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
 }
 }
 
-impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProcess<Discv5Event>
+impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<IdentifyEvent>
-for Behaviour<TSubstream, E>
+for Behaviour<TSubstream>
+{
+fn inject_event(&mut self, event: IdentifyEvent) {
+match event {
+IdentifyEvent::Identified {
+peer_id, mut info, ..
+} => {
+if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES {
+debug!(
+self.log,
+"More than 20 addresses have been identified, truncating"
+);
+info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES);
+}
+debug!(self.log, "Identified Peer"; "Peer" => format!("{}", peer_id),
+"Protocol Version" => info.protocol_version,
+"Agent Version" => info.agent_version,
+"Listening Addresses" => format!("{:?}", info.listen_addrs),
+"Protocols" => format!("{:?}", info.protocols)
+);
+}
+IdentifyEvent::Error { .. } => {}
+IdentifyEvent::SendBack { .. } => {}
+}
+}
+}
+
+impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<Discv5Event>
+for Behaviour<TSubstream>
 {
 fn inject_event(&mut self, _event: Discv5Event) {
 // discv5 has no events to inject
@@ -149,7 +179,7 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> NetworkBehaviourEventProces
 }
 
 /// Implements the combined behaviour for the libp2p service.
-impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
+impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
 /* Pubsub behaviour functions */
 
 /// Subscribes to a gossipsub topic.
@@ -158,7 +188,7 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
 }
 
 /// Publishes a message on the pubsub (gossipsub) behaviour.
-pub fn publish(&mut self, topics: Vec<Topic>, message: PubsubMessage<E>) {
+pub fn publish(&mut self, topics: Vec<Topic>, message: PubsubMessage) {
 let message_bytes = ssz_encode(&message);
 for topic in topics {
 self.gossipsub.publish(topic, message_bytes.clone());
@@ -169,7 +199,7 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
 
 /// Sends an RPC Request/Response via the RPC protocol.
 pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) {
-self.serenity_rpc.send_rpc(peer_id, rpc_event);
+self.eth2_rpc.send_rpc(peer_id, rpc_event);
 }
 
 /* Discovery / Peer management functions */
@@ -179,99 +209,60 @@ impl<TSubstream: AsyncRead + AsyncWrite, E: EthSpec> Behaviour<TSubstream, E> {
 }
 
 /// The types of events than can be obtained from polling the behaviour.
-pub enum BehaviourEvent<E: EthSpec> {
+pub enum BehaviourEvent {
 RPC(PeerId, RPCEvent),
 PeerDialed(PeerId),
 PeerDisconnected(PeerId),
 GossipMessage {
 source: PeerId,
 topics: Vec<TopicHash>,
-message: Box<PubsubMessage<E>>,
+message: PubsubMessage,
 },
 }
 
 /// Messages that are passed to and from the pubsub (Gossipsub) behaviour.
 #[derive(Debug, Clone, PartialEq)]
-pub enum PubsubMessage<E: EthSpec> {
+pub enum PubsubMessage {
 /// Gossipsub message providing notification of a new block.
-Block(BeaconBlock<E>),
+Block(Vec<u8>),
 /// Gossipsub message providing notification of a new attestation.
-Attestation(Attestation<E>),
+Attestation(Vec<u8>),
+/// Gossipsub message from an unknown topic.
+Unknown(Vec<u8>),
 }
 
-//TODO: Correctly encode/decode enums. Prefixing with integer for now.
+impl PubsubMessage {
-impl<E: EthSpec> Encode for PubsubMessage<E> {
+/* Note: This is assuming we are not hashing topics. If we choose to hash topics, these will
+* need to be modified.
+*
+* Also note that a message can be associated with many topics. As soon as one of the topics is
+* known we match. If none of the topics are known we return an unknown state.
+*/
+fn from_topics(topics: &Vec<TopicHash>, data: Vec<u8>) -> Self {
+for topic in topics {
+match topic.as_str() {
+BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data),
+BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data),
+_ => {}
+}
+}
+PubsubMessage::Unknown(data)
+}
+}
 
+impl Encode for PubsubMessage {
 fn is_ssz_fixed_len() -> bool {
 false
 }
 
 fn ssz_append(&self, buf: &mut Vec<u8>) {
-let offset = <u32 as Encode>::ssz_fixed_len() + <Vec<u8> as Encode>::ssz_fixed_len();
 
-let mut encoder = ssz::SszEncoder::container(buf, offset);
 
 match self {
-PubsubMessage::Block(block_gossip) => {
+PubsubMessage::Block(inner)
-encoder.append(&0_u32);
+| PubsubMessage::Attestation(inner)
+| PubsubMessage::Unknown(inner) => {
 // Encode the gossip as a Vec<u8>;
-encoder.append(&block_gossip.as_ssz_bytes());
+buf.append(&mut inner.as_ssz_bytes());
-}
-PubsubMessage::Attestation(attestation_gossip) => {
-encoder.append(&1_u32);
 
-// Encode the gossip as a Vec<u8>;
-encoder.append(&attestation_gossip.as_ssz_bytes());
-}
-}
 
-encoder.finalize();
-}
-}
 
-impl<E: EthSpec> Decode for PubsubMessage<E> {
-fn is_ssz_fixed_len() -> bool {
-false
-}
 
-fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
-let mut builder = ssz::SszDecoderBuilder::new(&bytes);
 
-builder.register_type::<u32>()?;
-builder.register_type::<Vec<u8>>()?;
 
-let mut decoder = builder.build()?;
 
-let id: u32 = decoder.decode_next()?;
-let body: Vec<u8> = decoder.decode_next()?;
 
-match id {
-0 => Ok(PubsubMessage::Block(BeaconBlock::from_ssz_bytes(&body)?)),
-1 => Ok(PubsubMessage::Attestation(Attestation::from_ssz_bytes(
-&body,
-)?)),
-_ => Err(DecodeError::BytesInvalid(
-"Invalid PubsubMessage id".to_string(),
-)),
 }
 }
 }
 
-#[cfg(test)]
-mod test {
-use super::*;
-use types::*;
 
-#[test]
-fn ssz_encoding() {
-let original = PubsubMessage::Block(BeaconBlock::<MainnetEthSpec>::empty(
-&MainnetEthSpec::default_spec(),
-));
 
-let encoded = ssz_encode(&original);
 
-let decoded = PubsubMessage::from_ssz_bytes(&encoded).unwrap();
 
-assert_eq!(original, decoded);
-}
 }
@@ -1,12 +1,13 @@
 use clap::ArgMatches;
 use enr::Enr;
 use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder};
+use libp2p::Multiaddr;
 use serde_derive::{Deserialize, Serialize};
 use std::path::PathBuf;
 use std::time::Duration;
 
 /// The beacon node topic string to subscribe to.
-pub const BEACON_PUBSUB_TOPIC: &str = "beacon_block";
+pub const BEACON_BLOCK_TOPIC: &str = "beacon_block";
 pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation";
 pub const SHARD_TOPIC_PREFIX: &str = "shard";
 
@@ -39,6 +40,9 @@ pub struct Config {
 /// List of nodes to initially connect to.
 pub boot_nodes: Vec<Enr>,
 
+/// List of libp2p nodes to initially connect to.
+pub libp2p_nodes: Vec<Multiaddr>,
+
 /// Client version
 pub client_version: String,
 
@@ -60,12 +64,13 @@ impl Default for Config {
 discovery_port: 9000,
 max_peers: 10,
 //TODO: Set realistic values for production
+// Note: This defaults topics to plain strings. Not hashes
 gs_config: GossipsubConfigBuilder::new()
-.max_gossip_size(4_000_000)
+.max_transmit_size(1_000_000)
-.inactivity_timeout(Duration::from_secs(90))
 .heartbeat_interval(Duration::from_secs(20))
 .build(),
 boot_nodes: vec![],
+libp2p_nodes: vec![],
 client_version: version::version(),
 topics: Vec::new(),
 }
@@ -118,6 +123,21 @@ impl Config {
 .collect::<Result<Vec<Enr>, _>>()?;
 }
 
+if let Some(libp2p_addresses_str) = args.value_of("libp2p-addresses") {
+self.libp2p_nodes = libp2p_addresses_str
+.split(',')
+.map(|multiaddr| {
+multiaddr
+.parse()
+.map_err(|_| format!("Invalid Multiaddr: {}", multiaddr))
+})
+.collect::<Result<Vec<Multiaddr>, _>>()?;
+}
+
+if let Some(topics_str) = args.value_of("topics") {
+self.topics = topics_str.split(',').map(|s| s.into()).collect();
+}
+
 if let Some(discovery_address_str) = args.value_of("discovery-address") {
 self.discovery_address = discovery_address_str
 .parse()
@@ -4,13 +4,11 @@ use crate::{error, NetworkConfig};
 /// Currently using discv5 for peer discovery.
 ///
 use futures::prelude::*;
-use libp2p::core::swarm::{
+use libp2p::core::{identity::Keypair, ConnectedPoint, Multiaddr, PeerId};
-ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
-};
-use libp2p::core::{identity::Keypair, Multiaddr, PeerId, ProtocolsHandler};
 use libp2p::discv5::{Discv5, Discv5Event};
 use libp2p::enr::{Enr, EnrBuilder, NodeId};
 use libp2p::multiaddr::Protocol;
+use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler};
 use slog::{debug, info, o, warn};
 use std::collections::HashSet;
 use std::fs::File;
@@ -37,6 +35,9 @@ pub struct Discovery<TSubstream> {
 /// The target number of connected peers on the libp2p interface.
 max_peers: usize,
 
+/// directory to save ENR to
+enr_dir: String,
+
 /// The delay between peer discovery searches.
 peer_discovery_delay: Delay,
 
@@ -54,9 +55,6 @@ pub struct Discovery<TSubstream> {
 
 /// Logger for the discovery behaviour.
 log: slog::Logger,
-
-/// directory to save ENR to
-enr_dir: String,
 }
 
 impl<TSubstream> Discovery<TSubstream> {
@@ -11,9 +11,9 @@ mod service;
 
 pub use behaviour::PubsubMessage;
 pub use config::{
-Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX,
+Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX,
 };
-pub use libp2p::floodsub::{Topic, TopicBuilder, TopicHash};
+pub use libp2p::gossipsub::{Topic, TopicHash};
 pub use libp2p::multiaddr;
 pub use libp2p::Multiaddr;
 pub use libp2p::{
@@ -5,23 +5,21 @@ use crate::rpc::protocol::{InboundFramed, OutboundFramed};
 use core::marker::PhantomData;
 use fnv::FnvHashMap;
 use futures::prelude::*;
-use libp2p::core::protocols_handler::{
+use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade};
+use libp2p::swarm::protocols_handler::{
 KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
 };
-use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade};
 use smallvec::SmallVec;
 use std::time::{Duration, Instant};
 use tokio_io::{AsyncRead, AsyncWrite};
-use types::EthSpec;
 
 /// The time (in seconds) before a substream that is awaiting a response times out.
 pub const RESPONSE_TIMEOUT: u64 = 9;
 
 /// Implementation of `ProtocolsHandler` for the RPC protocol.
-pub struct RPCHandler<TSubstream, E>
+pub struct RPCHandler<TSubstream>
 where
 TSubstream: AsyncRead + AsyncWrite,
-E: EthSpec,
 {
 /// The upgrade for inbound substreams.
 listen_protocol: SubstreamProtocol<RPCProtocol>,
@@ -56,8 +54,8 @@ where
 /// After the given duration has elapsed, an inactive connection will shutdown.
 inactive_timeout: Duration,
 
-/// Phantom EthSpec.
+/// Marker to pin the generic stream.
-_phantom: PhantomData<E>,
+_phantom: PhantomData<TSubstream>,
 }
 
 /// An outbound substream is waiting a response from the user.
@@ -90,10 +88,9 @@ where
 },
 }
 
-impl<TSubstream, E> RPCHandler<TSubstream, E>
+impl<TSubstream> RPCHandler<TSubstream>
 where
 TSubstream: AsyncRead + AsyncWrite,
-E: EthSpec,
 {
 pub fn new(
 listen_protocol: SubstreamProtocol<RPCProtocol>,
@@ -145,20 +142,18 @@ where
 }
 }
 
-impl<TSubstream, E> Default for RPCHandler<TSubstream, E>
+impl<TSubstream> Default for RPCHandler<TSubstream>
 where
 TSubstream: AsyncRead + AsyncWrite,
-E: EthSpec,
 {
 fn default() -> Self {
 RPCHandler::new(SubstreamProtocol::new(RPCProtocol), Duration::from_secs(30))
 }
 }
 
-impl<TSubstream, E> ProtocolsHandler for RPCHandler<TSubstream, E>
+impl<TSubstream> ProtocolsHandler for RPCHandler<TSubstream>
 where
 TSubstream: AsyncRead + AsyncWrite,
-E: EthSpec,
 {
 type InEvent = RPCEvent;
 type OutEvent = RPCEvent;
@@ -273,7 +268,11 @@ where
 Self::Error,
 > {
 if let Some(err) = self.pending_error.take() {
-return Err(err);
+// Returning an error here will result in dropping any peer that doesn't support any of
+// the RPC protocols. For our immediate purposes we permit this and simply log that an
+// upgrade was not supported.
+// TODO: Add a logger to the handler for trace output.
+dbg!(&err);
 }
 
 // return any events that need to be reported
@@ -6,9 +6,9 @@
 
 use futures::prelude::*;
 use handler::RPCHandler;
-use libp2p::core::protocols_handler::ProtocolsHandler;
+use libp2p::core::ConnectedPoint;
-use libp2p::core::swarm::{
+use libp2p::swarm::{
-ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
+protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
 };
 use libp2p::{Multiaddr, PeerId};
 pub use methods::{ErrorMessage, HelloMessage, RPCErrorResponse, RPCResponse, RequestId};
@@ -16,7 +16,6 @@ pub use protocol::{RPCError, RPCProtocol, RPCRequest};
 use slog::o;
 use std::marker::PhantomData;
 use tokio::io::{AsyncRead, AsyncWrite};
-use types::EthSpec;
 
 pub(crate) mod codec;
 mod handler;
@@ -50,16 +49,16 @@ impl RPCEvent {
 
 /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
 /// logic.
-pub struct RPC<TSubstream, E: EthSpec> {
+pub struct RPC<TSubstream> {
 /// Queue of events to processed.
 events: Vec<NetworkBehaviourAction<RPCEvent, RPCMessage>>,
 /// Pins the generic substream.
-marker: PhantomData<(TSubstream, E)>,
+marker: PhantomData<(TSubstream)>,
 /// Slog logger for RPC behaviour.
 _log: slog::Logger,
 }
 
-impl<TSubstream, E: EthSpec> RPC<TSubstream, E> {
+impl<TSubstream> RPC<TSubstream> {
 pub fn new(log: &slog::Logger) -> Self {
 let log = log.new(o!("Service" => "Libp2p-RPC"));
 RPC {
@@ -80,12 +79,11 @@ impl<TSubstream, E: EthSpec> RPC<TSubstream, E> {
 }
 }
 
-impl<TSubstream, E> NetworkBehaviour for RPC<TSubstream, E>
+impl<TSubstream> NetworkBehaviour for RPC<TSubstream>
 where
 TSubstream: AsyncRead + AsyncWrite,
-E: EthSpec,
 {
-type ProtocolsHandler = RPCHandler<TSubstream, E>;
+type ProtocolsHandler = RPCHandler<TSubstream>;
 type OutEvent = RPCMessage;
 
 fn new_handler(&mut self) -> Self::ProtocolsHandler {
@@ -8,7 +8,7 @@ use futures::{
 future::{self, FutureResult},
 sink, stream, Sink, Stream,
 };
-use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo};
+use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
 use std::io;
 use std::time::Duration;
 use tokio::codec::Framed;
@@ -28,24 +28,22 @@ const REQUEST_TIMEOUT: u64 = 3;
 pub struct RPCProtocol;
 
 impl UpgradeInfo for RPCProtocol {
-type Info = RawProtocolId;
+type Info = ProtocolId;
 type InfoIter = Vec<Self::Info>;
 
 fn protocol_info(&self) -> Self::InfoIter {
 vec![
-ProtocolId::new("hello", "1.0.0", "ssz").into(),
+ProtocolId::new("hello", "1.0.0", "ssz"),
-ProtocolId::new("goodbye", "1.0.0", "ssz").into(),
+ProtocolId::new("goodbye", "1.0.0", "ssz"),
-ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into(),
+ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"),
-ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into(),
+ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"),
-ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into(),
+ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"),
 ]
 }
 }
 
-/// The raw protocol id sent over the wire.
-type RawProtocolId = Vec<u8>;
-
 /// Tracks the types in a protocol id.
+#[derive(Clone)]
 pub struct ProtocolId {
 /// The rpc message type/name.
 pub message_name: String,
@@ -55,44 +53,31 @@ pub struct ProtocolId {
 
 /// The encoding of the RPC.
 pub encoding: String,
 
+/// The protocol id that is formed from the above fields.
+protocol_id: String,
 }
 
 /// An RPC protocol ID.
 impl ProtocolId {
 pub fn new(message_name: &str, version: &str, encoding: &str) -> Self {
+let protocol_id = format!(
+"{}/{}/{}/{}",
+PROTOCOL_PREFIX, message_name, version, encoding
+);
 
 ProtocolId {
 message_name: message_name.into(),
 version: version.into(),
 encoding: encoding.into(),
+protocol_id,
+}
 }
 }
 
-/// Converts a raw RPC protocol id string into an `RPCProtocolId`
+impl ProtocolName for ProtocolId {
-pub fn from_bytes(bytes: &[u8]) -> Result<Self, RPCError> {
+fn protocol_name(&self) -> &[u8] {
-let protocol_string = String::from_utf8(bytes.to_vec())
+self.protocol_id.as_bytes()
-.map_err(|_| RPCError::InvalidProtocol("Invalid protocol Id"))?;
-let protocol_list: Vec<&str> = protocol_string.as_str().split('/').take(7).collect();
 
-if protocol_list.len() != 7 {
-return Err(RPCError::InvalidProtocol("Not enough '/'"));
-}
 
-Ok(ProtocolId {
-message_name: protocol_list[4].into(),
-version: protocol_list[5].into(),
-encoding: protocol_list[6].into(),
-})
-}
-}
 
-impl Into<RawProtocolId> for ProtocolId {
-fn into(self) -> RawProtocolId {
-format!(
-"{}/{}/{}/{}",
-PROTOCOL_PREFIX, self.message_name, self.version, self.encoding
-)
-.as_bytes()
-.to_vec()
 }
 }
 
@@ -127,16 +112,11 @@ where
 fn upgrade_inbound(
 self,
 socket: upgrade::Negotiated<TSocket>,
-protocol: RawProtocolId,
+protocol: ProtocolId,
 ) -> Self::Future {
-// TODO: Verify this
+match protocol.encoding.as_str() {
-let protocol_id =
-ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols");
-
-match protocol_id.encoding.as_str() {
 "ssz" | _ => {
-let ssz_codec =
+let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE));
-BaseInboundCodec::new(SSZInboundCodec::new(protocol_id, MAX_RPC_SIZE));
 let codec = InboundCodec::SSZ(ssz_codec);
 Framed::new(socket, codec)
 .into_future()
@@ -171,7 +151,7 @@ pub enum RPCRequest {
 }
 
 impl UpgradeInfo for RPCRequest {
-type Info = RawProtocolId;
+type Info = ProtocolId;
 type InfoIter = Vec<Self::Info>;
 
 // add further protocols as we support more encodings/versions
@@ -182,22 +162,25 @@ impl UpgradeInfo for RPCRequest {
 
 /// Implements the encoding per supported protocol for RPCRequest.
 impl RPCRequest {
-pub fn supported_protocols(&self) -> Vec<RawProtocolId> {
+pub fn supported_protocols(&self) -> Vec<ProtocolId> {
 match self {
 // add more protocols when versions/encodings are supported
-RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1.0.0", "ssz").into()],
+RPCRequest::Hello(_) => vec![
-RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz").into()],
+ProtocolId::new("hello", "1.0.0", "ssz"),
+ProtocolId::new("goodbye", "1.0.0", "ssz"),
+],
+RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")],
 RPCRequest::BeaconBlockRoots(_) => {
-vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into()]
+vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")]
 }
 RPCRequest::BeaconBlockHeaders(_) => {
-vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into()]
+vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")]
 }
 RPCRequest::BeaconBlockBodies(_) => {
-vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into()]
+vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")]
 }
 RPCRequest::BeaconChainState(_) => {
-vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz").into()]
+vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")]
 }
 }
 }
@@ -230,12 +213,9 @@ where
 socket: upgrade::Negotiated<TSocket>,
 protocol: Self::Info,
 ) -> Self::Future {
-let protocol_id =
+match protocol.encoding.as_str() {
-ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols");
-
-match protocol_id.encoding.as_str() {
 "ssz" | _ => {
-let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol_id, 4096));
+let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096));
 let codec = OutboundCodec::SSZ(ssz_codec);
 Framed::new(socket, codec).send(self)
 }
@@ -3,8 +3,8 @@ use crate::error;
 use crate::multiaddr::Protocol;
 use crate::rpc::RPCEvent;
 use crate::NetworkConfig;
-use crate::{TopicBuilder, TopicHash};
+use crate::{Topic, TopicHash};
-use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC};
+use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC};
 use futures::prelude::*;
 use futures::Stream;
 use libp2p::core::{
@@ -21,25 +21,24 @@ use std::fs::File;
 use std::io::prelude::*;
 use std::io::{Error, ErrorKind};
 use std::time::Duration;
-use types::EthSpec;
 
 type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>;
-type Libp2pBehaviour<E> = Behaviour<Substream<StreamMuxerBox>, E>;
+type Libp2pBehaviour = Behaviour<Substream<StreamMuxerBox>>;
 
 const NETWORK_KEY_FILENAME: &str = "key";
 
 /// The configuration and state of the libp2p components for the beacon node.
-pub struct Service<E: EthSpec> {
+pub struct Service {
 /// The libp2p Swarm handler.
 //TODO: Make this private
-pub swarm: Swarm<Libp2pStream, Libp2pBehaviour<E>>,
+pub swarm: Swarm<Libp2pStream, Libp2pBehaviour>,
 /// This node's PeerId.
 _local_peer_id: PeerId,
 /// The libp2p logger handle.
 pub log: slog::Logger,
 }
 
-impl<E: EthSpec> Service<E> {
+impl Service {
 pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result<Self> {
 debug!(log, "Network-libp2p Service starting");
 
@@ -76,18 +75,35 @@ impl<E: EthSpec> Service<E> {
 ),
 };
 
+// attempt to connect to user-input libp2p nodes
+for multiaddr in config.libp2p_nodes {
+match Swarm::dial_addr(&mut swarm, multiaddr.clone()) {
+Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr),
+Err(err) => debug!(
+log,
+"Could not connect to node: {} error: {:?}", multiaddr, err
+),
+};
+}
+
 // subscribe to default gossipsub topics
 let mut topics = vec![];
 //TODO: Handle multiple shard attestations. For now we simply use a separate topic for
 // attestations
-topics.push(BEACON_ATTESTATION_TOPIC.to_string());
+topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into()));
-topics.push(BEACON_PUBSUB_TOPIC.to_string());
+topics.push(Topic::new(BEACON_BLOCK_TOPIC.into()));
-topics.append(&mut config.topics.clone());
+topics.append(
+&mut config
+.topics
+.iter()
+.cloned()
+.map(|s| Topic::new(s))
+.collect(),
+);
 
 let mut subscribed_topics = vec![];
 for topic in topics {
-let t = TopicBuilder::new(topic.clone()).build();
+if swarm.subscribe(topic.clone()) {
-if swarm.subscribe(t) {
 trace!(log, "Subscribed to topic: {:?}", topic);
 subscribed_topics.push(topic);
 } else {
@@ -104,8 +120,8 @@ impl<E: EthSpec> Service<E> {
 }
 }
 
-impl<E: EthSpec> Stream for Service<E> {
+impl Stream for Service {
-type Item = Libp2pEvent<E>;
+type Item = Libp2pEvent;
 type Error = crate::error::Error;
 
 fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
@@ -119,7 +135,7 @@ impl<E: EthSpec> Stream for Service<E> {
 topics,
 message,
 } => {
-trace!(self.log, "Pubsub message received: {:?}", message);
+trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message));
 return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage {
 source,
 topics,
@@ -179,7 +195,7 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox)
 }
 
 /// Events that can be obtained from polling the Libp2p Service.
-pub enum Libp2pEvent<E: EthSpec> {
+pub enum Libp2pEvent {
 /// An RPC response request has been received on the swarm.
 RPC(PeerId, RPCEvent),
 /// Initiated the connection to a new peer.
@@ -190,7 +206,7 @@ pub enum Libp2pEvent<E: EthSpec> {
 PubsubMessage {
 source: PeerId,
 topics: Vec<TopicHash>,
-message: Box<PubsubMessage<E>>,
+message: PubsubMessage,
 },
 }
 
@@ -64,7 +64,7 @@ fn handle_fork<T: BeaconChainTypes + 'static>(req: &mut Request) -> IronResult<R
 
 let response = json!({
 "fork": beacon_chain.head().beacon_state.fork,
-"chain_id": beacon_chain.spec.chain_id
+"network_id": beacon_chain.spec.network_id
 });
 
 Ok(Response::with((Status::Ok, response.to_string())))
@@ -76,7 +76,7 @@ pub fn create_iron_http_server<T: BeaconChainTypes + 'static>(
 pub fn start_service<T: BeaconChainTypes + 'static>(
 config: &HttpServerConfig,
 executor: &TaskExecutor,
-_network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
+_network_chan: mpsc::UnboundedSender<NetworkMessage>,
 beacon_chain: Arc<BeaconChain<T>>,
 db_path: PathBuf,
 metrics_registry: Registry,
@@ -14,7 +14,7 @@ use slog::{debug, warn};
 use ssz::{Decode, DecodeError};
 use std::sync::Arc;
 use tokio::sync::mpsc;
-use types::{BeaconBlockHeader, EthSpec};
+use types::{Attestation, BeaconBlock, BeaconBlockHeader};
 
 /// Handles messages received from the network and client and organises syncing.
 pub struct MessageHandler<T: BeaconChainTypes> {
@@ -23,14 +23,14 @@ pub struct MessageHandler<T: BeaconChainTypes> {
 /// The syncing framework.
 sync: SimpleSync<T>,
 /// The context required to send messages to, and process messages from peers.
-network_context: NetworkContext<T::EthSpec>,
+network_context: NetworkContext,
 /// The `MessageHandler` logger.
 log: slog::Logger,
 }
 
 /// Types of messages the handler can receive.
 #[derive(Debug)]
-pub enum HandlerMessage<E: EthSpec> {
+pub enum HandlerMessage {
 /// We have initiated a connection to a new peer.
 PeerDialed(PeerId),
 /// Peer has disconnected,
@@ -38,17 +38,17 @@ pub enum HandlerMessage<E: EthSpec> {
 /// An RPC response/request has been received.
 RPC(PeerId, RPCEvent),
 /// A gossip message has been received.
-PubsubMessage(PeerId, Box<PubsubMessage<E>>),
+PubsubMessage(PeerId, PubsubMessage),
 }
 
 impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
 /// Initializes and runs the MessageHandler.
 pub fn spawn(
 beacon_chain: Arc<BeaconChain<T>>,
-network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
+network_send: mpsc::UnboundedSender<NetworkMessage>,
 executor: &tokio::runtime::TaskExecutor,
 log: slog::Logger,
-) -> error::Result<mpsc::UnboundedSender<HandlerMessage<T::EthSpec>>> {
+) -> error::Result<mpsc::UnboundedSender<HandlerMessage>> {
 debug!(log, "Service starting");
 
 let (handler_send, handler_recv) = mpsc::unbounded_channel();
@@ -78,7 +78,7 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
 }
 
 /// Handle all messages incoming from the network service.
-fn handle_message(&mut self, message: HandlerMessage<T::EthSpec>) {
+fn handle_message(&mut self, message: HandlerMessage) {
 match message {
 // we have initiated a connection to a peer
 HandlerMessage::PeerDialed(peer_id) => {
@@ -94,7 +94,7 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
 }
 // we have received an RPC message request/response
 HandlerMessage::PubsubMessage(peer_id, gossip) => {
-self.handle_gossip(peer_id, *gossip);
+self.handle_gossip(peer_id, gossip);
 }
 }
 }
@@ -218,6 +218,62 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
 }
 }
 
+/// Handle various RPC errors
+fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) {
+//TODO: Handle error correctly
+warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error));
+}
+
+/// Handle RPC messages
+fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) {
+match gossip_message {
+PubsubMessage::Block(message) => match self.decode_gossip_block(message) {
+Err(e) => {
+debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e));
+}
+Ok(block) => {
+let _should_forward_on =
+self.sync
+.on_block_gossip(peer_id, block, &mut self.network_context);
+}
+},
+PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) {
+Err(e) => {
+debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e));
+}
+Ok(attestation) => {
+self.sync
+.on_attestation_gossip(peer_id, attestation, &mut self.network_context)
+}
+},
+PubsubMessage::Unknown(message) => {
+// Received a message from an unknown topic. Ignore for now
+debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message));
+}
+}
+}
+
+/* Decoding of blocks and attestations from the network.
+*
+* TODO: Apply efficient decoding/verification of these objects
+*/
+
+fn decode_gossip_block(
+&self,
+beacon_block: Vec<u8>,
+) -> Result<BeaconBlock<T::EthSpec>, DecodeError> {
+//TODO: Apply verification before decoding.
+BeaconBlock::from_ssz_bytes(&beacon_block)
+}
+
+fn decode_gossip_attestation(
+&self,
+beacon_block: Vec<u8>,
+) -> Result<Attestation<T::EthSpec>, DecodeError> {
+//TODO: Apply verification before decoding.
+Attestation::from_ssz_bytes(&beacon_block)
+}
+
 /// Verifies and decodes the ssz-encoded block bodies received from peers.
 fn decode_block_bodies(
 &self,
@@ -241,39 +297,18 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
 //TODO: Implement faster header verification before decoding entirely
 Vec::from_ssz_bytes(&headers_response.headers)
 }
 
-/// Handle various RPC errors
-fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) {
-//TODO: Handle error correctly
-warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error));
-}
 
-/// Handle RPC messages
-fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage<T::EthSpec>) {
-match gossip_message {
-PubsubMessage::Block(message) => {
-let _should_forward_on =
-self.sync
-.on_block_gossip(peer_id, message, &mut self.network_context);
-}
-PubsubMessage::Attestation(message) => {
-self.sync
-.on_attestation_gossip(peer_id, message, &mut self.network_context)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: RPC Rewrite makes this struct fairly pointless
|
// TODO: RPC Rewrite makes this struct fairly pointless
|
||||||
pub struct NetworkContext<E: EthSpec> {
|
pub struct NetworkContext {
|
||||||
/// The network channel to relay messages to the Network service.
|
/// The network channel to relay messages to the Network service.
|
||||||
network_send: mpsc::UnboundedSender<NetworkMessage<E>>,
|
network_send: mpsc::UnboundedSender<NetworkMessage>,
|
||||||
/// The `MessageHandler` logger.
|
/// The `MessageHandler` logger.
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<E: EthSpec> NetworkContext<E> {
|
impl NetworkContext {
|
||||||
pub fn new(network_send: mpsc::UnboundedSender<NetworkMessage<E>>, log: slog::Logger) -> Self {
|
pub fn new(network_send: mpsc::UnboundedSender<NetworkMessage>, log: slog::Logger) -> Self {
|
||||||
Self { network_send, log }
|
Self { network_send, log }
|
||||||
}
|
}
|
||||||
|
|
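With this change gossip payloads cross the wire as raw SSZ bytes: the publishing side calls `as_ssz_bytes()` and the handler decodes with `from_ssz_bytes()` (see `decode_gossip_block` above). A minimal round-trip sketch, assuming `MinimalEthSpec` purely as an example spec type (not something this diff mandates):

    use ssz::{Decode, Encode};
    use types::{BeaconBlock, MinimalEthSpec};

    fn gossip_roundtrip(block: BeaconBlock<MinimalEthSpec>) {
        // Publisher side: serialize the block to SSZ bytes before it is put on the wire.
        let bytes: Vec<u8> = block.as_ssz_bytes();
        // Handler side: decode the bytes again, mirroring `decode_gossip_block`.
        let decoded = BeaconBlock::<MinimalEthSpec>::from_ssz_bytes(&bytes)
            .expect("a freshly encoded block should decode");
        assert_eq!(block, decoded);
    }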
@@ -14,13 +14,12 @@ use slog::{debug, info, o, trace};
 use std::sync::Arc;
 use tokio::runtime::TaskExecutor;
 use tokio::sync::{mpsc, oneshot};
-use types::EthSpec;

 /// Service that handles communication between internal services and the eth2_libp2p network service.
 pub struct Service<T: BeaconChainTypes> {
-libp2p_service: Arc<Mutex<LibP2PService<T::EthSpec>>>,
+libp2p_service: Arc<Mutex<LibP2PService>>,
 _libp2p_exit: oneshot::Sender<()>,
-_network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
+_network_send: mpsc::UnboundedSender<NetworkMessage>,
 _phantom: PhantomData<T>, //message_handler: MessageHandler,
 //message_handler_send: Sender<HandlerMessage>
 }
@@ -31,9 +30,9 @@ impl<T: BeaconChainTypes + 'static> Service<T> {
 config: &NetworkConfig,
 executor: &TaskExecutor,
 log: slog::Logger,
-) -> error::Result<(Arc<Self>, mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>)> {
+) -> error::Result<(Arc<Self>, mpsc::UnboundedSender<NetworkMessage>)> {
 // build the network channel
-let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage<_>>();
+let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage>();
 // launch message handler thread
 let message_handler_log = log.new(o!("Service" => "MessageHandler"));
 let message_handler_send = MessageHandler::spawn(
@@ -65,15 +64,15 @@ impl<T: BeaconChainTypes + 'static> Service<T> {
 Ok((Arc::new(network_service), network_send))
 }

-pub fn libp2p_service(&self) -> Arc<Mutex<LibP2PService<T::EthSpec>>> {
+pub fn libp2p_service(&self) -> Arc<Mutex<LibP2PService>> {
 self.libp2p_service.clone()
 }
 }

-fn spawn_service<E: EthSpec>(
-libp2p_service: Arc<Mutex<LibP2PService<E>>>,
-network_recv: mpsc::UnboundedReceiver<NetworkMessage<E>>,
-message_handler_send: mpsc::UnboundedSender<HandlerMessage<E>>,
+fn spawn_service(
+libp2p_service: Arc<Mutex<LibP2PService>>,
+network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
+message_handler_send: mpsc::UnboundedSender<HandlerMessage>,
 executor: &TaskExecutor,
 log: slog::Logger,
 ) -> error::Result<tokio::sync::oneshot::Sender<()>> {
@@ -99,10 +98,10 @@ fn spawn_service<E: EthSpec>(
 }

 //TODO: Potentially handle channel errors
-fn network_service<E: EthSpec>(
-libp2p_service: Arc<Mutex<LibP2PService<E>>>,
-mut network_recv: mpsc::UnboundedReceiver<NetworkMessage<E>>,
-mut message_handler_send: mpsc::UnboundedSender<HandlerMessage<E>>,
+fn network_service(
+libp2p_service: Arc<Mutex<LibP2PService>>,
+mut network_recv: mpsc::UnboundedReceiver<NetworkMessage>,
+mut message_handler_send: mpsc::UnboundedSender<HandlerMessage>,
 log: slog::Logger,
 ) -> impl futures::Future<Item = (), Error = eth2_libp2p::error::Error> {
 futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> {
@@ -119,7 +118,7 @@ fn network_service<E: EthSpec>(
 },
 NetworkMessage::Publish { topics, message } => {
 debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics));
-libp2p_service.lock().swarm.publish(topics, *message);
+libp2p_service.lock().swarm.publish(topics, message);
 }
 },
 Ok(Async::NotReady) => break,
@@ -176,14 +175,14 @@ fn network_service<E: EthSpec>(

 /// Types of messages that the network service can receive.
 #[derive(Debug)]
-pub enum NetworkMessage<E: EthSpec> {
+pub enum NetworkMessage {
 /// Send a message to libp2p service.
 //TODO: Define typing for messages across the wire
 Send(PeerId, OutgoingMessage),
 /// Publish a message to pubsub mechanism.
 Publish {
 topics: Vec<Topic>,
-message: Box<PubsubMessage<E>>,
+message: PubsubMessage,
 },
 }

@@ -123,7 +123,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 /// Handle the connection of a new peer.
 ///
 /// Sends a `Hello` message to the peer.
-pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext<T::EthSpec>) {
+pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) {
 info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id));

 network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain)));
@@ -137,7 +137,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 peer_id: PeerId,
 request_id: RequestId,
 hello: HelloMessage,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id));

@@ -156,7 +156,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 hello: HelloMessage,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id));

@@ -171,7 +171,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 hello: HelloMessage,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 let remote = PeerSyncInfo::from(hello);
 let local = PeerSyncInfo::from(&self.chain);
@@ -277,7 +277,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 peer_id: PeerId,
 request_id: RequestId,
 req: BeaconBlockRootsRequest,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(
 self.log,
@@ -323,7 +323,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 res: BeaconBlockRootsResponse,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(
 self.log,
@@ -387,7 +387,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 peer_id: PeerId,
 request_id: RequestId,
 req: BeaconBlockHeadersRequest,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(
 self.log,
@@ -438,7 +438,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 headers: Vec<BeaconBlockHeader>,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(
 self.log,
@@ -470,7 +470,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 peer_id: PeerId,
 request_id: RequestId,
 req: BeaconBlockBodiesRequest,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 let block_bodies: Vec<BeaconBlockBody<_>> = req
 .block_roots
@@ -516,7 +516,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 res: DecodedBeaconBlockBodiesResponse<T::EthSpec>,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(
 self.log,
@@ -555,7 +555,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 block: BeaconBlock<T::EthSpec>,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) -> bool {
 if let Some(outcome) =
 self.process_block(peer_id.clone(), block.clone(), network, &"gossip")
@@ -625,7 +625,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 _peer_id: PeerId,
 msg: Attestation<T::EthSpec>,
-_network: &mut NetworkContext<T::EthSpec>,
+_network: &mut NetworkContext,
 ) {
 match self.chain.process_attestation(msg) {
 Ok(outcome) => info!(
@@ -645,7 +645,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 req: BeaconBlockRootsRequest,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 // Potentially set state to sync.
 if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE {
@@ -669,7 +669,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 req: BeaconBlockHeadersRequest,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(
 self.log,
@@ -686,7 +686,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 req: BeaconBlockBodiesRequest,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 ) {
 debug!(
 self.log,
@@ -722,7 +722,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 block_root: Hash256,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 source: &str,
 ) -> Option<BlockProcessingOutcome> {
 match self.import_queue.attempt_complete_block(block_root) {
@@ -815,7 +815,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
 &mut self,
 peer_id: PeerId,
 block: BeaconBlock<T::EthSpec>,
-network: &mut NetworkContext<T::EthSpec>,
+network: &mut NetworkContext,
 source: &str,
 ) -> Option<BlockProcessingOutcome> {
 let processing_result = self.chain.process_block(block.clone());
@@ -917,9 +917,9 @@ fn hello_message<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) -> HelloMes
 let state = &beacon_chain.head().beacon_state;

 HelloMessage {
-//TODO: Correctly define the chain/network id
-network_id: spec.chain_id,
-chain_id: u64::from(spec.chain_id),
+network_id: spec.network_id,
+//TODO: Correctly define the chain id
+chain_id: spec.network_id as u64,
 latest_finalized_root: state.finalized_checkpoint.root,
 latest_finalized_epoch: state.finalized_checkpoint.epoch,
 best_root: beacon_chain.head().beacon_block_root,
@@ -7,16 +7,19 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
 beacon_chain = { path = "../beacon_chain" }
+store = { path = "../store" }
 version = { path = "../version" }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "^1.0"
 slog = "^2.2.3"
 slog-term = "^2.4.0"
 slog-async = "^2.3.0"
+state_processing = { path = "../../eth2/state_processing" }
+types = { path = "../../eth2/types" }
 clap = "2.32.0"
 http = "^0.1.17"
 hyper = "0.12.32"
-hyper-router = "^0.5"
 futures = "0.1"
 exit-future = "0.1.3"
 tokio = "0.1.17"
+url = "2.0"
beacon_node/rest_api/src/beacon.rs (new file, 60 lines)
@@ -0,0 +1,60 @@
+use super::{success_response, ApiResult};
+use crate::{helpers::*, ApiError, UrlQuery};
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use hyper::{Body, Request};
+use std::sync::Arc;
+use store::Store;
+use types::BeaconState;
+
+/// HTTP handler to return a `BeaconState` at a given `root` or `slot`.
+///
+/// Will not return a state if the request slot is in the future. Will return states higher than
+/// the current head by skipping slots.
+pub fn get_state<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
+let beacon_chain = req
+.extensions()
+.get::<Arc<BeaconChain<T>>>()
+.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
+
+let query_params = ["root", "slot"];
+let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?;
+
+let state: BeaconState<T::EthSpec> = match (key.as_ref(), value) {
+("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?,
+("root", value) => {
+let root = &parse_root(&value)?;
+
+beacon_chain
+.store
+.get(root)?
+.ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?
+}
+_ => unreachable!("Guarded by UrlQuery::from_request()"),
+};
+
+let json: String = serde_json::to_string(&state)
+.map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconState: {:?}", e)))?;
+
+Ok(success_response(Body::from(json)))
+}
+
+/// HTTP handler to return a `BeaconState` root at a given or `slot`.
+///
+/// Will not return a state if the request slot is in the future. Will return states higher than
+/// the current head by skipping slots.
+pub fn get_state_root<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
+let beacon_chain = req
+.extensions()
+.get::<Arc<BeaconChain<T>>>()
+.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
+
+let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
+let slot = parse_slot(&slot_string)?;
+
+let root = state_root_at_slot(&beacon_chain, slot)?;
+
+let json: String = serde_json::to_string(&root)
+.map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?;
+
+Ok(success_response(Body::from(json)))
+}
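For orientation, the handlers above are reached with query strings such as the following (paths per the routing added in `lib.rs` later in this diff; slots are plain decimal, roots need a `0x` prefix, per `parse_slot` and `parse_root` in `helpers.rs`):

    GET /beacon/state?slot=42
    GET /beacon/state?root=0x0000000000000000000000000000000000000000000000000000000000000000
    GET /beacon/state_root?slot=42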
@@ -1,65 +0,0 @@
-use beacon_chain::{BeaconChain, BeaconChainTypes};
-use serde::Serialize;
-use slog::info;
-use std::sync::Arc;
-use version;
-
-use super::{path_from_request, success_response, APIResult, APIService};
-
-use hyper::{Body, Request, Response};
-use hyper_router::{Route, RouterBuilder};
-
-#[derive(Clone)]
-pub struct BeaconNodeServiceInstance<T: BeaconChainTypes + 'static> {
-pub marker: std::marker::PhantomData<T>,
-}
-
-/// A string which uniquely identifies the client implementation and its version; similar to [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3).
-#[derive(Serialize)]
-pub struct Version(String);
-impl From<String> for Version {
-fn from(x: String) -> Self {
-Version(x)
-}
-}
-
-/// The genesis_time configured for the beacon node, which is the unix time at which the Eth2.0 chain began.
-#[derive(Serialize)]
-pub struct GenesisTime(u64);
-impl From<u64> for GenesisTime {
-fn from(x: u64) -> Self {
-GenesisTime(x)
-}
-}
-
-impl<T: BeaconChainTypes + 'static> APIService for BeaconNodeServiceInstance<T> {
-fn add_routes(&mut self, router_builder: RouterBuilder) -> Result<RouterBuilder, hyper::Error> {
-let router_builder = router_builder
-.add(Route::get("/version").using(result_to_response!(get_version)))
-.add(Route::get("/genesis_time").using(result_to_response!(get_genesis_time::<T>)));
-Ok(router_builder)
-}
-}
-
-/// Read the version string from the current Lighthouse build.
-fn get_version(_req: Request<Body>) -> APIResult {
-let ver = Version::from(version::version());
-let body = Body::from(
-serde_json::to_string(&ver).expect("Version should always be serialializable as JSON."),
-);
-Ok(success_response(body))
-}
-
-/// Read the genesis time from the current beacon chain state.
-fn get_genesis_time<T: BeaconChainTypes + 'static>(req: Request<Body>) -> APIResult {
-let beacon_chain = req.extensions().get::<Arc<BeaconChain<T>>>().unwrap();
-let gen_time = {
-let state = &beacon_chain.head().beacon_state;
-state.genesis_time
-};
-let body = Body::from(
-serde_json::to_string(&gen_time)
-.expect("Genesis should time always have a valid JSON serialization."),
-);
-Ok(success_response(body))
-}
beacon_node/rest_api/src/helpers.rs (new file, 156 lines)
@@ -0,0 +1,156 @@
+use crate::ApiError;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use store::{iter::AncestorIter, Store};
+use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot};
+
+/// Parse a slot from a `0x` preixed string.
+///
+/// E.g., `"1234"`
+pub fn parse_slot(string: &str) -> Result<Slot, ApiError> {
+string
+.parse::<u64>()
+.map(Slot::from)
+.map_err(|e| ApiError::InvalidQueryParams(format!("Unable to parse slot: {:?}", e)))
+}
+
+/// Parse a root from a `0x` preixed string.
+///
+/// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"`
+pub fn parse_root(string: &str) -> Result<Hash256, ApiError> {
+const PREFIX: &str = "0x";
+
+if string.starts_with(PREFIX) {
+let trimmed = string.trim_start_matches(PREFIX);
+trimmed
+.parse()
+.map_err(|e| ApiError::InvalidQueryParams(format!("Unable to parse root: {:?}", e)))
+} else {
+Err(ApiError::InvalidQueryParams(
+"Root must have a '0x' prefix".to_string(),
+))
+}
+}
+
+/// Returns a `BeaconState` in the canonical chain of `beacon_chain` at the given `slot`, if
+/// possible.
+///
+/// Will not return a state if the request slot is in the future. Will return states higher than
+/// the current head by skipping slots.
+pub fn state_at_slot<T: BeaconChainTypes>(
+beacon_chain: &BeaconChain<T>,
+slot: Slot,
+) -> Result<BeaconState<T::EthSpec>, ApiError> {
+let head_state = &beacon_chain.head().beacon_state;
+
+if head_state.slot == slot {
+// The request slot is the same as the best block (head) slot.
+
+// I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary.
+Ok(beacon_chain.head().beacon_state.clone())
+} else {
+let root = state_root_at_slot(beacon_chain, slot)?;
+
+let state: BeaconState<T::EthSpec> = beacon_chain
+.store
+.get(&root)?
+.ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?;
+
+Ok(state)
+}
+}
+
+/// Returns the root of the `BeaconState` in the canonical chain of `beacon_chain` at the given
+/// `slot`, if possible.
+///
+/// Will not return a state root if the request slot is in the future. Will return state roots
+/// higher than the current head by skipping slots.
+pub fn state_root_at_slot<T: BeaconChainTypes>(
+beacon_chain: &BeaconChain<T>,
+slot: Slot,
+) -> Result<Hash256, ApiError> {
+let head_state = &beacon_chain.head().beacon_state;
+let current_slot = beacon_chain
+.read_slot_clock()
+.ok_or_else(|| ApiError::ServerError("Unable to read slot clock".to_string()))?;
+
+// There are four scenarios when obtaining a state for a given slot:
+//
+// 1. The request slot is in the future.
+// 2. The request slot is the same as the best block (head) slot.
+// 3. The request slot is prior to the head slot.
+// 4. The request slot is later than the head slot.
+if current_slot < slot {
+// 1. The request slot is in the future. Reject the request.
+//
+// We could actually speculate about future state roots by skipping slots, however that's
+// likely to cause confusion for API users.
+Err(ApiError::InvalidQueryParams(format!(
+"Requested slot {} is past the current slot {}",
+slot, current_slot
+)))
+} else if head_state.slot == slot {
+// 2. The request slot is the same as the best block (head) slot.
+//
+// The head state root is stored in memory, return a reference.
+Ok(beacon_chain.head().beacon_state_root)
+} else if head_state.slot > slot {
+// 3. The request slot is prior to the head slot.
+//
+// Iterate through the state roots on the head state to find the root for that
+// slot. Once the root is found, load it from the database.
+Ok(head_state
+.try_iter_ancestor_roots(beacon_chain.store.clone())
+.ok_or_else(|| ApiError::ServerError("Failed to create roots iterator".to_string()))?
+.find(|(_root, s)| *s == slot)
+.map(|(root, _slot)| root)
+.ok_or_else(|| ApiError::NotFound(format!("Unable to find state at slot {}", slot)))?)
+} else {
+// 4. The request slot is later than the head slot.
+//
+// Use `per_slot_processing` to advance the head state to the present slot,
+// assuming that all slots do not contain a block (i.e., they are skipped slots).
+let mut state = beacon_chain.head().beacon_state.clone();
+let spec = &T::EthSpec::default_spec();
+
+for _ in state.slot.as_u64()..slot.as_u64() {
+// Ensure the next epoch state caches are built in case of an epoch transition.
+state.build_committee_cache(RelativeEpoch::Next, spec)?;
+
+state_processing::per_slot_processing(&mut state, spec)?;
+}
+
+// Note: this is an expensive operation. Once the tree hash cache is implement it may be
+// used here.
+Ok(state.canonical_root())
+}
+}
+
+#[cfg(test)]
+mod test {
+use super::*;
+
+#[test]
+fn parse_root_works() {
+assert_eq!(
+parse_root("0x0000000000000000000000000000000000000000000000000000000000000000"),
+Ok(Hash256::zero())
+);
+assert_eq!(
+parse_root("0x000000000000000000000000000000000000000000000000000000000000002a"),
+Ok(Hash256::from_low_u64_be(42))
+);
+assert!(
+parse_root("0000000000000000000000000000000000000000000000000000000000000042").is_err()
+);
+assert!(parse_root("0x").is_err());
+assert!(parse_root("0x00").is_err());
+}
+
+#[test]
+fn parse_slot_works() {
+assert_eq!(parse_slot("0"), Ok(Slot::new(0)));
+assert_eq!(parse_slot("42"), Ok(Slot::new(42)));
+assert_eq!(parse_slot("10000000"), Ok(Slot::new(10_000_000)));
+assert!(parse_slot("cats").is_err());
+}
+}
@@ -1,37 +1,42 @@
 extern crate futures;
 extern crate hyper;
-#[macro_use]
-mod macros;
-mod beacon_node;
-pub mod config;
+mod beacon;
+mod config;
+mod helpers;
+mod node;
+mod url_query;

 use beacon_chain::{BeaconChain, BeaconChainTypes};
-pub use config::Config as APIConfig;
+pub use config::Config as ApiConfig;
+use hyper::rt::Future;
+use hyper::service::service_fn_ok;
+use hyper::{Body, Method, Response, Server, StatusCode};
 use slog::{info, o, warn};
 use std::sync::Arc;
 use tokio::runtime::TaskExecutor;
+use url_query::UrlQuery;

-use crate::beacon_node::BeaconNodeServiceInstance;
-use hyper::rt::Future;
-use hyper::service::{service_fn, Service};
-use hyper::{Body, Request, Response, Server, StatusCode};
-use hyper_router::{RouterBuilder, RouterService};
-
-pub enum APIError {
-MethodNotAllowed { desc: String },
-ServerError { desc: String },
-NotImplemented { desc: String },
+#[derive(PartialEq, Debug)]
+pub enum ApiError {
+MethodNotAllowed(String),
+ServerError(String),
+NotImplemented(String),
+InvalidQueryParams(String),
+NotFound(String),
+ImATeapot(String), // Just in case.
 }

-pub type APIResult = Result<Response<Body>, APIError>;
+pub type ApiResult = Result<Response<Body>, ApiError>;

-impl Into<Response<Body>> for APIError {
+impl Into<Response<Body>> for ApiError {
 fn into(self) -> Response<Body> {
 let status_code: (StatusCode, String) = match self {
-APIError::MethodNotAllowed { desc } => (StatusCode::METHOD_NOT_ALLOWED, desc),
-APIError::ServerError { desc } => (StatusCode::INTERNAL_SERVER_ERROR, desc),
-APIError::NotImplemented { desc } => (StatusCode::NOT_IMPLEMENTED, desc),
+ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc),
+ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc),
+ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc),
+ApiError::InvalidQueryParams(desc) => (StatusCode::BAD_REQUEST, desc),
+ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc),
+ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc),
 };
 Response::builder()
 .status(status_code.0)
@@ -40,17 +45,31 @@ impl Into<Response<Body>> for APIError {
 }
 }

-pub trait APIService {
-fn add_routes(&mut self, router_builder: RouterBuilder) -> Result<RouterBuilder, hyper::Error>;
+impl From<store::Error> for ApiError {
+fn from(e: store::Error) -> ApiError {
+ApiError::ServerError(format!("Database error: {:?}", e))
+}
+}
+
+impl From<types::BeaconStateError> for ApiError {
+fn from(e: types::BeaconStateError) -> ApiError {
+ApiError::ServerError(format!("BeaconState error: {:?}", e))
+}
+}
+
+impl From<state_processing::per_slot_processing::Error> for ApiError {
+fn from(e: state_processing::per_slot_processing::Error) -> ApiError {
+ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e))
+}
 }

 pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
-config: &APIConfig,
+config: &ApiConfig,
 executor: &TaskExecutor,
 beacon_chain: Arc<BeaconChain<T>>,
 log: &slog::Logger,
 ) -> Result<exit_future::Signal, hyper::Error> {
-let log = log.new(o!("Service" => "API"));
+let log = log.new(o!("Service" => "Api"));

 // build a channel to kill the HTTP server
 let (exit_signal, exit) = exit_future::signal();
@@ -68,62 +87,65 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
 let server_log = log.clone();
 let server_bc = beacon_chain.clone();

-// Create the service closure
 let service = move || {
-//TODO: This router must be moved out of this closure, so it isn't rebuilt for every connection.
-let mut router = build_router_service::<T>();
+let log = server_log.clone();
+let beacon_chain = server_bc.clone();

-// Clone our stateful objects, for use in handler closure
-let service_log = server_log.clone();
-let service_bc = server_bc.clone();

 // Create a simple handler for the router, inject our stateful objects into the request.
-service_fn(move |mut req| {
+service_fn_ok(move |mut req| {
+req.extensions_mut().insert::<slog::Logger>(log.clone());
 req.extensions_mut()
-.insert::<slog::Logger>(service_log.clone());
-req.extensions_mut()
-.insert::<Arc<BeaconChain<T>>>(service_bc.clone());
-router.call(req)
+.insert::<Arc<BeaconChain<T>>>(beacon_chain.clone());
+let path = req.uri().path().to_string();
+// Route the request to the correct handler.
+let result = match (req.method(), path.as_ref()) {
+(&Method::GET, "/beacon/state") => beacon::get_state::<T>(req),
+(&Method::GET, "/beacon/state_root") => beacon::get_state_root::<T>(req),
+(&Method::GET, "/node/version") => node::get_version(req),
+(&Method::GET, "/node/genesis_time") => node::get_genesis_time::<T>(req),
+_ => Err(ApiError::MethodNotAllowed(path.clone())),
+};
+
+match result {
+// Return the `hyper::Response`.
+Ok(response) => {
+slog::debug!(log, "Request successful: {:?}", path);
+response
+}
+// Map the `ApiError` into `hyper::Response`.
+Err(e) => {
+slog::debug!(log, "Request failure: {:?}", path);
+e.into()
+}
+}
 })
 };

+let log_clone = log.clone();
 let server = Server::bind(&bind_addr)
 .serve(service)
 .with_graceful_shutdown(server_exit)
 .map_err(move |e| {
 warn!(
-log,
+log_clone,
 "API failed to start, Unable to bind"; "address" => format!("{:?}", e)
 )
 });

+info!(
+log,
+"REST API started";
+"address" => format!("{}", config.listen_address),
+"port" => config.port,
+);

 executor.spawn(server);

 Ok(exit_signal)
 }

-fn build_router_service<T: BeaconChainTypes + 'static>() -> RouterService {
-let mut router_builder = RouterBuilder::new();
-
-let mut bn_service: BeaconNodeServiceInstance<T> = BeaconNodeServiceInstance {
-marker: std::marker::PhantomData,
-};
-
-router_builder = bn_service
-.add_routes(router_builder)
-.expect("The routes should always be made.");
-
-RouterService::new(router_builder.build())
-}
-
-fn path_from_request(req: &Request<Body>) -> String {
-req.uri()
-.path_and_query()
-.as_ref()
-.map(|pq| String::from(pq.as_str()))
-.unwrap_or(String::new())
-}
-
 fn success_response(body: Body) -> Response<Body> {
 Response::builder()
 .status(StatusCode::OK)
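A small sketch of how the new `ApiError` variants surface over HTTP through the `Into<Response<Body>>` impl above (status pairings as listed in the match; the message strings are arbitrary examples):

    use hyper::{Body, Response, StatusCode};

    fn error_mapping_example() {
        let resp: Response<Body> = ApiError::NotFound("no matching state".to_string()).into();
        assert_eq!(resp.status(), StatusCode::NOT_FOUND);

        let resp: Response<Body> =
            ApiError::InvalidQueryParams("unable to parse slot".to_string()).into();
        assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
    }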
@@ -1,23 +0,0 @@
-macro_rules! result_to_response {
-($handler: path) => {
-|req: Request<Body>| -> Response<Body> {
-let log = req
-.extensions()
-.get::<slog::Logger>()
-.expect("Our logger should be on req.")
-.clone();
-let path = path_from_request(&req);
-let result = $handler(req);
-match result {
-Ok(response) => {
-info!(log, "Request successful: {:?}", path);
-response
-}
-Err(e) => {
-info!(log, "Request failure: {:?}", path);
-e.into()
-}
-}
-}
-};
-}
beacon_node/rest_api/src/node.rs (new file, 25 lines)
@@ -0,0 +1,25 @@
+use crate::{success_response, ApiResult};
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use hyper::{Body, Request};
+use std::sync::Arc;
+use version;
+
+/// Read the version string from the current Lighthouse build.
+pub fn get_version(_req: Request<Body>) -> ApiResult {
+let body = Body::from(
+serde_json::to_string(&version::version())
+.expect("Version should always be serialializable as JSON."),
+);
+Ok(success_response(body))
+}
+
+/// Read the genesis time from the current beacon chain state.
+pub fn get_genesis_time<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
+let beacon_chain = req.extensions().get::<Arc<BeaconChain<T>>>().unwrap();
+let gen_time: u64 = beacon_chain.head().beacon_state.genesis_time;
+let body = Body::from(
+serde_json::to_string(&gen_time)
+.expect("Genesis should time always have a valid JSON serialization."),
+);
+Ok(success_response(body))
+}
beacon_node/rest_api/src/url_query.rs (new file, 112 lines)
@@ -0,0 +1,112 @@
+use crate::ApiError;
+use hyper::Request;
+
+/// Provides handy functions for parsing the query parameters of a URL.
+pub struct UrlQuery<'a>(url::form_urlencoded::Parse<'a>);
+
+impl<'a> UrlQuery<'a> {
+/// Instantiate from an existing `Request`.
+///
+/// Returns `Err` if `req` does not contain any query parameters.
+pub fn from_request<T>(req: &'a Request<T>) -> Result<Self, ApiError> {
+let query_str = req.uri().query().ok_or_else(|| {
+ApiError::InvalidQueryParams(
+"URL query must be valid and contain at least one
+key."
+.to_string(),
+)
+})?;
+
+Ok(UrlQuery(url::form_urlencoded::parse(query_str.as_bytes())))
+}
+
+/// Returns the first `(key, value)` pair found where the `key` is in `keys`.
+///
+/// If no match is found, an `InvalidQueryParams` error is returned.
+pub fn first_of(mut self, keys: &[&str]) -> Result<(String, String), ApiError> {
+self.0
+.find(|(key, _value)| keys.contains(&&**key))
+.map(|(key, value)| (key.into_owned(), value.into_owned()))
+.ok_or_else(|| {
+ApiError::InvalidQueryParams(format!(
+"URL query must contain at least one of the following keys: {:?}",
+keys
+))
+})
+}
+
+/// Returns the value for `key`, if and only if `key` is the only key present in the query
+/// parameters.
+pub fn only_one(self, key: &str) -> Result<String, ApiError> {
+let queries: Vec<_> = self
+.0
+.map(|(k, v)| (k.into_owned(), v.into_owned()))
+.collect();
+
+if queries.len() == 1 {
+let (first_key, first_value) = &queries[0]; // Must have 0 index if len is 1.
+if first_key == key {
+Ok(first_value.to_string())
+} else {
+Err(ApiError::InvalidQueryParams(format!(
+"Only the {} query parameter is supported",
+key
+)))
+}
+} else {
+Err(ApiError::InvalidQueryParams(format!(
+"Only one query parameter is allowed, {} supplied",
+queries.len()
+)))
+}
+}
+}
+
+#[cfg(test)]
+mod test {
+use super::*;
+
+#[test]
+fn only_one() {
+let get_result = |addr: &str, key: &str| -> Result<String, ApiError> {
+UrlQuery(url::Url::parse(addr).unwrap().query_pairs()).only_one(key)
+};
+
+assert_eq!(get_result("http://cat.io/?a=42", "a"), Ok("42".to_string()));
+assert!(get_result("http://cat.io/?a=42", "b").is_err());
+assert!(get_result("http://cat.io/?a=42&b=12", "a").is_err());
+assert!(get_result("http://cat.io/", "").is_err());
+}
+
+#[test]
+fn first_of() {
+let url = url::Url::parse("http://lighthouse.io/cats?a=42&b=12&c=100").unwrap();
+let get_query = || UrlQuery(url.query_pairs());
+
+assert_eq!(
+get_query().first_of(&["a"]),
+Ok(("a".to_string(), "42".to_string()))
+);
+assert_eq!(
+get_query().first_of(&["a", "b", "c"]),
+Ok(("a".to_string(), "42".to_string()))
+);
+assert_eq!(
+get_query().first_of(&["a", "a", "a"]),
+Ok(("a".to_string(), "42".to_string()))
+);
+assert_eq!(
+get_query().first_of(&["a", "b", "c"]),
+Ok(("a".to_string(), "42".to_string()))
+);
+assert_eq!(
+get_query().first_of(&["b", "c"]),
+Ok(("b".to_string(), "12".to_string()))
+);
+assert_eq!(
+get_query().first_of(&["c"]),
+Ok(("c".to_string(), "100".to_string()))
+);
+assert!(get_query().first_of(&["nothing"]).is_err());
+}
+}
@@ -1,6 +1,6 @@
 use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use eth2_libp2p::PubsubMessage;
-use eth2_libp2p::TopicBuilder;
+use eth2_libp2p::Topic;
 use eth2_libp2p::BEACON_ATTESTATION_TOPIC;
 use futures::Future;
 use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
@@ -11,7 +11,7 @@ use protos::services::{
 };
 use protos::services_grpc::AttestationService;
 use slog::{error, info, trace, warn};
-use ssz::{ssz_encode, Decode};
+use ssz::{ssz_encode, Decode, Encode};
 use std::sync::Arc;
 use tokio::sync::mpsc;
 use types::Attestation;
@@ -19,7 +19,7 @@ use types::Attestation;
 #[derive(Clone)]
 pub struct AttestationServiceInstance<T: BeaconChainTypes> {
 pub chain: Arc<BeaconChain<T>>,
-pub network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
+pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
 pub log: slog::Logger,
 }

@@ -144,13 +144,13 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
 );

 // valid attestation, propagate to the network
-let topic = TopicBuilder::new(BEACON_ATTESTATION_TOPIC).build();
-let message = PubsubMessage::Attestation(attestation);
+let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into());
+let message = PubsubMessage::Attestation(attestation.as_ssz_bytes());

 self.network_chan
 .try_send(NetworkMessage::Publish {
 topics: vec![topic],
-message: Box::new(message),
+message: message,
 })
 .unwrap_or_else(|e| {
 error!(
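Condensed from the hunk above, a sketch of the new publish path: the attestation is SSZ-encoded before it is handed to the network channel and the `Box` wrapper is gone (error handling reduced to a no-op for brevity; names follow the surrounding service code):

    use eth2_libp2p::{PubsubMessage, Topic, BEACON_ATTESTATION_TOPIC};
    use network::NetworkMessage;
    use ssz::Encode;
    use tokio::sync::mpsc;
    use types::{Attestation, EthSpec};

    fn publish_attestation<E: EthSpec>(
        attestation: &Attestation<E>,
        network_chan: &mut mpsc::UnboundedSender<NetworkMessage>,
    ) {
        let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into());
        // The attestation now crosses the channel as raw SSZ bytes.
        let message = PubsubMessage::Attestation(attestation.as_ssz_bytes());
        let _ = network_chan.try_send(NetworkMessage::Publish {
            topics: vec![topic],
            message,
        });
    }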
@@ -1,6 +1,6 @@
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
-use eth2_libp2p::BEACON_PUBSUB_TOPIC;
-use eth2_libp2p::{PubsubMessage, TopicBuilder};
+use eth2_libp2p::BEACON_BLOCK_TOPIC;
+use eth2_libp2p::{PubsubMessage, Topic};
 use futures::Future;
 use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
 use network::NetworkMessage;
@@ -11,7 +11,7 @@ use protos::services::{
 use protos::services_grpc::BeaconBlockService;
 use slog::Logger;
 use slog::{error, info, trace, warn};
-use ssz::{ssz_encode, Decode};
+use ssz::{ssz_encode, Decode, Encode};
 use std::sync::Arc;
 use tokio::sync::mpsc;
 use types::{BeaconBlock, Signature, Slot};
@@ -19,7 +19,7 @@ use types::{BeaconBlock, Signature, Slot};
 #[derive(Clone)]
 pub struct BeaconBlockServiceInstance<T: BeaconChainTypes> {
 pub chain: Arc<BeaconChain<T>>,
-pub network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
+pub network_chan: mpsc::UnboundedSender<NetworkMessage>,
 pub log: Logger,
 }

@@ -106,14 +106,14 @@ impl<T: BeaconChainTypes> BeaconBlockService for BeaconBlockServiceInstance<T> {
 );

 // get the network topic to send on
-let topic = TopicBuilder::new(BEACON_PUBSUB_TOPIC).build();
-let message = PubsubMessage::Block(block);
+let topic = Topic::new(BEACON_BLOCK_TOPIC.into());
+let message = PubsubMessage::Block(block.as_ssz_bytes());

 // Publish the block to the p2p network via gossipsub.
 self.network_chan
 .try_send(NetworkMessage::Publish {
 topics: vec![topic],
-message: Box::new(message),
+message: message,
 })
 .unwrap_or_else(|e| {
 error!(
@@ -37,7 +37,7 @@ impl<T: BeaconChainTypes> BeaconNodeService for BeaconNodeServiceInstance<T> {
 node_info.set_fork(fork);
 node_info.set_genesis_time(genesis_time);
 node_info.set_genesis_slot(spec.genesis_slot.as_u64());
-node_info.set_chain_id(u32::from(spec.chain_id));
+node_info.set_network_id(u32::from(spec.network_id));

 // send the node_info the requester
 let error_log = self.log.clone();
@@ -25,7 +25,7 @@ use tokio::sync::mpsc;
 pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
 config: &RPCConfig,
 executor: &TaskExecutor,
-network_chan: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
+network_chan: mpsc::UnboundedSender<NetworkMessage>,
 beacon_chain: Arc<BeaconChain<T>>,
 log: &slog::Logger,
 ) -> exit_future::Signal {
@ -4,7 +4,7 @@ use clap::{App, Arg};
|
|||||||
use client::{ClientConfig, Eth2Config};
|
use client::{ClientConfig, Eth2Config};
|
||||||
use env_logger::{Builder, Env};
|
use env_logger::{Builder, Env};
|
||||||
use eth2_config::{read_from_file, write_to_file};
|
use eth2_config::{read_from_file, write_to_file};
|
||||||
use slog::{crit, o, Drain, Level};
|
use slog::{crit, o, warn, Drain, Level};
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
@ -52,10 +52,17 @@ fn main() {
|
|||||||
.arg(
|
.arg(
|
||||||
Arg::with_name("listen-address")
|
Arg::with_name("listen-address")
|
||||||
.long("listen-address")
|
.long("listen-address")
|
||||||
.value_name("Address")
|
.value_name("ADDRESS")
|
||||||
.help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).")
|
.help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).")
|
||||||
.takes_value(true),
|
.takes_value(true),
|
||||||
)
|
)
|
||||||
|
.arg(
|
||||||
|
Arg::with_name("port")
|
||||||
|
.long("port")
|
||||||
|
.value_name("PORT")
|
||||||
|
.help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.")
|
||||||
|
.takes_value(true),
|
||||||
|
)
|
||||||
.arg(
|
.arg(
|
||||||
Arg::with_name("maxpeers")
|
Arg::with_name("maxpeers")
|
||||||
.long("maxpeers")
|
.long("maxpeers")
|
||||||
@@ -70,27 +77,34 @@ fn main() {
                .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.")
                .takes_value(true),
        )
-       .arg(
-           Arg::with_name("port")
-               .long("port")
-               .value_name("Lighthouse Port")
-               .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.")
-               .takes_value(true),
-       )
        .arg(
            Arg::with_name("discovery-port")
                .long("disc-port")
-               .value_name("DiscoveryPort")
+               .value_name("PORT")
                .help("The discovery UDP port.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("discovery-address")
                .long("discovery-address")
-               .value_name("Address")
+               .value_name("ADDRESS")
                .help("The IP address to broadcast to other peers on how to reach this node.")
                .takes_value(true),
        )
+       .arg(
+           Arg::with_name("topics")
+               .long("topics")
+               .value_name("STRING")
+               .help("One or more comma-delimited gossipsub topic strings to subscribe to.")
+               .takes_value(true),
+       )
+       .arg(
+           Arg::with_name("libp2p-addresses")
+               .long("libp2p-addresses")
+               .value_name("MULTIADDR")
+               .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR.")
+               .takes_value(true),
+       )
        /*
         * gRPC parameters.
         */
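The new `--port`, `--topics` and `--libp2p-addresses` flags follow the usual clap 2.x builder pattern (the version pinned in this repo). A hedged sketch of declaring such flags and reading them back; the comma-splitting shown here is an illustrative assumption, not the exact lighthouse parsing code:

use clap::{App, Arg};

fn main() {
    let matches = App::new("example-node")
        .arg(
            Arg::with_name("port")
                .long("port")
                .value_name("PORT")
                .help("The TCP/UDP port to listen on.")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("libp2p-addresses")
                .long("libp2p-addresses")
                .value_name("MULTIADDR")
                .help("One or more comma-delimited multiaddrs to dial directly.")
                .takes_value(true),
        )
        .get_matches();

    // Fall back to a default port when the flag is absent.
    let port: u16 = matches
        .value_of("port")
        .map(|p| p.parse().expect("port must be a number"))
        .unwrap_or(9000);

    // Split the comma-delimited multiaddr list into individual strings.
    let dial_addrs: Vec<String> = matches
        .value_of("libp2p-addresses")
        .map(|s| s.split(',').map(Into::into).collect())
        .unwrap_or_default();

    println!("port: {}, dialing: {:?}", port, dial_addrs);
}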
@@ -136,6 +150,7 @@ fn main() {
                .help("Listen port for the HTTP server.")
                .takes_value(true),
        )
+       /* Client related arguments */
        .arg(
            Arg::with_name("api")
                .long("api")
@@ -178,12 +193,9 @@ fn main() {
                .long("default-spec")
                .value_name("TITLE")
                .short("default-spec")
-               .help("Specifies the default eth2 spec to be used. Overridden by any spec loaded
-               from disk. A spec will be written to disk after this flag is used, so it is
-               primarily used for creating eth2 spec files.")
+               .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.")
                .takes_value(true)
-               .possible_values(&["mainnet", "minimal"])
+               .possible_values(&["mainnet", "minimal", "interop"])
-               .default_value("minimal"),
        )
        .arg(
            Arg::with_name("recent-genesis")
@@ -198,11 +210,10 @@ fn main() {
            Arg::with_name("debug-level")
                .long("debug-level")
                .value_name("LEVEL")
-               .short("s")
                .help("The title of the spec constants for chain config.")
                .takes_value(true)
                .possible_values(&["info", "debug", "trace", "warn", "error", "crit"])
-               .default_value("info"),
+               .default_value("trace"),
        )
        .arg(
            Arg::with_name("verbosity")
@@ -301,27 +312,56 @@ fn main() {

    let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME);

-   // Attempt to load the `Eth2Config` from file.
+   // Initialise the `Eth2Config`.
    //
-   // If the file doesn't exist, create a default one depending on the CLI flags.
-   let mut eth2_config = match read_from_file::<Eth2Config>(eth2_config_path.clone()) {
-       Ok(Some(c)) => c,
-       Ok(None) => {
-           let default = match matches.value_of("default-spec") {
-               Some("mainnet") => Eth2Config::mainnet(),
-               Some("minimal") => Eth2Config::minimal(),
-               _ => unreachable!(), // Guarded by slog.
+   // If a CLI parameter is set, overwrite any config file present.
+   // If a parameter is not set, use either the config file present or default to minimal.
+   let cli_config = match matches.value_of("default-spec") {
+       Some("mainnet") => Some(Eth2Config::mainnet()),
+       Some("minimal") => Some(Eth2Config::minimal()),
+       Some("interop") => Some(Eth2Config::interop()),
+       _ => None,
    };
-           if let Err(e) = write_to_file(eth2_config_path, &default) {
+   // if a CLI flag is specified, write the new config if it doesn't exist,
+   // otherwise notify the user that the file will not be written.
+   let eth2_config_from_file = match read_from_file::<Eth2Config>(eth2_config_path.clone()) {
+       Ok(config) => config,
+       Err(e) => {
+           crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e));
+           return;
+       }
+   };
+
+   let mut eth2_config = {
+       if let Some(cli_config) = cli_config {
+           if eth2_config_from_file.is_none() {
+               // write to file if one doesn't exist
+               if let Err(e) = write_to_file(eth2_config_path, &cli_config) {
                    crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
                    return;
                }
-           default
+           } else {
+               warn!(
+                   log,
+                   "Eth2Config file exists. Configuration file is ignored, using default"
+               );
            }
-       Err(e) => {
-           crit!(log, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e));
+           cli_config
+       } else {
+           // CLI config not specified, read from disk
+           match eth2_config_from_file {
+               Some(config) => config,
+               None => {
+                   // set default to minimal
+                   let eth2_config = Eth2Config::minimal();
+                   if let Err(e) = write_to_file(eth2_config_path, &eth2_config) {
+                       crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
                        return;
                    }
+                   eth2_config
+               }
+           }
+       }
    };

    // Update the eth2 config with any CLI flags.
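The rewritten block above changes the precedence rule for the eth2 config: a `--default-spec` CLI value always wins, otherwise an existing on-disk config is used, otherwise the client falls back to minimal and writes that default to disk. A small sketch of just the precedence decision, with the file I/O stripped out and a stand-in `Eth2Config` (an assumption, not the real struct):

#[derive(Debug, Clone, PartialEq)]
struct Eth2Config {
    spec_constants: String,
}

fn resolve_eth2_config(cli: Option<&str>, from_file: Option<Eth2Config>) -> Eth2Config {
    match cli {
        // CLI flag set: overrides whatever is on disk.
        Some(name) => Eth2Config {
            spec_constants: name.to_string(),
        },
        // No CLI flag: use the file if present, else default to minimal.
        None => from_file.unwrap_or(Eth2Config {
            spec_constants: "minimal".to_string(),
        }),
    }
}

fn main() {
    let on_disk = Some(Eth2Config { spec_constants: "mainnet".to_string() });
    assert_eq!(resolve_eth2_config(Some("interop"), on_disk.clone()).spec_constants, "interop");
    assert_eq!(resolve_eth2_config(None, on_disk).spec_constants, "mainnet");
    assert_eq!(resolve_eth2_config(None, None).spec_constants, "minimal");
}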
@@ -333,6 +373,12 @@ fn main() {
        }
    };

+   // check to ensure the spec constants between the client and eth2_config match
+   if eth2_config.spec_constants != client_config.spec_constants {
+       crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants));
+       return;
+   }
+
    // Start the node using a `tokio` executor.
    match run::run_beacon_node(client_config, eth2_config, &log) {
        Ok(_) => {}
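The added guard refuses to start the node when the client config and the eth2 config disagree on spec constants. Reduced to its essence, with plain strings standing in for the config types and no logging:

fn check_spec_constants(client: &str, eth2: &str) -> Result<(), String> {
    // Mismatched constants would produce undefined behaviour at runtime, so bail out early.
    if client != eth2 {
        return Err(format!(
            "Specification constants do not match: client_config={}, eth2_config={}",
            client, eth2
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_spec_constants("minimal", "minimal").is_ok());
    assert!(check_spec_constants("minimal", "mainnet").is_err());
}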
@@ -13,7 +13,7 @@ use tokio::runtime::Builder;
use tokio::runtime::Runtime;
use tokio::runtime::TaskExecutor;
use tokio_timer::clock::Clock;
-use types::{MainnetEthSpec, MinimalEthSpec};
+use types::{InteropEthSpec, MainnetEthSpec, MinimalEthSpec};

/// Reads the configuration and initializes a `BeaconChain` with the required types and parameters.
///
@@ -90,6 +90,22 @@ pub fn run_beacon_node(
            runtime,
            log,
        ),
+       ("disk", "interop") => run::<ClientType<DiskStore, InteropEthSpec>>(
+           &db_path,
+           client_config,
+           eth2_config,
+           executor,
+           runtime,
+           log,
+       ),
+       ("memory", "interop") => run::<ClientType<MemoryStore, InteropEthSpec>>(
+           &db_path,
+           client_config,
+           eth2_config,
+           executor,
+           runtime,
+           log,
+       ),
        (db_type, spec) => {
            error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type);
            Err("Unknown specification and/or db_type.".into())
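The new match arms extend the `(db_type, spec_constants)` dispatch so an interop spec can run against either store. A sketch of that dispatch pattern with empty stand-in types in place of the real `ClientType`, stores and spec types (all names here are assumptions):

struct DiskStore;
struct MemoryStore;
struct MinimalEthSpec;
struct InteropEthSpec;

trait Store { const NAME: &'static str; }
impl Store for DiskStore { const NAME: &'static str = "disk"; }
impl Store for MemoryStore { const NAME: &'static str = "memory"; }

trait EthSpec { const NAME: &'static str; }
impl EthSpec for MinimalEthSpec { const NAME: &'static str = "minimal"; }
impl EthSpec for InteropEthSpec { const NAME: &'static str = "interop"; }

// The generic entry point: each match arm below picks a concrete instantiation.
fn run<S: Store, E: EthSpec>() -> Result<(), String> {
    println!("starting beacon node: store={}, spec={}", S::NAME, E::NAME);
    Ok(())
}

fn run_beacon_node(db_type: &str, spec: &str) -> Result<(), String> {
    match (db_type, spec) {
        ("disk", "minimal") => run::<DiskStore, MinimalEthSpec>(),
        ("memory", "minimal") => run::<MemoryStore, MinimalEthSpec>(),
        ("disk", "interop") => run::<DiskStore, InteropEthSpec>(),
        ("memory", "interop") => run::<MemoryStore, InteropEthSpec>(),
        (db_type, spec) => Err(format!("Unknown runtime configuration: {}/{}", db_type, spec)),
    }
}

fn main() {
    run_beacon_node("disk", "interop").unwrap();
    assert!(run_beacon_node("disk", "unknown").is_err());
}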
@@ -24,6 +24,15 @@ impl<'a, U: Store, E: EthSpec> AncestorIter<U, BlockRootsIterator<'a, E, U>> for
    }
}

+impl<'a, U: Store, E: EthSpec> AncestorIter<U, StateRootsIterator<'a, E, U>> for BeaconState<E> {
+    /// Iterates across all the prior state roots of `self`, starting at the most recent and ending
+    /// at genesis.
+    fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<StateRootsIterator<'a, E, U>> {
+        // The `self.clone()` here is wasteful.
+        Some(StateRootsIterator::owned(store, self.clone(), self.slot))
+    }
+}
+
#[derive(Clone)]
pub struct StateRootsIterator<'a, T: EthSpec, U> {
    store: Arc<U>,
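The new `AncestorIter` impl lets a `BeaconState` hand out an iterator over its prior state roots, newest first, ending at genesis. A toy version of that backwards walk, with a `Vec` of roots standing in for the store-backed `StateRootsIterator`:

struct StateRootsIter {
    roots: Vec<[u8; 32]>,
    slot: usize,
}

impl Iterator for StateRootsIter {
    type Item = (usize, [u8; 32]);

    fn next(&mut self) -> Option<Self::Item> {
        // Stop once genesis has been yielded.
        if self.slot == 0 {
            return None;
        }
        self.slot -= 1;
        self.roots.get(self.slot).map(|root| (self.slot, *root))
    }
}

fn main() {
    let iter = StateRootsIter {
        roots: vec![[0u8; 32], [1u8; 32], [2u8; 32]],
        slot: 3,
    };
    // Yields slots 2, 1, 0 in that order.
    for (slot, root) in iter {
        println!("slot {}: {:?}", slot, &root[..2]);
    }
}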
@@ -200,3 +200,37 @@ impl EthSpec for MinimalEthSpec {
}

pub type MinimalBeaconState = BeaconState<MinimalEthSpec>;
+
+/// Interop testnet spec
+#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)]
+pub struct InteropEthSpec;
+
+impl EthSpec for InteropEthSpec {
+    type ShardCount = U8;
+    type SlotsPerEpoch = U8;
+    type SlotsPerHistoricalRoot = U64;
+    type SlotsPerEth1VotingPeriod = U16;
+    type EpochsPerHistoricalVector = U64;
+    type EpochsPerSlashingsVector = U64;
+    type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch
+
+    params_from_eth_spec!(MainnetEthSpec {
+        JustificationBitsLength,
+        MaxValidatorsPerCommittee,
+        GenesisEpoch,
+        HistoricalRootsLimit,
+        ValidatorRegistryLimit,
+        MaxProposerSlashings,
+        MaxAttesterSlashings,
+        MaxAttestations,
+        MaxDeposits,
+        MaxVoluntaryExits,
+        MaxTransfers
+    });
+
+    fn default_spec() -> ChainSpec {
+        ChainSpec::interop()
+    }
+}
+
+pub type InteropBeaconState = BeaconState<InteropEthSpec>;
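`InteropEthSpec` encodes its constants as typenum types, so they are fixed at compile time and recovered as integers via `Unsigned::to_u64()`. A hedged sketch with a trimmed-down trait that is not the real lighthouse `EthSpec`, showing how such type-level constants are consumed:

use typenum::{Unsigned, U64, U8};

trait EthSpec {
    type SlotsPerEpoch: Unsigned;
    type SlotsPerHistoricalRoot: Unsigned;

    // Turn the type-level constants back into runtime numbers.
    fn slots_per_epoch() -> u64 {
        <Self::SlotsPerEpoch as Unsigned>::to_u64()
    }
    fn slots_per_historical_root() -> u64 {
        <Self::SlotsPerHistoricalRoot as Unsigned>::to_u64()
    }
}

struct InteropEthSpec;

impl EthSpec for InteropEthSpec {
    type SlotsPerEpoch = U8;
    type SlotsPerHistoricalRoot = U64;
}

fn main() {
    assert_eq!(InteropEthSpec::slots_per_epoch(), 8);
    assert_eq!(InteropEthSpec::slots_per_historical_root(), 64);
}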
@@ -92,7 +92,7 @@ pub struct ChainSpec {
    domain_transfer: u32,

    pub boot_nodes: Vec<String>,
-   pub chain_id: u8,
+   pub network_id: u8,
}

impl ChainSpec {
@@ -190,7 +190,7 @@ impl ChainSpec {
         * Network specific
         */
        boot_nodes: vec![],
-       chain_id: 1, // mainnet chain id
+       network_id: 1, // mainnet network id
        }
    }

@@ -208,7 +208,23 @@ impl ChainSpec {
            shuffle_round_count: 10,
            min_genesis_active_validator_count: 64,
            max_epochs_per_crosslink: 4,
-           chain_id: 2, // lighthouse testnet chain id
+           network_id: 2, // lighthouse testnet network id
+           boot_nodes,
+           ..ChainSpec::mainnet()
+       }
+   }
+
+   /// Interop testing spec
+   ///
+   /// This allows us to customize a chain spec for interop testing.
+   pub fn interop() -> Self {
+       let boot_nodes = vec![];
+
+       Self {
+           seconds_per_slot: 12,
+           target_committee_size: 4,
+           shuffle_round_count: 10,
+           network_id: 13,
            boot_nodes,
            ..ChainSpec::mainnet()
        }
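`ChainSpec::interop()` relies on Rust's struct-update syntax: override a few fields, inherit the rest from `mainnet()`. A stand-alone sketch with a heavily trimmed struct and illustrative (not authoritative) values:

#[derive(Clone, Debug)]
struct ChainSpec {
    seconds_per_slot: u64,
    target_committee_size: usize,
    shuffle_round_count: u8,
    network_id: u8,
    boot_nodes: Vec<String>,
}

impl ChainSpec {
    fn mainnet() -> Self {
        // Illustrative base values only.
        Self {
            seconds_per_slot: 6,
            target_committee_size: 128,
            shuffle_round_count: 90,
            network_id: 1,
            boot_nodes: vec![],
        }
    }

    fn interop() -> Self {
        Self {
            seconds_per_slot: 12,
            target_committee_size: 4,
            shuffle_round_count: 10,
            network_id: 13,
            // Every field not listed above is copied from mainnet().
            ..ChainSpec::mainnet()
        }
    }
}

fn main() {
    let spec = ChainSpec::interop();
    assert_eq!(spec.network_id, 13);
    assert_eq!(spec.seconds_per_slot, 12);
    println!("{:?}", spec);
}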
@@ -37,6 +37,13 @@ impl Eth2Config {
            spec: ChainSpec::minimal(),
        }
    }
+
+    pub fn interop() -> Self {
+        Self {
+            spec_constants: "interop".to_string(),
+            spec: ChainSpec::interop(),
+        }
+    }
}

impl Eth2Config {
@@ -45,7 +45,7 @@ service AttestationService {
message NodeInfoResponse {
    string version = 1;
    Fork fork = 2;
-   uint32 chain_id = 3;
+   uint32 network_id = 3;
    uint64 genesis_time = 4;
    uint64 genesis_slot = 5;
}
@@ -1,47 +0,0 @@
-spec_constants = "minimal"
-
-[spec]
-target_committee_size = 4
-max_indices_per_attestation = 4096
-min_per_epoch_churn_limit = 4
-churn_limit_quotient = 65536
-base_rewards_per_epoch = 5
-shuffle_round_count = 10
-deposit_contract_tree_depth = 32
-min_deposit_amount = 1000000000
-max_effective_balance = 32000000000
-ejection_balance = 16000000000
-effective_balance_increment = 1000000000
-genesis_slot = 0
-zero_hash = "0x0000000000000000000000000000000000000000000000000000000000000000"
-bls_withdrawal_prefix_byte = "0x00"
-genesis_time = 4294967295
-seconds_per_slot = 6
-min_attestation_inclusion_delay = 2
-min_seed_lookahead = 1
-activation_exit_delay = 4
-slots_per_eth1_voting_period = 16
-slots_per_historical_root = 8192
-min_validator_withdrawability_delay = 256
-persistent_committee_period = 2048
-max_crosslink_epochs = 64
-min_epochs_to_inactivity_penalty = 4
-base_reward_quotient = 32
-whistleblowing_reward_quotient = 512
-proposer_reward_quotient = 8
-inactivity_penalty_quotient = 33554432
-min_slashing_penalty_quotient = 32
-max_proposer_slashings = 16
-max_attester_slashings = 1
-max_attestations = 128
-max_deposits = 16
-max_voluntary_exits = 16
-max_transfers = 0
-domain_beacon_proposer = 0
-domain_randao = 1
-domain_attestation = 2
-domain_deposit = 3
-domain_voluntary_exit = 4
-domain_transfer = 5
-boot_nodes = ["/ip4/127.0.0.1/tcp/9000"]
-chain_id = 2
@@ -11,10 +11,10 @@ use crate::service::Service as ValidatorService;
use clap::{App, Arg};
use eth2_config::{read_from_file, write_to_file, Eth2Config};
use protos::services_grpc::ValidatorServiceClient;
-use slog::{crit, error, info, o, Drain, Level};
+use slog::{crit, error, info, o, warn, Drain, Level};
use std::fs;
use std::path::PathBuf;
-use types::{Keypair, MainnetEthSpec, MinimalEthSpec};
+use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec};

pub const DEFAULT_SPEC: &str = "minimal";
pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator";
@@ -64,14 +64,13 @@ fn main() {
                .takes_value(true),
        )
        .arg(
-           Arg::with_name("spec-constants")
-               .long("spec-constants")
+           Arg::with_name("default-spec")
+               .long("default-spec")
                .value_name("TITLE")
-               .short("s")
-               .help("The title of the spec constants for chain config.")
+               .short("default-spec")
+               .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.")
                .takes_value(true)
-               .possible_values(&["mainnet", "minimal"])
+               .possible_values(&["mainnet", "minimal", "interop"])
-               .default_value("minimal"),
        )
        .arg(
            Arg::with_name("debug-level")
@@ -126,7 +125,7 @@ fn main() {

    let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME);

-   // Attempt to lead the `ClientConfig` from disk.
+   // Attempt to load the `ClientConfig` from disk.
    //
    // If file doesn't exist, create a new, default one.
    let mut client_config = match read_from_file::<ValidatorClientConfig>(
@@ -164,27 +163,56 @@ fn main() {
        .and_then(|s| Some(PathBuf::from(s)))
        .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME));

-   // Attempt to load the `Eth2Config` from file.
+   // Initialise the `Eth2Config`.
    //
-   // If the file doesn't exist, create a default one depending on the CLI flags.
-   let mut eth2_config = match read_from_file::<Eth2Config>(eth2_config_path.clone()) {
-       Ok(Some(c)) => c,
-       Ok(None) => {
-           let default = match matches.value_of("spec-constants") {
-               Some("mainnet") => Eth2Config::mainnet(),
-               Some("minimal") => Eth2Config::minimal(),
-               _ => unreachable!(), // Guarded by slog.
+   // If a CLI parameter is set, overwrite any config file present.
+   // If a parameter is not set, use either the config file present or default to minimal.
+   let cli_config = match matches.value_of("default-spec") {
+       Some("mainnet") => Some(Eth2Config::mainnet()),
+       Some("minimal") => Some(Eth2Config::minimal()),
+       Some("interop") => Some(Eth2Config::interop()),
+       _ => None,
    };
-           if let Err(e) = write_to_file(eth2_config_path, &default) {
+   // if a CLI flag is specified, write the new config if it doesn't exist,
+   // otherwise notify the user that the file will not be written.
+   let eth2_config_from_file = match read_from_file::<Eth2Config>(eth2_config_path.clone()) {
+       Ok(config) => config,
+       Err(e) => {
+           crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e));
+           return;
+       }
+   };
+
+   let mut eth2_config = {
+       if let Some(cli_config) = cli_config {
+           if eth2_config_from_file.is_none() {
+               // write to file if one doesn't exist
+               if let Err(e) = write_to_file(eth2_config_path, &cli_config) {
                    crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
                    return;
                }
-           default
+           } else {
+               warn!(
+                   log,
+                   "Eth2Config file exists. Configuration file is ignored, using default"
+               );
            }
-       Err(e) => {
-           crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e));
+           cli_config
+       } else {
+           // CLI config not specified, read from disk
+           match eth2_config_from_file {
+               Some(config) => config,
+               None => {
+                   // set default to minimal
+                   let eth2_config = Eth2Config::minimal();
+                   if let Err(e) = write_to_file(eth2_config_path, &eth2_config) {
+                       crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
                        return;
                    }
+                   eth2_config
+               }
+           }
+       }
    };

    // Update the eth2 config with any CLI flags.
@@ -214,6 +242,11 @@ fn main() {
            eth2_config,
            log.clone(),
        ),
+       "interop" => ValidatorService::<ValidatorServiceClient, Keypair, InteropEthSpec>::start(
+           client_config,
+           eth2_config,
+           log.clone(),
+       ),
        other => {
            crit!(log, "Unknown spec constants"; "title" => other);
            return;
@@ -23,7 +23,7 @@ use protos::services_grpc::{
    AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient,
    ValidatorServiceClient,
};
-use slog::{error, info, warn};
+use slog::{crit, error, info, warn};
use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::marker::PhantomData;
use std::sync::Arc;
@@ -37,7 +37,7 @@ use types::{ChainSpec, Epoch, EthSpec, Fork, Slot};

/// A fixed amount of time after a slot to perform operations. This gives the node time to complete
/// per-slot processes.
-const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(200);
+const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100);

/// The validator service. This is the main thread that executes and maintains validator
/// duties.
@@ -106,13 +106,13 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B,
                    );
                    return Err("Genesis time in the future".into());
                }
-               // verify the node's chain id
-               if eth2_config.spec.chain_id != info.chain_id as u8 {
+               // verify the node's network id
+               if eth2_config.spec.network_id != info.network_id as u8 {
                    error!(
                        log,
                        "Beacon Node's genesis time is in the future. No work to do.\n Exiting"
                    );
-                   return Err(format!("Beacon node has the wrong chain id. Expected chain id: {}, node's chain id: {}", eth2_config.spec.chain_id, info.chain_id).into());
+                   return Err(format!("Beacon node has the wrong chain id. Expected chain id: {}, node's chain id: {}", eth2_config.spec.network_id, info.network_id).into());
                }
                break info;
            }
@@ -123,7 +123,7 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B,
        let genesis_time = node_info.get_genesis_time();
        let genesis_slot = Slot::from(node_info.get_genesis_slot());

-       info!(log,"Beacon node connected"; "Node Version" => node_info.version.clone(), "Chain ID" => node_info.chain_id, "Genesis time" => genesis_time);
+       info!(log,"Beacon node connected"; "Node Version" => node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time);

        let proto_fork = node_info.get_fork();
        let mut previous_version: [u8; 4] = [0; 4];
@@ -303,12 +303,16 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B,

        let current_epoch = current_slot.epoch(self.slots_per_epoch);

-       // this is a fatal error. If the slot clock repeats, there is something wrong with
-       // the timer, terminate immediately.
-       assert!(
-           current_slot > self.current_slot,
-           "The Timer should poll a new slot"
+       // this is a non-fatal error. If the slot clock repeats, the node could
+       // have been slow to process the previous slot and is now duplicating tasks.
+       // We ignore duplicated but raise a critical error.
+       if current_slot <= self.current_slot {
+           crit!(
+               self.log,
+               "The validator tried to duplicate a slot. Likely missed the previous slot"
            );
+           return Err("Duplicate slot".into());
+       }
        self.current_slot = current_slot;
        info!(self.log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64());
        Ok(())
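The final hunk downgrades a repeated slot from a panicking `assert!` to a logged error returned through `Result`, so a slow slot no longer aborts the whole validator service. A sketch of that behaviour change with stand-in types (not the real service):

#[derive(Clone, Copy, PartialEq, PartialOrd, Debug)]
struct Slot(u64);

struct Service {
    current_slot: Slot,
}

impl Service {
    fn check_and_process_slot(&mut self, current_slot: Slot) -> Result<(), String> {
        // Previously: assert!(current_slot > self.current_slot, "The Timer should poll a new slot");
        if current_slot <= self.current_slot {
            eprintln!("The validator tried to duplicate a slot. Likely missed the previous slot");
            return Err("Duplicate slot".into());
        }
        self.current_slot = current_slot;
        println!("Processing slot {:?}", current_slot);
        Ok(())
    }
}

fn main() {
    let mut service = Service { current_slot: Slot(10) };
    assert!(service.check_and_process_slot(Slot(11)).is_ok());
    // A repeated slot is now reported and skipped instead of panicking.
    assert!(service.check_and_process_slot(Slot(11)).is_err());
}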