diff --git a/.gitmodules b/.gitmodules
index 1b0e150ce..e69de29bb 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +0,0 @@
-[submodule "tests/ef_tests/eth2.0-spec-tests"]
-	path = tests/ef_tests/eth2.0-spec-tests
-	url = https://github.com/ethereum/eth2.0-spec-tests
diff --git a/Cargo.toml b/Cargo.toml
index f087539e6..9b31060a2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,6 +12,7 @@ members = [
     "eth2/utils/logging",
     "eth2/utils/eth2_hashing",
     "eth2/utils/lighthouse_metrics",
+    "eth2/utils/lighthouse_bootstrap",
     "eth2/utils/merkle_proof",
     "eth2/utils/int_to_bytes",
     "eth2/utils/serde_hex",
@@ -32,8 +33,9 @@ members = [
     "beacon_node/rpc",
     "beacon_node/version",
     "beacon_node/beacon_chain",
+    "beacon_node/websocket_server",
     "tests/ef_tests",
-    "tests/cli_util",
+    "lcli",
     "protos",
     "validator_client",
     "account_manager",
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..d5517ed23
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,31 @@
+TESTS_TAG := v0.8.3
+TESTS = general minimal mainnet
+
+TESTS_BASE_DIR := ./tests/ef_tests
+REPO_NAME := eth2.0-spec-tests
+OUTPUT_DIR := $(TESTS_BASE_DIR)/$(REPO_NAME)
+
+BASE_URL := https://github.com/ethereum/$(REPO_NAME)/releases/download/$(TESTS_TAG)
+
+release:
+	cargo build --all --release
+
+clean_ef_tests:
+	rm -r $(OUTPUT_DIR)
+
+ef_tests: download_tests extract_tests
+	mkdir $(OUTPUT_DIR)
+	for test in $(TESTS); do \
+		tar -C $(OUTPUT_DIR) -xvf $(TESTS_BASE_DIR)/$$test.tar ;\
+		rm $(TESTS_BASE_DIR)/$$test.tar ;\
+	done
+
+extract_tests:
+	for test in $(TESTS); do \
+		gzip -df $(TESTS_BASE_DIR)/$$test.tar.gz ;\
+	done
+
+download_tests:
+	for test in $(TESTS); do \
+		wget -P $(TESTS_BASE_DIR) $(BASE_URL)/$$test.tar.gz; \
+	done
diff --git a/account_manager/src/main.rs b/account_manager/src/main.rs
index b7448ddf2..ae3823049 100644
--- a/account_manager/src/main.rs
+++ b/account_manager/src/main.rs
@@ -125,9 +125,13 @@ fn main() {
            }
        }
    }
-        _ => panic!(
-            "The account manager must be run with a subcommand. See help for more information."
-        ),
+        _ => {
+            crit!(
+                log,
+                "The account manager must be run with a subcommand. See help for more information."
+            );
+            return;
+        }
    }
 }
diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml
index 32b7e9211..0e4299018 100644
--- a/beacon_node/Cargo.toml
+++ b/beacon_node/Cargo.toml
@@ -6,11 +6,14 @@ edition = "2018"

 [dependencies]
 eth2_config = { path = "../eth2/utils/eth2_config" }
+lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" }
+beacon_chain = { path = "beacon_chain" }
 types = { path = "../eth2/types" }
 store = { path = "./store" }
 client = { path = "client" }
 version = { path = "version" }
 clap = "2.32.0"
+rand = "0.7"
 slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] }
 slog-term = "^2.4.0"
 slog-async = "^2.3.0"
diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml
index 14b072a23..02a45d137 100644
--- a/beacon_node/beacon_chain/Cargo.toml
+++ b/beacon_node/beacon_chain/Cargo.toml
@@ -4,19 +4,29 @@ version = "0.1.0"
 authors = ["Paul Hauner ", "Age Manning "]
 edition = "2018"

+[features]
+
+write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
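+# For example, the feature can be enabled while testing with:
+#     cargo test -p beacon_chain --features write_ssz_files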
+
 [dependencies]
+eth2_config = { path = "../../eth2/utils/eth2_config" }
+merkle_proof = { path = "../../eth2/utils/merkle_proof" }
 store = { path = "../store" }
 parking_lot = "0.7"
 lazy_static = "1.3.0"
 lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
+lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" }
 log = "0.4"
 operation_pool = { path = "../../eth2/operation_pool" }
 rayon = "1.0"
 serde = "1.0"
 serde_derive = "1.0"
+serde_yaml = "0.8"
+serde_json = "^1.0"
 slog = { version = "^2.2.3" , features = ["max_level_trace"] }
 sloggers = { version = "^0.3" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
+eth2_hashing = { path = "../../eth2/utils/eth2_hashing" }
 eth2_ssz = "0.1"
 eth2_ssz_derive = "0.1"
 state_processing = { path = "../../eth2/state_processing" }
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index a454bd946..731165f81 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -1,16 +1,18 @@
 use crate::checkpoint::CheckPoint;
 use crate::errors::{BeaconChainError as Error, BlockProductionError};
+use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend};
+use crate::events::{EventHandler, EventKind};
 use crate::fork_choice::{Error as ForkChoiceError, ForkChoice};
 use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator};
 use crate::metrics;
 use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY};
 use lmd_ghost::LmdGhost;
-use log::trace;
 use operation_pool::DepositInsertStatus;
 use operation_pool::{OperationPool, PersistedOperationPool};
-use parking_lot::{RwLock, RwLockReadGuard};
-use slog::{error, info, warn, Logger};
+use parking_lot::RwLock;
+use slog::{error, info, trace, warn, Logger};
 use slot_clock::SlotClock;
+use ssz::Encode;
 use state_processing::per_block_processing::{
     errors::{
         AttestationValidationError, AttesterSlashingValidationError, DepositValidationError,
@@ -21,7 +23,10 @@ use state_processing::per_block_processing::{
 use state_processing::{
     per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy,
 };
+use std::fs;
+use std::io::prelude::*;
 use std::sync::Arc;
+use std::time::Duration;
 use store::iter::{BlockRootsIterator, StateRootsIterator};
 use store::{Error as DBError, Store};
 use tree_hash::TreeHash;
@@ -33,6 +38,12 @@ use types::*;
 //                          |-------must be this long------|
 pub const GRAFFITI: &str = "sigp/lighthouse-0.0.0-prerelease";

+/// If true, every time a block is processed the pre-state, post-state and block are written to SSZ
+/// files in the temp directory.
+///
+/// Only useful for testing.
+const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files");
+
 #[derive(Debug, PartialEq)]
 pub enum BlockProcessingOutcome {
     /// Block was valid and imported into the block graph.
@@ -45,11 +56,14 @@ pub enum BlockProcessingOutcome {
         block_slot: Slot,
     },
     /// The block state_root does not match the generated state.
-    StateRootMismatch,
+    StateRootMismatch { block: Hash256, local: Hash256 },
     /// The block was a genesis block, these blocks cannot be re-imported.
     GenesisBlock,
     /// The slot is finalized, no need to import.
-    FinalizedSlot,
+    WouldRevertFinalizedSlot {
+        block_slot: Slot,
+        finalized_slot: Slot,
+    },
     /// Block is already known, no need to re-import.
     BlockIsAlreadyKnown,
     /// The block could not be applied to the state, it is invalid.
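A minimal sketch (not part of the diff) of how a caller might branch on the new, self-describing variants, assuming `chain` is a `BeaconChain` and `block` a `BeaconBlock`:

    match chain.process_block(block)? {
        BlockProcessingOutcome::Processed { block_root } => {
            // Imported into the block DAG.
            println!("imported {}", block_root);
        }
        BlockProcessingOutcome::StateRootMismatch { block, local } => {
            // The block's claimed post-state root differs from the locally computed root.
            println!("state root mismatch: block {} vs local {}", block, local);
        }
        BlockProcessingOutcome::WouldRevertFinalizedSlot { block_slot, finalized_slot } => {
            // Importing a block at or before the finalized slot could revert finality.
            println!("stale block: slot {} <= finalized slot {}", block_slot, finalized_slot);
        }
        other => println!("rejected: {:?}", other),
    }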
@@ -80,7 +94,9 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; type LmdGhost: LmdGhost; + type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; + type EventHandler: EventHandler; } /// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block @@ -94,17 +110,17 @@ pub struct BeaconChain { /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for /// inclusion in a block. pub op_pool: OperationPool, + /// Provides information from the Ethereum 1 (PoW) chain. + pub eth1_chain: Eth1Chain, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. canonical_head: RwLock>, - /// The same state from `self.canonical_head`, but updated at the start of each slot with a - /// skip slot if no block is received. This is effectively a cache that avoids repeating calls - /// to `per_slot_processing`. - state: RwLock>, /// The root of the genesis block. pub genesis_block_root: Hash256, /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: ForkChoice, + /// A handler for events generated by the beacon chain. + pub event_handler: T::EventHandler, /// Logging to CLI, etc. log: Logger, } @@ -113,7 +129,8 @@ impl BeaconChain { /// Instantiate a new Beacon Chain, from genesis. pub fn from_genesis( store: Arc, - slot_clock: T::SlotClock, + eth1_backend: T::Eth1Chain, + event_handler: T::EventHandler, mut genesis_state: BeaconState, mut genesis_block: BeaconBlock, spec: ChainSpec, @@ -140,20 +157,28 @@ impl BeaconChain { genesis_state_root, )); - info!(log, "BeaconChain init"; - "genesis_validator_count" => genesis_state.validators.len(), - "genesis_state_root" => format!("{}", genesis_state_root), - "genesis_block_root" => format!("{}", genesis_block_root), + // Slot clock + let slot_clock = T::SlotClock::new( + spec.genesis_slot, + Duration::from_secs(genesis_state.genesis_time), + Duration::from_millis(spec.milliseconds_per_slot), + ); + + info!(log, "Beacon chain initialized from genesis"; + "validator_count" => genesis_state.validators.len(), + "state_root" => format!("{}", genesis_state_root), + "block_root" => format!("{}", genesis_block_root), ); Ok(Self { spec, slot_clock, op_pool: OperationPool::new(), - state: RwLock::new(genesis_state), + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), + event_handler, store, log, }) @@ -162,6 +187,8 @@ impl BeaconChain { /// Attempt to load an existing instance from the given `store`. 
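+    ///
+    /// Returns `Ok(None)` if no `PersistedBeaconChain` exists in the store.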
pub fn from_store( store: Arc, + eth1_backend: T::Eth1Chain, + event_handler: T::EventHandler, spec: ChainSpec, log: Logger, ) -> Result>, Error> { @@ -172,24 +199,34 @@ impl BeaconChain { Ok(Some(p)) => p, }; + let state = &p.canonical_head.beacon_state; + let slot_clock = T::SlotClock::new( spec.genesis_slot, - p.state.genesis_time, - spec.seconds_per_slot, + Duration::from_secs(state.genesis_time), + Duration::from_millis(spec.milliseconds_per_slot), ); let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; - let op_pool = p.op_pool.into_operation_pool(&p.state, &spec); + let op_pool = p.op_pool.into_operation_pool(state, &spec); + + info!(log, "Beacon chain initialized from store"; + "head_root" => format!("{}", p.canonical_head.beacon_block_root), + "head_epoch" => format!("{}", p.canonical_head.beacon_block.slot.epoch(T::EthSpec::slots_per_epoch())), + "finalized_root" => format!("{}", last_finalized_root), + "finalized_epoch" => format!("{}", last_finalized_block.slot.epoch(T::EthSpec::slots_per_epoch())), + ); Ok(Some(BeaconChain { spec, slot_clock, fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), op_pool, + event_handler, + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head: RwLock::new(p.canonical_head), - state: RwLock::new(p.state), genesis_block_root: p.genesis_block_root, store, log, @@ -204,7 +241,6 @@ impl BeaconChain { canonical_head: self.canonical_head.read().clone(), op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool), genesis_block_root: self.genesis_block_root, - state: self.state.read().clone(), }; let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); @@ -215,6 +251,25 @@ impl BeaconChain { Ok(()) } + /// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is + /// unavailable. + /// + /// The slot might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative slot). + pub fn slot(&self) -> Result { + self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot) + } + + /// Returns the epoch _right now_ according to `self.slot_clock`. Returns `Err` if the epoch is + /// unavailable. + /// + /// The epoch might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative epoch). + pub fn epoch(&self) -> Result { + self.slot() + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + } + /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. @@ -260,13 +315,11 @@ impl BeaconChain { /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. 
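+    ///
+    /// For example, `chain.rev_iter_block_roots().take(n)` walks backwards from the head,
+    /// yielding up to `n` `(block_root, slot)` pairs.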
    pub fn rev_iter_block_roots(&self) -> ReverseBlockRootIterator {
-        let state = &self.head().beacon_state;
-        let block_root = self.head().beacon_block_root;
-        let block_slot = state.slot;
+        let head = self.head();

-        let iter = BlockRootsIterator::owned(self.store.clone(), state.clone());
+        let iter = BlockRootsIterator::owned(self.store.clone(), head.beacon_state);

-        ReverseBlockRootIterator::new((block_root, block_slot), iter)
+        ReverseBlockRootIterator::new((head.beacon_block_root, head.beacon_block.slot), iter)
     }

     /// Iterates across all `(state_root, slot)` pairs from the head of the chain (inclusive) to
@@ -279,13 +332,12 @@
     /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot
     ///   returned may be earlier than the wall-clock slot.
     pub fn rev_iter_state_roots(&self) -> ReverseStateRootIterator {
-        let state = &self.head().beacon_state;
-        let state_root = self.head().beacon_state_root;
-        let state_slot = state.slot;
+        let head = self.head();
+        let slot = head.beacon_state.slot;

-        let iter = StateRootsIterator::owned(self.store.clone(), state.clone());
+        let iter = StateRootsIterator::owned(self.store.clone(), head.beacon_state);

-        ReverseStateRootIterator::new((state_root, state_slot), iter)
+        ReverseStateRootIterator::new((head.beacon_state_root, slot), iter)
     }

     /// Returns the block at the given root, if any.
@@ -300,20 +352,69 @@
         Ok(self.store.get(block_root)?)
     }

-    /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been
-    /// updated to match the current slot clock.
-    pub fn speculative_state(&self) -> Result>, Error> {
-        Ok(self.state.read())
-    }
-
-    /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the
-    /// fork-choice rule).
+    /// Returns a `CheckPoint` representing the head block and state. Contains the "best block";
+    /// the head of the canonical `BeaconChain`.
     ///
     /// It is important to note that the `beacon_state` returned may not match the present slot. It
     /// is the state as it was when the head block was received, which could be some slots prior to
     /// now.
-    pub fn head<'a>(&'a self) -> RwLockReadGuard<'a, CheckPoint> {
-        self.canonical_head.read()
+    pub fn head(&self) -> CheckPoint {
+        self.canonical_head.read().clone()
+    }
+
+    /// Returns the `BeaconState` at the given slot.
+    ///
+    /// Returns `Err` when the state is not found in the database or there is an error skipping
+    /// to a future state.
+    pub fn state_at_slot(&self, slot: Slot) -> Result, Error> {
+        let head_state = self.head().beacon_state;
+
+        if slot == head_state.slot {
+            Ok(head_state)
+        } else if slot > head_state.slot {
+            let head_state_slot = head_state.slot;
+            let mut state = head_state;
+            while state.slot < slot {
+                match per_slot_processing(&mut state, &self.spec) {
+                    Ok(()) => (),
+                    Err(e) => {
+                        warn!(
+                            self.log,
+                            "Unable to load state at slot";
+                            "error" => format!("{:?}", e),
+                            "head_slot" => head_state_slot,
+                            "requested_slot" => slot
+                        );
+                        return Err(Error::NoStateForSlot(slot));
+                    }
+                };
+            }
+            Ok(state)
+        } else {
+            let state_root = self
+                .rev_iter_state_roots()
+                .take_while(|(_root, current_slot)| *current_slot >= slot)
+                .find(|(_root, current_slot)| *current_slot == slot)
+                .map(|(root, _slot)| root)
+                .ok_or_else(|| Error::NoStateForSlot(slot))?;
+
+            Ok(self
+                .store
+                .get(&state_root)?
+                .ok_or_else(|| Error::NoStateForSlot(slot))?)
+        }
+    }
+
+    /// Returns the `BeaconState` at the current slot (viz., `self.slot()`).
+    ///
+    /// Returns `Err` when there is an error skipping to a future state or the slot clock cannot
+    /// be read.
+    pub fn wall_clock_state(&self) -> Result, Error> {
+        self.state_at_slot(self.slot()?)
     }

     /// Returns the slot of the highest block in the canonical chain.
@@ -321,41 +422,6 @@
         self.canonical_head.read().beacon_block.slot
     }

-    /// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`.
-    pub fn catchup_state(&self) -> Result<(), Error> {
-        let spec = &self.spec;
-
-        let present_slot = match self.slot_clock.present_slot() {
-            Ok(Some(slot)) => slot,
-            _ => return Err(Error::UnableToReadSlot),
-        };
-
-        if self.state.read().slot < present_slot {
-            let mut state = self.state.write();
-
-            // If required, transition the new state to the present slot.
-            for _ in state.slot.as_u64()..present_slot.as_u64() {
-                // Ensure the next epoch state caches are built in case of an epoch transition.
-                state.build_committee_cache(RelativeEpoch::Next, spec)?;
-
-                per_slot_processing(&mut *state, spec)?;
-            }
-
-            state.build_all_caches(spec)?;
-        }
-
-        Ok(())
-    }
-
-    /// Build all of the caches on the current state.
-    ///
-    /// Ideally this shouldn't be required, however we leave it here for testing.
-    pub fn ensure_state_caches_are_built(&self) -> Result<(), Error> {
-        self.state.write().build_all_caches(&self.spec)?;
-
-        Ok(())
-    }
-
     /// Returns the validator index (if any) for the given public key.
     ///
     /// Information is retrieved from the present `beacon_state.validators`.
@@ -368,26 +434,19 @@
         None
     }

-    /// Reads the slot clock, returns `None` if the slot is unavailable.
+    /// Returns the block canonical root of the current canonical chain at a given slot.
     ///
-    /// The slot might be unavailable due to an error with the system clock, or if the present time
-    /// is before genesis (i.e., a negative slot).
-    ///
-    /// This is distinct to `present_slot`, which simply reads the latest state. If a
-    /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`,
-    /// `self.state` should undergo per slot processing.
-    pub fn read_slot_clock(&self) -> Option {
-        match self.slot_clock.present_slot() {
-            Ok(Some(some_slot)) => Some(some_slot),
-            Ok(None) => None,
-            _ => None,
-        }
+    /// Returns None if a block doesn't exist at the slot.
+    pub fn root_at_slot(&self, target_slot: Slot) -> Option {
+        self.rev_iter_block_roots()
+            .find(|(_root, slot)| *slot == target_slot)
+            .map(|(root, _slot)| root)
     }

     /// Reads the slot clock (see `self.slot()`) and returns the number of slots since
     /// genesis.
     pub fn slots_since_genesis(&self) -> Option {
-        let now = self.read_slot_clock()?;
+        let now = self.slot().ok()?;
         let genesis_slot = self.spec.genesis_slot;

         if now < genesis_slot {
@@ -397,32 +456,33 @@
         }
     }

-    /// Returns slot of the present state.
-    ///
-    /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If
-    /// `self.state` has not been transitioned it is possible for the system clock to be on a
-    /// different slot to what is returned from this call.
-    pub fn present_slot(&self) -> Slot {
-        self.state.read().slot
-    }
-
     /// Returns the block proposer for a given slot.
    ///
     /// Information is read from the present `beacon_state` shuffling, only information from the
     /// present epoch is available.
     pub fn block_proposer(&self, slot: Slot) -> Result {
-        // Ensures that the present state has been advanced to the present slot, skipping slots if
-        // blocks are not present.
-        self.catchup_state()?;
+        let epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch());
+        let head_state = &self.head().beacon_state;

-        // TODO: permit lookups of the proposer at any slot.
-        let index = self.state.read().get_beacon_proposer_index(
-            slot,
-            RelativeEpoch::Current,
-            &self.spec,
-        )?;
+        let mut state = if epoch(slot) == epoch(head_state.slot) {
+            self.head().beacon_state.clone()
+        } else {
+            self.state_at_slot(slot)?
+        };

-        Ok(index)
+        state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
+
+        if epoch(state.slot) != epoch(slot) {
+            return Err(Error::InvariantViolated(format!(
+                "Epochs are inconsistent in proposer lookup: state: {}, requested: {}",
+                epoch(state.slot),
+                epoch(slot)
+            )));
+        }
+
+        state
+            .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec)
+            .map_err(Into::into)
     }

     /// Returns the attestation slot and shard for a given validator index.
@@ -432,15 +492,29 @@
     pub fn validator_attestation_slot_and_shard(
         &self,
         validator_index: usize,
-    ) -> Result, BeaconStateError> {
-        trace!(
-            "BeaconChain::validator_attestation_slot_and_shard: validator_index: {}",
-            validator_index
-        );
-        if let Some(attestation_duty) = self
-            .state
-            .read()
-            .get_attestation_duties(validator_index, RelativeEpoch::Current)?
+        epoch: Epoch,
+    ) -> Result, Error> {
+        let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch());
+        let head_state = &self.head().beacon_state;
+
+        let mut state = if epoch == as_epoch(head_state.slot) {
+            self.head().beacon_state.clone()
+        } else {
+            self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))?
+        };
+
+        state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
+
+        if as_epoch(state.slot) != epoch {
+            return Err(Error::InvariantViolated(format!(
+                "Epochs are inconsistent in attestation duties lookup: state: {}, requested: {}",
+                as_epoch(state.slot),
+                epoch
+            )));
+        }
+
+        if let Some(attestation_duty) =
+            state.get_attestation_duties(validator_index, RelativeEpoch::Current)?
         {
             Ok(Some((attestation_duty.slot, attestation_duty.shard)))
         } else {
@@ -448,15 +522,23 @@
         }
     }

-    /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
+    /// Produce an `AttestationData` that is valid for the given `slot` and `shard`.
     ///
-    /// Attests to the canonical chain.
-    pub fn produce_attestation_data(&self, shard: u64) -> Result {
-        let state = self.state.read();
-        let head_block_root = self.head().beacon_block_root;
-        let head_block_slot = self.head().beacon_block.slot;
+    /// Always attests to the canonical chain.
+    pub fn produce_attestation_data(
+        &self,
+        shard: u64,
+        slot: Slot,
+    ) -> Result {
+        let state = self.state_at_slot(slot)?;
+        let head = self.head();

-        self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state)
+        self.produce_attestation_data_for_block(
+            shard,
+            head.beacon_block_root,
+            head.beacon_block.slot,
+            &state,
+        )
     }

     /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`.
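Block and attestation production now take an explicit slot instead of trusting an internally cached "present slot". A minimal sketch (not part of the diff) of the resulting validator-facing flow, assuming `chain` is a `BeaconChain` and `shard` a shard of interest:

    let slot = chain.slot()?;
    let attestation_data = chain.produce_attestation_data(shard, slot)?;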
@@ -522,6 +604,14 @@ impl BeaconChain { metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_SUCCESSES); metrics::stop_timer(timer); + trace!( + self.log, + "Produced beacon attestation data"; + "beacon_block_root" => format!("{}", head_block_root), + "shard" => shard, + "slot" => state.slot + ); + Ok(AttestationData { beacon_block_root: head_block_root, source: state.current_justified_checkpoint.clone(), @@ -545,6 +635,59 @@ impl BeaconChain { pub fn process_attestation( &self, attestation: Attestation, + ) -> Result { + let outcome = self.process_attestation_internal(attestation.clone()); + + match &outcome { + Ok(outcome) => match outcome { + AttestationProcessingOutcome::Processed => { + trace!( + self.log, + "Beacon attestation imported"; + "shard" => attestation.data.crosslink.shard, + "target_epoch" => attestation.data.target.epoch, + ); + let _ = self + .event_handler + .register(EventKind::BeaconAttestationImported { + attestation: Box::new(attestation), + }); + } + other => { + warn!( + self.log, + "Beacon attestation rejected"; + "reason" => format!("{:?}", other), + ); + let _ = self + .event_handler + .register(EventKind::BeaconAttestationRejected { + reason: format!("Invalid attestation: {:?}", other), + attestation: Box::new(attestation), + }); + } + }, + Err(e) => { + error!( + self.log, + "Beacon attestation processing error"; + "error" => format!("{:?}", e), + ); + let _ = self + .event_handler + .register(EventKind::BeaconAttestationRejected { + reason: format!("Internal error: {:?}", e), + attestation: Box::new(attestation), + }); + } + } + + outcome + } + + pub fn process_attestation_internal( + &self, + attestation: Attestation, ) -> Result { metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); @@ -560,7 +703,6 @@ impl BeaconChain { // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. - // Take a read lock on the head beacon state. let state = &self.head().beacon_state; // If it turns out that the attestation was made using the head state, then there @@ -590,12 +732,6 @@ impl BeaconChain { ); } - // Ensure the read-lock from `self.head()` is dropped. - // - // This is likely unnecessary, however it remains as a reminder to ensure this lock - // isn't hogged. - std::mem::drop(state); - // Use the `data.beacon_block_root` to load the state from the latest non-skipped // slot preceding the attestation's creation. // @@ -634,7 +770,7 @@ impl BeaconChain { // has a higher slot than the attestation. // // Permitting this would allow for attesters to vote on _future_ slots. - if attestation_slot > state.slot { + if state.slot > attestation_slot { Ok(AttestationProcessingOutcome::AttestsToFutureState { state: state.slot, attestation: attestation_slot, @@ -738,8 +874,19 @@ impl BeaconChain { } else { // Provide the attestation to fork choice, updating the validator latest messages but // _without_ finding and updating the head. 
- self.fork_choice - .process_attestation(&state, &attestation, block)?; + if let Err(e) = self + .fork_choice + .process_attestation(&state, &attestation, block) + { + error!( + self.log, + "Add attestation to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), + "beacon_block_root" => format!("{}", attestation.data.beacon_block_root), + "error" => format!("{:?}", e) + ); + return Err(e.into()); + } // Provide the valid attestation to op pool, which may choose to retain the // attestation for inclusion in a future block. @@ -764,14 +911,34 @@ impl BeaconChain { /// Accept some exit and queue it for inclusion in an appropriate block. pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> { - self.op_pool - .insert_voluntary_exit(exit, &*self.state.read(), &self.spec) + match self.wall_clock_state() { + Ok(state) => self.op_pool.insert_voluntary_exit(exit, &state, &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process voluntary exit"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some transfer and queue it for inclusion in an appropriate block. pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { - self.op_pool - .insert_transfer(transfer, &*self.state.read(), &self.spec) + match self.wall_clock_state() { + Ok(state) => self.op_pool.insert_transfer(transfer, &state, &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process transfer"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. @@ -779,8 +946,21 @@ impl BeaconChain { &self, proposer_slashing: ProposerSlashing, ) -> Result<(), ProposerSlashingValidationError> { - self.op_pool - .insert_proposer_slashing(proposer_slashing, &*self.state.read(), &self.spec) + match self.wall_clock_state() { + Ok(state) => { + self.op_pool + .insert_proposer_slashing(proposer_slashing, &state, &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process proposer slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some attester slashing and queue it for inclusion in an appropriate block. @@ -788,8 +968,21 @@ impl BeaconChain { &self, attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { - self.op_pool - .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec) + match self.wall_clock_state() { + Ok(state) => { + self.op_pool + .insert_attester_slashing(attester_slashing, &state, &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process attester slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some block and attempt to add it to block DAG. 
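Each wrapper in this file also forwards its outcome to the new `EventHandler` trait (defined in `events.rs` later in this diff; `NullEventHandler` is the no-op implementation). A minimal sketch (not part of the diff) of a custom handler, assuming the trait and `EventKind` are generic over an `EthSpec` type `T`:

    struct StdoutEventHandler;

    impl<T: EthSpec> EventHandler<T> for StdoutEventHandler {
        fn register(&self, kind: EventKind<T>) -> Result<(), String> {
            // `EventKind` derives `Debug` (and serde traits), so it can be forwarded anywhere.
            println!("{:?}", kind);
            Ok(())
        }
    }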
@@ -798,25 +991,79 @@
     pub fn process_block(
         &self,
         block: BeaconBlock,
+    ) -> Result {
+        let outcome = self.process_block_internal(block.clone());
+
+        match &outcome {
+            Ok(outcome) => match outcome {
+                BlockProcessingOutcome::Processed { block_root } => {
+                    trace!(
+                        self.log,
+                        "Beacon block imported";
+                        "block_root" => format!("{:?}", block_root),
+                        "block_slot" => format!("{:?}", block.slot),
+                    );
+                    let _ = self.event_handler.register(EventKind::BeaconBlockImported {
+                        block_root: *block_root,
+                        block: Box::new(block),
+                    });
+                }
+                other => {
+                    warn!(
+                        self.log,
+                        "Beacon block rejected";
+                        "reason" => format!("{:?}", other),
+                    );
+                    let _ = self.event_handler.register(EventKind::BeaconBlockRejected {
+                        reason: format!("Invalid block: {:?}", other),
+                        block: Box::new(block),
+                    });
+                }
+            },
+            Err(e) => {
+                error!(
+                    self.log,
+                    "Beacon block processing error";
+                    "error" => format!("{:?}", e),
+                );
+                let _ = self.event_handler.register(EventKind::BeaconBlockRejected {
+                    reason: format!("Internal error: {:?}", e),
+                    block: Box::new(block),
+                });
+            }
+        }
+
+        outcome
+    }
+
+    /// Accept some block and attempt to add it to block DAG.
+    ///
+    /// Will accept blocks from prior slots; however, it will reject any block from a future slot.
+    fn process_block_internal(
+        &self,
+        block: BeaconBlock,
     ) -> Result {
         metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS);
         let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);

         let finalized_slot = self
-            .state
-            .read()
+            .head()
+            .beacon_state
             .finalized_checkpoint
             .epoch
             .start_slot(T::EthSpec::slots_per_epoch());

-        if block.slot <= finalized_slot {
-            return Ok(BlockProcessingOutcome::FinalizedSlot);
-        }
-
         if block.slot == 0 {
             return Ok(BlockProcessingOutcome::GenesisBlock);
         }

+        if block.slot <= finalized_slot {
+            return Ok(BlockProcessingOutcome::WouldRevertFinalizedSlot {
+                block_slot: block.slot,
+                finalized_slot,
+            });
+        }
+
         let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT);

         let block_root = block.canonical_root();

@@ -827,9 +1074,7 @@
             return Ok(BlockProcessingOutcome::GenesisBlock);
         }

-        let present_slot = self
-            .read_slot_clock()
-            .ok_or_else(|| Error::UnableToReadSlot)?;
+        let present_slot = self.slot()?;

         if block.slot > present_slot {
             return Ok(BlockProcessingOutcome::FutureSlot {
                 present_slot,
                 block_slot: block.slot,
             });
         }

@@ -858,7 +1103,7 @@
         };

         // Load the parent blocks state from the database, returning an error if it is not found.
-        // It is an error because if know the parent block we should also know the parent state.
+        // It is an error because if we know the parent block we should also know the parent state.
let parent_state_root = parent_block.state_root; let parent_state = self .store @@ -867,6 +1112,8 @@ impl BeaconChain { metrics::stop_timer(db_read_timer); + write_block(&block, block_root, &self.log); + let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); // Keep a list of any states that were "skipped" (block-less) in between the parent state @@ -891,6 +1138,12 @@ impl BeaconChain { metrics::stop_timer(committee_timer); + write_state( + &format!("state_pre_block_{}", block_root), + &state, + &self.log, + ); + let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); // Apply the received block to its parent state (which has been transitioned into this @@ -915,8 +1168,17 @@ impl BeaconChain { let state_root = state.canonical_root(); + write_state( + &format!("state_post_block_{}", block_root), + &state, + &self.log, + ); + if block.state_root != state_root { - return Ok(BlockProcessingOutcome::StateRootMismatch); + return Ok(BlockProcessingOutcome::StateRootMismatch { + block: block.state_root, + local: state_root, + }); } metrics::stop_timer(state_root_timer); @@ -952,10 +1214,10 @@ impl BeaconChain { if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { error!( self.log, - "fork choice failed to process_block"; - "error" => format!("{:?}", e), + "Add block to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), "block_root" => format!("{}", block_root), - "block_slot" => format!("{}", block.slot) + "error" => format!("{:?}", e), ) } @@ -988,20 +1250,20 @@ impl BeaconChain { Ok(BlockProcessingOutcome::Processed { block_root }) } - /// Produce a new block at the present slot. + /// Produce a new block at the given `slot`. /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. pub fn produce_block( &self, randao_reveal: Signature, + slot: Slot, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - let state = self.state.read().clone(); - let slot = self - .read_slot_clock() - .ok_or_else(|| BlockProductionError::UnableToReadSlot)?; + let state = self + .state_at_slot(slot - 1) + .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - self.produce_block_on_state(state, slot, randao_reveal) + self.produce_block_on_state(state.clone(), slot, randao_reveal) } /// Produce a block for some `slot` upon the given `state`. @@ -1050,16 +1312,12 @@ impl BeaconChain { body: BeaconBlockBody { randao_reveal, // TODO: replace with real data. 
- eth1_data: Eth1Data { - deposit_count: state.eth1_data.deposit_count, - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }, + eth1_data: self.eth1_chain.eth1_data_for_block_production(&state)?, graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), attestations: self.op_pool.get_attestations(&state, &self.spec).into(), - deposits: self.op_pool.get_deposits(&state).into(), + deposits: self.eth1_chain.deposits_for_block_inclusion(&state)?.into(), voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), transfers: self.op_pool.get_transfers(&state, &self.spec).into(), }, @@ -1080,6 +1338,14 @@ impl BeaconChain { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); metrics::stop_timer(timer); + trace!( + self.log, + "Produced beacon block"; + "parent" => format!("{}", block.parent_root), + "attestations" => block.body.attestations.len(), + "slot" => block.slot + ); + Ok((block, state)) } @@ -1111,21 +1377,28 @@ impl BeaconChain { let previous_slot = self.head().beacon_block.slot; let new_slot = beacon_block.slot; + let is_reorg = self.head().beacon_block_root != beacon_block.parent_root; + // If we switched to a new chain (instead of building atop the present chain). - if self.head().beacon_block_root != beacon_block.parent_root { + if is_reorg { metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); warn!( self.log, "Beacon chain re-org"; + "previous_head" => format!("{}", self.head().beacon_block_root), "previous_slot" => previous_slot, + "new_head_parent" => format!("{}", beacon_block.parent_root), + "new_head" => format!("{}", beacon_block_root), "new_slot" => new_slot ); } else { info!( self.log, - "new head block"; + "New head beacon block"; "justified_root" => format!("{}", beacon_state.current_justified_checkpoint.root), + "justified_epoch" => beacon_state.current_justified_checkpoint.epoch, "finalized_root" => format!("{}", beacon_state.finalized_checkpoint.root), + "finalized_epoch" => beacon_state.finalized_checkpoint.epoch, "root" => format!("{}", beacon_block_root), "slot" => new_slot, ); @@ -1142,12 +1415,34 @@ impl BeaconChain { new_epoch: new_finalized_epoch, }) } else { - self.update_canonical_head(CheckPoint { + let previous_head_beacon_block_root = self.canonical_head.read().beacon_block_root; + let current_head_beacon_block_root = beacon_block_root; + + let mut new_head = CheckPoint { beacon_block, beacon_block_root, beacon_state, beacon_state_root, - })?; + }; + + new_head.beacon_state.build_all_caches(&self.spec)?; + + let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); + + // Update the checkpoint that stores the head of the chain at the time it received the + // block. + *self.canonical_head.write() = new_head; + + metrics::stop_timer(timer); + + // Save `self` to `self.store`. + self.persist()?; + + let _ = self.event_handler.register(EventKind::BeaconHeadChanged { + reorg: is_reorg, + previous_head_beacon_block_root, + current_head_beacon_block_root, + }); if new_finalized_epoch != old_finalized_epoch { self.after_finalization(old_finalized_epoch, finalized_root)?; @@ -1169,41 +1464,6 @@ impl BeaconChain { result } - /// Update the canonical head to `new_head`. - fn update_canonical_head(&self, new_head: CheckPoint) -> Result<(), Error> { - let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); - - // Update the checkpoint that stores the head of the chain at the time it received the - // block. 
-        *self.canonical_head.write() = new_head;
-
-        // Update the always-at-the-present-slot state we keep around for performance gains.
-        *self.state.write() = {
-            let mut state = self.canonical_head.read().beacon_state.clone();
-
-            let present_slot = match self.slot_clock.present_slot() {
-                Ok(Some(slot)) => slot,
-                _ => return Err(Error::UnableToReadSlot),
-            };
-
-            // If required, transition the new state to the present slot.
-            for _ in state.slot.as_u64()..present_slot.as_u64() {
-                per_slot_processing(&mut state, &self.spec)?;
-            }
-
-            state.build_all_caches(&self.spec)?;
-
-            state
-        };
-
-        // Save `self` to `self.store`.
-        self.persist()?;
-
-        metrics::stop_timer(timer);
-
-        Ok(())
-    }
-
     /// Called after `self` has had a new block finalized.
     ///
     /// Performs pruning and finality-based optimizations.
@@ -1235,6 +1495,11 @@
         self.op_pool.prune_all(&finalized_state, &self.spec);

+        let _ = self.event_handler.register(EventKind::BeaconFinalization {
+            epoch: new_finalized_epoch,
+            root: finalized_block_root,
+        });
+
         Ok(())
     }
 }
@@ -1295,6 +1560,49 @@
     }
 }

+fn write_state(prefix: &str, state: &BeaconState, log: &Logger) {
+    if WRITE_BLOCK_PROCESSING_SSZ {
+        let root = Hash256::from_slice(&state.tree_hash_root());
+        let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root);
+        let mut path = std::env::temp_dir().join("lighthouse");
+        let _ = fs::create_dir_all(path.clone());
+        path = path.join(filename);
+
+        match fs::File::create(path.clone()) {
+            Ok(mut file) => {
+                let _ = file.write_all(&state.as_ssz_bytes());
+            }
+            Err(e) => error!(
+                log,
+                "Failed to log state";
+                "path" => format!("{:?}", path),
+                "error" => format!("{:?}", e)
+            ),
+        }
+    }
+}
+
+fn write_block(block: &BeaconBlock, root: Hash256, log: &Logger) {
+    if WRITE_BLOCK_PROCESSING_SSZ {
+        let filename = format!("block_slot_{}_root_{}.ssz", block.slot, root);
+        let mut path = std::env::temp_dir().join("lighthouse");
+        let _ = fs::create_dir_all(path.clone());
+        path = path.join(filename);
+
+        match fs::File::create(path.clone()) {
+            Ok(mut file) => {
+                let _ = file.write_all(&block.as_ssz_bytes());
+            }
+            Err(e) => error!(
+                log,
+                "Failed to log block";
+                "path" => format!("{:?}", path),
+                "error" => format!("{:?}", e)
+            ),
+        }
+    }
+}
+
 impl From for Error {
     fn from(e: DBError) -> Error {
         Error::DBError(e)
diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs
new file mode 100644
index 000000000..357644a2d
--- /dev/null
+++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs
@@ -0,0 +1,334 @@
+use crate::{BeaconChain, BeaconChainTypes};
+use eth2_hashing::hash;
+use lighthouse_bootstrap::Bootstrapper;
+use merkle_proof::MerkleTree;
+use rayon::prelude::*;
+use slog::Logger;
+use ssz::{Decode, Encode};
+use state_processing::initialize_beacon_state_from_eth1;
+use std::fs::File;
+use std::io::prelude::*;
+use std::path::PathBuf;
+use std::sync::Arc;
+use std::time::SystemTime;
+use tree_hash::{SignedRoot, TreeHash};
+use types::{
+    BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256,
+    Keypair, PublicKey, Signature,
+};
+
+enum BuildStrategy {
+    FromGenesis {
+        genesis_state: Box>,
+        genesis_block: Box>,
+    },
+    LoadFromStore,
+}
+
+pub struct BeaconChainBuilder {
+    build_strategy: BuildStrategy,
+    spec: ChainSpec,
+    log: Logger,
+}
+
+impl BeaconChainBuilder {
+    pub fn recent_genesis(
+        keypairs: &[Keypair],
+        minutes: u64,
+        spec: ChainSpec,
+        log: Logger,
+    ) -> Result {
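+        // Rounding the genesis time down to the most recent `minutes` boundary (see
+        // `recent_genesis_time` below) lets nodes started within the same window
+        // derive an identical genesis state.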
        Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log)
+    }
+
+    pub fn quick_start(
+        genesis_time: u64,
+        keypairs: &[Keypair],
+        spec: ChainSpec,
+        log: Logger,
+    ) -> Result {
+        let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?;
+
+        Ok(Self::from_genesis_state(genesis_state, spec, log))
+    }
+
+    pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result {
+        let file = File::open(file.clone())
+            .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?;
+
+        let genesis_state = serde_yaml::from_reader(file)
+            .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?;
+
+        Ok(Self::from_genesis_state(genesis_state, spec, log))
+    }
+
+    pub fn ssz_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result {
+        let mut file = File::open(file.clone())
+            .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, e))?;
+
+        let mut bytes = vec![];
+        file.read_to_end(&mut bytes)
+            .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?;
+
+        let genesis_state = BeaconState::from_ssz_bytes(&bytes)
+            .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?;
+
+        Ok(Self::from_genesis_state(genesis_state, spec, log))
+    }
+
+    pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result {
+        let file = File::open(file.clone())
+            .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?;
+
+        let genesis_state = serde_json::from_reader(file)
+            .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?;
+
+        Ok(Self::from_genesis_state(genesis_state, spec, log))
+    }
+
+    pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result {
+        let bootstrapper = Bootstrapper::connect(server.to_string(), &log)
+            .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?;
+
+        let (genesis_state, genesis_block) = bootstrapper
+            .genesis()
+            .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?;
+
+        Ok(Self {
+            build_strategy: BuildStrategy::FromGenesis {
+                genesis_block: Box::new(genesis_block),
+                genesis_state: Box::new(genesis_state),
+            },
+            spec,
+            log,
+        })
+    }
+
+    fn from_genesis_state(
+        genesis_state: BeaconState,
+        spec: ChainSpec,
+        log: Logger,
+    ) -> Self {
+        Self {
+            build_strategy: BuildStrategy::FromGenesis {
+                genesis_block: Box::new(genesis_block(&genesis_state, &spec)),
+                genesis_state: Box::new(genesis_state),
+            },
+            spec,
+            log,
+        }
+    }
+
+    pub fn from_store(spec: ChainSpec, log: Logger) -> Self {
+        Self {
+            build_strategy: BuildStrategy::LoadFromStore,
+            spec,
+            log,
+        }
+    }
+
+    pub fn build(
+        self,
+        store: Arc,
+        eth1_backend: T::Eth1Chain,
+        event_handler: T::EventHandler,
+    ) -> Result, String> {
+        Ok(match self.build_strategy {
+            BuildStrategy::LoadFromStore => {
+                BeaconChain::from_store(store, eth1_backend, event_handler, self.spec, self.log)
+                    .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))?
+                    .ok_or_else(|| format!("Unable to find existing BeaconChain in database."))?
+            }
+            BuildStrategy::FromGenesis {
+                genesis_block,
+                genesis_state,
+            } => BeaconChain::from_genesis(
+                store,
+                eth1_backend,
+                event_handler,
+                genesis_state.as_ref().clone(),
+                genesis_block.as_ref().clone(),
+                self.spec,
+                self.log,
+            )
+            .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?,
+        })
+    }
+}
+
+fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) -> BeaconBlock {
+    let mut genesis_block = BeaconBlock::empty(&spec);
+
+    genesis_block.state_root = genesis_state.canonical_root();
+
+    genesis_block
+}
+
+/// Builds a genesis state as defined by the Eth2 interop procedure (see below).
+///
+/// Reference:
+/// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start
+fn interop_genesis_state(
+    keypairs: &[Keypair],
+    genesis_time: u64,
+    spec: &ChainSpec,
+) -> Result, String> {
+    let eth1_block_hash = Hash256::from_slice(&[0x42; 32]);
+    let eth1_timestamp = 2_u64.pow(40);
+    let amount = spec.max_effective_balance;
+
+    let withdrawal_credentials = |pubkey: &PublicKey| {
+        let mut credentials = hash(&pubkey.as_ssz_bytes());
+        credentials[0] = spec.bls_withdrawal_prefix_byte;
+        Hash256::from_slice(&credentials)
+    };
+
+    let datas = keypairs
+        .into_par_iter()
+        .map(|keypair| {
+            let mut data = DepositData {
+                withdrawal_credentials: withdrawal_credentials(&keypair.pk),
+                pubkey: keypair.pk.clone().into(),
+                amount,
+                signature: Signature::empty_signature().into(),
+            };
+
+            let domain = spec.get_domain(
+                spec.genesis_slot.epoch(T::slots_per_epoch()),
+                Domain::Deposit,
+                &Fork::default(),
+            );
+            data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into();
+
+            data
+        })
+        .collect::>();
+
+    let deposit_root_leaves = datas
+        .par_iter()
+        .map(|data| Hash256::from_slice(&data.tree_hash_root()))
+        .collect::>();
+
+    let mut proofs = vec![];
+    for i in 1..=deposit_root_leaves.len() {
+        // Note: this implementation is not so efficient.
+        //
+        // If `MerkleTree` had a push method, we could just build one tree and sample it instead of
+        // rebuilding the tree for each deposit.
+        let tree = MerkleTree::create(
+            &deposit_root_leaves[0..i],
+            spec.deposit_contract_tree_depth as usize,
+        );
+
+        let (_, mut proof) = tree.generate_proof(i - 1, spec.deposit_contract_tree_depth as usize);
+        proof.push(Hash256::from_slice(&int_to_bytes32(i)));
+
+        assert_eq!(
+            proof.len(),
+            spec.deposit_contract_tree_depth as usize + 1,
+            "Deposit proof should be correct len"
+        );
+
+        proofs.push(proof);
+    }
+
+    let deposits = datas
+        .into_par_iter()
+        .zip(proofs.into_par_iter())
+        .map(|(data, proof)| (data, proof.into()))
+        .map(|(data, proof)| Deposit { proof, data })
+        .collect::>();
+
+    let mut state =
+        initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec)
+            .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?;
+
+    state.genesis_time = genesis_time;
+
+    // Invalidate all the caches after all the manual state surgery.
+    state.drop_all_caches();
+
+    Ok(state)
+}
+
+/// Returns `int` as little-endian bytes with a length of 32.
+fn int_to_bytes32(int: usize) -> Vec {
+    let mut vec = int.to_le_bytes().to_vec();
+    vec.resize(32, 0);
+    vec
+}
+
+/// Returns the system time, rounded down to the most recent `minutes`-minute boundary.
+///
+/// Used for easily creating testnets.
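+///
+/// For example, `recent_genesis_time(30)` returns the latest UNIX timestamp that is a
+/// multiple of 1800 seconds, i.e. `now - (now % (30 * 60))`.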
+fn recent_genesis_time(minutes: u64) -> u64 { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); + now - secs_after_last_period +} + +#[cfg(test)] +mod test { + use super::*; + use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; + + type TestEthSpec = MinimalEthSpec; + + #[test] + fn interop_state() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state::(&keypairs, genesis_time, spec) + .expect("should build state"); + + assert_eq!( + state.eth1_data.block_hash, + Hash256::from_slice(&[0x42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time, genesis_time, + "genesis time should be as specified" + ); + + for b in &state.balances { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for v in &state.validators { + let creds = v.withdrawal_credentials.as_bytes(); + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ) + } + + assert_eq!( + state.balances.len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators.len(), + validator_count, + "validator count should be correct" + ); + } +} diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index cd3a07bcd..030689928 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,3 +1,4 @@ +use crate::eth1_chain::Error as Eth1ChainError; use crate::fork_choice::Error as ForkChoiceError; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::BlockProcessingError; @@ -23,6 +24,8 @@ pub enum BeaconChainError { previous_epoch: Epoch, new_epoch: Epoch, }, + SlotClockDidNotStart, + NoStateForSlot(Slot), UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), @@ -31,24 +34,30 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), + UnableToAdvanceState(String), NoStateForAttestation { beacon_block_root: Hash256, }, AttestationValidationError(AttestationValidationError), + /// Returned when an internal check fails, indicating corrupt data. 
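+    /// The `String` describes the invariant that was broken.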
+ InvariantViolated(String), } easy_from_to!(SlotProcessingError, BeaconChainError); +easy_from_to!(AttestationValidationError, BeaconChainError); #[derive(Debug, PartialEq)] pub enum BlockProductionError { UnableToGetBlockRootFromState, UnableToReadSlot, + UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), + Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), } easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); -easy_from_to!(AttestationValidationError, BeaconChainError); +easy_from_to!(Eth1ChainError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs new file mode 100644 index 000000000..e4ccee3ba --- /dev/null +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -0,0 +1,110 @@ +use crate::BeaconChainTypes; +use eth2_hashing::hash; +use std::marker::PhantomData; +use types::{BeaconState, Deposit, Eth1Data, EthSpec, Hash256}; + +type Result = std::result::Result; + +/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. +pub struct Eth1Chain { + backend: T::Eth1Chain, +} + +impl Eth1Chain { + pub fn new(backend: T::Eth1Chain) -> Self { + Self { backend } + } + + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. + pub fn eth1_data_for_block_production( + &self, + state: &BeaconState, + ) -> Result { + self.backend.eth1_data(state) + } + + /// Returns a list of `Deposits` that may be included in a block. + /// + /// Including all of the returned `Deposits` in a block should _not_ cause it to become + /// invalid. + pub fn deposits_for_block_inclusion( + &self, + state: &BeaconState, + ) -> Result> { + let deposits = self.backend.queued_deposits(state)?; + + // TODO: truncate deposits if required. + + Ok(deposits) + } +} + +#[derive(Debug, PartialEq)] +pub enum Error { + /// Unable to return an Eth1Data for the given epoch. + EpochUnavailable, + /// An error from the backend service (e.g., the web3 data fetcher). + BackendError(String), +} + +pub trait Eth1ChainBackend: Sized + Send + Sync { + fn new(server: String) -> Result; + + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. + fn eth1_data(&self, beacon_state: &BeaconState) -> Result; + + /// Returns all `Deposits` between `state.eth1_deposit_index` and + /// `state.eth1_data.deposit_count`. + /// + /// # Note: + /// + /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may + /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. 
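+    /// Truncation, when implemented, is the caller's responsibility (see the TODO in
+    /// `deposits_for_block_inclusion` above).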
+    fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>;
+}
+
+pub struct InteropEth1ChainBackend {
+    _phantom: PhantomData,
+}
+
+impl Eth1ChainBackend for InteropEth1ChainBackend {
+    fn new(_server: String) -> Result {
+        Ok(Self::default())
+    }
+
+    fn eth1_data(&self, state: &BeaconState) -> Result {
+        let current_epoch = state.current_epoch();
+        let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64;
+        let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period;
+
+        let deposit_root = hash(&int_to_bytes32(current_voting_period));
+        let block_hash = hash(&deposit_root);
+
+        Ok(Eth1Data {
+            deposit_root: Hash256::from_slice(&deposit_root),
+            deposit_count: state.eth1_deposit_index,
+            block_hash: Hash256::from_slice(&block_hash),
+        })
+    }
+
+    fn queued_deposits(&self, _: &BeaconState) -> Result> {
+        Ok(vec![])
+    }
+}
+
+impl Default for InteropEth1ChainBackend {
+    fn default() -> Self {
+        Self {
+            _phantom: PhantomData,
+        }
+    }
+}
+
+/// Returns `int` as little-endian bytes with a length of 32.
+fn int_to_bytes32(int: u64) -> Vec {
+    let mut vec = int.to_le_bytes().to_vec();
+    vec.resize(32, 0);
+    vec
+}
diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs
new file mode 100644
index 000000000..c93a13c8a
--- /dev/null
+++ b/beacon_node/beacon_chain/src/events.rs
@@ -0,0 +1,55 @@
+use serde_derive::{Deserialize, Serialize};
+use std::marker::PhantomData;
+use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256};
+
+pub trait EventHandler: Sized + Send + Sync {
+    fn register(&self, kind: EventKind) -> Result<(), String>;
+}
+
+pub struct NullEventHandler(PhantomData);
+
+impl EventHandler for NullEventHandler {
+    fn register(&self, _kind: EventKind) -> Result<(), String> {
+        Ok(())
+    }
+}
+
+impl Default for NullEventHandler {
+    fn default() -> Self {
+        NullEventHandler(PhantomData)
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(
+    bound = "T: EthSpec",
+    rename_all = "snake_case",
+    tag = "event",
+    content = "data"
+)]
+pub enum EventKind {
+    BeaconHeadChanged {
+        reorg: bool,
+        current_head_beacon_block_root: Hash256,
+        previous_head_beacon_block_root: Hash256,
+    },
+    BeaconFinalization {
+        epoch: Epoch,
+        root: Hash256,
+    },
+    BeaconBlockImported {
+        block_root: Hash256,
+        block: Box>,
+    },
+    BeaconBlockRejected {
+        reason: String,
+        block: Box>,
+    },
+    BeaconAttestationImported {
+        attestation: Box>,
+    },
+    BeaconAttestationRejected {
+        reason: String,
+        attestation: Box>,
+    },
+}
diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs
index 77fdaacdc..26084e04a 100644
--- a/beacon_node/beacon_chain/src/fork_choice.rs
+++ b/beacon_node/beacon_chain/src/fork_choice.rs
@@ -199,6 +199,14 @@ impl ForkChoice {
         self.backend.latest_message(validator_index)
     }

+    /// Runs an integrity verification function on the underlying fork choice algorithm.
+    ///
+    /// Returns `Ok(())` if the underlying fork choice has maintained its integrity,
+    /// `Err(description)` otherwise.
+    pub fn verify_integrity(&self) -> core::result::Result<(), String> {
+        self.backend.verify_integrity()
+    }
+
     /// Inform the fork choice that the given block (and corresponding root) have been finalized so
     /// it may prune its storage.
/// diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index cc7725dd8..7f7e4ec2b 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -3,8 +3,11 @@ extern crate lazy_static; mod beacon_chain; +mod beacon_chain_builder; mod checkpoint; mod errors; +mod eth1_chain; +pub mod events; mod fork_choice; mod iter; mod metrics; @@ -16,6 +19,8 @@ pub use self::beacon_chain::{ }; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; +pub use beacon_chain_builder::BeaconChainBuilder; +pub use eth1_chain::{Eth1ChainBackend, InteropEth1ChainBackend}; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 8b9f78dc5..a85f78ac8 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -3,7 +3,7 @@ use operation_pool::PersistedOperationPool; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error as StoreError, StoreItem}; -use types::{BeaconState, Hash256}; +use types::Hash256; /// 32-byte key for accessing the `PersistedBeaconChain`. pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA"; @@ -13,7 +13,6 @@ pub struct PersistedBeaconChain { pub canonical_head: CheckPoint, pub op_pool: PersistedOperationPool, pub genesis_block_root: Hash256, - pub state: BeaconState, } impl StoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index a10a892a7..97b802ddf 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,23 +1,28 @@ -use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use crate::{ + events::NullEventHandler, AttestationProcessingOutcome, BeaconChain, BeaconChainBuilder, + BeaconChainTypes, BlockProcessingOutcome, InteropEth1ChainBackend, +}; use lmd_ghost::LmdGhost; use rayon::prelude::*; -use sloggers::{null::NullLoggerBuilder, Build}; -use slot_clock::SlotClock; +use sloggers::{terminal::TerminalLoggerBuilder, types::Severity, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use store::MemoryStore; -use store::Store; use tree_hash::{SignedRoot, TreeHash}; use types::{ - test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, - AttestationDataAndCustodyBit, BeaconBlock, BeaconState, BitList, ChainSpec, Domain, EthSpec, - Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, + AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, + BitList, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, + Slot, }; +pub use types::test_utils::generate_deterministic_keypairs; + pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +pub const HARNESS_GENESIS_TIME: u64 = 1567552690; // 4th September 2019 + /// Indicates how the `BeaconChainHarness` should produce blocks. 
#[derive(Clone, Copy, Debug)] pub enum BlockStrategy { @@ -61,7 +66,9 @@ where type Store = MemoryStore; type SlotClock = TestingSlotClock; type LmdGhost = L; + type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; + type EventHandler = NullEventHandler; } /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and @@ -84,53 +91,25 @@ where E: EthSpec, { /// Instantiate a new harness with `validator_count` initial validators. - pub fn new(validator_count: usize) -> Self { - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( - validator_count, - &E::default_spec(), - ); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with an initial validator for each key supplied. - pub fn from_keypairs(keypairs: Vec) -> Self { - let state_builder = TestingBeaconStateBuilder::from_keypairs(keypairs, &E::default_spec()); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with the given genesis state and a keypair for each of the - /// initial validators in the given state. - pub fn from_state_and_keypairs(genesis_state: BeaconState, keypairs: Vec) -> Self { + pub fn new(keypairs: Vec) -> Self { let spec = E::default_spec(); + let log = TerminalLoggerBuilder::new() + .level(Severity::Warning) + .build() + .expect("logger should build"); + let store = Arc::new(MemoryStore::open()); - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - let builder = NullLoggerBuilder; - let log = builder.build().expect("logger should build"); - - // Slot clock - let slot_clock = TestingSlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - let chain = BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - log, - ) - .expect("Terminate if beacon chain generation fails"); + let chain = + BeaconChainBuilder::quick_start(HARNESS_GENESIS_TIME, &keypairs, spec.clone(), log) + .unwrap_or_else(|e| panic!("Failed to create beacon chain builder: {}", e)) + .build( + store.clone(), + InteropEth1ChainBackend::default(), + NullEventHandler::default(), + ) + .unwrap_or_else(|e| panic!("Failed to build beacon chain: {}", e)); Self { chain, @@ -144,7 +123,6 @@ where /// Does not produce blocks or attestations. pub fn advance_slot(&self) { self.chain.slot_clock.advance_slot(); - self.chain.catchup_state().expect("should catchup state"); } /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the @@ -166,26 +144,27 @@ where // Determine the slot for the first block (or skipped block). let state_slot = match block_strategy { BlockStrategy::OnCanonicalHead => { - self.chain.read_slot_clock().expect("should know slot") - 1 + self.chain.slot().expect("should have a slot") - 1 } BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, }; - self.get_state_at_slot(state_slot) + self.chain + .state_at_slot(state_slot) + .expect("should find state for slot") + .clone() }; // Determine the first slot where a block should be built. 
let mut slot = match block_strategy { - BlockStrategy::OnCanonicalHead => { - self.chain.read_slot_clock().expect("should know slot") - } + BlockStrategy::OnCanonicalHead => self.chain.slot().expect("should have a slot"), BlockStrategy::ForkCanonicalChainAt { first_slot, .. } => first_slot, }; let mut head_block_root = None; for _ in 0..num_blocks { - while self.chain.read_slot_clock().expect("should have a slot") < slot { + while self.chain.slot().expect("should have a slot") < slot { self.advance_slot(); } @@ -211,21 +190,6 @@ where head_block_root.expect("did not produce any blocks") } - fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { - let state_root = self - .chain - .rev_iter_state_roots() - .find(|(_hash, slot)| *slot == state_slot) - .map(|(hash, _slot)| hash) - .expect("could not find state root"); - - self.chain - .store - .get(&state_root) - .expect("should read db") - .expect("should find state root") - } - /// Returns a newly created block, signed by the proposer for the given slot. fn build_block( &self, @@ -299,9 +263,14 @@ where ) .into_iter() .for_each(|attestation| { - self.chain + match self + .chain .process_attestation(attestation) - .expect("should process attestation"); + .expect("should not error during attestation processing") + { + AttestationProcessingOutcome::Processed => (), + other => panic!("did not successfully process attestation: {:?}", other), + } }); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 22b667f15..82fc88216 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -3,11 +3,14 @@ #[macro_use] extern crate lazy_static; -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, - BEACON_CHAIN_DB_KEY, -}; use beacon_chain::AttestationProcessingOutcome; +use beacon_chain::{ + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, + BEACON_CHAIN_DB_KEY, + }, + BlockProcessingOutcome, +}; use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; @@ -25,7 +28,7 @@ lazy_static! { type TestForkChoice = ThreadSafeReducedTree; fn get_harness(validator_count: usize) -> BeaconChainHarness { - let harness = BeaconChainHarness::from_keypairs(KEYPAIRS[0..validator_count].to_vec()); + let harness = BeaconChainHarness::new(KEYPAIRS[0..validator_count].to_vec()); harness.advance_slot(); @@ -322,7 +325,9 @@ fn roundtrip_operation_pool() { let p: PersistedBeaconChain> = harness.chain.store.get(&key).unwrap().unwrap(); - let restored_op_pool = p.op_pool.into_operation_pool(&p.state, &harness.spec); + let restored_op_pool = p + .op_pool + .into_operation_pool(&p.canonical_head.beacon_state, &harness.spec); assert_eq!(harness.chain.op_pool, restored_op_pool); } @@ -459,3 +464,48 @@ fn free_attestations_added_to_fork_choice_all_updated() { } } } + +fn run_skip_slot_test(skip_slots: u64) { + let num_validators = 8; + let harness_a = get_harness(num_validators); + let harness_b = get_harness(num_validators); + + for _ in 0..skip_slots { + harness_a.advance_slot(); + harness_b.advance_slot(); + } + + harness_a.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. 
+ AttestationStrategy::SomeValidators(vec![]), + ); + + assert_eq!( + harness_a.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); + assert_eq!(harness_b.chain.head().beacon_block.slot, Slot::new(0)); + + assert_eq!( + harness_b + .chain + .process_block(harness_a.chain.head().beacon_block.clone()), + Ok(BlockProcessingOutcome::Processed { + block_root: harness_a.chain.head().beacon_block_root + }) + ); + + assert_eq!( + harness_b.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); +} + +#[test] +fn produces_and_processes_with_genesis_skip_slots() { + for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { + run_skip_slot_test(i) + } +} diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9b5a9cf42..e55721793 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,10 +6,12 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } +store = { path = "../store" } network = { path = "../network" } eth2-libp2p = { path = "../eth2-libp2p" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } +websocket_server = { path = "../websocket_server" } prometheus = "^0.6" types = { path = "../../eth2/types" } tree_hash = "0.1" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs deleted file mode 100644 index 5168c067a..000000000 --- a/beacon_node/client/src/beacon_chain_types.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::bootstrapper::Bootstrapper; -use crate::error::Result; -use crate::{config::GenesisState, ClientConfig}; -use beacon_chain::{ - lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, - slot_clock::SystemTimeSlotClock, - store::Store, - BeaconChain, BeaconChainTypes, -}; -use slog::{crit, info, Logger}; -use slot_clock::SlotClock; -use std::fs::File; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::SystemTime; -use tree_hash::TreeHash; -use types::{ - test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, -}; - -/// Provides a new, initialized `BeaconChain` -pub trait InitialiseBeaconChain { - fn initialise_beacon_chain( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, - ) -> Result> { - maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, config, spec, log) - } -} - -#[derive(Clone)] -pub struct ClientType { - _phantom_t: PhantomData, - _phantom_u: PhantomData, -} - -impl BeaconChainTypes for ClientType -where - S: Store + 'static, - E: EthSpec, -{ - type Store = S; - type SlotClock = SystemTimeSlotClock; - type LmdGhost = ThreadSafeReducedTree; - type EthSpec = E; -} -impl InitialiseBeaconChain for ClientType {} - -/// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis. 
-fn maybe_load_from_store_for_testnet( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, -) -> Result> -where - T: BeaconChainTypes, - T::LmdGhost: LmdGhost, -{ - let genesis_state = match &config.genesis_state { - GenesisState::Mainnet => { - crit!(log, "This release does not support mainnet genesis state."); - return Err("Mainnet is unsupported".into()); - } - GenesisState::RecentGenesis { validator_count } => { - generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) - } - GenesisState::Generated { - validator_count, - genesis_time, - } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec), - GenesisState::Yaml { file } => { - let file = File::open(file).map_err(|e| { - format!("Unable to open YAML genesis state file {:?}: {:?}", file, e) - })?; - - serde_yaml::from_reader(file) - .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? - } - GenesisState::HttpBootstrap { server } => { - let bootstrapper = Bootstrapper::from_server_string(server.to_string()) - .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; - - let (state, _block) = bootstrapper - .genesis() - .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; - - state - } - }; - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - let genesis_block_root = genesis_block.canonical_root(); - - // Slot clock - let slot_clock = T::SlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - // Try load an existing `BeaconChain` from the store. If unable, create a new one. - if let Ok(Some(beacon_chain)) = - BeaconChain::from_store(store.clone(), spec.clone(), log.clone()) - { - // Here we check to ensure that the `BeaconChain` loaded from store has the expected - // genesis block. - // - // Without this check, it's possible that there will be an existing DB with a `BeaconChain` - // that has different parameters than provided to this executable. - if beacon_chain.genesis_block_root == genesis_block_root { - info!( - log, - "Loaded BeaconChain from store"; - "slot" => beacon_chain.head().beacon_state.slot, - "best_slot" => beacon_chain.best_slot(), - ); - - Ok(beacon_chain) - } else { - crit!( - log, - "The BeaconChain loaded from disk has an incorrect genesis root. \ - This may be caused by an old database in located in datadir." - ); - Err("Incorrect genesis root".into()) - } - } else { - BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec, - log.clone(), - ) - .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e).into()) - } -} - -fn generate_testnet_genesis_state( - validator_count: usize, - genesis_time: u64, - spec: &ChainSpec, -) -> BeaconState { - let (mut genesis_state, _keypairs) = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec) - .build(); - - genesis_state.genesis_time = genesis_time; - - genesis_state -} - -/// Returns the system time, mod 30 minutes. -/// -/// Used for easily creating testnets. -fn recent_genesis_time() -> u64 { - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. 
- now - secs_after_last_period } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index ea8186dbc..997808cb4 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,15 +1,11 @@ -use crate::{Bootstrapper, Eth2Config}; use clap::ArgMatches; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, warn, Drain}; +use slog::{info, o, Drain}; use std::fs::{self, OpenOptions}; use std::path::PathBuf; use std::sync::Mutex; -/// The number initial validators when starting the `Minimal`. -const TESTNET_VALIDATOR_COUNT: usize = 16; - /// The number initial validators when starting the `Minimal`. const TESTNET_SPEC_CONSTANTS: &str = "minimal"; @@ -21,33 +17,74 @@ pub struct Config { db_name: String, pub log_file: PathBuf, pub spec_constants: String, - pub genesis_state: GenesisState, + /// Defines how we should initialize a BeaconChain instance. + /// + /// This field is not serialized, therefore it will not be written to (or loaded from) config + /// files. It can only be configured via the CLI. + #[serde(skip)] + pub beacon_chain_start_method: BeaconChainStartMethod, + pub eth1_backend_method: Eth1BackendMethod, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, + pub websocket_server: websocket_server::Config, } -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum GenesisState { - /// Use the mainnet genesis state. - /// - /// Mainnet genesis state is not presently known, so this is a place-holder. +/// Defines how the client should initialize a BeaconChain. +/// +/// In general, there are two methods: +/// - resuming an existing chain, or +/// - initializing a new one. +#[derive(Debug, Clone)] +pub enum BeaconChainStartMethod { + /// Resume from an existing BeaconChain, loaded from the existing local database. + Resume, + /// Create a new beacon chain that can connect to mainnet. Mainnet, - /// Generate a state with `validator_count` validators, all with well-known secret keys. + /// Create a new beacon chain with `validator_count` validators, all with well-known secret keys. /// /// Set the genesis time to be the start of the previous 30-minute window. - RecentGenesis { validator_count: usize }, + RecentGenesis { + validator_count: usize, + minutes: u64, + }, + /// Create a new beacon chain with `genesis_time` and `validator_count` validators, all with well-known /// secret keys. Generated { validator_count: usize, genesis_time: u64, }, - /// Load a YAML-encoded genesis state from a file. + /// Create a new beacon chain by loading a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, - /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. - HttpBootstrap { server: String }, + /// Create a new beacon chain by loading an SSZ-encoded genesis state from a file. + Ssz { file: PathBuf }, + /// Create a new beacon chain by loading a JSON-encoded genesis state from a file. + Json { file: PathBuf }, + /// Create a new beacon chain by using an HTTP server (running our REST-API) to load genesis and + /// finalized states and blocks. + HttpBootstrap { server: String, port: Option }, +} + +impl Default for BeaconChainStartMethod { + fn default() -> Self { + BeaconChainStartMethod::Resume + } +} + +/// Defines which Eth1 backend the client should use.
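// A sketch (illustrative, not part of the patch) of selecting one of the
// `BeaconChainStartMethod` variants defined above. The string values are
// hypothetical stand-ins for CLI input, not the binary's real flags; the genesis
// time reuses the harness constant introduced elsewhere in this patch.
fn start_method_for(method: &str, validator_count: usize) -> BeaconChainStartMethod {
    match method {
        // Round genesis down to the most recent `minutes` boundary.
        "recent" => BeaconChainStartMethod::RecentGenesis {
            validator_count,
            minutes: 30,
        },
        // Fixed, explicit genesis time for reproducible testnets.
        "quick" => BeaconChainStartMethod::Generated {
            validator_count,
            genesis_time: 1_567_552_690,
        },
        // Default: resume whatever chain already exists in the local database.
        _ => BeaconChainStartMethod::Resume,
    }
}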
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum Eth1BackendMethod { + /// Use the mocked eth1 backend used in interop testing + Interop, + /// Use a web3 connection to a running Eth1 node. + Web3 { server: String }, +} + +impl Default for Eth1BackendMethod { + fn default() -> Self { + Eth1BackendMethod::Interop + } } impl Default for Config { @@ -58,12 +95,12 @@ impl Default for Config { db_type: "disk".to_string(), db_name: "chain_db".to_string(), network: NetworkConfig::new(), - rpc: rpc::RPCConfig::default(), - rest_api: rest_api::ApiConfig::default(), + rpc: <_>::default(), + rest_api: <_>::default(), + websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), - genesis_state: GenesisState::RecentGenesis { - validator_count: TESTNET_VALIDATOR_COUNT, - }, + beacon_chain_start_method: <_>::default(), + eth1_backend_method: <_>::default(), } } } @@ -76,6 +113,8 @@ impl Config { } /// Returns the core path for the client. + /// + /// Creates the directory if it does not exist. pub fn data_dir(&self) -> Option { let path = dirs::home_dir()?.join(&self.data_dir); fs::create_dir_all(&path).ok()?; @@ -127,15 +166,6 @@ impl Config { self.data_dir = PathBuf::from(dir); }; - if let Some(default_spec) = args.value_of("default-spec") { - match default_spec { - "mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants, - "minimal" => self.spec_constants = Eth2Config::minimal().spec_constants, - "interop" => self.spec_constants = Eth2Config::interop().spec_constants, - _ => {} // not supported - } - } - if let Some(dir) = args.value_of("db") { self.db_type = dir.to_string(); }; @@ -143,46 +173,13 @@ impl Config { self.network.apply_cli_args(args)?; self.rpc.apply_cli_args(args)?; self.rest_api.apply_cli_args(args)?; + self.websocket_server.apply_cli_args(args)?; if let Some(log_file) = args.value_of("logfile") { self.log_file = PathBuf::from(log_file); self.update_logger(log)?; }; - // If the `--bootstrap` flag is provided, overwrite the default configuration. - if let Some(server) = args.value_of("bootstrap") { - do_bootstrapping(self, server.to_string(), &log)?; - } - Ok(()) } } - -/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and -/// adding them to the `config`. -fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { - // Set the genesis state source. - config.genesis_state = GenesisState::HttpBootstrap { - server: server.to_string(), - }; - - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - - config.network.boot_nodes.push(bootstrapper.enr()?); - - if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() { - info!( - log, - "Estimated bootstrapper libp2p address"; - "multiaddr" => format!("{:?}", server_multiaddr) - ); - config.network.libp2p_nodes.push(server_multiaddr); - } else { - warn!( - log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." 
- ); - } - - Ok(()) -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7dd7118a7..fc5e9f860 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,97 +1,216 @@ extern crate slog; -mod beacon_chain_types; -mod bootstrapper; mod config; pub mod error; pub mod notifier; -use beacon_chain::BeaconChain; +use beacon_chain::{ + lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, + test_utils::generate_deterministic_keypairs, BeaconChain, BeaconChainBuilder, +}; use exit_future::Signal; use futures::{future::Future, Stream}; use network::Service as NetworkService; -use slog::{error, info, o}; +use slog::{crit, error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; +use types::EthSpec; +use websocket_server::WebSocketSender; -pub use beacon_chain::BeaconChainTypes; -pub use beacon_chain_types::ClientType; -pub use beacon_chain_types::InitialiseBeaconChain; -pub use bootstrapper::Bootstrapper; -pub use config::{Config as ClientConfig, GenesisState}; +pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend, InteropEth1ChainBackend}; +pub use config::{BeaconChainStartMethod, Config as ClientConfig, Eth1BackendMethod}; pub use eth2_config::Eth2Config; +#[derive(Clone)] +pub struct RuntimeBeaconChainTypes { + _phantom_s: PhantomData, + _phantom_e: PhantomData, +} + +impl BeaconChainTypes for RuntimeBeaconChainTypes +where + S: Store + 'static, + E: EthSpec, +{ + type Store = S; + type SlotClock = SystemTimeSlotClock; + type LmdGhost = ThreadSafeReducedTree; + type Eth1Chain = InteropEth1ChainBackend; + type EthSpec = E; + type EventHandler = WebSocketSender; +} + /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. -pub struct Client { +pub struct Client +where + S: Store + Clone + 'static, + E: EthSpec, +{ /// Configuration for the lighthouse client. _client_config: ClientConfig, /// The beacon chain for the running client. - beacon_chain: Arc>, + beacon_chain: Arc>>, /// Reference to the network service. - pub network: Arc>, + pub network: Arc>>, /// Signal to terminate the RPC server. pub rpc_exit_signal: Option, /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// Signal to terminate the API pub api_exit_signal: Option, + /// Signal to terminate the websocket server + pub websocket_exit_signal: Option, /// The clients logger. log: slog::Logger, - /// Marker to pin the beacon chain generics. - phantom: PhantomData, } -impl Client +impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, + S: Store + Clone + 'static, + E: EthSpec, { /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( client_config: ClientConfig, eth2_config: Eth2Config, - store: T::Store, + store: S, log: slog::Logger, executor: &TaskExecutor, ) -> error::Result { let store = Arc::new(store); - let seconds_per_slot = eth2_config.spec.seconds_per_slot; + let milliseconds_per_slot = eth2_config.spec.milliseconds_per_slot; - // Load a `BeaconChain` from the store, or create a new one if it does not exist. 
- let beacon_chain = Arc::new(T::initialise_beacon_chain( - store, - &client_config, - eth2_config.spec.clone(), - log.clone(), - )?); + let spec = &eth2_config.spec.clone(); - if beacon_chain.read_slot_clock().is_none() { - panic!("Cannot start client before genesis!") - } + let beacon_chain_builder = match &client_config.beacon_chain_start_method { + BeaconChainStartMethod::Resume => { + info!( + log, + "Starting beacon chain"; + "method" => "resume" + ); + BeaconChainBuilder::from_store(spec.clone(), log.clone()) + } + BeaconChainStartMethod::Mainnet => { + crit!(log, "No mainnet beacon chain startup specification."); + return Err("Mainnet launch is not yet announced.".into()); + } + BeaconChainStartMethod::RecentGenesis { + validator_count, + minutes, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "minutes" => minutes, + "method" => "recent" + ); + BeaconChainBuilder::recent_genesis( + &generate_deterministic_keypairs(*validator_count), + *minutes, + spec.clone(), + log.clone(), + )? + } + BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "genesis_time" => genesis_time, + "method" => "quick" + ); + BeaconChainBuilder::quick_start( + *genesis_time, + &generate_deterministic_keypairs(*validator_count), + spec.clone(), + log.clone(), + )? + } + BeaconChainStartMethod::Yaml { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "yaml" + ); + BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? + } + BeaconChainStartMethod::Ssz { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "ssz" + ); + BeaconChainBuilder::ssz_state(file, spec.clone(), log.clone())? + } + BeaconChainStartMethod::Json { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "json" + ); + BeaconChainBuilder::json_state(file, spec.clone(), log.clone())? + } + BeaconChainStartMethod::HttpBootstrap { server, port } => { + info!( + log, + "Starting beacon chain"; + "port" => port, + "server" => server, + "method" => "bootstrap" + ); + BeaconChainBuilder::http_bootstrap(server, spec.clone(), log.clone())? + } + }; - // Block starting the client until we have caught the state up to the current slot. - // - // If we don't block here we create an initial scenario where we're unable to process any - // blocks and we're basically useless. - { - let state_slot = beacon_chain.head().beacon_state.slot; - let wall_clock_slot = beacon_chain.read_slot_clock().unwrap(); - let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); + let eth1_backend = + InteropEth1ChainBackend::new(String::new()).map_err(|e| format!("{:?}", e))?; + + // Start the websocket server.
+ let (websocket_sender, websocket_exit_signal): (WebSocketSender, Option<_>) = + if client_config.websocket_server.enabled { + let (sender, exit) = websocket_server::start_server( + &client_config.websocket_server, + executor, + &log, + )?; + (sender, Some(exit)) + } else { + (WebSocketSender::dummy(), None) + }; + + let beacon_chain: Arc>> = Arc::new( + beacon_chain_builder + .build(store, eth1_backend, websocket_sender) + .map_err(error::Error::from)?, + ); + + let since_epoch = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {}", e))?; + let since_genesis = Duration::from_secs(beacon_chain.head().beacon_state.genesis_time); + + if since_genesis > since_epoch { info!( log, - "BeaconState cache init"; - "state_slot" => state_slot, - "wall_clock_slot" => wall_clock_slot, - "slots_since_genesis" => slots_since_genesis, - "catchup_distance" => wall_clock_slot - state_slot, + "Starting node prior to genesis"; + "now" => since_epoch.as_secs(), + "genesis_seconds" => since_genesis.as_secs(), ); } - do_state_catchup(&beacon_chain, &log); let network_config = &client_config.network; let (network, network_send) = @@ -117,7 +236,9 @@ where executor, beacon_chain.clone(), network.clone(), + network_send.clone(), client_config.db_path().expect("unable to read datadir"), + eth2_config.clone(), &log, ) { Ok(s) => Some(s), @@ -131,11 +252,11 @@ where }; let (slot_timer_exit_signal, exit) = exit_future::signal(); - if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { + if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_secs(seconds_per_slot); + let slot_duration = Duration::from_millis(milliseconds_per_slot); //TODO: Handle checked add correctly Interval::new(Instant::now() + duration_to_next_slot, slot_duration) }; @@ -146,7 +267,7 @@ where exit.until( interval .for_each(move |_| { - do_state_catchup(&chain, &log); + log_new_slot(&chain, &log); Ok(()) }) @@ -162,49 +283,33 @@ where rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), api_exit_signal, + websocket_exit_signal, log, network, - phantom: PhantomData, }) } } -impl Drop for Client { +impl Drop for Client { fn drop(&mut self) { // Save the beacon chain to it's store before dropping. let _result = self.beacon_chain.persist(); } } -fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { - // Only attempt to `catchup_state` if we can read the slot clock. 
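// A sketch (illustrative, not part of the patch) of the pre-genesis check above.
// Note the naming: `since_genesis` is the genesis timestamp expressed as a Duration
// since the Unix epoch, so `since_genesis > since_epoch` reads as "genesis is still
// in the future".
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn starts_before_genesis(genesis_time_secs: u64) -> bool {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set after 1970");
    let since_genesis = Duration::from_secs(genesis_time_secs);
    since_genesis > since_epoch
}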
- if let Some(current_slot) = chain.read_slot_clock() { - let state_catchup_result = chain.catchup_state(); +fn log_new_slot(chain: &Arc>, log: &slog::Logger) { + let best_slot = chain.head().beacon_block.slot; + let latest_block_root = chain.head().beacon_block_root; - let best_slot = chain.head().beacon_block.slot; - let latest_block_root = chain.head().beacon_block_root; - - let common = o!( + if let Ok(current_slot) = chain.slot() { + info!( + log, + "Slot start"; "skip_slots" => current_slot.saturating_sub(best_slot), "best_block_root" => format!("{}", latest_block_root), "best_block_slot" => best_slot, "slot" => current_slot, - ); - - if let Err(e) = state_catchup_result { - error!( - log, - "State catchup failed"; - "error" => format!("{:?}", e), - common - ) - } else { - info!( - log, - "Slot start"; - common - ) - } + ) } else { error!( log, diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index d705637cb..20da963ec 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,11 +1,12 @@ use crate::Client; -use beacon_chain::BeaconChainTypes; use exit_future::Exit; use futures::{Future, Stream}; use slog::{debug, o, warn}; use std::time::{Duration, Instant}; +use store::Store; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; +use types::EthSpec; /// The interval between heartbeat events. pub const HEARTBEAT_INTERVAL_SECONDS: u64 = 15; @@ -17,7 +18,11 @@ pub const WARN_PEER_COUNT: usize = 1; /// durations. /// /// Presently unused, but remains for future use. -pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) { +pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) +where + S: Store + Clone + 'static, + E: EthSpec, +{ // notification heartbeat let interval = Interval::new( Instant::now(), @@ -34,10 +39,10 @@ pub fn run(client: &Client, executor: TaskExecutor, exit // Panics if libp2p is poisoned. 
let connected_peer_count = libp2p.lock().swarm.connected_peers(); - debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count); + debug!(log, "Connected peer status"; "peer_count" => connected_peer_count); if connected_peer_count <= WARN_PEER_COUNT { - warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); + warn!(log, "Low peer count"; "peer_count" => connected_peer_count); } Ok(()) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index caa5b28e4..ccc6efb6d 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -6,9 +6,10 @@ edition = "2018" [dependencies] clap = "2.32.0" +hex = "0.3" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "61036890d574f5b46573952b20def2baafd6a6e9" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "61036890d574f5b46573952b20def2baafd6a6e9", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "8ac9c744197faaadc0e2b64fed7470ac4e2a41ca" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "8ac9c744197faaadc0e2b64fed7470ac4e2a41ca", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 2c574e46a..a47d32ec2 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -15,7 +15,7 @@ use libp2p::{ tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{debug, o, trace}; +use slog::{debug, o}; use std::num::NonZeroU32; use std::time::Duration; @@ -90,13 +90,15 @@ impl NetworkBehaviourEventProcess { - trace!(self.log, "Received GossipEvent"); - + GossipsubEvent::Message(propagation_source, gs_msg) => { + let id = gs_msg.id(); let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); + // Note: We are keeping track here of the peer that sent us the message, not the + // peer that originally published the message. self.events.push(BehaviourEvent::GossipMessage { - source: gs_msg.source, + id, + source: propagation_source, topics: gs_msg.topics, message: msg, }); @@ -199,6 +201,13 @@ impl Behaviour { } } + /// Forwards a message that is waiting in gossipsub's mcache. Messages are only propagated + /// once validated by the beacon chain. + pub fn propagate_message(&mut self, propagation_source: &PeerId, message_id: String) { + self.gossipsub + .propagate_message(&message_id, propagation_source); + } + /* Eth2 RPC behaviour functions */ /// Sends an RPC Request/Response via the RPC protocol. @@ -214,12 +223,21 @@ impl Behaviour { /// The types of events than can be obtained from polling the behaviour. pub enum BehaviourEvent { + /// A received RPC event and the peer that it was received from. RPC(PeerId, RPCEvent), + /// We have completed an initial connection to a new peer. PeerDialed(PeerId), + /// A peer has disconnected. PeerDisconnected(PeerId), + /// A gossipsub message has been received. GossipMessage { + /// The gossipsub message id. Used when propagating blocks after validation. + id: String, + /// The peer from which we received this message, not the peer that published it. source: PeerId, + /// The topics that this message was sent on. topics: Vec, + /// The message itself. 
message: PubsubMessage, }, } diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 7cb501c1f..fa20d2cdd 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -40,6 +40,12 @@ pub struct Config { /// Target number of connected peers. pub max_peers: usize, + /// A secp256k1 secret key, as bytes in ASCII-encoded hex. + /// + /// With or without `0x` prefix. + #[serde(skip)] + pub secret_key_hex: Option, + /// Gossipsub configuration parameters. #[serde(skip)] pub gs_config: GossipsubConfig, @@ -70,11 +76,13 @@ impl Default for Config { discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_port: 9000, max_peers: 10, + secret_key_hex: None, // Note: The topics by default are sent as plain strings. Hashes are an optional // parameter. gs_config: GossipsubConfigBuilder::new() .max_transmit_size(1_048_576) - .heartbeat_interval(Duration::from_secs(20)) + .heartbeat_interval(Duration::from_secs(20)) // TODO: Reduce for mainnet + .propagate_messages(false) // require validation before propagation .build(), boot_nodes: vec![], libp2p_nodes: vec![], @@ -157,6 +165,10 @@ impl Config { .map_err(|_| format!("Invalid discovery port: {}", disc_port_str))?; } + if let Some(p2p_priv_key) = args.value_of("p2p-priv-key") { + self.secret_key_hex = Some(p2p_priv_key.to_string()); + } + Ok(()) } } diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4a8aba2b1..69ca39ad7 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -114,7 +114,7 @@ impl Discovery { self.find_peers(); } - /// Add an Enr to the routing table of the discovery mechanism. + /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { self.discovery.add_enr(enr); } @@ -169,6 +169,7 @@ where fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) { self.connected_peers.insert(peer_id); + // TODO: Drop peers if over max_peer limit metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs index a8a239867..973567473 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs @@ -101,13 +101,15 @@ where type Error = ::Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + // if we have only received the response code, wait for more bytes + if src.len() == 1 { + return Ok(None); + } + // using the response code determine which kind of payload needs to be decoded. 
let response_code = { if let Some(resp_code) = self.response_code { resp_code } else { - // buffer should not be empty - debug_assert!(!src.is_empty()); - let resp_byte = src.split_to(1); let mut resp_code_byte = [0; 1]; resp_code_byte.copy_from_slice(&resp_byte); diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 260a00346..d0e4d01cf 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -4,7 +4,7 @@ use crate::rpc::{ protocol::{ProtocolId, RPCError}, }; use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse}; -use bytes::{Bytes, BytesMut}; +use bytes::{BufMut, Bytes, BytesMut}; use ssz::{Decode, Encode}; use tokio::codec::{Decoder, Encoder}; use unsigned_varint::codec::UviBytes; @@ -56,6 +56,10 @@ impl Encoder for SSZInboundCodec { .inner .encode(Bytes::from(bytes), dst) .map_err(RPCError::from); + } else { + // payload is empty, add a 0-byte length prefix + dst.reserve(1); + dst.put_u8(0); } Ok(()) } @@ -152,45 +156,49 @@ impl Decoder for SSZOutboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - match self.inner.decode(src).map_err(RPCError::from) { - Ok(Some(packet)) => match self.protocol.message_name.as_str() { + if src.len() == 1 && src[0] == 0_u8 { + // the object is empty. We return the empty object if this is the case + match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( - &packet, - )?))), + "1" => Err(RPCError::Custom( + "Hello stream terminated unexpectedly".into(), + )), // cannot have an empty HELLO message. The stream has terminated unexpectedly _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), "beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), + "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), _ => unreachable!("Cannot negotiate an unknown version"), }, "recent_beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), _ => unreachable!("Cannot negotiate an unknown version"), }, _ => unreachable!("Cannot negotiate an unknown protocol"), - }, - Ok(None) => { - // the object sent could be a empty. We return the empty object if this is the case - match self.protocol.message_name.as_str() { + } + } else { + match self.inner.decode(src).map_err(RPCError::from) { + Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1" => Ok(None), // cannot have an empty HELLO message. 
The stream has terminated unexpectedly + "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( + &packet, + )?))), _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), "beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), + "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), _ => unreachable!("Cannot negotiate an unknown version"), }, "recent_beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), _ => unreachable!("Cannot negotiate an unknown version"), }, _ => unreachable!("Cannot negotiate an unknown protocol"), - } + }, + Ok(None) => Ok(None), // waiting for more bytes + Err(e) => Err(e), } - Err(e) => Err(e), } } } diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index d912bcfa1..ee8ad4860 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -1,6 +1,5 @@ //!Available RPC methods types and ids. -use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz_derive::{Decode, Encode}; use types::{Epoch, Hash256, Slot}; @@ -66,8 +65,38 @@ impl Into for GoodbyeReason { } } -impl_encode_via_from!(GoodbyeReason, u64); -impl_decode_via_from!(GoodbyeReason, u64); +impl ssz::Encode for GoodbyeReason { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + 0_u64.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + let conv: u64 = self.clone().into(); + conv.ssz_append(buf) + } +} + +impl ssz::Decode for GoodbyeReason { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + u64::from_ssz_bytes(bytes).and_then(|n| Ok(n.into())) + } +} /// Request a number of beacon block roots from a peer. 
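// A sketch (illustrative, not part of the patch) of the invariant behind the manual
// `GoodbyeReason` impls above, which replace the removed `impl_encode_via_from!` /
// `impl_decode_via_from!` macros: encoding round-trips through the u64
// representation, so the wire format is a fixed 8-byte little-endian integer.
// Assumes ssz's `Encode::as_ssz_bytes` and `Decode::from_ssz_bytes`.
fn goodbye_reason_roundtrip() {
    use ssz::{Decode, Encode};

    let reason = GoodbyeReason::IrrelevantNetwork;
    let bytes = reason.clone().as_ssz_bytes();
    assert_eq!(bytes.len(), 8); // fixed-length, identical to a bare u64
    let decoded = GoodbyeReason::from_ssz_bytes(&bytes).expect("8 bytes decode as a u64");
    // Compare via the u64 conversion the impls are built on.
    let (a, b): (u64, u64) = (reason.into(), decoded.into());
    assert_eq!(a, b);
}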
#[derive(Encode, Decode, Clone, Debug, PartialEq)] @@ -157,3 +186,53 @@ impl ErrorMessage { String::from_utf8(self.error_message.clone()).unwrap_or_else(|_| "".into()) } } + +impl std::fmt::Display for HelloMessage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Hello Message: Fork Version: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_version, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot) + } +} + +impl std::fmt::Display for RPCResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCResponse::Hello(hello) => write!(f, "{}", hello), + RPCResponse::BeaconBlocks(data) => write!(f, ", len: {}", data.len()), + RPCResponse::RecentBeaconBlocks(data) => { + write!(f, ", len: {}", data.len()) + } + } + } +} + +impl std::fmt::Display for RPCErrorResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCErrorResponse::Success(res) => write!(f, "{}", res), + RPCErrorResponse::InvalidRequest(err) => write!(f, "Invalid Request: {:?}", err), + RPCErrorResponse::ServerError(err) => write!(f, "Server Error: {:?}", err), + RPCErrorResponse::Unknown(err) => write!(f, "Unknown Error: {:?}", err), + } + } +} + +impl std::fmt::Display for GoodbyeReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GoodbyeReason::ClientShutdown => write!(f, "Client Shutdown"), + GoodbyeReason::IrrelevantNetwork => write!(f, "Irrelevant Network"), + GoodbyeReason::Fault => write!(f, "Fault"), + GoodbyeReason::Unknown => write!(f, "Unknown Reason"), + } + } +} + +impl std::fmt::Display for BeaconBlocksRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Head Block Root: {}, Start Slot: {}, Count: {}, Step: {}", + self.head_block_root, self.start_slot, self.count, self.step + ) + } +} diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 756a62e71..2076615a9 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -47,6 +47,16 @@ impl RPCEvent { } } +impl std::fmt::Display for RPCEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCEvent::Request(id, req) => write!(f, "RPC Request(Id: {}, {})", id, req), + RPCEvent::Response(id, res) => write!(f, "RPC Response(Id: {}, {})", id, res), + RPCEvent::Error(id, err) => write!(f, "RPC Request(Id: {}, Error: {:?})", id, err), + } + } +} + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. 
pub struct RPC { diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index be1efdf5d..401fa8b9e 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -288,3 +288,14 @@ impl std::error::Error for RPCError { } } } + +impl std::fmt::Display for RPCRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCRequest::Hello(hello) => write!(f, "Hello Message: {}", hello), + RPCRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), + RPCRequest::BeaconBlocks(req) => write!(f, "Beacon Blocks: {}", req), + RPCRequest::RecentBeaconBlocks(req) => write!(f, "Recent Beacon Blocks: {:?}", req), + } + } +} diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 34781927c..f9c06a532 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -42,16 +42,21 @@ impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { trace!(log, "Libp2p Service starting"); + let local_keypair = if let Some(hex_bytes) = &config.secret_key_hex { + keypair_from_hex(hex_bytes)? + } else { + load_private_key(&config, &log) + }; + // load the private key from CLI flag, disk or generate a new one - let local_private_key = load_private_key(&config, &log); - let local_peer_id = PeerId::from(local_private_key.public()); + let local_peer_id = PeerId::from(local_keypair.public()); info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); let mut swarm = { // Set up the transport - tcp/ws with secio and mplex/yamux - let transport = build_transport(local_private_key.clone()); + let transport = build_transport(local_keypair.clone()); // Lighthouse network behaviour - let behaviour = Behaviour::new(&local_private_key, &config, &log)?; + let behaviour = Behaviour::new(&local_keypair, &config, &log)?; Swarm::new(transport, behaviour, local_peer_id.clone()) }; @@ -79,15 +84,32 @@ impl Service { } }; - // attempt to connect to user-input libp2p nodes - for multiaddr in config.libp2p_nodes { + // helper closure for dialing peers + let mut dial_addr = |multiaddr: Multiaddr| { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)), Err(err) => debug!( log, - "Could not connect to peer"; "address" => format!("{}", multiaddr), "Error" => format!("{:?}", err) + "Could not connect to peer"; "address" => format!("{}", multiaddr), "error" => format!("{:?}", err) ), }; + }; + + // attempt to connect to user-input libp2p nodes + for multiaddr in config.libp2p_nodes { + dial_addr(multiaddr); + } + + // attempt to connect to any specified boot-nodes + for bootnode_enr in config.boot_nodes { + for multiaddr in bootnode_enr.multiaddr() { + // ignore udp multiaddr if it exists + let components = multiaddr.iter().collect::>(); + if let Protocol::Udp(_) = components[1] { + continue; + } + dial_addr(multiaddr); + } } // subscribe to default gossipsub topics @@ -145,16 +167,16 @@ impl Stream for Service { fn poll(&mut self) -> Poll, Self::Error> { loop { match self.swarm.poll() { - //Behaviour events Ok(Async::Ready(Some(event))) => match event { - // TODO: Stub here for debugging BehaviourEvent::GossipMessage { + id, source, topics, message, } => { trace!(self.log, "Gossipsub message received"; "service" => "Swarm"); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { + id, source, topics, 
message, @@ -222,12 +244,34 @@ pub enum Libp2pEvent { PeerDisconnected(PeerId), /// Received pubsub message. PubsubMessage { + id: String, source: PeerId, topics: Vec, message: PubsubMessage, }, } +fn keypair_from_hex(hex_bytes: &str) -> error::Result { + let hex_bytes = if hex_bytes.starts_with("0x") { + hex_bytes[2..].to_string() + } else { + hex_bytes.to_string() + }; + + hex::decode(&hex_bytes) + .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) + .and_then(keypair_from_bytes) +} + +fn keypair_from_bytes(mut bytes: Vec) -> error::Result { + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + .map(|secret| { + let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); + Keypair::Secp256k1(keypair) + }) + .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) +} + /// Loads a private key from disk. If this fails, a new key is /// generated and is then saved to disk. /// diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index dc08bd311..ffeba96ec 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -13,9 +13,11 @@ store = { path = "../store" } eth2-libp2p = { path = "../eth2-libp2p" } types = { path = "../../eth2/types" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } +hex = "0.3" eth2_ssz = "0.1" tree_hash = "0.1" futures = "0.1.25" error-chain = "0.12.0" tokio = "0.1.16" parking_lot = "0.9.0" +smallvec = "0.6.10" diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index c14fc970d..898304272 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,6 +1,6 @@ use crate::error; use crate::service::NetworkMessage; -use crate::sync::SimpleSync; +use crate::sync::MessageProcessor; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::{ behaviour::PubsubMessage, @@ -9,18 +9,22 @@ use eth2_libp2p::{ }; use futures::future::Future; use futures::stream::Stream; -use slog::{debug, trace, warn}; +use slog::{debug, o, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, VoluntaryExit}; -/// Handles messages received from the network and client and organises syncing. +/// Handles messages received from the network and client and organises syncing. The +/// purpose of this struct is to validate and decode messages from the network before +/// passing them to the internal message processor. The message processor spawns a syncing thread +/// which manages which blocks need to be requested and processed. pub struct MessageHandler { - /// Currently loaded and initialised beacon chain. - _chain: Arc>, - /// The syncing framework. - sync: SimpleSync, + /// A channel to the network service to allow for gossip propagation. + network_send: mpsc::UnboundedSender, + /// Processes validated and decoded messages from the network. Has direct access to the + /// sync manager. + message_processor: MessageProcessor, /// The `MessageHandler` logger. log: slog::Logger, } @@ -34,8 +38,9 @@ pub enum HandlerMessage { PeerDisconnected(PeerId), /// An RPC response/request has been received. RPC(PeerId, RPCEvent), - /// A gossip message has been received. - PubsubMessage(PeerId, PubsubMessage), + /// A gossip message has been received. The fields are: message id, the peer that sent us this + /// message and the message itself.
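// A sketch (illustrative, not part of the patch) of the validate-then-propagate
// flow described above. Gossipsub is configured with `propagate_messages(false)`,
// so a received message waits in its mcache until the beacon chain validates it;
// only then does the handler ask the network service to forward it by message id.
// The types here are simplified stand-ins.
enum Verdict {
    Valid,
    Invalid(String),
}

fn maybe_propagate(message_id: String, verdict: Verdict) -> Option<String> {
    match verdict {
        // Validated: release the cached message to our peers (the real handler
        // sends NetworkMessage::Propagate to the network service).
        Verdict::Valid => Some(message_id),
        // Invalid: never forwarded; the entry simply ages out of the mcache.
        Verdict::Invalid(_) => None,
    }
}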
+ PubsubMessage(String, PeerId, PubsubMessage), } impl MessageHandler { @@ -46,17 +51,20 @@ impl MessageHandler { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { - trace!(log, "Service starting"); + let message_handler_log = log.new(o!("Service"=> "Message Handler")); + trace!(message_handler_log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // Initialise sync and begin processing in thread - let sync = SimpleSync::new(beacon_chain.clone(), network_send, &log); + + // Initialise a message processor instance, which itself spawns the syncing thread. + let message_processor = + MessageProcessor::new(executor, beacon_chain, network_send.clone(), &log); // generate the Message handler let mut handler = MessageHandler { - _chain: beacon_chain.clone(), - sync, - log: log.clone(), + network_send, + message_processor, + log: message_handler_log, }; // spawn handler task and move the message handler instance into the spawned thread @@ -76,19 +84,19 @@ impl MessageHandler { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { - self.sync.on_connect(peer_id); + self.message_processor.on_connect(peer_id); } // A peer has disconnected HandlerMessage::PeerDisconnected(peer_id) => { - self.sync.on_disconnect(peer_id); + self.message_processor.on_disconnect(peer_id); } // An RPC message request/response has been received HandlerMessage::RPC(peer_id, rpc_event) => { self.handle_rpc_message(peer_id, rpc_event); } // An RPC message request/response has been received - HandlerMessage::PubsubMessage(peer_id, gossip) => { - self.handle_gossip(peer_id, gossip); + HandlerMessage::PubsubMessage(id, peer_id, gossip) => { + self.handle_gossip(id, peer_id, gossip); } } } @@ -108,7 +116,7 @@ impl MessageHandler { fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { match request { RPCRequest::Hello(hello_message) => { - self.sync + self.message_processor .on_hello_request(peer_id, request_id, hello_message) } RPCRequest::Goodbye(goodbye_reason) => { debug!( self.log, "peer" => format!("{:?}", peer_id), "reason" => format!("{:?}", goodbye_reason), ); - self.sync.on_disconnect(peer_id); + self.message_processor.on_disconnect(peer_id); } RPCRequest::BeaconBlocks(request) => self - .sync + .message_processor .on_beacon_blocks_request(peer_id, request_id, request), RPCRequest::RecentBeaconBlocks(request) => self - .sync + .message_processor .on_recent_beacon_blocks_request(peer_id, request_id, request), } } @@ -150,12 +158,13 @@ impl MessageHandler { RPCErrorResponse::Success(response) => { match response { RPCResponse::Hello(hello_message) => { - self.sync.on_hello_response(peer_id, hello_message); + self.message_processor + .on_hello_response(peer_id, hello_message); } RPCResponse::BeaconBlocks(response) => { match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { - self.sync.on_beacon_blocks_response( + self.message_processor.on_beacon_blocks_response( peer_id, request_id, beacon_blocks, @@ -170,7 +179,7 @@ impl MessageHandler { RPCResponse::RecentBeaconBlocks(response) => { match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { - self.sync.on_recent_beacon_blocks_response( + self.message_processor.on_recent_beacon_blocks_response( peer_id, request_id, beacon_blocks, @@ -194,24 +203,37 @@ impl MessageHandler { } /// Handle RPC messages - fn
handle_gossip(&mut self, id: String, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { Ok(block) => { - let _should_forward_on = self.sync.on_block_gossip(peer_id, block); + let should_forward_on = self + .message_processor + .on_block_gossip(peer_id.clone(), block); + // TODO: Apply more sophisticated validation and decoding logic + if should_forward_on { + self.propagate_message(id, peer_id.clone()); + } } Err(e) => { debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Ok(attestation) => self.sync.on_attestation_gossip(peer_id, attestation), + Ok(attestation) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); + self.message_processor + .on_attestation_gossip(peer_id, attestation); + } Err(e) => { debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) { Ok(_exit) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); // TODO: Handle exits debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) ); } @@ -222,6 +244,8 @@ impl MessageHandler { PubsubMessage::ProposerSlashing(message) => { match self.decode_gossip_proposer_slashing(message) { Ok(_slashing) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); // TODO: Handle proposer slashings debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); } @@ -233,6 +257,8 @@ impl MessageHandler { PubsubMessage::AttesterSlashing(message) => { match self.decode_gossip_attestation_slashing(message) { Ok(_slashing) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); // TODO: Handle attester slashings debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) ); } @@ -248,6 +274,21 @@ impl MessageHandler { } } + /// Informs the network service that the message should be forwarded to other peers. + fn propagate_message(&mut self, message_id: String, propagation_source: PeerId) { + self.network_send + .try_send(NetworkMessage::Propagate { + propagation_source, + message_id, + }) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send propagation request to the network service" + ) + }); + } + /* Decoding of gossipsub objects from the network. 
* * The decoding is done in the message handler as it has access to a `BeaconChain` and can diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a8b3c74b6..1357b5495 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -34,13 +34,8 @@ impl Service { // build the network channel let (network_send, network_recv) = mpsc::unbounded_channel::<NetworkMessage>(); // launch message handler thread - let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = MessageHandler::spawn( - beacon_chain, - network_send.clone(), - executor, - message_handler_log, - )?; + let message_handler_send = + MessageHandler::spawn(beacon_chain, network_send.clone(), executor, log.clone())?; let network_log = log.new(o!("Service" => "Network")); // launch libp2p service @@ -159,12 +154,23 @@ fn network_service( // poll the network channel match network_recv.poll() { Ok(Async::Ready(Some(message))) => match message { - NetworkMessage::Send(peer_id, outgoing_message) => match outgoing_message { - OutgoingMessage::RPC(rpc_event) => { - trace!(log, "Sending RPC Event: {:?}", rpc_event); - libp2p_service.lock().swarm.send_rpc(peer_id, rpc_event); - } - }, + NetworkMessage::RPC(peer_id, rpc_event) => { + trace!(log, "{}", rpc_event); + libp2p_service.lock().swarm.send_rpc(peer_id, rpc_event); + } + NetworkMessage::Propagate { + propagation_source, + message_id, + } => { + trace!(log, "Propagating gossipsub message"; + "propagation_peer" => format!("{:?}", propagation_source), + "message_id" => format!("{}", message_id), + ); + libp2p_service + .lock() + .swarm + .propagate_message(&propagation_source, message_id); + } NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); libp2p_service.lock().swarm.publish(&topics, message); @@ -185,7 +191,7 @@ fn network_service( match libp2p_service.lock().poll() { Ok(Async::Ready(Some(event))) => match event { Libp2pEvent::RPC(peer_id, rpc_event) => { - trace!(log, "RPC Event: RPC message received: {:?}", rpc_event); + trace!(log, "{}", rpc_event); message_handler_send .try_send(HandlerMessage::RPC(peer_id, rpc_event)) .map_err(|_| "Failed to send RPC to handler")?; @@ -203,13 +209,14 @@ fn network_service( .map_err(|_| "Failed to send PeerDisconnected to handler")?; } Libp2pEvent::PubsubMessage { - source, message, .. + id, + source, + message, + .. } => { - //TODO: Decide if we need to propagate the topic upwards. (Potentially for - //attestations) message_handler_send - .try_send(HandlerMessage::PubsubMessage(source, message)) - .map_err(|_| " failed to send pubsub message to handler")?; + .try_send(HandlerMessage::PubsubMessage(id, source, message)) + .map_err(|_| "Failed to send pubsub message to handler")?; } }, Ok(Async::Ready(None)) => unreachable!("Stream never ends"), @@ -225,19 +232,16 @@ fn network_service( /// Types of messages that the network service can receive. #[derive(Debug)] pub enum NetworkMessage { - /// Send a message to libp2p service. - //TODO: Define typing for messages across the wire - Send(PeerId, OutgoingMessage), - /// Publish a message to pubsub mechanism. + /// Send an RPC message to the libp2p service. + RPC(PeerId, RPCEvent), + /// Publish a message to gossipsub. Publish { topics: Vec<Topic>, message: PubsubMessage, }, -} - -/// Type of outgoing messages that can be sent through the network service. -#[derive(Debug)] -pub enum OutgoingMessage { - /// Send an RPC request/response. - RPC(RPCEvent), + /// Propagate a received gossipsub message. + Propagate { + propagation_source: PeerId, + message_id: String, + }, }
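The propagation flow above is easiest to see stripped of libp2p: the message handler validates a gossip message and sends its `message_id` (plus the source peer) back over the network channel, and only the network service touches gossipsub. Below is a standalone sketch of that exchange, with the types simplified to `String`s and a std channel standing in for the tokio 0.1 channel used in the PR:

use std::sync::mpsc;

#[derive(Debug)]
enum NetworkMessage {
    // forward an already-received gossip message to our other peers
    Propagate { propagation_source: String, message_id: String },
}

fn main() {
    let (network_send, network_recv) = mpsc::channel();
    // message handler side: the message validated, so request propagation
    network_send
        .send(NetworkMessage::Propagate {
            propagation_source: "peer-a".to_string(),
            message_id: "msg-1".to_string(),
        })
        .expect("network service is running");
    // network service side: drain the channel and hand each request to gossipsub
    while let Ok(NetworkMessage::Propagate { propagation_source, message_id }) = network_recv.try_recv() {
        println!("propagating {} (from {})", message_id, propagation_source);
    }
}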
A round-robin approach is used to request the parent from the known list of +//! fully sync'd peers. If `PARENT_FAIL_TOLERANCE` attempts at requesting the block fail, we +//! drop the propagated block and downvote the peer that sent it to us. + +use super::simple_sync::{hello_message, NetworkContext, PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::RequestId; +use eth2_libp2p::rpc::{RPCRequest, RequestId}; use eth2_libp2p::PeerId; +use futures::prelude::*; use slog::{debug, info, trace, warn, Logger}; +use smallvec::SmallVec; use std::collections::{HashMap, HashSet}; use std::ops::{Add, Sub}; -use std::sync::Arc; +use std::sync::Weak; +use tokio::sync::{mpsc, oneshot}; use types::{BeaconBlock, EthSpec, Hash256, Slot}; -const MAX_BLOCKS_PER_REQUEST: u64 = 10; +/// Blocks are downloaded in batches from peers. This constant specifies how many blocks per batch +/// are requested. Currently the value is small for testing. It will be increased for +/// production. +const MAX_BLOCKS_PER_REQUEST: u64 = 50; -/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. +/// The number of slots ahead of us that is allowed before requesting a long-range (batch) sync +/// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a +/// fully sync'd peer. const SLOT_IMPORT_TOLERANCE: usize = 10; +/// How many attempts we make to find a parent of a block before we give up trying. const PARENT_FAIL_TOLERANCE: usize = 3; +/// The maximum depth we will search for a parent block. In principle we should have sync'd any +/// canonical chain to its head once the peer connects. A chain should not appear where its depth +/// is further back than the most recent head slot. const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +/// The number of empty batches we tolerate before dropping the peer. This prevents endless +/// requests to peers who never return blocks. const EMPTY_BATCH_TOLERANCE: usize = 100; + +#[derive(Debug)] +/// A message that can be sent to the sync manager thread. +pub enum SyncMessage<E: EthSpec> { + /// A useful peer has been discovered. + AddPeer(PeerId, PeerSyncInfo), + /// A `BeaconBlocks` response has been received. + BeaconBlocksResponse { + peer_id: PeerId, + request_id: RequestId, + beacon_blocks: Vec<BeaconBlock<E>>, + }, + /// A `RecentBeaconBlocks` response has been received. + RecentBeaconBlocksResponse { + peer_id: PeerId, + request_id: RequestId, + beacon_blocks: Vec<BeaconBlock<E>>, + }, + /// A block with an unknown parent has been received. + UnknownBlock(PeerId, BeaconBlock<E>), + /// A peer has disconnected. + Disconnect(PeerId), + /// An RPC Error has occurred on a request. + _RPCError(RequestId), +} #[derive(PartialEq)] +/// The current state of a block or batch lookup. enum BlockRequestsState { + /// The object is queued to be downloaded from a peer but has not yet been requested. Queued, + /// The batch or parent has been requested with the `RequestId` and we are awaiting a response. Pending(RequestId), - Complete, + /// The downloaded blocks are ready to be processed by the beacon chain. For a batch process + /// this means we have found a common chain. + ReadyToProcess, + /// A failure has occurred and we will drop and downvote the peer that caused the request. Failed, }
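As a rough sketch of the windowing performed by `update_start_slot` below, the start slot simply steps by `MAX_BLOCKS_PER_REQUEST` in the direction of the search. A standalone illustration (slots reduced to plain `u64`s; saturating arithmetic assumed for the backwards step):

const MAX_BLOCKS_PER_REQUEST: u64 = 50;

enum SyncDirection { Forwards, Backwards }

// step the request window one batch in the current search direction
fn next_start_slot(current: u64, direction: &SyncDirection) -> u64 {
    match direction {
        SyncDirection::Forwards => current + MAX_BLOCKS_PER_REQUEST,
        SyncDirection::Backwards => current.saturating_sub(MAX_BLOCKS_PER_REQUEST),
    }
}

fn main() {
    // forwards: batches from our head (slot 100) toward a remote head at slot 260
    let (mut start, target) = (100u64, 260u64);
    while start < target {
        println!("requesting slots {}..{}", start, start + MAX_BLOCKS_PER_REQUEST);
        start = next_start_slot(start, &SyncDirection::Forwards);
    }
    // backwards: one step of a common-ancestor search from slot 100
    println!("backwards window starts at {}", next_start_slot(100, &SyncDirection::Backwards));
}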
+/// The direction in which a batch request is currently syncing. +enum SyncDirection { + /// The batch has just been initialised and we need to check to see if a backward sync is + /// required on the first batch response. + Initial, + /// We are syncing forwards, the next batch should contain higher slot numbers than its + /// predecessor. + Forwards, + /// We are syncing backwards and looking for a common ancestor chain before we can start + /// processing the downloaded blocks. + Backwards, +} + +/// `BlockRequests` keeps track of the long-range (batch) sync process per peer. struct BlockRequests<E: EthSpec> { + /// The peer's head slot and the target of this batch download. target_head_slot: Slot, + /// The peer's head root, used to specify which chain of blocks we are downloading from the + /// peer. target_head_root: Hash256, + /// The blocks that we have currently downloaded from the peer that are yet to be processed. downloaded_blocks: Vec<BeaconBlock<E>>, + /// The number of blocks successfully processed in this request. + blocks_processed: usize, + /// The number of empty batches we have consecutively received. If a peer returns more than + /// `EMPTY_BATCH_TOLERANCE`, they are dropped. + consecutive_empty_batches: usize, + /// The current state of this batch request. state: BlockRequestsState, - /// Specifies whether the current state is syncing forwards or backwards. - forward_sync: bool, + /// Specifies the current direction of this batch request. + sync_direction: SyncDirection, /// The current `start_slot` of the batched block request. current_start_slot: Slot, } +/// Maintains a sequential list of parents to lookup and the lookup's current state. struct ParentRequests<E: EthSpec> { + /// The blocks that have currently been downloaded. downloaded_blocks: Vec<BeaconBlock<E>>, + /// The number of failed attempts to retrieve a parent block. If too many attempts occur, this + /// lookup is failed and rejected. failed_attempts: usize, - last_submitted_peer: PeerId, // to downvote the submitting peer. + /// The peer who last submitted a block. If the chain ends or fails, this is the peer that is + /// downvoted. + last_submitted_peer: PeerId, + /// The current state of the parent lookup. state: BlockRequestsState, } impl<E: EthSpec> BlockRequests<E> { - // gets the start slot for next batch - // last block slot downloaded plus 1 + /// Gets the next start slot for a batch and transitions the state to `Queued`. fn update_start_slot(&mut self) { - if self.forward_sync { - self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); - } else { - self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); + match self.sync_direction { + SyncDirection::Initial | SyncDirection::Forwards => { + self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); + } + SyncDirection::Backwards => { + self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); + } } self.state = BlockRequestsState::Queued; } } #[derive(PartialEq, Debug, Clone)] +/// The current state of the `SyncManager`. enum ManagerState { + /// The manager is performing a long-range (batch) sync. In this mode, parent lookups are + /// disabled. Syncing, + /// The manager is up to date with all known peers and is connected to at least one + /// fully-syncing peer. In this state, parent lookups are enabled. Regular, + /// No useful peers are connected. Long-range syncs cannot proceed and we have no useful + /// peers to download parents for. More peers need to be connected before we can proceed. Stalled, }
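Spelled out as a function, the rule these variants suggest is roughly the following; this is a hedged reconstruction, since the actual `update_state` body is elided from this diff:

#[derive(Debug, PartialEq)]
enum ManagerState { Syncing, Regular, Stalled }

// batches in flight imply Syncing; otherwise fall back to Regular when at
// least one fully-sync'd peer is connected, else Stalled
fn derive_state(batches_in_flight: usize, full_peers: usize) -> ManagerState {
    if batches_in_flight > 0 {
        ManagerState::Syncing
    } else if full_peers > 0 {
        ManagerState::Regular
    } else {
        ManagerState::Stalled
    }
}

fn main() {
    assert_eq!(derive_state(2, 0), ManagerState::Syncing);
    assert_eq!(derive_state(0, 3), ManagerState::Regular);
    assert_eq!(derive_state(0, 0), ManagerState::Stalled);
}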
-pub(crate) enum ImportManagerOutcome { - Idle, - RequestBlocks { - peer_id: PeerId, - request_id: RequestId, - request: BeaconBlocksRequest, - }, - /// Updates information with peer via requesting another HELLO handshake. - Hello(PeerId), - RecentRequest(PeerId, RecentBeaconBlocksRequest), - DownvotePeer(PeerId), -} - -pub struct ImportManager { - /// A reference to the underlying beacon chain. - chain: Arc<BeaconChain<T>>, +/// The primary object for handling and driving all the current syncing logic. It maintains the +/// current state of the syncing process, the number of useful peers, downloaded blocks and +/// controls the logic behind both the long-range (batch) sync and the on-going potential parent +/// look-up of blocks. +pub struct SyncManager<T: BeaconChainTypes> { + /// A weak reference to the underlying beacon chain. + chain: Weak<BeaconChain<T>>, + /// The current state of the import manager. state: ManagerState, + /// A receiving channel sent by the message processor thread. + input_channel: mpsc::UnboundedReceiver<SyncMessage<T::EthSpec>>, + /// A network context to contact the network service. + network: NetworkContext, + /// A collection of `BlockRequests` per peer that is currently being downloaded. Used in the + /// long-range (batch) sync process. import_queue: HashMap<PeerId, BlockRequests<T::EthSpec>>, - parent_queue: Vec<ParentRequests<T::EthSpec>>, + /// A collection of parent block lookups. + parent_queue: SmallVec<[ParentRequests<T::EthSpec>; 3]>, + /// The collection of known, connected, fully-sync'd peers. full_peers: HashSet<PeerId>, + /// The current request id. This is used to keep track of responses to various outbound + /// requests. This is an internal accounting mechanism; request ids are never sent to any + /// peers. current_req_id: usize, + /// The logger for the import manager. log: Logger, } -impl<T: BeaconChainTypes> ImportManager<T> { - pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: &slog::Logger) -> Self { - ImportManager { - chain: beacon_chain.clone(), - state: ManagerState::Regular, - import_queue: HashMap::new(), - parent_queue: Vec::new(), - full_peers: HashSet::new(), - current_req_id: 0, - log: log.clone(), - } - } +/// Spawns a new `SyncManager` thread which has a weak reference to the underlying beacon +/// chain. This allows the chain to be +/// dropped during the syncing process, which will gracefully end the `SyncManager`. +pub fn spawn<T: BeaconChainTypes>( + executor: &tokio::runtime::TaskExecutor, + beacon_chain: Weak<BeaconChain<T>>, + network: NetworkContext, + log: slog::Logger, +) -> ( + mpsc::UnboundedSender<SyncMessage<T::EthSpec>>, + oneshot::Sender<()>, +) { + // generate the exit channel + let (sync_exit, exit_rx) = tokio::sync::oneshot::channel(); + // generate the message channel + let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>(); + // create an instance of the SyncManager + let sync_manager = SyncManager { + chain: beacon_chain, + state: ManagerState::Stalled, + input_channel: sync_recv, + network, + import_queue: HashMap::new(), + parent_queue: SmallVec::new(), + full_peers: HashSet::new(), + current_req_id: 0, + log: log.clone(), + }; + + // spawn the sync manager thread + debug!(log, "Sync Manager started"); + executor.spawn( + sync_manager + .select(exit_rx.then(|_| Ok(()))) + .then(move |_| { + info!(log.clone(), "Sync Manager shutdown"); + Ok(()) + }), + ); + (sync_send, sync_exit) +}
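The weak-handle pattern used by `spawn` is worth a miniature: because the manager holds no strong `Arc`, dropping the beacon chain elsewhere is enough to end the task on its next tick. A standalone sketch with a stub type and a synchronous check in place of the tokio task:

use std::sync::{Arc, Weak};

struct BeaconChainStub;

// returns whether the sync task should keep running
fn tick(chain: &Weak<BeaconChainStub>) -> bool {
    match chain.upgrade() {
        Some(_chain) => true, // chain alive: keep syncing
        None => false,        // chain dropped: shut down gracefully
    }
}

fn main() {
    let chain = Arc::new(BeaconChainStub);
    let weak = Arc::downgrade(&chain);
    assert!(tick(&weak));
    drop(chain); // the rest of the node tears the chain down...
    assert!(!tick(&weak)); // ...and the manager exits on its next poll
}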
+impl<T: BeaconChainTypes> SyncManager<T> { + /* Input Handling Functions */ + + /// A peer has connected which has blocks that are unknown to us. + /// + /// This function handles the logic associated with the connection of a new peer. If the peer + /// is sufficiently ahead of our current head, a long-range (batch) sync is started and + /// batches of blocks are queued to download from the peer. Batched blocks begin at our + /// current head. If the resulting downloaded blocks are part of our current chain, we + /// continue with a forward sync. If not, we download blocks (in batches) backwards until we + /// reach a common ancestor. Batches are then processed and downloaded sequentially forwards. + /// + /// If the peer is within the `SLOT_IMPORT_TOLERANCE`, then its head is sufficiently close to + /// ours that we consider it fully sync'd with respect to our current chain. pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { - // TODO: Improve comments. - // initially try to download blocks from our current head - // then backwards search all the way back to our finalized epoch until we match on a chain - // has to be done sequentially to find next slot to start the batch from + // ensure the beacon chain still exists + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + warn!(self.log, + "Beacon chain dropped. Peer not considered for sync"; + "peer_id" => format!("{:?}", peer_id)); + return; + } + }; - let local = PeerSyncInfo::from(&self.chain); + let local = PeerSyncInfo::from(&chain); - // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync + // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync and + // consider it a fully-sync'd peer. if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; "peer" => format!("{:?}", peer_id), @@ -116,34 +312,64 @@ impl<T: BeaconChainTypes> ImportManager<T> { ); // remove the peer from the queue if it exists self.import_queue.remove(&peer_id); + self.add_full_peer(peer_id); + // return; } + // Check if the peer is significantly behind us. If within `SLOT_IMPORT_TOLERANCE`, + // treat them as a fully synced peer. If not, ignore them in the sync process. + if local.head_slot.sub(remote.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { + self.add_full_peer(peer_id.clone()); + } else { + debug!( + self.log, + "Out of sync peer connected"; + "peer" => format!("{:?}", peer_id), + ); + return; + } + + // Check if we are already downloading blocks from this peer; if so, update it, otherwise + // set up a new request structure if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { // update the target head slot if remote.head_slot > block_requests.target_head_slot { block_requests.target_head_slot = remote.head_slot; } } else { + // not already downloading blocks from this peer let block_requests = BlockRequests { target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, + consecutive_empty_batches: 0, downloaded_blocks: Vec::new(), + blocks_processed: 0, state: BlockRequestsState::Queued, - forward_sync: true, - current_start_slot: self.chain.best_slot(), + sync_direction: SyncDirection::Initial, + current_start_slot: chain.best_slot(), }; self.import_queue.insert(peer_id, block_requests); } }
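Paraphrasing the doc comment on `add_peer`, the connect-time decision is a three-way classification on head-slot distance. A standalone sketch (plain `u64` slots; `saturating_sub` mirrors how `Slot` subtraction floors at zero):

const SLOT_IMPORT_TOLERANCE: u64 = 10;

#[derive(Debug)]
enum PeerClass {
    FullySynced,    // within tolerance in either direction
    NeedsBatchSync, // remote is ahead: start a long-range sync from our head
    Behind,         // remote is behind: not useful for syncing from
}

fn classify(local_head: u64, remote_head: u64) -> PeerClass {
    if remote_head.saturating_sub(local_head) < SLOT_IMPORT_TOLERANCE
        && local_head.saturating_sub(remote_head) < SLOT_IMPORT_TOLERANCE
    {
        PeerClass::FullySynced
    } else if remote_head > local_head {
        PeerClass::NeedsBatchSync
    } else {
        PeerClass::Behind
    }
}

fn main() {
    println!("{:?}", classify(100, 105)); // FullySynced
    println!("{:?}", classify(100, 260)); // NeedsBatchSync
}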
+ /// A `BeaconBlocks` request has received a response. This function processes the response. pub fn beacon_blocks_response( &mut self, peer_id: PeerId, request_id: RequestId, mut blocks: Vec<BeaconBlock<T::EthSpec>>, ) { - // find the request + // ensure the underlying chain still exists + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + trace!(self.log, "Chain dropped. Sync terminating"); + return; + } + }; + + // find the request associated with this response let block_requests = match self .import_queue .get_mut(&peer_id) @@ -167,10 +393,25 @@ impl<T: BeaconChainTypes> ImportManager<T> { if blocks.is_empty() { debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.update_start_slot(); + block_requests.consecutive_empty_batches += 1; + if block_requests.consecutive_empty_batches >= EMPTY_BATCH_TOLERANCE { + warn!(self.log, "Peer returned too many empty block batches"; + "peer" => format!("{:?}", peer_id)); + block_requests.state = BlockRequestsState::Failed; + } else if block_requests.current_start_slot + MAX_BLOCKS_PER_REQUEST + >= block_requests.target_head_slot + { + warn!(self.log, "Peer did not return blocks it claimed to possess"; + "peer" => format!("{:?}", peer_id)); + block_requests.state = BlockRequestsState::Failed; + } else { + block_requests.update_start_slot(); + } return; } + block_requests.consecutive_empty_batches = 0; + // verify the range of received blocks // Note that the order of blocks is verified in block processing let last_sent_slot = blocks[blocks.len() - 1].slot; @@ -180,90 +421,96 @@ impl<T: BeaconChainTypes> ImportManager<T> { .add(MAX_BLOCKS_PER_REQUEST) < last_sent_slot { - //TODO: Downvote peer - add a reason to failed - dbg!(&blocks); warn!(self.log, "BeaconBlocks response returned out of range blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.current_start_slot); + downvote_peer(&mut self.network, &self.log, peer_id); // consider this sync failed block_requests.state = BlockRequestsState::Failed; return; } // Determine if more blocks need to be downloaded. There are a few cases: - // - We have downloaded a batch from our head_slot, which has not reached the remotes head - // (target head). Therefore we need to download another sequential batch. - // - The latest batch includes blocks that greater than or equal to the target_head slot, - // which means we have caught up to their head. We then check to see if the first - // block downloaded matches our head. If so, we are on the same chain and can process - // the blocks. If not we need to sync back further until we are on the same chain. So - // request more blocks. - // - We are syncing backwards (from our head slot) and need to check if we are on the same - // chain. If so, process the blocks, if not, request more blocks all the way up to - // our last finalized slot. + // - We are in initial sync mode - we have requested blocks and need to determine whether + // this batch is part of a known chain, to decide whether to start syncing backwards or + // continue syncing forwards. + // - We are syncing backwards and need to verify if we have found a common ancestor in + // order to start processing the downloaded blocks. + // - We are syncing forwards. We mark this as complete and check if any further blocks are + // required to download when processing the batch.
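The `Initial` case reduces to one membership test: does our chain already hold, at the earliest downloaded slot, the same root we computed for the earliest downloaded block? A standalone sketch, with a `HashMap` standing in for the chain's `root_at_slot` index:

use std::collections::HashMap;

enum Next { ProcessForwards, SearchBackwards }

fn first_batch_decision(
    local_roots: &HashMap<u64, [u8; 32]>, // slot -> block root, as the chain would index them
    earliest_slot: u64,
    earliest_root: [u8; 32],
) -> Next {
    if local_roots.get(&earliest_slot) == Some(&earliest_root) {
        Next::ProcessForwards // the batch extends a chain we already know
    } else {
        Next::SearchBackwards // unknown chain: step the window backwards
    }
}

fn main() {
    let mut local_roots = HashMap::new();
    local_roots.insert(100u64, [7u8; 32]);
    match first_batch_decision(&local_roots, 100, [7u8; 32]) {
        Next::ProcessForwards => println!("common head found: process and sync forwards"),
        Next::SearchBackwards => println!("no common head: request an earlier batch"),
    }
}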
- if block_requests.forward_sync { - // append blocks if syncing forward - block_requests.downloaded_blocks.append(&mut blocks); - } else { - // prepend blocks if syncing backwards - block_requests.downloaded_blocks.splice(..0, blocks); - } + match block_requests.sync_direction { + SyncDirection::Initial => { + block_requests.downloaded_blocks.append(&mut blocks); - // does the batch contain the target_head_slot - let last_element_index = block_requests.downloaded_blocks.len() - 1; - if block_requests.downloaded_blocks[last_element_index].slot - >= block_requests.target_head_slot - || !block_requests.forward_sync - { - // if the batch is on our chain, this is complete and we can then process. - // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests.downloaded_blocks[0].slot; - //TODO: Decide which is faster. Reading block from db and comparing or calculating - //the hash tree root and comparing. - if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(&self.chain, earliest_slot) - { - block_requests.state = BlockRequestsState::Complete; - return; + + // this batch is the first batch downloaded. Check if we can process it, or if we + // need to search backwards. + + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. + let earliest_slot = block_requests.downloaded_blocks[0].slot; + if Some(block_requests.downloaded_blocks[0].canonical_root()) + == chain.root_at_slot(earliest_slot) + { + // we have a common head, start processing and begin a forwards sync + block_requests.sync_direction = SyncDirection::Forwards; + block_requests.state = BlockRequestsState::ReadyToProcess; + return; + } + // no common head, begin a backwards search + block_requests.sync_direction = SyncDirection::Backwards; + block_requests.current_start_slot = + std::cmp::min(chain.best_slot(), block_requests.downloaded_blocks[0].slot); + block_requests.update_start_slot(); } - - // not on the same chain, request blocks backwards - let state = &self.chain.head().beacon_state; - let local_finalized_slot = state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.current_start_slot { - warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; - return; + SyncDirection::Forwards => { + // continue syncing forwards; whether we have reached the target head is verified + // when the batch is processed + block_requests.downloaded_blocks.append(&mut blocks); + block_requests.state = BlockRequestsState::ReadyToProcess; + } + SyncDirection::Backwards => { + block_requests.downloaded_blocks.splice(..0, blocks); - // if this is a forward sync, then we have reached the head without a common chain - // and we need to start syncing backwards.
- if block_requests.forward_sync { - // Start a backwards sync by requesting earlier blocks - block_requests.forward_sync = false; - block_requests.current_start_slot = std::cmp::min( - self.chain.best_slot(), - block_requests.downloaded_blocks[0].slot, - ); + // verify the request hasn't failed by having no common ancestor chain + // get our local finalized_slot + let local_finalized_slot = { + let state = &chain.head().beacon_state; + state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + }; + + if local_finalized_slot >= block_requests.current_start_slot { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = BlockRequestsState::Failed; + return; + } + + // check if we have reached a common chain ancestor + let earliest_slot = block_requests.downloaded_blocks[0].slot; + if Some(block_requests.downloaded_blocks[0].canonical_root()) + == chain.root_at_slot(earliest_slot) + { + // we have a common head, start processing and begin a forwards sync + block_requests.sync_direction = SyncDirection::Forwards; + block_requests.state = BlockRequestsState::ReadyToProcess; + return; + } + + // no common chain, haven't passed last_finalized_head, so continue backwards + // search + block_requests.update_start_slot(); } } - - // update the start slot and re-queue the batch - block_requests.update_start_slot(); } pub fn recent_blocks_response( &mut self, peer_id: PeerId, request_id: RequestId, - blocks: Vec>, + mut blocks: Vec>, ) { // find the request let parent_request = match self @@ -298,32 +545,18 @@ impl ImportManager { return; } + // add the block to response + parent_request + .downloaded_blocks + .push(blocks.pop().expect("must exist")); + // queue for processing - parent_request.state = BlockRequestsState::Complete; + parent_request.state = BlockRequestsState::ReadyToProcess; } - pub fn _inject_error(_peer_id: PeerId, _id: RequestId) { - //TODO: Remove block state from pending - } - - pub fn peer_disconnect(&mut self, peer_id: &PeerId) { - self.import_queue.remove(peer_id); - self.full_peers.remove(peer_id); - self.update_state(); - } - - pub fn add_full_peer(&mut self, peer_id: PeerId) { - debug!( - self.log, "Fully synced peer added"; - "peer" => format!("{:?}", peer_id), - ); - self.full_peers.insert(peer_id); - self.update_state(); - } - - pub fn add_unknown_block(&mut self, block: BeaconBlock, peer_id: PeerId) { + fn add_unknown_block(&mut self, peer_id: PeerId, block: BeaconBlock) { // if we are not in regular sync mode, ignore this block - if let ManagerState::Regular = self.state { + if self.state != ManagerState::Regular { return; } @@ -350,38 +583,28 @@ impl ImportManager { self.parent_queue.push(req); } - pub(crate) fn poll(&mut self) -> ImportManagerOutcome { - loop { - // update the state of the manager - self.update_state(); - - // process potential block requests - if let Some(outcome) = self.process_potential_block_requests() { - return outcome; - } - - // process any complete long-range batches - if let Some(outcome) = self.process_complete_batches() { - return outcome; - } - - // process any parent block lookup-requests - if let Some(outcome) = self.process_parent_requests() { - return outcome; - } - - // process any complete parent lookups - let (re_run, outcome) = self.process_complete_parent_requests(); - if let Some(outcome) = outcome { - return outcome; - } else if !re_run { - break; - } - } - - return ImportManagerOutcome::Idle; + fn inject_error(&mut self, _id: RequestId) { + //TODO: 
Remove block state from pending } + fn peer_disconnect(&mut self, peer_id: &PeerId) { + self.import_queue.remove(peer_id); + self.full_peers.remove(peer_id); + self.update_state(); + } + + fn add_full_peer(&mut self, peer_id: PeerId) { + debug!( + self.log, "Fully synced peer added"; + "peer" => format!("{:?}", peer_id), + ); + self.full_peers.insert(peer_id); + } + + /* Processing State Functions */ + // These functions are called in the main poll function to transition the state of the sync + // manager + fn update_state(&mut self) { let previous_state = self.state.clone(); self.state = { @@ -401,20 +624,22 @@ impl ImportManager { } } - fn process_potential_block_requests(&mut self) -> Option { + fn process_potential_block_requests(&mut self) { // check if an outbound request is required // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p - // layer and not needed here. - // If any in queued state we submit a request. + // layer and not needed here. Therefore we create many outbound requests and let the RPC + // handle the number of simultaneous requests. Request all queued objects. // remove any failed batches let debug_log = &self.log; + let full_peer_ref = &mut self.full_peers; self.import_queue.retain(|peer_id, block_request| { if let BlockRequestsState::Failed = block_request.state { debug!(debug_log, "Block import from peer failed"; "peer_id" => format!("{:?}", peer_id), - "downloaded_blocks" => block_request.downloaded_blocks.len() + "downloaded_blocks" => block_request.blocks_processed ); + full_peer_ref.remove(peer_id); false } else { true @@ -422,71 +647,99 @@ impl ImportManager { }); // process queued block requests - for (peer_id, block_requests) in self - .import_queue - .iter_mut() - .find(|(_peer_id, req)| req.state == BlockRequestsState::Queued) - { - let request_id = self.current_req_id; - block_requests.state = BlockRequestsState::Pending(request_id); - self.current_req_id += 1; + for (peer_id, block_requests) in self.import_queue.iter_mut() { + if block_requests.state == BlockRequestsState::Queued { + let request_id = self.current_req_id; + block_requests.state = BlockRequestsState::Pending(request_id); + self.current_req_id += 1; - let request = BeaconBlocksRequest { - head_block_root: block_requests.target_head_root, - start_slot: block_requests.current_start_slot.as_u64(), - count: MAX_BLOCKS_PER_REQUEST, - step: 0, - }; - return Some(ImportManagerOutcome::RequestBlocks { - peer_id: peer_id.clone(), - request, - request_id, - }); - } - - None - } - - fn process_complete_batches(&mut self) -> Option { - let completed_batches = self - .import_queue - .iter() - .filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete) - .map(|(peer, _)| peer) - .cloned() - .collect::>(); - for peer_id in completed_batches { - let block_requests = self.import_queue.remove(&peer_id).expect("key exists"); - match self.process_blocks(block_requests.downloaded_blocks.clone()) { - Ok(()) => { - //TODO: Verify it's impossible to have empty downloaded_blocks - let last_element = block_requests.downloaded_blocks.len() - 1; - debug!(self.log, "Blocks processed successfully"; - "peer" => format!("{:?}", peer_id), - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - ); - // Re-HELLO to ensure we are up to the latest head - return Some(ImportManagerOutcome::Hello(peer_id)); - } - Err(e) => { - let last_element = 
block_requests.downloaded_blocks.len() - 1; - warn!(self.log, "Block processing failed"; - "peer" => format!("{:?}", peer_id), - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - "error" => format!("{:?}", e), - ); - return Some(ImportManagerOutcome::DownvotePeer(peer_id)); - } + let request = BeaconBlocksRequest { + head_block_root: block_requests.target_head_root, + start_slot: block_requests.current_start_slot.as_u64(), + count: MAX_BLOCKS_PER_REQUEST, + step: 0, + }; + request_blocks( + &mut self.network, + &self.log, + peer_id.clone(), + request_id, + request, + ); } } - None } - fn process_parent_requests(&mut self) -> Option { + fn process_complete_batches(&mut self) -> bool { + // This function can queue extra blocks and the main poll loop will need to be re-executed + // to process these. This flag indicates that the main poll loop has to continue. + let mut re_run_poll = false; + + // create reference variables to be moved into subsequent closure + let chain_ref = self.chain.clone(); + let log_ref = &self.log; + let network_ref = &mut self.network; + + self.import_queue.retain(|peer_id, block_requests| { + if block_requests.state == BlockRequestsState::ReadyToProcess { + let downloaded_blocks = + std::mem::replace(&mut block_requests.downloaded_blocks, Vec::new()); + let last_element = downloaded_blocks.len() - 1; + let start_slot = downloaded_blocks[0].slot; + let end_slot = downloaded_blocks[last_element].slot; + + match process_blocks(chain_ref.clone(), downloaded_blocks, log_ref) { + Ok(()) => { + debug!(log_ref, "Blocks processed successfully"; + "peer" => format!("{:?}", peer_id), + "start_slot" => start_slot, + "end_slot" => end_slot, + "no_blocks" => last_element + 1, + ); + block_requests.blocks_processed += last_element + 1; + + // check if the batch is complete, by verifying if we have reached the + // target head + if end_slot >= block_requests.target_head_slot { + // Completed, re-hello the peer to ensure we are up to the latest head + hello_peer(network_ref, log_ref, chain_ref.clone(), peer_id.clone()); + // remove the request + false + } else { + // have not reached the end, queue another batch + block_requests.update_start_slot(); + re_run_poll = true; + // keep the batch + true + } + } + Err(e) => { + warn!(log_ref, "Block processing failed"; + "peer" => format!("{:?}", peer_id), + "start_slot" => start_slot, + "end_slot" => end_slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + downvote_peer(network_ref, log_ref, peer_id.clone()); + false + } + } + } else { + // not ready to process + true + } + }); + + re_run_poll + } + + fn process_parent_requests(&mut self) { + // check to make sure there are peers to search for the parent from + if self.full_peers.is_empty() { + return; + } + // remove any failed requests let debug_log = &self.log; self.parent_queue.retain(|parent_request| { @@ -501,11 +754,6 @@ impl ImportManager { } }); - // check to make sure there are peers to search for the parent from - if self.full_peers.is_empty() { - return None; - } - // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { @@ -518,34 +766,38 @@ impl ImportManager { continue; } - parent_request.state = BlockRequestsState::Pending(self.current_req_id); + let request_id = self.current_req_id; + parent_request.state = 
BlockRequestsState::Pending(request_id); self.current_req_id += 1; let last_element_index = parent_request.downloaded_blocks.len() - 1; let parent_hash = parent_request.downloaded_blocks[last_element_index].parent_root; - let req = RecentBeaconBlocksRequest { + let request = RecentBeaconBlocksRequest { block_roots: vec![parent_hash], }; // select a random fully synced peer to attempt to download the parent block let peer_id = self.full_peers.iter().next().expect("List is not empty"); - return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); + recent_blocks_request( + &mut self.network, + &self.log, + peer_id.clone(), + request_id, + request, + ); } } - - None } - fn process_complete_parent_requests(&mut self) -> (bool, Option) { - // flag to determine if there is more process to drive or if the manager can be switched to - // an idle state - let mut re_run = false; + fn process_complete_parent_requests(&mut self) -> bool { + // returned value indicating whether the manager can be switched to idle or not + let mut re_run_poll = false; // Find any parent_requests ready to be processed for completed_request in self .parent_queue .iter_mut() - .filter(|req| req.state == BlockRequestsState::Complete) + .filter(|req| req.state == BlockRequestsState::ReadyToProcess) { // verify the last added block is the parent of the last requested block let last_index = completed_request.downloaded_blocks.len() - 1; @@ -563,7 +815,8 @@ impl ImportManager { "received_block" => format!("{}", block_hash), "expected_parent" => format!("{}", expected_hash), ); - return (true, Some(ImportManagerOutcome::DownvotePeer(peer))); + re_run_poll = true; + downvote_peer(&mut self.network, &self.log, peer); } // try and process the list of blocks up to the requested block @@ -572,72 +825,158 @@ impl ImportManager { .downloaded_blocks .pop() .expect("Block must exist exist"); - match self.chain.process_block(block.clone()) { - Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { - // need to keep looking for parents - completed_request.downloaded_blocks.push(block); - completed_request.state = BlockRequestsState::Queued; - re_run = true; - break; - } - Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} - Ok(outcome) => { - // it's a future slot or an invalid block, remove it and try again - completed_request.failed_attempts += 1; - trace!( - self.log, "Invalid parent block"; - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", completed_request.last_submitted_peer), - ); - completed_request.state = BlockRequestsState::Queued; - re_run = true; - return ( - re_run, - Some(ImportManagerOutcome::DownvotePeer( + + // check if the chain exists + if let Some(chain) = self.chain.upgrade() { + match chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { + // need to keep looking for parents + completed_request.downloaded_blocks.push(block); + completed_request.state = BlockRequestsState::Queued; + re_run_poll = true; + break; + } + Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} + Ok(outcome) => { + // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts += 1; + trace!( + self.log, "Invalid parent block"; + "outcome" => format!("{:?}", outcome), + "peer" => format!("{:?}", completed_request.last_submitted_peer), + ); + completed_request.state = BlockRequestsState::Queued; + re_run_poll = true; + downvote_peer( + &mut self.network, + &self.log, 
completed_request.last_submitted_peer.clone(), - )), - ); - } - Err(e) => { - completed_request.failed_attempts += 1; - warn!( - self.log, "Parent processing error"; - "error" => format!("{:?}", e) - ); - completed_request.state = BlockRequestsState::Queued; - re_run = true; - return ( - re_run, - Some(ImportManagerOutcome::DownvotePeer( + ); + return re_run_poll; + } + Err(e) => { + completed_request.failed_attempts += 1; + warn!( + self.log, "Parent processing error"; + "error" => format!("{:?}", e) + ); + completed_request.state = BlockRequestsState::Queued; + re_run_poll = true; + downvote_peer( + &mut self.network, + &self.log, completed_request.last_submitted_peer.clone(), - )), - ); + ); + return re_run_poll; + } } + } else { + // chain doesn't exist - clear the event queue and return + return false; } } } - // remove any full completed and processed parent chains + // remove any fully processed parent chains self.parent_queue.retain(|req| { - if req.state == BlockRequestsState::Complete { + if req.state == BlockRequestsState::ReadyToProcess { false } else { true } }); - (re_run, None) + re_run_poll } +} - fn process_blocks(&mut self, blocks: Vec>) -> Result<(), String> { - for block in blocks { - let processing_result = self.chain.process_block(block.clone()); +/* Network Context Helper Functions */ + +fn hello_peer( + network: &mut NetworkContext, + log: &slog::Logger, + chain: Weak>, + peer_id: PeerId, +) { + trace!( + log, + "RPC Request"; + "method" => "HELLO", + "peer" => format!("{:?}", peer_id) + ); + if let Some(chain) = chain.upgrade() { + network.send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&chain))); + } +} + +fn request_blocks( + network: &mut NetworkContext, + log: &slog::Logger, + peer_id: PeerId, + request_id: RequestId, + request: BeaconBlocksRequest, +) { + trace!( + log, + "RPC Request"; + "method" => "BeaconBlocks", + "id" => request_id, + "count" => request.count, + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::BeaconBlocks(request), + ); +} + +fn recent_blocks_request( + network: &mut NetworkContext, + log: &slog::Logger, + peer_id: PeerId, + request_id: RequestId, + request: RecentBeaconBlocksRequest, +) { + trace!( + log, + "RPC Request"; + "method" => "RecentBeaconBlocks", + "count" => request.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::RecentBeaconBlocks(request), + ); +} + +fn downvote_peer(network: &mut NetworkContext, log: &slog::Logger, peer_id: PeerId) { + trace!( + log, + "Peer downvoted"; + "peer" => format!("{:?}", peer_id) + ); + // TODO: Implement reputation + network.disconnect(peer_id.clone(), GoodbyeReason::Fault); +} + +// Helper function to process blocks which only consumes the chain and blocks to process +fn process_blocks( + weak_chain: Weak>, + blocks: Vec>, + log: &Logger, +) -> Result<(), String> { + for block in blocks { + if let Some(chain) = weak_chain.upgrade() { + let processing_result = chain.process_block(block.clone()); if let Ok(outcome) = processing_result { match outcome { BlockProcessingOutcome::Processed { block_root } => { // The block was valid and we processed it successfully. 
trace!( - self.log, "Imported block from network"; + log, "Imported block from network"; "slot" => block.slot, "block_root" => format!("{}", block_root), ); @@ -645,7 +984,7 @@ impl ImportManager { BlockProcessingOutcome::ParentUnknown { parent } => { // blocks should be sequential and all parents should exist trace!( - self.log, "ParentBlockUnknown"; + log, "Parent block is unknown"; "parent_root" => format!("{}", parent), "baby_block_slot" => block.slot, ); @@ -654,6 +993,13 @@ impl ImportManager { block.slot )); } + BlockProcessingOutcome::BlockIsAlreadyKnown => { + // this block is already known to us, move to the next + debug!( + log, "Imported a block that is already known"; + "block_slot" => block.slot, + ); + } BlockProcessingOutcome::FutureSlot { present_slot, block_slot, @@ -661,7 +1007,7 @@ impl ImportManager { if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { // The block is too far in the future, drop it. trace!( - self.log, "FutureBlock"; + log, "Block is ahead of our slot clock"; "msg" => "block for future slot rejected, check your time", "present_slot" => present_slot, "block_slot" => block_slot, @@ -674,24 +1020,29 @@ impl ImportManager { } else { // The block is in the future, but not too far. trace!( - self.log, "QueuedFutureBlock"; - "msg" => "queuing future block, check your time", + log, "Block is slightly ahead of our slot clock, ignoring."; "present_slot" => present_slot, "block_slot" => block_slot, "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, ); } } - BlockProcessingOutcome::FinalizedSlot => { + BlockProcessingOutcome::WouldRevertFinalizedSlot { .. } => { trace!( - self.log, "Finalized or earlier block processed"; + log, "Finalized or earlier block processed"; "outcome" => format!("{:?}", outcome), ); // block reached our finalized slot or was earlier, move to the next block } - _ => { + BlockProcessingOutcome::GenesisBlock => { trace!( - self.log, "InvalidBlock"; + log, "Genesis block was processed"; + "outcome" => format!("{:?}", outcome), + ); + } + _ => { + warn!( + log, "Invalid block received"; "msg" => "peer sent invalid block", "outcome" => format!("{:?}", outcome), ); @@ -699,8 +1050,8 @@ impl ImportManager { } } } else { - trace!( - self.log, "BlockProcessingFailure"; + warn!( + log, "BlockProcessingFailure"; "msg" => "unexpected condition in processing block.", "outcome" => format!("{:?}", processing_result) ); @@ -709,17 +1060,96 @@ impl ImportManager { processing_result )); } + } else { + return Ok(()); // terminate early due to dropped beacon chain } - Ok(()) } + + Ok(()) } -fn root_at_slot( - chain: &Arc>, - target_slot: Slot, -) -> Option { - chain - .rev_iter_block_roots() - .find(|(_root, slot)| *slot == target_slot) - .map(|(root, _slot)| root) +impl Future for SyncManager { + type Item = (); + type Error = String; + + fn poll(&mut self) -> Result, Self::Error> { + // process any inbound messages + loop { + match self.input_channel.poll() { + Ok(Async::Ready(Some(message))) => match message { + SyncMessage::AddPeer(peer_id, info) => { + self.add_peer(peer_id, info); + } + SyncMessage::BeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + } => { + self.beacon_blocks_response(peer_id, request_id, beacon_blocks); + } + SyncMessage::RecentBeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + } => { + self.recent_blocks_response(peer_id, request_id, beacon_blocks); + } + SyncMessage::UnknownBlock(peer_id, block) => { + self.add_unknown_block(peer_id, block); + } + SyncMessage::Disconnect(peer_id) => { + 
self.peer_disconnect(&peer_id); } SyncMessage::_RPCError(request_id) => { self.inject_error(request_id); } }, Ok(Async::NotReady) => break, Ok(Async::Ready(None)) => { return Err("Sync manager channel closed".into()); } Err(e) => { return Err(format!("Sync Manager channel error: {:?}", e)); } } } loop { //TODO: Optimize the lookups. Potentially keep state of whether each of these functions //need to be called. let mut re_run = false; // only process batch requests if there are any if !self.import_queue.is_empty() { // process potential block requests self.process_potential_block_requests(); // process any complete long-range batches re_run = re_run || self.process_complete_batches(); } // only process parent objects if we are in regular sync if !self.parent_queue.is_empty() { // process any parent block lookup-requests self.process_parent_requests(); // process any complete parent lookups re_run = re_run || self.process_complete_parent_requests(); } // Shut down the thread if the chain has terminated if let None = self.chain.upgrade() { return Ok(Async::Ready(())); } if !re_run { break; } } // update the state of the manager self.update_state(); return Ok(Async::NotReady); } } diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index b26d78c14..58ec386aa 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -4,7 +4,7 @@ mod manager; /// Stores the various syncing methods for the beacon chain. mod simple_sync; -pub use simple_sync::SimpleSync; +pub use simple_sync::MessageProcessor; /// Currently implemented sync methods. pub enum SyncMethod { diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 573ac9dd1..83aa7ebd2 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,23 +1,26 @@ -use super::manager::{ImportManager, ImportManagerOutcome}; -use crate::service::{NetworkMessage, OutgoingMessage}; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use super::manager::SyncMessage; +use crate::service::NetworkMessage; +use beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, }; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; -use slog::{debug, info, o, trace, warn}; +use slog::{debug, error, info, o, trace, warn}; use ssz::Encode; -use std::ops::Sub; use std::sync::Arc; use store::Store; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; +use tree_hash::SignedRoot; use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; +//TODO: Put a maximum limit on the number of blocks that can be requested. +//TODO: Rate limit requests + /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; -/// The number of slots behind our head that we still treat a peer as a fully synced peer. -const FULL_PEER_TOLERANCE: u64 = 10; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -49,45 +52,63 @@ impl From<&Arc<BeaconChain<T>>> for PeerSyncInfo { } } -/// The current syncing state. -#[derive(PartialEq)] -pub enum SyncState { - _Idle, - _Downloading, - _Stopped, -} - -/// Simple Syncing protocol.
-pub struct SimpleSync { +/// Processes validated messages from the network. It relays necessary data to the syncing thread +/// and processes blocks from the pubsub network. +pub struct MessageProcessor<T: BeaconChainTypes> { /// A reference to the underlying beacon chain. chain: Arc<BeaconChain<T>>, - manager: ImportManager, + /// A channel to the syncing thread. + sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>, + /// A oneshot channel for destroying the sync thread. + _sync_exit: oneshot::Sender<()>, + /// A network context used to return and handle RPC requests. network: NetworkContext, + /// The `RPCHandler` logger. log: slog::Logger, } -impl<T: BeaconChainTypes> SimpleSync<T> { - /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. +impl<T: BeaconChainTypes> MessageProcessor<T> { + /// Instantiate a `MessageProcessor` instance. pub fn new( + executor: &tokio::runtime::TaskExecutor, beacon_chain: Arc<BeaconChain<T>>, network_send: mpsc::UnboundedSender<NetworkMessage>, log: &slog::Logger, ) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); + let sync_network_context = NetworkContext::new(network_send.clone(), sync_logger.clone()); - SimpleSync { - chain: beacon_chain.clone(), - manager: ImportManager::new(beacon_chain, log), + // spawn the sync thread + let (sync_send, _sync_exit) = super::manager::spawn( + executor, + Arc::downgrade(&beacon_chain), + sync_network_context, + sync_logger, + ); + + MessageProcessor { + chain: beacon_chain, + sync_send, + _sync_exit, network: NetworkContext::new(network_send, log.clone()), - log: sync_logger, + log: log.clone(), } } + fn send_to_sync(&mut self, message: SyncMessage<T::EthSpec>) { + self.sync_send.try_send(message).unwrap_or_else(|_| { + warn!( + self.log, + "Could not send message to the sync service"; + ) + }); + } + /// Handle a peer disconnect. /// /// Removes the peer from the manager. pub fn on_disconnect(&mut self, peer_id: PeerId) { - self.manager.peer_disconnect(&peer_id); + self.send_to_sync(SyncMessage::Disconnect(peer_id)); } /// Handle the connection of a new peer. @@ -107,6 +128,7 @@ request_id: RequestId, hello: HelloMessage, ) { + // ignore hello responses if we are shutting down trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. @@ -149,7 +171,7 @@ } else if remote.finalized_epoch <= local.finalized_epoch && remote.finalized_root != Hash256::zero() && local.finalized_root != Hash256::zero() - && (self.root_at_slot(start_slot(remote.finalized_epoch)) + && (self.chain.root_at_slot(start_slot(remote.finalized_epoch)) != Some(remote.finalized_root)) { // The remote's finalized epoch is less than or equal to ours, but the block root is @@ -189,18 +211,16 @@ .exists::<BeaconBlock<T::EthSpec>>(&remote.head_root) .unwrap_or_else(|_| false) { + trace!( + self.log, "Peer with known chain found"; + "peer" => format!("{:?}", peer_id), + "remote_head_slot" => remote.head_slot, + "remote_latest_finalized_epoch" => remote.finalized_epoch, + ); + // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. - if self.chain.best_slot().sub(remote.head_slot).as_u64() < FULL_PEER_TOLERANCE { - self.manager.add_full_peer(peer_id); - self.process_sync(); - } else { - debug!( - self.log, - "Out of sync peer connected"; - "peer" => format!("{:?}", peer_id), - ); - } + self.send_to_sync(SyncMessage::AddPeer(peer_id, remote)); } else { // The remote node has an equal or greater finalized epoch and we don't know its head.
// @@ -212,87 +232,10 @@ impl SimpleSync { "local_finalized_epoch" => local.finalized_epoch, "remote_latest_finalized_epoch" => remote.finalized_epoch, ); - - self.manager.add_peer(peer_id, remote); - self.process_sync(); + self.send_to_sync(SyncMessage::AddPeer(peer_id, remote)); } } - fn process_sync(&mut self) { - loop { - match self.manager.poll() { - ImportManagerOutcome::Hello(peer_id) => { - trace!( - self.log, - "RPC Request"; - "method" => "HELLO", - "peer" => format!("{:?}", peer_id) - ); - self.network.send_rpc_request( - None, - peer_id, - RPCRequest::Hello(hello_message(&self.chain)), - ); - } - ImportManagerOutcome::RequestBlocks { - peer_id, - request_id, - request, - } => { - trace!( - self.log, - "RPC Request"; - "method" => "BeaconBlocks", - "id" => request_id, - "count" => request.count, - "peer" => format!("{:?}", peer_id) - ); - self.network.send_rpc_request( - Some(request_id), - peer_id.clone(), - RPCRequest::BeaconBlocks(request), - ); - } - ImportManagerOutcome::RecentRequest(peer_id, req) => { - trace!( - self.log, - "RPC Request"; - "method" => "RecentBeaconBlocks", - "count" => req.block_roots.len(), - "peer" => format!("{:?}", peer_id) - ); - self.network.send_rpc_request( - None, - peer_id.clone(), - RPCRequest::RecentBeaconBlocks(req), - ); - } - ImportManagerOutcome::DownvotePeer(peer_id) => { - trace!( - self.log, - "Peer downvoted"; - "peer" => format!("{:?}", peer_id) - ); - // TODO: Implement reputation - self.network - .disconnect(peer_id.clone(), GoodbyeReason::Fault); - } - ImportManagerOutcome::Idle => { - // nothing to do - return; - } - } - } - } - - //TODO: Move to beacon chain - fn root_at_slot(&self, target_slot: Slot) -> Option { - self.chain - .rev_iter_block_roots() - .find(|(_root, slot)| *slot == target_slot) - .map(|(root, _slot)| root) - } - /// Handle a `RecentBeaconBlocks` request from the peer. pub fn on_recent_beacon_blocks_request( &mut self, @@ -321,7 +264,7 @@ impl SimpleSync { debug!( self.log, - "BlockBodiesRequest"; + "RecentBeaconBlocksRequest"; "peer" => format!("{:?}", peer_id), "requested" => request.block_roots.len(), "returned" => blocks.len(), @@ -380,18 +323,16 @@ impl SimpleSync { blocks.reverse(); blocks.dedup_by_key(|brs| brs.slot); - if blocks.len() as u64 != req.count { - debug!( - self.log, - "BeaconBlocksRequest response"; - "peer" => format!("{:?}", peer_id), - "msg" => "Failed to return all requested hashes", - "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), - "requested" => req.count, - "returned" => blocks.len(), - ); - } + debug!( + self.log, + "BeaconBlocksRequest response"; + "peer" => format!("{:?}", peer_id), + "msg" => "Failed to return all requested hashes", + "start_slot" => req.start_slot, + "current_slot" => self.chain.slot().unwrap_or_else(|_| Slot::from(0_u64)).as_u64(), + "requested" => req.count, + "returned" => blocks.len(), + ); self.network.send_rpc_response( peer_id, @@ -414,10 +355,11 @@ impl SimpleSync { "count" => beacon_blocks.len(), ); - self.manager - .beacon_blocks_response(peer_id, request_id, beacon_blocks); - - self.process_sync(); + self.send_to_sync(SyncMessage::BeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + }); } /// Handle a `RecentBeaconBlocks` response from the peer. 
@@ -429,15 +371,16 @@ impl SimpleSync { ) { debug!( self.log, - "BeaconBlocksResponse"; + "RecentBeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), "count" => beacon_blocks.len(), ); - self.manager - .recent_blocks_response(peer_id, request_id, beacon_blocks); - - self.process_sync(); + self.send_to_sync(SyncMessage::RecentBeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + }); } /// Process a gossip message declaring a new block. @@ -446,8 +389,8 @@ impl SimpleSync { /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { - if let Ok(outcome) = self.chain.process_block(block.clone()) { - match outcome { + match self.chain.process_block(block.clone()) { + Ok(outcome) => match outcome { BlockProcessingOutcome::Processed { .. } => { trace!(self.log, "Gossipsub block processed"; "peer_id" => format!("{:?}",peer_id)); @@ -455,9 +398,9 @@ impl SimpleSync { } BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block - trace!(self.log, "Unknown parent gossip"; + trace!(self.log, "Block with unknown parent received"; "peer_id" => format!("{:?}",peer_id)); - self.manager.add_unknown_block(block.clone(), peer_id); + self.send_to_sync(SyncMessage::UnknownBlock(peer_id, block.clone())); SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::FutureSlot { @@ -468,10 +411,36 @@ impl SimpleSync { SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, - _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, + other => { + warn!( + self.log, + "Invalid gossip beacon block"; + "outcome" => format!("{:?}", other), + "block root" => format!("{}", Hash256::from_slice(&block.signed_root()[..])), + "block slot" => block.slot + ); + trace!( + self.log, + "Invalid gossip beacon block ssz"; + "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), + ); + SHOULD_NOT_FORWARD_GOSSIP_BLOCK //TODO: Decide if we want to forward these + } + }, + Err(e) => { + error!( + self.log, + "Error processing gossip beacon block"; + "error" => format!("{:?}", e), + "block slot" => block.slot + ); + trace!( + self.log, + "Erroneous gossip beacon block ssz"; + "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), + ); + SHOULD_NOT_FORWARD_GOSSIP_BLOCK } - } else { - SHOULD_NOT_FORWARD_GOSSIP_BLOCK } } @@ -479,27 +448,37 @@ impl SimpleSync { /// /// Not currently implemented. pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { - match self.chain.process_attestation(msg) { - Ok(outcome) => info!( - self.log, - "Processed attestation"; - "source" => "gossip", - "outcome" => format!("{:?}", outcome) - ), + match self.chain.process_attestation(msg.clone()) { + Ok(outcome) => { + info!( + self.log, + "Processed attestation"; + "source" => "gossip", + "outcome" => format!("{:?}", outcome) + ); + + if outcome != AttestationProcessingOutcome::Processed { + trace!( + self.log, + "Invalid gossip attestation ssz"; + "ssz" => format!("0x{}", hex::encode(msg.as_ssz_bytes())), + ); + } + } Err(e) => { - warn!(self.log, "InvalidAttestation"; "source" => "gossip", "error" => format!("{:?}", e)) + trace!( + self.log, + "Erroneous gossip attestation ssz"; + "ssz" => format!("0x{}", hex::encode(msg.as_ssz_bytes())), + ); + error!(self.log, "Invalid gossip attestation"; "error" => format!("{:?}", e)); } } } - - /// Generates our current state in the form of a HELLO RPC message. 
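The gossip-block match above encodes a simple forwarding policy: blocks that are processed, have an unknown parent, arrive for a future slot, or are already known are still re-gossiped to peers, while every other outcome is dropped (with the block's SSZ trace-logged for debugging). The same rule, restated as a stand-alone function over simplified outcome variants:

// Simplified mirror of the BlockProcessingOutcome variants matched in on_block_gossip.
enum Outcome {
    Processed,
    ParentUnknown,
    FutureSlot,
    BlockIsAlreadyKnown,
    OtherInvalid,
}

const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true;
const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false;

fn should_forward(outcome: &Outcome) -> bool {
    match outcome {
        Outcome::Processed
        | Outcome::ParentUnknown
        | Outcome::FutureSlot
        | Outcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK,
        Outcome::OtherInvalid => SHOULD_NOT_FORWARD_GOSSIP_BLOCK,
    }
}

fn main() {
    assert!(should_forward(&Outcome::FutureSlot));
    assert!(!should_forward(&Outcome::OtherInvalid));
}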
- pub fn generate_hello(&self) -> HelloMessage { - hello_message(&self.chain) - } } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. -fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { +pub(crate) fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { let state = &beacon_chain.head().beacon_state; HelloMessage { @@ -525,6 +504,12 @@ impl NetworkContext { } pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + warn!( + &self.log, + "Disconnecting peer (RPC)"; + "reason" => format!("{:?}", reason), + "peer_id" => format!("{:?}", peer_id), + ); self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) // TODO: disconnect peers. } @@ -554,12 +539,8 @@ impl NetworkContext { } fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.send(peer_id, OutgoingMessage::RPC(rpc_event)) - } - - fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { self.network_send - .try_send(NetworkMessage::Send(peer_id, outgoing_message)) + .try_send(NetworkMessage::RPC(peer_id, rpc_event)) .unwrap_or_else(|_| { warn!( self.log, diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index cc69faec9..7ea21eeba 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -14,20 +14,26 @@ store = { path = "../store" } version = { path = "../version" } serde = { version = "1.0", features = ["derive"] } serde_json = "^1.0" +serde_yaml = "0.8" slog = "^2.2.3" slog-term = "^2.4.0" slog-async = "^2.3.0" +eth2_ssz = { path = "../../eth2/utils/ssz" } +eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" } state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } clap = "2.32.0" http = "^0.1.17" prometheus = { version = "^0.6", features = ["process"] } -hyper = "0.12.32" -futures = "0.1" +hyper = "0.12.34" exit-future = "0.1.3" tokio = "0.1.17" url = "2.0" lazy_static = "1.3.0" +eth2_config = { path = "../../eth2/utils/eth2_config" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } hex = "0.3.2" +parking_lot = "0.9" +futures = "0.1.25" + diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 1c66a2819..c1a9da6ee 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,17 +1,25 @@ -use super::{success_response, ApiResult}; -use crate::{helpers::*, ApiError, UrlQuery}; +use crate::helpers::*; +use crate::response_builder::ResponseBuilder; +use crate::{ApiError, ApiResult, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use serde::Serialize; +use ssz_derive::Encode; use std::sync::Arc; use store::Store; -use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; +use types::{BeaconBlock, BeaconState, Epoch, EthSpec, Hash256, Slot, Validator}; -#[derive(Serialize)] +#[derive(Serialize, Encode)] pub struct HeadResponse { pub slot: Slot, pub block_root: Hash256, pub state_root: Hash256, + pub finalized_slot: Slot, + pub finalized_block_root: Hash256, + pub justified_slot: Slot, + pub justified_block_root: Hash256, + pub previous_justified_slot: Slot, + pub previous_justified_block_root: Hash256, } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. 
@@ -21,19 +29,36 @@ pub fn get_head(req: Request) -> ApiResult .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let chain_head = beacon_chain.head(); + let head = HeadResponse { - slot: beacon_chain.head().beacon_state.slot, - block_root: beacon_chain.head().beacon_block_root, - state_root: beacon_chain.head().beacon_state_root, + slot: chain_head.beacon_state.slot, + block_root: chain_head.beacon_block_root, + state_root: chain_head.beacon_state_root, + finalized_slot: chain_head + .beacon_state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + finalized_block_root: chain_head.beacon_state.finalized_checkpoint.root, + justified_slot: chain_head + .beacon_state + .current_justified_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + justified_block_root: chain_head.beacon_state.current_justified_checkpoint.root, + previous_justified_slot: chain_head + .beacon_state + .previous_justified_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, }; - let json: String = serde_json::to_string(&head) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req)?.body(&head) } -#[derive(Serialize)] +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct BlockResponse { pub root: Hash256, @@ -55,7 +80,7 @@ pub fn get_block(req: Request) -> ApiResult let target = parse_slot(&value)?; block_root_at_slot(&beacon_chain, target).ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) })? } ("root", value) => parse_root(&value)?, @@ -67,7 +92,7 @@ pub fn get_block(req: Request) -> ApiResult .get::>(&block_root)? .ok_or_else(|| { ApiError::NotFound(format!( - "Unable to find BeaconBlock for root {}", + "Unable to find BeaconBlock for root {:?}", block_root )) })?; @@ -77,34 +102,62 @@ pub fn get_block(req: Request) -> ApiResult beacon_block: block, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req)?.body(&response) } /// HTTP handler to return a `BeaconBlock` root at a given `slot`. pub fn get_block_root(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let target = parse_slot(&slot_string)?; let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) })?; - let json: String = serde_json::to_string(&root) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req)?.body(&root) } -#[derive(Serialize)] +/// HTTP handler to return the `Fork` of the current head. 
+pub fn get_fork(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + ResponseBuilder::new(&req)?.body(&beacon_chain.head().beacon_state.fork) +} + +/// HTTP handler to return the set of validators for an `Epoch` +/// +/// The `Epoch` parameter can be any epoch number. If it is not specified, +/// the current epoch is assumed. +pub fn get_validators(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + + let epoch = match UrlQuery::from_request(&req) { + // We have some parameters, so make sure it's the epoch one and parse it + Ok(query) => query + .only_one("epoch")? + .parse::() + .map(Epoch::from) + .map_err(|e| { + ApiError::BadRequest(format!("Invalid epoch parameter, must be a u64. {:?}", e)) + })?, + // In this case, our url query did not contain any parameters, so we take the default + Err(_) => beacon_chain.epoch().map_err(|e| { + ApiError::ServerError(format!("Unable to determine current epoch: {:?}", e)) + })?, + }; + + let all_validators = &beacon_chain.head().beacon_state.validators; + let active_vals: Vec = all_validators + .iter() + .filter(|v| v.is_active_at(epoch)) + .cloned() + .collect(); + + ResponseBuilder::new(&req)?.body(&active_vals) +} + +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct StateResponse { pub root: Hash256, @@ -116,13 +169,23 @@ pub struct StateResponse { /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. pub fn get_state(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = beacon_chain.head().beacon_state; - let query_params = ["root", "slot"]; - let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; + let (key, value) = match UrlQuery::from_request(&req) { + Ok(query) => { + // We have *some* parameters, just check them. + let query_params = ["root", "slot"]; + query.first_of(&query_params)? + } + Err(ApiError::BadRequest(_)) => { + // No parameters provided at all, use current slot. + (String::from("slot"), head_state.slot.to_string()) + } + Err(e) => { + return Err(e); + } + }; let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, @@ -132,7 +195,7 @@ pub fn get_state(req: Request) -> ApiResult let state = beacon_chain .store .get(root)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?; + .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; (*root, state) } @@ -144,11 +207,7 @@ pub fn get_state(req: Request) -> ApiResult beacon_state: state, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req)?.body(&response) } /// HTTP handler to return a `BeaconState` root at a given `slot`. @@ -156,39 +215,33 @@ pub fn get_state(req: Request) -> ApiResult /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
pub fn get_state_root(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let slot = parse_slot(&slot_string)?; let root = state_root_at_slot(&beacon_chain, slot)?; - let json: String = serde_json::to_string(&root) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req)?.body(&root) } /// HTTP handler to return the highest finalized slot. -pub fn get_latest_finalized_checkpoint( +pub fn get_current_finalized_checkpoint( req: Request, ) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = beacon_chain.head().beacon_state; - let checkpoint = beacon_chain - .head() - .beacon_state - .finalized_checkpoint - .clone(); + let checkpoint = head_state.finalized_checkpoint.clone(); - let json: String = serde_json::to_string(&checkpoint) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req)?.body(&checkpoint) +} + +/// HTTP handler to return a `BeaconState` at the genesis block. +pub fn get_genesis_state(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + + let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; + + ResponseBuilder::new(&req)?.body(&state) } diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs index 90ac0821b..c262a128a 100644 --- a/beacon_node/rest_api/src/config.rs +++ b/beacon_node/rest_api/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: true, // rest_api enabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5052, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("api") { - self.enabled = true; + if args.is_present("no-api") { + self.enabled = false; } if let Some(rpc_address) = args.value_of("api-address") { diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs new file mode 100644 index 000000000..9f815a7d3 --- /dev/null +++ b/beacon_node/rest_api/src/error.rs @@ -0,0 +1,86 @@ +use crate::BoxFut; +use hyper::{Body, Response, StatusCode}; +use std::error::Error as StdError; + +#[derive(PartialEq, Debug, Clone)] +pub enum ApiError { + MethodNotAllowed(String), + ServerError(String), + NotImplemented(String), + BadRequest(String), + NotFound(String), + UnsupportedType(String), + ImATeapot(String), // Just in case. + ProcessingError(String), // A 202 error, for when a block/attestation cannot be processed, but still transmitted. 
+} + +pub type ApiResult = Result, ApiError>; + +impl ApiError { + pub fn status_code(self) -> (StatusCode, String) { + match self { + ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), + ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), + ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), + ApiError::BadRequest(desc) => (StatusCode::BAD_REQUEST, desc), + ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), + ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc), + ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), + ApiError::ProcessingError(desc) => (StatusCode::ACCEPTED, desc), + } + } +} + +impl Into> for ApiError { + fn into(self) -> Response { + let status_code = self.status_code(); + Response::builder() + .status(status_code.0) + .header("content-type", "text/plain; charset=utf-8") + .body(Body::from(status_code.1)) + .expect("Response should always be created.") + } +} + +impl Into for ApiError { + fn into(self) -> BoxFut { + Box::new(futures::future::err(self)) + } +} + +impl From for ApiError { + fn from(e: store::Error) -> ApiError { + ApiError::ServerError(format!("Database error: {:?}", e)) + } +} + +impl From for ApiError { + fn from(e: types::BeaconStateError) -> ApiError { + ApiError::ServerError(format!("BeaconState error: {:?}", e)) + } +} + +impl From for ApiError { + fn from(e: state_processing::per_slot_processing::Error) -> ApiError { + ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e)) + } +} + +impl From for ApiError { + fn from(e: hyper::error::Error) -> ApiError { + ApiError::ServerError(format!("Networking error: {:?}", e)) + } +} + +impl StdError for ApiError { + fn cause(&self) -> Option<&dyn StdError> { + None + } +} + +impl std::fmt::Display for ApiError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let status = self.clone().status_code(); + write!(f, "{:?}: {:?}", status.0, status.1) + } +} diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 88755fcde..a711246b0 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -1,12 +1,20 @@ use crate::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; +use eth2_libp2p::{PubsubMessage, Topic}; +use eth2_libp2p::{ + BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX, +}; use hex; -use hyper::{Body, Request, StatusCode}; -use serde::de::value::StringDeserializer; -use serde_json::Deserializer; +use http::header; +use hyper::{Body, Request}; +use network::NetworkMessage; +use parking_lot::RwLock; +use ssz::Encode; +use std::sync::Arc; use store::{iter::AncestorIter, Store}; -use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; +use tokio::sync::mpsc; +use types::{Attestation, BeaconBlock, BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; /// Parse a slot from a `0x` prefixed string. /// @@ -15,7 +23,22 @@ pub fn parse_slot(string: &str) -> Result { string .parse::() .map(Slot::from) - .map_err(|e| ApiError::InvalidQueryParams(format!("Unable to parse slot: {:?}", e))) + .map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e))) +} + +/// Checks the provided request to ensure that the `content-type` header is valid. +/// +/// The content-type header should either be omitted, in which case JSON is assumed, or it should +/// explicitly specify `application/json`.
If anything else is provided, an error is returned. +pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> { + match req.headers().get(header::CONTENT_TYPE) { + Some(h) if h == "application/json" => Ok(()), + Some(h) => Err(ApiError::BadRequest(format!( + "The provided content-type {:?} is not available, this endpoint only supports json.", + h + ))), + _ => Ok(()), + } } /// Parse a root from a `0x` prefixed string. @@ -28,9 +51,9 @@ pub fn parse_root(string: &str) -> Result { let trimmed = string.trim_start_matches(PREFIX); trimmed .parse() - .map_err(|e| ApiError::InvalidQueryParams(format!("Unable to parse root: {:?}", e))) + .map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e))) } else { - Err(ApiError::InvalidQueryParams( + Err(ApiError::BadRequest( "Root must have a '0x' prefix".to_string(), )) } } @@ -41,13 +64,13 @@ pub fn parse_pubkey(string: &str) -> Result { const PREFIX: &str = "0x"; if string.starts_with(PREFIX) { let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX)) - .map_err(|e| ApiError::InvalidQueryParams(format!("Invalid hex string: {:?}", e)))?; + .map_err(|e| ApiError::BadRequest(format!("Invalid hex string: {:?}", e)))?; let pubkey = PublicKey::from_bytes(pubkey_bytes.as_slice()).map_err(|e| { - ApiError::InvalidQueryParams(format!("Unable to deserialize public key: {:?}.", e)) + ApiError::BadRequest(format!("Unable to deserialize public key: {:?}.", e)) })?; return Ok(pubkey); } else { - return Err(ApiError::InvalidQueryParams( + return Err(ApiError::BadRequest( "Public key must have a '0x' prefix".to_string(), )); } } @@ -110,8 +133,8 @@ pub fn state_root_at_slot( ) -> Result { let head_state = &beacon_chain.head().beacon_state; let current_slot = beacon_chain - .read_slot_clock() - .ok_or_else(|| ApiError::ServerError("Unable to read slot clock".to_string()))?; + .slot() + .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; // There are four scenarios when obtaining a state for a given slot: // @@ -124,7 +147,7 @@ // // We could actually speculate about future state roots by skipping slots, however that's // likely to cause confusion for API users. - Err(ApiError::InvalidQueryParams(format!( + Err(ApiError::BadRequest(format!( "Requested slot {} is past the current slot {}", slot, current_slot ))) @@ -171,6 +194,78 @@ pub fn implementation_pending_response(_req: Request) -> ApiResult { )) } +pub fn get_beacon_chain_from_request( + req: &Request, +) -> Result<(Arc>), ApiError> { + // Get beacon state + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?; + + Ok(beacon_chain.clone()) +} + +pub fn get_logger_from_request(req: &Request) -> slog::Logger { + let log = req + .extensions() + .get::() + .expect("Should always get the logger from the request, since we put it in there."); + log.to_owned() +} + +pub fn publish_beacon_block_to_network( + chan: Arc>>, + block: BeaconBlock, +) -> Result<(), ApiError> { + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); + let message = PubsubMessage::Block(block.as_ssz_bytes()); + + // Publish the block to the p2p network via gossipsub.
+ if let Err(e) = chan.write().try_send(NetworkMessage::Publish { + topics: vec![topic], + message, + }) { + return Err(ApiError::ServerError(format!( + "Unable to send new block to network: {:?}", + e + ))); + } + + Ok(()) +} + +pub fn publish_attestation_to_network( + chan: Arc>>, + attestation: Attestation, +) -> Result<(), ApiError> { + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); + let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); + + // Publish the attestation to the p2p network via gossipsub. + if let Err(e) = chan.write().try_send(NetworkMessage::Publish { + topics: vec![topic], + message, + }) { + return Err(ApiError::ServerError(format!( + "Unable to send new attestation to network: {:?}", + e + ))); + } + + Ok(()) +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index b943a1d45..133fc3a26 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,76 +1,192 @@ #[macro_use] +mod macros; +#[macro_use] extern crate lazy_static; extern crate network as client_network; mod beacon; mod config; +mod error; mod helpers; mod metrics; mod network; mod node; +mod response_builder; mod spec; mod url_query; mod validator; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use client_network::NetworkMessage; use client_network::Service as NetworkService; +use error::{ApiError, ApiResult}; +use eth2_config::Eth2Config; +use futures::future::IntoFuture; use hyper::rt::Future; -use hyper::service::service_fn_ok; -use hyper::{Body, Method, Response, Server, StatusCode}; +use hyper::service::Service; +use hyper::{Body, Method, Request, Response, Server}; +use parking_lot::RwLock; use slog::{info, o, warn}; use std::ops::Deref; use std::path::PathBuf; use std::sync::Arc; use tokio::runtime::TaskExecutor; +use tokio::sync::mpsc; use url_query::UrlQuery; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; pub use config::Config as ApiConfig; -#[derive(PartialEq, Debug)] -pub enum ApiError { - MethodNotAllowed(String), - ServerError(String), - NotImplemented(String), - InvalidQueryParams(String), - NotFound(String), - ImATeapot(String), // Just in case. 
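Both publish helpers above build the gossipsub topic as `/{TOPIC_PREFIX}/{topic}/{TOPIC_ENCODING_POSTFIX}` before handing the SSZ-encoded message to the network channel. A stand-alone sketch of the topic construction, using assumed literal values (the real constants live in `eth2_libp2p` and are not shown in this diff):

// Assumed values; the canonical constants are defined in eth2_libp2p.
const TOPIC_PREFIX: &str = "eth2";
const BEACON_BLOCK_TOPIC: &str = "beacon_block";
const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation";
const TOPIC_ENCODING_POSTFIX: &str = "ssz";

fn topic_string(topic: &str) -> String {
    format!("/{}/{}/{}", TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX)
}

fn main() {
    // e.g. "/eth2/beacon_block/ssz" and "/eth2/beacon_attestation/ssz"
    println!("{}", topic_string(BEACON_BLOCK_TOPIC));
    println!("{}", topic_string(BEACON_ATTESTATION_TOPIC));
}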
+type BoxFut = Box, Error = ApiError> + Send>; + +pub struct ApiService { + log: slog::Logger, + beacon_chain: Arc>, + db_path: DBPath, + network_service: Arc>, + network_channel: Arc>>, + eth2_config: Arc, } -pub type ApiResult = Result, ApiError>; +fn into_boxfut(item: F) -> BoxFut +where + F: IntoFuture, Error = ApiError>, + F::Future: Send, +{ + Box::new(item.into_future()) +} -impl Into> for ApiError { - fn into(self) -> Response { - let status_code: (StatusCode, String) = match self { - ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), - ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), - ApiError::InvalidQueryParams(desc) => (StatusCode::BAD_REQUEST, desc), - ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), - ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), +impl Service for ApiService { + type ReqBody = Body; + type ResBody = Body; + type Error = ApiError; + type Future = BoxFut; + + fn call(&mut self, mut req: Request) -> Self::Future { + metrics::inc_counter(&metrics::REQUEST_COUNT); + let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); + + // Add all the useful bits into the request, so that we can pull them out in the individual + // functions. + req.extensions_mut() + .insert::(self.log.clone()); + req.extensions_mut() + .insert::>>(self.beacon_chain.clone()); + req.extensions_mut().insert::(self.db_path.clone()); + req.extensions_mut() + .insert::>>(self.network_service.clone()); + req.extensions_mut() + .insert::>>>( + self.network_channel.clone(), + ); + req.extensions_mut() + .insert::>(self.eth2_config.clone()); + + let path = req.uri().path().to_string(); + + // Route the request to the correct handler. 
+ let result = match (req.method(), path.as_ref()) { + // Methods for Client + (&Method::GET, "/node/version") => into_boxfut(node::get_version(req)), + (&Method::GET, "/node/genesis_time") => into_boxfut(node::get_genesis_time::(req)), + (&Method::GET, "/node/syncing") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + + // Methods for Network + (&Method::GET, "/network/enr") => into_boxfut(network::get_enr::(req)), + (&Method::GET, "/network/peer_count") => into_boxfut(network::get_peer_count::(req)), + (&Method::GET, "/network/peer_id") => into_boxfut(network::get_peer_id::(req)), + (&Method::GET, "/network/peers") => into_boxfut(network::get_peer_list::(req)), + (&Method::GET, "/network/listen_port") => { + into_boxfut(network::get_listen_port::(req)) + } + (&Method::GET, "/network/listen_addresses") => { + into_boxfut(network::get_listen_addresses::(req)) + } + + // Methods for Beacon Node + (&Method::GET, "/beacon/head") => into_boxfut(beacon::get_head::(req)), + (&Method::GET, "/beacon/block") => into_boxfut(beacon::get_block::(req)), + (&Method::GET, "/beacon/block_root") => into_boxfut(beacon::get_block_root::(req)), + (&Method::GET, "/beacon/blocks") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + (&Method::GET, "/beacon/fork") => into_boxfut(beacon::get_fork::(req)), + (&Method::GET, "/beacon/attestations") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + (&Method::GET, "/beacon/attestations/pending") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + + (&Method::GET, "/beacon/validators") => into_boxfut(beacon::get_validators::(req)), + (&Method::GET, "/beacon/validators/indicies") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + (&Method::GET, "/beacon/validators/pubkeys") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + + // Methods for Validator + (&Method::GET, "/beacon/validator/duties") => { + into_boxfut(validator::get_validator_duties::(req)) + } + (&Method::GET, "/beacon/validator/block") => { + into_boxfut(validator::get_new_beacon_block::(req)) + } + (&Method::POST, "/beacon/validator/block") => validator::publish_beacon_block::(req), + (&Method::GET, "/beacon/validator/attestation") => { + into_boxfut(validator::get_new_attestation::(req)) + } + (&Method::POST, "/beacon/validator/attestation") => { + validator::publish_attestation::(req) + } + + (&Method::GET, "/beacon/state") => into_boxfut(beacon::get_state::(req)), + (&Method::GET, "/beacon/state_root") => into_boxfut(beacon::get_state_root::(req)), + (&Method::GET, "/beacon/state/current_finalized_checkpoint") => { + into_boxfut(beacon::get_current_finalized_checkpoint::(req)) + } + (&Method::GET, "/beacon/state/genesis") => { + into_boxfut(beacon::get_genesis_state::(req)) + } + //TODO: Add aggregate/filtered state lookups here, e.g.
/beacon/validators/balances + + // Methods for bootstrap and checking configuration + (&Method::GET, "/spec") => into_boxfut(spec::get_spec::(req)), + (&Method::GET, "/spec/slots_per_epoch") => { + into_boxfut(spec::get_slots_per_epoch::(req)) + } + (&Method::GET, "/spec/deposit_contract") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + (&Method::GET, "/spec/eth2_config") => into_boxfut(spec::get_eth2_config::(req)), + + (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::(req)), + + _ => Box::new(futures::future::err(ApiError::NotFound( + "Request path and/or method not found.".to_owned(), + ))), }; - Response::builder() - .status(status_code.0) - .body(Body::from(status_code.1)) - .expect("Response should always be created.") - } -} -impl From for ApiError { - fn from(e: store::Error) -> ApiError { - ApiError::ServerError(format!("Database error: {:?}", e)) - } -} + let response = match result.wait() { + // Return the `hyper::Response`. + Ok(response) => { + metrics::inc_counter(&metrics::SUCCESS_COUNT); + slog::debug!(self.log, "Request successful: {:?}", path); + response + } + // Map the `ApiError` into `hyper::Response`. + Err(e) => { + slog::debug!(self.log, "Request failure: {:?}", path); + e.into() + } + }; -impl From for ApiError { - fn from(e: types::BeaconStateError) -> ApiError { - ApiError::ServerError(format!("BeaconState error: {:?}", e)) - } -} + metrics::stop_timer(timer); -impl From for ApiError { - fn from(e: state_processing::per_slot_processing::Error) -> ApiError { - ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e)) + Box::new(futures::future::ok(response)) } } @@ -79,7 +195,9 @@ pub fn start_server( executor: &TaskExecutor, beacon_chain: Arc>, network_service: Arc>, + network_chan: mpsc::UnboundedSender, db_path: PathBuf, + eth2_config: Eth2Config, log: &slog::Logger, ) -> Result { let log = log.new(o!("Service" => "Api")); @@ -101,113 +219,16 @@ pub fn start_server( // Clone our stateful objects, for use in service closure. let server_log = log.clone(); let server_bc = beacon_chain.clone(); + let eth2_config = Arc::new(eth2_config); - let service = move || { - let log = server_log.clone(); - let beacon_chain = server_bc.clone(); - let db_path = db_path.clone(); - let network_service = network_service.clone(); - - // Create a simple handler for the router, inject our stateful objects into the request. - service_fn_ok(move |mut req| { - metrics::inc_counter(&metrics::REQUEST_COUNT); - let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); - - req.extensions_mut().insert::(log.clone()); - req.extensions_mut() - .insert::>>(beacon_chain.clone()); - req.extensions_mut().insert::(db_path.clone()); - req.extensions_mut() - .insert::>>(network_service.clone()); - - let path = req.uri().path().to_string(); - - // Route the request to the correct handler. - let result = match (req.method(), path.as_ref()) { - // Methods for Beacon Node - //TODO: Remove? - //(&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), - (&Method::GET, "/beacon/head") => beacon::get_head::(req), - (&Method::GET, "/beacon/block") => beacon::get_block::(req), - (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), - //TODO Is the below replaced by finalized_checkpoint? 
- (&Method::GET, "/beacon/chainhead") => { - helpers::implementation_pending_response(req) - } - (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), - (&Method::GET, "/beacon/latest_finalized_checkpoint") => { - beacon::get_latest_finalized_checkpoint::(req) - } - (&Method::GET, "/beacon/state") => beacon::get_state::(req), - (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), - - //TODO: Add aggreggate/filtered state lookups here, e.g. /beacon/validators/balances - - // Methods for Client - (&Method::GET, "/metrics") => metrics::get_prometheus::(req), - (&Method::GET, "/network/enr") => network::get_enr::(req), - (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), - (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), - (&Method::GET, "/network/peers") => network::get_peer_list::(req), - (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), - (&Method::GET, "/network/listen_addresses") => { - network::get_listen_addresses::(req) - } - (&Method::GET, "/node/version") => node::get_version(req), - (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), - (&Method::GET, "/node/deposit_contract") => { - helpers::implementation_pending_response(req) - } - (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), - (&Method::GET, "/node/fork") => helpers::implementation_pending_response(req), - - // Methods for Network - (&Method::GET, "/network/enr") => network::get_enr::(req), - (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), - (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), - (&Method::GET, "/network/peers") => network::get_peer_list::(req), - (&Method::GET, "/network/listen_addresses") => { - network::get_listen_addresses::(req) - } - - // Methods for Validator - (&Method::GET, "/validator/duties") => validator::get_validator_duties::(req), - (&Method::GET, "/validator/block") => helpers::implementation_pending_response(req), - (&Method::POST, "/validator/block") => { - helpers::implementation_pending_response(req) - } - (&Method::GET, "/validator/attestation") => { - helpers::implementation_pending_response(req) - } - (&Method::POST, "/validator/attestation") => { - helpers::implementation_pending_response(req) - } - - (&Method::GET, "/spec") => spec::get_spec::(req), - (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), - - _ => Err(ApiError::NotFound( - "Request path and/or method not found.".to_owned(), - )), - }; - - let response = match result { - // Return the `hyper::Response`. - Ok(response) => { - metrics::inc_counter(&metrics::SUCCESS_COUNT); - slog::debug!(log, "Request successful: {:?}", path); - response - } - // Map the `ApiError` into `hyper::Response`. 
- Err(e) => { - slog::debug!(log, "Request failure: {:?}", path); - e.into() - } - }; - - metrics::stop_timer(timer); - - response + let service = move || -> futures::future::FutureResult, String> { + futures::future::ok(ApiService { + log: server_log.clone(), + beacon_chain: server_bc.clone(), + db_path: db_path.clone(), + network_service: network_service.clone(), + network_channel: Arc::new(RwLock::new(network_chan.clone())), + eth2_config: eth2_config.clone(), }) }; @@ -217,16 +238,16 @@ pub fn start_server( .with_graceful_shutdown(server_exit) .map_err(move |e| { warn!( - log_clone, - "API failed to start, Unable to bind"; "address" => format!("{:?}", e) + log_clone, + "API failed to start, Unable to bind"; "address" => format!("{:?}", e) ) }); info!( - log, - "REST API started"; - "address" => format!("{}", config.listen_address), - "port" => config.port, + log, + "REST API started"; + "address" => format!("{}", config.listen_address), + "port" => config.port, ); executor.spawn(server); @@ -234,13 +255,6 @@ pub fn start_server( Ok(exit_signal) } -fn success_response(body: Body) -> Response { - Response::builder() - .status(StatusCode::OK) - .body(body) - .expect("We should always be able to make response from the success body.") -} - #[derive(Clone)] pub struct DBPath(PathBuf); diff --git a/beacon_node/rest_api/src/macros.rs b/beacon_node/rest_api/src/macros.rs new file mode 100644 index 000000000..e95cfb8ae --- /dev/null +++ b/beacon_node/rest_api/src/macros.rs @@ -0,0 +1,13 @@ +macro_rules! try_future { + ($expr:expr) => { + match $expr { + core::result::Result::Ok(val) => val, + core::result::Result::Err(err) => { + return Box::new(futures::future::err(std::convert::From::from(err))) + } + } + }; + ($expr:expr,) => { + $crate::try_future!($expr) + }; +} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 064359337..e9d98434e 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,8 +1,9 @@ -use crate::{success_response, ApiError, ApiResult, DBPath}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::helpers::get_beacon_chain_from_request; +use crate::response_builder::ResponseBuilder; +use crate::{ApiError, ApiResult, DBPath}; +use beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; -use std::sync::Arc; pub use lighthouse_metrics::*; @@ -30,10 +31,7 @@ pub fn get_prometheus(req: Request) -> ApiR let mut buffer = vec![]; let encoder = TextEncoder::new(); - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let db_path = req .extensions() .get::() @@ -64,6 +62,6 @@ pub fn get_prometheus(req: Request) -> ApiR .unwrap(); String::from_utf8(buffer) - .map(|string| success_response(Body::from(string))) - .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) + .map(|string| ResponseBuilder::new(&req)?.body_text(string)) + .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))? 
} diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index a3e4c5ee7..f193ef8ea 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -1,108 +1,78 @@ -use crate::{success_response, ApiError, ApiResult, NetworkService}; +use crate::error::ApiResult; +use crate::response_builder::ResponseBuilder; +use crate::NetworkService; use beacon_chain::BeaconChainTypes; -use eth2_libp2p::{Enr, Multiaddr, PeerId}; +use eth2_libp2p::{Multiaddr, PeerId}; use hyper::{Body, Request}; use std::sync::Arc; -/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// HTTP handler to return the list of libp2p multiaddr the client is listening on. /// /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. pub fn get_listen_addresses(req: Request) -> ApiResult { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; - + .expect("The network service should always be there, we put it there"); let multiaddresses: Vec = network.listen_multiaddrs(); - - Ok(success_response(Body::from( - serde_json::to_string(&multiaddresses) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + ResponseBuilder::new(&req)?.body_no_ssz(&multiaddresses) } -/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// HTTP handler to return the network port the client is listening on. /// -/// Returns a list of `Multiaddr`, serialized according to their `serde` impl. +/// Returns the TCP port number in its plain form (which is also valid JSON serialization) pub fn get_listen_port(req: Request) -> ApiResult { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; - - Ok(success_response(Body::from( - serde_json::to_string(&network.listen_port()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize port: {:?}", e)))?, - ))) + .expect("The network service should always be there, we put it there") + .clone(); + ResponseBuilder::new(&req)?.body(&network.listen_port()) } -/// HTTP handle to return the Discv5 ENR from the client's libp2p service. +/// HTTP handler to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. pub fn get_enr(req: Request) -> ApiResult { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; - - let enr: Enr = network.local_enr(); - - Ok(success_response(Body::from( - serde_json::to_string(&enr.to_base64()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + .expect("The network service should always be there, we put it there"); + ResponseBuilder::new(&req)?.body_no_ssz(&network.local_enr().to_base64()) } -/// HTTP handle to return the `PeerId` from the client's libp2p service. +/// HTTP handler to return the `PeerId` from the client's libp2p service. /// /// PeerId is encoded as base58 string. 
pub fn get_peer_id(req: Request) -> ApiResult { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; - - let peer_id: PeerId = network.local_peer_id(); - - Ok(success_response(Body::from( - serde_json::to_string(&peer_id.to_base58()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + .expect("The network service should always be there, we put it there"); + ResponseBuilder::new(&req)?.body_no_ssz(&network.local_peer_id().to_base58()) } -/// HTTP handle to return the number of peers connected in the client's libp2p service. +/// HTTP handler to return the number of peers connected in the client's libp2p service. pub fn get_peer_count(req: Request) -> ApiResult { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; - - let connected_peers: usize = network.connected_peers(); - - Ok(success_response(Body::from( - serde_json::to_string(&connected_peers) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + .expect("The network service should always be there, we put it there"); + ResponseBuilder::new(&req)?.body(&network.connected_peers()) } -/// HTTP handle to return the list of peers connected to the client's libp2p service. +/// HTTP handler to return the list of peers connected to the client's libp2p service. /// /// Peers are presented as a list of `PeerId::to_string()`. pub fn get_peer_list(req: Request) -> ApiResult { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; - + .expect("The network service should always be there, we put it there"); let connected_peers: Vec = network .connected_peer_set() .iter() .map(PeerId::to_string) .collect(); - - Ok(success_response(Body::from( - serde_json::to_string(&connected_peers).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize Vec: {:?}", e)) - })?, - ))) + ResponseBuilder::new(&req)?.body_no_ssz(&connected_peers) } diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 4dbd41229..882edcfd5 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,25 +1,17 @@ -use crate::{success_response, ApiResult}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::helpers::get_beacon_chain_from_request; +use crate::response_builder::ResponseBuilder; +use crate::ApiResult; +use beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; -use std::sync::Arc; use version; /// Read the version string from the current Lighthouse build. -pub fn get_version(_req: Request) -> ApiResult { - let body = Body::from( - serde_json::to_string(&version::version()) - .expect("Version should always be serialializable as JSON."), - ); - Ok(success_response(body)) +pub fn get_version(req: Request) -> ApiResult { + ResponseBuilder::new(&req)?.body_no_ssz(&version::version()) } /// Read the genesis time from the current beacon chain state. 
pub fn get_genesis_time(req: Request) -> ApiResult { - let beacon_chain = req.extensions().get::>>().unwrap(); - let gen_time: u64 = beacon_chain.head().beacon_state.genesis_time; - let body = Body::from( - serde_json::to_string(&gen_time) - .expect("Genesis should time always have a valid JSON serialization."), - ); - Ok(success_response(body)) + let beacon_chain = get_beacon_chain_from_request::(&req)?; + ResponseBuilder::new(&req)?.body(&beacon_chain.head().beacon_state.genesis_time) } diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs new file mode 100644 index 000000000..d5b530f8a --- /dev/null +++ b/beacon_node/rest_api/src/response_builder.rs @@ -0,0 +1,99 @@ +use super::{ApiError, ApiResult}; +use http::header; +use hyper::{Body, Request, Response, StatusCode}; +use serde::Serialize; +use ssz::Encode; + +pub enum Encoding { + JSON, + SSZ, + YAML, + TEXT, +} + +pub struct ResponseBuilder { + encoding: Encoding, +} + +impl ResponseBuilder { + pub fn new(req: &Request) -> Result { + let content_header: String = req + .headers() + .get(header::CONTENT_TYPE) + .map_or(Ok(""), |h| h.to_str()) + .map_err(|e| { + ApiError::BadRequest(format!( + "The content-type header contains invalid characters: {:?}", + e + )) + }) + .map(|h| String::from(h))?; + + // JSON is our default encoding, unless something else is requested. + let encoding = match content_header { + ref h if h.starts_with("application/ssz") => Encoding::SSZ, + ref h if h.starts_with("application/yaml") => Encoding::YAML, + ref h if h.starts_with("text/") => Encoding::TEXT, + _ => Encoding::JSON, + }; + Ok(Self { encoding }) + } + + pub fn body(self, item: &T) -> ApiResult { + match self.encoding { + Encoding::SSZ => Response::builder() + .status(StatusCode::OK) + .header("content-type", "application/ssz") + .body(Body::from(item.as_ssz_bytes())) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))), + _ => self.body_no_ssz(item), + } + } + + pub fn body_no_ssz(self, item: &T) -> ApiResult { + let (body, content_type) = match self.encoding { + Encoding::JSON => ( + Body::from(serde_json::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?), + "application/json", + ), + Encoding::SSZ => { + return Err(ApiError::UnsupportedType( + "Response cannot be encoded as SSZ.".into(), + )); + } + Encoding::YAML => ( + Body::from(serde_yaml::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as YAML: {:?}", + e + )) + })?), + "application/yaml", + ), + Encoding::TEXT => { + return Err(ApiError::UnsupportedType( + "Response cannot be encoded as plain text.".into(), + )); + } + }; + + Response::builder() + .status(StatusCode::OK) + .header("content-type", content_type) + .body(Body::from(body)) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } + + pub fn body_text(self, text: String) -> ApiResult { + Response::builder() + .status(StatusCode::OK) + .header("content-type", "text/plain; charset=utf-8") + .body(Body::from(text)) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } +} diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index d0c8e4368..083ff5ad4 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -1,27 +1,30 @@ -use super::{success_response, ApiResult}; +use super::ApiResult; +use 
crate::helpers::get_beacon_chain_from_request; +use crate::response_builder::ResponseBuilder; use crate::ApiError; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::BeaconChainTypes; +use eth2_config::Eth2Config; use hyper::{Body, Request}; use std::sync::Arc; use types::EthSpec; /// HTTP handler to return the full spec object. pub fn get_spec(req: Request) -> ApiResult { - let beacon_chain = req + let beacon_chain = get_beacon_chain_from_request::(&req)?; + ResponseBuilder::new(&req)?.body_no_ssz(&beacon_chain.spec) +} + +/// HTTP handler to return the full Eth2Config object. +pub fn get_eth2_config(req: Request) -> ApiResult { + let eth2_config = req .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + .get::>() + .ok_or_else(|| ApiError::ServerError("Eth2Config extension missing".to_string()))?; - let json: String = serde_json::to_string(&beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req)?.body_no_ssz(eth2_config.as_ref()) } /// HTTP handler to return the full spec object. -pub fn get_slots_per_epoch(_req: Request) -> ApiResult { - let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize epoch: {:?}", e)))?; - - Ok(success_response(Body::from(json))) +pub fn get_slots_per_epoch(req: Request) -> ApiResult { + ResponseBuilder::new(&req)?.body(&T::EthSpec::slots_per_epoch()) } diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs index e39a9a449..f0c587a32 100644 --- a/beacon_node/rest_api/src/url_query.rs +++ b/beacon_node/rest_api/src/url_query.rs @@ -12,7 +12,7 @@ impl<'a> UrlQuery<'a> { /// Returns `Err` if `req` does not contain any query parameters. pub fn from_request(req: &'a Request) -> Result { let query_str = req.uri().query().ok_or_else(|| { - ApiError::InvalidQueryParams( + ApiError::BadRequest( "URL query must be valid and contain at least one key.".to_string(), ) })?; @@ -28,7 +28,7 @@ impl<'a> UrlQuery<'a> { .find(|(key, _value)| keys.contains(&&**key)) .map(|(key, value)| (key.into_owned(), value.into_owned())) .ok_or_else(|| { - ApiError::InvalidQueryParams(format!( + ApiError::BadRequest(format!( "URL query must contain at least one of the following keys: {:?}", keys )) @@ -48,13 +48,13 @@ impl<'a> UrlQuery<'a> { if first_key == key { Ok(first_value.to_string()) } else { - Err(ApiError::InvalidQueryParams(format!( + Err(ApiError::BadRequest(format!( "Only the {} query parameter is supported", key ))) } } else { - Err(ApiError::InvalidQueryParams(format!( + Err(ApiError::BadRequest(format!( "Only one query parameter is allowed, {} supplied", queries.len() ))) @@ -64,7 +64,7 @@ impl<'a> UrlQuery<'a> { /// Returns a vector of all values present where `key` is in `keys /// /// If no match is found, an `InvalidQueryParams` error is returned. 
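The `ResponseBuilder` added in `response_builder.rs` above performs simple content negotiation: the request's `content-type` header selects SSZ, YAML or plain text, and anything else (including no header) falls back to JSON. The same selection logic, restated as a free function:

#[derive(Debug, PartialEq)]
enum Encoding {
    Json,
    Ssz,
    Yaml,
    Text,
}

// Mirrors ResponseBuilder::new: JSON is the default unless the header asks otherwise.
fn encoding_for(content_header: &str) -> Encoding {
    match content_header {
        h if h.starts_with("application/ssz") => Encoding::Ssz,
        h if h.starts_with("application/yaml") => Encoding::Yaml,
        h if h.starts_with("text/") => Encoding::Text,
        _ => Encoding::Json,
    }
}

fn main() {
    assert_eq!(encoding_for("application/yaml"), Encoding::Yaml);
    assert_eq!(encoding_for(""), Encoding::Json);
}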
- pub fn all_of(mut self, key: &str) -> Result, ApiError> { + pub fn all_of(self, key: &str) -> Result, ApiError> { let queries: Vec<_> = self .0 .filter_map(|(k, v)| { diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 4294f9c20..60c0eed06 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,13 +1,23 @@ -use super::{success_response, ApiResult}; -use crate::{helpers::*, ApiError, UrlQuery}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bls::PublicKey; +use crate::helpers::{ + check_content_type_for_json, get_beacon_chain_from_request, get_logger_from_request, + parse_pubkey, publish_attestation_to_network, publish_beacon_block_to_network, +}; +use crate::response_builder::ResponseBuilder; +use crate::{ApiError, ApiResult, BoxFut, UrlQuery}; +use beacon_chain::{AttestationProcessingOutcome, BeaconChainTypes, BlockProcessingOutcome}; +use bls::{AggregateSignature, PublicKey, Signature}; +use futures::future::Future; +use futures::stream::Stream; use hyper::{Body, Request}; +use network::NetworkMessage; +use parking_lot::RwLock; use serde::{Deserialize, Serialize}; +use slog::{info, trace, warn}; use std::sync::Arc; -use store::Store; +use tokio; +use tokio::sync::mpsc; use types::beacon_state::EthSpec; -use types::{BeaconBlock, BeaconState, Epoch, RelativeEpoch, Shard, Slot}; +use types::{Attestation, BeaconBlock, BitList, Epoch, RelativeEpoch, Shard, Slot}; #[derive(Debug, Serialize, Deserialize)] pub struct ValidatorDuty { @@ -34,49 +44,47 @@ impl ValidatorDuty { /// HTTP Handler to retrieve the duties for a set of validators during a particular epoch pub fn get_validator_duties(req: Request) -> ApiResult { - // Get beacon state - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - let _ = beacon_chain - .ensure_state_caches_are_built() - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; - let head_state = beacon_chain - .speculative_state() - .expect("This is legacy code and should be removed."); + let log = get_logger_from_request(&req); + slog::trace!(log, "Validator duties requested of API: {:?}", &req); + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let mut head_state = beacon_chain.head().beacon_state; + slog::trace!(log, "Got head state from request."); // Parse and check query parameters let query = UrlQuery::from_request(&req)?; let current_epoch = head_state.current_epoch(); let epoch = match query.first_of(&["epoch"]) { - Ok((_, v)) => Epoch::new(v.parse::().map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid epoch parameter, must be a u64. {:?}", e)) - })?), + Ok((_, v)) => { + slog::trace!(log, "Requested epoch {:?}", v); + Epoch::new(v.parse::().map_err(|e| { + slog::info!(log, "Invalid epoch {:?}", e); + ApiError::BadRequest(format!("Invalid epoch parameter, must be a u64. {:?}", e)) + })?)
+ } Err(_) => { // epoch not supplied, use the current epoch + slog::info!(log, "Using default epoch {:?}", current_epoch); current_epoch } }; let relative_epoch = RelativeEpoch::from_epoch(current_epoch, epoch).map_err(|e| { - ApiError::InvalidQueryParams(format!( + slog::info!(log, "Requested epoch out of range."); + ApiError::BadRequest(format!( "Cannot get RelativeEpoch, epoch out of range: {:?}", e )) })?; - //TODO: Handle an array of validators, currently only takes one - let mut validators: Vec = match query.all_of("validator_pubkeys") { - Ok(v) => v - .iter() - .map(|pk| parse_pubkey(pk)) - .collect::, _>>()?, - Err(e) => { - return Err(e); - } - }; + let validators: Vec = query + .all_of("validator_pubkeys")? + .iter() + .map(|pk| parse_pubkey(pk)) + .collect::, _>>()?; let mut duties: Vec = Vec::new(); + // Build cache for the requested epoch + head_state + .build_committee_cache(relative_epoch, &beacon_chain.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; // Get a list of all validators for this epoch let validator_proposers: Vec = epoch .slot_iter(T::EthSpec::slots_per_epoch()) @@ -141,9 +149,263 @@ pub fn get_validator_duties(req: Request) - duties.append(&mut vec![duty]); } - let body = Body::from( - serde_json::to_string(&duties) - .expect("We should always be able to serialize the duties we created."), - ); - Ok(success_response(body)) + ResponseBuilder::new(&req)?.body_no_ssz(&duties) +} + +/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. +pub fn get_new_beacon_block(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + + let query = UrlQuery::from_request(&req)?; + let slot = query + .first_of(&["slot"]) + .map(|(_key, value)| value)? + .parse::() + .map(Slot::from) + .map_err(|e| { + ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. {:?}", e)) + })?; + let randao_bytes = query + .first_of(&["randao_reveal"]) + .map(|(_key, value)| value) + .map(hex::decode)? + .map_err(|e| { + ApiError::BadRequest(format!("Invalid hex string for randao_reveal: {:?}", e)) + })?; + let randao_reveal = Signature::from_bytes(randao_bytes.as_slice()).map_err(|e| { + ApiError::BadRequest(format!("randao_reveal is not a valid signature: {:?}", e)) + })?; + + let (new_block, _state) = beacon_chain + .produce_block(randao_reveal, slot) + .map_err(|e| { + ApiError::ServerError(format!( + "Beacon node is not able to produce a block: {:?}", + e + )) + })?; + + ResponseBuilder::new(&req)?.body(&new_block) +} + +/// HTTP Handler to publish a BeaconBlock, which has been signed by a validator. +pub fn publish_beacon_block(req: Request) -> BoxFut { + let _ = try_future!(check_content_type_for_json(&req)); + let log = get_logger_from_request(&req); + let beacon_chain = try_future!(get_beacon_chain_from_request::(&req)); + // Get the network sending channel from the request, for later transmission + let network_chan = req + .extensions() + .get::>>>() + .expect("Should always get the network channel from the request, since we put it in there.") + .clone(); + + let response_builder = ResponseBuilder::new(&req); + + let body = req.into_body(); + trace!( + log, + "Got the request body, now going to parse it into a block." 
+ ); + Box::new(body + .concat2() + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}",e))) + .map(|chunk| chunk.iter().cloned().collect::>()) + .and_then(|chunks| { + serde_json::from_slice(&chunks.as_slice()).map_err(|e| { + ApiError::BadRequest(format!( + "Unable to deserialize JSON into a BeaconBlock: {:?}", + e + )) + }) + }) + .and_then(move |block: BeaconBlock| { + let slot = block.slot; + match beacon_chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::Processed { block_root }) => { + // Block was processed, publish via gossipsub + info!(log, "Processed valid block from API, transmitting to network."; "block_slot" => slot, "block_root" => format!("{}", block_root)); + publish_beacon_block_to_network::(network_chan, block) + } + Ok(outcome) => { + warn!(log, "BeaconBlock could not be processed, but is being sent to the network anyway."; "outcome" => format!("{:?}", outcome)); + publish_beacon_block_to_network::(network_chan, block)?; + Err(ApiError::ProcessingError(format!( + "The BeaconBlock could not be processed, but has still been published: {:?}", + outcome + ))) + } + Err(e) => { + Err(ApiError::ServerError(format!( + "Error while processing block: {:?}", + e + ))) + } + } + }).and_then(|_| { + response_builder?.body_no_ssz(&()) + })) +} + +/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. +pub fn get_new_attestation(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let mut head_state = beacon_chain.head().beacon_state; + + let query = UrlQuery::from_request(&req)?; + let val_pk_str = query + .first_of(&["validator_pubkey"]) + .map(|(_key, value)| value)?; + let val_pk = parse_pubkey(val_pk_str.as_str())?; + + head_state + .update_pubkey_cache() + .map_err(|e| ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)))?; + // Get the validator index from the supplied public key + // If it does not exist in the index, we cannot continue. + let val_index = head_state + .get_validator_index(&val_pk) + .map_err(|e| { + ApiError::ServerError(format!("Unable to read validator index cache. {:?}", e)) + })? + .ok_or(ApiError::BadRequest( + "The provided validator public key does not correspond to a validator index.".into(), + ))?; + + // Build cache for the requested epoch + head_state + .build_committee_cache(RelativeEpoch::Current, &beacon_chain.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; + // Get the duties of the validator, to make sure they match up. + // If they don't have duties this epoch, then return an error + let val_duty = head_state + .get_attestation_duties(val_index, RelativeEpoch::Current) + .map_err(|e| { + ApiError::ServerError(format!( + "unable to read cache for attestation duties: {:?}", + e + )) + })? + .ok_or(ApiError::BadRequest("No validator duties could be found for the requested validator. Cannot provide valid attestation.".into()))?; + + // Check that we are requesting an attestation during the slot where it is relevant. + let present_slot = beacon_chain.slot().map_err(|e| ApiError::ServerError( + format!("Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun. {:?}", e) + ))?; + if val_duty.slot != present_slot { + return Err(ApiError::BadRequest(format!("Validator is only able to request an attestation during the slot they are allocated. 
Current slot: {:?}, allocated slot: {:?}", head_state.slot, val_duty.slot))); + } + + // Parse the POC bit and insert it into the aggregation bits + let poc_bit = query + .first_of(&["poc_bit"]) + .map(|(_key, value)| value)? + .parse::() + .map_err(|e| { + ApiError::BadRequest(format!("Invalid poc_bit parameter, must be a bool. {:?}", e)) + })?; + + let mut aggregation_bits = BitList::with_capacity(val_duty.committee_len) + .expect("An empty BitList should always be created, or we have bigger problems."); + aggregation_bits + .set(val_duty.committee_index, poc_bit) + .map_err(|e| { + ApiError::ServerError(format!( + "Unable to set aggregation bits for the attestation: {:?}", + e + )) + })?; + + // Allow a provided slot parameter to check against the expected slot as a sanity check only. + // Presently, we don't support attestations at future or past slots. + let requested_slot = query + .first_of(&["slot"]) + .map(|(_key, value)| value)? + .parse::() + .map(Slot::from) + .map_err(|e| { + ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. {:?}", e)) + })?; + let current_slot = beacon_chain.head().beacon_state.slot.as_u64(); + if requested_slot != current_slot { + return Err(ApiError::BadRequest(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); + } + + let shard = query + .first_of(&["shard"]) + .map(|(_key, value)| value)? + .parse::() + .map_err(|e| ApiError::BadRequest(format!("Shard is not a valid u64 value: {:?}", e)))?; + + let attestation_data = beacon_chain + .produce_attestation_data(shard, current_slot.into()) + .map_err(|e| ApiError::ServerError(format!("Could not produce an attestation: {:?}", e)))?; + + let attestation: Attestation = Attestation { + aggregation_bits, + data: attestation_data, + custody_bits: BitList::with_capacity(val_duty.committee_len) + .expect("Should be able to create an empty BitList for the custody bits."), + signature: AggregateSignature::new(), + }; + + ResponseBuilder::new(&req)?.body(&attestation) +} + +/// HTTP Handler to publish an Attestation, which has been signed by a validator. +pub fn publish_attestation(req: Request) -> BoxFut { + let _ = try_future!(check_content_type_for_json(&req)); + let log = get_logger_from_request(&req); + let beacon_chain = try_future!(get_beacon_chain_from_request::(&req)); + // Get the network sending channel from the request, for later transmission + let network_chan = req + .extensions() + .get::>>>() + .expect("Should always get the network channel from the request, since we put it in there.") + .clone(); + + let response_builder = ResponseBuilder::new(&req); + + let body = req.into_body(); + trace!( + log, + "Got the request body, now going to parse it into an attestation."
+ ); + Box::new(body + .concat2() + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}",e))) + .map(|chunk| chunk.iter().cloned().collect::>()) + .and_then(|chunks| { + serde_json::from_slice(&chunks.as_slice()).map_err(|e| { + ApiError::BadRequest(format!( + "Unable to deserialize JSON into an Attestation: {:?}", + e + )) + }) + }) + .and_then(move |attestation: Attestation| { + match beacon_chain.process_attestation(attestation.clone()) { + Ok(AttestationProcessingOutcome::Processed) => { + // Attestation was processed, publish via gossipsub + info!(log, "Processed valid attestation from API, transmitting to network."); + publish_attestation_to_network::(network_chan, attestation) + } + Ok(outcome) => { + warn!(log, "Attestation could not be processed, but is being sent to the network anyway."; "outcome" => format!("{:?}", outcome)); + publish_attestation_to_network::(network_chan, attestation)?; + Err(ApiError::ProcessingError(format!( + "The Attestation could not be processed, but has still been published: {:?}", + outcome + ))) + } + Err(e) => { + Err(ApiError::ServerError(format!( + "Error while processing attestation: {:?}", + e + ))) + } + } + }).and_then(|_| { + response_builder?.body_no_ssz(&()) + })) } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 68d3829ee..f4b49049a 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -14,7 +14,7 @@ use slog::{error, info, trace, warn}; use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; -use types::Attestation; +use types::{Attestation, Slot}; #[derive(Clone)] pub struct AttestationServiceInstance { @@ -37,49 +37,13 @@ impl AttestationService for AttestationServiceInstance { req.get_slot() ); - // verify the slot, drop lock on state afterwards - { - let slot_requested = req.get_slot(); - // TODO: this whole module is legacy and not maintained well. - let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); - - // Start by performing some checks - // Check that the AttestationData is for the current slot (otherwise it will not be valid) - if slot_requested > state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::OutOfRange, - Some( - "AttestationData request for a slot that is in the future.".to_string(), - ), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - // currently cannot handle past slots.
TODO: Handle this case - else if slot_requested < state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::InvalidArgument, - Some("AttestationData request for a slot that is in the past.".to_string()), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - } - // Then get the AttestationData from the beacon chain let shard = req.get_shard(); - let attestation_data = match self.chain.produce_attestation_data(shard) { + let slot_requested = req.get_slot(); + let attestation_data = match self + .chain + .produce_attestation_data(shard, Slot::from(slot_requested)) + { Ok(v) => v, Err(e) => { // Could not produce an attestation diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index 92a543ef3..346d7e263 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -34,8 +34,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { trace!(self.log, "Generating a beacon block"; "req" => format!("{:?}", req)); // decode the request - // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let _requested_slot = Slot::from(req.get_slot()); + let requested_slot = Slot::from(req.get_slot()); let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,7 +50,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let produced_block = match self.chain.produce_block(randao_reveal) { + let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block @@ -67,6 +66,11 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; + assert_eq!( + produced_block.slot, requested_slot, + "should produce at the requested slot" + ); + let mut block = BeaconBlockProto::new(); block.set_ssz(ssz_encode(&produced_block)); diff --git a/beacon_node/rpc/src/config.rs b/beacon_node/rpc/src/config.rs index 0f031ddc6..47eff6824 100644 --- a/beacon_node/rpc/src/config.rs +++ b/beacon_node/rpc/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: false, // rpc disabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5051, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("rpc") { - self.enabled = true; + if args.is_present("no-grpc") { + self.enabled = false; } if let Some(rpc_address) = args.value_of("rpc-address") { diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index eef009292..59902ff43 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -80,7 +80,12 @@ pub fn start_server( let spawn_rpc = { server.start(); for &(ref host, port) in server.bind_addrs() { - info!(log, "gRPC listening on {}:{}", host, port); + info!( + log, + "gRPC API started"; + "port" => port, + "host" => host, + ); } rpc_exit.and_then(move |_| { info!(log, "RPC Server shutting down"); diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 080c828a7..0533e2558 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -25,39 +25,41 @@ impl ValidatorService for ValidatorServiceInstance { req: GetDutiesRequest, sink: UnarySink, ) { - let 
validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); + let validators = req.get_validators(); - let spec = &self.chain.spec; - // TODO: this whole module is legacy and not maintained well. - let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); let epoch = Epoch::from(req.get_epoch()); + let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); + + let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { + state.clone() + } else { + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::FailedPrecondition, + Some("No state".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + }; + + let _ = state.build_all_caches(&self.chain.spec); + + assert_eq!( + state.current_epoch(), + epoch, + "Retrieved state should be from the same epoch" + ); + let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); - let relative_epoch = - match RelativeEpoch::from_epoch(state.slot.epoch(T::EthSpec::slots_per_epoch()), epoch) - { - Ok(v) => v, - Err(e) => { - // incorrect epoch - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::FailedPrecondition, - Some(format!("Invalid epoch: {:?}", e)), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - } - }; - let validator_proposers: Result, _> = epoch .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec)) + .map(|slot| { + state.get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.chain.spec) + }) .collect(); let validator_proposers = match validator_proposers { Ok(v) => v, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs new file mode 100644 index 000000000..5cfb45287 --- /dev/null +++ b/beacon_node/src/config.rs @@ -0,0 +1,575 @@ +use clap::ArgMatches; +use client::{BeaconChainStartMethod, ClientConfig, Eth1BackendMethod, Eth2Config}; +use eth2_config::{read_from_file, write_to_file}; +use lighthouse_bootstrap::Bootstrapper; +use rand::{distributions::Alphanumeric, Rng}; +use slog::{crit, info, warn, Logger}; +use std::fs; +use std::net::Ipv4Addr; +use std::path::{Path, PathBuf}; + +pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; +pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; +pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; + +type Result = std::result::Result; +type Config = (ClientConfig, Eth2Config, Logger); + +/// Gets the fully-initialized global client and eth2 configuration objects. +/// +/// The top-level `clap` arguments should be provided as `cli_args`. +/// +/// The output of this function depends primarily upon the given `cli_args`, however its behaviour +/// may be influenced by other external services like the contents of the file system or the +/// response of some remote server.
+pub fn get_configs(cli_args: &ArgMatches, core_log: Logger) -> Result { + let log = core_log.clone(); + + let mut builder = ConfigBuilder::new(cli_args, core_log)?; + + if let Some(server) = cli_args.value_of("eth1-server") { + builder.set_eth1_backend_method(Eth1BackendMethod::Web3 { + server: server.into(), + }) + } else { + builder.set_eth1_backend_method(Eth1BackendMethod::Interop) + } + + match cli_args.subcommand() { + ("testnet", Some(sub_cmd_args)) => { + process_testnet_subcommand(&mut builder, sub_cmd_args, &log)? + } + // No sub-command assumes a resume operation. + _ => { + info!( + log, + "Resuming from existing datadir"; + "path" => format!("{:?}", builder.client_config.data_dir) + ); + + // If no primary subcommand was given, start the beacon chain from an existing + // database. + builder.set_beacon_chain_start_method(BeaconChainStartMethod::Resume); + + // Whilst there is no large testnet or mainnet, force the user to specify how they want + // to start a new chain (e.g., from a genesis YAML file, another node, etc). + if !builder.client_config.data_dir.exists() { + return Err( + "No datadir found. To start a new beacon chain, see `testnet --help`. \ + Use `--datadir` to specify a different directory" + .into(), + ); + } + + // If the `testnet` command was not provided, attempt to load an existing datadir and + // continue with an existing chain. + builder.load_from_datadir()?; + } + }; + + builder.build(cli_args) +} + +/// Process the `testnet` CLI subcommand arguments, updating the `builder`. +fn process_testnet_subcommand( + builder: &mut ConfigBuilder, + cli_args: &ArgMatches, + log: &Logger, +) -> Result<()> { + if cli_args.is_present("random-datadir") { + builder.set_random_datadir()?; + } + + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + + let is_bootstrap = cli_args.subcommand_name() == Some("bootstrap"); + + if let Some(path_string) = cli_args.value_of("eth2-config") { + if is_bootstrap { + return Err("Cannot supply --eth2-config when using bootstrap".to_string()); + } + + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse eth2-config path: {:?}", e))?; + builder.load_eth2_config(path)?; + } else { + builder.update_spec_from_subcommand(&cli_args)?; + } + + if let Some(slot_time) = cli_args.value_of("slot-time") { + if is_bootstrap { + return Err("Cannot supply --slot-time flag whilst using bootstrap.".into()); + } + + let slot_time = slot_time + .parse::() + .map_err(|e| format!("Unable to parse slot-time: {:?}", e))?; + + builder.set_slot_time(slot_time); + } + + if let Some(path_string) = cli_args.value_of("client-config") { + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse client config path: {:?}", e))?; + builder.load_client_config(path)?; + } + + info!( + log, + "Creating new datadir"; + "path" => format!("{:?}", builder.client_config.data_dir) + ); + + // When using the testnet command we listen on all addresses. + builder.set_listen_addresses("0.0.0.0".into())?; + warn!(log, "All services listening on 0.0.0.0"); + + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`).
+ match cli_args.subcommand() { + ("bootstrap", Some(cli_args)) => { + let server = cli_args + .value_of("server") + .ok_or_else(|| "No bootstrap server specified")?; + let port: Option = cli_args + .value_of("libp2p-port") + .and_then(|s| s.parse::().ok()); + + builder.import_bootstrap_libp2p_address(server, port)?; + builder.import_bootstrap_enr_address(server)?; + builder.import_bootstrap_eth2_config(server)?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { + server: server.to_string(), + port, + }) + } + ("recent", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + let minutes = cli_args + .value_of("minutes") + .ok_or_else(|| "No recent genesis minutes supplied")? + .parse::() + .map_err(|e| format!("Unable to parse minutes: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::RecentGenesis { + validator_count, + minutes, + }) + } + ("quick", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + let genesis_time = cli_args + .value_of("genesis_time") + .ok_or_else(|| "No genesis time supplied")? + .parse::() + .map_err(|e| format!("Unable to parse genesis time: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + }) + } + ("file", Some(cli_args)) => { + let file = cli_args + .value_of("file") + .ok_or_else(|| "No filename specified")? + .parse::() + .map_err(|e| format!("Unable to parse filename: {:?}", e))?; + + let format = cli_args + .value_of("format") + .ok_or_else(|| "No file format specified")?; + + let start_method = match format { + "yaml" => BeaconChainStartMethod::Yaml { file }, + "ssz" => BeaconChainStartMethod::Ssz { file }, + "json" => BeaconChainStartMethod::Json { file }, + other => return Err(format!("Unknown genesis file format: {}", other)), + }; + + builder.set_beacon_chain_start_method(start_method) + } + (cmd, Some(_)) => { + return Err(format!( + "Invalid method specified: {}. See 'testnet --help'.", + cmd + )) + } + _ => return Err("No testnet method specified. See 'testnet --help'.".into()), + }; + + builder.write_configs_to_new_datadir()?; + + Ok(()) +} + +/// Allows for building a set of configurations based upon `clap` arguments. +struct ConfigBuilder { + log: Logger, + eth2_config: Eth2Config, + client_config: ClientConfig, +} + +impl ConfigBuilder { + /// Create a new builder with default settings. + pub fn new(cli_args: &ArgMatches, log: Logger) -> Result { + // Read the `--datadir` flag. + // + // If it's not present, try and find the home directory (`~`) and push the default data + // directory onto it. + let data_dir: PathBuf = cli_args + .value_of("datadir") + .map(|string| PathBuf::from(string)) + .or_else(|| { + dirs::home_dir().map(|mut home| { + home.push(DEFAULT_DATA_DIR); + home + }) + }) + .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; + + let mut client_config = ClientConfig::default(); + client_config.data_dir = data_dir; + + Ok(Self { + log, + eth2_config: Eth2Config::minimal(), + client_config, + }) + } + + /// Clears any configuration files that would interfere with writing new configs.
+ /// + /// Moves the following files in `data_dir` into a backup directory: + /// + /// - Client config + /// - Eth2 config + /// - The entire database directory + pub fn clean_datadir(&mut self) -> Result<()> { + let backup_dir = { + let mut s = String::from("backup_"); + s.push_str(&random_string(6)); + self.client_config.data_dir.join(s) + }; + + fs::create_dir_all(&backup_dir) + .map_err(|e| format!("Unable to create config backup dir: {:?}", e))?; + + let move_to_backup_dir = |path: &Path| -> Result<()> { + let file_name = path + .file_name() + .ok_or_else(|| "Invalid path found during datadir clean (no filename).")?; + + let mut new = path.to_path_buf(); + new.pop(); + new.push(backup_dir.clone()); + new.push(file_name); + + let _ = fs::rename(path, new); + + Ok(()) + }; + + move_to_backup_dir(&self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; + move_to_backup_dir(&self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; + + if let Some(db_path) = self.client_config.db_path() { + move_to_backup_dir(&db_path)?; + } + + Ok(()) + } + + /// Sets the method for starting the beacon chain. + pub fn set_beacon_chain_start_method(&mut self, method: BeaconChainStartMethod) { + self.client_config.beacon_chain_start_method = method; + } + + /// Sets the method for connecting to the eth1 chain. + pub fn set_eth1_backend_method(&mut self, method: Eth1BackendMethod) { + self.client_config.eth1_backend_method = method; + } + + /// Import the libp2p address for `server` into the list of libp2p nodes to connect with. + /// + /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`, + /// attempts to connect to the `server` via HTTP and retrieve its libp2p listen port. + pub fn import_bootstrap_libp2p_address( + &mut self, + server: &str, + port: Option, + ) -> Result<()> { + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; + + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr(port) { + info!( + self.log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) + ); + + self.client_config + .network + .libp2p_nodes + .push(server_multiaddr); + } else { + warn!( + self.log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); + }; + + Ok(()) + } + + /// Import the enr address for `server` into the list of initial enrs (boot nodes). + pub fn import_bootstrap_enr_address(&mut self, server: &str) -> Result<()> { + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; + + if let Ok(enr) = bootstrapper.enr() { + info!( + self.log, + "Loaded bootstrapper libp2p address"; + "enr" => format!("{:?}", enr) + ); + + self.client_config.network.boot_nodes.push(enr); + } else { + warn!( + self.log, + "Unable to estimate a bootstrapper enr address, this node may not find any peers." + ); + }; + + Ok(()) + } + + /// Set the config data_dir to be a random directory. + /// + /// Useful for easily spinning up ephemeral testnets. + pub fn set_random_datadir(&mut self) -> Result<()> { + self.client_config + .data_dir + .push(format!("random_{}", random_string(6))); + self.client_config.network.network_dir = self.client_config.data_dir.join("network"); + + Ok(()) + } + + /// Imports an `Eth2Config` from `server`, returning an error if this fails.
+ pub fn import_bootstrap_eth2_config(&mut self, server: &str) -> Result<()> { + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; + + self.update_eth2_config(bootstrapper.eth2_config()?); + + Ok(()) + } + + fn update_eth2_config(&mut self, eth2_config: Eth2Config) { + self.eth2_config = eth2_config; + } + + fn set_slot_time(&mut self, milliseconds_per_slot: u64) { + self.eth2_config.spec.milliseconds_per_slot = milliseconds_per_slot; + } + + /// Reads the subcommand and tries to update `self.eth2_config` based upon the `--spec` flag. + /// + /// Returns an error if the `--spec` flag is not present in the given `cli_args`. + pub fn update_spec_from_subcommand(&mut self, cli_args: &ArgMatches) -> Result<()> { + // Re-initialise the `Eth2Config`. + // + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. + let eth2_config = match cli_args.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("Unable to determine specification type.".into()), + }; + + self.client_config.spec_constants = cli_args + .value_of("spec") + .expect("Guarded by prior match statement") + .to_string(); + self.eth2_config = eth2_config; + + Ok(()) + } + + /// Writes the configs in `self` to `self.data_dir`. + /// + /// Returns an error if `self.data_dir` already exists. + pub fn write_configs_to_new_datadir(&mut self) -> Result<()> { + let db_exists = self + .client_config + .db_path() + .map(|d| d.exists()) + .unwrap_or_else(|| false); + + // Do not permit creating a new config when the datadir exists. + if db_exists { + return Err("Database already exists. See `-f` or `-r` in `testnet --help`".into()); + } + + // Create `datadir` and any non-existing parent directories. + fs::create_dir_all(&self.client_config.data_dir).map_err(|e| { + crit!(self.log, "Failed to initialize data dir"; "error" => format!("{}", e)); + format!("{}", e) + })?; + + let client_config_file = self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME); + if client_config_file.exists() { + return Err(format!( + "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", + CLIENT_CONFIG_FILENAME + )); + } else { + // Write the config to a TOML file in the datadir. + write_to_file( + self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME), + &self.client_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; + } + + let eth2_config_file = self.client_config.data_dir.join(ETH2_CONFIG_FILENAME); + if eth2_config_file.exists() { + return Err(format!( + "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", + ETH2_CONFIG_FILENAME + )); + } else { + // Write the config to a TOML file in the datadir. + write_to_file( + self.client_config.data_dir.join(ETH2_CONFIG_FILENAME), + &self.eth2_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + } + + Ok(()) + } + + /// Attempts to load the client and eth2 configs from `self.data_dir`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_from_datadir(&mut self) -> Result<()> { + // Check to ensure the datadir exists. + // + // For now we return an error. In the future we may decide to boot a default (e.g., + // public testnet or mainnet). + if !self.client_config.data_dir.exists() { + return Err( + "No datadir found.
Either create a new testnet or specify a different `--datadir`." + .into(), + ); + } + + // If there is a path to a database in the config, ensure it exists. + if !self + .client_config + .db_path() + .map(|path| path.exists()) + .unwrap_or_else(|| true) + { + return Err( + "No database found in datadir. Use 'testnet -f' to overwrite the existing \ + datadir, or specify a different `--datadir`." + .into(), + ); + } + + self.load_eth2_config(self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; + self.load_client_config(self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; + + Ok(()) + } + + /// Attempts to load the client config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_client_config(&mut self, path: PathBuf) -> Result<()> { + self.client_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; + + Ok(()) + } + + /// Attempts to load the eth2 config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_eth2_config(&mut self, path: PathBuf) -> Result<()> { + self.eth2_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; + + Ok(()) + } + + /// Sets all listening addresses to the given `addr`. + pub fn set_listen_addresses(&mut self, addr: String) -> Result<()> { + let addr = addr + .parse::() + .map_err(|e| format!("Unable to parse default listen address: {:?}", e))?; + + self.client_config.network.listen_address = addr.clone().into(); + self.client_config.rpc.listen_address = addr.clone(); + self.client_config.rest_api.listen_address = addr.clone(); + + Ok(()) + } + + /// Consumes self, returning the configs. + /// + /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand + /// cli_args).
+ pub fn build(mut self, cli_args: &ArgMatches) -> Result { + self.eth2_config.apply_cli_args(cli_args)?; + self.client_config.apply_cli_args(cli_args, &mut self.log)?; + + if let Some(bump) = cli_args.value_of("port-bump") { + let bump = bump + .parse::() + .map_err(|e| format!("Unable to parse port bump: {}", e))?; + + self.client_config.network.libp2p_port += bump; + self.client_config.network.discovery_port += bump; + self.client_config.rpc.port += bump; + self.client_config.rest_api.port += bump; + self.client_config.websocket_server.port += bump; + } + + if self.eth2_config.spec_constants != self.client_config.spec_constants { + crit!(self.log, "Specification constants do not match."; + "client_config" => format!("{}", self.client_config.spec_constants), + "eth2_config" => format!("{}", self.eth2_config.spec_constants) + ); + return Err("Specification constant mismatch".into()); + } + + Ok((self.client_config, self.eth2_config, self.log)) + } +} + +fn random_string(len: usize) -> String { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(len) + .collect::() +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 0eb5b83b4..7bc7e8abe 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,12 +1,10 @@ +mod config; mod run; -use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config}; +use clap::{App, Arg, SubCommand}; +use config::get_configs; use env_logger::{Builder, Env}; -use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; -use std::fs; -use std::path::PathBuf; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; @@ -30,36 +28,49 @@ fn main() { .value_name("DIR") .help("Data directory for keys and databases.") .takes_value(true) + .global(true) ) .arg( Arg::with_name("logfile") .long("logfile") - .value_name("logfile") + .value_name("FILE") .help("File path where output will be written.") .takes_value(true), ) .arg( Arg::with_name("network-dir") .long("network-dir") - .value_name("NETWORK-DIR") + .value_name("DIR") .help("Data directory for network keys.") .takes_value(true) + .global(true) ) /* * Network parameters. */ + .arg( + Arg::with_name("port-bump") + .long("port-bump") + .short("b") + .value_name("INCREMENT") + .help("Sets all listening TCP/UDP ports to default values, but with each port increased by \ + INCREMENT. Useful when starting multiple nodes on a single machine. Using increments \ + in multiples of 10 is recommended.") + .takes_value(true), + ) .arg( Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") - .takes_value(true), + .takes_value(true) ) .arg( Arg::with_name("port") .long("port") .value_name("PORT") .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -72,7 +83,7 @@ fn main() { Arg::with_name("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) - .value_name("BOOTNODES") + .value_name("ENR-LIST") .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.") .takes_value(true), ) @@ -81,6 +92,7 @@ fn main() { .long("disc-port") .value_name("PORT") .help("The discovery UDP port.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -104,52 +116,93 @@ fn main() { .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR.") .takes_value(true), ) + .arg( + Arg::with_name("p2p-priv-key") + .long("p2p-priv-key") + .value_name("HEX") + .help("A secp256k1 secret key, represented as ASCII-encoded hex bytes (with or without 0x prefix).") + .takes_value(true), + ) /* * gRPC parameters. */ .arg( - Arg::with_name("rpc") - .long("rpc") - .value_name("RPC") - .help("Enable the RPC server.") + Arg::with_name("no-grpc") + .long("no-grpc") + .help("Disable the gRPC server.") .takes_value(false), ) .arg( Arg::with_name("rpc-address") .long("rpc-address") - .value_name("Address") + .value_name("ADDRESS") .help("Listen address for RPC endpoint.") .takes_value(true), ) .arg( Arg::with_name("rpc-port") .long("rpc-port") + .value_name("PORT") .help("Listen port for RPC endpoint.") + .conflicts_with("port-bump") .takes_value(true), ) - /* Client related arguments */ + /* REST API related arguments */ .arg( - Arg::with_name("api") - .long("api") - .value_name("API") - .help("Enable the RESTful HTTP API server.") + Arg::with_name("no-api") + .long("no-api") + .help("Disable RESTful HTTP API server.") .takes_value(false), ) .arg( Arg::with_name("api-address") .long("api-address") - .value_name("APIADDRESS") + .value_name("ADDRESS") .help("Set the listen address for the RESTful HTTP API server.") .takes_value(true), ) .arg( Arg::with_name("api-port") .long("api-port") - .value_name("APIPORT") + .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") + .conflicts_with("port-bump") + .takes_value(true), + ) + /* Websocket related arguments */ + .arg( + Arg::with_name("no-ws") + .long("no-ws") + .help("Disable websocket server.") + .takes_value(false), + ) + .arg( + Arg::with_name("ws-address") + .long("ws-address") + .value_name("ADDRESS") + .help("Set the listen address for the websocket server.") + .conflicts_with_all(&["no-ws"]) + .takes_value(true), + ) + .arg( + Arg::with_name("ws-port") + .long("ws-port") + .value_name("PORT") + .help("Set the listen TCP port for the websocket server.") + .conflicts_with_all(&["no-ws", "port-bump"]) .takes_value(true), ) + /* + * Eth1 Integration + */ + .arg( + Arg::with_name("eth1-server") + .long("eth1-server") + .value_name("SERVER") + .help("Specifies the server for a web3 connection to the Eth1 chain.") + .takes_value(true) + ) /* * Database parameters. */ @@ -160,25 +213,7 @@ fn main() { .help("Type of database to use.") .takes_value(true) .possible_values(&["disk", "memory"]) - .default_value("memory"), - ) - /* - * Specification/testnet params. - */ - .arg( - Arg::with_name("default-spec") - .long("default-spec") - .value_name("TITLE") - .short("default-spec") - .help("Specifies the default eth2 spec to be used. 
This will override any spec written to disk and will therefore be used by default in future instances.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) - ) - .arg( - Arg::with_name("recent-genesis") - .long("recent-genesis") - .short("r") - .help("When present, genesis will be within 30 minutes prior. Only for testing"), + .default_value("disk"), ) /* * Logging. @@ -192,22 +227,138 @@ fn main() { .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) .default_value("trace"), ) - .arg( - Arg::with_name("verbosity") - .short("v") - .multiple(true) - .help("Sets the verbosity level") - .takes_value(true), - ) /* - * Bootstrap. + * The "testnet" sub-command. + * + * Allows for creating a new datadir with testnet-specific configs. */ - .arg( - Arg::with_name("bootstrap") - .long("bootstrap") - .value_name("HTTP_SERVER") - .help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.") - .takes_value(true) + .subcommand(SubCommand::with_name("testnet") + .about("Create a new Lighthouse datadir using a testnet strategy.") + .arg( + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type. Only effective when creating a new datadir.") + .takes_value(true) + .required(true) + .possible_values(&["mainnet", "minimal", "interop"]) + .default_value("minimal") + ) + .arg( + Arg::with_name("eth2-config") + .long("eth2-config") + .value_name("TOML_FILE") + .help("An existing eth2_spec TOML file (e.g., eth2_spec.toml).") + .takes_value(true) + .conflicts_with("spec") + ) + .arg( + Arg::with_name("client-config") + .long("client-config") + .value_name("TOML_FILE") + .help("An existing beacon_node TOML file (e.g., beacon_node.toml).") + .takes_value(true) + ) + .arg( + Arg::with_name("random-datadir") + .long("random-datadir") + .short("r") + .help("If present, append a random string to the datadir path. Useful for fast development \ iteration.") + ) + .arg( + Arg::with_name("force") + .long("force") + .short("f") + .help("If present, will create new config and database files and move any existing to a \ backup directory.") + .conflicts_with("random-datadir") + ) + .arg( + Arg::with_name("slot-time") + .long("slot-time") + .short("t") + .value_name("MILLISECONDS") + .help("Defines the slot time when creating a new testnet.") + ) + /* + * `bootstrap` + * + * Start a new node by downloading genesis and network info from another node via the + * HTTP API. + */ + .subcommand(SubCommand::with_name("bootstrap") + .about("Connects to the given HTTP server, downloads a genesis state and attempts to peer with it.") + .arg(Arg::with_name("server") + .value_name("HTTP_SERVER") + .required(true) + .default_value("http://localhost:5052") + .help("An HTTP server, with an http:// prefix")) + .arg(Arg::with_name("libp2p-port") + .short("p") + .long("port") + .value_name("TCP_PORT") + .help("A libp2p listen port used to peer with the bootstrap server. This flag is useful \ when port-forwarding is used: you may connect using a different port than \ the one the server is immediately listening on.")) + ) + /* + * `recent` + * + * Start a new node, with a specified number of validators and a genesis time in the last + * 30 minutes.
+ */ + .subcommand(SubCommand::with_name("recent") + .about("Creates a new genesis state where the genesis time was at the previous \ MINUTES boundary (e.g., when MINUTES == 30; 12:00, 12:30, 13:00, etc.)") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + .arg(Arg::with_name("minutes") + .long("minutes") + .short("m") + .value_name("MINUTES") + .required(true) + .default_value("15") + .help("The maximum number of minutes that will have elapsed before genesis")) + ) + /* + * `quick` + * + * Start a new node, specifying the number of validators and genesis time + */ + .subcommand(SubCommand::with_name("quick") + .about("Creates a new genesis state from the specified validator count and genesis time. \ Compatible with the `quick-start genesis` defined in the eth2.0-pm repo.") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + .arg(Arg::with_name("genesis_time") + .value_name("UNIX_EPOCH_SECONDS") + .required(true) + .help("The genesis time for the given state.")) + ) + /* + * `file` + * + * Start a new node, using a genesis state loaded from a file + */ + .subcommand(SubCommand::with_name("file") + .about("Creates a new datadir where the genesis state is read from a file. May fail to parse \ a file that was generated to a different spec than that specified by --spec.") + .arg(Arg::with_name("format") + .value_name("FORMAT") + .required(true) + .possible_values(&["yaml", "ssz", "json"]) + .help("The encoding of the state in the file.")) + .arg(Arg::with_name("file") + .value_name("YAML_FILE") + .required(true) + .help("A file from which to read the state")) + ) ) .get_matches(); @@ -227,143 +378,34 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); + + if std::mem::size_of::() != 8 { + crit!( + log, + "Lighthouse only supports 64bit CPUs"; + "detected" => format!("{}bit", std::mem::size_of::() * 8) + ); + } warn!( log, "Ethereum 2.0 is pre-release. This software is experimental." ); - let data_dir = match matches - .value_of("datadir") - .and_then(|v| Some(PathBuf::from(v))) - { - Some(v) => v, - None => { - // use the default - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - crit!(log, "Failed to find a home directory"); - return; - } - }; - default_dir.push(DEFAULT_DATA_DIR); - default_dir - } - }; + let log_clone = log.clone(); - // create the directory if needed - match fs::create_dir_all(&data_dir) { - Ok(_) => {} - Err(e) => { - crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e)); - return; - } - } - - let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - - // Attempt to load the `ClientConfig` from disk. + // Load the process-wide configuration. // - // If file doesn't exist, create a new, default one. - let mut client_config = match read_from_file::(client_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = ClientConfig::default(); - if let Err(e) = write_to_file(client_config_path, &default) { - crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); - return; - } - default - } + // May load this from disk or create a new configuration, depending on the CLI flags supplied.
+ let (client_config, eth2_config, log) = match get_configs(&matches, log) { + Ok(configs) => configs, Err(e) => { - crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); + crit!(log_clone, "Failed to load configuration. Exiting"; "error" => e); return; } }; - // Ensure the `data_dir` in the config matches that supplied to the CLI. - client_config.data_dir = data_dir.clone(); - - // Update the client config with any CLI args. - match client_config.apply_cli_args(&matches, &mut log) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); - return; - } - }; - - let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); - - // Initialise the `Eth2Config`. - // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. - let cli_config = match matches.value_of("default-spec") { - Some("mainnet") => Some(Eth2Config::mainnet()), - Some("minimal") => Some(Eth2Config::minimal()), - Some("interop") => Some(Eth2Config::interop()), - _ => None, - }; - // if a CLI flag is specified, write the new config if it doesn't exist, - // otherwise notify the user that the file will not be written. - let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { - Ok(config) => config, - Err(e) => { - crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); - return; - } - }; - - let mut eth2_config = { - if let Some(cli_config) = cli_config { - if eth2_config_from_file.is_none() { - // write to file if one doesn't exist - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - } else { - warn!( - log, - "Eth2Config file exists. Configuration file is ignored, using default" - ); - } - cli_config - } else { - // CLI config not specified, read from disk - match eth2_config_from_file { - Some(config) => config, - None => { - // set default to minimal - let eth2_config = Eth2Config::minimal(); - if let Err(e) = write_to_file(eth2_config_path, ð2_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - eth2_config - } - } - } - }; - - // Update the eth2 config with any CLI flags. - match eth2_config.apply_cli_args(&matches) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); - return; - } - }; - - // check to ensure the spec constants between the client and eth2_config match - if eth2_config.spec_constants != client_config.spec_constants { - crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants)); - return; - } - // Start the node using a `tokio` executor. 
match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index f88cb7460..3d6607552 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,19 +1,17 @@ -use client::{ - error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config, - InitialiseBeaconChain, -}; +use client::{error, notifier, Client, ClientConfig, Eth1BackendMethod, Eth2Config}; use futures::sync::oneshot; use futures::Future; use slog::{error, info}; use std::cell::RefCell; use std::path::Path; use std::path::PathBuf; +use store::Store; use store::{DiskStore, MemoryStore}; use tokio::runtime::Builder; use tokio::runtime::Runtime; use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; -use types::{InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; +use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; /// Reads the configuration and initializes a `BeaconChain` with the required types and parameters. /// @@ -44,63 +42,29 @@ pub fn run_beacon_node( info!( log, - "BeaconNode init"; - "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), - "data_dir" => format!("{:?}", other_client_config.data_dir()), - "network_dir" => format!("{:?}", other_client_config.network.network_dir), - "spec_constants" => &spec_constants, + "Starting beacon node"; + "p2p_listen_address" => format!("{}", &other_client_config.network.listen_address), "db_type" => &other_client_config.db_type, + "spec_constants" => &spec_constants, ); + macro_rules! run_client { + ($store: ty, $eth_spec: ty) => { + run::<$store, $eth_spec>(&db_path, client_config, eth2_config, executor, runtime, log) + }; + } + + if let Eth1BackendMethod::Web3 { .. } = client_config.eth1_backend_method { + return Err("Starting from web3 backend is not supported for interop.".into()); + } + match (db_type.as_str(), spec_constants.as_str()) { - ("disk", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), + ("disk", "minimal") => run_client!(DiskStore, MinimalEthSpec), + ("disk", "mainnet") => run_client!(DiskStore, MainnetEthSpec), + ("disk", "interop") => run_client!(DiskStore, InteropEthSpec), + ("memory", "minimal") => run_client!(MemoryStore, MinimalEthSpec), + ("memory", "mainnet") => run_client!(MemoryStore, MainnetEthSpec), + ("memory", "interop") => run_client!(MemoryStore, InteropEthSpec), (db_type, spec) => { error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) @@ -109,7 +73,7 @@ pub fn run_beacon_node( } /// Performs the type-generic parts of launching a `BeaconChain`. 
-fn run( +fn run( db_path: &Path, client_config: ClientConfig, eth2_config: Eth2Config, @@ -118,12 +82,13 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, - T::Store: OpenDatabase, + S: Store + Clone + 'static + OpenDatabase, + E: EthSpec, { - let store = T::Store::open_database(&db_path)?; + let store = S::open_database(&db_path)?; - let client: Client = Client::new(client_config, eth2_config, store, log.clone(), &executor)?; + let client: Client = + Client::new(client_config, eth2_config, store, log.clone(), &executor)?; // run service until ctrl-c let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml new file mode 100644 index 000000000..48f046e07 --- /dev/null +++ b/beacon_node/websocket_server/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "websocket_server" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +beacon_chain = { path = "../beacon_chain" } +clap = "2.32.0" +exit-future = "0.1.3" +futures = "0.1.25" +serde = "1.0" +serde_derive = "1.0" +serde_json = "^1.0" +slog = "^2.2.3" +tokio = "0.1.16" +types = { path = "../../eth2/types" } +ws = "0.9" diff --git a/beacon_node/websocket_server/src/config.rs b/beacon_node/websocket_server/src/config.rs new file mode 100644 index 000000000..c07f0da83 --- /dev/null +++ b/beacon_node/websocket_server/src/config.rs @@ -0,0 +1,45 @@ +use clap::ArgMatches; +use serde::{Deserialize, Serialize}; +use std::net::Ipv4Addr; + +/// The core configuration for the Lighthouse websocket server. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + /// The IPv4 address the websocket server will listen on. + pub listen_address: Ipv4Addr, + /// The port the websocket server will listen on. + pub port: u16, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: true, + listen_address: Ipv4Addr::new(127, 0, 0, 1), + port: 5053, + } + } +} + +impl Config { + pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { + if args.is_present("no-ws") { + self.enabled = false; + } + + if let Some(rpc_address) = args.value_of("ws-address") { + self.listen_address = rpc_address + .parse::() + .map_err(|_| "ws-address is not a valid IPv4 address.")?; + } + + if let Some(rpc_port) = args.value_of("ws-port") { + self.port = rpc_port + .parse::() + .map_err(|_| "ws-port is not a valid u16.")?; + } + + Ok(()) + } +} diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs new file mode 100644 index 000000000..c161224c7 --- /dev/null +++ b/beacon_node/websocket_server/src/lib.rs @@ -0,0 +1,117 @@ +use beacon_chain::events::{EventHandler, EventKind}; +use futures::Future; +use slog::{debug, error, info, warn, Logger}; +use std::marker::PhantomData; +use std::thread; +use tokio::runtime::TaskExecutor; +use types::EthSpec; +use ws::{Sender, WebSocket}; + +mod config; + +pub use config::Config; + +pub struct WebSocketSender { + sender: Option, + _phantom: PhantomData, +} + +impl WebSocketSender { + /// Creates a dummy websocket server that never starts and where all future calls are no-ops.
+ pub fn dummy() -> Self { + Self { + sender: None, + _phantom: PhantomData, + } + } + + pub fn send_string(&self, string: String) -> Result<(), String> { + if let Some(sender) = &self.sender { + sender + .send(string) + .map_err(|e| format!("Unable to broadcast to websocket clients: {:?}", e)) + } else { + Ok(()) + } + } +} + +impl EventHandler for WebSocketSender { + fn register(&self, kind: EventKind) -> Result<(), String> { + self.send_string( + serde_json::to_string(&kind) + .map_err(|e| format!("Unable to serialize event: {:?}", e))?, + ) + } +} + +pub fn start_server( + config: &Config, + executor: &TaskExecutor, + log: &Logger, +) -> Result<(WebSocketSender, exit_future::Signal), String> { + let server_string = format!("{}:{}", config.listen_address, config.port); + + info!( + log, + "Websocket server starting"; + "listen_address" => &server_string + ); + + // Create a server that simply ignores any incoming messages. + let server = WebSocket::new(|_| |_| Ok(())) + .map_err(|e| format!("Failed to initialize websocket server: {:?}", e))?; + + let broadcaster = server.broadcaster(); + + // Produce a signal/channel that can gracefully shutdown the websocket server. + let exit_signal = { + let (exit_signal, exit) = exit_future::signal(); + + let log_inner = log.clone(); + let broadcaster_inner = server.broadcaster(); + let exit_future = exit.and_then(move |_| { + if let Err(e) = broadcaster_inner.shutdown() { + warn!( + log_inner, + "Websocket server errored on shutdown"; + "error" => format!("{:?}", e) + ); + } else { + info!(log_inner, "Websocket server shutdown"); + } + Ok(()) + }); + + // Place a future on the executor that will shutdown the websocket server when the + // application exits. + executor.spawn(exit_future); + + exit_signal + }; + + let log_inner = log.clone(); + let _handle = thread::spawn(move || match server.listen(server_string) { + Ok(_) => { + debug!( + log_inner, + "Websocket server thread stopped"; + ); + } + Err(e) => { + error!( + log_inner, + "Websocket server failed to start"; + "error" => format!("{:?}", e) + ); + } + }); + + Ok(( + WebSocketSender { + sender: Some(broadcaster), + _phantom: PhantomData, + }, + exit_signal, + )) +} diff --git a/book/.gitignore b/book/.gitignore new file mode 100644 index 000000000..7585238ef --- /dev/null +++ b/book/.gitignore @@ -0,0 +1 @@ +book diff --git a/book/book.toml b/book/book.toml new file mode 100644 index 000000000..829c7b99c --- /dev/null +++ b/book/book.toml @@ -0,0 +1,6 @@ +[book] +authors = ["Paul Hauner"] +language = "en" +multilingual = false +src = "src" +title = "Lighthouse" diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md new file mode 100644 index 000000000..01613f9fd --- /dev/null +++ b/book/src/SUMMARY.md @@ -0,0 +1,11 @@ +# Summary + +* [Introduction](./intro.md) +* [Development Environment](./setup.md) +* [Websocket Interface](./websockets.md) +* [Simple Local Testnet](./simple-testnet.md) +* [Interop](./interop.md) + * [Environment](./interop-environment.md) + * [CLI Overview](./interop-cli.md) + * [Scenarios](./interop-scenarios.md) + * [Cheat-sheet](./interop-cheat-sheet.md) diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md new file mode 100644 index 000000000..7fea539ea --- /dev/null +++ b/book/src/interop-cheat-sheet.md @@ -0,0 +1,149 @@ +# Interop Cheat-sheet + +This document contains a list of tips and tricks that may be useful during +interop testing. 
diff --git a/book/.gitignore b/book/.gitignore
new file mode 100644
index 000000000..7585238ef
--- /dev/null
+++ b/book/.gitignore
@@ -0,0 +1 @@
+book
diff --git a/book/book.toml b/book/book.toml
new file mode 100644
index 000000000..829c7b99c
--- /dev/null
+++ b/book/book.toml
@@ -0,0 +1,6 @@
+[book]
+authors = ["Paul Hauner"]
+language = "en"
+multilingual = false
+src = "src"
+title = "Lighthouse"
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
new file mode 100644
index 000000000..01613f9fd
--- /dev/null
+++ b/book/src/SUMMARY.md
@@ -0,0 +1,11 @@
+# Summary
+
+* [Introduction](./intro.md)
+* [Development Environment](./setup.md)
+* [Websocket Interface](./websockets.md)
+* [Simple Local Testnet](./simple-testnet.md)
+* [Interop](./interop.md)
+    * [Environment](./interop-environment.md)
+    * [CLI Overview](./interop-cli.md)
+    * [Scenarios](./interop-scenarios.md)
+    * [Cheat-sheet](./interop-cheat-sheet.md)
diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md
new file mode 100644
index 000000000..7fea539ea
--- /dev/null
+++ b/book/src/interop-cheat-sheet.md
@@ -0,0 +1,149 @@
+# Interop Cheat-sheet
+
+This document contains a list of tips and tricks that may be useful during
+interop testing.
+
+- When starting a beacon node:
+    - [Specify a boot node by multiaddr](#boot-node-multiaddr)
+    - [Specify a boot node by ENR](#boot-node-enr)
+    - [Avoid port clashes when starting multiple nodes](#port-bump)
+    - [Specify a custom slot time](#slot-time)
+- Using the beacon node HTTP API:
+    - [Pretty-print the genesis state and state root](#http-state)
+    - [Curl a node's ENR](#http-enr)
+    - [Curl a node's connected peers](#http-peer-ids)
+    - [Curl a node's local peer id](#http-peer-id)
+    - [Curl a node's listening multiaddrs](#http-listen-addresses)
+    - [Curl a node's beacon chain head](#http-head)
+    - [Curl a node's finalized checkpoint](#http-finalized)
+
+## Category: CLI
+
+The `--help` flag provides detail on the CLI interface. Here are some
+interop-specific CLI commands.
+
+<a name="boot-node-multiaddr"></a>
+### Specify a boot node by multiaddr
+
+You can specify a static list of multiaddrs when booting Lighthouse using
+the `--libp2p-addresses` flag.
+
+#### Example:
+
+```
+$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000
+```
+
+<a name="boot-node-enr"></a>
+### Specify a boot node by ENR
+
+You can specify a static list of Discv5 addresses when booting Lighthouse using
+the `--boot-nodes` flag.
+
+#### Example:
+
+```
+$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5
+```
+
+<a name="port-bump"></a>
+### Avoid port clashes when starting multiple nodes
+
+Starting a second Lighthouse node on the same machine will fail due to TCP/UDP
+port collisions. Use the `-b` (`--port-bump`) flag to increase all listening
+ports by some `n`.
+
+#### Example:
+
+Increase all ports by `10` (using multiples of `10` is recommended).
+
+```
+$ ./beacon_node -b 10
+```
+
+<a name="slot-time"></a>
+### Start a testnet with a custom slot time
+
+Lighthouse can run at quite low slot times when there are few validators (e.g.,
+`500 ms` slot times should be fine for 8 validators).
+
+#### Example
+
+The `-t` (`--slot-time`) flag specifies the milliseconds per slot.
+
+```
+$ ./beacon_node testnet -t 500 recent 8
+```
+
+> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with
+> this flag.
+
+## Category: HTTP API
+
+Examples assume there is a Lighthouse node exposing an HTTP API on
+`localhost:5052`. Responses are JSON.
+
+<a name="http-state"></a>
+### Pretty-print the genesis state and state root
+
+Returns the genesis state and state root in your terminal, in YAML.
+
+```
+$ curl --header "Content-Type: application/yaml" "localhost:5052/beacon/state?slot=0"
+```
+
+<a name="http-enr"></a>
+### Get the node's ENR
+
+```
+$ curl localhost:5052/network/enr
+
+"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"%
+```
+
+<a name="http-peer-ids"></a>
+### Get a list of connected peer ids
+
+```
+$ curl localhost:5052/network/peers
+
+["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]%
+```
+
+<a name="http-peer-id"></a>
+### Get the node's peer id
+
+```
+$ curl localhost:5052/network/peer_id
+
+"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"%
+```
+
+<a name="http-listen-addresses"></a>
+### Get the list of listening libp2p addresses
+
+Lists all the libp2p multiaddrs that the node is listening on.
+
+```
+$ curl localhost:5052/network/listen_addresses
+
+["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]%
+```
+
+<a name="http-head"></a>
+### Get the node's beacon chain head
+
+```
+$ curl localhost:5052/beacon/head
+
+{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}%
+```
+
+<a name="http-finalized"></a>
+### Get the node's finalized checkpoint
+
+```
+$ curl localhost:5052/beacon/latest_finalized_checkpoint
+
+{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}%
+```
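Editor's note: the same endpoints are straightforward to consume programmatically. A minimal sketch in Rust, assuming the `reqwest` crate (with its `blocking` feature) and `serde_json`, and a node serving HTTP on the default `localhost:5052`:

```rust
// Sketch: fetch the canonical head and print its slot.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let body = reqwest::blocking::get("http://localhost:5052/beacon/head")?.text()?;
    let head: serde_json::Value = serde_json::from_str(&body)?;
    println!("head slot: {}", head["slot"]);
    Ok(())
}
```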
diff --git a/book/src/interop-cli.md b/book/src/interop-cli.md
new file mode 100644
index 000000000..3dad845f3
--- /dev/null
+++ b/book/src/interop-cli.md
@@ -0,0 +1,29 @@
+# Interop CLI Overview
+
+The Lighthouse CLI has two primary tasks:
+
+- **Resuming** an existing database with `$ ./beacon_node`.
+- **Creating** a new testnet database using `$ ./beacon_node testnet`.
+
+_See [Scenarios](./interop-scenarios.md) for methods we've anticipated will be
+used during interop._
+
+## Creating a new database
+
+There are several methods for creating a new beacon node database:
+
+- `quick`: using the `(validator_count, genesis_time)` tuple.
+- `recent`: as above but `genesis_time` is set to the start of some recent time
+  window.
+- `file`: loads the genesis file from disk in one of multiple formats.
+- `bootstrap`: a Lighthouse-specific method where we connect to a running node
+  and download its specification and genesis state via the HTTP API.
+
+See `$ ./beacon_node testnet --help` for more detail.
+
+## Resuming from an existing database
+
+Once a database has been created, it can be resumed by running `$ ./beacon_node`.
+
+Presently, this command will fail if no existing database is found. You must
+use the `$ ./beacon_node testnet` command to create a new database.
diff --git a/book/src/interop-environment.md b/book/src/interop-environment.md
new file mode 100644
index 000000000..6d3568e29
--- /dev/null
+++ b/book/src/interop-environment.md
@@ -0,0 +1,30 @@
+# Interop Environment
+
+All that is required for interop is a built and tested [development
+environment](./setup.md).
+
+## Repositories
+
+You will only require the [sigp/lighthouse](http://github.com/sigp/lighthouse)
+repository.
+
+To allow for faster build/test iterations we will use the
+[`interop`](https://github.com/sigp/lighthouse/tree/interop) branch of
+[sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) for
+September 2019 interop. **Please ensure you `git checkout interop` after
+cloning the repo.**
+
+## File System
+
+When Lighthouse boots, it will create the following
+directories:
+
+- `~/.lighthouse`: database and configuration for the beacon node.
+- `~/.lighthouse-validator`: database and configuration for the validator
+  client.
+
+After building the binaries with `cargo build --release --all`, there will be a
+`target/release` directory in the root of the Lighthouse repository. This is
+where the `beacon_node` and `validator_client` binaries are located.
+
+You do not need to create any of these directories manually.
diff --git a/book/src/interop-scenarios.md b/book/src/interop-scenarios.md
new file mode 100644
index 000000000..5e44d822a
--- /dev/null
+++ b/book/src/interop-scenarios.md
@@ -0,0 +1,101 @@
+# Interop Scenarios
+
+Here we demonstrate some expected interop scenarios.
+
+All scenarios assume a working [development environment](./setup.md) and
+commands are run from the `target/release` directory (the build directory
+for `cargo`).
+
+Additional functions can be found in the [interop
+cheat-sheet](./interop-cheat-sheet.md).
+
+### Table of contents
+
+- [Starting from a `(validator_count, genesis_time)` tuple](#quick-start)
+- [Starting a node from a genesis state file](#state-file)
+- [Starting a validator client](#val-client)
+- [Exporting a genesis state file](#export) from a running Lighthouse
+  node
+
+<a name="quick-start"></a>
+### Start beacon node given a validator count and genesis_time
+
+To start a brand-new beacon node (with no history) use:
+
+```
+$ ./beacon_node testnet -f quick 8 <GENESIS_TIME>
+```
+
+Where `GENESIS_TIME` is in [unix time](https://duckduckgo.com/?q=unix+time&t=ffab&ia=answer).
+
+> Notes:
+>
+> - This method conforms to the ["Quick-start
+genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis)
+method in the `ethereum/eth2.0-pm` repository.
+> - The `-f` flag ignores any existing database or configuration, backing them
+>   up before re-initializing.
+> - `8` is the validator count and `1567222226` is an example genesis time.
+> - See `$ ./beacon_node testnet quick --help` for more configuration options.
+
+<a name="state-file"></a>
+### Start a beacon node given a genesis state file
+
+A genesis state can be read from file using the `testnet file` subcommand.
+There are three supported formats:
+
+- `ssz` (default)
+- `json`
+- `yaml`
+
+Start a new node using `/tmp/genesis.ssz` as the genesis state:
+
+```
+$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz
+```
+
+> Notes:
+>
+> - The `-f` flag ignores any existing database or configuration, backing them
+>   up before re-initializing.
+> - See `$ ./beacon_node testnet file --help` for more configuration options.
+> - The `--spec` flag is required to allow SSZ parsing of fixed-length lists.
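Editor's note: for reference, a state that is already in memory can be written to the `ssz` format accepted above using the workspace's SSZ encoding. A sketch assuming the repo's `types` and `eth2_ssz` crates (the plumbing that builds the state is omitted):

```rust
use ssz::Encode;
use types::{BeaconState, MinimalEthSpec};

// Sketch: serialize a genesis state into the `ssz` file format accepted by
// `./beacon_node testnet -f file ssz /tmp/genesis.ssz`.
fn write_genesis(state: &BeaconState<MinimalEthSpec>) -> std::io::Result<()> {
    std::fs::write("/tmp/genesis.ssz", state.as_ssz_bytes())
}
```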
+
+<a name="val-client"></a>
+### Start an auto-configured validator client
+
+To start a brand-new validator client (with no history) use:
+
+```
+$ ./validator_client testnet -b insecure 0 8
+```
+
+> Notes:
+>
+> - The `-b` flag means the validator client will "bootstrap" specs and config
+>   from the beacon node.
+> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation)
+>   will be used.
+> - The `0 8` indicates that this validator client should manage 8 validators,
+>   starting at validator 0 (the first deposited validator).
+> - The validator client will try to connect to the beacon node at `localhost`.
+>   See `--help` to configure that address and other features.
+> - The validator client will operate very unsafely in `testnet` mode, happily
+>   swapping between chains and creating double-votes.
+
+<a name="export"></a>
+### Exporting a genesis file
+
+Genesis states can be downloaded from a running Lighthouse node via the HTTP API.
+Three content-types are supported:
+
+- `application/json`
+- `application/yaml`
+- `application/ssz`
+
+Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`:
+
+```
+$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz
+```
diff --git a/book/src/interop.md b/book/src/interop.md
new file mode 100644
index 000000000..cb119d59d
--- /dev/null
+++ b/book/src/interop.md
@@ -0,0 +1,11 @@
+# Lighthouse Interop Guide
+
+This guide is intended for other Ethereum 2.0 client developers performing
+interoperability testing with Lighthouse.
+
+## Chapters
+
+- Read about the required [development environment](./interop-environment.md).
+- Get an [overview](./interop-cli.md) of the Lighthouse CLI.
+- See how we expect to handle some [interop scenarios](./interop-scenarios.md).
+- See the [interop cheat-sheet](./interop-cheat-sheet.md) for useful CLI tips.
diff --git a/book/src/intro.md b/book/src/intro.md
new file mode 100644
index 000000000..ccf867a54
--- /dev/null
+++ b/book/src/intro.md
@@ -0,0 +1,27 @@
+# Lighthouse Documentation
+
+[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Chat Badge]][Chat Link]
+
+[Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg
+[Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines
+[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da
+[Chat Link]: https://discord.gg/cyAszAh
+[Doc Status]: https://img.shields.io/badge/rust--docs-master-orange
+[Doc Link]: http://lighthouse-docs.sigmaprime.io/
+
+Lighthouse is an **Ethereum 2.0 client** that connects to other Ethereum 2.0
+clients to form a resilient and decentralized proof-of-stake blockchain.
+
+It is written in Rust, maintained by Sigma Prime and funded by the Ethereum
+Foundation, Consensys and other individuals and organisations.
+
+## Developer Resources
+
+Documentation is presently targeted at **researchers and developers**. It
+assumes significant prior knowledge of Ethereum 2.0.
+
+Topics:
+
+- Get started with [development environment setup](./setup.md).
+- See the [interop docs](./interop.md).
+- [Run a simple testnet](./simple-testnet.md) in Only Three CLI Commands™.
diff --git a/book/src/setup.md b/book/src/setup.md
new file mode 100644
index 000000000..532de3fc0
--- /dev/null
+++ b/book/src/setup.md
@@ -0,0 +1,81 @@
+# Development Environment Setup
+
+Follow this guide to get a Lighthouse development environment up-and-running.
+
+See the [Quick instructions](#quick-instructions) for a summary or the
+[Detailed instructions](#detailed-instructions) for clarification.
+
+## Quick instructions
+
+1. Install Rust + Cargo with [rustup](https://rustup.rs/).
+1. Install build dependencies using your package manager.
+    - `clang protobuf libssl-dev cmake`
+1. Clone the [sigp/lighthouse](https://github.com/sigp/lighthouse) repository.
+1. In the root of the repo, run the tests with `cargo test --all --release`.
+1. Then, build the binaries with `cargo build --all --release`.
+1. Lighthouse is now fully built and tested.
+
+_Note: first-time compilation may take several minutes._
+
+## Detailed instructions
+
+A fully-featured development environment can be achieved with the following
+steps:
+
+ 1. Install [rustup](https://rustup.rs/).
+ 1. Use the command `rustup show` to get information about the Rust
+    installation. You should see that the active tool-chain is the stable
+    version.
+    - Updates can be performed with `rustup update`; Lighthouse generally
+      requires a recent version of Rust.
+ 1. Install build dependencies (Arch packages are listed here, your
+    distribution will likely be similar):
+    - `clang`: required by RocksDB.
+    - `protobuf`: required for protobuf serialization (gRPC).
+    - `libssl-dev`: also required for gRPC.
+    - `cmake`: required for building protobuf.
+ 1. Clone the repository: `git clone
+    https://github.com/sigp/lighthouse`.
+ 1. Change directory to the root of the repository.
+ 1. Run the test suite with `cargo test --all --release`. The build and test
+    process can take several minutes. If you experience any failures on
+    `master`, please raise an
+    [issue](https://github.com/sigp/lighthouse/issues).
+
+### Notes:
+
+Lighthouse targets Rust `stable` but generally runs on `nightly` too.
+
+#### Note for Windows users:
+
+Perl may also be required to build Lighthouse. You can install [Strawberry
+Perl](http://strawberryperl.com/), or alternatively install it with
+Chocolatey: `choco install strawberryperl`.
+
+Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues
+compiling in Windows. You can pin a known-working version by editing the
+dependency in `protos/Cargo.toml` to `protoc-grpcio = "<=0.3.0"`.
+
+## eth2.0-spec-tests
+
+The
+[ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests/)
+repository contains a large set of tests that verify Lighthouse behaviour
+against the Ethereum Foundation specifications.
+
+The `tests/ef_tests` crate runs these tests and it has some interesting
+behaviours:
+
+- If the `tests/ef_tests/eth2.0-spec-tests` directory is not present, all tests
+  indicate a `pass` when they did not actually run.
+- If that directory _is_ present, the tests are executed faithfully, failing if
+  a discrepancy is found.
+
+The `tests/ef_tests/eth2.0-spec-tests` directory is not present by default. To
+obtain it, use the Makefile in the root of the repository:
+
+```
+make ef_tests
+```
+
+_Note: this will download 100+ MB of test files from the
+[ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests/)
+releases._
diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md
new file mode 100644
index 000000000..b6fa19d6f
--- /dev/null
+++ b/book/src/simple-testnet.md
@@ -0,0 +1,81 @@
+# Simple Local Testnet
+
+You can set up a local, two-node testnet in **Only Three CLI Commands™**.
+
+Follow the [Quick instructions](#quick-instructions) if you're confident, or
+see the [Detailed instructions](#detailed-instructions) for more.
+
+
+## Quick instructions
+
+Set up a development environment, build the project and navigate to the
+`target/release` directory.
+
+1. Start the first node: `$ ./beacon_node testnet -f recent 8`
+1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8`
+1. Start another node: `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052`
+
+_Repeat #3 to add more nodes._
+
+## Detailed instructions
+
+First, set up a Lighthouse development environment and navigate to the
+`target/release` directory (this is where the binaries are located).
+
+## Starting the Beacon Node
+
+Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using:
+
+```
+$ ./beacon_node testnet -f recent 8
+```
+
+> Notes:
+>
+> - The `-f` flag ignores any existing database or configuration, backing them
+>   up before re-initializing.
+> - `8` is the number of validators with deposits in the genesis state.
+> - See `$ ./beacon_node testnet recent --help` for more configuration options,
+>   including `minimal`/`mainnet` specification.
+
+## Starting the Validator Client
+
+In a new terminal window, start the validator client with:
+
+```
+$ ./validator_client testnet -b insecure 0 8
+```
+
+> Notes:
+>
+> - The `-b` flag means the validator client will "bootstrap" specs and config
+>   from the beacon node.
+> - The `insecure` command uses predictable, well-known private keys. Since
+>   this is just a local testnet, these are fine.
+> - The `0 8` indicates that this validator client should manage 8 validators,
+>   starting at validator 0 (the first deposited validator).
+> - The validator client will try to connect to the beacon node at `localhost`.
+>   See `--help` to configure that address and other features.
+
+## Adding another Beacon Node
+
+You may connect another (non-validating) node to your local network using the
+Lighthouse `bootstrap` command.
+
+In a new terminal, run:
+
+
+```
+$ ./beacon_node -b 10 testnet -r bootstrap
+```
+
+> Notes:
+>
+> - The `-b` (or `--port-bump`) flag increases all the listening TCP/UDP ports
+>   of the new node by `10`. Your first node's HTTP server was at TCP
+>   `5052` but this one will be at `5062`.
+> - The `-r` flag creates a new data directory with a random string appended
+>   (avoids data directory collisions between nodes).
+> - The default bootstrap HTTP address is `http://localhost:5052`. The new node
+>   will download configuration via HTTP before starting sync via libp2p.
+> - See `$ ./beacon_node testnet bootstrap --help` for more configuration.
diff --git a/book/src/testnets.md b/book/src/testnets.md
new file mode 100644
index 000000000..180673fb3
--- /dev/null
+++ b/book/src/testnets.md
@@ -0,0 +1,10 @@
+# Testnets
+
+Lighthouse does not offer a public testnet _yet_. In the meantime, it's easy to
+start a local testnet:
+
+- [Run a simple testnet](simple-testnet.html) in Only Three CLI Commands™.
+- Developers of other Eth2 clients should see the [interop guide](interop.html).
+- The [sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repo
+  contains a `docker-compose` setup that runs a multi-node network with
+  built-in metrics and monitoring dashboards, all from your local machine.
diff --git a/book/src/websockets.md b/book/src/websockets.md
new file mode 100644
index 000000000..2b91bd88f
--- /dev/null
+++ b/book/src/websockets.md
@@ -0,0 +1,108 @@
+# Websocket Interface
+
+By default, a Lighthouse `beacon_node` exposes a websocket server on `localhost:5053`.
+
+The following CLI flags control the websocket server:
+
+- `--no-ws`: disable the websocket server.
+- `--ws-port`: specify the listen port of the server.
+- `--ws-address`: specify the listen address of the server.
+
+All clients connected to the websocket server will receive the same stream of events, all triggered
+by the `BeaconChain`. Each event is a JSON object with the following schema:
+
+```json
+{
+    "event": "string",
+    "data": "object"
+}
+```
+
+## Events
+
+The following events may be emitted:
+
+### Beacon Head Changed
+
+Occurs whenever the canonical head of the beacon chain changes.
+
+```json
+{
+    "event": "beacon_head_changed",
+    "data": {
+        "reorg": "boolean",
+        "current_head_beacon_block_root": "string",
+        "previous_head_beacon_block_root": "string"
+    }
+}
+```
+
+### Beacon Finalization
+
+Occurs whenever the finalized checkpoint of the canonical head changes.
+ +```json +{ + "event": "beacon_finalization", + "data": { + "epoch": "number", + "root": "string" + } +} +``` + +### Beacon Block Imported + +Occurs whenever the beacon node imports a valid block. + +```json +{ + "event": "beacon_block_imported", + "data": { + "block": "object" + } +} +``` + +### Beacon Block Rejected + +Occurs whenever the beacon node rejects a block because it is invalid or an +error occurred during validation. + +```json +{ + "event": "beacon_block_rejected", + "data": { + "reason": "string", + "block": "object" + } +} +``` + +### Beacon Attestation Imported + +Occurs whenever the beacon node imports a valid attestation. + +```json +{ + "event": "beacon_attestation_imported", + "data": { + "attestation": "object" + } +} +``` + +### Beacon Attestation Rejected + +Occurs whenever the beacon node rejects an attestation because it is invalid or +an error occurred during validation. + +```json +{ + "event": "beacon_attestation_rejected", + "data": { + "reason": "string", + "attestation": "object" + } +} +``` diff --git a/docs/rest_oapi.yaml b/docs/api_spec.yaml similarity index 59% rename from docs/rest_oapi.yaml rename to docs/api_spec.yaml index dea892c18..23608807e 100644 --- a/docs/rest_oapi.yaml +++ b/docs/api_spec.yaml @@ -2,7 +2,7 @@ openapi: "3.0.2" info: title: "Lighthouse REST API" description: "" - version: "0.1.0" + version: "0.2.0" license: name: "Apache 2.0" url: "https://www.apache.org/licenses/LICENSE-2.0.html" @@ -13,6 +13,8 @@ tags: description: Endpoints which will be implemented for phase 1 of Ethereum Serenity - name: Future description: Potential future endpoints or optional nice-to-haves + - name: RFC + description: Do we need these endpoints at all? This is a request for comments if you think they're useful. paths: /node/version: @@ -47,21 +49,6 @@ paths: 500: $ref: '#/components/responses/InternalError' - /node/deposit_contract: - get: - tags: - - Phase0 - summary: "Get the address of the Ethereum 1 deposit contract." - description: "Requests the address of the deposit contract on the Ethereum 1 chain, which was used to start the current beacon chain." - responses: - 200: - description: Request successful - content: - application/json: - schema: - $ref: '#/components/schemas/ethereum_address' - 500: - $ref: '#/components/responses/InternalError' /node/syncing: get: @@ -85,55 +72,23 @@ paths: 500: $ref: '#/components/responses/InternalError' - /node/fork: + /network/enr: get: tags: - Phase0 - summary: "Get fork information from running beacon node." - description: "Requests the beacon node to provide which fork version it is currently on." + summary: "Get the node's Ethereum Node Record (ENR)." + description: "The Ethereum Node Record (ENR) contains a compressed public key, an IPv4 address, a TCP port and a UDP port, which is all encoded using base64. This endpoint fetches the base64 encoded version of the ENR for the running beacon node." responses: 200: description: Request successful content: application/json: schema: - type: object - properties: - fork: - $ref: '#/components/schemas/Fork' - chain_id: - type: integer - format: uint64 - description: "Sometimes called the network id, this number discerns the active chain for the beacon node. Analogous to Eth1.0 JSON-RPC net_version." + $ref: '#/components/schemas/ENR' 500: $ref: '#/components/responses/InternalError' - /node/stats: - get: - tags: - - Future - summary: "Get operational information about the node." 
- description: "Fetches some operational information about the node's process, such as memory usage, database size, etc." - responses: - 200: - description: Request successful - content: - application/json: - schema: - type: object - properties: - memory_usage: - type: integer - format: uint64 - description: "The amount of memory used by the currently running beacon node process, expressed in bytes." - uptime: - type: integer - format: uint64 - description: "The number of seconds that have elapsed since beacon node process was started." - #TODO: what other useful process information could be expressed here? - - - /node/network/peer_count: + /network/peer_count: get: tags: - Phase0 @@ -148,8 +103,29 @@ paths: type: integer format: uint64 example: 25 + 500: + $ref: '#/components/responses/InternalError' - /node/network/peers: + /network/peer_id: + get: + tags: + - Phase0 + summary: "Get the node's libp2p peer ID." + description: "Requests the node to provide it's libp2p ['peer ID'](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md), which is a base58 encoded SHA2-256 'multihash' of the node's public key struct." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: string + format: byte + example: "QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N" + pattern: "^[1-9A-HJ-NP-Za-km-z]{46}$" + 500: + $ref: '#/components/responses/InternalError' + + /network/peers: get: tags: - Phase0 @@ -164,32 +140,49 @@ paths: type: array items: $ref: '#/components/schemas/Peer' + 500: + $ref: '#/components/responses/InternalError' - /node/network/listening: + /network/listen_port: get: tags: - Phase0 - summary: "Identify if the beacon node is listening for networking connections, and on what address." - description: "Requests that the beacon node identify whether it is listening for incoming networking connections, and if so, what network address(es) are being used." + summary: "Get the TCP port number for the libp2p listener." + description: "Libp2p is configured to listen to a particular TCP port upon startup of the beacon node. This endpoint returns the port number that the beacon node is listening on. Please note, this is for the libp2p communications, not for discovery." responses: 200: description: Request successful content: application/json: schema: - type: object - properties: - listening: - type: boolean - nullable: false - description: "True if the node is listening for incoming network connections. False if networking has been disabled or if the node has been configured to only connect with a static set of peers." - listen_address: - $ref: '#/components/schemas/multiaddr' + type: integer + format: uint16 + example: 9000 + 500: + $ref: '#/components/responses/InternalError' - /node/network/stats: + /network/listen_addresses: get: tags: - - Future + - Phase0 + summary: "Identify the port and addresses listened to by the beacon node." + description: "Libp2p is configured to listen to a particular address, on a particular port. This address is represented the [`multiaddr`](https://multiformats.io/multiaddr/) format, and this endpoint requests the beacon node to list all listening addresses in this format." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/multiaddr' + 500: + $ref: '#/components/responses/InternalError' + + /network/stats: + get: + tags: + - RFC summary: "Get some simple network statistics from the node." 
description: "Request that the beacon node provide some historical summary information about its networking interface." #TODO: Do we actually collect these stats? Should we? @@ -215,10 +208,10 @@ paths: description: "The total number of unique peers (by multiaddr) that have been discovered since the beacon node instance was started." #TODO: This might be too difficult to collect - /node/network/block_discovery: + /network/block_discovery: get: tags: - - Future + - RFC summary: "Identify the time at which particular blocks were first seen." description: "Request the node to provide the time at which particular blocks were first seen on the network." parameters: @@ -252,179 +245,132 @@ paths: format: uint64 description: "UNIX time in milliseconds that the block was first discovered, either from a network peer or the validator client." - - - #TODO: Add the endpoints that enable a validator to join, exit, withdraw, etc. - /validator/duties: + /beacon/head: get: tags: - Phase0 - summary: "Get validator duties for the requested validators." - description: "Requests the beacon node to provide a set of _duties_, which are actions that should be performed by validators, for a particular epoch. Duties should only need to be checked once per epoch, however a chain reorganization (of > MIN_SEED_LOOKAHEAD epochs) could occur, resulting in a change of duties. For full safety, this API call should be polled at every slot to ensure that chain reorganizations are recognized, and to ensure that the beacon node is properly synchronized. If no epoch parameter is provided, then the current epoch is assumed." + summary: "Detail the current perspective of the beacon node." + description: "Request the beacon node to identify the most up-to-date information about the beacon chain from its perspective. This includes the latest block, which slots have been finalized, etc." + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + description: "The latest information about the head of the beacon chain." + properties: + slot: + type: integer + format: uint64 + description: "The slot of the head block." + block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The merkle tree root of the canonical head block in the beacon node." + state_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The merkle tree root of the current beacon state." + finalized_slot: + type: integer + format: uint64 + description: "The slot number of the most recent finalized slot." + finalized_block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root for the most recent finalized block." + justified_slot: + type: integer + format: uint64 + description: "The slot number of the most recent justified slot." + justified_block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root of the most recent justified block." + previous_justified_slot: + type: integer + format: uint64 + description: "The slot number of the second most recent justified slot." + previous_justified_block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root of the second most recent justified block." + 500: + $ref: '#/components/responses/InternalError' + + + /beacon/block: + get: + tags: + - Phase0 + summary: 'Retrieve blocks by root or slot.' 
+ description: "Request that the beacon node return beacon chain blocks that match the provided criteria (a block root or beacon chain slot). Only one of the parameters can be be provided at a time." parameters: - - name: validator_pubkeys - in: query - required: true - description: "An array of hex-encoded BLS public keys" - schema: - type: array - items: - $ref: '#/components/schemas/pubkey' - minItems: 1 - - name: epoch - in: query - required: false - schema: + - name: root + description: "Filter by block root." + in: query + required: false + schema: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + - name: slot + description: "Filter blocks by slot number. Only one block which has been finalized, or is believed to be the canonical block for that slot, is returned." + in: query + required: false + schema: type: integer format: uint64 responses: 200: - description: Success response + description: Success response. content: application/json: schema: type: array items: - $ref: '#/components/schemas/ValidatorDuty' + $ref: '#/components/schemas/BeaconBlock' 400: $ref: '#/components/responses/InvalidRequest' - 406: - description: "Duties cannot be provided for the requested epoch." 500: $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - /validator/block: + /beacon/block_root: get: tags: - Phase0 - summary: "Produce a new block, without signature." - description: "Requests a beacon node to produce a valid block, which can then be signed by a validator." + summary: "Retrieve the canonical block root, given a particular slot." + description: "Request that the beacon node return the root of the canonical beacon chain block, which matches the provided slot number." parameters: - - name: slot - in: query - required: true - description: "The slot for which the block should be proposed." - schema: - type: integer - format: uint64 - - name: randao_reveal - in: query - required: true - description: "The validator's randao reveal value." - schema: - type: string - format: byte - responses: - 200: - description: Success response - content: - application/json: - schema: - $ref: '#/components/schemas/BeaconBlock' - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - post: - tags: - - Phase0 - summary: "Publish a signed block." - description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: beacon_block - in: query - required: true - description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed." - schema: - $ref: '#/components/schemas/BeaconBlock' - responses: - 200: - description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." - 202: - description: "The block failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." 
- 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - - /validator/attestation: - get: - tags: - - Phase0 - summary: "Produce an attestation, without signature." - description: "Requests that the beacon node produce an IndexedAttestation, with a blank signature field, which the validator will then sign." - parameters: - - name: validator_pubkey - in: query - required: true - description: "Uniquely identifying which validator this attestation is to be produced for." - schema: - $ref: '#/components/schemas/pubkey' - - name: poc_bit - in: query - required: true - description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `IndexedAttestation`." - schema: + - name: slot + description: "Filter blocks by slot number. Only one block which has been finalized, or is believed to be the canonical block for that slot, is returned." + in: query + required: true + schema: type: integer - format: uint32 - minimum: 0 - maximum: 1 - - name: slot - in: query - required: true - description: "The slot for which the attestation should be proposed." - schema: - type: integer - - name: shard - in: query - required: true - description: "The shard number for which the attestation is to be proposed." - schema: - type: integer + format: uint64 responses: 200: - description: Success response + description: Success response. content: application/json: schema: - $ref: '#/components/schemas/IndexedAttestation' + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The 0x prefixed block root." 400: $ref: '#/components/responses/InvalidRequest' 500: $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - post: - tags: - - Phase0 - summary: "Publish a signed attestation." - description: "Instructs the beacon node to broadcast a newly signed IndexedAttestation object to the intended shard subnet. The beacon node is not required to validate the signed IndexedAttestation, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: attestation - in: query - required: true - description: "An `IndexedAttestation` structure, as originally provided by the beacon node, but now with the signature field completed." - schema: - $ref: '#/components/schemas/IndexedAttestation' - responses: - 200: - description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." - 202: - description: "The attestation failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - /chain/beacon/blocks: + /beacon/blocks: get: tags: - Phase0 @@ -468,59 +414,25 @@ paths: $ref: '#/components/responses/InvalidRequest' #TODO: Make this request error more specific if one of the parameters is not provided correctly. 
- /chain/beacon/chainhead: + + /beacon/fork: get: tags: - Phase0 - summary: "Detail the current perspective of the beacon node." - description: "Request the beacon node to identify the most up-to-date information about the beacon chain from its perspective. This includes the latest block, which slots have been finalized, etc." + summary: 'Retrieve the current Fork information.' + description: 'Request the beacon node identify the fork it is currently on, from the beacon state.' responses: 200: - description: Success response + description: Success response. content: application/json: schema: - type: object - description: "The latest information about the head of the beacon chain." - properties: - block_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The merkle tree root of the canonical head block in the beacon node." - block_slot: - type: integer - format: uint64 - description: "The slot of the head block." - finalized_slot: - type: integer - format: uint64 - description: "The slot number of the most recent finalized slot." - finalized_block_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The block root for the most recent finalized block." - justified_slot: - type: integer - format: uint64 - description: "The slot number of the most recent justified slot." - justified_block_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The block root of the most recent justified block." - previous_justified_slot: - type: integer - format: uint64 - description: "The slot number of the second most recent justified slot." - previous_justified_block_root: - type: integer - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The block root of the second most recent justified block." + $ref: '#/components/schemas/Fork' + 500: + $ref: '#/components/responses/InternalError' - /chain/beacon/attestations: + + /beacon/attestations: get: tags: - Phase0 @@ -564,7 +476,7 @@ paths: $ref: '#/components/responses/InvalidRequest' #TODO: Make this request error more specific if one of the parameters is not provided correctly. - /chain/beacon/attestations/pending: + /beacon/attestations/pending: get: tags: - Phase0 @@ -583,7 +495,7 @@ paths: $ref: '#/components/responses/InvalidRequest' #TODO: Make this request error more specific if one of the parameters is not provided correctly. - /chain/beacon/validators: + /beacon/validators: get: tags: - Phase0 @@ -612,12 +524,12 @@ paths: validators: type: array items: - $ref: '#/components/schemas/ValidatorInfo' + $ref: '#/components/schemas/Validator' - /chain/beacon/validators/activesetchanges: + /beacon/validators/activesetchanges: get: tags: - - Phase0 + - RFC summary: "Retrieve the changes in active validator set." description: "Request that the beacon node describe the changes that occurred at the specified epoch, as compared with the prior epoch." parameters: @@ -656,10 +568,10 @@ paths: items: $ref: '#/components/schemas/pubkey' - /chain/beacon/validators/assignments: + /beacon/validators/assignments: get: tags: - - Phase0 + - RFC summary: "Retrieve the assigned responsibilities for validators in a particular epoch." description: "Request that the beacon node list the duties which have been assigned to the active validator set in a particular epoch." parameters: @@ -688,7 +600,7 @@ paths: $ref: '#/components/schemas/ValidatorDuty' #TODO: This does not include the crosslink committee value, which must be included for Phase1? 
- /chain/beacon/validators/indices: + /beacon/validators/indices: get: tags: - Phase0 @@ -714,7 +626,7 @@ paths: items: $ref: '#/components/schemas/ValidatorIndexMapping' - /chain/beacon/validators/pubkeys: + /beacon/validators/pubkeys: get: tags: - Phase0 @@ -742,10 +654,10 @@ paths: items: $ref: '#/components/schemas/ValidatorIndexMapping' - /chain/beacon/validators/balances: + /beacon/validators/balances: get: tags: - - Phase0 + - RFC summary: "Retrieve the balances of validators at a specified epoch." description: "Retrieve the balances of validators at a specified epoch (or the current epoch if none specified). The list of balances can be filtered by providing a list of validator public keys or indices." parameters: @@ -803,10 +715,10 @@ paths: format: uint64 description: "The balance of the validator at the specified epoch, expressed in Gwei" - /chain/beacon/validators/participation: + /beacon/validators/participation: get: tags: - - Phase0 + - RFC summary: "Retrieve aggregate information about validator participation in an epoch." description: "Retrieve some aggregate information about the participation of validators in a specified epoch (or the current epoch if none specified)." parameters: @@ -848,10 +760,10 @@ paths: format: uint64 description: "The total amount of ether, expressed in Gwei, that is eligible for voting in the specified epoch." - /chain/beacon/validators/queue: + /beacon/validators/queue: get: tags: - - Phase0 + - RFC summary: "Retrieve information about the validator queue at the specified epoch." description: "Retrieve information about the queue of validators for the specified epoch (or the current epoch if none specified)." parameters: @@ -889,6 +801,407 @@ paths: items: $ref: '#/components/schemas/pubkey' + #TODO: Add the endpoints that enable a validator to join, exit, withdraw, etc. + /beacon/validator/duties: + get: + tags: + - Phase0 + summary: "Get validator duties for the requested validators." + description: "Requests the beacon node to provide a set of _duties_, which are actions that should be performed by validators, for a particular epoch. Duties should only need to be checked once per epoch, however a chain reorganization (of > MIN_SEED_LOOKAHEAD epochs) could occur, resulting in a change of duties. For full safety, this API call should be polled at every slot to ensure that chain reorganizations are recognized, and to ensure that the beacon node is properly synchronized. If no epoch parameter is provided, then the current epoch is assumed." + parameters: + - name: validator_pubkeys + in: query + required: true + description: "An array of hex-encoded BLS public keys" + schema: + type: array + items: + $ref: '#/components/schemas/pubkey' + minItems: 1 + - name: epoch + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ValidatorDuty' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /beacon/validator/block: + get: + tags: + - Phase0 + summary: "Produce a new block, without signature." + description: "Requests a beacon node to produce a valid block, which can then be signed by a validator." + parameters: + - name: slot + in: query + required: true + description: "The slot for which the block should be proposed." 
+ schema: + type: integer + format: uint64 + - name: randao_reveal + in: query + required: true + description: "The validator's randao reveal value." + schema: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "A valid BLS signature." + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/BeaconBlock' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - Phase0 + summary: "Publish a signed block." + description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" + requestBody: + description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed. Must be sent in JSON format in the body of the request." + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BeaconBlock' + responses: + 200: + description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The block failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /beacon/validator/attestation: + get: + tags: + - Phase0 + summary: "Produce an attestation, without signature." + description: "Requests that the beacon node produce an Attestation, with a blank signature field, which the validator will then sign." + parameters: + - name: validator_pubkey + in: query + required: true + description: "Uniquely identifying which validator this attestation is to be produced for." + schema: + $ref: '#/components/schemas/pubkey' + - name: poc_bit + in: query + required: true + description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `Attestation`." + schema: + type: integer + format: uint32 + minimum: 0 + maximum: 1 + - name: slot + in: query + required: true + description: "The slot for which the attestation should be proposed." + schema: + type: integer + - name: shard + in: query + required: true + description: "The shard number for which the attestation is to be proposed." + schema: + type: integer + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/Attestation' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - Phase0 + summary: "Publish a signed attestation." + description: "Instructs the beacon node to broadcast a newly signed Attestation object to the intended shard subnet. 
The beacon node is not required to validate the signed Attestation, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" + requestBody: + description: "An `Attestation` structure, as originally provided by the beacon node, but now with the signature field completed. Must be sent in JSON format in the body of the request." + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/Attestation' + responses: + 200: + description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The attestation failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /beacon/state: + get: + tags: + - Phase0 + summary: "Get the full beacon state, at a particular slot or block root." + description: "Requests the beacon node to provide the full beacon state object, and the state root, given a particular slot number or block root. If no parameters are provided, the latest slot of the beacon node (the 'head' slot) is used." + parameters: + - name: root + description: "The block root at which the state should be provided." + in: query + required: false + schema: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + - name: slot + description: "The slot number at which the state should be provided." + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + beacon_state: + $ref: '#/components/schemas/BeaconState' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + + /beacon/state_root: + get: + tags: + - Phase0 + summary: "Get the beacon state root, at a particular slot." + description: "Requests the beacon node to provide the root of the beacon state object, given a particular slot number." + parameters: + - name: slot + description: "The slot number at which the state should be provided." + in: query + required: true + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The state root" + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + + /beacon/state/current_finalized_checkpoint: + get: + tags: + - Phase0 + summary: "Get the current finalized checkpoint." + #TODO: is this description correct? + description: "Requests the beacon node to provide the checkpoint for the current finalized epoch." 
+      responses:
+        200:
+          description: Success response
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Checkpoint'
+        500:
+          $ref: '#/components/responses/InternalError'
+
+  /beacon/state/genesis:
+    get:
+      tags:
+        - Phase0
+      summary: "Get the full beacon state, as it was at genesis."
+      description: "Requests the beacon node to provide the full beacon state object and the state root, as it was for the genesis block."
+      responses:
+        200:
+          description: Success response
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/BeaconState'
+            application/yaml:
+              schema:
+                $ref: '#/components/schemas/BeaconState'
+        400:
+          $ref: '#/components/responses/InvalidRequest'
+        500:
+          $ref: '#/components/responses/InternalError'
+
+  /spec:
+    get:
+      tags:
+        - Phase0
+      summary: "Get the current ChainSpec configuration."
+      description: "Requests the beacon node to provide the configuration that it has used to start the beacon chain."
+      responses:
+        200:
+          description: Success response
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ChainSpec'
+        500:
+          $ref: '#/components/responses/InternalError'
+
+  /spec/slots_per_epoch:
+    get:
+      tags:
+        - Phase0
+      summary: "Get the configured number of slots per epoch."
+      description: "The number of slots in each epoch is part of the Eth2.0 spec. This function simply returns an integer representing this value."
+      responses:
+        200:
+          description: Success response
+          content:
+            application/json:
+              schema:
+                type: integer
+                format: uint64
+                example: 64
+        500:
+          $ref: '#/components/responses/InternalError'
+
+  /spec/deposit_contract:
+    get:
+      tags:
+        - Phase0
+      summary: "Get the address of the Ethereum 1 deposit contract."
+      description: "Requests the address of the deposit contract on the Ethereum 1 chain, which was used to start the current beacon chain."
+      responses:
+        200:
+          description: Request successful
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ethereum_address'
+        500:
+          $ref: '#/components/responses/InternalError'
+
+  /spec/eth2_config:
+    get:
+      tags:
+        - Phase0
+      summary: "Gets the Eth2.0 spec, including the identifying string."
+      description: ""
+      responses:
+        200:
+          description: Success response
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  spec_constants:
+                    type: string
+                    example: "mainnet"
+                  spec:
+                    $ref: '#/components/schemas/ChainSpec'
+        500:
+          $ref: '#/components/responses/InternalError'
+
+  /metrics:
+    get:
+      tags:
+        - Phase0
+      summary: "Get Prometheus metrics for the node"
+      description: "Fetches a range of metrics for measuring a node's health. It is intended for this endpoint to be consumed by Prometheus."
+ responses: + 200: + description: Request successful + content: + text/plain: + example: "# HELP beacon_head_state_active_validators_total Count of active validators at the head of the chain + # TYPE beacon_head_state_active_validators_total gauge + beacon_head_state_active_validators_total 16 + # HELP beacon_head_state_current_justified_epoch Current justified epoch at the head of the chain + # TYPE beacon_head_state_current_justified_epoch gauge + beacon_head_state_current_justified_epoch 0 + # HELP beacon_head_state_current_justified_root Current justified root at the head of the chain + # TYPE beacon_head_state_current_justified_root gauge + beacon_head_state_current_justified_root 0 + # HELP beacon_head_state_eth1_deposit_index Eth1 deposit index at the head of the chain + # TYPE beacon_head_state_eth1_deposit_index gauge + beacon_head_state_eth1_deposit_index 16 + # HELP beacon_head_state_finalized_epoch Finalized epoch at the head of the chain + # TYPE beacon_head_state_finalized_epoch gauge + beacon_head_state_finalized_epoch 0 + # HELP beacon_head_state_finalized_root Finalized root at the head of the chain + # TYPE beacon_head_state_finalized_root gauge + beacon_head_state_finalized_root 0 + # HELP beacon_head_state_latest_block_slot Latest block slot at the head of the chain + # TYPE beacon_head_state_latest_block_slot gauge + beacon_head_state_latest_block_slot 0 + # HELP beacon_head_state_previous_justified_epoch Previous justified epoch at the head of the chain + # TYPE beacon_head_state_previous_justified_epoch gauge + beacon_head_state_previous_justified_epoch 0 + # HELP beacon_head_state_previous_justified_root Previous justified root at the head of the chain + # TYPE beacon_head_state_previous_justified_root gauge + beacon_head_state_previous_justified_root 0 + # HELP beacon_head_state_root Root of the block at the head of the chain + # TYPE beacon_head_state_root gauge + beacon_head_state_root -7566315470565629000 + # HELP beacon_head_state_shard_total Count of shards in the beacon chain + # TYPE beacon_head_state_shard_total gauge + beacon_head_state_shard_total 8 + # HELP beacon_head_state_slashed_validators_total Count of all slashed validators at the head of the chain + # TYPE beacon_head_state_slashed_validators_total gauge + beacon_head_state_slashed_validators_total 0" + components: schemas: pubkey: @@ -928,6 +1241,34 @@ components: pattern: "^0x[a-fA-F0-9]{64}$" description: "A hex encoded ethereum address." + ENR: + type: string + format: byte + example: "-IW4QHzEZbIB0YN47bVlsUrGbcL9vl21n7xF5gRKjMNkJ4MxfcwiqrsE7Ows8EnzOvC8P4ZyAjfOhr2ffk0bWAxDGq8BgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjzKzqo5c33ydUUHrWJ4FWwIXJa2MN9BBsgZkj6mhthp" + pattern: "^[^-A-Za-z0-9+/=]+$" + + Shard: + type: integer + format: uint64 + description: "A shard number." + example: 5 + maximum: 1023 + minimum: 0 + + Checkpoint: + type: object + description: "A checkpoint." + properties: + epoch: + type: integer + format: uint64 + description: "The epoch to which the checkpoint applies." + root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "A block root, which is being checkpointed." + Peer: type: object properties: @@ -957,7 +1298,7 @@ components: format: uint64 description: "The global ValidatorIndex value." - ValidatorInfo: + Validator: type: object properties: public_key: @@ -967,6 +1308,16 @@ components: format: bytes pattern: "^0x[a-fA-F0-9]{64}$" description: "The 32 byte hash of the public key which the validator uses for withdrawing their rewards." 
+ example: "0x00ec7ef7780c9d151597924036262dd28dc60e1228f4da6fecf9d402cb3f3594" + effective_balance: + type: integer + format: uint64 + description: "The effective balance of the validator, measured in Gwei." + example: 32000000000 + slashed: + type: boolean + description: "Whether the validator has or has not been slashed." + example: false activation_eligiblity_epoch: type: integer format: uint64 @@ -980,18 +1331,13 @@ components: format: uint64 nullable: true description: "Epoch when the validator was exited, or null if the validator has not exited." + example: 18446744073709551615 withdrawable_epoch: type: integer format: uint64 nullable: true description: "Epoch when the validator is eligible to withdraw their funds, or null if the validator has not exited." - slashed: - type: boolean - description: "Whether the validator has or has not been slashed." - effective_balance: - type: integer - format: uint64 - description: "The effective balance of the validator, measured in Gwei." + example: 18446744073709551615 ValidatorDuty: type: object @@ -1029,6 +1375,25 @@ components: format: uint64 description: "Globally, the estimated most recent slot number, or current target slot number." + Eth1Data: + type: object + description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." + properties: + deposit_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the deposit tree." + deposit_count: + type: integer + format: uint64 + description: "Total number of deposits." + block_hash: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Ethereum 1.x block hash." + BeaconBlock: description: "The [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) object from the Eth2.0 spec." allOf: @@ -1085,24 +1450,7 @@ components: pattern: "^0x[a-fA-F0-9]{192}$" description: "The RanDAO reveal value provided by the validator." eth1_data: - title: Eth1Data - type: object - description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." - properties: - deposit_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the deposit tree." - deposit_count: - type: integer - format: uint64 - description: "Total number of deposits." - block_hash: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Ethereum 1.x block hash." + $ref: '#/components/schemas/Eth1Data' graffiti: type: string format: byte @@ -1130,9 +1478,9 @@ components: description: "The [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) object from the Eth2.0 spec." properties: attestation_1: - $ref: '#/components/schemas/IndexedAttestation' + $ref: '#/components/schemas/Attestation' attestation_2: - $ref: '#/components/schemas/IndexedAttestation' + $ref: '#/components/schemas/Attestation' attestations: type: array items: @@ -1236,6 +1584,161 @@ components: pattern: "^0x[a-fA-F0-9]{192}$" description: "Sender signature." + BeaconState: + type: object + description: "The [`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#beaconstate) object from the Eth2.0 spec." 
+      properties:
+        genesis_time:
+          $ref: '#/components/schemas/genesis_time'
+        slot:
+          type: integer
+          format: uint64
+          description: "The latest slot that the state represents."
+        fork:
+          $ref: '#/components/schemas/Fork'
+        latest_block_header:
+          $ref: '#/components/schemas/BeaconBlockHeader'
+        block_roots:
+          type: array
+          description: "The historical block roots."
+          minLength: 8192
+          maxLength: 8192 #The SLOTS_PER_HISTORICAL_ROOT value from the Eth2.0 spec.
+          items:
+            type: string
+            format: byte
+            pattern: "^0x[a-fA-F0-9]{64}$"
+            description: "A block root"
+        state_roots:
+          type: array
+          description: "The historical state roots."
+          minLength: 8192
+          maxLength: 8192 #The SLOTS_PER_HISTORICAL_ROOT value from the Eth2.0 spec.
+          items:
+            type: string
+            format: byte
+            pattern: "^0x[a-fA-F0-9]{64}$"
+            description: "A state root"
+        historical_roots:
+          type: array
+          description: "The roots of historical batches, each covering both block roots and state roots."
+          maxLength: 16777216 #The HISTORICAL_ROOTS_LIMIT value from the Eth2.0 spec.
+          items:
+            type: string
+            format: byte
+            pattern: "^0x[a-fA-F0-9]{64}$"
+            description: "A historical batch root"
+        eth1_data:
+          $ref: '#/components/schemas/Eth1Data'
+        eth1_data_votes:
+          type: array
+          description: "The validator votes for the Eth1Data."
+          maxLength: 1024 #The SLOTS_PER_ETH1_VOTING_PERIOD value from the Eth2.0 spec.
+          items:
+            $ref: '#/components/schemas/Eth1Data'
+        eth1_deposit_index:
+          type: integer
+          format: uint64
+          description: "The index of the next Eth1 deposit to be processed."
+        validators:
+          type: array
+          description: "A list of the current validators."
+          maxLength: 1099511627776 #The VALIDATOR_REGISTRY_LIMIT value from the Eth2.0 spec.
+          items:
+            $ref: '#/components/schemas/Validator'
+        balances:
+          type: array
+          description: "An array of the validator balances."
+          maxLength: 1099511627776 #The VALIDATOR_REGISTRY_LIMIT value from the Eth2.0 spec.
+          items:
+            type: integer
+            format: uint64
+            description: "The validator balance in Gwei."
+        start_shard:
+          $ref: '#/components/schemas/Shard'
+        randao_mixes:
+          type: array
+          description: "The hashes for the randao mix."
+          minLength: 65536
+          maxLength: 65536 #The EPOCHS_PER_HISTORICAL_VECTOR value from the Eth2.0 spec.
+          items:
+            type: string
+            format: byte
+            pattern: "^0x[a-fA-F0-9]{64}$"
+            description: "A randao mix hash."
+        active_index_roots:
+          type: array
+          description: "Active index digests for light clients."
+          minLength: 65536
+          maxLength: 65536 #The EPOCHS_PER_HISTORICAL_VECTOR value from the Eth2.0 spec.
+          items:
+            type: string
+            format: byte
+            pattern: "^0x[a-fA-F0-9]{64}$"
+            description: "Active index digest"
+        compact_committees_roots:
+          type: array
+          description: "Committee digests for light clients."
+          minLength: 65536
+          maxLength: 65536 #The EPOCHS_PER_HISTORICAL_VECTOR value from the Eth2.0 spec.
+          items:
+            type: string
+            format: byte
+            pattern: "^0x[a-fA-F0-9]{64}$"
+            description: "Committee digest."
+        slashings:
+          type: array
+          description: "Per-epoch sums of slashed effective balances."
+          minLength: 8192
+          maxLength: 8192 #The EPOCHS_PER_SLASHINGS_VECTOR value from the Eth2.0 spec.
+          items:
+            type: integer
+            format: uint64
+            description: "Sum of slashed balance for an epoch."
+        previous_epoch_attestations:
+          type: array
+          description: "A list of attestations in the previous epoch."
+          maxLength: 8192 #MAX_ATTESTATIONS * SLOTS_PER_EPOCH from the Eth2.0 spec.
+          items:
+            $ref: '#/components/schemas/PendingAttestation'
+        current_epoch_attestations:
+          type: array
+          description: "A list of attestations in the current epoch."
+          maxLength: 8192 #MAX_ATTESTATIONS * SLOTS_PER_EPOCH from the Eth2.0 spec.
+          items:
+            $ref: '#/components/schemas/PendingAttestation'
+        previous_crosslinks:
+          type: array
+          description: "The shard crosslinks from the previous epoch."
+          minLength: 1024
+          maxLength: 1024 #The SHARD_COUNT value from the Eth2.0 spec.
+          items:
+            $ref: '#/components/schemas/Crosslink'
+        current_crosslinks:
+          type: array
+          description: "The shard crosslinks for the current epoch."
+          minLength: 1024
+          maxLength: 1024 #The SHARD_COUNT value from the Eth2.0 spec.
+          items:
+            $ref: '#/components/schemas/Crosslink'
+        justification_bits:
+          type: array
+          description: "Bit set for every recent justified epoch."
+          minLength: 4
+          maxLength: 4 #The JUSTIFICATION_BITS_LENGTH from the Eth2.0 spec.
+          items:
+            type: boolean
+            description: "Whether the corresponding recent epoch has been justified."
+        previous_justified_checkpoint:
+          $ref: '#/components/schemas/Checkpoint'
+        current_justified_checkpoint:
+          $ref: '#/components/schemas/Checkpoint'
+        finalized_checkpoint:
+          $ref: '#/components/schemas/Checkpoint'
+
     Fork:
       type: object
       description: "The [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#Fork) object from the Eth2.0 spec."
@@ -1302,6 +1805,35 @@ components:
         data:
           $ref: '#/components/schemas/AttestationData'
 
+    PendingAttestation:
+      type: object
+      description: "The [`PendingAttestation`](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/specs/core/0_beacon-chain.md#pendingattestation) object from the Eth2.0 spec."
+      properties:
+        aggregation_bits:
+          type: array
+          description: "The bits representing the aggregation of validator signatures for this attestation."
+          maxLength: 4096 #The MAX_VALIDATORS_PER_COMMITTEE value from the Eth2.0 spec.
+          items:
+            type: boolean
+            description: "Whether the corresponding validator's signature is included in the aggregate."
+        data:
+          $ref: '#/components/schemas/AttestationData'
+        inclusion_delay:
+          type: integer
+          format: uint64
+          description: "The number of slots between the attestation slot and the slot in which it was included."
+        proposer_index:
+          type: integer
+          format: uint64
+          description: "The ValidatorIndex of the proposer of the block in which this attestation was included."
+
     AttestationData:
       type: object
       description: "The [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object from the Eth2.0 spec."
@@ -1330,32 +1862,185 @@ components:
           pattern: "^0x[a-fA-F0-9]{64}$"
           description: "Target root from FFG vote."
         crosslink:
-          title: CrossLink
-          type: object
-          description: "The [`Crosslink`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#crosslink) object from the Eth2.0 spec, contains data from epochs [`start_epoch`, `end_epoch`)."
-          properties:
-            shard:
-              type: integer
-              format: uint64
-              description: "The shard number."
-            start_epoch:
-              type: integer
-              format: uint64
-              description: "The first epoch which the crosslinking data references."
-            end_epoch:
-              type: integer
-              format: uint64
-              description: "The 'end' epoch referred to by the crosslinking data; no data in this Crosslink should refer to the `end_epoch` since it is not included in the crosslinking data interval."
-            parent_root:
-              type: string
-              format: byte
-              pattern: "^0x[a-fA-F0-9]{64}$"
-              description: "Root of the previous crosslink."
-            data_root:
-              type: string
-              format: byte
-              pattern: "^0x[a-fA-F0-9]{64}$"
-              description: "Root of the crosslinked shard data since the previous crosslink."
+          $ref: '#/components/schemas/Crosslink'
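The `inclusion_delay` and `proposer_index` fields documented above are set when a block's attestations are folded into the state. The stand-alone sketch below (simplified, hypothetical types; not Lighthouse's actual state-processing code) shows the bookkeeping implied by the v0.8 spec:

```rust
// Hypothetical, simplified stand-in for the spec's PendingAttestation.
struct PendingAttestation {
    aggregation_bits: Vec<bool>, // one bit per committee member
    inclusion_delay: u64,        // state.slot - attestation.data.slot
    proposer_index: u64,         // proposer of the *including* block
}

fn on_attestation_included(
    state_slot: u64,
    attestation_slot: u64,
    aggregation_bits: Vec<bool>,
    block_proposer_index: u64,
) -> PendingAttestation {
    PendingAttestation {
        aggregation_bits,
        // How many slots passed between the attested slot and inclusion.
        inclusion_delay: state_slot - attestation_slot,
        // Credits the proposer who included the attestation; used later
        // when proposer rewards are computed in epoch processing.
        proposer_index: block_proposer_index,
    }
}

fn main() {
    let pa = on_attestation_included(12, 10, vec![true, false, true], 42);
    assert_eq!(pa.inclusion_delay, 2);
    assert_eq!(pa.proposer_index, 42);
}
```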
+
+    Crosslink:
+      type: object
+      description: "The [`Crosslink`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#crosslink) object from the Eth2.0 spec, contains data from epochs [`start_epoch`, `end_epoch`)."
+      properties:
+        shard:
+          type: integer
+          format: uint64
+          description: "The shard number."
+        start_epoch:
+          type: integer
+          format: uint64
+          description: "The first epoch which the crosslinking data references."
+        end_epoch:
+          type: integer
+          format: uint64
+          description: "The 'end' epoch referred to by the crosslinking data; no data in this Crosslink should refer to the `end_epoch` since it is not included in the crosslinking data interval."
+        parent_root:
+          type: string
+          format: byte
+          pattern: "^0x[a-fA-F0-9]{64}$"
+          description: "Root of the previous crosslink."
+        data_root:
+          type: string
+          format: byte
+          pattern: "^0x[a-fA-F0-9]{64}$"
+          description: "Root of the crosslinked shard data since the previous crosslink."
+
+    ChainSpec:
+      type: object
+      description: "Stores all of the values which specify a particular chain; this is the `ChainSpec` object in Lighthouse."
+      properties:
+        base_rewards_per_epoch:
+          type: integer
+          format: uint64
+          example: 5
+        deposit_contract_tree_depth:
+          type: integer
+          format: uint64
+          example: 32
+        seconds_per_day:
+          type: integer
+          format: uint64
+          example: 86400
+        target_committee_size:
+          type: integer
+          format: uint64
+          example: 128
+        min_per_epoch_churn_limit:
+          type: integer
+          format: uint64
+          example: 4
+        churn_limit_quotient:
+          type: integer
+          format: uint64
+          example: 65536
+        shuffle_round_count:
+          type: integer
+          format: uint8
+          example: 90
+        min_genesis_active_validator_count:
+          type: integer
+          format: uint64
+          example: 65536
+        min_genesis_time:
+          type: integer
+          format: uint64
+          example: 1578009600
+        min_deposit_amount:
+          type: integer
+          format: uint64
+          example: 1000000000
+        max_effective_balance:
+          type: integer
+          format: uint64
+          example: 32000000000
+        ejection_balance:
+          type: integer
+          format: uint64
+          example: 16000000000
+        effective_balance_increment:
+          type: integer
+          format: uint64
+          example: 1000000000
+        genesis_slot:
+          type: integer
+          format: uint64
+          example: 0
+        bls_withdrawal_prefix_byte:
+          type: string
+          format: byte
+          pattern: "^0x[a-fA-F0-9]{2}$"
+          example: "0x00"
+        milliseconds_per_slot:
+          type: integer
+          format: uint64
+          example: 6000
+        min_attestation_inclusion_delay:
+          type: integer
+          format: uint64
+          example: 1
+        min_seed_lookahead:
+          type: integer
+          format: uint64
+          example: 1
+        activation_exit_delay:
+          type: integer
+          format: uint64
+          example: 4
+        min_validator_withdrawability_delay:
+          type: integer
+          format: uint64
+          example: 256
+        persistent_committee_period:
+          type: integer
+          format: uint64
+          example: 2048
+        max_epochs_per_crosslink:
+          type: integer
+          format: uint64
+          example: 64
+        min_epochs_to_inactivity_penalty:
+          type: integer
+          format: uint64
+          example: 4
+        base_reward_factor:
+          type: integer
+          format: uint64
+          example: 64
+        whistleblower_reward_quotient:
+          type: integer
+          format: uint64
+          example: 512
+        proposer_reward_quotient:
+          type: integer
+          format: uint64
+          example: 8
+        inactivity_penalty_quotient:
+          type: integer
+          format: uint64
+          example: 33554432
+        min_slashing_penalty_quotient:
+          type: integer
+          format: uint64
+          example: 32
+        domain_beacon_proposer:
+          type: integer
+          format: uint32
+          example: 0
+        domain_randao:
+          type: integer
+          format: uint32
+          example: 1
+        domain_attestation:
+          type: integer
+          format: uint32
+          example: 2
+        domain_deposit:
+          type: integer
+          format: uint32
+          example: 3
+        domain_voluntary_exit:
+          type: integer
+          format: uint32
+          example: 4
+        domain_transfer:
+          type: integer
+          format: uint32
+          example: 5
+        boot_nodes:
+          type: array
+          items:
+            $ref: '#/components/schemas/ENR'
+        network_id:
+          type: integer
+          format: uint8
+          example: 2
 
   responses:
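Note that the schema above advertises `milliseconds_per_slot` where earlier revisions exposed `seconds_per_slot`; this mirrors the `ChainSpec` change in `eth2/types/src/chain_spec.rs` later in this diff. A minimal sketch of a downstream consumer of this object (struct name and field subset are illustrative; assumes `serde` and `serde_json`):

```rust
use serde::Deserialize;
use std::time::Duration;

// Illustrative subset of the ChainSpec JSON served by `/spec`.
#[derive(Deserialize, Debug)]
struct PartialChainSpec {
    milliseconds_per_slot: u64,
    target_committee_size: u64,
    max_effective_balance: u64,
}

impl PartialChainSpec {
    // Sub-second-precision slot times (e.g. 6_000 ms) are representable
    // exactly, which is the point of moving away from `seconds_per_slot`.
    fn slot_duration(&self) -> Duration {
        Duration::from_millis(self.milliseconds_per_slot)
    }
}

fn main() {
    let json = r#"{
        "milliseconds_per_slot": 6000,
        "target_committee_size": 128,
        "max_effective_balance": 32000000000
    }"#;
    let spec: PartialChainSpec = serde_json::from_str(json).expect("valid spec JSON");
    assert_eq!(spec.slot_duration(), Duration::from_millis(6_000));
    println!("target committee size: {}", spec.target_committee_size);
}
```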
diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs
index 95cd0679c..167cd36ea 100644
--- a/eth2/lmd_ghost/src/lib.rs
+++ b/eth2/lmd_ghost/src/lib.rs
@@ -46,4 +46,10 @@ pub trait LmdGhost<S: Store, E: EthSpec>: Send + Sync {
 
     /// Returns the latest message for a given validator index.
     fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>;
+
+    /// Runs an integrity verification function on the fork choice algorithm.
+    ///
+    /// Returns `Ok(())` if the underlying fork choice has maintained its integrity,
+    /// `Err(description)` otherwise.
+    fn verify_integrity(&self) -> Result<()>;
 }
diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs
index deda02e1f..73fab13bf 100644
--- a/eth2/lmd_ghost/src/reduced_tree.rs
+++ b/eth2/lmd_ghost/src/reduced_tree.rs
@@ -43,16 +43,6 @@ impl<T, E> fmt::Debug for ThreadSafeReducedTree<T, E> {
     }
 }
 
-impl<T, E> ThreadSafeReducedTree<T, E>
-where
-    T: Store,
-    E: EthSpec,
-{
-    pub fn verify_integrity(&self) -> std::result::Result<(), String> {
-        self.core.read().verify_integrity()
-    }
-}
-
 impl<T, E> LmdGhost<T, E> for ThreadSafeReducedTree<T, E>
 where
     T: Store,
@@ -80,7 +70,7 @@ where
     fn process_block(&self, block: &BeaconBlock<E>, block_hash: Hash256) -> SuperResult<()> {
         self.core
             .write()
-            .add_weightless_node(block.slot, block_hash)
+            .maybe_add_weightless_node(block.slot, block_hash)
             .map_err(|e| format!("process_block failed: {:?}", e))
     }
 
@@ -113,6 +103,10 @@ where
     fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> {
         self.core.read().latest_message(validator_index)
     }
+
+    fn verify_integrity(&self) -> std::result::Result<(), String> {
+        self.core.read().verify_integrity()
+    }
 }
 
 struct ReducedTree<T, E> {
@@ -163,15 +157,7 @@ where
     /// The given `new_root` must be in the block tree (but not necessarily in the reduced tree).
     /// Any nodes which are not a descendant of `new_root` will be removed from the store.
     pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> {
-        if !self.nodes.contains_key(&new_root) {
-            let node = Node {
-                block_hash: new_root,
-                voters: vec![],
-                ..Node::default()
-            };
-
-            self.add_node(node)?;
-        }
+        self.maybe_add_weightless_node(new_slot, new_root)?;
 
         self.retain_subtree(self.root.0, new_root)?;
 
@@ -247,7 +233,7 @@ where
         //
        // In this case, we add a weightless node at `start_block_root`.
         if !self.nodes.contains_key(&start_block_root) {
-            self.add_weightless_node(start_block_slot, start_block_root)?;
+            self.maybe_add_weightless_node(start_block_slot, start_block_root)?;
         };
 
         let _root_weight = self.update_weight(start_block_root, weight_fn)?;
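The `add_weightless_node` to `maybe_add_weightless_node` rename above signals that the function is now an idempotent guard: it inserts only when the node is missing and beyond the root slot, so callers such as `update_root` and `process_block` may invoke it unconditionally. A stripped-down sketch of the pattern (hypothetical, simplified types; not the real `ReducedTree` internals):

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for Hash256/Slot and the tree's node map.
type Hash = u64;
type Slot = u64;

#[derive(Default)]
struct Tree {
    root_slot: Slot,
    nodes: HashMap<Hash, ()>,
}

impl Tree {
    /// Insert a weightless node only if it is beyond the root slot and not
    /// already present; calling it twice is harmless, hence the `maybe_` name.
    fn maybe_add_weightless_node(&mut self, slot: Slot, hash: Hash) {
        if slot > self.root_slot && !self.nodes.contains_key(&hash) {
            self.nodes.insert(hash, ());
        }
    }
}

fn main() {
    let mut tree = Tree::default();
    tree.maybe_add_weightless_node(5, 0xabc);
    tree.maybe_add_weightless_node(5, 0xabc); // no-op on repeat
    assert_eq!(tree.nodes.len(), 1);
}
```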
@@ -325,51 +311,53 @@ where
     /// become redundant and removed from the reduced tree.
     fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> {
         if let Some(vote) = *self.latest_votes.get(validator_index) {
-            self.get_mut_node(vote.hash)?.remove_voter(validator_index);
-            let node = self.get_node(vote.hash)?.clone();
+            if self.nodes.contains_key(&vote.hash) {
+                self.get_mut_node(vote.hash)?.remove_voter(validator_index);
+                let node = self.get_node(vote.hash)?.clone();
 
-            if let Some(parent_hash) = node.parent_hash {
-                if node.has_votes() || node.children.len() > 1 {
-                    // A node with votes or more than one child is never removed.
-                } else if node.children.len() == 1 {
-                    // A node which has only one child may be removed.
-                    //
-                    // Load the child of the node and set it's parent to be the parent of this
-                    // node (viz., graft the node's child to the node's parent)
-                    let child = self.get_mut_node(node.children[0])?;
-                    child.parent_hash = node.parent_hash;
+                if let Some(parent_hash) = node.parent_hash {
+                    if node.has_votes() || node.children.len() > 1 {
+                        // A node with votes or more than one child is never removed.
+                    } else if node.children.len() == 1 {
+                        // A node which has only one child may be removed.
+                        //
+                        // Load the child of the node and set its parent to be the parent of this
+                        // node (viz., graft the node's child to the node's parent).
+                        let child = self.get_mut_node(node.children[0])?;
+                        child.parent_hash = node.parent_hash;
 
-            // Graft the parent of this node to it's child.
-            if let Some(parent_hash) = node.parent_hash {
-                let parent = self.get_mut_node(parent_hash)?;
-                parent.replace_child(node.block_hash, node.children[0])?;
+                        // Graft the parent of this node to its child.
+                        if let Some(parent_hash) = node.parent_hash {
+                            let parent = self.get_mut_node(parent_hash)?;
+                            parent.replace_child(node.block_hash, node.children[0])?;
+                        }
+
+                        self.nodes.remove(&vote.hash);
+                    } else if node.children.is_empty() {
+                        // Remove the to-be-deleted node from its parent.
+                        if let Some(parent_hash) = node.parent_hash {
+                            self.get_mut_node(parent_hash)?
+                                .remove_child(node.block_hash)?;
+                        }
+
+                        self.nodes.remove(&vote.hash);
+
+                        // A node which has no children may be deleted and potentially its parent
+                        // too.
+                        self.maybe_delete_node(parent_hash)?;
+                    } else {
+                        // It is impossible for a node to have a number of children that is not 0, 1 or
+                        // greater than one.
+                        //
+                        // This code is strictly unnecessary, however we keep it for readability.
+                        unreachable!();
                     }
-
-            self.nodes.remove(&vote.hash);
-        } else if node.children.is_empty() {
-            // Remove the to-be-deleted node from it's parent.
-            if let Some(parent_hash) = node.parent_hash {
-                self.get_mut_node(parent_hash)?
-                    .remove_child(node.block_hash)?;
-            }
-
-            self.nodes.remove(&vote.hash);
-
-            // A node which has no children may be deleted and potentially it's parent
-            // too.
-            self.maybe_delete_node(parent_hash)?;
-        } else {
-            // It is impossible for a node to have a number of children that is not 0, 1 or
-            // greater than one.
-            //
-            // This code is strictly unnecessary, however we keep it for readability.
-            unreachable!();
+                } else {
+                    // A node without a parent is the genesis/finalized node and should never be removed.
                 }
-        } else {
-            // A node without a parent is the genesis/finalized node and should never be removed.
-        }
-        self.latest_votes.insert(validator_index, Some(vote));
+                self.latest_votes.insert(validator_index, Some(vote));
+            }
         }
 
         Ok(())
     }
@@ -384,25 +372,30 @@ where
     /// - it does not have any votes.
fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> { let should_delete = { - let node = self.get_node(hash)?.clone(); + if let Ok(node) = self.get_node(hash) { + let node = node.clone(); - if let Some(parent_hash) = node.parent_hash { - if (node.children.len() == 1) && !node.has_votes() { - let child_hash = node.children[0]; + if let Some(parent_hash) = node.parent_hash { + if (node.children.len() == 1) && !node.has_votes() { + let child_hash = node.children[0]; - // Graft the single descendant `node` to the `parent` of node. - self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); + // Graft the single descendant `node` to the `parent` of node. + self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); - // Detach `node` from `parent`, replacing it with `child`. - self.get_mut_node(parent_hash)? - .replace_child(hash, child_hash)?; + // Detach `node` from `parent`, replacing it with `child`. + self.get_mut_node(parent_hash)? + .replace_child(hash, child_hash)?; - true + true + } else { + false + } } else { + // A node without a parent is the genesis node and should not be deleted. false } } else { - // A node without a parent is the genesis node and should not be deleted. + // No need to delete a node that does not exist. false } }; @@ -430,7 +423,7 @@ where Ok(()) } - fn add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { + fn maybe_add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { if slot > self.root_slot() && !self.nodes.contains_key(&hash) { let node = Node { block_hash: hash, @@ -477,6 +470,7 @@ where // descendant of both `node` and `prev_in_tree`. if self .iter_ancestors(child_hash)? + .take_while(|(_, slot)| *slot >= self.root_slot()) .any(|(ancestor, _slot)| ancestor == node.block_hash) { let child = self.get_mut_node(child_hash)?; @@ -562,6 +556,7 @@ where fn find_prev_in_tree(&mut self, hash: Hash256) -> Option { self.iter_ancestors(hash) .ok()? + .take_while(|(_, slot)| *slot >= self.root_slot()) .find(|(root, _slot)| self.nodes.contains_key(root)) .and_then(|(root, _slot)| Some(root)) } @@ -569,8 +564,12 @@ where /// For the two given block roots (`a_root` and `b_root`), find the first block they share in /// the tree. Viz, find the block that these two distinct blocks forked from. fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result { - let mut a_iter = self.iter_ancestors(a_root)?; - let mut b_iter = self.iter_ancestors(b_root)?; + let mut a_iter = self + .iter_ancestors(a_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); + let mut b_iter = self + .iter_ancestors(b_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); // Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two // blocks at the same slot, or `None` if we have gone past genesis or the root of this tree. 
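The `take_while(|(_, slot)| *slot >= self.root_slot())` guards added above all enforce one invariant: ancestor iteration must stop at the reduced tree's root instead of walking back towards genesis. The self-contained sketch below illustrates the bounded walk used by `find_highest_common_ancestor` (simplified `(root, slot)` tuples stand in for the real ancestor iterators):

```rust
/// Collect the roots of a newest-first ancestor chain, never looking at
/// entries older than `root_slot` (the guard added in this diff).
fn bounded_roots(chain: &[(u64, u64)], root_slot: u64) -> Vec<u64> {
    chain
        .iter()
        .copied()
        .take_while(|&(_, slot)| slot >= root_slot)
        .map(|(root, _)| root)
        .collect()
}

/// Return the first root the two bounded chains share, i.e. the fork point.
fn highest_common_ancestor(a: &[(u64, u64)], b: &[(u64, u64)], root_slot: u64) -> Option<u64> {
    let b_roots = bounded_roots(b, root_slot);
    bounded_roots(a, root_slot)
        .into_iter()
        .find(|root| b_roots.contains(root))
}

fn main() {
    // (block_root, slot), newest first, as iter_ancestors yields them.
    let a: &[(u64, u64)] = &[(10, 5), (7, 4), (3, 3), (1, 2)];
    let b: &[(u64, u64)] = &[(9, 5), (7, 4), (3, 3), (1, 2)];
    // With root_slot = 3 the walk stops before slot 2; the fork point is 7.
    assert_eq!(highest_common_ancestor(a, b, 3), Some(7));
}
```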
diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index 4c79a704e..49e9ff738 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -4,7 +4,8 @@ extern crate lazy_static; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, + generate_deterministic_keypairs, AttestationStrategy, + BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, }; use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; @@ -51,7 +52,7 @@ struct ForkedHarness { impl ForkedHarness { /// A new standard instance of with constant parameters. pub fn new() -> Self { - let harness = BeaconChainHarness::new(VALIDATOR_COUNT); + let harness = BeaconChainHarness::new(generate_deterministic_keypairs(VALIDATOR_COUNT)); // Move past the zero slot. harness.advance_slot(); diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0badf3807..bb64c3ca2 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -16,9 +16,9 @@ use state_processing::per_block_processing::errors::{ }; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_attestation_for_block_inclusion, - verify_attestation_for_state, verify_attester_slashing, verify_exit, - verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, - verify_transfer_time_independent_only, VerifySignatures, + verify_attester_slashing, verify_exit, verify_exit_time_independent_only, + verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, + VerifySignatures, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index 65d5a2f30..633c5bfef 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -15,6 +15,7 @@ serde = "1.0" serde_derive = "1.0" lazy_static = "0.1" serde_yaml = "0.8" +eth2_ssz = { path = "../utils/ssz" } beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } lmd_ghost = { path = "../lmd_ghost" } diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 28afd0614..bdbe57b8e 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -2,6 +2,7 @@ extern crate env_logger; use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use ssz::Encode; use state_processing::{test_utils::BlockBuilder, BlockSignatureStrategy, VerifySignatures}; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, MainnetEthSpec, MinimalEthSpec, Slot}; @@ -393,6 +394,32 @@ fn bench_block( }) .sample_size(10), ); + + let local_block = block.clone(); + c.bench( + &title, + Benchmark::new("ssz_serialize_block", move |b| { + b.iter_batched_ref( + || (), + |_| black_box(local_block.as_ssz_bytes()), + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); + + let local_block = block.clone(); + c.bench( + &title, + Benchmark::new("ssz_block_len", move |b| { + b.iter_batched_ref( + || (), + |_| black_box(local_block.ssz_bytes_len()), + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); } criterion_group!(benches, all_benches,); diff --git a/eth2/state_processing/src/common/get_attesting_indices.rs 
b/eth2/state_processing/src/common/get_attesting_indices.rs index f558909f6..adb71801a 100644 --- a/eth2/state_processing/src/common/get_attesting_indices.rs +++ b/eth2/state_processing/src/common/get_attesting_indices.rs @@ -17,11 +17,9 @@ pub fn get_attesting_indices( target_relative_epoch, )?; - /* TODO(freeze): re-enable this? - if bitlist.len() > committee.committee.len() { + if bitlist.len() != committee.committee.len() { return Err(BeaconStateError::InvalidBitfield); } - */ Ok(committee .committee diff --git a/eth2/state_processing/src/common/get_compact_committees_root.rs b/eth2/state_processing/src/common/get_compact_committees_root.rs index 75edb3549..b8ab4345f 100644 --- a/eth2/state_processing/src/common/get_compact_committees_root.rs +++ b/eth2/state_processing/src/common/get_compact_committees_root.rs @@ -3,7 +3,7 @@ use types::*; /// Return the compact committee root at `relative_epoch`. /// -/// Spec v0.8.0 +/// Spec v0.8.3 pub fn get_compact_committees_root( state: &BeaconState, relative_epoch: RelativeEpoch, @@ -11,28 +11,13 @@ pub fn get_compact_committees_root( ) -> Result { let mut committees = FixedVector::<_, T::ShardCount>::from_elem(CompactCommittee::::default()); - // FIXME: this is a spec bug, whereby the start shard for the epoch after the next epoch - // is mistakenly used. The start shard from the cache SHOULD work. - // Waiting on a release to fix https://github.com/ethereum/eth2.0-specs/issues/1315 - let start_shard = if relative_epoch == RelativeEpoch::Next { - state.next_epoch_start_shard(spec)? - } else { - state.get_epoch_start_shard(relative_epoch)? - }; + let start_shard = state.get_epoch_start_shard(relative_epoch)?; for committee_number in 0..state.get_committee_count(relative_epoch)? { let shard = (start_shard + committee_number) % T::ShardCount::to_u64(); - // FIXME: this is a partial workaround for the above, but it only works in the case - // where there's a committee for every shard in every epoch. It works for the minimal - // tests but not the mainnet ones. - let fake_shard = if relative_epoch == RelativeEpoch::Next { - (shard + 1) % T::ShardCount::to_u64() - } else { - shard - }; for &index in state - .get_crosslink_committee_for_shard(fake_shard, relative_epoch)? + .get_crosslink_committee_for_shard(shard, relative_epoch)? 
.committee { let validator = state diff --git a/eth2/state_processing/src/common/get_indexed_attestation.rs b/eth2/state_processing/src/common/get_indexed_attestation.rs index 2507c76f2..5e9362331 100644 --- a/eth2/state_processing/src/common/get_indexed_attestation.rs +++ b/eth2/state_processing/src/common/get_indexed_attestation.rs @@ -11,6 +11,8 @@ pub fn get_indexed_attestation( state: &BeaconState, attestation: &Attestation, ) -> Result> { + // Note: we rely on both calls to `get_attesting_indices` to check the bitfield lengths + // against the committee length let attesting_indices = get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; diff --git a/eth2/state_processing/src/per_block_processing/signature_sets.rs b/eth2/state_processing/src/per_block_processing/signature_sets.rs index dec529247..4f1a06670 100644 --- a/eth2/state_processing/src/per_block_processing/signature_sets.rs +++ b/eth2/state_processing/src/per_block_processing/signature_sets.rs @@ -42,8 +42,12 @@ pub fn block_proposal_signature_set<'a, T: EthSpec>( block_signed_root: Option, spec: &'a ChainSpec, ) -> Result> { - let block_proposer = &state.validators - [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; + let proposer_index = + state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?; + let block_proposer = &state + .validators + .get(proposer_index) + .ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?; let domain = spec.get_domain( block.slot.epoch(T::slots_per_epoch()), diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index cf64dc85e..f419d5fae 100644 --- a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -1,4 +1,5 @@ #![cfg(all(test, not(feature = "fake_crypto")))] + use super::block_processing_builder::BlockProcessingBuilder; use super::errors::*; use crate::{per_block_processing, BlockSignatureStrategy}; diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 8d6153aea..bcac1dc27 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,8 +1,5 @@ use crate::common::get_compact_committees_root; -use apply_rewards::process_rewards_and_penalties; use errors::EpochProcessingError as Error; -use process_slashings::process_slashings; -use registry_updates::process_registry_updates; use std::collections::HashMap; use tree_hash::TreeHash; use types::*; @@ -17,6 +14,10 @@ pub mod tests; pub mod validator_statuses; pub mod winning_root; +pub use apply_rewards::process_rewards_and_penalties; +pub use process_slashings::process_slashings; +pub use registry_updates::process_registry_updates; + /// Maps a shard to a winning root. /// /// It is generated during crosslink processing and later used to reward/penalize validators. @@ -47,15 +48,10 @@ pub fn per_epoch_processing( process_justification_and_finalization(state, &validator_statuses.total_balances)?; // Crosslinks. - let winning_root_for_shards = process_crosslinks(state, spec)?; + process_crosslinks(state, spec)?; // Rewards and Penalties. - process_rewards_and_penalties( - state, - &mut validator_statuses, - &winning_root_for_shards, - spec, - )?; + process_rewards_and_penalties(state, &mut validator_statuses, spec)?; // Registry Updates. 
process_registry_updates(state, spec)?; @@ -159,9 +155,7 @@ pub fn process_justification_and_finalization( pub fn process_crosslinks( state: &mut BeaconState, spec: &ChainSpec, -) -> Result { - let mut winning_root_for_shards: WinningRootHashSet = HashMap::new(); - +) -> Result<(), Error> { state.previous_crosslinks = state.current_crosslinks.clone(); for &relative_epoch in &[RelativeEpoch::Previous, RelativeEpoch::Current] { @@ -181,12 +175,11 @@ pub fn process_crosslinks( if 3 * winning_root.total_attesting_balance >= 2 * total_committee_balance { state.current_crosslinks[shard as usize] = winning_root.crosslink.clone(); } - winning_root_for_shards.insert(shard, winning_root); } } } - Ok(winning_root_for_shards) + Ok(()) } /// Finish up an epoch update. @@ -218,45 +211,29 @@ pub fn process_final_updates( } } - // Update start shard. - state.start_shard = state.next_epoch_start_shard(spec)?; - - // This is a hack to allow us to update index roots and slashed balances for the next epoch. - // - // The indentation here is to make it obvious where the weird stuff happens. - { - state.slot += 1; - - // Set active index root - let index_epoch = next_epoch + spec.activation_exit_delay; - let indices_list = VariableList::::from( - state.get_active_validator_indices(index_epoch), - ); - state.set_active_index_root( - index_epoch, - Hash256::from_slice(&indices_list.tree_hash_root()), - spec, - )?; - - // Reset slashings - state.set_slashings(next_epoch, 0)?; - - // Set randao mix - state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; - - state.slot -= 1; - } + // Set active index root + let index_epoch = next_epoch + spec.activation_exit_delay; + let indices_list = VariableList::::from( + state.get_active_validator_indices(index_epoch), + ); + state.set_active_index_root( + index_epoch, + Hash256::from_slice(&indices_list.tree_hash_root()), + spec, + )?; // Set committees root - // Note: we do this out-of-order w.r.t. to the spec, because we don't want the slot to be - // incremented. It's safe because the updates to slashings and the RANDAO mix (above) don't - // affect this. state.set_compact_committee_root( next_epoch, get_compact_committees_root(state, RelativeEpoch::Next, spec)?, - spec, )?; + // Reset slashings + state.set_slashings(next_epoch, 0)?; + + // Set randao mix + state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; + // Set historical root accumulator if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 { let historical_batch = state.historical_batch(); @@ -265,6 +242,9 @@ pub fn process_final_updates( .push(Hash256::from_slice(&historical_batch.tree_hash_root()))?; } + // Update start shard. 
+ state.start_shard = state.get_epoch_start_shard(RelativeEpoch::Next)?; + // Rotate current/previous epoch attestations state.previous_epoch_attestations = std::mem::replace(&mut state.current_epoch_attestations, VariableList::empty()); diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 9bd53077a..6de9ed872 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -1,5 +1,5 @@ use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; -use super::{Error, WinningRootHashSet}; +use super::Error; use integer_sqrt::IntegerSquareRoot; use types::*; @@ -36,7 +36,6 @@ impl std::ops::AddAssign for Delta { pub fn process_rewards_and_penalties( state: &mut BeaconState, validator_statuses: &mut ValidatorStatuses, - winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { if state.current_epoch() == T::genesis_epoch() { @@ -53,15 +52,13 @@ pub fn process_rewards_and_penalties( let mut deltas = vec![Delta::default(); state.balances.len()]; get_attestation_deltas(&mut deltas, state, &validator_statuses, spec)?; + + // Update statuses with the information from winning roots. + validator_statuses.process_winning_roots(state, spec)?; + get_crosslink_deltas(&mut deltas, state, &validator_statuses, spec)?; - get_proposer_deltas( - &mut deltas, - state, - validator_statuses, - winning_root_for_shards, - spec, - )?; + get_proposer_deltas(&mut deltas, state, validator_statuses, spec)?; // Apply the deltas, over-flowing but not under-flowing (saturating at 0 instead). for (i, delta) in deltas.iter().enumerate() { @@ -79,12 +76,8 @@ fn get_proposer_deltas( deltas: &mut Vec, state: &BeaconState, validator_statuses: &mut ValidatorStatuses, - winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { - // Update statuses with the information from winning roots. - validator_statuses.process_winning_roots(state, winning_root_for_shards, spec)?; - for (index, validator) in validator_statuses.statuses.iter().enumerate() { if validator.is_previous_epoch_attester { let inclusion = validator diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 8a7d07d57..3280b981f 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,4 +1,4 @@ -use super::WinningRootHashSet; +use super::{winning_root::winning_root, WinningRootHashSet}; use crate::common::get_attesting_indices; use types::*; @@ -292,9 +292,29 @@ impl ValidatorStatuses { pub fn process_winning_roots( &mut self, state: &BeaconState, - winning_roots: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), BeaconStateError> { + // We must re-calculate the winning roots here because it is possible that they have + // changed since the first time they were calculated. + // + // This is because we altered the state during the first time we calculated the winning + // roots. + let winning_root_for_shards = { + let mut winning_root_for_shards = WinningRootHashSet::new(); + let relative_epoch = RelativeEpoch::Previous; + + let epoch = relative_epoch.into_epoch(state.current_epoch()); + for offset in 0..state.get_committee_count(relative_epoch)? 
{ + let shard = (state.get_epoch_start_shard(relative_epoch)? + offset) + % T::ShardCount::to_u64(); + if let Some(winning_root) = winning_root(state, shard, epoch, spec)? { + winning_root_for_shards.insert(shard, winning_root); + } + } + + winning_root_for_shards + }; + // Loop through each slot in the previous epoch. for slot in state.previous_epoch().slot_iter(T::slots_per_epoch()) { let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?; @@ -302,7 +322,7 @@ impl ValidatorStatuses { // Loop through each committee in the slot. for c in crosslink_committees_at_slot { // If there was some winning crosslink root for the committee's shard. - if let Some(winning_root) = winning_roots.get(&c.shard) { + if let Some(winning_root) = winning_root_for_shards.get(&c.shard) { let total_committee_balance = state.get_total_balance(&c.committee, spec)?; for &validator_index in &winning_root.attesting_validator_indices { // Take note of the balance information for the winning root, it will be diff --git a/eth2/state_processing/src/per_epoch_processing/winning_root.rs b/eth2/state_processing/src/per_epoch_processing/winning_root.rs index 874e11d6c..82a6b0ff1 100644 --- a/eth2/state_processing/src/per_epoch_processing/winning_root.rs +++ b/eth2/state_processing/src/per_epoch_processing/winning_root.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet}; use tree_hash::TreeHash; use types::*; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct WinningRoot { pub crosslink: Crosslink, pub attesting_validator_indices: Vec, diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 43b66f3ed..a7390c850 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -1,3 +1,5 @@ +#![cfg(not(feature = "fake_crypto"))] + use state_processing::{ per_block_processing, test_utils::BlockBuilder, BlockProcessingError, BlockSignatureStrategy, }; diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 36cfc39ec..95d7a0317 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -31,3 +31,4 @@ tree_hash_derive = "0.2" [dev-dependencies] env_logger = "0.6.0" +serde_json = "^1.0" diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index f2e63598f..4d82ce126 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -4,25 +4,13 @@ use crate::{Checkpoint, Crosslink, Hash256}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use tree_hash::TreeHash; -use tree_hash_derive::{SignedRoot, TreeHash}; +use tree_hash_derive::TreeHash; /// The data upon which an attestation is based. /// /// Spec v0.8.0 #[derive( - Debug, - Clone, - PartialEq, - Eq, - Serialize, - Deserialize, - Hash, - Encode, - Decode, - TreeHash, - TestRandom, - SignedRoot, + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, )] pub struct AttestationData { // LMD GHOST vote diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index fb923fc06..6b2b44d0e 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -60,6 +60,22 @@ pub enum Error { SszTypesError(ssz_types::Error), } +/// Control whether an epoch-indexed field can be indexed at the next epoch or not. 
+#[derive(Debug, PartialEq, Clone, Copy)] +enum AllowNextEpoch { + True, + False, +} + +impl AllowNextEpoch { + fn upper_bound_of(self, current_epoch: Epoch) -> Epoch { + match self { + AllowNextEpoch::True => current_epoch + 1, + AllowNextEpoch::False => current_epoch, + } + } +} + /// The state of the `BeaconChain` at some slot. /// /// Spec v0.8.0 @@ -108,12 +124,12 @@ where pub start_shard: u64, pub randao_mixes: FixedVector, #[compare_fields(as_slice)] - active_index_roots: FixedVector, + pub active_index_roots: FixedVector, #[compare_fields(as_slice)] - compact_committees_roots: FixedVector, + pub compact_committees_roots: FixedVector, // Slashings - slashings: FixedVector, + pub slashings: FixedVector, // Attestations pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, @@ -282,14 +298,6 @@ impl BeaconState { Ok(cache.epoch_start_shard()) } - pub fn next_epoch_start_shard(&self, spec: &ChainSpec) -> Result { - let cache = self.cache(RelativeEpoch::Current)?; - let active_validator_count = cache.active_validator_count(); - let shard_delta = T::get_shard_delta(active_validator_count, spec.target_committee_size); - - Ok((self.start_shard + shard_delta) % T::ShardCount::to_u64()) - } - /// Get the slot of an attestation. /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. @@ -463,12 +471,16 @@ impl BeaconState { /// Safely obtains the index for `randao_mixes` /// - /// Spec v0.8.0 - fn get_randao_mix_index(&self, epoch: Epoch) -> Result { + /// Spec v0.8.1 + fn get_randao_mix_index( + &self, + epoch: Epoch, + allow_next_epoch: AllowNextEpoch, + ) -> Result { let current_epoch = self.current_epoch(); let len = T::EpochsPerHistoricalVector::to_u64(); - if epoch + len > current_epoch && epoch <= current_epoch { + if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { Ok(epoch.as_usize() % len as usize) } else { Err(Error::EpochOutOfBounds) @@ -496,7 +508,7 @@ impl BeaconState { /// /// Spec v0.8.1 pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { - let i = self.get_randao_mix_index(epoch)?; + let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; Ok(&self.randao_mixes[i]) } @@ -504,21 +516,29 @@ impl BeaconState { /// /// Spec v0.8.1 pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { - let i = self.get_randao_mix_index(epoch)?; + let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; self.randao_mixes[i] = mix; Ok(()) } /// Safely obtains the index for `active_index_roots`, given some `epoch`. /// + /// If `allow_next_epoch` is `True`, then we allow an _extra_ one epoch of lookahead. 
+ /// /// Spec v0.8.1 - fn get_active_index_root_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + fn get_active_index_root_index( + &self, + epoch: Epoch, + spec: &ChainSpec, + allow_next_epoch: AllowNextEpoch, + ) -> Result { let current_epoch = self.current_epoch(); let lookahead = spec.activation_exit_delay; let lookback = self.active_index_roots.len() as u64 - lookahead; + let epoch_upper_bound = allow_next_epoch.upper_bound_of(current_epoch) + lookahead; - if epoch + lookback > current_epoch && current_epoch + lookahead >= epoch { + if current_epoch < epoch + lookback && epoch <= epoch_upper_bound { Ok(epoch.as_usize() % self.active_index_roots.len()) } else { Err(Error::EpochOutOfBounds) @@ -529,7 +549,7 @@ impl BeaconState { /// /// Spec v0.8.1 pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Result { - let i = self.get_active_index_root_index(epoch, spec)?; + let i = self.get_active_index_root_index(epoch, spec, AllowNextEpoch::False)?; Ok(self.active_index_roots[i]) } @@ -542,7 +562,7 @@ impl BeaconState { index_root: Hash256, spec: &ChainSpec, ) -> Result<(), Error> { - let i = self.get_active_index_root_index(epoch, spec)?; + let i = self.get_active_index_root_index(epoch, spec, AllowNextEpoch::True)?; self.active_index_roots[i] = index_root; Ok(()) } @@ -556,19 +576,17 @@ impl BeaconState { /// Safely obtains the index for `compact_committees_roots`, given some `epoch`. /// - /// Spec v0.8.0 + /// Spec v0.8.1 fn get_compact_committee_root_index( &self, epoch: Epoch, - spec: &ChainSpec, + allow_next_epoch: AllowNextEpoch, ) -> Result { let current_epoch = self.current_epoch(); + let len = T::EpochsPerHistoricalVector::to_u64(); - let lookahead = spec.activation_exit_delay; - let lookback = self.compact_committees_roots.len() as u64 - lookahead; - - if epoch + lookback > current_epoch && current_epoch + lookahead >= epoch { - Ok(epoch.as_usize() % self.compact_committees_roots.len()) + if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { + Ok(epoch.as_usize() % len as usize) } else { Err(Error::EpochOutOfBounds) } @@ -576,26 +594,21 @@ impl BeaconState { /// Return the `compact_committee_root` at a recent `epoch`. /// - /// Spec v0.8.0 - pub fn get_compact_committee_root( - &self, - epoch: Epoch, - spec: &ChainSpec, - ) -> Result { - let i = self.get_compact_committee_root_index(epoch, spec)?; + /// Spec v0.8.1 + pub fn get_compact_committee_root(&self, epoch: Epoch) -> Result { + let i = self.get_compact_committee_root_index(epoch, AllowNextEpoch::False)?; Ok(self.compact_committees_roots[i]) } /// Set the `compact_committee_root` at a recent `epoch`. /// - /// Spec v0.8.0 + /// Spec v0.8.1 pub fn set_compact_committee_root( &mut self, epoch: Epoch, index_root: Hash256, - spec: &ChainSpec, ) -> Result<(), Error> { - let i = self.get_compact_committee_root_index(epoch, spec)?; + let i = self.get_compact_committee_root_index(epoch, AllowNextEpoch::True)?; self.compact_committees_roots[i] = index_root; Ok(()) } @@ -646,14 +659,19 @@ impl BeaconState { /// Safely obtain the index for `slashings`, given some `epoch`. /// - /// Spec v0.8.0 - fn get_slashings_index(&self, epoch: Epoch) -> Result { + /// Spec v0.8.1 + fn get_slashings_index( + &self, + epoch: Epoch, + allow_next_epoch: AllowNextEpoch, + ) -> Result { // We allow the slashings vector to be accessed at any cached epoch at or before - // the current epoch. 
-        if epoch <= self.current_epoch()
-            && epoch + T::EpochsPerSlashingsVector::to_u64() >= self.current_epoch() + 1
+        // the current epoch, or the next epoch if `AllowNextEpoch::True` is passed.
+        let current_epoch = self.current_epoch();
+        if current_epoch < epoch + T::EpochsPerSlashingsVector::to_u64()
+            && epoch <= allow_next_epoch.upper_bound_of(current_epoch)
         {
-            Ok((epoch.as_u64() % T::EpochsPerSlashingsVector::to_u64()) as usize)
+            Ok(epoch.as_usize() % T::EpochsPerSlashingsVector::to_usize())
         } else {
             Err(Error::EpochOutOfBounds)
         }
@@ -668,17 +686,17 @@ impl<T: EthSpec> BeaconState<T> {
 
     /// Get the total slashed balances for some epoch.
     ///
-    /// Spec v0.8.0
+    /// Spec v0.8.1
     pub fn get_slashings(&self, epoch: Epoch) -> Result<u64, Error> {
-        let i = self.get_slashings_index(epoch)?;
+        let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?;
         Ok(self.slashings[i])
     }
 
     /// Set the total slashed balances for some epoch.
     ///
-    /// Spec v0.8.0
+    /// Spec v0.8.1
     pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> {
-        let i = self.get_slashings_index(epoch)?;
+        let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?;
         self.slashings[i] = value;
         Ok(())
     }
diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs
index 0e76942dd..f589b3d3e 100644
--- a/eth2/types/src/beacon_state/beacon_state_types.rs
+++ b/eth2/types/src/beacon_state/beacon_state_types.rs
@@ -120,6 +120,13 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
     fn epochs_per_historical_vector() -> usize {
         Self::EpochsPerHistoricalVector::to_usize()
     }
+
+    /// Returns the `SLOTS_PER_ETH1_VOTING_PERIOD` constant for this specification.
+    ///
+    /// Spec v0.8.1
+    fn slots_per_eth1_voting_period() -> usize {
+        Self::SlotsPerEth1VotingPeriod::to_usize()
+    }
 }
 
 /// Macro to inherit some type values from another EthSpec.
diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs
index 28e9d92f8..4c17d3f96 100644
--- a/eth2/types/src/beacon_state/committee_cache/tests.rs
+++ b/eth2/types/src/beacon_state/committee_cache/tests.rs
@@ -9,7 +9,7 @@ fn default_values() {
     let cache = CommitteeCache::default();
 
     assert_eq!(cache.is_initialized_at(Epoch::new(0)), false);
-    assert_eq!(cache.active_validator_indices(), &[]);
+    assert!(&cache.active_validator_indices().is_empty());
     assert_eq!(cache.get_crosslink_committee_for_shard(0), None);
     assert_eq!(cache.get_attestation_duties(0), None);
     assert_eq!(cache.active_validator_count(), 0);
diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs
index 67adccdda..0363e5848 100644
--- a/eth2/types/src/beacon_state/tests.rs
+++ b/eth2/types/src/beacon_state/tests.rs
@@ -90,11 +90,11 @@ fn test_active_index(state_slot: Slot) {
 
     // Test the start and end of the range.
     assert_eq!(
-        state.get_active_index_root_index(*range.start(), &spec),
+        state.get_active_index_root_index(*range.start(), &spec, AllowNextEpoch::False),
         Ok(modulo(*range.start()))
     );
     assert_eq!(
-        state.get_active_index_root_index(*range.end(), &spec),
+        state.get_active_index_root_index(*range.end(), &spec, AllowNextEpoch::False),
         Ok(modulo(*range.end()))
     );
 
@@ -102,12 +102,12 @@
     if state.current_epoch() > 0 {
         // Test is invalid on epoch zero, cannot subtract from zero.
assert_eq!( - state.get_active_index_root_index(*range.start() - 1, &spec), + state.get_active_index_root_index(*range.start() - 1, &spec, AllowNextEpoch::False), Err(Error::EpochOutOfBounds) ); } assert_eq!( - state.get_active_index_root_index(*range.end() + 1, &spec), + state.get_active_index_root_index(*range.end() + 1, &spec, AllowNextEpoch::False), Err(Error::EpochOutOfBounds) ); } diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 9dec626d4..d59e0db0a 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -58,7 +58,7 @@ pub struct ChainSpec { /* * Time parameters */ - pub seconds_per_slot: u64, + pub milliseconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, @@ -158,7 +158,7 @@ impl ChainSpec { /* * Time parameters */ - seconds_per_slot: 6, + milliseconds_per_slot: 6_000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), activation_exit_delay: 4, @@ -221,7 +221,7 @@ impl ChainSpec { let boot_nodes = vec![]; Self { - seconds_per_slot: 12, + milliseconds_per_slot: 12_000, target_committee_size: 4, shuffle_round_count: 10, network_id: 13, diff --git a/eth2/types/src/checkpoint.rs b/eth2/types/src/checkpoint.rs index dc40b336f..d5d40fa67 100644 --- a/eth2/types/src/checkpoint.rs +++ b/eth2/types/src/checkpoint.rs @@ -3,8 +3,7 @@ use crate::{Epoch, Hash256}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use tree_hash::TreeHash; -use tree_hash_derive::{SignedRoot, TreeHash}; +use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. /// @@ -22,7 +21,6 @@ use tree_hash_derive::{SignedRoot, TreeHash}; Decode, TreeHash, TestRandom, - SignedRoot, )] pub struct Checkpoint { pub epoch: Epoch, diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 3edf8b36b..d1eaa393f 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -86,5 +86,8 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; /// Maps a slot to a block proposer. pub type ProposerMap = HashMap; -pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature}; +pub use bls::{ + AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, + Signature, SignatureBytes, +}; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 084ff98e7..3bd54ee2d 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -182,7 +182,7 @@ macro_rules! impl_display { &self, record: &slog::Record, key: slog::Key, - serializer: &mut slog::Serializer, + serializer: &mut dyn slog::Serializer, ) -> slog::Result { slog::Value::serialize(&self.0, record, key, serializer) } @@ -201,6 +201,10 @@ macro_rules! 
impl_ssz { ::ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + 0_u64.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { self.0.ssz_append(buf) } diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 98f840953..cf8c9ec8e 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -94,7 +94,7 @@ impl TestingBeaconStateBuilder { /// Creates the builder from an existing set of keypairs. pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { let validator_count = keypairs.len(); - let starting_balance = 32_000_000_000; + let starting_balance = spec.max_effective_balance; debug!( "Building {} Validator objects from keypairs...", @@ -123,8 +123,10 @@ impl TestingBeaconStateBuilder { .collect::>() .into(); + let genesis_time = 1567052589; // 29 August, 2019; + let mut state = BeaconState::new( - spec.min_genesis_time, + genesis_time, Eth1Data { deposit_root: Hash256::zero(), deposit_count: 0, diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index 6c72b520f..b97293427 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -39,15 +39,15 @@ impl TestingProposerSlashingBuilder { ..header_1.clone() }; + let epoch = slot.epoch(T::slots_per_epoch()); + header_1.signature = { let message = header_1.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; header_2.signature = { let message = header_2.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs index 172b142ef..188ce075d 100644 --- a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,7 +1,8 @@ use crate::*; -use eth2_interop_keypairs::be_private_key; +use eth2_interop_keypairs::{keypair, keypairs_from_yaml_file}; use log::debug; use rayon::prelude::*; +use std::path::PathBuf; /// Generates `validator_count` keypairs where the secret key is derived solely from the index of /// the validator. @@ -15,8 +16,8 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { let keypairs: Vec = (0..validator_count) .collect::>() - .par_iter() - .map(|&i| generate_deterministic_keypair(i)) + .into_par_iter() + .map(generate_deterministic_keypair) .collect(); keypairs @@ -26,8 +27,20 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { /// /// This is used for testing only, and not to be used in production! pub fn generate_deterministic_keypair(validator_index: usize) -> Keypair { - let sk = SecretKey::from_bytes(&be_private_key(validator_index)) - .expect("be_private_key always returns valid keys"); - let pk = PublicKey::from_secret_key(&sk); - Keypair { sk, pk } + let raw = keypair(validator_index); + Keypair { + pk: PublicKey::from_raw(raw.pk), + sk: SecretKey::from_raw(raw.sk), + } +} + +/// Loads a list of keypairs from file. 
+pub fn load_keypairs_from_yaml(path: PathBuf) -> Result<Vec<Keypair>, String> { + Ok(keypairs_from_yaml_file(path)? + .into_iter() + .map(|raw| Keypair { + pk: PublicKey::from_raw(raw.pk), + sk: SecretKey::from_raw(raw.sk), + }) + .collect()) } diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 9ca9ca78a..b3ecb9089 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -8,6 +8,7 @@ mod test_random; pub use builders::*; pub use generate_deterministic_keypairs::generate_deterministic_keypair; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; +pub use generate_deterministic_keypairs::load_keypairs_from_yaml; pub use keypairs_file::KeypairsFile; pub use rand::{ RngCore, diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 4f499ad37..349e08f54 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.10.0" } +# FIXME: update sigp repo +milagro_bls = { git = "https://github.com/michaelsproul/milagro_bls", branch = "little-endian-v0.10" } eth2_hashing = { path = "../eth2_hashing" } hex = "0.3" rand = "^0.5" diff --git a/eth2/utils/bls/src/fake_public_key.rs b/eth2/utils/bls/src/fake_public_key.rs index e8dafaca6..82b1c707f 100644 --- a/eth2/utils/bls/src/fake_public_key.rs +++ b/eth2/utils/bls/src/fake_public_key.rs @@ -1,5 +1,6 @@ use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE}; use milagro_bls::G1Point; +use milagro_bls::PublicKey as RawPublicKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; @@ -24,6 +25,13 @@ impl FakePublicKey { Self::zero() } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self { + bytes: raw.clone().as_bytes(), + point: G1Point::new(), + } + } + /// Creates a new all-zeros public key pub fn zero() -> Self { Self { diff --git a/eth2/utils/bls/src/macros.rs b/eth2/utils/bls/src/macros.rs index 09838b73e..e8bd3dd04 100644 --- a/eth2/utils/bls/src/macros.rs +++ b/eth2/utils/bls/src/macros.rs @@ -9,6 +9,10 @@ macro_rules! impl_ssz { $byte_size } + fn ssz_bytes_len(&self) -> usize { + $byte_size + } + fn ssz_append(&self, buf: &mut Vec<u8>) { buf.append(&mut self.as_bytes()) } diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index e03b17686..4b5abb58e 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -20,6 +20,10 @@ impl PublicKey { PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self(raw) + } + /// Returns the underlying public key.
pub fn as_raw(&self) -> &RawPublicKey { &self.0 diff --git a/eth2/utils/bls/src/public_key_bytes.rs b/eth2/utils/bls/src/public_key_bytes.rs index f75735140..afdbcb270 100644 --- a/eth2/utils/bls/src/public_key_bytes.rs +++ b/eth2/utils/bls/src/public_key_bytes.rs @@ -31,6 +31,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_public_key() { let mut public_key_bytes = [0; BLS_PUBLIC_KEY_BYTE_SIZE]; public_key_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 12f9a713b..54da0fa0f 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -20,6 +20,10 @@ impl SecretKey { SecretKey(RawSecretKey::random(&mut rand::thread_rng())) } + pub fn from_raw(raw: RawSecretKey) -> Self { + Self(raw) + } + /// Returns the underlying point as compressed bytes. fn as_bytes(&self) -> Vec { self.as_raw().as_bytes() diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 7c7f677d7..7a2bc6051 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -1,9 +1,8 @@ use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE}; -use hex::encode as hex_encode; use milagro_bls::Signature as RawSignature; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; -use serde_hex::HexVisitor; +use serde_hex::{encode as hex_encode, HexVisitor}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; /// A single BLS signature. diff --git a/eth2/utils/bls/src/signature_bytes.rs b/eth2/utils/bls/src/signature_bytes.rs index a30cecb4d..b89c0f0d1 100644 --- a/eth2/utils/bls/src/signature_bytes.rs +++ b/eth2/utils/bls/src/signature_bytes.rs @@ -32,6 +32,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_signature() { let mut signature_bytes = [0; BLS_SIG_BYTE_SIZE]; signature_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed diff --git a/eth2/utils/bls/src/signature_set.rs b/eth2/utils/bls/src/signature_set.rs index 4b6065f9f..df1636f1d 100644 --- a/eth2/utils/bls/src/signature_set.rs +++ b/eth2/utils/bls/src/signature_set.rs @@ -7,7 +7,7 @@ use milagro_bls::AggregateSignature as RawAggregateSignature; type Message = Vec; type Domain = u64; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct SignedMessage<'a> { signing_keys: Vec<&'a G1Point>, message: Message, @@ -25,7 +25,7 @@ impl<'a> SignedMessage<'a> { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct SignatureSet<'a> { pub signature: &'a G2Point, signed_messages: Vec>, diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index e1c4dab04..a1a851d1d 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -7,5 +7,14 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lazy_static = "1.4" num-bigint = "0.2" eth2_hashing = "0.1" +hex = "0.3" +milagro_bls = { git = "https://github.com/michaelsproul/milagro_bls", branch = "little-endian-v0.10" } +serde_yaml = "0.8" +serde = "1.0" +serde_derive = "1.0" + +[dev-dependencies] +base64 = "0.10" diff --git a/eth2/utils/eth2_interop_keypairs/specs/keygen_10_validators.yaml b/eth2/utils/eth2_interop_keypairs/specs/keygen_10_validators.yaml new file mode 100644 index 000000000..b725ab2bd --- /dev/null +++ 
b/eth2/utils/eth2_interop_keypairs/specs/keygen_10_validators.yaml @@ -0,0 +1,20 @@ +- {privkey: '0x25295f0d1d592a90b333e26e85149708208e9f8e8bc18f6c77bd62f8ad7a6866', + pubkey: '0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c'} +- {privkey: '0x51d0b65185db6989ab0b560d6deed19c7ead0e24b9b6372cbecb1f26bdfad000', + pubkey: '0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b'} +- {privkey: '0x315ed405fafe339603932eebe8dbfd650ce5dafa561f6928664c75db85f97857', + pubkey: '0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b'} +- {privkey: '0x25b1166a43c109cb330af8945d364722757c65ed2bfed5444b5a2f057f82d391', + pubkey: '0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e'} +- {privkey: '0x3f5615898238c4c4f906b507ee917e9ea1bb69b93f1dbd11a34d229c3b06784b', + pubkey: '0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e'} +- {privkey: '0x055794614bc85ed5436c1f5cab586aab6ca84835788621091f4f3b813761e7a8', + pubkey: '0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34'} +- {privkey: '0x1023c68852075965e0f7352dee3f76a84a83e7582c181c10179936c6d6348893', + pubkey: '0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373'} +- {privkey: '0x3a941600dc41e5d20e818473b817a28507c23cdfdb4b659c15461ee5c71e41f5', + pubkey: '0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac'} +- {privkey: '0x066e3bdc0415530e5c7fed6382d5c822c192b620203cf669903e1810a8c67d06', + pubkey: '0xa6d310dbbfab9a22450f59993f87a4ce5db6223f3b5f1f30d2c4ec718922d400e0b3c7741de8e59960f72411a0ee10a7'} +- {privkey: '0x2b3b88a041168a1c4cd04bdd8de7964fd35238f95442dc678514f9dadb81ec34', + pubkey: '0x9893413c00283a3f9ed9fd9845dda1cea38228d22567f9541dccc357e54a2d6a6e204103c92564cbc05f4905ac7c493a'} diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 8ba2b9eba..cac7e7462 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -1,130 +1,132 @@ //! Produces the "deterministic" validator private keys used for inter-operability testing for //! Ethereum 2.0 clients. //! -//! Each private key is the first hash in the sha2 hash-chain that is less than 2^255. As such, -//! keys generated here are **not secret** and are **not for production use**. +//! Each private key is the sha2 hash of the validator index (little-endian, padded to 32 bytes), +//! modulo the BLS12-381 curve order. //! -//! Note: these keys have not been tested against a reference implementation, yet. +//! Keys generated here are **not secret** and are **not for production use**. It is trivial to +//! know the secret key for any validator. +//! +//! ## Reference +//! +//! Reference implementation: +//! +//! https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen.py +//! +//! +//! This implementation passes the [reference implementation +//! tests](https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml).
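+//!
+//! As a sketch, the derivation implemented below is roughly:
+//!
+//! ```ignore
+//! // Illustrative only; see `be_private_key` below for the real code.
+//! let mut preimage = [0u8; 32];
+//! preimage[..8].copy_from_slice(&(validator_index as u64).to_le_bytes());
+//! let privkey = BigUint::from_bytes_le(&hash(&preimage)) % &*CURVE_ORDER;
+//! ```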
+#[macro_use] +extern crate lazy_static; use eth2_hashing::hash; +use milagro_bls::{Keypair, PublicKey, SecretKey}; use num_bigint::BigUint; +use serde_derive::{Deserialize, Serialize}; +use std::convert::TryInto; +use std::fs::File; +use std::path::PathBuf; -pub const CURVE_ORDER_BITS: usize = 255; pub const PRIVATE_KEY_BYTES: usize = 48; +pub const PUBLIC_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; -fn hash_big_int_le(uint: BigUint) -> BigUint { - let mut preimage = uint.to_bytes_le(); - preimage.resize(32, 0_u8); - BigUint::from_bytes_le(&hash(&preimage)) +lazy_static! { + static ref CURVE_ORDER: BigUint = + "52435875175126190479447740508185965837690552500527637822603658699938581184513" + .parse::<BigUint>() + .expect("Curve order should be valid"); } -fn private_key(validator_index: usize) -> BigUint { - let mut key = BigUint::from(validator_index); - loop { - key = hash_big_int_le(key); - if key.bits() <= CURVE_ORDER_BITS { - break key; - } - } - } - -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in big-endian bytes. +/// Return the private key for the given `validator_index`, encoded as big-endian bytes and +/// left-padded with zeros to `PRIVATE_KEY_BYTES`. pub fn be_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_be(); + let preimage = { + let mut bytes = [0; HASH_BYTES]; + let index = validator_index.to_le_bytes(); + bytes[0..index.len()].copy_from_slice(&index); + bytes + }; - let mut out = [0; PRIVATE_KEY_BYTES]; - out[PRIVATE_KEY_BYTES - vec.len()..PRIVATE_KEY_BYTES].copy_from_slice(&vec); - out + let privkey = BigUint::from_bytes_le(&hash(&preimage)) % &*CURVE_ORDER; + + let mut bytes = [0; PRIVATE_KEY_BYTES]; + let privkey_bytes = privkey.to_bytes_be(); + bytes[PRIVATE_KEY_BYTES - privkey_bytes.len()..].copy_from_slice(&privkey_bytes); + bytes } -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in little-endian bytes. -pub fn le_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_le(); +/// Return a public and private keypair for a given `validator_index`. +pub fn keypair(validator_index: usize) -> Keypair { + let sk = SecretKey::from_bytes(&be_private_key(validator_index)).expect(&format!( + "Should build valid private key for validator index {}", + validator_index + )); - let mut out = [0; PRIVATE_KEY_BYTES]; - out[0..vec.len()].copy_from_slice(&vec); - out -} - -#[cfg(test)] -mod tests { - use super::*; - - fn flip(vec: &[u8]) -> Vec<u8> { - let len = vec.len(); - let mut out = vec![0; len]; - for i in 0..len { - out[len - 1 - i] = vec[i]; - } - out - } - - fn pad_le_bls(mut vec: Vec<u8>) -> Vec<u8> { - vec.resize(PRIVATE_KEY_BYTES, 0_u8); - vec - } - - fn pad_be_bls(mut vec: Vec<u8>) -> Vec<u8> { - let mut out = vec![0; PRIVATE_KEY_BYTES - vec.len()]; - out.append(&mut vec); - out - } - - fn pad_le_hash(index: usize) -> Vec<u8> { - let mut vec = index.to_le_bytes().to_vec(); - vec.resize(HASH_BYTES, 0_u8); - vec - } - - fn multihash(index: usize, rounds: usize) -> Vec<u8> { - let mut vec = pad_le_hash(index); - for _ in 0..rounds { - vec = hash(&vec); - } - vec - } - - fn compare(validator_index: usize, preimage: &[u8]) { - assert_eq!( - &le_private_key(validator_index)[..], - &pad_le_bls(hash(preimage))[..] - ); - assert_eq!( - &be_private_key(validator_index)[..], - &pad_be_bls(flip(&hash(preimage)))[..]
- ); - } - - #[test] - fn consistency() { - for i in 0..256 { - let le = BigUint::from_bytes_le(&le_private_key(i)); - let be = BigUint::from_bytes_be(&be_private_key(i)); - assert_eq!(le, be); - } - } - - #[test] - fn non_repeats() { - // These indices only need one hash to be in the curve order. - compare(0, &pad_le_hash(0)); - compare(3, &pad_le_hash(3)); - } - - #[test] - fn repeats() { - // Index 5 needs 5x hashes to get into the curve order. - compare(5, &multihash(5, 5)); - } - - #[test] - fn doesnt_panic() { - for i in 0..256 { - be_private_key(i); - le_private_key(i); - } + Keypair { + pk: PublicKey::from_secret_key(&sk), + sk, } } + +#[derive(Serialize, Deserialize)] +struct YamlKeypair { + /// Big-endian. + privkey: String, + /// Big-endian. + pubkey: String, +} + +impl TryInto for YamlKeypair { + type Error = String; + + fn try_into(self) -> Result { + let privkey = string_to_bytes(&self.privkey)?; + let pubkey = string_to_bytes(&self.pubkey)?; + + if (privkey.len() > PRIVATE_KEY_BYTES) || (pubkey.len() > PUBLIC_KEY_BYTES) { + return Err("Public or private key is too long".into()); + } + + let sk = { + let mut bytes = vec![0; PRIVATE_KEY_BYTES - privkey.len()]; + bytes.extend_from_slice(&privkey); + SecretKey::from_bytes(&bytes) + .map_err(|e| format!("Failed to decode bytes into secret key: {:?}", e))? + }; + + let pk = { + let mut bytes = vec![0; PUBLIC_KEY_BYTES - pubkey.len()]; + bytes.extend_from_slice(&pubkey); + PublicKey::from_bytes(&bytes) + .map_err(|e| format!("Failed to decode bytes into public key: {:?}", e))? + }; + + Ok(Keypair { pk, sk }) + } +} + +fn string_to_bytes(string: &str) -> Result, String> { + let string = if string.starts_with("0x") { + &string[2..] + } else { + string + }; + + hex::decode(string).map_err(|e| format!("Unable to decode public or private key: {}", e)) +} + +/// Loads keypairs from a YAML encoded file. +/// +/// Uses this as reference: +/// https://github.com/ethereum/eth2.0-pm/blob/9a9dbcd95e2b8e10287797bd768014ab3d842e99/interop/mocked_start/keygen_10_validators.yaml +pub fn keypairs_from_yaml_file(path: PathBuf) -> Result, String> { + let file = + File::open(path.clone()).map_err(|e| format!("Unable to open YAML key file: {}", e))?; + + serde_yaml::from_reader::<_, Vec>(file) + .map_err(|e| format!("Could not parse YAML: {:?}", e))? 
+ .into_iter() + .map(TryInto::try_into) + .collect::, String>>() +} diff --git a/eth2/utils/eth2_interop_keypairs/tests/from_file.rs b/eth2/utils/eth2_interop_keypairs/tests/from_file.rs new file mode 100644 index 000000000..dd62d1f3e --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/tests/from_file.rs @@ -0,0 +1,23 @@ +#![cfg(test)] +use eth2_interop_keypairs::{keypair as reference_keypair, keypairs_from_yaml_file}; +use std::path::PathBuf; + +fn yaml_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("specs") + .join("keygen_10_validators.yaml") +} + +#[test] +fn load_from_yaml() { + let keypairs = keypairs_from_yaml_file(yaml_path()).expect("should read keypairs from file"); + + keypairs.into_iter().enumerate().for_each(|(i, keypair)| { + assert_eq!( + keypair, + reference_keypair(i), + "Decoded key {} does not match generated key", + i + ) + }); +} diff --git a/eth2/utils/eth2_interop_keypairs/tests/generation.rs b/eth2/utils/eth2_interop_keypairs/tests/generation.rs new file mode 100644 index 000000000..0d89eaa4d --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/tests/generation.rs @@ -0,0 +1,64 @@ +#![cfg(test)] +use eth2_interop_keypairs::{be_private_key, keypair}; +use num_bigint::BigUint; + +#[test] +fn reference_private_keys() { + // Sourced from: + // + // https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let reference = [ + "16808672146709759238327133555736750089977066230599028589193936481731504400486", + "37006103240406073079686739739280712467525465637222501547219594975923976982528", + "22330876536127119444572216874798222843352868708084730796787004036811744442455", + "17048462031355941381150076874414096388968985457797372268770826099852902060945", + "28647806952216650698330424381872693846361470773871570637461872359310549743691", + "2416304019107052589452838695606585506736351107897780798170812672519914514344", + "7300215445567548136411883691093515822872548648751398235557229381530420545683", + "26495790445032093722332687600112008700915252495659977774957922313678954054133", + "2908643403277969554503670470854573663206729491025062456164283925661321952518", + "19554639423851580804889717218680781396599791537051606512605582393920758869044", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let bytes = be_private_key(i); + let num = BigUint::from_bytes_be(&bytes); + assert_eq!(&num.to_str_radix(10), reference) + }); +} + +#[test] +fn reference_public_keys() { + // Sourced from: + // + // https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let reference = [ + "qZp27XeW974i1bfoXe63xWd+iOUR4LM3YY+MTrYTSbS/LRU/ZJ97UzWf6LlKOORM", + "uJvrxpl2lyajGMjplxvTFxKXxhrqSmV4p6T5S1R9y6W6wWqJEItrah/jaV0ah0oL", + "o6MrD4tN24PxoKhT2B3XJd/ld9T0w9uOzlLOKwJuyoSBXBp+jpKk3j11VzO/fkqb", + "iMFB33fNnY16cadcgmxBqcnwPG7hsYDz54UvaigAmd7TUbWNZuZTr45CgWpNj1Mu", + "gSg7eiDhykYOvZu9dwBdVXNwyrsfmkT1MMTExmIw9nX434tMKBiFGqfXeoDKWkpe", + "qwvdoPhfhC9DG+rM8SUL8f17pRtBAP1kNktkAf2oW7AGmz5xW1iBloTn/AsQpyo0", + "mXfxyLcxqNVVgUa/uGyuomQ088WHi1ib8oCkLJFZ5wDp3w5AhilsILAR0ueMJ9Nz", + "qNTHwneVpyWWExfvWVOnAy7W2Dc524sOinI1PRuLRDlCf376LInKoDzJ8o+Muris", + "ptMQ27+rmiJFD1mZP4ekzl22Ij87Xx8w0sTscYki1ADgs8d0HejlmWD3JBGg7hCn", + "mJNBPAAoOj+e2f2YRd2hzqOCKNIlZ/lUHczDV+VKLWpuIEEDySVky8BfSQWsfEk6", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let pair = keypair(i); + let reference = 
base64::decode(reference).expect("Reference should be valid base64"); + + assert_eq!( + reference.len(), + 48, + "Reference should be 48 bytes (public key size)" + ); + + assert_eq!(pair.pk.as_bytes(), reference); + }); +} diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml new file mode 100644 index 000000000..cfc4c6baf --- /dev/null +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "lighthouse_bootstrap" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +eth2_config = { path = "../eth2_config" } +eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p" } +reqwest = "0.9" +url = "1.2" +types = { path = "../../types" } +serde = "1.0" +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } diff --git a/beacon_node/client/src/bootstrapper.rs b/eth2/utils/lighthouse_bootstrap/src/lib.rs similarity index 77% rename from beacon_node/client/src/bootstrapper.rs rename to eth2/utils/lighthouse_bootstrap/src/lib.rs index c94d9a51d..92a587ff2 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/eth2/utils/lighthouse_bootstrap/src/lib.rs @@ -1,14 +1,20 @@ +use eth2_config::Eth2Config; use eth2_libp2p::{ multiaddr::{Multiaddr, Protocol}, Enr, }; use reqwest::{Error as HttpError, Url}; use serde::Deserialize; +use slog::{error, Logger}; use std::borrow::Cow; use std::net::Ipv4Addr; +use std::time::Duration; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; use url::Host; +pub const RETRY_SLEEP_MILLIS: u64 = 100; +pub const RETRY_WARN_INTERVAL: u64 = 30; + #[derive(Debug)] enum Error { InvalidUrl, @@ -30,11 +36,35 @@ pub struct Bootstrapper { } impl Bootstrapper { - /// Parses the given `server` as a URL, instantiating `Self`. - pub fn from_server_string(server: String) -> Result<Self, String> { - Ok(Self { + /// Parses the given `server` as a URL, instantiating `Self` and blocking until a connection + /// can be made with the server. + /// + /// Never times out. + pub fn connect(server: String, log: &Logger) -> Result<Self, String> { + let bootstrapper = Self { url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, - }) + }; + + let mut retry_count = 0; + loop { + match bootstrapper.enr() { + Ok(_) => break, + Err(_) => { + if retry_count % RETRY_WARN_INTERVAL == 0 { + error!( + log, + "Failed to contact bootstrap server"; + "retry_count" => retry_count, + "retry_delay_millis" => RETRY_SLEEP_MILLIS, + ); + } + retry_count += 1; + std::thread::sleep(Duration::from_millis(RETRY_SLEEP_MILLIS)); + } + } + } + + Ok(bootstrapper) } /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. @@ -46,8 +76,12 @@ impl Bootstrapper { /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of /// `/ipv4/172.0.0.1/tcp/9000`. - pub fn best_effort_multiaddr(&self) -> Option<Multiaddr> { - let tcp_port = self.listen_port().ok()?; + pub fn best_effort_multiaddr(&self, port: Option<u16>) -> Option<Multiaddr> { + let tcp_port = if let Some(port) = port { + port + } else { + self.listen_port().ok()? + }; + let mut multiaddr = Multiaddr::with_capacity(2); @@ -70,6 +104,11 @@ impl Bootstrapper { } } + /// Returns the server's Eth2Config.
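+    ///
+    /// A hedged sketch of typical use (the server URL here is hypothetical):
+    ///
+    /// ```ignore
+    /// let bootstrapper = Bootstrapper::connect("http://localhost:5052".to_string(), &log)?;
+    /// let eth2_config = bootstrapper.eth2_config()?;
+    /// ```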
+ pub fn eth2_config(&self) -> Result<Eth2Config, String> { + get_eth2_config(self.url.clone()).map_err(|e| format!("Unable to get Eth2Config: {:?}", e)) + } + /// Returns the server's ENR address. pub fn enr(&self) -> Result<Enr, String> { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) @@ -125,6 +164,19 @@ fn get_slots_per_epoch(mut url: Url) -> Result<Slot, Error> { .map_err(Into::into) } +fn get_eth2_config(mut url: Url) -> Result<Eth2Config, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("eth2_config"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result<Slot, Error> { url.path_segments_mut() .map(|mut url| { diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index 6ef6cc0aa..5ffb6af53 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -7,3 +7,8 @@ edition = "2018" [dependencies] ethereum-types = "0.6" eth2_hashing = { path = "../eth2_hashing" } +lazy_static = "1.3.0" + +[dev-dependencies] +quickcheck = "0.8" +quickcheck_macros = "0.8" diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index bc8bcea12..73a972c75 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -1,6 +1,138 @@ +#[macro_use] +extern crate lazy_static; + use eth2_hashing::hash; use ethereum_types::H256; +const MAX_TREE_DEPTH: usize = 32; +const EMPTY_SLICE: &[H256] = &[]; + +lazy_static! { + /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. + static ref ZERO_HASHES: Vec<H256> = { + let mut hashes = vec![H256::from([0; 32]); MAX_TREE_DEPTH + 1]; + + for i in 0..MAX_TREE_DEPTH { + hashes[i + 1] = hash_concat(hashes[i], hashes[i]); + } + + hashes + }; + + /// Zero nodes to act as "synthetic" left and right subtrees of other zero nodes. + static ref ZERO_NODES: Vec<MerkleTree> = { + (0..MAX_TREE_DEPTH + 1).map(MerkleTree::Zero).collect() + }; +} + +/// Right-sparse Merkle tree. +/// +/// Efficiently represents a Merkle tree of fixed depth where only the first N +/// indices are populated by non-zero leaves (perfect for the deposit contract tree). +#[derive(Debug)] +pub enum MerkleTree { + /// Leaf node with the hash of its content. + Leaf(H256), + /// Internal node with hash, left subtree and right subtree. + Node(H256, Box<Self>, Box<Self>), + /// Zero subtree of a given depth. + /// + /// It represents a Merkle tree of 2^depth zero leaves. + Zero(usize), +} + +impl MerkleTree { + /// Create a new Merkle tree from a list of leaves and a fixed depth. + pub fn create(leaves: &[H256], depth: usize) -> Self { + use MerkleTree::*; + + if leaves.is_empty() { + return Zero(depth); + } + + match depth { + 0 => { + debug_assert_eq!(leaves.len(), 1); + Leaf(leaves[0]) + } + _ => { + // Split leaves into left and right subtrees + let subtree_capacity = 2usize.pow(depth as u32 - 1); + let (left_leaves, right_leaves) = if leaves.len() <= subtree_capacity { + (leaves, EMPTY_SLICE) + } else { + leaves.split_at(subtree_capacity) + }; + + let left_subtree = MerkleTree::create(left_leaves, depth - 1); + let right_subtree = MerkleTree::create(right_leaves, depth - 1); + let hash = hash_concat(left_subtree.hash(), right_subtree.hash()); + + Node(hash, Box::new(left_subtree), Box::new(right_subtree)) + } + } + } + + /// Retrieve the root hash of this Merkle tree.
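+    ///
+    /// A sketch (assuming `leaves: Vec<H256>`, e.g. hashed deposit data):
+    ///
+    /// ```ignore
+    /// let tree = MerkleTree::create(&leaves, 32);
+    /// let root: H256 = tree.hash();
+    /// ```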
+ pub fn hash(&self) -> H256 { + match *self { + MerkleTree::Leaf(h) => h, + MerkleTree::Node(h, _, _) => h, + MerkleTree::Zero(depth) => ZERO_HASHES[depth], + } + } + + /// Get a reference to the left and right subtrees if they exist. + pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> { + match *self { + MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, + MerkleTree::Node(_, ref l, ref r) => Some((l, r)), + MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])), + } + } + + /// Is this Merkle tree a leaf? + pub fn is_leaf(&self) -> bool { + match self { + MerkleTree::Leaf(_) => true, + _ => false, + } + } + + /// Return the leaf at `index` and a Merkle proof of its inclusion. + /// + /// The Merkle proof is in "bottom-up" order, starting with a leaf node + /// and moving up the tree. Its length will be exactly equal to `depth`. + pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec) { + let mut proof = vec![]; + let mut current_node = self; + let mut current_depth = depth; + while current_depth > 0 { + let ith_bit = (index >> (current_depth - 1)) & 0x01; + // Note: unwrap is safe because leaves are only ever constructed at depth == 0. + let (left, right) = current_node.left_and_right_branches().unwrap(); + + // Go right, include the left branch in the proof. + if ith_bit == 1 { + proof.push(left.hash()); + current_node = right; + } else { + proof.push(right.hash()); + current_node = left; + } + current_depth -= 1; + } + + debug_assert_eq!(proof.len(), depth); + debug_assert!(current_node.is_leaf()); + + // Put proof in bottom-up order. + proof.reverse(); + + (current_node.hash(), proof) + } +} + /// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`. /// /// The `branch` argument is the main component of the proof: it should be a list of internal @@ -46,15 +178,66 @@ fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { vec1 } +/// Compute the hash of two other hashes concatenated. +fn hash_concat(h1: H256, h2: H256) -> H256 { + H256::from_slice(&hash(&concat( + h1.as_bytes().to_vec(), + h2.as_bytes().to_vec(), + ))) +} + #[cfg(test)] mod tests { use super::*; + use quickcheck::TestResult; + use quickcheck_macros::quickcheck; - fn hash_concat(h1: H256, h2: H256) -> H256 { - H256::from_slice(&hash(&concat( - h1.as_bytes().to_vec(), - h2.as_bytes().to_vec(), - ))) + /// Check that we can: + /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. + /// 2. Generate valid proofs for all of the leaves of this MerkleTree. 
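+    ///
+    /// In miniature, the property under test is (sketch):
+    ///
+    /// ```ignore
+    /// let tree = MerkleTree::create(&leaves, depth);
+    /// let (leaf, proof) = tree.generate_proof(i, depth);
+    /// assert!(verify_merkle_proof(leaf, &proof, depth, i, tree.hash()));
+    /// ```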
+ #[quickcheck] + fn quickcheck_create_and_verify(int_leaves: Vec<u64>, depth: usize) -> TestResult { + if depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { + return TestResult::discard(); + } + + let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + let merkle_tree = MerkleTree::create(&leaves, depth); + let merkle_root = merkle_tree.hash(); + + let proofs_ok = (0..leaves.len()).all(|i| { + let (leaf, branch) = merkle_tree.generate_proof(i, depth); + leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) + }); + + TestResult::from_bool(proofs_ok) + } + + #[test] + fn sparse_zero_correct() { + let depth = 2; + let zero = H256::from([0x00; 32]); + let dense_tree = MerkleTree::create(&[zero, zero, zero, zero], depth); + let sparse_tree = MerkleTree::create(&[], depth); + assert_eq!(dense_tree.hash(), sparse_tree.hash()); + } + + #[test] + fn create_small_example() { + // Construct a small Merkle tree manually and check that it's consistent with + // the MerkleTree type. + let leaf_b00 = H256::from([0xAA; 32]); + let leaf_b01 = H256::from([0xBB; 32]); + let leaf_b10 = H256::from([0xCC; 32]); + let leaf_b11 = H256::from([0xDD; 32]); + + let node_b0x = hash_concat(leaf_b00, leaf_b01); + let node_b1x = hash_concat(leaf_b10, leaf_b11); + + let root = hash_concat(node_b0x, node_b1x); + + let tree = MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10, leaf_b11], 2); + assert_eq!(tree.hash(), root); } #[test] diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 871743c9e..d31a1dc82 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -7,22 +7,25 @@ mod testing_slot_clock; use std::time::Duration; -pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock}; -pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock}; +pub use crate::system_time_slot_clock::SystemTimeSlotClock; +pub use crate::testing_slot_clock::TestingSlotClock; pub use metrics::scrape_for_metrics; pub use types::Slot; +/// A clock that reports the current slot. +/// +/// The clock is not required to be monotonically increasing and may go backwards. pub trait SlotClock: Send + Sync + Sized { - type Error; + /// Creates a new slot clock where the first slot is `genesis_slot`, genesis occurred + /// `genesis_duration` after the `UNIX_EPOCH` and each slot is `slot_duration` apart. + fn new(genesis_slot: Slot, genesis_duration: Duration, slot_duration: Duration) -> Self; - /// Create a new `SlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. - fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self; + /// Returns the slot at this present time. + fn now(&self) -> Option<Slot>; - fn present_slot(&self) -> Result<Option<Slot>, Self::Error>; + /// Returns the duration between slots. + fn slot_duration(&self) -> Duration; - fn duration_to_next_slot(&self) -> Result<Option<Duration>, Self::Error>; - - fn slot_duration_millis(&self) -> u64; + /// Returns the duration until the next slot. + fn duration_to_next_slot(&self) -> Option<Duration>; } diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index e0d3923e0..d1de491d0 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -17,8 +17,8 @@ lazy_static! { /// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock.
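+///
+/// A hedged sketch of use (any `SlotClock` works; the generic parameters shown are
+/// assumptions based on the body below):
+///
+/// ```ignore
+/// let clock = SystemTimeSlotClock::new(genesis_slot, genesis_duration, slot_duration);
+/// scrape_for_metrics::<MinimalEthSpec, SystemTimeSlotClock>(&clock);
+/// ```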
pub fn scrape_for_metrics(clock: &U) { - let present_slot = match clock.present_slot() { - Ok(Some(slot)) => slot, + let present_slot = match clock.now() { + Some(slot) => slot, _ => Slot::new(0), }; @@ -28,5 +28,8 @@ pub fn scrape_for_metrics(clock: &U) { present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, ); set_gauge(&SLOTS_PER_EPOCH, T::slots_per_epoch() as i64); - set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); + set_gauge( + &MILLISECONDS_PER_SLOT, + clock.slot_duration().as_millis() as i64, + ); } diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index c493a8be8..d2ebd42ea 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -1,99 +1,75 @@ use super::SlotClock; -use std::time::{Duration, SystemTime}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use types::Slot; pub use std::time::SystemTimeError; -#[derive(Debug, PartialEq)] -pub enum Error { - SlotDurationIsZero, - SystemTimeError(String), -} - /// Determines the present slot based upon the present system time. #[derive(Clone)] pub struct SystemTimeSlotClock { genesis_slot: Slot, - genesis_seconds: u64, - slot_duration_seconds: u64, + genesis_duration: Duration, + slot_duration: Duration, } impl SlotClock for SystemTimeSlotClock { - type Error = Error; + fn new(genesis_slot: Slot, genesis_duration: Duration, slot_duration: Duration) -> Self { + if slot_duration.as_millis() == 0 { + panic!("SystemTimeSlotClock cannot have a < 1ms slot duration."); + } - /// Create a new `SystemTimeSlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. - fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self { Self { genesis_slot, - genesis_seconds, - slot_duration_seconds, + genesis_duration, + slot_duration, } } - fn present_slot(&self) -> Result, Error> { - if self.slot_duration_seconds == 0 { - return Err(Error::SlotDurationIsZero); - } + fn now(&self) -> Option { + let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + let genesis = self.genesis_duration; - let syslot_time = SystemTime::now(); - let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?; - let duration_since_genesis = - duration_since_epoch.checked_sub(Duration::from_secs(self.genesis_seconds)); - - match duration_since_genesis { - None => Ok(None), - Some(d) => Ok(slot_from_duration(self.slot_duration_seconds, d) - .and_then(|s| Some(s + self.genesis_slot))), + if now > genesis { + let since_genesis = now + .checked_sub(genesis) + .expect("Control flow ensures now is greater than genesis"); + let slot = + Slot::from((since_genesis.as_millis() / self.slot_duration.as_millis()) as u64); + Some(slot + self.genesis_slot) + } else { + None } } - fn duration_to_next_slot(&self) -> Result, Error> { - duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds) + fn duration_to_next_slot(&self) -> Option { + let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + let genesis = self.genesis_duration; + + let slot_start = |slot: Slot| -> Duration { + let slot = slot.as_u64() as u32; + genesis + slot * self.slot_duration + }; + + if now > genesis { + Some( + slot_start(self.now()? 
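+ // The current slot is `self.now()?`, so the next slot starts at
+ // `slot_start(current + 1)`; subtracting `now` cannot underflow because that
+ // start time is still in the future.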
+ 1) + .checked_sub(now) + .expect("The next slot cannot start before now"), + ) + } else { + Some( + genesis + .checked_sub(now) + .expect("Control flow ensures genesis is greater than or equal to now"), + ) + } } - fn slot_duration_millis(&self) -> u64 { - self.slot_duration_seconds * 1000 + fn slot_duration(&self) -> Duration { + self.slot_duration } } -impl From for Error { - fn from(e: SystemTimeError) -> Error { - Error::SystemTimeError(format!("{:?}", e)) - } -} - -fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option { - Some(Slot::new( - duration.as_secs().checked_div(slot_duration_seconds)?, - )) -} -// calculate the duration to the next slot -fn duration_to_next_slot( - genesis_time: u64, - seconds_per_slot: u64, -) -> Result, Error> { - let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?; - let genesis_time = Duration::from_secs(genesis_time); - - if now < genesis_time { - return Ok(None); - } - - let since_genesis = now - genesis_time; - - let elapsed_slots = since_genesis.as_secs() / seconds_per_slot; - - let next_slot_start_seconds = (elapsed_slots + 1) - .checked_mul(seconds_per_slot) - .expect("Next slot time should not overflow u64"); - - let time_to_next_slot = Duration::from_secs(next_slot_start_seconds) - since_genesis; - - Ok(Some(time_to_next_slot)) -} - #[cfg(test)] mod tests { use super::*; @@ -104,71 +80,57 @@ mod tests { */ #[test] fn test_slot_now() { - let slot_time = 100; let genesis_slot = Slot::new(0); - let now = SystemTime::now(); - let since_epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap(); - - let genesis = since_epoch.as_secs() - slot_time * 89; - - let clock = SystemTimeSlotClock { - genesis_slot, - genesis_seconds: genesis, - slot_duration_seconds: slot_time, + let prior_genesis = |milliseconds_prior: u64| { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("should get system time") + - Duration::from_millis(milliseconds_prior) }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(89))); - let clock = SystemTimeSlotClock { - genesis_slot, - genesis_seconds: since_epoch.as_secs(), - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(0))); + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(0), Duration::from_secs(1)); + assert_eq!(clock.now(), Some(Slot::new(0))); - let clock = SystemTimeSlotClock { - genesis_slot, - genesis_seconds: since_epoch.as_secs() - slot_time * 42 - 5, - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(42))); + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(5_000), Duration::from_secs(1)); + assert_eq!(clock.now(), Some(Slot::new(5))); + + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(500), Duration::from_secs(1)); + assert_eq!(clock.now(), Some(Slot::new(0))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); + + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(1_500), Duration::from_secs(1)); + assert_eq!(clock.now(), Some(Slot::new(1))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); } #[test] - fn test_slot_from_duration() { - let slot_time = 100; + #[should_panic] + fn zero_seconds() { + SystemTimeSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(0)); + } - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(0)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, 
Duration::from_secs(10)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(100)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(101)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - Some(Slot::new(10)) + #[test] + #[should_panic] + fn zero_millis() { + SystemTimeSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_millis(0), ); } #[test] - fn test_slot_from_duration_slot_time_zero() { - let slot_time = 0; - - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(0)), None); - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(10)), None); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - None + #[should_panic] + fn less_than_one_millis() { + SystemTimeSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_nanos(999), ); } } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index f741d3b87..0697ec2bc 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -3,10 +3,9 @@ use std::sync::RwLock; use std::time::Duration; use types::Slot; -#[derive(Debug, PartialEq)] -pub enum Error {} - -/// Determines the present slot based upon the present system time. +/// A slot clock where the slot is manually set instead of being determined by the system time. +/// +/// Useful for testing scenarios. pub struct TestingSlotClock { slot: RwLock, } @@ -17,32 +16,30 @@ impl TestingSlotClock { } pub fn advance_slot(&self) { - self.set_slot(self.present_slot().unwrap().unwrap().as_u64() + 1) + self.set_slot(self.now().unwrap().as_u64() + 1) } } impl SlotClock for TestingSlotClock { - type Error = Error; - - /// Create a new `TestingSlotClock` at `genesis_slot`. - fn new(genesis_slot: Slot, _genesis_seconds: u64, _slot_duration_seconds: u64) -> Self { + fn new(genesis_slot: Slot, _genesis_duration: Duration, _slot_duration: Duration) -> Self { TestingSlotClock { slot: RwLock::new(genesis_slot), } } - fn present_slot(&self) -> Result, Error> { + fn now(&self) -> Option { let slot = *self.slot.read().expect("TestingSlotClock poisoned."); - Ok(Some(slot)) + Some(slot) } /// Always returns a duration of 1 second. - fn duration_to_next_slot(&self) -> Result, Error> { - Ok(Some(Duration::from_secs(1))) + fn duration_to_next_slot(&self) -> Option { + Some(Duration::from_secs(1)) } - fn slot_duration_millis(&self) -> u64 { - 0 + /// Always returns a slot duration of 0 seconds. 
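+    ///
+    /// (Both durations passed to `new` are ignored by this clock; tests drive time
+    /// forward via `set_slot` / `advance_slot` instead.)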
+ fn slot_duration(&self) -> Duration { + Duration::from_secs(0) } } @@ -52,11 +49,11 @@ mod tests { #[test] fn test_slot_now() { - let null = 0; + let null = Duration::from_secs(0); let clock = TestingSlotClock::new(Slot::new(10), null, null); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(10)))); + assert_eq!(clock.now(), Some(Slot::new(10))); clock.set_slot(123); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(123)))); + assert_eq!(clock.now(), Some(Slot::new(123))); } } diff --git a/eth2/utils/ssz/examples/struct_definition.rs b/eth2/utils/ssz/examples/struct_definition.rs index fa3ed2a64..0971e21da 100644 --- a/eth2/utils/ssz/examples/struct_definition.rs +++ b/eth2/utils/ssz/examples/struct_definition.rs @@ -12,6 +12,13 @@ impl Encode for Foo { ::is_ssz_fixed_len() && as Encode>::is_ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + ::ssz_fixed_len() + + ssz::BYTES_PER_LENGTH_OFFSET + + ::ssz_fixed_len() + + self.b.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len() diff --git a/eth2/utils/ssz/src/encode.rs b/eth2/utils/ssz/src/encode.rs index 6ceb08deb..52b3d9bfd 100644 --- a/eth2/utils/ssz/src/encode.rs +++ b/eth2/utils/ssz/src/encode.rs @@ -27,6 +27,12 @@ pub trait Encode { BYTES_PER_LENGTH_OFFSET } + /// Returns the size (in bytes) when `self` is serialized. + /// + /// Returns the same value as `self.as_ssz_bytes().len()` but this method is significantly more + /// efficient. + fn ssz_bytes_len(&self) -> usize; + /// Returns the full-form encoding of this object. /// /// The default implementation of this method should suffice for most cases. diff --git a/eth2/utils/ssz/src/encode/impls.rs b/eth2/utils/ssz/src/encode/impls.rs index 3d68d8911..d25e79370 100644 --- a/eth2/utils/ssz/src/encode/impls.rs +++ b/eth2/utils/ssz/src/encode/impls.rs @@ -13,6 +13,10 @@ macro_rules! impl_encodable_for_uint { $bit_size / 8 } + fn ssz_bytes_len(&self) -> usize { + $bit_size / 8 + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(&self.to_le_bytes()); } @@ -58,6 +62,23 @@ macro_rules! 
impl_encode_for_tuples { } } + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() + } else { + let mut len = 0; + $( + len += if <$T as Encode>::is_ssz_fixed_len() { + <$T as Encode>::ssz_fixed_len() + } else { + BYTES_PER_LENGTH_OFFSET + + self.$idx.ssz_bytes_len() + }; + )* + len + } + } + fn ssz_append(&self, buf: &mut Vec) { let offset = $( <$T as Encode>::ssz_fixed_len() + @@ -185,6 +206,19 @@ impl Encode for Option { false } + fn ssz_bytes_len(&self) -> usize { + if let Some(some) = self { + let len = if ::is_ssz_fixed_len() { + ::ssz_fixed_len() + } else { + some.ssz_bytes_len() + }; + len + BYTES_PER_LENGTH_OFFSET + } else { + BYTES_PER_LENGTH_OFFSET + } + } + fn ssz_append(&self, buf: &mut Vec) { match self { None => buf.append(&mut encode_union_index(0)), @@ -201,6 +235,16 @@ impl Encode for Vec { false } + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() * self.len() + } else { + let mut len = self.into_iter().map(|item| item.ssz_bytes_len()).sum(); + len += BYTES_PER_LENGTH_OFFSET * self.len(); + len + } + } + fn ssz_append(&self, buf: &mut Vec) { if T::is_ssz_fixed_len() { buf.reserve(T::ssz_fixed_len() * self.len()); @@ -229,6 +273,10 @@ impl Encode for bool { 1 } + fn ssz_bytes_len(&self) -> usize { + 1 + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(&(*self as u8).to_le_bytes()); } @@ -243,6 +291,10 @@ impl Encode for NonZeroUsize { ::ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + std::mem::size_of::() + } + fn ssz_append(&self, buf: &mut Vec) { self.get().ssz_append(buf) } @@ -257,6 +309,10 @@ impl Encode for H256 { 32 } + fn ssz_bytes_len(&self) -> usize { + 32 + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(self.as_bytes()); } @@ -271,6 +327,10 @@ impl Encode for U256 { 32 } + fn ssz_bytes_len(&self) -> usize { + 32 + } + fn ssz_append(&self, buf: &mut Vec) { let n = ::ssz_fixed_len(); let s = buf.len(); @@ -289,6 +349,10 @@ impl Encode for U128 { 16 } + fn ssz_bytes_len(&self) -> usize { + 16 + } + fn ssz_append(&self, buf: &mut Vec) { let n = ::ssz_fixed_len(); let s = buf.len(); @@ -309,6 +373,10 @@ macro_rules! impl_encodable_for_u8_array { $len } + fn ssz_bytes_len(&self) -> usize { + $len + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(&self[..]); } diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index 696d36cbf..115633889 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -36,7 +36,6 @@ mod decode; mod encode; -mod macros; pub use decode::{ impls::decode_list_of_variable_length_items, Decode, DecodeError, SszDecoder, SszDecoderBuilder, diff --git a/eth2/utils/ssz/src/macros.rs b/eth2/utils/ssz/src/macros.rs index 04147a805..8b1378917 100644 --- a/eth2/utils/ssz/src/macros.rs +++ b/eth2/utils/ssz/src/macros.rs @@ -1,96 +1 @@ -/// Implements `Encode` for `$impl_type` using an implementation of `From<$impl_type> for -/// $from_type`. -/// -/// In effect, this allows for easy implementation of `Encode` for some type that implements a -/// `From` conversion into another type that already has `Encode` implemented. -#[macro_export] -macro_rules! 
impl_encode_via_from { - ($impl_type: ty, $from_type: ty) => { - impl ssz::Encode for $impl_type { - fn is_ssz_fixed_len() -> bool { - <$from_type as ssz::Encode>::is_ssz_fixed_len() - } - fn ssz_fixed_len() -> usize { - <$from_type as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - let conv: $from_type = self.clone().into(); - - conv.ssz_append(buf) - } - } - }; -} - -/// Implements `Decode` for `$impl_type` using an implementation of `From<$impl_type> for -/// $from_type`. -/// -/// In effect, this allows for easy implementation of `Decode` for some type that implements a -/// `From` conversion into another type that already has `Decode` implemented. -#[macro_export] -macro_rules! impl_decode_via_from { - ($impl_type: ty, $from_type: tt) => { - impl ssz::Decode for $impl_type { - fn is_ssz_fixed_len() -> bool { - <$from_type as ssz::Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <$from_type as ssz::Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - $from_type::from_ssz_bytes(bytes).and_then(|dec| Ok(dec.into())) - } - } - }; -} - -#[cfg(test)] -mod tests { - use self::ssz::{Decode, Encode}; - use crate as ssz; - - #[derive(PartialEq, Debug, Clone, Copy)] - struct Wrapper(u64); - - impl From for Wrapper { - fn from(x: u64) -> Wrapper { - Wrapper(x) - } - } - - impl From for u64 { - fn from(x: Wrapper) -> u64 { - x.0 - } - } - - impl_encode_via_from!(Wrapper, u64); - impl_decode_via_from!(Wrapper, u64); - - #[test] - fn impl_encode_via_from() { - let check_encode = |a: u64, b: Wrapper| assert_eq!(a.as_ssz_bytes(), b.as_ssz_bytes()); - - check_encode(0, Wrapper(0)); - check_encode(1, Wrapper(1)); - check_encode(42, Wrapper(42)); - } - - #[test] - fn impl_decode_via_from() { - let check_decode = |bytes: Vec| { - let a = u64::from_ssz_bytes(&bytes).unwrap(); - let b = Wrapper::from_ssz_bytes(&bytes).unwrap(); - - assert_eq!(a, b.into()) - }; - - check_decode(vec![0, 0, 0, 0, 0, 0, 0, 0]); - check_decode(vec![1, 0, 0, 0, 0, 0, 0, 0]); - check_decode(vec![1, 0, 0, 0, 2, 0, 0, 0]); - } -} diff --git a/eth2/utils/ssz/tests/tests.rs b/eth2/utils/ssz/tests/tests.rs index c19e36662..26f2f53ef 100644 --- a/eth2/utils/ssz/tests/tests.rs +++ b/eth2/utils/ssz/tests/tests.rs @@ -8,6 +8,7 @@ mod round_trip { fn round_trip(items: Vec) { for item in items { let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); } } diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index 47d96859e..5bdb9ca9d 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -81,9 +81,12 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { }; let field_idents = get_serializable_named_field_idents(&struct_data); + let field_idents_a = get_serializable_named_field_idents(&struct_data); let field_types_a = get_serializable_field_types(&struct_data); let field_types_b = field_types_a.clone(); - let field_types_c = field_types_a.clone(); + let field_types_d = field_types_a.clone(); + let field_types_e = field_types_a.clone(); + let field_types_f = field_types_a.clone(); let output = quote! 
{ impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { @@ -105,9 +108,27 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { } } + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() + } else { + let mut len = 0; + #( + if <#field_types_d as ssz::Encode>::is_ssz_fixed_len() { + len += <#field_types_e as ssz::Encode>::ssz_fixed_len(); + } else { + len += ssz::BYTES_PER_LENGTH_OFFSET; + len += self.#field_idents_a.ssz_bytes_len(); + } + )* + + len + } + } + fn ssz_append(&self, buf: &mut Vec) { let offset = #( - <#field_types_c as ssz::Encode>::ssz_fixed_len() + + <#field_types_f as ssz::Encode>::ssz_fixed_len() + )* 0; diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index 197426046..cc01d40c7 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -476,6 +476,12 @@ impl Encode for Bitfield> { false } + fn ssz_bytes_len(&self) -> usize { + // We could likely do better than turning this into bytes and reading the length, however + // it is kept this way for simplicity. + self.clone().into_bytes().len() + } + fn ssz_append(&self, buf: &mut Vec) { buf.append(&mut self.clone().into_bytes()) } @@ -498,6 +504,10 @@ impl Encode for Bitfield> { true } + fn ssz_bytes_len(&self) -> usize { + self.as_slice().len() + } + fn ssz_fixed_len() -> usize { bytes_for_bit_len(N::to_usize()) } @@ -616,6 +626,7 @@ mod bitvector { pub type BitVector4 = BitVector; pub type BitVector8 = BitVector; pub type BitVector16 = BitVector; + pub type BitVector64 = BitVector; #[test] fn ssz_encode() { @@ -706,6 +717,25 @@ mod bitvector { fn assert_round_trip(t: T) { assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); } + + #[test] + fn ssz_bytes_len() { + for i in 0..64 { + let mut bitfield = BitVector64::new(); + for j in 0..i { + bitfield.set(j, true).expect("should set bit in bounds"); + } + let bytes = bitfield.as_ssz_bytes(); + assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); + } + } + + #[test] + fn excess_bits_nimbus() { + let bad = vec![0b0001_1111]; + + assert!(BitVector4::from_ssz_bytes(&bad).is_err()); + } } #[cfg(test)] @@ -1152,4 +1182,16 @@ mod bitlist { vec![false, false, true, false, false, false, false, false, true] ); } + + #[test] + fn ssz_bytes_len() { + for i in 1..64 { + let mut bitfield = BitList1024::with_capacity(i).unwrap(); + for j in 0..i { + bitfield.set(j, true).expect("should set bit in bounds"); + } + let bytes = bitfield.as_ssz_bytes(); + assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); + } + } } diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index edac77f0d..f9c896331 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -172,7 +172,7 @@ where T: ssz::Encode, { fn is_ssz_fixed_len() -> bool { - true + T::is_ssz_fixed_len() } fn ssz_fixed_len() -> usize { @@ -183,6 +183,10 @@ where } } + fn ssz_bytes_len(&self) -> usize { + self.vec.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { if T::is_ssz_fixed_len() { buf.reserve(T::ssz_fixed_len() * self.len()); @@ -220,13 +224,26 @@ where fn from_ssz_bytes(bytes: &[u8]) -> Result { if bytes.is_empty() { - Ok(FixedVector::from(vec![])) + Err(ssz::DecodeError::InvalidByteLength { + len: 0, + expected: 1, + }) } else if T::is_ssz_fixed_len() { bytes .chunks(T::ssz_fixed_len()) .map(|chunk| T::from_ssz_bytes(chunk)) .collect::, _>>() - .and_then(|vec| Ok(vec.into())) 
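+                // Unlike the previous version, reject a decoded element count that does
+                // not match the fixed length `N`, rather than silently accepting it.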
+ .and_then(|vec| { + if vec.len() == N::to_usize() { + Ok(vec.into()) + } else { + Err(ssz::DecodeError::BytesInvalid(format!( + "wrong number of vec elements, got: {}, expected: {}", + vec.len(), + N::to_usize() + ))) + } + }) } else { ssz::decode_list_of_variable_length_items(bytes).and_then(|vec| Ok(vec.into())) } @@ -305,6 +322,7 @@ mod test { fn ssz_round_trip(item: T) { let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); } diff --git a/eth2/utils/ssz_types/src/variable_list.rs b/eth2/utils/ssz_types/src/variable_list.rs index beb7e6a93..feb656745 100644 --- a/eth2/utils/ssz_types/src/variable_list.rs +++ b/eth2/utils/ssz_types/src/variable_list.rs @@ -208,6 +208,10 @@ where >::ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + self.vec.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { self.vec.ssz_append(buf) } @@ -304,6 +308,7 @@ mod test { fn round_trip(item: T) { let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); } diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 88293196e..9f09f50ce 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -1,5 +1,5 @@ use super::*; -use ethereum_types::H256; +use ethereum_types::{H256, U128, U256}; macro_rules! impl_for_bitsize { ($type: ident, $bit_size: expr) => { @@ -73,6 +73,46 @@ macro_rules! impl_for_u8_array { impl_for_u8_array!(4); impl_for_u8_array!(32); +impl TreeHash for U128 { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + let mut result = vec![0; 16]; + self.to_little_endian(&mut result); + result + } + + fn tree_hash_packing_factor() -> usize { + 2 + } + + fn tree_hash_root(&self) -> Vec { + merkle_root(&self.tree_hash_packed_encoding(), 0) + } +} + +impl TreeHash for U256 { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + let mut result = vec![0; 32]; + self.to_little_endian(&mut result); + result + } + + fn tree_hash_packing_factor() -> usize { + 1 + } + + fn tree_hash_root(&self) -> Vec { + merkle_root(&self.tree_hash_packed_encoding(), 0) + } +} + impl TreeHash for H256 { fn tree_hash_type() -> TreeHashType { TreeHashType::Vector diff --git a/tests/cli_util/.gitignore b/lcli/.gitignore similarity index 100% rename from tests/cli_util/.gitignore rename to lcli/.gitignore diff --git a/tests/cli_util/Cargo.toml b/lcli/Cargo.toml similarity index 56% rename from tests/cli_util/Cargo.toml rename to lcli/Cargo.toml index 7690d5a87..55bfc1654 100644 --- a/tests/cli_util/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,5 +1,6 @@ [package] -name = "cli_util" +name = "lcli" +description = "Lighthouse CLI (modeled after zcli)" version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" @@ -8,8 +9,12 @@ edition = "2018" [dependencies] clap = "2.33" +hex = "0.3" log = "0.4" serde = "1.0" serde_yaml = "0.8" simple_logger = "1.0" -types = { path = "../../eth2/types" } +types = { path = "../eth2/types" } +state_processing = { path = "../eth2/state_processing" } +eth2_ssz = { path = "../eth2/utils/ssz" } +regex = "1.3" diff --git a/lcli/src/main.rs b/lcli/src/main.rs new file mode 100644 index 000000000..87d670cb9 --- /dev/null +++ b/lcli/src/main.rs @@ -0,0 +1,202 @@ +#[macro_use] +extern crate log; + +mod parse_hex; +mod pycli; +mod transition_blocks; + +use 
clap::{App, Arg, SubCommand}; +use parse_hex::run_parse_hex; +use pycli::run_pycli; +use std::fs::File; +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; +use transition_blocks::run_transition_blocks; +use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; + +type LocalEthSpec = MinimalEthSpec; + +fn main() { + simple_logger::init().expect("logger should initialize"); + + let matches = App::new("Lighthouse CLI Tool") + .version("0.1.0") + .author("Paul Hauner ") + .about( + "Performs various testing-related tasks, modelled after zcli. \ + by @protolambda.", + ) + .subcommand( + SubCommand::with_name("genesis_yaml") + .about("Generates a genesis YAML file") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("num_validators") + .short("n") + .value_name("INTEGER") + .takes_value(true) + .required(true) + .help("Number of initial validators."), + ) + .arg( + Arg::with_name("genesis_time") + .short("g") + .value_name("INTEGER") + .takes_value(true) + .required(false) + .help("Eth2 genesis time (seconds since UNIX epoch)."), + ) + .arg( + Arg::with_name("spec") + .short("s") + .value_name("STRING") + .takes_value(true) + .required(true) + .possible_values(&["minimal", "mainnet"]) + .default_value("minimal") + .help("Eth2 genesis time (seconds since UNIX epoch)."), + ) + .arg( + Arg::with_name("output_file") + .short("f") + .value_name("PATH") + .takes_value(true) + .default_value("./genesis_state.yaml") + .help("Output file for generated state."), + ), + ) + .subcommand( + SubCommand::with_name("transition-blocks") + .about("Performs a state transition given a pre-state and block") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("pre-state") + .value_name("BEACON_STATE") + .takes_value(true) + .required(true) + .help("Path to a SSZ file of the pre-state."), + ) + .arg( + Arg::with_name("block") + .value_name("BEACON_BLOCK") + .takes_value(true) + .required(true) + .help("Path to a SSZ file of the block to apply to pre-state."), + ) + .arg( + Arg::with_name("output") + .value_name("SSZ_FILE") + .takes_value(true) + .required(true) + .default_value("./output.ssz") + .help("Path to output a SSZ file."), + ), + ) + .subcommand( + SubCommand::with_name("pretty-hex") + .about("Parses SSZ encoded as ASCII 0x-prefixed hex") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("type") + .value_name("TYPE") + .takes_value(true) + .required(true) + .possible_values(&["block"]) + .help("The schema of the supplied SSZ."), + ) + .arg( + Arg::with_name("hex_ssz") + .value_name("HEX") + .takes_value(true) + .required(true) + .help("SSZ encoded as 0x-prefixed hex"), + ), + ) + .subcommand( + SubCommand::with_name("pycli") + .about("TODO") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("pycli-path") + .long("pycli-path") + .short("p") + .value_name("PATH") + .takes_value(true) + .default_value("../../pycli") + .help("Path to the pycli repository."), + ), + ) + .get_matches(); + + match matches.subcommand() { + ("genesis_yaml", Some(matches)) => { + let num_validators = matches + .value_of("num_validators") + .expect("slog requires num_validators") + .parse::() + .expect("num_validators must be a valid integer"); + + let genesis_time = if let Some(string) = matches.value_of("genesis_time") { + string + .parse::() + .expect("genesis_time must be a valid integer") + } else { + warn!("No genesis time supplied via CLI, using the current time."); + SystemTime::now() + 
.duration_since(UNIX_EPOCH) + .expect("should obtain time since unix epoch") + .as_secs() + }; + + let file = matches + .value_of("output_file") + .expect("slog requires output file") + .parse::() + .expect("output_file must be a valid path"); + + info!( + "Creating genesis state with {} validators and genesis time {}.", + num_validators, genesis_time + ); + + match matches.value_of("spec").expect("spec is required by slog") { + "minimal" => genesis_yaml::(num_validators, genesis_time, file), + "mainnet" => genesis_yaml::(num_validators, genesis_time, file), + _ => unreachable!("guarded by slog possible_values"), + }; + + info!("Genesis state YAML file created. Exiting successfully."); + } + ("transition-blocks", Some(matches)) => run_transition_blocks(matches) + .unwrap_or_else(|e| error!("Failed to transition blocks: {}", e)), + ("pretty-hex", Some(matches)) => { + run_parse_hex(matches).unwrap_or_else(|e| error!("Failed to pretty print hex: {}", e)) + } + ("pycli", Some(matches)) => run_pycli::(matches) + .unwrap_or_else(|e| error!("Failed to run pycli: {}", e)), + (other, _) => error!("Unknown subcommand {}. See --help.", other), + } +} + +/// Creates a genesis state and writes it to a YAML file. +fn genesis_yaml(validator_count: usize, genesis_time: u64, output: PathBuf) { + let spec = &T::default_spec(); + + let builder: TestingBeaconStateBuilder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec); + + let (mut state, _keypairs) = builder.build(); + state.genesis_time = genesis_time; + + info!("Generated state root: {:?}", state.canonical_root()); + + info!("Writing genesis state to {:?}", output); + + let file = File::create(output.clone()) + .unwrap_or_else(|e| panic!("unable to create file: {:?}. Error: {:?}", output, e)); + serde_yaml::to_writer(file, &state).expect("should be able to serialize BeaconState"); +} diff --git a/lcli/src/parse_hex.rs b/lcli/src/parse_hex.rs new file mode 100644 index 000000000..50f61ea9f --- /dev/null +++ b/lcli/src/parse_hex.rs @@ -0,0 +1,43 @@ +use clap::ArgMatches; +use serde::Serialize; +use ssz::Decode; +use types::{BeaconBlock, BeaconState, MinimalEthSpec}; + +pub fn run_parse_hex(matches: &ArgMatches) -> Result<(), String> { + let type_str = matches + .value_of("type") + .ok_or_else(|| "No type supplied".to_string())?; + let mut hex: String = matches + .value_of("hex_ssz") + .ok_or_else(|| "No hex ssz supplied".to_string())? + .to_string(); + + if hex.starts_with("0x") { + hex = hex[2..].to_string(); + } + + let hex = hex::decode(&hex).map_err(|e| format!("Failed to parse hex: {:?}", e))?; + + info!("Using minimal spec"); + info!("Type: {:?}", type_str); + + match type_str.as_ref() { + "block" => decode_and_print::>(&hex)?, + "state" => decode_and_print::>(&hex)?, + other => return Err(format!("Unknown type: {}", other)), + }; + + Ok(()) +} + +fn decode_and_print(bytes: &[u8]) -> Result<(), String> { + let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e))?; + + println!( + "{}", + serde_yaml::to_string(&item) + .map_err(|e| format!("Unable to write object to YAML: {:?}", e))? 
+ ); + + Ok(()) +} diff --git a/lcli/src/pycli.rs b/lcli/src/pycli.rs new file mode 100644 index 000000000..dda61dd72 --- /dev/null +++ b/lcli/src/pycli.rs @@ -0,0 +1,79 @@ +use clap::ArgMatches; +use ssz::Decode; +use std::fs; +use std::path::PathBuf; +use std::process::Command; +use types::{BeaconState, EthSpec}; + +pub fn run_pycli(matches: &ArgMatches) -> Result<(), String> { + let cmd_path = matches + .value_of("pycli-path") + .ok_or_else(|| "No pycli-path supplied")?; + + let pycli = PyCli::new(cmd_path.to_string())?; + + let block_path = PathBuf::from("/tmp/trinity/block_16.ssz"); + let pre_state_path = PathBuf::from("/tmp/trinity/state_15.ssz"); + + pycli + .transition_blocks::(block_path, pre_state_path) + .map_err(|e| e.to_string())?; + + Ok(()) +} + +/// A wrapper around Danny Ryan's `pycli` utility: +/// +/// https://github.com/djrtwo/pycli +/// +/// Provides functions for testing consensus logic against the executable Python spec. +pub struct PyCli { + cmd_path: PathBuf, +} + +impl PyCli { + /// Create a new instance, parsing the given `cmd_path` as a canonical path. + pub fn new(cmd_path: String) -> Result { + Ok(Self { + cmd_path: fs::canonicalize(cmd_path) + .map_err(|e| format!("Failed to canonicalize pycli path: {:?}", e))?, + }) + } + + /// Performs block processing on the state at the given `pre_state_path`, using the block at + /// `block_path`. + /// + /// Returns an SSZ-encoded `BeaconState` on success. + pub fn transition_blocks( + &self, + block_path: PathBuf, + pre_state_path: PathBuf, + ) -> Result, String> { + let output = Command::new("python") + .current_dir(self.cmd_path.clone()) + .arg("pycli.py") + .arg("transition") + .arg("blocks") + .arg(format!("--pre={}", path_string(pre_state_path)?)) + .arg(path_string(block_path)?) + .output() + .map_err(|e| format!("Failed to run command: {:?}", e))?; + + if output.status.success() { + let state = BeaconState::from_ssz_bytes(&output.stdout) + .map_err(|e| format!("Failed to parse SSZ: {:?}", e))?; + Ok(state) + } else { + Err(format!("pycli returned an error: {:?}", output)) + } + } +} + +fn path_string(path: PathBuf) -> Result { + let path = + fs::canonicalize(path).map_err(|e| format!("Unable to canonicalize path: {:?}", e))?; + + path.into_os_string() + .into_string() + .map_err(|p| format!("Unable to stringify path: {:?}", p)) +} diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs new file mode 100644 index 000000000..01e639db8 --- /dev/null +++ b/lcli/src/transition_blocks.rs @@ -0,0 +1,86 @@ +use clap::ArgMatches; +use ssz::{Decode, Encode}; +use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy}; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; +use types::{BeaconBlock, BeaconState, EthSpec, MinimalEthSpec}; + +pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), String> { + let pre_state_path = matches + .value_of("pre-state") + .ok_or_else(|| "No pre-state file supplied".to_string())? + .parse::() + .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; + + let block_path = matches + .value_of("block") + .ok_or_else(|| "No block file supplied".to_string())? + .parse::() + .map_err(|e| format!("Failed to parse block path: {}", e))?; + + let output_path = matches + .value_of("output") + .ok_or_else(|| "No output file supplied".to_string())? 
+ .parse::() + .map_err(|e| format!("Failed to parse output path: {}", e))?; + + info!("Using minimal spec"); + info!("Pre-state path: {:?}", pre_state_path); + info!("Block path: {:?}", block_path); + + let pre_state: BeaconState = load_from_ssz(pre_state_path)?; + let block: BeaconBlock = load_from_ssz(block_path)?; + + let post_state = do_transition(pre_state, block)?; + + let mut output_file = File::create(output_path.clone()) + .map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&post_state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + + Ok(()) +} + +fn do_transition( + mut pre_state: BeaconState, + block: BeaconBlock, +) -> Result, String> { + let spec = &T::default_spec(); + + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + + // Transition the parent state to the block slot. + for i in pre_state.slot.as_u64()..block.slot.as_u64() { + per_slot_processing(&mut pre_state, spec) + .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; + } + + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + + per_block_processing( + &mut pre_state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + spec, + ) + .map_err(|e| format!("State transition failed: {:?}", e))?; + + Ok(pre_state) +} + +fn load_from_ssz(path: PathBuf) -> Result { + let mut file = + File::open(path.clone()).map_err(|e| format!("Unable to open file {:?}: {:?}", path, e))?; + let mut bytes = vec![]; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?; + T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e)) +} diff --git a/scripts/whiteblock_start.sh b/scripts/whiteblock_start.sh new file mode 100755 index 000000000..74bdd8cfa --- /dev/null +++ b/scripts/whiteblock_start.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +<" + echo "--peers=" + echo "--validator-keys=" + echo "--gen-state=" + echo "--port=" +} + +while [ "$1" != "" ]; +do + PARAM=`echo $1 | awk -F= '{print $1}'` + VALUE=`echo $1 | sed 's/^[^=]*=//g'` + + case $PARAM in + --identity) + IDENTITY=$VALUE + ;; + --peers) + PEERS+=",$VALUE" + ;; + --validator-keys) + VALIDATOR_KEYS=$VALUE + ;; + --gen-state) + GEN_STATE=$VALUE + ;; + --port) + PORT=$VALUE + ;; + --help) + usage + exit + ;; + *) + echo "ERROR: unknown parameter \"$PARAM\"" + usage + exit 1 + ;; + esac + shift +done + +./beacon_node \ + --p2p-priv-key $IDENTITY \ + --logfile $BEACON_LOG_FILE \ + --libp2p-addresses $PEERS \ + --port $PORT \ + testnet \ + --force \ + file \ + ssz \ + $GEN_STATE \ + & \ + +./validator_client \ + --logfile $VALIDATOR_LOG_FILE \ + testnet \ + --bootstrap \ + interop-yaml \ + $YAML_KEY_FILE \ + +trap 'trap - SIGTERM && kill 0' SIGINT SIGTERM EXIT diff --git a/tests/cli_util/src/main.rs b/tests/cli_util/src/main.rs deleted file mode 100644 index 330a0d171..000000000 --- a/tests/cli_util/src/main.rs +++ /dev/null @@ -1,118 +0,0 @@ -#[macro_use] -extern crate log; - -use clap::{App, Arg, SubCommand}; -use std::fs::File; -use std::path::PathBuf; -use std::time::{SystemTime, UNIX_EPOCH}; -use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; - -fn main() { - simple_logger::init().expect("logger should initialize"); - - let matches = App::new("Lighthouse Testing CLI Tool") - .version("0.1.0") - .author("Paul Hauner ") - .about("Performs various testing-related tasks.") - 
.subcommand( - SubCommand::with_name("genesis_yaml") - .about("Generates a genesis YAML file") - .version("0.1.0") - .author("Paul Hauner ") - .arg( - Arg::with_name("num_validators") - .short("n") - .value_name("INTEGER") - .takes_value(true) - .required(true) - .help("Number of initial validators."), - ) - .arg( - Arg::with_name("genesis_time") - .short("g") - .value_name("INTEGER") - .takes_value(true) - .required(false) - .help("Eth2 genesis time (seconds since UNIX epoch)."), - ) - .arg( - Arg::with_name("spec") - .short("s") - .value_name("STRING") - .takes_value(true) - .required(true) - .possible_values(&["minimal", "mainnet"]) - .default_value("minimal") - .help("Eth2 genesis time (seconds since UNIX epoch)."), - ) - .arg( - Arg::with_name("output_file") - .short("f") - .value_name("PATH") - .takes_value(true) - .default_value("./genesis_state.yaml") - .help("Output file for generated state."), - ), - ) - .get_matches(); - - if let Some(matches) = matches.subcommand_matches("genesis_yaml") { - let num_validators = matches - .value_of("num_validators") - .expect("slog requires num_validators") - .parse::() - .expect("num_validators must be a valid integer"); - - let genesis_time = if let Some(string) = matches.value_of("genesis_time") { - string - .parse::() - .expect("genesis_time must be a valid integer") - } else { - warn!("No genesis time supplied via CLI, using the current time."); - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("should obtain time since unix epoch") - .as_secs() - }; - - let file = matches - .value_of("output_file") - .expect("slog requires output file") - .parse::() - .expect("output_file must be a valid path"); - - info!( - "Creating genesis state with {} validators and genesis time {}.", - num_validators, genesis_time - ); - - match matches.value_of("spec").expect("spec is required by slog") { - "minimal" => genesis_yaml::(num_validators, genesis_time, file), - "mainnet" => genesis_yaml::(num_validators, genesis_time, file), - _ => unreachable!("guarded by slog possible_values"), - }; - - info!("Genesis state YAML file created. Exiting successfully."); - } else { - error!("No subcommand supplied.") - } -} - -/// Creates a genesis state and writes it to a YAML file. -fn genesis_yaml(validator_count: usize, genesis_time: u64, output: PathBuf) { - let spec = &T::default_spec(); - - let builder: TestingBeaconStateBuilder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec); - - let (mut state, _keypairs) = builder.build(); - state.genesis_time = genesis_time; - - info!("Generated state root: {:?}", state.canonical_root()); - - info!("Writing genesis state to {:?}", output); - - let file = File::create(output.clone()) - .unwrap_or_else(|e| panic!("unable to create file: {:?}. 
Error: {:?}", output, e)); - serde_yaml::to_writer(file, &state).expect("should be able to serialize BeaconState"); -} diff --git a/tests/ef_tests/.gitignore b/tests/ef_tests/.gitignore new file mode 100644 index 000000000..a83c5aa96 --- /dev/null +++ b/tests/ef_tests/.gitignore @@ -0,0 +1 @@ +/eth2.0-spec-tests diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index ba6aca259..2f1dea11d 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -18,7 +18,9 @@ serde_derive = "1.0" serde_repr = "0.1" serde_yaml = "0.8" eth2_ssz = "0.1" +eth2_ssz_derive = "0.1" tree_hash = "0.1" +tree_hash_derive = "0.2" state_processing = { path = "../../eth2/state_processing" } swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" } types = { path = "../../eth2/types" } diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests deleted file mode 160000 index aaa1673f5..000000000 --- a/tests/ef_tests/eth2.0-spec-tests +++ /dev/null @@ -1 +0,0 @@ -Subproject commit aaa1673f508103e11304833e0456e4149f880065 diff --git a/tests/ef_tests/src/bls_setting.rs b/tests/ef_tests/src/bls_setting.rs index 79990c8ee..add7d8b7b 100644 --- a/tests/ef_tests/src/bls_setting.rs +++ b/tests/ef_tests/src/bls_setting.rs @@ -2,7 +2,6 @@ use self::BlsSetting::*; use crate::error::Error; use serde_repr::Deserialize_repr; -// TODO: use this in every test case #[derive(Deserialize_repr, Debug, Clone, Copy)] #[repr(u8)] pub enum BlsSetting { diff --git a/tests/ef_tests/src/case_result.rs b/tests/ef_tests/src/case_result.rs index 88fd353a1..9df60f402 100644 --- a/tests/ef_tests/src/case_result.rs +++ b/tests/ef_tests/src/case_result.rs @@ -1,6 +1,7 @@ use super::*; use compare_fields::{CompareFields, Comparison, FieldComparison}; use std::fmt::Debug; +use std::path::{Path, PathBuf}; use types::BeaconState; pub const MAX_VALUE_STRING_LEN: usize = 500; @@ -9,14 +10,21 @@ pub const MAX_VALUE_STRING_LEN: usize = 500; pub struct CaseResult { pub case_index: usize, pub desc: String, + pub path: PathBuf, pub result: Result<(), Error>, } impl CaseResult { - pub fn new(case_index: usize, case: &impl Case, result: Result<(), Error>) -> Self { + pub fn new( + case_index: usize, + path: &Path, + case: &impl Case, + result: Result<(), Error>, + ) -> Self { CaseResult { case_index, desc: case.description(), + path: path.into(), result, } } diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 1ae4ea1d8..c5b0d8c4f 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -1,5 +1,7 @@ use super::*; +use rayon::prelude::*; use std::fmt::Debug; +use std::path::{Path, PathBuf}; mod bls_aggregate_pubkeys; mod bls_aggregate_sigs; @@ -7,20 +9,11 @@ mod bls_g2_compressed; mod bls_g2_uncompressed; mod bls_priv_to_pub; mod bls_sign_msg; -mod epoch_processing_crosslinks; -mod epoch_processing_final_updates; -mod epoch_processing_justification_and_finalization; -mod epoch_processing_registry_updates; -mod epoch_processing_slashings; +mod common; +mod epoch_processing; mod genesis_initialization; mod genesis_validity; -mod operations_attestation; -mod operations_attester_slashing; -mod operations_block_header; -mod operations_deposit; -mod operations_exit; -mod operations_proposer_slashing; -mod operations_transfer; +mod operations; mod sanity_blocks; mod sanity_slots; mod shuffling; @@ -33,27 +26,23 @@ pub use bls_g2_compressed::*; pub use bls_g2_uncompressed::*; pub use bls_priv_to_pub::*; pub use bls_sign_msg::*; -pub use 
epoch_processing_crosslinks::*; -pub use epoch_processing_final_updates::*; -pub use epoch_processing_justification_and_finalization::*; -pub use epoch_processing_registry_updates::*; -pub use epoch_processing_slashings::*; +pub use common::SszStaticType; +pub use epoch_processing::*; pub use genesis_initialization::*; pub use genesis_validity::*; -pub use operations_attestation::*; -pub use operations_attester_slashing::*; -pub use operations_block_header::*; -pub use operations_deposit::*; -pub use operations_exit::*; -pub use operations_proposer_slashing::*; -pub use operations_transfer::*; +pub use operations::*; pub use sanity_blocks::*; pub use sanity_slots::*; pub use shuffling::*; pub use ssz_generic::*; pub use ssz_static::*; -pub trait Case: Debug { +pub trait LoadCase: Sized { + /// Load the test case from a test case directory. + fn load_from_dir(_path: &Path) -> Result; +} + +pub trait Case: Debug + Sync { /// An optional field for implementing a custom description. /// /// Defaults to "no description". @@ -70,51 +59,15 @@ pub trait Case: Debug { #[derive(Debug)] pub struct Cases { - pub test_cases: Vec, + pub test_cases: Vec<(PathBuf, T)>, } -impl EfTest for Cases -where - T: Case + Debug, -{ - fn test_results(&self) -> Vec { +impl Cases { + pub fn test_results(&self) -> Vec { self.test_cases - .iter() + .into_par_iter() .enumerate() - .map(|(i, tc)| CaseResult::new(i, tc, tc.result(i))) + .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i))) .collect() } } - -impl YamlDecode for Cases { - /// Decodes a YAML list of test cases - fn yaml_decode(yaml: &str) -> Result { - let mut p = 0; - let mut elems: Vec<&str> = yaml - .match_indices("\n- ") - // Skip the `\n` used for matching a new line - .map(|(i, _)| i + 1) - .map(|i| { - let yaml_element = &yaml[p..i]; - p = i; - - yaml_element - }) - .collect(); - - elems.push(&yaml[p..]); - - let test_cases = elems - .iter() - .map(|s| { - // Remove the `- ` prefix. - let s = &s[2..]; - // Remove a single level of indenting. 
- s.replace("\n ", "\n") - }) - .map(|s| T::yaml_decode(&s.to_string()).unwrap()) - .collect(); - - Ok(Self { test_cases }) - } -} diff --git a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs index 6e38743f2..13c2fea17 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{AggregatePublicKey, PublicKey}; use serde_derive::Deserialize; @@ -9,11 +10,7 @@ pub struct BlsAggregatePubkeys { pub output: String, } -impl YamlDecode for BlsAggregatePubkeys { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsAggregatePubkeys {} impl Case for BlsAggregatePubkeys { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs index eeecab82c..22fa197df 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{AggregateSignature, Signature}; use serde_derive::Deserialize; @@ -9,11 +10,7 @@ pub struct BlsAggregateSigs { pub output: String, } -impl YamlDecode for BlsAggregateSigs { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsAggregateSigs {} impl Case for BlsAggregateSigs { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/bls_g2_compressed.rs b/tests/ef_tests/src/cases/bls_g2_compressed.rs index 185cb58f3..1a9f1d561 100644 --- a/tests/ef_tests/src/cases/bls_g2_compressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_compressed.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{compress_g2, hash_on_g2}; use serde_derive::Deserialize; @@ -15,11 +16,7 @@ pub struct BlsG2Compressed { pub output: Vec, } -impl YamlDecode for BlsG2Compressed { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsG2Compressed {} impl Case for BlsG2Compressed { fn result(&self, _case_index: usize) -> Result<(), Error> { @@ -45,14 +42,9 @@ impl Case for BlsG2Compressed { } } -// Converts a vector to u64 (from big endian) +// Converts a vector to u64 (from little endian) fn bytes_to_u64(array: &[u8]) -> u64 { - let mut result: u64 = 0; - for (i, value) in array.iter().rev().enumerate() { - if i == 8 { - break; - } - result += u64::pow(2, i as u32 * 8) * u64::from(*value); - } - result + let mut bytes = [0u8; 8]; + bytes.copy_from_slice(array); + u64::from_le_bytes(bytes) } diff --git a/tests/ef_tests/src/cases/bls_g2_uncompressed.rs b/tests/ef_tests/src/cases/bls_g2_uncompressed.rs index 962b6aac3..3eae29967 100644 --- a/tests/ef_tests/src/cases/bls_g2_uncompressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_uncompressed.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::hash_on_g2; use serde_derive::Deserialize; @@ -9,18 +10,14 @@ pub struct BlsG2UncompressedInput { pub domain: String, } +impl BlsCase for BlsG2UncompressedInput {} + #[derive(Debug, Clone, Deserialize)] pub struct BlsG2Uncompressed { pub input: BlsG2UncompressedInput, pub output: Vec>, } -impl YamlDecode for 
BlsG2Uncompressed { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - impl Case for BlsG2Uncompressed { fn result(&self, _case_index: usize) -> Result<(), Error> { // Convert message and domain to required types diff --git a/tests/ef_tests/src/cases/bls_priv_to_pub.rs b/tests/ef_tests/src/cases/bls_priv_to_pub.rs index d72a43bbb..016e04dd1 100644 --- a/tests/ef_tests/src/cases/bls_priv_to_pub.rs +++ b/tests/ef_tests/src/cases/bls_priv_to_pub.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{PublicKey, SecretKey}; use serde_derive::Deserialize; @@ -9,11 +10,7 @@ pub struct BlsPrivToPub { pub output: String, } -impl YamlDecode for BlsPrivToPub { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsPrivToPub {} impl Case for BlsPrivToPub { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/bls_sign_msg.rs b/tests/ef_tests/src/cases/bls_sign_msg.rs index e62c3550f..7ee109f81 100644 --- a/tests/ef_tests/src/cases/bls_sign_msg.rs +++ b/tests/ef_tests/src/cases/bls_sign_msg.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{SecretKey, Signature}; use serde_derive::Deserialize; @@ -16,11 +17,7 @@ pub struct BlsSign { pub output: String, } -impl YamlDecode for BlsSign { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsSign {} impl Case for BlsSign { fn result(&self, _case_index: usize) -> Result<(), Error> { @@ -45,16 +42,11 @@ impl Case for BlsSign { } } -// Converts a vector to u64 (from big endian) +// Converts a vector to u64 (from little endian) fn bytes_to_u64(array: &[u8]) -> u64 { - let mut result: u64 = 0; - for (i, value) in array.iter().rev().enumerate() { - if i == 8 { - break; - } - result += u64::pow(2, i as u32 * 8) * u64::from(*value); - } - result + let mut bytes = [0u8; 8]; + bytes.copy_from_slice(array); + u64::from_le_bytes(bytes) } // Increase the size of an array to 48 bytes diff --git a/tests/ef_tests/src/cases/common.rs b/tests/ef_tests/src/cases/common.rs new file mode 100644 index 000000000..8e787f157 --- /dev/null +++ b/tests/ef_tests/src/cases/common.rs @@ -0,0 +1,72 @@ +use crate::cases::LoadCase; +use crate::decode::yaml_decode_file; +use crate::error::Error; +use serde_derive::Deserialize; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::convert::TryFrom; +use std::fmt::Debug; +use std::path::Path; +use tree_hash::TreeHash; + +/// Trait for all BLS cases to eliminate some boilerplate. +pub trait BlsCase: serde::de::DeserializeOwned {} + +impl LoadCase for T { + fn load_from_dir(path: &Path) -> Result { + yaml_decode_file(&path.join("data.yaml")) + } +} + +/// Macro to wrap U128 and U256 so they deserialize correctly. +macro_rules! 
uint_wrapper { + ($wrapper_name:ident, $wrapped_type:ty) => { + #[derive(Debug, Clone, Copy, Default, PartialEq, Decode, Encode, Deserialize)] + #[serde(try_from = "String")] + pub struct $wrapper_name { + pub x: $wrapped_type, + } + + impl TryFrom for $wrapper_name { + type Error = String; + + fn try_from(s: String) -> Result { + <$wrapped_type>::from_dec_str(&s) + .map(|x| Self { x }) + .map_err(|e| format!("{:?}", e)) + } + } + + impl tree_hash::TreeHash for $wrapper_name { + fn tree_hash_type() -> tree_hash::TreeHashType { + <$wrapped_type>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.x.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <$wrapped_type>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Vec { + self.x.tree_hash_root() + } + } + }; +} + +uint_wrapper!(TestU128, ethereum_types::U128); +uint_wrapper!(TestU256, ethereum_types::U256); + +/// Trait alias for all deez bounds +pub trait SszStaticType: + serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync +{ +} + +impl SszStaticType for T where + T: serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync +{ +} diff --git a/tests/ef_tests/src/cases/epoch_processing.rs b/tests/ef_tests/src/cases/epoch_processing.rs new file mode 100644 index 000000000..ece69b3fe --- /dev/null +++ b/tests/ef_tests/src/cases/epoch_processing.rs @@ -0,0 +1,143 @@ +use super::*; +use crate::bls_setting::BlsSetting; +use crate::case_result::compare_beacon_state_results_without_caches; +use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::type_name; +use crate::type_name::TypeName; +use serde_derive::Deserialize; +use state_processing::per_epoch_processing::{ + errors::EpochProcessingError, process_crosslinks, process_final_updates, + process_justification_and_finalization, process_registry_updates, process_slashings, + validator_statuses::ValidatorStatuses, +}; +use std::marker::PhantomData; +use std::path::{Path, PathBuf}; +use types::{BeaconState, ChainSpec, EthSpec}; + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + pub description: Option, + pub bls_setting: Option, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct EpochProcessing> { + pub path: PathBuf, + pub metadata: Metadata, + pub pre: BeaconState, + pub post: Option>, + #[serde(skip_deserializing)] + _phantom: PhantomData, +} + +pub trait EpochTransition: TypeName + Debug + Sync { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError>; +} + +#[derive(Debug)] +pub struct JustificationAndFinalization; +#[derive(Debug)] +pub struct Crosslinks; +#[derive(Debug)] +pub struct RegistryUpdates; +#[derive(Debug)] +pub struct Slashings; +#[derive(Debug)] +pub struct FinalUpdates; + +type_name!( + JustificationAndFinalization, + "justification_and_finalization" +); +type_name!(Crosslinks, "crosslinks"); +type_name!(RegistryUpdates, "registry_updates"); +type_name!(Slashings, "slashings"); +type_name!(FinalUpdates, "final_updates"); + +impl EpochTransition for JustificationAndFinalization { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state, spec)?; + process_justification_and_finalization(state, &validator_statuses.total_balances) + } +} + +impl EpochTransition for Crosslinks { + fn run(state: 
&mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_crosslinks(state, spec)?; + Ok(()) + } +} + +impl EpochTransition for RegistryUpdates { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_registry_updates(state, spec) + } +} + +impl EpochTransition for Slashings { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state, spec)?; + process_slashings(state, validator_statuses.total_balances.current_epoch, spec)?; + Ok(()) + } +} + +impl EpochTransition for FinalUpdates { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_final_updates(state, spec) + } +} + +impl> LoadCase for EpochProcessing { + fn load_from_dir(path: &Path) -> Result { + let metadata_path = path.join("meta.yaml"); + let metadata: Metadata = if metadata_path.is_file() { + yaml_decode_file(&metadata_path)? + } else { + Metadata::default() + }; + let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let post_file = path.join("post.ssz"); + let post = if post_file.is_file() { + Some(ssz_decode_file(&post_file)?) + } else { + None + }; + + Ok(Self { + path: path.into(), + metadata, + pre, + post, + _phantom: PhantomData, + }) + } +} + +impl> Case for EpochProcessing { + fn description(&self) -> String { + self.metadata + .description + .clone() + .unwrap_or_else(String::new) + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + let mut state = self.pre.clone(); + let mut expected = self.post.clone(); + + let spec = &E::default_spec(); + + let mut result = (|| { + // Processing requires the epoch cache. + state.build_all_caches(spec)?; + + T::run(&mut state, spec).map(|_| state) + })(); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs b/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs deleted file mode 100644 index f2676d122..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs +++ /dev/null @@ -1,37 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::process_crosslinks; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingCrosslinks { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingCrosslinks { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingCrosslinks { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. 
- state.build_all_caches(&E::default_spec()).unwrap(); - - let mut result = process_crosslinks(&mut state, &E::default_spec()).map(|_| state); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_final_updates.rs b/tests/ef_tests/src/cases/epoch_processing_final_updates.rs deleted file mode 100644 index 69e6b8bd3..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_final_updates.rs +++ /dev/null @@ -1,41 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::process_final_updates; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingFinalUpdates { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingFinalUpdates { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingFinalUpdates { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - let spec = &E::default_spec(); - - let mut result = (|| { - // Processing requires the epoch cache. - state.build_all_caches(spec)?; - - process_final_updates(&mut state, spec).map(|_| state) - })(); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs b/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs deleted file mode 100644 index 788301086..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::{ - process_justification_and_finalization, validator_statuses::ValidatorStatuses, -}; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingJustificationAndFinalization { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingJustificationAndFinalization { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingJustificationAndFinalization { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - let spec = &E::default_spec(); - - // Processing requires the epoch cache. 
- state.build_all_caches(spec).unwrap(); - - let mut result = (|| { - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - process_justification_and_finalization(&mut state, &validator_statuses.total_balances) - .map(|_| state) - })(); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs b/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs deleted file mode 100644 index a01f895fe..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs +++ /dev/null @@ -1,38 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::registry_updates::process_registry_updates; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingRegistryUpdates { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingRegistryUpdates { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingRegistryUpdates { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - let spec = &E::default_spec(); - - // Processing requires the epoch cache. - state.build_all_caches(spec).unwrap(); - - let mut result = process_registry_updates(&mut state, spec).map(|_| state); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_slashings.rs b/tests/ef_tests/src/cases/epoch_processing_slashings.rs deleted file mode 100644 index d2a988d92..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_slashings.rs +++ /dev/null @@ -1,50 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::{ - process_slashings::process_slashings, validator_statuses::ValidatorStatuses, -}; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingSlashings { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingSlashings { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingSlashings { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - let spec = &E::default_spec(); - - let mut result = (|| { - // Processing requires the epoch cache. 
- state.build_all_caches(spec)?; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - process_slashings( - &mut state, - validator_statuses.total_balances.current_epoch, - spec, - ) - .map(|_| state) - })(); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/genesis_initialization.rs b/tests/ef_tests/src/cases/genesis_initialization.rs index 7ae8eef59..0fb64ccb3 100644 --- a/tests/ef_tests/src/cases/genesis_initialization.rs +++ b/tests/ef_tests/src/cases/genesis_initialization.rs @@ -1,34 +1,51 @@ use super::*; -use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; +use std::path::PathBuf; use types::{BeaconState, Deposit, EthSpec, Hash256}; +#[derive(Debug, Clone, Deserialize)] +struct Metadata { + deposits_count: usize, +} + #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct GenesisInitialization { - pub description: String, - pub bls_setting: Option, + pub path: PathBuf, pub eth1_block_hash: Hash256, pub eth1_timestamp: u64, pub deposits: Vec, pub state: Option>, } -impl YamlDecode for GenesisInitialization { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +impl LoadCase for GenesisInitialization { + fn load_from_dir(path: &Path) -> Result { + let eth1_block_hash = ssz_decode_file(&path.join("eth1_block_hash.ssz"))?; + let eth1_timestamp = yaml_decode_file(&path.join("eth1_timestamp.yaml"))?; + let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; + let deposits: Vec = (0..meta.deposits_count) + .map(|i| { + let filename = format!("deposits_{}.ssz", i); + ssz_decode_file(&path.join(filename)) + }) + .collect::>()?; + let state = ssz_decode_file(&path.join("state.ssz"))?; + + Ok(Self { + path: path.into(), + eth1_block_hash, + eth1_timestamp, + deposits, + state: Some(state), + }) } } impl Case for GenesisInitialization { - fn description(&self) -> String { - self.description.clone() - } - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; let spec = &E::default_spec(); let mut result = initialize_beacon_state_from_eth1( diff --git a/tests/ef_tests/src/cases/genesis_validity.rs b/tests/ef_tests/src/cases/genesis_validity.rs index 7ddd3e8fd..f72ac4c3e 100644 --- a/tests/ef_tests/src/cases/genesis_validity.rs +++ b/tests/ef_tests/src/cases/genesis_validity.rs @@ -1,31 +1,28 @@ use super::*; -use crate::bls_setting::BlsSetting; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::is_valid_genesis_state; +use std::path::Path; use types::{BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct GenesisValidity { - pub description: String, - pub bls_setting: Option, pub genesis: BeaconState, pub is_valid: bool, } -impl YamlDecode for GenesisValidity { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +impl LoadCase for GenesisValidity { + fn load_from_dir(path: &Path) -> Result { + let genesis = ssz_decode_file(&path.join("genesis.ssz"))?; + let is_valid = yaml_decode_file(&path.join("is_valid.yaml"))?; + + Ok(Self { genesis, is_valid }) } } impl Case for GenesisValidity { - fn 
description(&self) -> String { - self.description.clone() - } - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; let spec = &E::default_spec(); let is_valid = is_valid_genesis_state(&self.genesis, spec); diff --git a/tests/ef_tests/src/cases/operations.rs b/tests/ef_tests/src/cases/operations.rs new file mode 100644 index 000000000..3da04d2a3 --- /dev/null +++ b/tests/ef_tests/src/cases/operations.rs @@ -0,0 +1,194 @@ +use super::*; +use crate::bls_setting::BlsSetting; +use crate::case_result::compare_beacon_state_results_without_caches; +use crate::decode::{ssz_decode_file, yaml_decode_file}; +use crate::type_name::TypeName; +use serde_derive::Deserialize; +use ssz::Decode; +use state_processing::per_block_processing::{ + errors::BlockProcessingError, process_attestations, process_attester_slashings, + process_block_header, process_deposits, process_exits, process_proposer_slashings, + process_transfers, VerifySignatures, +}; +use std::fmt::Debug; +use std::path::Path; +use types::{ + Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, + ProposerSlashing, Transfer, VoluntaryExit, +}; + +#[derive(Debug, Clone, Default, Deserialize)] +struct Metadata { + description: Option, + bls_setting: Option, +} + +#[derive(Debug, Clone)] +pub struct Operations> { + metadata: Metadata, + pub pre: BeaconState, + pub operation: O, + pub post: Option>, +} + +pub trait Operation: Decode + TypeName + Debug + Sync { + fn handler_name() -> String { + Self::name().to_lowercase() + } + + fn filename() -> String { + format!("{}.ssz", Self::handler_name()) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError>; +} + +impl Operation for Attestation { + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_attestations(state, &[self.clone()], VerifySignatures::True, spec) + } +} + +impl Operation for AttesterSlashing { + fn handler_name() -> String { + "attester_slashing".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_attester_slashings(state, &[self.clone()], VerifySignatures::True, spec) + } +} + +impl Operation for Deposit { + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_deposits(state, &[self.clone()], spec) + } +} + +impl Operation for ProposerSlashing { + fn handler_name() -> String { + "proposer_slashing".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_proposer_slashings(state, &[self.clone()], VerifySignatures::True, spec) + } +} + +impl Operation for Transfer { + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_transfers(state, &[self.clone()], VerifySignatures::True, spec) + } +} + +impl Operation for VoluntaryExit { + fn handler_name() -> String { + "voluntary_exit".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_exits(state, &[self.clone()], VerifySignatures::True, spec) + } +} + +impl Operation for BeaconBlock { + fn handler_name() -> String { + "block_header".into() + } + + fn filename() -> String { + "block.ssz".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + 
spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + Ok(process_block_header( + state, + self, + None, + VerifySignatures::True, + spec, + )?) + } +} + +impl> LoadCase for Operations { + fn load_from_dir(path: &Path) -> Result { + let metadata_path = path.join("meta.yaml"); + let metadata: Metadata = if metadata_path.is_file() { + yaml_decode_file(&metadata_path)? + } else { + Metadata::default() + }; + let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let operation = ssz_decode_file(&path.join(O::filename()))?; + let post_filename = path.join("post.ssz"); + let post = if post_filename.is_file() { + Some(ssz_decode_file(&post_filename)?) + } else { + None + }; + + Ok(Self { + metadata, + pre, + operation, + post, + }) + } +} + +impl> Case for Operations { + fn description(&self) -> String { + self.metadata + .description + .clone() + .unwrap_or_else(String::new) + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + self.metadata.bls_setting.unwrap_or_default().check()?; + + let spec = &E::default_spec(); + let mut state = self.pre.clone(); + let mut expected = self.post.clone(); + + // Processing requires the epoch cache. + state.build_all_caches(spec).unwrap(); + + let mut result = self.operation.apply_to(&mut state, spec).map(|()| state); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/operations_attestation.rs b/tests/ef_tests/src/cases/operations_attestation.rs deleted file mode 100644 index ecd4835b8..000000000 --- a/tests/ef_tests/src/cases/operations_attestation.rs +++ /dev/null @@ -1,47 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::{process_attestations, VerifySignatures}; -use types::{Attestation, BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsAttestation { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub attestation: Attestation, - pub post: Option>, -} - -impl YamlDecode for OperationsAttestation { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(&yaml).unwrap()) - } -} - -impl Case for OperationsAttestation { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let spec = &E::default_spec(); - - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let attestation = self.attestation.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. 
- state.build_all_caches(spec).unwrap(); - - let result = process_attestations(&mut state, &[attestation], VerifySignatures::True, spec); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_attester_slashing.rs b/tests/ef_tests/src/cases/operations_attester_slashing.rs deleted file mode 100644 index 952443cee..000000000 --- a/tests/ef_tests/src/cases/operations_attester_slashing.rs +++ /dev/null @@ -1,52 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::{process_attester_slashings, VerifySignatures}; -use types::{AttesterSlashing, BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -pub struct OperationsAttesterSlashing { - pub description: String, - pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] - pub pre: BeaconState, - #[serde(bound = "E: EthSpec")] - pub attester_slashing: AttesterSlashing, - #[serde(bound = "E: EthSpec")] - pub post: Option>, -} - -impl YamlDecode for OperationsAttesterSlashing { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsAttesterSlashing { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let attester_slashing = self.attester_slashing.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. - state.build_all_caches(&E::default_spec()).unwrap(); - - let result = process_attester_slashings( - &mut state, - &[attester_slashing], - VerifySignatures::True, - &E::default_spec(), - ); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_block_header.rs b/tests/ef_tests/src/cases/operations_block_header.rs deleted file mode 100644 index f9b9dab1d..000000000 --- a/tests/ef_tests/src/cases/operations_block_header.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::{process_block_header, VerifySignatures}; -use types::{BeaconBlock, BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsBlockHeader { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub block: BeaconBlock, - pub post: Option>, -} - -impl YamlDecode for OperationsBlockHeader { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsBlockHeader { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let spec = &E::default_spec(); - - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. 
- state.build_all_caches(spec).unwrap(); - - let mut result = - process_block_header(&mut state, &self.block, None, VerifySignatures::True, spec) - .map(|_| state); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_deposit.rs b/tests/ef_tests/src/cases/operations_deposit.rs deleted file mode 100644 index 801c02029..000000000 --- a/tests/ef_tests/src/cases/operations_deposit.rs +++ /dev/null @@ -1,42 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::process_deposits; -use types::{BeaconState, Deposit, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsDeposit { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub deposit: Deposit, - pub post: Option>, -} - -impl YamlDecode for OperationsDeposit { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsDeposit { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let deposit = self.deposit.clone(); - let mut expected = self.post.clone(); - - let result = process_deposits(&mut state, &[deposit], &E::default_spec()); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_exit.rs b/tests/ef_tests/src/cases/operations_exit.rs deleted file mode 100644 index 6040e7ef3..000000000 --- a/tests/ef_tests/src/cases/operations_exit.rs +++ /dev/null @@ -1,50 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::{process_exits, VerifySignatures}; -use types::{BeaconState, EthSpec, VoluntaryExit}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsExit { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub voluntary_exit: VoluntaryExit, - pub post: Option>, -} - -impl YamlDecode for OperationsExit { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsExit { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let exit = self.voluntary_exit.clone(); - let mut expected = self.post.clone(); - - // Exit processing requires the epoch cache. 
- state.build_all_caches(&E::default_spec()).unwrap(); - - let result = process_exits( - &mut state, - &[exit], - VerifySignatures::True, - &E::default_spec(), - ); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_proposer_slashing.rs b/tests/ef_tests/src/cases/operations_proposer_slashing.rs deleted file mode 100644 index 282d93274..000000000 --- a/tests/ef_tests/src/cases/operations_proposer_slashing.rs +++ /dev/null @@ -1,50 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::{process_proposer_slashings, VerifySignatures}; -use types::{BeaconState, EthSpec, ProposerSlashing}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsProposerSlashing { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub proposer_slashing: ProposerSlashing, - pub post: Option>, -} - -impl YamlDecode for OperationsProposerSlashing { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsProposerSlashing { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let proposer_slashing = self.proposer_slashing.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. - state.build_all_caches(&E::default_spec()).unwrap(); - - let result = process_proposer_slashings( - &mut state, - &[proposer_slashing], - VerifySignatures::True, - &E::default_spec(), - ); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_transfer.rs b/tests/ef_tests/src/cases/operations_transfer.rs deleted file mode 100644 index 77069b5cf..000000000 --- a/tests/ef_tests/src/cases/operations_transfer.rs +++ /dev/null @@ -1,47 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::{process_transfers, VerifySignatures}; -use types::{BeaconState, EthSpec, Transfer}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsTransfer { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub transfer: Transfer, - pub post: Option>, -} - -impl YamlDecode for OperationsTransfer { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsTransfer { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let transfer = self.transfer.clone(); - let mut expected = self.post.clone(); - - // Transfer processing requires the epoch cache. 
-        state.build_all_caches(&E::default_spec()).unwrap();
-
-        let spec = E::default_spec();
-
-        let result = process_transfers(&mut state, &[transfer], VerifySignatures::True, &spec);
-
-        let mut result = result.and_then(|_| Ok(state));
-
-        compare_beacon_state_results_without_caches(&mut result, &mut expected)
-    }
-}
diff --git a/tests/ef_tests/src/cases/sanity_blocks.rs b/tests/ef_tests/src/cases/sanity_blocks.rs
index bc4d7b3de..24ae6f81c 100644
--- a/tests/ef_tests/src/cases/sanity_blocks.rs
+++ b/tests/ef_tests/src/cases/sanity_blocks.rs
@@ -1,35 +1,65 @@
 use super::*;
 use crate::bls_setting::BlsSetting;
 use crate::case_result::compare_beacon_state_results_without_caches;
+use crate::decode::{ssz_decode_file, yaml_decode_file};
 use serde_derive::Deserialize;
 use state_processing::{
     per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy,
 };
 use types::{BeaconBlock, BeaconState, EthSpec, RelativeEpoch};
 
+#[derive(Debug, Clone, Deserialize)]
+pub struct Metadata {
+    pub description: Option<String>,
+    pub bls_setting: Option<BlsSetting>,
+    pub blocks_count: usize,
+}
+
 #[derive(Debug, Clone, Deserialize)]
 #[serde(bound = "E: EthSpec")]
 pub struct SanityBlocks<E: EthSpec> {
-    pub description: String,
-    pub bls_setting: Option<BlsSetting>,
+    pub metadata: Metadata,
     pub pre: BeaconState<E>,
     pub blocks: Vec<BeaconBlock<E>>,
     pub post: Option<BeaconState<E>>,
 }
 
-impl<E: EthSpec> YamlDecode for SanityBlocks<E> {
-    fn yaml_decode(yaml: &str) -> Result<Self, Error> {
-        Ok(serde_yaml::from_str(yaml).unwrap())
+impl<E: EthSpec> LoadCase for SanityBlocks<E> {
+    fn load_from_dir(path: &Path) -> Result<Self, Error> {
+        let metadata: Metadata = yaml_decode_file(&path.join("meta.yaml"))?;
+        let pre = ssz_decode_file(&path.join("pre.ssz"))?;
+        let blocks: Vec<BeaconBlock<E>> = (0..metadata.blocks_count)
+            .map(|i| {
+                let filename = format!("blocks_{}.ssz", i);
+                ssz_decode_file(&path.join(filename))
+            })
+            .collect::<Result<_, _>>()?;
+        let post_file = path.join("post.ssz");
+        let post = if post_file.is_file() {
+            Some(ssz_decode_file(&post_file)?)
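+            // (post.ssz is only present when the blocks are expected to apply
+            // cleanly; its absence leaves `post` as `None` and the case expects
+            // processing to fail.)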
+        } else {
+            None
+        };
+
+        Ok(Self {
+            metadata,
+            pre,
+            blocks,
+            post,
+        })
     }
 }
 
 impl<E: EthSpec> Case for SanityBlocks<E> {
     fn description(&self) -> String {
-        self.description.clone()
+        self.metadata
+            .description
+            .clone()
+            .unwrap_or_else(String::new)
     }
 
     fn result(&self, _case_index: usize) -> Result<(), Error> {
-        self.bls_setting.unwrap_or_default().check()?;
+        self.metadata.bls_setting.unwrap_or_default().check()?;
 
         let mut state = self.pre.clone();
         let mut expected = self.post.clone();
diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs
index fbce1a06a..e9b80a252 100644
--- a/tests/ef_tests/src/cases/sanity_slots.rs
+++ b/tests/ef_tests/src/cases/sanity_slots.rs
@@ -1,30 +1,63 @@
 use super::*;
+use crate::bls_setting::BlsSetting;
 use crate::case_result::compare_beacon_state_results_without_caches;
+use crate::decode::{ssz_decode_file, yaml_decode_file};
 use serde_derive::Deserialize;
 use state_processing::per_slot_processing;
 use types::{BeaconState, EthSpec};
 
+#[derive(Debug, Clone, Default, Deserialize)]
+pub struct Metadata {
+    pub description: Option<String>,
+    pub bls_setting: Option<BlsSetting>,
+}
+
 #[derive(Debug, Clone, Deserialize)]
 #[serde(bound = "E: EthSpec")]
 pub struct SanitySlots<E: EthSpec> {
-    pub description: String,
+    pub metadata: Metadata,
     pub pre: BeaconState<E>,
-    pub slots: usize,
+    pub slots: u64,
     pub post: Option<BeaconState<E>>,
 }
 
-impl<E: EthSpec> YamlDecode for SanitySlots<E> {
-    fn yaml_decode(yaml: &str) -> Result<Self, Error> {
-        Ok(serde_yaml::from_str(yaml).unwrap())
+impl<E: EthSpec> LoadCase for SanitySlots<E> {
+    fn load_from_dir(path: &Path) -> Result<Self, Error> {
+        let metadata_path = path.join("meta.yaml");
+        let metadata: Metadata = if metadata_path.is_file() {
+            yaml_decode_file(&metadata_path)?
+        } else {
+            Metadata::default()
+        };
+        let pre = ssz_decode_file(&path.join("pre.ssz"))?;
+        let slots: u64 = yaml_decode_file(&path.join("slots.yaml"))?;
+        let post_file = path.join("post.ssz");
+        let post = if post_file.is_file() {
+            Some(ssz_decode_file(&post_file)?)
+ } else { + None + }; + + Ok(Self { + metadata, + pre, + slots, + post, + }) } } impl Case for SanitySlots { fn description(&self) -> String { - self.description.clone() + self.metadata + .description + .clone() + .unwrap_or_else(String::new) } fn result(&self, _case_index: usize) -> Result<(), Error> { + self.metadata.bls_setting.unwrap_or_default().check()?; + let mut state = self.pre.clone(); let mut expected = self.post.clone(); let spec = &E::default_spec(); diff --git a/tests/ef_tests/src/cases/shuffling.rs b/tests/ef_tests/src/cases/shuffling.rs index d7ff40e59..2fe632e84 100644 --- a/tests/ef_tests/src/cases/shuffling.rs +++ b/tests/ef_tests/src/cases/shuffling.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::decode::yaml_decode_file; use serde_derive::Deserialize; use std::marker::PhantomData; use swap_or_not_shuffle::{get_permutated_index, shuffle_list}; @@ -8,21 +9,21 @@ use swap_or_not_shuffle::{get_permutated_index, shuffle_list}; pub struct Shuffling { pub seed: String, pub count: usize, - pub shuffled: Vec, + pub mapping: Vec, #[serde(skip)] _phantom: PhantomData, } -impl YamlDecode for Shuffling { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +impl LoadCase for Shuffling { + fn load_from_dir(path: &Path) -> Result { + yaml_decode_file(&path.join("mapping.yaml")) } } impl Case for Shuffling { fn result(&self, _case_index: usize) -> Result<(), Error> { if self.count == 0 { - compare_result::<_, Error>(&Ok(vec![]), &Some(self.shuffled.clone()))?; + compare_result::<_, Error>(&Ok(vec![]), &Some(self.mapping.clone()))?; } else { let spec = T::default_spec(); let seed = hex::decode(&self.seed[2..]) @@ -34,12 +35,12 @@ impl Case for Shuffling { get_permutated_index(i, self.count, &seed, spec.shuffle_round_count).unwrap() }) .collect(); - compare_result::<_, Error>(&Ok(shuffling), &Some(self.shuffled.clone()))?; + compare_result::<_, Error>(&Ok(shuffling), &Some(self.mapping.clone()))?; // Test "shuffle_list" let input: Vec = (0..self.count).collect(); let shuffling = shuffle_list(input, spec.shuffle_round_count, &seed, false).unwrap(); - compare_result::<_, Error>(&Ok(shuffling), &Some(self.shuffled.clone()))?; + compare_result::<_, Error>(&Ok(shuffling), &Some(self.mapping.clone()))?; } Ok(()) diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index ca49d2106..fc62e66fc 100644 --- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ -1,68 +1,302 @@ +#![allow(non_snake_case)] + use super::*; -use crate::case_result::compare_result; -use ethereum_types::{U128, U256}; +use crate::cases::common::{SszStaticType, TestU128, TestU256}; +use crate::cases::ssz_static::{check_serialization, check_tree_hash}; +use crate::decode::yaml_decode_file; +use serde::{de::Error as SerdeError, Deserializer}; use serde_derive::Deserialize; -use ssz::Decode; -use std::fmt::Debug; +use ssz_derive::{Decode, Encode}; +use std::fs; +use std::path::{Path, PathBuf}; +use tree_hash_derive::TreeHash; +use types::typenum::*; +use types::{BitList, BitVector, FixedVector, VariableList}; #[derive(Debug, Clone, Deserialize)] -pub struct SszGeneric { - #[serde(alias = "type")] - pub type_name: String, - pub valid: bool, - pub value: Option, - pub ssz: Option, +struct Metadata { + root: String, + signing_root: Option, } -impl YamlDecode for SszGeneric { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +#[derive(Debug, Clone)] +pub 
struct SszGeneric { + path: PathBuf, + handler_name: String, + case_name: String, +} + +impl LoadCase for SszGeneric { + fn load_from_dir(path: &Path) -> Result { + let components = path + .components() + .map(|c| c.as_os_str().to_string_lossy().into_owned()) + .rev() + .collect::>(); + // Test case name is last + let case_name = components[0].clone(); + // Handler name is third last, before suite name and case name + let handler_name = components[2].clone(); + Ok(Self { + path: path.into(), + handler_name, + case_name, + }) + } +} + +macro_rules! type_dispatch { + ($function:ident, + ($($arg:expr),*), + $base_ty:tt, + <$($param_ty:ty),*>, + [ $value:expr => primitive_type ] $($rest:tt)*) => { + match $value { + "bool" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* bool>, $($rest)*), + "uint8" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u8>, $($rest)*), + "uint16" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u16>, $($rest)*), + "uint32" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u32>, $($rest)*), + "uint64" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u64>, $($rest)*), + "uint128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU128>, $($rest)*), + "uint256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU256>, $($rest)*), + _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), + } + }; + ($function:ident, + ($($arg:expr),*), + $base_ty:tt, + <$($param_ty:ty),*>, + [ $value:expr => typenum ] $($rest:tt)*) => { + match $value { + // DO YOU LIKE NUMBERS? + "0" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U0>, $($rest)*), + "1" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U1>, $($rest)*), + "2" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U2>, $($rest)*), + "3" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U3>, $($rest)*), + "4" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U4>, $($rest)*), + "5" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U5>, $($rest)*), + "6" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U6>, $($rest)*), + "7" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U7>, $($rest)*), + "8" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U8>, $($rest)*), + "9" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U9>, $($rest)*), + "16" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U16>, $($rest)*), + "31" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U31>, $($rest)*), + "32" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U32>, $($rest)*), + "64" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U64>, $($rest)*), + "128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U128>, $($rest)*), + "256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U256>, $($rest)*), + "512" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U512>, $($rest)*), + "513" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U513>, $($rest)*), + "1024" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U1024>, $($rest)*), + "2048" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U2048>, $($rest)*), + "4096" => type_dispatch!($function, 
($($arg),*), $base_ty, <$($param_ty,)* U4096>, $($rest)*), + "8192" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U8192>, $($rest)*), + _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), + } + }; + ($function:ident, + ($($arg:expr),*), + $base_ty:tt, + <$($param_ty:ty),*>, + [ $value:expr => test_container ] $($rest:tt)*) => { + match $value { + "SingleFieldTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* SingleFieldTestStruct>, $($rest)*), + "SmallTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* SmallTestStruct>, $($rest)*), + "FixedTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* FixedTestStruct>, $($rest)*), + "VarTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* VarTestStruct>, $($rest)*), + "ComplexTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* ComplexTestStruct>, $($rest)*), + "BitsStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* BitsStruct>, $($rest)*), + _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), + } + }; + // No base type: apply type params to function + ($function:ident, ($($arg:expr),*), _, <$($param_ty:ty),*>,) => { + $function::<$($param_ty),*>($($arg),*) + }; + ($function:ident, ($($arg:expr),*), $base_type_name:ident, <$($param_ty:ty),*>,) => { + $function::<$base_type_name<$($param_ty),*>>($($arg),*) } } impl Case for SszGeneric { fn result(&self, _case_index: usize) -> Result<(), Error> { - if let Some(ssz) = &self.ssz { - match self.type_name.as_ref() { - "uint8" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint16" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint32" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint64" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint128" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint256" => ssz_generic_test::(self.valid, ssz, &self.value), - _ => Err(Error::FailedToParseTest(format!( - "Unknown type: {}", - self.type_name - ))), + let parts = self.case_name.split('_').collect::>(); + + match self.handler_name.as_str() { + "basic_vector" => { + let elem_ty = parts[1]; + let length = parts[2]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + FixedVector, + <>, + [elem_ty => primitive_type] + [length => typenum] + )?; } - } else { - // Skip tests that do not have an ssz field. - // - // See: https://github.com/ethereum/eth2.0-specs/issues/1079 - Ok(()) + "bitlist" => { + let mut limit = parts[1]; + + // Test format is inconsistent, pretend the limit is 32 (arbitrary) + // https://github.com/ethereum/eth2.0-spec-tests + if limit == "no" { + limit = "32"; + } + + type_dispatch!( + ssz_generic_test, + (&self.path), + BitList, + <>, + [limit => typenum] + )?; + } + "bitvector" => { + let length = parts[1]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + BitVector, + <>, + [length => typenum] + )?; + } + "boolean" => { + ssz_generic_test::(&self.path)?; + } + "uints" => { + let type_name = "uint".to_owned() + parts[1]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + _, + <>, + [type_name.as_str() => primitive_type] + )?; + } + "containers" => { + let type_name = parts[0]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + _, + <>, + [type_name => test_container] + )?; + } + _ => panic!("unsupported handler: {}", self.handler_name), } + Ok(()) } } -/// Execute a `ssz_generic` test case. 
-fn ssz_generic_test(should_be_ok: bool, ssz: &str, value: &Option) -> Result<(), Error> -where - T: Decode + YamlDecode + Debug + PartialEq, -{ - let ssz = hex::decode(&ssz[2..]).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; - - // We do not cater for the scenario where the test is valid but we are not passed any SSZ. - if should_be_ok && value.is_none() { - panic!("Unexpected test input. Cannot pass without value.") - } - - let expected = if let Some(string) = value { - Some(T::yaml_decode(string)?) +fn ssz_generic_test(path: &Path) -> Result<(), Error> { + let meta_path = path.join("meta.yaml"); + let meta: Option = if meta_path.is_file() { + Some(yaml_decode_file(&meta_path)?) } else { None }; - let decoded = T::from_ssz_bytes(&ssz); + let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); - compare_result(&decoded, &expected) + let value_path = path.join("value.yaml"); + let value: Option = if value_path.is_file() { + Some(yaml_decode_file(&value_path)?) + } else { + None + }; + + // Valid + // TODO: signing root (annoying because of traits) + if let Some(value) = value { + check_serialization(&value, &serialized)?; + + if let Some(ref meta) = meta { + check_tree_hash(&meta.root, value.tree_hash_root())?; + } + } + // Invalid + else { + if let Ok(decoded) = T::from_ssz_bytes(&serialized) { + return Err(Error::DidntFail(format!( + "Decoded invalid bytes into: {:?}", + decoded + ))); + } + } + + Ok(()) +} + +// Containers for SSZ generic tests +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct SingleFieldTestStruct { + A: u8, +} + +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct SmallTestStruct { + A: u16, + B: u16, +} + +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct FixedTestStruct { + A: u8, + B: u64, + C: u32, +} + +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct VarTestStruct { + A: u16, + B: VariableList, + C: u8, +} + +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct ComplexTestStruct { + A: u16, + B: VariableList, + C: u8, + #[serde(deserialize_with = "byte_list_from_hex_str")] + D: VariableList, + E: VarTestStruct, + F: FixedVector, + G: FixedVector, +} + +#[derive(Debug, Clone, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct BitsStruct { + A: BitList, + B: BitVector, + C: BitVector, + D: BitList, + E: BitVector, +} + +fn byte_list_from_hex_str<'de, D, N: Unsigned>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let s: String = serde::de::Deserialize::deserialize(deserializer)?; + let decoded: Vec = hex::decode(&s.as_str()[2..]).map_err(D::Error::custom)?; + + if decoded.len() > N::to_usize() { + return Err(D::Error::custom(format!( + "Too many values for list, got: {}, limit: {}", + decoded.len(), + N::to_usize() + ))); + } else { + Ok(decoded.into()) + } } diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index 96ba38b6a..62f285d58 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -1,127 +1,101 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::SszStaticType; +use crate::decode::yaml_decode_file; use serde_derive::Deserialize; -use ssz::{Decode, Encode}; -use std::fmt::Debug; -use std::marker::PhantomData; -use tree_hash::TreeHash; -use types::{ - 
test_utils::TestRandom, Attestation, AttestationData, AttestationDataAndCustodyBit, - AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, BeaconState, Checkpoint, - CompactCommittee, Crosslink, Deposit, DepositData, Eth1Data, EthSpec, Fork, Hash256, - HistoricalBatch, IndexedAttestation, PendingAttestation, ProposerSlashing, Transfer, Validator, - VoluntaryExit, -}; - -// Enum variant names are used by Serde when deserializing the test YAML -#[allow(clippy::large_enum_variant)] -#[derive(Debug, Clone, Deserialize)] -pub enum SszStatic -where - E: EthSpec, -{ - Fork(SszStaticInner), - Crosslink(SszStaticInner), - Checkpoint(SszStaticInner), - CompactCommittee(SszStaticInner, E>), - Eth1Data(SszStaticInner), - AttestationData(SszStaticInner), - AttestationDataAndCustodyBit(SszStaticInner), - IndexedAttestation(SszStaticInner, E>), - DepositData(SszStaticInner), - BeaconBlockHeader(SszStaticInner), - Validator(SszStaticInner), - PendingAttestation(SszStaticInner, E>), - HistoricalBatch(SszStaticInner, E>), - ProposerSlashing(SszStaticInner), - AttesterSlashing(SszStaticInner, E>), - Attestation(SszStaticInner, E>), - Deposit(SszStaticInner), - VoluntaryExit(SszStaticInner), - Transfer(SszStaticInner), - BeaconBlockBody(SszStaticInner, E>), - BeaconBlock(SszStaticInner, E>), - BeaconState(SszStaticInner, E>), -} +use std::fs; +use tree_hash::SignedRoot; +use types::Hash256; #[derive(Debug, Clone, Deserialize)] -pub struct SszStaticInner -where - E: EthSpec, -{ - pub value: T, - pub serialized: String, - pub root: String, - #[serde(skip, default)] - _phantom: PhantomData, +struct SszStaticRoots { + root: String, + signing_root: Option, } -impl YamlDecode for SszStatic { - fn yaml_decode(yaml: &str) -> Result { - serde_yaml::from_str(yaml).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) +#[derive(Debug, Clone)] +pub struct SszStatic { + roots: SszStaticRoots, + serialized: Vec, + value: T, +} + +#[derive(Debug, Clone)] +pub struct SszStaticSR { + roots: SszStaticRoots, + serialized: Vec, + value: T, +} + +fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec, T), Error> { + let roots = yaml_decode_file(&path.join("roots.yaml"))?; + let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); + let value = yaml_decode_file(&path.join("value.yaml"))?; + + Ok((roots, serialized, value)) +} + +impl LoadCase for SszStatic { + fn load_from_dir(path: &Path) -> Result { + load_from_dir(path).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + }) } } -impl Case for SszStatic { - fn result(&self, _case_index: usize) -> Result<(), Error> { - use self::SszStatic::*; - - match *self { - Fork(ref val) => ssz_static_test(val), - Crosslink(ref val) => ssz_static_test(val), - Checkpoint(ref val) => ssz_static_test(val), - CompactCommittee(ref val) => ssz_static_test(val), - Eth1Data(ref val) => ssz_static_test(val), - AttestationData(ref val) => ssz_static_test(val), - AttestationDataAndCustodyBit(ref val) => ssz_static_test(val), - IndexedAttestation(ref val) => ssz_static_test(val), - DepositData(ref val) => ssz_static_test(val), - BeaconBlockHeader(ref val) => ssz_static_test(val), - Validator(ref val) => ssz_static_test(val), - PendingAttestation(ref val) => ssz_static_test(val), - HistoricalBatch(ref val) => ssz_static_test(val), - ProposerSlashing(ref val) => ssz_static_test(val), - AttesterSlashing(ref val) => ssz_static_test(val), - Attestation(ref val) => ssz_static_test(val), - Deposit(ref val) => ssz_static_test(val), - 
VoluntaryExit(ref val) => ssz_static_test(val),
-            Transfer(ref val) => ssz_static_test(val),
-            BeaconBlockBody(ref val) => ssz_static_test(val),
-            BeaconBlock(ref val) => ssz_static_test(val),
-            BeaconState(ref val) => ssz_static_test(val),
-        }
+impl<T: SszStaticType + SignedRoot> LoadCase for SszStaticSR<T> {
+    fn load_from_dir(path: &Path) -> Result<Self, Error> {
+        load_from_dir(path).map(|(roots, serialized, value)| Self {
+            roots,
+            serialized,
+            value,
+        })
     }
 }
 
-fn ssz_static_test<T, E: EthSpec>(tc: &SszStaticInner<T, E>) -> Result<(), Error>
-where
-    T: Clone
-        + Decode
-        + Debug
-        + Encode
-        + PartialEq
-        + serde::de::DeserializeOwned
-        + TreeHash
-        + TestRandom,
-{
-    // Verify we can decode SSZ in the same way we can decode YAML.
-    let ssz = hex::decode(&tc.serialized[2..])
-        .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?;
-    let expected = tc.value.clone();
-    let decode_result = T::from_ssz_bytes(&ssz);
-    compare_result(&decode_result, &Some(expected))?;
+pub fn check_serialization<T: SszStaticType>(value: &T, serialized: &[u8]) -> Result<(), Error> {
+    // Check serialization
+    let serialized_result = value.as_ssz_bytes();
+    compare_result::<usize, Error>(&Ok(value.ssz_bytes_len()), &Some(serialized.len()))?;
+    compare_result::<Vec<u8>, Error>(&Ok(serialized_result), &Some(serialized.to_vec()))?;
 
-    // Verify we can encode the result back into original ssz bytes
-    let decoded = decode_result.unwrap();
-    let encoded_result = decoded.as_ssz_bytes();
-    compare_result::<Vec<u8>, Error>(&Ok(encoded_result), &Some(ssz))?;
-
-    // Verify the TreeHash root of the decoded struct matches the test.
-    let expected_root =
-        &hex::decode(&tc.root[2..]).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?;
-    let expected_root = Hash256::from_slice(&expected_root);
-    let tree_hash_root = Hash256::from_slice(&decoded.tree_hash_root());
-    compare_result::<Hash256, Error>(&Ok(tree_hash_root), &Some(expected_root))?;
+    // Check deserialization
+    let deserialized_result = T::from_ssz_bytes(serialized);
+    compare_result(&deserialized_result, &Some(value.clone()))?;
 
     Ok(())
 }
+
+pub fn check_tree_hash(expected_str: &str, actual_root: Vec<u8>) -> Result<(), Error> {
+    let expected_root = hex::decode(&expected_str[2..])
+        .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?;
+    let expected_root = Hash256::from_slice(&expected_root);
+    let tree_hash_root = Hash256::from_slice(&actual_root);
+    compare_result::<Hash256, Error>(&Ok(tree_hash_root), &Some(expected_root))
+}
+
+impl<T: SszStaticType> Case for SszStatic<T> {
+    fn result(&self, _case_index: usize) -> Result<(), Error> {
+        check_serialization(&self.value, &self.serialized)?;
+        check_tree_hash(&self.roots.root, self.value.tree_hash_root())?;
+        Ok(())
+    }
+}
+
+impl<T: SszStaticType + SignedRoot> Case for SszStaticSR<T> {
+    fn result(&self, _case_index: usize) -> Result<(), Error> {
+        check_serialization(&self.value, &self.serialized)?;
+        check_tree_hash(&self.roots.root, self.value.tree_hash_root())?;
+        check_tree_hash(
+            &self
+                .roots
+                .signing_root
+                .as_ref()
+                .expect("signed root exists"),
+            self.value.signed_root(),
+        )?;
+        Ok(())
+    }
+}
diff --git a/tests/ef_tests/src/decode.rs b/tests/ef_tests/src/decode.rs
new file mode 100644
index 000000000..c1ea6fb3b
--- /dev/null
+++ b/tests/ef_tests/src/decode.rs
@@ -0,0 +1,31 @@
+use super::*;
+use std::fs;
+use std::path::Path;
+
+pub fn yaml_decode<T: serde::de::DeserializeOwned>(string: &str) -> Result<T, Error> {
+    serde_yaml::from_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))
+}
+
+pub fn yaml_decode_file<T: serde::de::DeserializeOwned>(path: &Path) -> Result<T, Error> {
+    fs::read_to_string(path)
+        .map_err(|e| {
+            Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e))
+        })
+        .and_then(|s| yaml_decode(&s))
+}
+
+pub fn ssz_decode_file<T: ssz::Decode>(path: &Path) -> Result<T, Error> {
+    fs::read(path)
+        .map_err(|e| {
+            Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e))
+        })
+        .and_then(|s| {
+            T::from_ssz_bytes(&s).map_err(|e| {
+                Error::FailedToParseTest(format!(
+                    "Unable to parse SSZ at {}: {:?}",
+                    path.display(),
+                    e
+                ))
+            })
+        })
+}
diff --git a/tests/ef_tests/src/doc.rs b/tests/ef_tests/src/doc.rs
deleted file mode 100644
index 7dfe9954c..000000000
--- a/tests/ef_tests/src/doc.rs
+++ /dev/null
@@ -1,253 +0,0 @@
-use crate::case_result::CaseResult;
-use crate::cases::*;
-use crate::doc_header::DocHeader;
-use crate::error::Error;
-use crate::yaml_decode::{yaml_split_header_and_cases, YamlDecode};
-use crate::EfTest;
-use serde_derive::Deserialize;
-use std::{fs::File, io::prelude::*, path::PathBuf};
-use types::{MainnetEthSpec, MinimalEthSpec};
-
-#[derive(Debug, Deserialize)]
-pub struct Doc {
-    pub header_yaml: String,
-    pub cases_yaml: String,
-    pub path: PathBuf,
-}
-
-impl Doc {
-    fn from_path(path: PathBuf) -> Self {
-        let mut file = File::open(path.clone()).unwrap();
-
-        let mut yaml = String::new();
-        file.read_to_string(&mut yaml).unwrap();
-
-        let (header_yaml, cases_yaml) = yaml_split_header_and_cases(yaml.clone());
-
-        Self {
-            header_yaml,
-            cases_yaml,
-            path,
-        }
-    }
-
-    pub fn test_results(&self) -> Vec<CaseResult> {
-        let header: DocHeader = serde_yaml::from_str(&self.header_yaml.as_str()).unwrap();
-
-        match (
-            header.runner.as_ref(),
-            header.handler.as_ref(),
-            header.config.as_ref(),
-        ) {
-            ("ssz", "uint", _) => run_test::<SszGeneric>(self),
-            ("ssz", "static", "minimal") => run_test::<SszStatic<MinimalEthSpec>>(self),
-            ("ssz", "static", "mainnet") => run_test::<SszStatic<MainnetEthSpec>>(self),
-            ("sanity", "slots", "minimal") => run_test::<SanitySlots<MinimalEthSpec>>(self),
-            // FIXME: skipped due to compact committees issue
-            ("sanity", "slots", "mainnet") => vec![], // run_test::<SanitySlots<MainnetEthSpec>>(self),
-            ("sanity", "blocks", "minimal") => run_test::<SanityBlocks<MinimalEthSpec>>(self),
-            // FIXME: skipped due to compact committees issue
-            ("sanity", "blocks", "mainnet") => vec![], // run_test::<SanityBlocks<MainnetEthSpec>>(self),
-            ("shuffling", "core", "minimal") => run_test::<Shuffling<MinimalEthSpec>>(self),
-            ("shuffling", "core", "mainnet") => run_test::<Shuffling<MainnetEthSpec>>(self),
-            ("bls", "aggregate_pubkeys", "mainnet") => run_test::<BlsAggregatePubkeys>(self),
-            ("bls", "aggregate_sigs", "mainnet") => run_test::<BlsAggregateSigs>(self),
-            ("bls", "msg_hash_compressed", "mainnet") => run_test::<BlsG2Compressed>(self),
-            // Note this test fails due to a difference in our internal representations. It does
-            // not affect verification or external representation.
-            //
-            // It is skipped.
- ("bls", "msg_hash_uncompressed", "mainnet") => vec![], - ("bls", "priv_to_pub", "mainnet") => run_test::(self), - ("bls", "sign_msg", "mainnet") => run_test::(self), - ("operations", "deposit", "mainnet") => { - run_test::>(self) - } - ("operations", "deposit", "minimal") => { - run_test::>(self) - } - ("operations", "transfer", "mainnet") => { - run_test::>(self) - } - ("operations", "transfer", "minimal") => { - run_test::>(self) - } - ("operations", "voluntary_exit", "mainnet") => { - run_test::>(self) - } - ("operations", "voluntary_exit", "minimal") => { - run_test::>(self) - } - ("operations", "proposer_slashing", "mainnet") => { - run_test::>(self) - } - ("operations", "proposer_slashing", "minimal") => { - run_test::>(self) - } - ("operations", "attester_slashing", "mainnet") => { - run_test::>(self) - } - ("operations", "attester_slashing", "minimal") => { - run_test::>(self) - } - ("operations", "attestation", "mainnet") => { - run_test::>(self) - } - ("operations", "attestation", "minimal") => { - run_test::>(self) - } - ("operations", "block_header", "mainnet") => { - run_test::>(self) - } - ("operations", "block_header", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "crosslinks", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "crosslinks", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "registry_updates", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "registry_updates", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "justification_and_finalization", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "justification_and_finalization", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "slashings", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "slashings", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "final_updates", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "final_updates", "mainnet") => { - vec![] - // FIXME: skipped due to compact committees issue - // run_test::>(self) - } - ("genesis", "initialization", "minimal") => { - run_test::>(self) - } - ("genesis", "initialization", "mainnet") => { - run_test::>(self) - } - ("genesis", "validity", "minimal") => run_test::>(self), - ("genesis", "validity", "mainnet") => run_test::>(self), - (runner, handler, config) => panic!( - "No implementation for runner: \"{}\", handler: \"{}\", config: \"{}\"", - runner, handler, config - ), - } - } - - pub fn assert_tests_pass(path: PathBuf) { - let doc = Self::from_path(path); - let results = doc.test_results(); - - let (failed, skipped_bls, skipped_known_failures) = categorize_results(&results); - - if failed.len() + skipped_known_failures.len() > 0 { - print_results( - &doc, - &failed, - &skipped_bls, - &skipped_known_failures, - &results, - ); - if !failed.is_empty() { - panic!("Tests failed (see above)"); - } - } else { - println!("Passed {} tests in {:?}", results.len(), doc.path); - } - } -} - -pub fn run_test(doc: &Doc) -> Vec -where - Cases: EfTest + YamlDecode, -{ - // Pass only the "test_cases" YAML string to `yaml_decode`. 
- let test_cases: Cases = Cases::yaml_decode(&doc.cases_yaml).unwrap(); - - test_cases.test_results() -} - -pub fn categorize_results( - results: &[CaseResult], -) -> (Vec<&CaseResult>, Vec<&CaseResult>, Vec<&CaseResult>) { - let mut failed = vec![]; - let mut skipped_bls = vec![]; - let mut skipped_known_failures = vec![]; - - for case in results { - match case.result.as_ref().err() { - Some(Error::SkippedBls) => skipped_bls.push(case), - Some(Error::SkippedKnownFailure) => skipped_known_failures.push(case), - Some(_) => failed.push(case), - None => (), - } - } - - (failed, skipped_bls, skipped_known_failures) -} - -pub fn print_results( - doc: &Doc, - failed: &[&CaseResult], - skipped_bls: &[&CaseResult], - skipped_known_failures: &[&CaseResult], - results: &[CaseResult], -) { - let header: DocHeader = serde_yaml::from_str(&doc.header_yaml).unwrap(); - println!("--------------------------------------------------"); - println!( - "Test {}", - if failed.is_empty() { - "Result" - } else { - "Failure" - } - ); - println!("Title: {}", header.title); - println!("File: {:?}", doc.path); - println!( - "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed. (See below for errors)", - results.len(), - failed.len(), - skipped_known_failures.len(), - skipped_bls.len(), - results.len() - skipped_bls.len() - skipped_known_failures.len() - failed.len() - ); - println!(); - - for case in skipped_known_failures { - println!("-------"); - println!( - "case[{}] ({}) skipped because it's a known failure", - case.case_index, case.desc, - ); - } - for failure in failed { - let error = failure.result.clone().unwrap_err(); - - println!("-------"); - println!( - "case[{}] ({}) failed with {}:", - failure.case_index, - failure.desc, - error.name() - ); - println!("{}", error.message()); - } - println!(); -} diff --git a/tests/ef_tests/src/doc_header.rs b/tests/ef_tests/src/doc_header.rs deleted file mode 100644 index c0d6d3276..000000000 --- a/tests/ef_tests/src/doc_header.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde_derive::Deserialize; - -#[derive(Debug, Deserialize)] -pub struct DocHeader { - pub title: String, - pub summary: String, - pub forks_timeline: String, - pub forks: Vec, - pub config: String, - pub runner: String, - pub handler: String, -} diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs new file mode 100644 index 000000000..e8c83e1f8 --- /dev/null +++ b/tests/ef_tests/src/handler.rs @@ -0,0 +1,297 @@ +use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; +use crate::type_name; +use crate::type_name::TypeName; +use std::fs; +use std::marker::PhantomData; +use std::path::PathBuf; +use tree_hash::SignedRoot; +use types::EthSpec; + +pub trait Handler { + type Case: Case + LoadCase; + + fn config_name() -> &'static str { + "general" + } + + fn fork_name() -> &'static str { + "phase0" + } + + fn runner_name() -> &'static str; + + fn handler_name() -> String; + + fn run() { + let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("eth2.0-spec-tests") + .join("tests") + .join(Self::config_name()) + .join(Self::fork_name()) + .join(Self::runner_name()) + .join(Self::handler_name()); + + // If the directory containing the tests does not exist, just let all tests pass. 
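+        //
+        // `handler_path` resolves to `tests/<config>/<fork>/<runner>/<handler>`;
+        // beneath it sit suite directories, each holding one directory per test
+        // case, e.g. `.../ssz_generic/basic_vector/valid/<case>/` (names
+        // illustrative, not taken from the archives). Each case directory is what
+        // `Self::Case::load_from_dir` receives below.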
+ if !handler_path.exists() { + return; + } + + // Iterate through test suites + let test_cases = fs::read_dir(&handler_path) + .expect("handler dir exists") + .flat_map(|entry| { + entry + .ok() + .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) + }) + .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) + .flat_map(Result::ok) + .map(|test_case_dir| { + let path = test_case_dir.path(); + let case = Self::Case::load_from_dir(&path).expect("test should load"); + (path, case) + }) + .collect(); + + let results = Cases { test_cases }.test_results(); + + let name = format!("{}/{}", Self::runner_name(), Self::handler_name()); + crate::results::assert_tests_pass(&name, &handler_path, &results); + } +} + +macro_rules! bls_handler { + ($runner_name: ident, $case_name:ident, $handler_name:expr) => { + pub struct $runner_name; + + impl Handler for $runner_name { + type Case = cases::$case_name; + + fn runner_name() -> &'static str { + "bls" + } + + fn handler_name() -> String { + $handler_name.into() + } + } + }; +} + +bls_handler!( + BlsAggregatePubkeysHandler, + BlsAggregatePubkeys, + "aggregate_pubkeys" +); +bls_handler!(BlsAggregateSigsHandler, BlsAggregateSigs, "aggregate_sigs"); +bls_handler!( + BlsG2CompressedHandler, + BlsG2Compressed, + "msg_hash_compressed" +); +bls_handler!(BlsPrivToPubHandler, BlsPrivToPub, "priv_to_pub"); +bls_handler!(BlsSignMsgHandler, BlsSign, "sign_msg"); + +/// Handler for SSZ types that do not implement `SignedRoot`. +pub struct SszStaticHandler(PhantomData<(T, E)>); + +/// Handler for SSZ types that do implement `SignedRoot`. +pub struct SszStaticSRHandler(PhantomData<(T, E)>); + +impl Handler for SszStaticHandler +where + T: cases::SszStaticType + TypeName, + E: TypeName, +{ + type Case = cases::SszStatic; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "ssz_static" + } + + fn handler_name() -> String { + T::name().into() + } +} + +impl Handler for SszStaticSRHandler +where + T: cases::SszStaticType + SignedRoot + TypeName, + E: TypeName, +{ + type Case = cases::SszStaticSR; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "ssz_static" + } + + fn handler_name() -> String { + T::name().into() + } +} + +pub struct ShufflingHandler(PhantomData); + +impl Handler for ShufflingHandler { + type Case = cases::Shuffling; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "shuffling" + } + + fn handler_name() -> String { + "core".into() + } +} + +pub struct SanityBlocksHandler(PhantomData); + +impl Handler for SanityBlocksHandler { + type Case = cases::SanityBlocks; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "sanity" + } + + fn handler_name() -> String { + "blocks".into() + } +} + +pub struct SanitySlotsHandler(PhantomData); + +impl Handler for SanitySlotsHandler { + type Case = cases::SanitySlots; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "sanity" + } + + fn handler_name() -> String { + "slots".into() + } +} + +pub struct EpochProcessingHandler(PhantomData<(E, T)>); + +impl> Handler for EpochProcessingHandler { + type Case = cases::EpochProcessing; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "epoch_processing" + } + + fn handler_name() -> String { + T::name().into() + } +} + +pub struct GenesisValidityHandler(PhantomData); + +impl Handler 
for GenesisValidityHandler { + type Case = cases::GenesisValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "genesis" + } + + fn handler_name() -> String { + "validity".into() + } +} + +pub struct GenesisInitializationHandler(PhantomData); + +impl Handler for GenesisInitializationHandler { + type Case = cases::GenesisInitialization; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "genesis" + } + + fn handler_name() -> String { + "initialization".into() + } +} + +pub struct OperationsHandler(PhantomData<(E, O)>); + +impl> Handler for OperationsHandler { + type Case = cases::Operations; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "operations" + } + + fn handler_name() -> String { + O::handler_name() + } +} + +pub struct SszGenericHandler(PhantomData); + +impl Handler for SszGenericHandler { + type Case = cases::SszGeneric; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "ssz_generic" + } + + fn handler_name() -> String { + H::name().into() + } +} + +// Supported SSZ generic handlers +pub struct BasicVector; +type_name!(BasicVector, "basic_vector"); +pub struct Bitlist; +type_name!(Bitlist, "bitlist"); +pub struct Bitvector; +type_name!(Bitvector, "bitvector"); +pub struct Boolean; +type_name!(Boolean, "boolean"); +pub struct Uints; +type_name!(Uints, "uints"); +pub struct Containers; +type_name!(Containers, "containers"); diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index fdd4e7b85..719bfc1aa 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -2,21 +2,17 @@ use types::EthSpec; pub use case_result::CaseResult; pub use cases::Case; -pub use doc::Doc; +pub use cases::{ + Crosslinks, FinalUpdates, JustificationAndFinalization, RegistryUpdates, Slashings, +}; pub use error::Error; -pub use yaml_decode::YamlDecode; +pub use handler::*; mod bls_setting; mod case_result; mod cases; -mod doc; -mod doc_header; +mod decode; mod error; -mod yaml_decode; - -/// Defined where an object can return the results of some test(s) adhering to the Ethereum -/// Foundation testing format. -pub trait EfTest { - /// Returns the results of executing one or more tests. 
- fn test_results(&self) -> Vec; -} +mod handler; +mod results; +mod type_name; diff --git a/tests/ef_tests/src/results.rs b/tests/ef_tests/src/results.rs new file mode 100644 index 000000000..4f5513a9a --- /dev/null +++ b/tests/ef_tests/src/results.rs @@ -0,0 +1,92 @@ +use crate::case_result::CaseResult; +use crate::error::Error; +use std::path::Path; + +pub fn assert_tests_pass(handler_name: &str, path: &Path, results: &[CaseResult]) { + let (failed, skipped_bls, skipped_known_failures) = categorize_results(results); + + if failed.len() + skipped_known_failures.len() > 0 { + print_results( + handler_name, + &failed, + &skipped_bls, + &skipped_known_failures, + &results, + ); + if !failed.is_empty() { + panic!("Tests failed (see above)"); + } + } else { + println!("Passed {} tests in {}", results.len(), path.display()); + } +} + +pub fn categorize_results( + results: &[CaseResult], +) -> (Vec<&CaseResult>, Vec<&CaseResult>, Vec<&CaseResult>) { + let mut failed = vec![]; + let mut skipped_bls = vec![]; + let mut skipped_known_failures = vec![]; + + for case in results { + match case.result.as_ref().err() { + Some(Error::SkippedBls) => skipped_bls.push(case), + Some(Error::SkippedKnownFailure) => skipped_known_failures.push(case), + Some(_) => failed.push(case), + None => (), + } + } + + (failed, skipped_bls, skipped_known_failures) +} + +pub fn print_results( + handler_name: &str, + failed: &[&CaseResult], + skipped_bls: &[&CaseResult], + skipped_known_failures: &[&CaseResult], + results: &[CaseResult], +) { + println!("--------------------------------------------------"); + println!( + "Test {}", + if failed.is_empty() { + "Result" + } else { + "Failure" + } + ); + println!("Title: {}", handler_name); + println!( + "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed. (See below for errors)", + results.len(), + failed.len(), + skipped_known_failures.len(), + skipped_bls.len(), + results.len() - skipped_bls.len() - skipped_known_failures.len() - failed.len() + ); + println!(); + + for case in skipped_known_failures { + println!("-------"); + println!( + "case ({}) from {} skipped because it's a known failure", + case.desc, + case.path.display() + ); + } + for failure in failed { + let error = failure.result.clone().unwrap_err(); + + println!("-------"); + println!( + "case {} ({}) from {} failed with {}:", + failure.case_index, + failure.desc, + failure.path.display(), + error.name() + ); + println!("{}", error.message()); + } + println!(); +} diff --git a/tests/ef_tests/src/type_name.rs b/tests/ef_tests/src/type_name.rs new file mode 100644 index 000000000..5af0c5256 --- /dev/null +++ b/tests/ef_tests/src/type_name.rs @@ -0,0 +1,60 @@ +//! Mapping from types to canonical string identifiers used in testing. +use types::*; + +pub trait TypeName { + fn name() -> &'static str; +} + +#[macro_export] +macro_rules! type_name { + ($typ:ident) => { + type_name!($typ, stringify!($typ)); + }; + ($typ:ident, $name:expr) => { + impl TypeName for $typ { + fn name() -> &'static str { + $name + } + } + }; +} + +#[macro_export] +macro_rules! 
type_name_generic {
+    ($typ:ident) => {
+        type_name_generic!($typ, stringify!($typ));
+    };
+    ($typ:ident, $name:expr) => {
+        impl<E: EthSpec> TypeName for $typ<E> {
+            fn name() -> &'static str {
+                $name
+            }
+        }
+    };
+}
+
+type_name!(MinimalEthSpec, "minimal");
+type_name!(MainnetEthSpec, "mainnet");
+
+type_name_generic!(Attestation);
+type_name!(AttestationData);
+type_name!(AttestationDataAndCustodyBit);
+type_name_generic!(AttesterSlashing);
+type_name_generic!(BeaconBlock);
+type_name_generic!(BeaconBlockBody);
+type_name!(BeaconBlockHeader);
+type_name_generic!(BeaconState);
+type_name!(Checkpoint);
+type_name_generic!(CompactCommittee);
+type_name!(Crosslink);
+type_name!(Deposit);
+type_name!(DepositData);
+type_name!(Eth1Data);
+type_name!(Fork);
+type_name_generic!(HistoricalBatch);
+type_name_generic!(IndexedAttestation);
+type_name_generic!(PendingAttestation);
+type_name!(ProposerSlashing);
+type_name!(Transfer);
+type_name!(Validator);
+type_name!(VoluntaryExit);
diff --git a/tests/ef_tests/src/yaml_decode.rs b/tests/ef_tests/src/yaml_decode.rs
deleted file mode 100644
index c89dd92a9..000000000
--- a/tests/ef_tests/src/yaml_decode.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-use super::*;
-use ethereum_types::{U128, U256};
-use types::Fork;
-
-mod utils;
-
-pub use utils::*;
-
-pub trait YamlDecode: Sized {
-    /// Decode an object from the test specification YAML.
-    fn yaml_decode(string: &str) -> Result<Self, Error>;
-}
-
-/// Basic types can generally be decoded with the `parse` fn if they implement `str::FromStr`.
-macro_rules! impl_via_parse {
-    ($ty: ty) => {
-        impl YamlDecode for $ty {
-            fn yaml_decode(string: &str) -> Result<Self, Error> {
-                string
-                    .parse::<Self>()
-                    .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))
-            }
-        }
-    };
-}
-
-impl_via_parse!(u8);
-impl_via_parse!(u16);
-impl_via_parse!(u32);
-impl_via_parse!(u64);
-
-/// Some `ethereum-types` methods have a `str::FromStr` implementation that expects `0x`-prefixed
-/// hex, so we use `from_dec_str` instead.
-macro_rules! impl_via_from_dec_str {
-    ($ty: ty) => {
-        impl YamlDecode for $ty {
-            fn yaml_decode(string: &str) -> Result<Self, Error> {
-                Self::from_dec_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))
-            }
-        }
-    };
-}
-
-impl_via_from_dec_str!(U128);
-impl_via_from_dec_str!(U256);
-
-/// Types that already implement `serde::Deserialize` can be decoded using `serde_yaml`.
-macro_rules! impl_via_serde_yaml {
-    ($ty: ty) => {
-        impl YamlDecode for $ty {
-            fn yaml_decode(string: &str) -> Result<Self, Error> {
-                serde_yaml::from_str(string)
-                    .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))
-            }
-        }
-    };
-}
-
-impl_via_serde_yaml!(Fork);
diff --git a/tests/ef_tests/src/yaml_decode/utils.rs b/tests/ef_tests/src/yaml_decode/utils.rs
deleted file mode 100644
index 7b6caac72..000000000
--- a/tests/ef_tests/src/yaml_decode/utils.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-pub fn yaml_split_header_and_cases(mut yaml: String) -> (String, String) {
-    let test_cases_start = yaml.find("\ntest_cases:\n").unwrap();
-    // + 1 to skip the \n we used for matching.
- let mut test_cases = yaml.split_off(test_cases_start + 1); - - let end_of_first_line = test_cases.find('\n').unwrap(); - let test_cases = test_cases.split_off(end_of_first_line + 1); - - (yaml, test_cases) -} diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index deb699e78..337c54b46 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -1,225 +1,214 @@ use ef_tests::*; -use rayon::prelude::*; -use std::path::{Path, PathBuf}; -use walkdir::WalkDir; - -fn yaml_files_in_test_dir(dir: &Path) -> Vec { - let base_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("eth2.0-spec-tests") - .join("tests") - .join(dir); - - assert!( - base_path.exists(), - format!( - "Unable to locate {:?}. Did you init git submodules?", - base_path - ) - ); - - let mut paths: Vec = WalkDir::new(base_path) - .into_iter() - .filter_map(|e| e.ok()) - .filter_map(|entry| { - if entry.file_type().is_file() { - match entry.file_name().to_str() { - Some(f) if f.ends_with(".yaml") => Some(entry.path().to_path_buf()), - Some(f) if f.ends_with(".yml") => Some(entry.path().to_path_buf()), - _ => None, - } - } else { - None - } - }) - .collect(); - - // Reverse the file order. Assuming files come in lexicographical order, executing tests in - // reverse means we get the "minimal" tests before the "mainnet" tests. This makes life easier - // for debugging. - paths.reverse(); - paths -} - -#[test] -#[cfg(feature = "fake_crypto")] -fn ssz_generic() { - yaml_files_in_test_dir(&Path::new("ssz_generic")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); -} - -#[test] -#[cfg(feature = "fake_crypto")] -fn ssz_static() { - yaml_files_in_test_dir(&Path::new("ssz_static")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); -} +use types::*; #[test] fn shuffling() { - yaml_files_in_test_dir(&Path::new("shuffling").join("core")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + ShufflingHandler::::run(); + ShufflingHandler::::run(); } #[test] fn operations_deposit() { - yaml_files_in_test_dir(&Path::new("operations").join("deposit")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + OperationsHandler::::run(); } #[test] fn operations_transfer() { - yaml_files_in_test_dir(&Path::new("operations").join("transfer")) - .into_par_iter() - .rev() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + // Note: there are no transfer tests for mainnet } #[test] fn operations_exit() { - yaml_files_in_test_dir(&Path::new("operations").join("voluntary_exit")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + OperationsHandler::::run(); } #[test] fn operations_proposer_slashing() { - yaml_files_in_test_dir(&Path::new("operations").join("proposer_slashing")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + OperationsHandler::::run(); } #[test] fn operations_attester_slashing() { - yaml_files_in_test_dir(&Path::new("operations").join("attester_slashing")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::>::run(); + OperationsHandler::>::run(); } #[test] fn operations_attestation() { - yaml_files_in_test_dir(&Path::new("operations").join("attestation")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + 
OperationsHandler::>::run(); + OperationsHandler::>::run(); } #[test] fn operations_block_header() { - yaml_files_in_test_dir(&Path::new("operations").join("block_header")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::>::run(); + OperationsHandler::>::run(); } #[test] fn sanity_blocks() { - yaml_files_in_test_dir(&Path::new("sanity").join("blocks")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + SanityBlocksHandler::::run(); + SanityBlocksHandler::::run(); } #[test] fn sanity_slots() { - yaml_files_in_test_dir(&Path::new("sanity").join("slots")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + SanitySlotsHandler::::run(); + SanitySlotsHandler::::run(); } #[test] #[cfg(not(feature = "fake_crypto"))] -fn bls() { - yaml_files_in_test_dir(&Path::new("bls")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); +fn bls_aggregate_pubkeys() { + BlsAggregatePubkeysHandler::run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_aggregate_sigs() { + BlsAggregateSigsHandler::run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_msg_hash_g2_compressed() { + BlsG2CompressedHandler::run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_priv_to_pub() { + BlsPrivToPubHandler::run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_sign_msg() { + BlsSignMsgHandler::run(); +} + +#[cfg(feature = "fake_crypto")] +macro_rules! ssz_static_test { + // Signed-root + ($test_name:ident, $typ:ident$(<$generics:tt>)?, SR) => { + ssz_static_test!($test_name, SszStaticSRHandler, $typ$(<$generics>)?); + }; + // Non-signed root + ($test_name:ident, $typ:ident$(<$generics:tt>)?) => { + ssz_static_test!($test_name, SszStaticHandler, $typ$(<$generics>)?); + }; + // Generic + ($test_name:ident, $handler:ident, $typ:ident<_>) => { + ssz_static_test!( + $test_name, $handler, { + ($typ, MinimalEthSpec), + ($typ, MainnetEthSpec) + } + ); + }; + // Non-generic + ($test_name:ident, $handler:ident, $typ:ident) => { + ssz_static_test!( + $test_name, $handler, { + ($typ, MinimalEthSpec), + ($typ, MainnetEthSpec) + } + ); + }; + // Base case + ($test_name:ident, $handler:ident, { $(($typ:ty, $spec:ident)),+ }) => { + #[test] + fn $test_name() { + $( + $handler::<$typ, $spec>::run(); + )+ + } + }; +} + +#[cfg(feature = "fake_crypto")] +mod ssz_static { + use ef_tests::{Handler, SszStaticHandler, SszStaticSRHandler}; + use types::*; + + ssz_static_test!(attestation, Attestation<_>, SR); + ssz_static_test!(attestation_data, AttestationData); + ssz_static_test!( + attestation_data_and_custody_bit, + AttestationDataAndCustodyBit + ); + ssz_static_test!(attester_slashing, AttesterSlashing<_>); + ssz_static_test!(beacon_block, BeaconBlock<_>, SR); + ssz_static_test!(beacon_block_body, BeaconBlockBody<_>); + ssz_static_test!(beacon_block_header, BeaconBlockHeader, SR); + ssz_static_test!(beacon_state, BeaconState<_>); + ssz_static_test!(checkpoint, Checkpoint); + ssz_static_test!(compact_committee, CompactCommittee<_>); + ssz_static_test!(crosslink, Crosslink); + ssz_static_test!(deposit, Deposit); + ssz_static_test!(deposit_data, DepositData, SR); + ssz_static_test!(eth1_data, Eth1Data); + ssz_static_test!(fork, Fork); + ssz_static_test!(historical_batch, HistoricalBatch<_>); + ssz_static_test!(indexed_attestation, IndexedAttestation<_>, SR); + ssz_static_test!(pending_attestation, PendingAttestation<_>); + ssz_static_test!(proposer_slashing, 
ProposerSlashing); + ssz_static_test!(transfer, Transfer, SR); + ssz_static_test!(validator, Validator); + ssz_static_test!(voluntary_exit, VoluntaryExit, SR); +} + +#[test] +fn ssz_generic() { + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); } #[test] fn epoch_processing_justification_and_finalization() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("justification_and_finalization")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_crosslinks() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("crosslinks")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_registry_updates() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("registry_updates")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_slashings() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("slashings")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_final_updates() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("final_updates")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn genesis_initialization() { - yaml_files_in_test_dir(&Path::new("genesis").join("initialization")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + GenesisInitializationHandler::::run(); } #[test] fn genesis_validity() { - yaml_files_in_test_dir(&Path::new("genesis").join("validity")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + GenesisValidityHandler::::run(); + // Note: there are no genesis validity tests for mainnet } diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 927731f63..f6961cba8 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -18,13 +18,16 @@ eth2_ssz = "0.1" eth2_config = { path = "../eth2/utils/eth2_config" } tree_hash = "0.1" clap = "2.32.0" +lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } +eth2_interop_keypairs = { path = "../eth2/utils/eth2_interop_keypairs" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } types = { path = "../eth2/types" } serde = "1.0" serde_derive = "1.0" -slog = "^2.2.3" +serde_json = "^1.0" +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } slog-async = "^2.3.0" slog-json = "^2.3" slog-term = "^2.4.0" diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index e831b4c1c..6f4a5f304 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -50,9 +50,12 @@ impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a /// Handle outputs and results from 
diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs
index e831b4c1c..6f4a5f304 100644
--- a/validator_client/src/attestation_producer/mod.rs
+++ b/validator_client/src/attestation_producer/mod.rs
@@ -50,9 +50,12 @@ impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a, B, S, E>
     /// Handle outputs and results from attestation production.
     pub fn handle_produce_attestation(&mut self, log: slog::Logger) {
         match self.produce_attestation() {
-            Ok(ValidatorEvent::AttestationProduced(_slot)) => {
-                info!(log, "Attestation produced"; "Validator" => format!("{}", self.signer))
-            }
+            Ok(ValidatorEvent::AttestationProduced(slot)) => info!(
+                log,
+                "Attestation produced";
+                "validator" => format!("{}", self.signer),
+                "slot" => slot,
+            ),
             Err(e) => error!(log, "Attestation production error"; "Error" => format!("{:?}", e)),
             Ok(ValidatorEvent::SignerRejection(_slot)) => {
                 error!(log, "Attestation production error"; "Error" => "Signer could not sign the attestation".to_string())
diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs
index ca1e3a1d8..bb9c5741d 100644
--- a/validator_client/src/block_producer/mod.rs
+++ b/validator_client/src/block_producer/mod.rs
@@ -6,7 +6,8 @@ pub use self::beacon_node_block::{BeaconNodeError, PublishOutcome};
 pub use self::grpc::BeaconBlockGrpcClient;
 use crate::signer::Signer;
 use core::marker::PhantomData;
-use slog::{error, info, warn};
+use serde_json;
+use slog::{error, info, trace, warn};
 use std::sync::Arc;
 use tree_hash::{SignedRoot, TreeHash};
 use types::{BeaconBlock, ChainSpec, Domain, EthSpec, Fork, Slot};
@@ -53,27 +54,32 @@ pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> {
     pub slots_per_epoch: u64,
     /// Mere vessel for E.
     pub _phantom: PhantomData<E>,
+    /// The logger, for logging
+    pub log: slog::Logger,
 }

 impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> {
     /// Handle outputs and results from block production.
-    pub fn handle_produce_block(&mut self, log: slog::Logger) {
+    pub fn handle_produce_block(&mut self) {
         match self.produce_block() {
-            Ok(ValidatorEvent::BlockProduced(_slot)) => {
-                info!(log, "Block produced"; "Validator" => format!("{}", self.signer))
-            }
-            Err(e) => error!(log, "Block production error"; "Error" => format!("{:?}", e)),
+            Ok(ValidatorEvent::BlockProduced(slot)) => info!(
+                self.log,
+                "Block produced";
+                "validator" => format!("{}", self.signer),
+                "slot" => slot,
+            ),
+            Err(e) => error!(self.log, "Block production error"; "Error" => format!("{:?}", e)),
             Ok(ValidatorEvent::SignerRejection(_slot)) => {
-                error!(log, "Block production error"; "Error" => "Signer Could not sign the block".to_string())
+                error!(self.log, "Block production error"; "Error" => "Signer could not sign the block".to_string())
             }
             Ok(ValidatorEvent::SlashableBlockNotProduced(_slot)) => {
-                error!(log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string())
+                error!(self.log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string())
             }
             Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(_slot)) => {
-                error!(log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string())
+                error!(self.log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string())
             }
             Ok(v) => {
-                warn!(log, "Unknown result for block production"; "Error" => format!("{:?}", v))
+                warn!(self.log, "Unknown result for block production"; "Error" => format!("{:?}", v))
             }
         }
     }
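The new `log` field on `BlockProducer` removes the need to thread a `Logger` argument through every method; helpers simply log via `self.log`. The pattern, reduced to a self-contained sketch:

```rust
use slog::{info, Logger};

// Sketch: the producer owns its logger, so methods log via `self.log`
// instead of taking a `log` parameter on every call.
struct Producer {
    log: Logger,
}

impl Producer {
    fn handle(&self) {
        info!(self.log, "Block produced"; "slot" => 42u64);
    }
}
```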
@@ -90,14 +96,21 @@ impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E>
     /// slashing.
     pub fn produce_block(&mut self) -> Result<ValidatorEvent, Error> {
         let epoch = self.slot.epoch(self.slots_per_epoch);
+        trace!(self.log, "Producing block"; "epoch" => epoch);

         let message = epoch.tree_hash_root();
         let randao_reveal = match self.signer.sign_message(
             &message,
             self.spec.get_domain(epoch, Domain::Randao, &self.fork),
         ) {
-            None => return Ok(ValidatorEvent::SignerRejection(self.slot)),
-            Some(signature) => signature,
+            None => {
+                warn!(self.log, "Signing rejected"; "message" => format!("{:?}", message));
+                return Ok(ValidatorEvent::SignerRejection(self.slot));
+            }
+            Some(signature) => {
+                info!(self.log, "Signed tree_hash_root for randao_reveal"; "message" => format!("{:?}", message), "signature" => serde_json::to_string(&signature).expect("We should always be able to serialize a signature as JSON."));
+                signature
+            }
         };

         if let Some(block) = self
@@ -105,12 +118,13 @@ impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E>
             .produce_beacon_block(self.slot, &randao_reveal)?
         {
             if self.safe_to_produce(&block) {
+                let slot = block.slot;
                 let domain = self
                     .spec
                     .get_domain(epoch, Domain::BeaconProposer, &self.fork);
                 if let Some(block) = self.sign_block(block, domain) {
                     self.beacon_node.publish_beacon_block(block)?;
-                    Ok(ValidatorEvent::BlockProduced(self.slot))
+                    Ok(ValidatorEvent::BlockProduced(slot))
                 } else {
                     Ok(ValidatorEvent::SignerRejection(self.slot))
                 }
diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs
index 7bc504b23..33e8addb6 100644
--- a/validator_client/src/config.rs
+++ b/validator_client/src/config.rs
@@ -2,22 +2,53 @@ use bincode;
 use bls::Keypair;
 use clap::ArgMatches;
 use serde_derive::{Deserialize, Serialize};
-use slog::{debug, error, info, o, Drain};
+use slog::{error, info, o, warn, Drain};
 use std::fs::{self, File, OpenOptions};
 use std::io::{Error, ErrorKind};
+use std::ops::Range;
 use std::path::PathBuf;
 use std::sync::Mutex;
-use types::{EthSpec, MainnetEthSpec};
+use types::{
+    test_utils::{generate_deterministic_keypair, load_keypairs_from_yaml},
+    EthSpec, MainnetEthSpec,
+};
+
+pub const DEFAULT_SERVER: &str = "localhost";
+pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051";
+pub const DEFAULT_SERVER_HTTP_PORT: &str = "5052";
+
+#[derive(Clone)]
+pub enum KeySource {
+    /// Load the keypairs from disk.
+    Disk,
+    /// Generate the keypairs (insecure, generates predictable keys).
+    TestingKeypairRange(Range<usize>),
+    /// Load testing keypairs from YAML
+    YamlKeypairs(PathBuf),
+}
+
+impl Default for KeySource {
+    fn default() -> Self {
+        KeySource::Disk
+    }
+}

 /// Stores the core configuration for this validator instance.
 #[derive(Clone, Serialize, Deserialize)]
 pub struct Config {
     /// The data directory, which stores all validator databases
     pub data_dir: PathBuf,
+    /// The source for loading keypairs
+    #[serde(skip)]
+    pub key_source: KeySource,
     /// The path where the logs will be outputted
     pub log_file: PathBuf,
     /// The server at which the Beacon Node can be contacted
     pub server: String,
+    /// The gRPC port on the server
+    pub server_grpc_port: u16,
+    /// The HTTP port on the server, for the REST API.
+    pub server_http_port: u16,
     /// The number of slots per epoch.
     pub slots_per_epoch: u64,
 }
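`KeySource` centralises the three ways the client can obtain validator keys, so downstream code matches on a single enum instead of juggling flags. A self-contained sketch of that dispatch (the `describe` function is hypothetical and only illustrates the match shape that `fetch_keys` uses further down):

```rust
use std::ops::Range;
use std::path::PathBuf;

enum KeySource {
    Disk,
    TestingKeypairRange(Range<usize>),
    YamlKeypairs(PathBuf),
}

fn describe(source: &KeySource) -> String {
    match source {
        KeySource::Disk => "loading keypairs from the data directory".into(),
        KeySource::TestingKeypairRange(range) => {
            format!("generating insecure keys for validators {:?}", range)
        }
        KeySource::YamlKeypairs(path) => {
            format!("reading plain-text keys from {:?}", path)
        }
    }
}
```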
@@ -29,14 +60,33 @@ impl Default for Config {
     fn default() -> Self {
         Self {
             data_dir: PathBuf::from(".lighthouse-validator"),
+            key_source: <_>::default(),
             log_file: PathBuf::from(""),
-            server: "localhost:5051".to_string(),
+            server: DEFAULT_SERVER.into(),
+            server_grpc_port: DEFAULT_SERVER_GRPC_PORT
+                .parse::<u16>()
+                .expect("gRPC port constant should be valid"),
+            server_http_port: DEFAULT_SERVER_HTTP_PORT
+                .parse::<u16>()
+                .expect("HTTP port constant should be valid"),
             slots_per_epoch: MainnetEthSpec::slots_per_epoch(),
         }
     }
 }

 impl Config {
+    /// Returns the full path for the client data directory (not just the name of the directory).
+    pub fn full_data_dir(&self) -> Option<PathBuf> {
+        dirs::home_dir().map(|path| path.join(&self.data_dir))
+    }
+
+    /// Creates the data directory (and any non-existing parent directories).
+    pub fn create_data_dir(&self) -> Option<PathBuf> {
+        let path = dirs::home_dir()?.join(&self.data_dir);
+        fs::create_dir_all(&path).ok()?;
+        Some(path)
+    }
+
     /// Apply the following arguments to `self`, replacing values if they are specified in `args`.
     ///
     /// Returns an error if arguments are obviously invalid. May succeed even if some values are
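`full_data_dir` and `create_data_dir` both resolve the relative `data_dir` against the user's home directory, with the second also creating any missing parents. The create path, condensed into a sketch (assuming the `dirs` crate, which this codebase already depends on):

```rust
use std::fs;
use std::path::{Path, PathBuf};

// Resolve the configured, relative data directory against $HOME and
// create it along with any missing parent directories.
fn create_data_dir(data_dir: &Path) -> Option<PathBuf> {
    let path = dirs::home_dir()?.join(data_dir);
    fs::create_dir_all(&path).ok()?;
    Some(path)
}
```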
@@ -94,67 +144,120 @@ impl Config {
         Ok(())
     }

-    /// Try to load keys from validator_dir, returning None if none are found or an error.
+    /// Reads a single keypair from the given `path`.
+    ///
+    /// `path` should be the path to a directory containing a private key. The file name of `path`
+    /// must align with the public key loaded from it, otherwise an error is returned.
+    ///
+    /// An error will be returned if `path` is a file (not a directory).
+    fn read_keypair_file(&self, path: PathBuf) -> Result<Keypair, String> {
+        if !path.is_dir() {
+            return Err("Is not a directory".into());
+        }
+
+        let key_filename: PathBuf = path.join(DEFAULT_PRIVATE_KEY_FILENAME);
+
+        if !key_filename.is_file() {
+            return Err(format!(
+                "Private key is not a file: {:?}",
+                key_filename.to_str()
+            ));
+        }
+
+        let mut key_file = File::open(key_filename.clone())
+            .map_err(|e| format!("Unable to open private key file: {}", e))?;
+
+        let key: Keypair = bincode::deserialize_from(&mut key_file)
+            .map_err(|e| format!("Unable to deserialize private key: {:?}", e))?;
+
+        let ki = key.identifier();
+        if &ki
+            != &path
+                .file_name()
+                .ok_or_else(|| "Invalid path".to_string())?
+                .to_string_lossy()
+        {
+            return Err(format!(
+                "The validator key ({:?}) did not match the directory filename {:?}.",
+                ki,
+                path.to_str()
+            ));
+        } else {
+            Ok(key)
+        }
+    }
+
+    pub fn fetch_keys_from_disk(&self, log: &slog::Logger) -> Result<Vec<Keypair>, String> {
+        Ok(
+            fs::read_dir(&self.full_data_dir().expect("Data dir must exist"))
+                .map_err(|e| format!("Failed to read datadir: {:?}", e))?
+                .filter_map(|validator_dir| {
+                    let path = validator_dir.ok()?.path();
+
+                    if path.is_dir() {
+                        match self.read_keypair_file(path.clone()) {
+                            Ok(keypair) => Some(keypair),
+                            Err(e) => {
+                                error!(
+                                    log,
+                                    "Failed to parse a validator keypair";
+                                    "error" => e,
+                                    "path" => path.to_str(),
+                                );
+                                None
+                            }
+                        }
+                    } else {
+                        None
+                    }
+                })
+                .collect(),
+        )
+    }
+
+    pub fn fetch_testing_keypairs(
+        &self,
+        range: std::ops::Range<usize>,
+    ) -> Result<Vec<Keypair>, String> {
+        Ok(range
+            .into_iter()
+            .map(generate_deterministic_keypair)
+            .collect())
+    }
+
+    /// Loads the keypairs according to `self.key_source`. Will return one or more keypairs, or an
+    /// error.
     #[allow(dead_code)]
-    pub fn fetch_keys(&self, log: &slog::Logger) -> Option<Vec<Keypair>> {
-        let key_pairs: Vec<Keypair> = fs::read_dir(&self.data_dir)
-            .ok()?
-            .filter_map(|validator_dir| {
-                let validator_dir = validator_dir.ok()?;
-
-                if !(validator_dir.file_type().ok()?.is_dir()) {
-                    // Skip non-directories (i.e. no files/symlinks)
-                    return None;
-                }
-
-                let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME);
-
-                if !(key_filename.is_file()) {
-                    info!(
-                        log,
-                        "Private key is not a file: {:?}",
-                        key_filename.to_str()
-                    );
-                    return None;
-                }
-
-                debug!(
+    pub fn fetch_keys(&self, log: &slog::Logger) -> Result<Vec<Keypair>, String> {
+        let keypairs = match &self.key_source {
+            KeySource::Disk => self.fetch_keys_from_disk(log)?,
+            KeySource::TestingKeypairRange(range) => {
+                warn!(
                     log,
-                    "Deserializing private key from file: {:?}",
-                    key_filename.to_str()
+                    "Using insecure interop private keys";
+                    "range" => format!("{:?}", range)
+                );
+                self.fetch_testing_keypairs(range.clone())?
+            }
+            KeySource::YamlKeypairs(path) => {
+                warn!(
+                    log,
+                    "Private keys are stored insecurely (plain text). Testing use only."
                 );
-                let mut key_file = File::open(key_filename.clone()).ok()?;
-
-                let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) {
-                    key_ok
-                } else {
-                    error!(
-                        log,
-                        "Unable to deserialize the private key file: {:?}", key_filename
-                    );
-                    return None;
-                };
-
-                let ki = key.identifier();
-                if ki != validator_dir.file_name().into_string().ok()? {
-                    error!(
-                        log,
-                        "The validator key ({:?}) did not match the directory filename {:?}.",
-                        ki,
-                        &validator_dir.path().to_string_lossy()
-                    );
-                    return None;
-                }
-                Some(key)
-            })
-            .collect();
+                load_keypairs_from_yaml(path.to_path_buf())?
+            }
+        };

         // Check if it's an empty vector, and return none.
-        if key_pairs.is_empty() {
-            None
+        if keypairs.is_empty() {
+            Err(
+                "No validator keypairs were found, unable to proceed. To generate \
+                 testing keypairs, see 'testnet range --help'."
+                    .into(),
+            )
         } else {
-            Some(key_pairs)
+            Ok(keypairs)
         }
     }
 }
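`fetch_testing_keypairs` leans on `generate_deterministic_keypair` (imported above from `types::test_utils`) to turn a validator-index range into reproducible keys; each index maps to the same keypair on every run, which is precisely why these keys are only suitable for testing. Reduced to a sketch, with the helper's signature assumed from its use in this diff:

```rust
use bls::Keypair;
use types::test_utils::generate_deterministic_keypair;

// Each index in the range deterministically produces the same (insecure)
// keypair, so distinct test validators stay stable across restarts.
fn testing_keypairs(range: std::ops::Range<usize>) -> Vec<Keypair> {
    range.map(generate_deterministic_keypair).collect()
}
```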
diff --git a/validator_client/src/error.rs b/validator_client/src/error.rs
index 97500f900..e13f7ded5 100644
--- a/validator_client/src/error.rs
+++ b/validator_client/src/error.rs
@@ -1,16 +1,9 @@
-use slot_clock;
-
 use error_chain::error_chain;

 error_chain! {
     links { }

     errors {
-        SlotClockError(e: slot_clock::SystemTimeSlotClockError) {
-            description("Error reading system time"),
-            display("SlotClockError: '{:?}'", e)
-        }
-
         SystemTimeError(t: String ) {
             description("Error reading system time"),
             display("SystemTimeError: '{}'", t)
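For context, `error_chain!` generates an `Error` type, an `ErrorKind` enum, and a `Result<T>` alias from this declaration. A compressed sketch of the same shape, plus how a variant is raised (following the comma style of the file above, which this codebase compiles with):

```rust
use error_chain::error_chain;

error_chain! {
    errors {
        SystemTimeError(t: String) {
            description("Error reading system time"),
            display("SystemTimeError: '{}'", t)
        }
    }
}

// Raising the variant: `ErrorKind` converts into the generated `Error`.
fn read_time(secs: Option<u64>) -> Result<u64> {
    secs.ok_or_else(|| ErrorKind::SystemTimeError("clock unavailable".into()).into())
}
```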
diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs
index 83a874df7..30ed95661 100644
--- a/validator_client/src/main.rs
+++ b/validator_client/src/main.rs
@@ -6,13 +6,16 @@ pub mod error;
 mod service;
 mod signer;

-use crate::config::Config as ValidatorClientConfig;
+use crate::config::{
+    Config as ClientConfig, KeySource, DEFAULT_SERVER, DEFAULT_SERVER_GRPC_PORT,
+    DEFAULT_SERVER_HTTP_PORT,
+};
 use crate::service::Service as ValidatorService;
-use clap::{App, Arg};
-use eth2_config::{read_from_file, write_to_file, Eth2Config};
+use clap::{App, Arg, ArgMatches, SubCommand};
+use eth2_config::Eth2Config;
+use lighthouse_bootstrap::Bootstrapper;
 use protos::services_grpc::ValidatorServiceClient;
-use slog::{crit, error, info, o, warn, Drain, Level};
-use std::fs;
+use slog::{crit, error, info, o, Drain, Level, Logger};
 use std::path::PathBuf;
 use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec};

@@ -21,6 +24,8 @@ pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator";
 pub const CLIENT_CONFIG_FILENAME: &str = "validator-client.toml";
 pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";

+type Result<T> = core::result::Result<T, String>;
+
 fn main() {
     // Logging
     let decorator = slog_term::TermDecorator::new().build();
@@ -49,28 +54,48 @@ fn main() {
                 .takes_value(true),
         )
         .arg(
-            Arg::with_name("eth2-spec")
-                .long("eth2-spec")
+            Arg::with_name("spec")
+                .long("spec")
+                .value_name("TITLE")
+                .help("Specifies the default eth2 spec type.")
+                .takes_value(true)
+                .possible_values(&["mainnet", "minimal", "interop"])
+                .conflicts_with("eth2-config")
+                .global(true)
+        )
+        .arg(
+            Arg::with_name("eth2-config")
+                .long("eth2-config")
                 .short("e")
                 .value_name("TOML_FILE")
-                .help("Path to Ethereum 2.0 specifications file.")
+                .help("Path to Ethereum 2.0 config and specification file (e.g., eth2_spec.toml).")
                 .takes_value(true),
         )
         .arg(
             Arg::with_name("server")
                 .long("server")
-                .value_name("server")
+                .value_name("NETWORK_ADDRESS")
                 .help("Address to connect to BeaconNode.")
+                .default_value(DEFAULT_SERVER)
                 .takes_value(true),
         )
         .arg(
-            Arg::with_name("default-spec")
-                .long("default-spec")
-                .value_name("TITLE")
-                .short("default-spec")
-                .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.")
-                .takes_value(true)
-                .possible_values(&["mainnet", "minimal", "interop"])
+            Arg::with_name("server-grpc-port")
+                .long("server-grpc-port")
+                .short("g")
+                .value_name("PORT")
+                .help("Port to use for gRPC API connection to the server.")
+                .default_value(DEFAULT_SERVER_GRPC_PORT)
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("server-http-port")
+                .long("server-http-port")
+                .short("h")
+                .value_name("PORT")
+                .help("Port to use for HTTP API connection to the server.")
+                .default_value(DEFAULT_SERVER_HTTP_PORT)
+                .takes_value(true),
         )
         .arg(
             Arg::with_name("debug-level")
@@ -80,7 +105,51 @@ fn main() {
                 .help("The title of the spec constants for chain config.")
                 .takes_value(true)
                 .possible_values(&["info", "debug", "trace", "warn", "error", "crit"])
-                .default_value("info"),
+                .default_value("trace"),
+        )
+        /*
+         * The "testnet" sub-command.
+         *
+         * Used for starting testnet validator clients.
+         */
+        .subcommand(SubCommand::with_name("testnet")
+            .about("Starts a testnet validator using INSECURE, predictable private keys, based off the canonical \
+                    validator index. ONLY USE FOR TESTING PURPOSES!")
+            .arg(
+                Arg::with_name("bootstrap")
+                    .short("b")
+                    .long("bootstrap")
+                    .help("Connect to the RPC server to download the eth2_config via the HTTP API.")
+            )
+            .subcommand(SubCommand::with_name("insecure")
+                .about("Uses the standard, predictable `interop` keygen method to produce a range \
+                        of predictable private keys and starts performing their validator duties.")
+                .arg(Arg::with_name("first_validator")
+                    .value_name("VALIDATOR_INDEX")
+                    .required(true)
+                    .help("The first validator public key to be generated for this client."))
+                .arg(Arg::with_name("validator_count")
+                    .value_name("COUNT")
+                    .required(true)
+                    .help("The number of validators."))
+            )
+            .subcommand(SubCommand::with_name("interop-yaml")
+                .about("Loads plain-text secret keys from YAML files. Expects the interop format defined \
+                        in the ethereum/eth2.0-pm repo.")
+                .arg(Arg::with_name("path")
+                    .value_name("PATH")
+                    .required(true)
+                    .help("Path to a YAML file."))
+            )
+        )
+        .subcommand(SubCommand::with_name("sign_block")
+            .about("Connects to the beacon server, requests a new block (after providing reveal), \
+                    and prints the signed block to standard out")
+            .arg(Arg::with_name("validator")
+                .value_name("VALIDATOR")
+                .required(true)
+                .help("The pubkey of the validator that should sign the block.")
+            )
         )
         .get_matches();

@@ -93,133 +162,25 @@ fn main() {
         Some("crit") => drain.filter_level(Level::Critical),
         _ => unreachable!("guarded by clap"),
     };
+
     let mut log = slog::Logger::root(drain.fuse(), o!());

-    let data_dir = match matches
-        .value_of("datadir")
-        .and_then(|v| Some(PathBuf::from(v)))
-    {
-        Some(v) => v,
-        None => {
-            // use the default
-            let mut default_dir = match dirs::home_dir() {
-                Some(v) => v,
-                None => {
-                    crit!(log, "Failed to find a home directory");
-                    return;
-                }
-            };
-            default_dir.push(DEFAULT_DATA_DIR);
-            default_dir
-        }
-    };
-
-    // create the directory if needed
-    match fs::create_dir_all(&data_dir) {
-        Ok(_) => {}
-        Err(e) => {
-            crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e));
-            return;
-        }
+    if std::mem::size_of::<usize>() != 8 {
+        crit!(
+            log,
+            "Lighthouse only supports 64bit CPUs";
+            "detected" => format!("{}bit", std::mem::size_of::<usize>() * 8)
+        );
     }

-    let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME);
-
-    // Attempt to load the `ClientConfig` from disk.
-    //
-    // If file doesn't exist, create a new, default one.
-    let mut client_config = match read_from_file::<ValidatorClientConfig>(
-        client_config_path.clone(),
-    ) {
-        Ok(Some(c)) => c,
-        Ok(None) => {
-            let default = ValidatorClientConfig::default();
-            if let Err(e) = write_to_file(client_config_path.clone(), &default) {
-                crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e));
-                return;
-            }
-            default
-        }
+    let (client_config, eth2_config) = match get_configs(&matches, &mut log) {
+        Ok(tuple) => tuple,
         Err(e) => {
-            crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e));
-            return;
-        }
-    };
-
-    // Ensure the `data_dir` in the config matches that supplied to the CLI.
-    client_config.data_dir = data_dir.clone();
-
-    // Update the client config with any CLI args.
-    match client_config.apply_cli_args(&matches, &mut log) {
-        Ok(()) => (),
-        Err(s) => {
-            crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s);
-            return;
-        }
-    };
-
-    let eth2_config_path: PathBuf = matches
-        .value_of("eth2-spec")
-        .and_then(|s| Some(PathBuf::from(s)))
-        .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME));
-
-    // Initialise the `Eth2Config`.
-    //
-    // If a CLI parameter is set, overwrite any config file present.
-    // If a parameter is not set, use either the config file present or default to minimal.
-    let cli_config = match matches.value_of("default-spec") {
-        Some("mainnet") => Some(Eth2Config::mainnet()),
-        Some("minimal") => Some(Eth2Config::minimal()),
-        Some("interop") => Some(Eth2Config::interop()),
-        _ => None,
-    };
-    // if a CLI flag is specified, write the new config if it doesn't exist,
-    // otherwise notify the user that the file will not be written.
-    let eth2_config_from_file = match read_from_file::<Eth2Config>(eth2_config_path.clone()) {
-        Ok(config) => config,
-        Err(e) => {
-            crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e));
-            return;
-        }
-    };
-
-    let mut eth2_config = {
-        if let Some(cli_config) = cli_config {
-            if eth2_config_from_file.is_none() {
-                // write to file if one doesn't exist
-                if let Err(e) = write_to_file(eth2_config_path, &cli_config) {
-                    crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
-                    return;
-                }
-            } else {
-                warn!(
-                    log,
-                    "Eth2Config file exists. Configuration file is ignored, using default"
-                );
-            }
-            cli_config
-        } else {
-            // CLI config not specified, read from disk
-            match eth2_config_from_file {
-                Some(config) => config,
-                None => {
-                    // set default to minimal
-                    let eth2_config = Eth2Config::minimal();
-                    if let Err(e) = write_to_file(eth2_config_path, &eth2_config) {
-                        crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
-                        return;
-                    }
-                    eth2_config
-                }
-            }
-        }
-    };
-
-    // Update the eth2 config with any CLI flags.
-    match eth2_config.apply_cli_args(&matches) {
-        Ok(()) => (),
-        Err(s) => {
-            crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s);
+            crit!(
+                log,
+                "Unable to initialize configuration";
+                "error" => e
+            );
             return;
         }
     };
@@ -227,8 +188,7 @@ fn main() {
     info!(
         log,
        "Starting validator client";
-        "datadir" => client_config.data_dir.to_str(),
-        "spec_constants" => &eth2_config.spec_constants,
+        "datadir" => client_config.full_data_dir().expect("Unable to find datadir").to_str(),
     );

     let result = match eth2_config.spec_constants.as_str() {
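The new `Result<T>` alias (restored above as `type Result<T> = core::result::Result<T, String>;`) keeps every fallible configuration helper on plain string errors, so `?` and `format!`-based `map_err` compose directly. A sketch of the parse-and-wrap pattern that `get_configs` uses below:

```rust
type Result<T> = core::result::Result<T, String>;

// Any parse failure is folded into the shared string-error type.
fn parse_port(s: &str) -> Result<u16> {
    s.parse::<u16>()
        .map_err(|e| format!("Unable to parse port: {:?}", e))
}
```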
@@ -260,3 +220,135 @@ fn main() {
         Err(e) => crit!(log, "Validator client exited with error"; "error" => e.to_string()),
     }
 }
+
+/// Parses the CLI arguments and attempts to load the client and eth2 configuration.
+///
+/// This is not a pure function, it reads from disk and may contact network servers.
+pub fn get_configs(
+    cli_args: &ArgMatches,
+    mut log: &mut Logger,
+) -> Result<(ClientConfig, Eth2Config)> {
+    let mut client_config = ClientConfig::default();
+
+    client_config.apply_cli_args(&cli_args, &mut log)?;
+
+    if let Some(server) = cli_args.value_of("server") {
+        client_config.server = server.to_string();
+    }
+
+    if let Some(port) = cli_args.value_of("server-http-port") {
+        client_config.server_http_port = port
+            .parse::<u16>()
+            .map_err(|e| format!("Unable to parse HTTP port: {:?}", e))?;
+    }
+
+    if let Some(port) = cli_args.value_of("server-grpc-port") {
+        client_config.server_grpc_port = port
+            .parse::<u16>()
+            .map_err(|e| format!("Unable to parse gRPC port: {:?}", e))?;
+    }
+
+    info!(
+        *log,
+        "Beacon node connection info";
+        "grpc_port" => client_config.server_grpc_port,
+        "http_port" => client_config.server_http_port,
+        "server" => &client_config.server,
+    );
+
+    let (client_config, eth2_config) = match cli_args.subcommand() {
+        ("testnet", Some(sub_cli_args)) => {
+            if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") {
+                return Err(
+                    "Cannot specify --eth2-config and --bootstrap as it may result \
+                     in ambiguity."
+                        .into(),
+                );
+            }
+            process_testnet_subcommand(sub_cli_args, client_config, log)
+        }
+        _ => return Err("You must use the testnet command. See '--help'.".into()),
+    }?;
+
+    Ok((client_config, eth2_config))
+}
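When `--bootstrap` is passed, the eth2 config comes from the beacon node's HTTP API rather than a local TOML file. The essential flow, extracted as a sketch that uses only the `Bootstrapper` calls appearing in this diff (the function name and parameters here are illustrative):

```rust
use eth2_config::Eth2Config;
use lighthouse_bootstrap::Bootstrapper;
use slog::Logger;

type Result<T> = core::result::Result<T, String>;

// Connect to the node's HTTP API and download its Eth2Config, as the
// `--bootstrap` branch of `process_testnet_subcommand` does below.
fn bootstrap_eth2_config(server: &str, http_port: u16, log: &Logger) -> Result<Eth2Config> {
    let bootstrapper = Bootstrapper::connect(format!("http://{}:{}", server, http_port), log)?;
    bootstrapper.eth2_config()
}
```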
+
+/// Parses the `testnet` CLI subcommand.
+///
+/// This is not a pure function, it reads from disk and may contact network servers.
+fn process_testnet_subcommand(
+    cli_args: &ArgMatches,
+    mut client_config: ClientConfig,
+    log: &Logger,
+) -> Result<(ClientConfig, Eth2Config)> {
+    let eth2_config = if cli_args.is_present("bootstrap") {
+        info!(log, "Connecting to bootstrap server");
+        let bootstrapper = Bootstrapper::connect(
+            format!(
+                "http://{}:{}",
+                client_config.server, client_config.server_http_port
+            ),
+            &log,
+        )?;
+
+        let eth2_config = bootstrapper.eth2_config()?;
+
+        info!(
+            log,
+            "Bootstrapped eth2 config via HTTP";
+            "slot_time_millis" => eth2_config.spec.milliseconds_per_slot,
+            "spec" => &eth2_config.spec_constants,
+        );
+
+        eth2_config
+    } else {
+        match cli_args.value_of("spec") {
+            Some("mainnet") => Eth2Config::mainnet(),
+            Some("minimal") => Eth2Config::minimal(),
+            Some("interop") => Eth2Config::interop(),
+            _ => return Err("No --spec flag provided. See '--help'.".into()),
+        }
+    };
+
+    client_config.key_source = match cli_args.subcommand() {
+        ("insecure", Some(sub_cli_args)) => {
+            let first = sub_cli_args
+                .value_of("first_validator")
+                .ok_or_else(|| "No first validator supplied")?
+                .parse::<usize>()
+                .map_err(|e| format!("Unable to parse first validator: {:?}", e))?;
+            let count = sub_cli_args
+                .value_of("validator_count")
+                .ok_or_else(|| "No validator count supplied")?
+                .parse::<usize>()
+                .map_err(|e| format!("Unable to parse validator count: {:?}", e))?;
+
+            info!(
+                log,
+                "Generating unsafe testing keys";
+                "first_validator" => first,
+                "count" => count
+            );
+
+            KeySource::TestingKeypairRange(first..first + count)
+        }
+        ("interop-yaml", Some(sub_cli_args)) => {
+            let path = sub_cli_args
+                .value_of("path")
+                .ok_or_else(|| "No yaml path supplied")?
+                .parse::<PathBuf>()
+                .map_err(|e| format!("Unable to parse yaml path: {:?}", e))?;
+
+            info!(
+                log,
+                "Loading keypairs from interop YAML format";
+                "path" => format!("{:?}", path),
+            );
+
+            KeySource::YamlKeypairs(path)
+        }
+        _ => KeySource::Disk,
+    };
+
+    Ok((client_config, eth2_config))
+}
diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs
index 3ddb96e4c..a7974594d 100644
--- a/validator_client/src/service.rs
+++ b/validator_client/src/service.rs
@@ -13,7 +13,6 @@ use crate::block_producer::{BeaconBlockGrpcClient, BlockProducer};
 use crate::config::Config as ValidatorConfig;
 use crate::duties::{BeaconNodeDuties, DutiesManager, EpochDutiesMap};
 use crate::error as error_chain;
-use crate::error::ErrorKind;
 use crate::signer::Signer;
 use bls::Keypair;
 use eth2_config::Eth2Config;
@@ -23,12 +22,12 @@ use protos::services_grpc::{
     AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient,
     ValidatorServiceClient,
 };
-use slog::{crit, error, info, warn};
+use slog::{crit, error, info, trace, warn};
 use slot_clock::{SlotClock, SystemTimeSlotClock};
 use std::marker::PhantomData;
 use std::sync::Arc;
 use std::sync::RwLock;
-use std::time::{Duration, Instant, SystemTime};
+use std::time::{Duration, Instant};
 use tokio::prelude::*;
 use tokio::runtime::Builder;
 use tokio::timer::Interval;
@@ -47,8 +46,8 @@ pub struct Service<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec>
-    /// The current slot we are processing.
-    current_slot: Slot,
+    /// The current slot we are processing; `None` if no slot has been processed yet.
+    current_slot: Option<Slot>,
     slots_per_epoch: u64,
     /// The chain specification for this clients instance.
     spec: Arc<ChainSpec>,
@@ -74,12 +73,15 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E>
     ) -> error_chain::Result<Service<ValidatorServiceClient, Keypair, E>> {
-        // initialise the beacon node client to check for a connection
+        let server_url = format!(
+            "{}:{}",
+            client_config.server, client_config.server_grpc_port
+        );
         let env = Arc::new(EnvBuilder::new().build());
         // Beacon node gRPC beacon node endpoints.
         let beacon_node_client = {
-            let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server);
+            let ch = ChannelBuilder::new(env.clone()).connect(&server_url);
             BeaconNodeServiceClient::new(ch)
         };

@@ -87,25 +89,17 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E>
             Err(e) => {
-                warn!(log, "Could not connect to node. Error: {}", e);
-                info!(log, "Retrying in 5 seconds...");
-                std::thread::sleep(Duration::from_secs(5));
+                let retry_seconds = 5;
+                warn!(
+                    log,
+                    "Could not connect to beacon node";
+                    "error" => format!("{:?}", e),
+                    "retry_in" => format!("{} seconds", retry_seconds),
+                );
+                std::thread::sleep(Duration::from_secs(retry_seconds));
                 continue;
             }
             Ok(info) => {
-                // verify the node's genesis time
-                if SystemTime::now()
-                    .duration_since(SystemTime::UNIX_EPOCH)
-                    .unwrap()
-                    .as_secs()
-                    < info.genesis_time
-                {
-                    error!(
-                        log,
-                        "Beacon Node's genesis time is in the future. No work to do.\n Exiting"
-                    );
-                    return Err("Genesis time in the future".into());
-                }
                 // verify the node's network id
                 if eth2_config.spec.network_id != info.network_id as u8 {
                     error!(
@@ -123,7 +117,13 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E>
-        info!(log, "Connected to beacon node"; "Version" => node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time);
+        info!(
+            log,
+            "Beacon node connected";
+            "version" => node_info.version.clone(),
+            "network_id" => node_info.network_id,
+            "genesis_time" => genesis_time
+        );

         let proto_fork = node_info.get_fork();
         let mut previous_version: [u8; 4] = [0; 4];
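The connection loop above now logs structured fields and retries indefinitely rather than giving up. The shape of that loop, as a standalone sketch with a generic `try_connect` closure standing in for the gRPC `node_info` call:

```rust
use std::time::Duration;

// Keep attempting the connection; on failure, log, sleep, and retry.
fn connect_with_retry<T, E: std::fmt::Debug>(mut try_connect: impl FnMut() -> Result<T, E>) -> T {
    let retry_seconds = 5;
    loop {
        match try_connect() {
            Ok(conn) => return conn,
            Err(e) => {
                eprintln!("Could not connect: {:?}; retrying in {}s", e, retry_seconds);
                std::thread::sleep(Duration::from_secs(retry_seconds));
            }
        }
    }
}
```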
Exiting.".into() - })?; - /* Generate the duties manager */ // Load generated keypairs - let keypairs = match client_config.fetch_keys(&log) { - Some(kps) => Arc::new(kps), - None => { - return Err("Unable to locate validator key pairs, nothing to do.".into()); - } - }; + let keypairs = Arc::new(client_config.fetch_keys(&log)?); let slots_per_epoch = E::slots_per_epoch(); @@ -207,7 +195,7 @@ impl Service Service::initialize_service( client_config, eth2_config, - log, + log.clone(), )?; // we have connected to a node and established its parameters. Spin up the core service @@ -244,19 +232,31 @@ impl Service(|| { - "Genesis is not in the past. Exiting.".into() + "Unable to determine duration to next slot. Exiting.".into() })?; // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_secs(service.spec.seconds_per_slot); + let slot_duration = Duration::from_millis(service.spec.milliseconds_per_slot); //TODO: Handle checked add correctly Interval::new(Instant::now() + duration_to_next_slot, slot_duration) }; + if service.slot_clock.now().is_none() { + warn!( + log, + "Starting node prior to genesis"; + ); + } + + info!( + log, + "Waiting for next slot"; + "seconds_to_wait" => duration_to_next_slot.as_secs() + ); + /* kick off the core service */ runtime.block_on( interval @@ -286,35 +286,39 @@ impl Service error_chain::Result<()> { - let current_slot = match self.slot_clock.present_slot() { - Err(e) => { - error!(self.log, "SystemTimeError {:?}", e); - return Err("Could not read system time".into()); - } - Ok(slot) => slot.ok_or_else::(|| { + let wall_clock_slot = self + .slot_clock + .now() + .ok_or_else::(|| { "Genesis is not in the past. Exiting.".into() - })?, - }; + })?; - let current_epoch = current_slot.epoch(self.slots_per_epoch); + let wall_clock_epoch = wall_clock_slot.epoch(self.slots_per_epoch); // this is a non-fatal error. If the slot clock repeats, the node could // have been slow to process the previous slot and is now duplicating tasks. // We ignore duplicated but raise a critical error. - if current_slot <= self.current_slot { - crit!( - self.log, - "The validator tried to duplicate a slot. Likely missed the previous slot" - ); - return Err("Duplicate slot".into()); + if let Some(current_slot) = self.current_slot { + if wall_clock_slot <= current_slot { + crit!( + self.log, + "The validator tried to duplicate a slot. Likely missed the previous slot" + ); + return Err("Duplicate slot".into()); + } } - self.current_slot = current_slot; - info!(self.log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64()); + self.current_slot = Some(wall_clock_slot); + info!(self.log, "Processing"; "slot" => wall_clock_slot.as_u64(), "epoch" => wall_clock_epoch.as_u64()); Ok(()) } @@ -322,7 +326,17 @@ impl Service current_epoch + ); + // spawn a new thread separate to the runtime // TODO: Handle thread termination/timeout // TODO: Add duties thread back in, with channel to process duties in duty change. 
@@ -322,7 +326,17 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E>
+        trace!(
+            self.log,
+            "Processing duties";
+            "epoch" => current_epoch
+        );
+
         // spawn a new thread separate to the runtime
         // TODO: Handle thread termination/timeout
         // TODO: Add duties thread back in, with channel to process duties in duty change.
@@ -336,20 +350,36 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E>
+        trace!(
+            self.log,
+            "Processing work";
+            "items" => work.len()
+        );
+
         for (signer_index, work_type) in work {
             if work_type.produce_block {
                 // we need to produce a block
                 // spawns a thread to produce a beacon block
                 let signers = self.duties_manager.signers.clone(); // this is an arc
                 let fork = self.fork.clone();
-                let slot = self.current_slot;
+                let slot = self
+                    .current_slot
+                    .expect("The current slot must be updated before processing duties");
                 let spec = self.spec.clone();
                 let beacon_node = self.beacon_block_client.clone();
                 let log = self.log.clone();
                 let slots_per_epoch = self.slots_per_epoch;
                 std::thread::spawn(move || {
-                    info!(log, "Producing a block"; "Validator"=> format!("{}", signers[signer_index]));
+                    info!(
+                        log,
+                        "Producing a block";
+                        "validator"=> format!("{}", signers[signer_index]),
+                        "slot"=> slot
+                    );
                     let signer = &signers[signer_index];
                     let mut block_producer = BlockProducer {
                         fork,
@@ -359,13 +389,17 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E>
                         spec,
                         beacon_node,
                         signer,
                         slots_per_epoch,
                         _phantom: PhantomData::<E>,
+                        log,
                     };
-                    block_producer.handle_produce_block(log);
+                    block_producer.handle_produce_block();
                 });
             }
             if work_type.attestation_duty.is_some() {
                 // we need to produce an attestation
                 // spawns a thread to produce and sign an attestation
+                let slot = self
+                    .current_slot
+                    .expect("The current slot must be updated before processing duties");
                 let signers = self.duties_manager.signers.clone(); // this is an arc
                 let fork = self.fork.clone();
                 let spec = self.spec.clone();
@@ -373,7 +407,12 @@ impl<B: BeaconNodeDuties + 'static, S: Signer + 'static, E: EthSpec> Service<B, S, E>
                 std::thread::spawn(move || {
-                    info!(log, "Producing an attestation"; "Validator"=> format!("{}", signers[signer_index]));
+                    info!(
+                        log,
+                        "Producing an attestation";
+                        "validator"=> format!("{}", signers[signer_index]),
+                        "slot"=> slot
+                    );
                     let signer = &signers[signer_index];
                     let mut attestation_producer = AttestationProducer {
                         fork,