diff --git a/Cargo.toml b/Cargo.toml index 1fb9bd1ac..e9acb2be4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,8 +24,9 @@ members = [ "eth2/utils/fisher_yates_shuffle", "eth2/utils/test_random_derive", "beacon_node", - "beacon_node/db", + "beacon_node/store", "beacon_node/client", + "beacon_node/http_server", "beacon_node/network", "beacon_node/eth2-libp2p", "beacon_node/rpc", diff --git a/README.md b/README.md index 879f9b8fe..2151a0db8 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,7 @@ tests and benchmarks which may be of interest. A few basic steps are needed to get set up: - 1. Install [rustup](https://rustup.rs/). It's a toolchain manager for Rust (Linux | macos | Windows). For installation run the below command in your terminal `$ curl https://sh.rustup.rs -sSf | sh` + 1. Install [rustup](https://rustup.rs/). It's a toolchain manager for Rust (Linux | macOS | Windows). For installation, download the script with `$ curl -f https://sh.rustup.rs > rustup.sh`, review its content (e.g. `$ less ./rustup.sh`) and run the script `$ ./rustup.sh` (you may need to change the permissions to allow execution, i.e. `$ chmod +x rustup.sh`) 2. (Linux & MacOS) To configure your current shell run: `$ source $HOME/.cargo/env` 3. Use the command `rustup show` to get information about the Rust installation. You should see that the active toolchain is the stable version. @@ -176,4 +176,4 @@ Ping @paulhauner or @AgeManning to get the quickest response. 
If you support the cause, we could certainly use donations to help fund development: -`0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b` +`0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b` (donation.sigmaprime.eth) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 37d96a497..d78a5b596 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] types = { path = "../eth2/types" } +store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 34b6e11c6..3a84256a7 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] bls = { path = "../../eth2/utils/bls" } boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" } -db = { path = "../db" } +store = { path = "../store" } failure = "0.1" failure_derive = "0.1" hashing = { path = "../../eth2/utils/hashing" } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9a55851a4..3acca74af 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,16 +1,11 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, DBError, -}; use fork_choice::{ForkChoice, ForkChoiceError}; use log::{debug, trace}; use operation_pool::DepositInsertStatus; use operation_pool::OperationPool; use parking_lot::{RwLock, RwLockReadGuard}; use slot_clock::SlotClock; -use ssz::ssz_encode; use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, ExitValidationError, ProposerSlashingValidationError, TransferValidationError, @@ -20,6 +15,7 @@ use state_processing::{ 
per_slot_processing, BlockProcessingError, SlotProcessingError, }; use std::sync::Arc; +use store::{Error as DBError, Store}; use types::*; #[derive(Debug, PartialEq)] @@ -83,40 +79,39 @@ impl BlockProcessingOutcome { } } -pub struct BeaconChain { - pub block_store: Arc>, - pub state_store: Arc>, - pub slot_clock: U, - pub op_pool: OperationPool, - canonical_head: RwLock>, - finalized_head: RwLock>, - pub state: RwLock>, - pub spec: ChainSpec, - pub fork_choice: RwLock, +pub trait BeaconChainTypes { + type Store: store::Store; + type SlotClock: slot_clock::SlotClock; + type ForkChoice: fork_choice::ForkChoice; + type EthSpec: types::EthSpec; } -impl BeaconChain -where - T: ClientDB, - U: SlotClock, - F: ForkChoice, - E: EthSpec, -{ +pub struct BeaconChain { + pub store: Arc, + pub slot_clock: T::SlotClock, + pub op_pool: OperationPool, + canonical_head: RwLock>, + finalized_head: RwLock>, + pub state: RwLock>, + pub spec: ChainSpec, + pub fork_choice: RwLock, +} + +impl BeaconChain { /// Instantiate a new Beacon Chain, from genesis. 
pub fn from_genesis( - state_store: Arc>, - block_store: Arc>, - slot_clock: U, - mut genesis_state: BeaconState, + store: Arc, + slot_clock: T::SlotClock, + mut genesis_state: BeaconState, genesis_block: BeaconBlock, spec: ChainSpec, - fork_choice: F, + fork_choice: T::ForkChoice, ) -> Result { let state_root = genesis_state.canonical_root(); - state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; + store.put(&state_root, &genesis_state)?; let block_root = genesis_block.block_header().canonical_root(); - block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; + store.put(&block_root, &genesis_block)?; let finalized_head = RwLock::new(CheckPoint::new( genesis_block.clone(), @@ -134,8 +129,7 @@ where genesis_state.build_all_caches(&spec)?; Ok(Self { - block_store, - state_store, + store, slot_clock, op_pool: OperationPool::new(), state: RwLock::new(genesis_state), @@ -230,12 +224,12 @@ where Err(BeaconStateError::SlotOutOfBounds) => { // Read the earliest historic state in the current slot. let earliest_historic_slot = - state.slot - Slot::from(E::SlotsPerHistoricalRoot::to_usize()); + state.slot - Slot::from(T::EthSpec::slots_per_historical_root()); // Load the earlier state from disk. let new_state_root = state.get_state_root(earliest_historic_slot)?; // Break if the DB is unable to load the state. - state = match self.state_store.get_deserialized(&new_state_root) { + state = match self.store.get(&new_state_root) { Ok(Some(state)) => state, _ => break, } @@ -262,7 +256,7 @@ where /// /// May return a database error. pub fn get_block(&self, block_root: &Hash256) -> Result, Error> { - Ok(self.block_store.get_deserialized(block_root)?) + Ok(self.store.get(block_root)?) } /// Update the canonical head to some new values. 
@@ -270,7 +264,7 @@ where &self, new_beacon_block: BeaconBlock, new_beacon_block_root: Hash256, - new_beacon_state: BeaconState, + new_beacon_state: BeaconState, new_beacon_state_root: Hash256, ) { debug!( @@ -292,7 +286,7 @@ where /// It is important to note that the `beacon_state` returned may not match the present slot. It /// is the state as it was when the head block was received, which could be some slots prior to /// now. - pub fn head(&self) -> RwLockReadGuard> { + pub fn head(&self) -> RwLockReadGuard> { self.canonical_head.read() } @@ -302,7 +296,7 @@ where /// state and calling `catchup_state` as it will not result in an old state being installed and /// then having it iteratively updated -- in such a case it's possible for another thread to /// find the state at an old slot. - pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> { + pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> { let present_slot = match self.slot_clock.present_slot() { Ok(Some(slot)) => slot, _ => return Err(Error::UnableToReadSlot), @@ -357,7 +351,7 @@ where &self, new_beacon_block: BeaconBlock, new_beacon_block_root: Hash256, - new_beacon_state: BeaconState, + new_beacon_state: BeaconState, new_beacon_state_root: Hash256, ) { let mut finalized_head = self.finalized_head.write(); @@ -371,7 +365,7 @@ where /// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen, /// indirectly, by the fork-choice rule). - pub fn finalized_head(&self) -> RwLockReadGuard> { + pub fn finalized_head(&self) -> RwLockReadGuard> { self.finalized_head.read() } @@ -588,7 +582,7 @@ where // Load the blocks parent block from the database, returning invalid if that block is not // found. let parent_block_root = block.previous_block_root; - let parent_block = match self.block_store.get_deserialized(&parent_block_root)? { + let parent_block: BeaconBlock = match self.store.get(&parent_block_root)? 
{ Some(previous_block_root) => previous_block_root, None => { return Ok(BlockProcessingOutcome::InvalidBlock( @@ -601,15 +595,15 @@ where // It is an error because if know the parent block we should also know the parent state. let parent_state_root = parent_block.state_root; let parent_state = self - .state_store - .get_deserialized(&parent_state_root)? + .store + .get(&parent_state_root)? .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?; // TODO: check the block proposer signature BEFORE doing a state transition. This will // significantly lower exposure surface to DoS attacks. // Transition the parent state to the block slot. - let mut state = parent_state; + let mut state: BeaconState = parent_state; for _ in state.slot.as_u64()..block.slot.as_u64() { if let Err(e) = per_slot_processing(&mut state, &self.spec) { return Ok(BlockProcessingOutcome::InvalidBlock( @@ -635,8 +629,8 @@ where } // Store the block and state. - self.block_store.put(&block_root, &ssz_encode(&block)[..])?; - self.state_store.put(&state_root, &ssz_encode(&state)[..])?; + self.store.put(&block_root, &block)?; + self.store.put(&state_root, &state)?; // run the fork_choice add_block logic self.fork_choice @@ -664,7 +658,7 @@ where pub fn produce_block( &self, randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { + ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { debug!("Producing block at slot {}...", self.state.read().slot); let mut state = self.state.read().clone(); @@ -729,15 +723,15 @@ where .find_head(&present_head, &self.spec)?; if new_head != present_head { - let block = self - .block_store - .get_deserialized(&new_head)? + let block: BeaconBlock = self + .store + .get(&new_head)? .ok_or_else(|| Error::MissingBeaconBlock(new_head))?; let block_root = block.canonical_root(); - let state = self - .state_store - .get_deserialized(&block.state_root)? 
+ let state: BeaconState = self + .store + .get(&block.state_root)? .ok_or_else(|| Error::MissingBeaconState(block.state_root))?; let state_root = state.canonical_root(); @@ -752,14 +746,14 @@ where /// Returns `true` if the given block root has not been processed. pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { - Ok(!self.block_store.exists(beacon_block_root)?) + Ok(!self.store.exists::(beacon_block_root)?) } /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. /// /// This could be a very expensive operation and should only be done in testing/analysis /// activities. - pub fn chain_dump(&self) -> Result>, Error> { + pub fn chain_dump(&self) -> Result>, Error> { let mut dump = vec![]; let mut last_slot = CheckPoint { @@ -778,19 +772,14 @@ where break; // Genesis has been reached. } - let beacon_block = self - .block_store - .get_deserialized(&beacon_block_root)? - .ok_or_else(|| { + let beacon_block: BeaconBlock = + self.store.get(&beacon_block_root)?.ok_or_else(|| { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; let beacon_state_root = beacon_block.state_root; - let beacon_state = self - .state_store - .get_deserialized(&beacon_state_root)? 
- .ok_or_else(|| { - Error::DBInconsistent(format!("Missing state {}", beacon_state_root)) - })?; + let beacon_state = self.store.get(&beacon_state_root)?.ok_or_else(|| { + Error::DBInconsistent(format!("Missing state {}", beacon_state_root)) + })?; let slot = CheckPoint { beacon_block, @@ -811,7 +800,7 @@ where impl From for Error { fn from(e: DBError) -> Error { - Error::DBError(e.message) + Error::DBError(e) } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index a84e4b10e..73884916a 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -20,7 +20,7 @@ pub enum BeaconChainError { UnableToReadSlot, BeaconStateError(BeaconStateError), DBInconsistent(String), - DBError(String), + DBError(store::Error), ForkChoiceError(ForkChoiceError), MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs deleted file mode 100644 index 83b60a4f7..000000000 --- a/beacon_node/beacon_chain/src/initialise.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Initialisation functions to generate a new BeaconChain. -// Note: A new version of ClientTypes may need to be implemented for the lighthouse -// testnet. These are examples. Also. there is code duplication which can/should be cleaned up. 
- -use crate::BeaconChain; -use db::stores::{BeaconBlockStore, BeaconStateStore}; -use db::{DiskDB, MemoryDB}; -use fork_choice::BitwiseLMDGhost; -use slot_clock::SystemTimeSlotClock; -use std::path::PathBuf; -use std::sync::Arc; -use tree_hash::TreeHash; -use types::test_utils::TestingBeaconStateBuilder; -use types::{BeaconBlock, ChainSpec, FewValidatorsEthSpec, FoundationEthSpec, Hash256}; - -//TODO: Correct this for prod -//TODO: Account for historical db -pub fn initialise_beacon_chain( - spec: &ChainSpec, - db_name: Option<&PathBuf>, -) -> Arc< - BeaconChain< - DiskDB, - SystemTimeSlotClock, - BitwiseLMDGhost, - FoundationEthSpec, - >, -> { - // set up the db - let db = Arc::new(DiskDB::open( - db_name.expect("Database directory must be included"), - None, - )); - - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); - - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, &spec); - let (genesis_state, _keypairs) = state_builder.build(); - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - // Slot clock - let slot_clock = SystemTimeSlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ) - .expect("Unable to load SystemTimeSlotClock"); - // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - - // Genesis chain - //TODO: Handle error correctly - Arc::new( - BeaconChain::from_genesis( - state_store.clone(), - block_store.clone(), - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - fork_choice, - ) - .expect("Terminate if beacon chain generation fails"), - ) -} - -/// Initialisation of a test beacon chain, uses an in memory db with fixed genesis time. 
-pub fn initialise_test_beacon_chain( - spec: &ChainSpec, - _db_name: Option<&PathBuf>, -) -> Arc< - BeaconChain< - MemoryDB, - SystemTimeSlotClock, - BitwiseLMDGhost, - FewValidatorsEthSpec, - >, -> { - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); - - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, spec); - let (genesis_state, _keypairs) = state_builder.build(); - - let mut genesis_block = BeaconBlock::empty(spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - // Slot clock - let slot_clock = SystemTimeSlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ) - .expect("Unable to load SystemTimeSlotClock"); - // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - - // Genesis chain - //TODO: Handle error correctly - Arc::new( - BeaconChain::from_genesis( - state_store.clone(), - block_store.clone(), - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - fork_choice, - ) - .expect("Terminate if beacon chain generation fails"), - ) -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d8d85a8a6..9f3058d0b 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,13 +1,12 @@ mod beacon_chain; mod checkpoint; mod errors; -pub mod initialise; -pub mod test_utils; -pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock}; +pub use self::beacon_chain::{ + BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock, ValidBlock, +}; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; -pub use db; pub use fork_choice; pub use parking_lot; pub use slot_clock; @@ -15,4 +14,5 @@ pub use 
state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, ExitValidationError, ProposerSlashingValidationError, TransferValidationError, }; +pub use store; pub use types; diff --git a/beacon_node/beacon_chain/src/test_utils/mod.rs b/beacon_node/beacon_chain/src/test_utils/mod.rs deleted file mode 100644 index ad251a3c9..000000000 --- a/beacon_node/beacon_chain/src/test_utils/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod testing_beacon_chain_builder; - -pub use testing_beacon_chain_builder::TestingBeaconChainBuilder; diff --git a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs deleted file mode 100644 index f7ff3cdae..000000000 --- a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs +++ /dev/null @@ -1,51 +0,0 @@ -pub use crate::{BeaconChain, BeaconChainError, CheckPoint}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - MemoryDB, -}; -use fork_choice::BitwiseLMDGhost; -use slot_clock::TestingSlotClock; -use std::sync::Arc; -use tree_hash::TreeHash; -use types::*; -use types::{test_utils::TestingBeaconStateBuilder, EthSpec, FewValidatorsEthSpec}; - -type TestingBeaconChain = - BeaconChain, E>; - -pub struct TestingBeaconChainBuilder { - state_builder: TestingBeaconStateBuilder, -} - -impl TestingBeaconChainBuilder { - pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain { - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); - let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64()); - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); - - let (genesis_state, _keypairs) = self.state_builder.build(); - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = 
Hash256::from_slice(&genesis_state.tree_hash_root()); - - // Create the Beacon Chain - BeaconChain::from_genesis( - state_store.clone(), - block_store.clone(), - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - fork_choice, - ) - .unwrap() - } -} - -impl From> for TestingBeaconChainBuilder { - fn from(state_builder: TestingBeaconStateBuilder) -> TestingBeaconChainBuilder { - TestingBeaconChainBuilder { state_builder } - } -} diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8956dbb07..387bf1675 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,10 +7,12 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } -db = { path = "../db" } +store = { path = "../store" } +http_server = { path = "../http_server" } rpc = { path = "../rpc" } fork_choice = { path = "../../eth2/fork_choice" } types = { path = "../../eth2/types" } +tree_hash = { path = "../../eth2/utils/tree_hash" } slot_clock = { path = "../../eth2/utils/slot_clock" } error-chain = "0.12.0" slog = "^2.2.3" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs new file mode 100644 index 000000000..b8236c679 --- /dev/null +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -0,0 +1,109 @@ +use crate::ClientConfig; +use beacon_chain::{ + fork_choice::BitwiseLMDGhost, + slot_clock::SystemTimeSlotClock, + store::{DiskStore, MemoryStore, Store}, + BeaconChain, BeaconChainTypes, +}; +use std::sync::Arc; +use tree_hash::TreeHash; +use types::{ + test_utils::TestingBeaconStateBuilder, BeaconBlock, EthSpec, FewValidatorsEthSpec, Hash256, +}; + +/// Provides a new, initialized `BeaconChain` +pub trait InitialiseBeaconChain { + fn initialise_beacon_chain(config: &ClientConfig) -> BeaconChain; +} + +/// A testnet-suitable BeaconChainType, using `MemoryStore`. 
+#[derive(Clone)] +pub struct TestnetMemoryBeaconChainTypes; + +impl BeaconChainTypes for TestnetMemoryBeaconChainTypes { + type Store = MemoryStore; + type SlotClock = SystemTimeSlotClock; + type ForkChoice = BitwiseLMDGhost; + type EthSpec = FewValidatorsEthSpec; +} + +impl InitialiseBeaconChain for TestnetMemoryBeaconChainTypes +where + T: BeaconChainTypes< + Store = MemoryStore, + SlotClock = SystemTimeSlotClock, + ForkChoice = BitwiseLMDGhost, + >, +{ + fn initialise_beacon_chain(_config: &ClientConfig) -> BeaconChain { + initialize_chain(MemoryStore::open()) + } +} + +/// A testnet-suitable BeaconChainType, using `DiskStore`. +#[derive(Clone)] +pub struct TestnetDiskBeaconChainTypes; + +impl BeaconChainTypes for TestnetDiskBeaconChainTypes { + type Store = DiskStore; + type SlotClock = SystemTimeSlotClock; + type ForkChoice = BitwiseLMDGhost; + type EthSpec = FewValidatorsEthSpec; +} + +impl InitialiseBeaconChain for TestnetDiskBeaconChainTypes +where + T: BeaconChainTypes< + Store = DiskStore, + SlotClock = SystemTimeSlotClock, + ForkChoice = BitwiseLMDGhost, + >, +{ + fn initialise_beacon_chain(config: &ClientConfig) -> BeaconChain { + let store = DiskStore::open(&config.db_name).expect("Unable to open DB."); + + initialize_chain(store) + } +} + +/// Produces a `BeaconChain` given some pre-initialized `Store`. 
+fn initialize_chain(store: U) -> BeaconChain +where + T: BeaconChainTypes< + Store = U, + SlotClock = SystemTimeSlotClock, + ForkChoice = BitwiseLMDGhost, + >, +{ + let spec = T::EthSpec::spec(); + + let store = Arc::new(store); + + let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, &spec); + let (genesis_state, _keypairs) = state_builder.build(); + + let mut genesis_block = BeaconBlock::empty(&spec); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); + + // Slot clock + let slot_clock = SystemTimeSlotClock::new( + spec.genesis_slot, + genesis_state.genesis_time, + spec.seconds_per_slot, + ) + .expect("Unable to load SystemTimeSlotClock"); + // Choose the fork choice + let fork_choice = BitwiseLMDGhost::new(store.clone()); + + // Genesis chain + //TODO: Handle error correctly + BeaconChain::from_genesis( + store, + slot_clock, + genesis_state, + genesis_block, + spec.clone(), + fork_choice, + ) + .expect("Terminate if beacon chain generation fails") +} diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index d84b63f4f..243848e9f 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -1,6 +1,6 @@ use clap::ArgMatches; -use db::DBType; use fork_choice::ForkChoiceAlgorithm; +use http_server::HttpServerConfig; use network::NetworkConfig; use slog::error; use std::fs; @@ -12,6 +12,12 @@ use types::multiaddr::ToMultiaddr; use types::Multiaddr; use types::{ChainSpec, EthSpec, LighthouseTestnetEthSpec}; +#[derive(Debug, Clone)] +pub enum DBType { + Memory, + Disk, +} + /// Stores the client configuration for this Lighthouse instance. 
#[derive(Debug, Clone)] pub struct ClientConfig { @@ -22,7 +28,7 @@ pub struct ClientConfig { pub db_type: DBType, pub db_name: PathBuf, pub rpc_conf: rpc::RPCConfig, - //pub ipc_conf: + pub http_conf: HttpServerConfig, //pub ipc_conf: } impl Default for ClientConfig { @@ -48,8 +54,9 @@ impl Default for ClientConfig { // default to memory db for now db_type: DBType::Memory, // default db name for disk-based dbs - db_name: data_dir.join("chain.db"), + db_name: data_dir.join("chain_db"), rpc_conf: rpc::RPCConfig::default(), + http_conf: HttpServerConfig::default(), } } } @@ -90,13 +97,15 @@ impl ClientConfig { } // Custom bootnodes - // TODO: Handle list of addresses if let Some(boot_addresses_str) = args.value_of("boot-nodes") { - if let Ok(boot_address) = boot_addresses_str.parse::() { - config.net_conf.boot_nodes.append(&mut vec![boot_address]); - } else { - error!(log, "Invalid Bootnode multiaddress"; "Multiaddr" => boot_addresses_str); - return Err("Invalid IP Address"); + let mut boot_addresses_split = boot_addresses_str.split(","); + for boot_address in boot_addresses_split { + if let Ok(boot_address) = boot_address.parse::() { + config.net_conf.boot_nodes.append(&mut vec![boot_address]); + } else { + error!(log, "Invalid Bootnode multiaddress"; "Multiaddr" => boot_addresses_str); + return Err("Invalid IP Address"); + } } } @@ -131,6 +140,12 @@ impl ClientConfig { } } + match args.value_of("db") { + Some("disk") => config.db_type = DBType::Disk, + Some("memory") => config.db_type = DBType::Memory, + _ => unreachable!(), // clap prevents this. 
+ }; + Ok(config) } } diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs deleted file mode 100644 index 8c9352d7c..000000000 --- a/beacon_node/client/src/client_types.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::{ArcBeaconChain, ClientConfig}; -use beacon_chain::{ - db::{ClientDB, DiskDB, MemoryDB}, - fork_choice::BitwiseLMDGhost, - initialise, - slot_clock::{SlotClock, SystemTimeSlotClock}, -}; -use fork_choice::ForkChoice; -use types::{EthSpec, FewValidatorsEthSpec, FoundationEthSpec}; - -pub trait ClientTypes { - type DB: ClientDB + 'static; - type SlotClock: SlotClock + 'static; - type ForkChoice: ForkChoice + 'static; - type EthSpec: EthSpec + 'static; - - fn initialise_beacon_chain( - config: &ClientConfig, - ) -> ArcBeaconChain; -} - -pub struct StandardClientType; - -impl ClientTypes for StandardClientType { - type DB = DiskDB; - type SlotClock = SystemTimeSlotClock; - type ForkChoice = BitwiseLMDGhost; - type EthSpec = FoundationEthSpec; - - fn initialise_beacon_chain( - config: &ClientConfig, - ) -> ArcBeaconChain { - initialise::initialise_beacon_chain(&config.spec, Some(&config.db_name)) - } -} - -pub struct TestingClientType; - -impl ClientTypes for TestingClientType { - type DB = MemoryDB; - type SlotClock = SystemTimeSlotClock; - type ForkChoice = BitwiseLMDGhost; - type EthSpec = FewValidatorsEthSpec; - - fn initialise_beacon_chain( - config: &ClientConfig, - ) -> ArcBeaconChain { - initialise::initialise_test_beacon_chain(&config.spec, None) - } -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 5d7c221ef..40be9b7b8 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,16 +1,13 @@ extern crate slog; +mod beacon_chain_types; mod client_config; -pub mod client_types; pub mod error; pub mod notifier; use beacon_chain::BeaconChain; -pub use client_config::ClientConfig; -pub use client_types::ClientTypes; -use db::ClientDB; +use 
beacon_chain_types::InitialiseBeaconChain; use exit_future::Signal; -use fork_choice::ForkChoice; use futures::{future::Future, Stream}; use network::Service as NetworkService; use slog::{error, info, o}; @@ -20,21 +17,24 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; -use types::EthSpec; -type ArcBeaconChain = Arc>; +pub use beacon_chain::BeaconChainTypes; +pub use beacon_chain_types::{TestnetDiskBeaconChainTypes, TestnetMemoryBeaconChainTypes}; +pub use client_config::{ClientConfig, DBType}; /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. -pub struct Client { +pub struct Client { /// Configuration for the lighthouse client. _config: ClientConfig, /// The beacon chain for the running client. - _beacon_chain: ArcBeaconChain, + _beacon_chain: Arc>, /// Reference to the network service. - pub network: Arc>, + pub network: Arc>, /// Signal to terminate the RPC server. pub rpc_exit_signal: Option, + /// Signal to terminate the HTTP server. + pub http_exit_signal: Option, /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// The clients logger. @@ -43,7 +43,10 @@ pub struct Client { phantom: PhantomData, } -impl Client { +impl Client +where + T: BeaconChainTypes + InitialiseBeaconChain + Clone + 'static, +{ /// Generate an instance of the client. Spawn and link all internal sub-processes. 
pub fn new( config: ClientConfig, @@ -51,7 +54,7 @@ impl Client { executor: &TaskExecutor, ) -> error::Result { // generate a beacon chain - let beacon_chain = TClientType::initialise_beacon_chain(&config); + let beacon_chain = Arc::new(T::initialise_beacon_chain(&config)); if beacon_chain.read_slot_clock().is_none() { panic!("Cannot start client before genesis!") @@ -98,7 +101,7 @@ impl Client { Some(rpc::start_server( &config.rpc_conf, executor, - network_send, + network_send.clone(), beacon_chain.clone(), &log, )) @@ -106,6 +109,17 @@ impl Client { None }; + // Start the `http_server` service. + // + // Note: presently we are ignoring the config and _always_ starting an HTTP server. + let http_exit_signal = Some(http_server::start_service( + &config.http_conf, + executor, + network_send, + beacon_chain.clone(), + &log, + )); + let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot @@ -135,6 +149,7 @@ impl Client { Ok(Client { _config: config, _beacon_chain: beacon_chain, + http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), log, @@ -144,13 +159,7 @@ impl Client { } } -fn do_state_catchup(chain: &Arc>, log: &slog::Logger) -where - T: ClientDB, - U: SlotClock, - F: ForkChoice, - E: EthSpec, -{ +fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { if let Some(genesis_height) = chain.slots_since_genesis() { let result = chain.catchup_state(); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index aa1e43c3c..977342b1a 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,5 +1,5 @@ use crate::Client; -use crate::ClientTypes; +use beacon_chain::BeaconChainTypes; use exit_future::Exit; use futures::{Future, Stream}; use slog::{debug, o}; @@ -10,7 +10,11 @@ use
tokio::timer::Interval; /// Thread that monitors the client and reports useful statistics to the user. -pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) { +pub fn run( + client: &Client, + executor: TaskExecutor, + exit: Exit, +) { // notification heartbeat let interval = Interval::new(Instant::now(), Duration::from_secs(5)); diff --git a/beacon_node/db/Cargo.toml b/beacon_node/db/Cargo.toml deleted file mode 100644 index 122aaa34d..000000000 --- a/beacon_node/db/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "db" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -blake2-rfc = "0.2.18" -bls = { path = "../../eth2/utils/bls" } -bytes = "0.4.10" -rocksdb = "0.10.1" -ssz = { path = "../../eth2/utils/ssz" } -types = { path = "../../eth2/types" } diff --git a/beacon_node/db/src/lib.rs b/beacon_node/db/src/lib.rs deleted file mode 100644 index 5e710ae9a..000000000 --- a/beacon_node/db/src/lib.rs +++ /dev/null @@ -1,21 +0,0 @@ -extern crate blake2_rfc as blake2; -extern crate bls; -extern crate rocksdb; - -mod disk_db; -mod memory_db; -pub mod stores; -mod traits; - -use self::stores::COLUMNS; - -pub use self::disk_db::DiskDB; -pub use self::memory_db::MemoryDB; -pub use self::traits::{ClientDB, DBError, DBValue}; - -/// Currently available database options -#[derive(Debug, Clone)] -pub enum DBType { - Memory, - RocksDB, -} diff --git a/beacon_node/db/src/memory_db.rs b/beacon_node/db/src/memory_db.rs deleted file mode 100644 index 008e5912f..000000000 --- a/beacon_node/db/src/memory_db.rs +++ /dev/null @@ -1,236 +0,0 @@ -use super::blake2::blake2b::blake2b; -use super::COLUMNS; -use super::{ClientDB, DBError, DBValue}; -use std::collections::{HashMap, HashSet}; -use std::sync::RwLock; - -type DBHashMap = HashMap, Vec>; -type ColumnHashSet = HashSet; - -/// An in-memory database implementing the ClientDB trait. -/// -/// It is not particularily optimized, it exists for ease and speed of testing. 
It's not expected -/// this DB would be used outside of tests. -pub struct MemoryDB { - db: RwLock, - known_columns: RwLock, -} - -impl MemoryDB { - /// Open the in-memory database. - /// - /// All columns must be supplied initially, you will get an error if you try to access a column - /// that was not declared here. This condition is enforced artificially to simulate RocksDB. - pub fn open() -> Self { - let db: DBHashMap = HashMap::new(); - let mut known_columns: ColumnHashSet = HashSet::new(); - for col in &COLUMNS { - known_columns.insert(col.to_string()); - } - Self { - db: RwLock::new(db), - known_columns: RwLock::new(known_columns), - } - } - - /// Hashes a key and a column name in order to get a unique key for the supplied column. - fn get_key_for_col(col: &str, key: &[u8]) -> Vec { - blake2b(32, col.as_bytes(), key).as_bytes().to_vec() - } -} - -impl ClientDB for MemoryDB { - /// Get the value of some key from the database. Returns `None` if the key does not exist. - fn get(&self, col: &str, key: &[u8]) -> Result, DBError> { - // Panic if the DB locks are poisoned. - let db = self.db.read().unwrap(); - let known_columns = self.known_columns.read().unwrap(); - - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - Ok(db.get(&column_key).and_then(|val| Some(val.clone()))) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } - } - - /// Puts a key in the database. - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> { - // Panic if the DB locks are poisoned. - let mut db = self.db.write().unwrap(); - let known_columns = self.known_columns.read().unwrap(); - - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - db.insert(column_key, val.to_vec()); - Ok(()) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } - } - - /// Return true if some key exists in some column. 
- fn exists(&self, col: &str, key: &[u8]) -> Result { - // Panic if the DB locks are poisoned. - let db = self.db.read().unwrap(); - let known_columns = self.known_columns.read().unwrap(); - - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - Ok(db.contains_key(&column_key)) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } - } - - /// Delete some key from the database. - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> { - // Panic if the DB locks are poisoned. - let mut db = self.db.write().unwrap(); - let known_columns = self.known_columns.read().unwrap(); - - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - db.remove(&column_key); - Ok(()) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } - } -} - -#[cfg(test)] -mod tests { - use super::super::stores::{BLOCKS_DB_COLUMN, VALIDATOR_DB_COLUMN}; - use super::super::ClientDB; - use super::*; - use std::sync::Arc; - use std::thread; - - #[test] - fn test_memorydb_can_delete() { - let col_a: &str = BLOCKS_DB_COLUMN; - - let db = MemoryDB::open(); - - db.put(col_a, "dogs".as_bytes(), "lol".as_bytes()).unwrap(); - - assert_eq!( - db.get(col_a, "dogs".as_bytes()).unwrap().unwrap(), - "lol".as_bytes() - ); - - db.delete(col_a, "dogs".as_bytes()).unwrap(); - - assert_eq!(db.get(col_a, "dogs".as_bytes()).unwrap(), None); - } - - #[test] - fn test_memorydb_column_access() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_b: &str = VALIDATOR_DB_COLUMN; - - let db = MemoryDB::open(); - - /* - * Testing that if we write to the same key in different columns that - * there is not an overlap. 
- */ - db.put(col_a, "same".as_bytes(), "cat".as_bytes()).unwrap(); - db.put(col_b, "same".as_bytes(), "dog".as_bytes()).unwrap(); - - assert_eq!( - db.get(col_a, "same".as_bytes()).unwrap().unwrap(), - "cat".as_bytes() - ); - assert_eq!( - db.get(col_b, "same".as_bytes()).unwrap().unwrap(), - "dog".as_bytes() - ); - } - - #[test] - fn test_memorydb_unknown_column_access() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_x: &str = "ColumnX"; - - let db = MemoryDB::open(); - - /* - * Test that we get errors when using undeclared columns - */ - assert!(db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).is_ok()); - assert!(db.put(col_x, "cats".as_bytes(), "lol".as_bytes()).is_err()); - - assert!(db.get(col_a, "cats".as_bytes()).is_ok()); - assert!(db.get(col_x, "cats".as_bytes()).is_err()); - } - - #[test] - fn test_memorydb_exists() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_b: &str = VALIDATOR_DB_COLUMN; - - let db = MemoryDB::open(); - - /* - * Testing that if we write to the same key in different columns that - * there is not an overlap. - */ - db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).unwrap(); - - assert_eq!(true, db.exists(col_a, "cats".as_bytes()).unwrap()); - assert_eq!(false, db.exists(col_b, "cats".as_bytes()).unwrap()); - - assert_eq!(false, db.exists(col_a, "dogs".as_bytes()).unwrap()); - assert_eq!(false, db.exists(col_b, "dogs".as_bytes()).unwrap()); - } - - #[test] - fn test_memorydb_threading() { - let col_name: &str = BLOCKS_DB_COLUMN; - - let db = Arc::new(MemoryDB::open()); - - let thread_count = 10; - let write_count = 10; - - // We're execting the product of these numbers to fit in one byte. 
- assert!(thread_count * write_count <= 255); - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let db = db.clone(); - let col = col_name.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = (t * w) as u8; - let val = 42; - db.put(&col, &vec![key], &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = (t * w) as u8; - let val = db.get(&col_name, &vec![key]).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } - } -} diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs deleted file mode 100644 index 868caafe2..000000000 --- a/beacon_node/db/src/stores/beacon_block_store.rs +++ /dev/null @@ -1,246 +0,0 @@ -use super::BLOCKS_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use ssz::Decode; -use std::sync::Arc; -use types::{BeaconBlock, Hash256, Slot}; - -#[derive(Clone, Debug, PartialEq)] -pub enum BeaconBlockAtSlotError { - UnknownBeaconBlock(Hash256), - InvalidBeaconBlock(Hash256), - DBError(String), -} - -pub struct BeaconBlockStore -where - T: ClientDB, -{ - db: Arc, -} - -// Implements `put`, `get`, `exists` and `delete` for the store. -impl_crud_for_store!(BeaconBlockStore, DB_COLUMN); - -impl BeaconBlockStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { - match self.get(&hash)? { - None => Ok(None), - Some(ssz) => { - let block = BeaconBlock::from_ssz_bytes(&ssz).map_err(|_| DBError { - message: "Bad BeaconBlock SSZ.".to_string(), - })?; - Ok(Some(block)) - } - } - } - - /// Retrieve the block at a slot given a "head_hash" and a slot. - /// - /// A "head_hash" must be a block hash with a slot number greater than or equal to the desired - /// slot. 
- /// - /// This function will read each block down the chain until it finds a block with the given - /// slot number. If the slot is skipped, the function will return None. - /// - /// If a block is found, a tuple of (block_hash, serialized_block) is returned. - /// - /// Note: this function uses a loop instead of recursion as the compiler is over-strict when it - /// comes to recursion and the `impl Trait` pattern. See: - /// https://stackoverflow.com/questions/54032940/using-impl-trait-in-a-recursive-function - pub fn block_at_slot( - &self, - head_hash: &Hash256, - slot: Slot, - ) -> Result, BeaconBlockAtSlotError> { - let mut current_hash = *head_hash; - - loop { - if let Some(block) = self.get_deserialized(¤t_hash)? { - if block.slot == slot { - break Ok(Some((current_hash, block))); - } else if block.slot < slot { - break Ok(None); - } else { - current_hash = block.previous_block_root; - } - } else { - break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash)); - } - } - } -} - -impl From for BeaconBlockAtSlotError { - fn from(e: DBError) -> Self { - BeaconBlockAtSlotError::DBError(e.message) - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - - use std::sync::Arc; - use std::thread; - - use ssz::ssz_encode; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::BeaconBlock; - use types::Hash256; - - test_crud_for_store!(BeaconBlockStore, DB_COLUMN); - - #[test] - fn head_hash_slot_too_low() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - let mut rng = XorShiftRng::from_seed([42; 16]); - - let mut block = BeaconBlock::random_for_test(&mut rng); - block.slot = Slot::from(10_u64); - - let block_root = block.canonical_root(); - bs.put(&block_root, &ssz_encode(&block)).unwrap(); - - let result = bs.block_at_slot(&block_root, Slot::from(11_u64)).unwrap(); - assert_eq!(result, None); - } - - #[test] - fn test_invalid_block_at_slot() { - let db 
= Arc::new(MemoryDB::open()); - let store = BeaconBlockStore::new(db.clone()); - - let ssz = "definitly not a valid block".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!( - store.block_at_slot(hash, Slot::from(42_u64)), - Err(BeaconBlockAtSlotError::DBError( - "Bad BeaconBlock SSZ.".into() - )) - ); - } - - #[test] - fn test_unknown_block_at_slot() { - let db = Arc::new(MemoryDB::open()); - let store = BeaconBlockStore::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!( - store.block_at_slot(other_hash, Slot::from(42_u64)), - Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash)) - ); - } - - #[test] - fn test_block_store_on_memory_db() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - - let thread_count = 10; - let write_count = 10; - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let bs = bs.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = t * w; - let val = 42; - bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = t * w; - assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap()); - let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } - } - - #[test] - #[ignore] - fn test_block_at_slot() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - let mut rng = XorShiftRng::from_seed([42; 16]); - - // Specify test block parameters. 
- let hashes = [ - Hash256::from([0; 32]), - Hash256::from([1; 32]), - Hash256::from([2; 32]), - Hash256::from([3; 32]), - Hash256::from([4; 32]), - ]; - let parent_hashes = [ - Hash256::from([255; 32]), // Genesis block. - Hash256::from([0; 32]), - Hash256::from([1; 32]), - Hash256::from([2; 32]), - Hash256::from([3; 32]), - ]; - let unknown_hash = Hash256::from([101; 32]); // different from all above - let slots: Vec = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect(); - - // Generate a vec of random blocks and store them in the DB. - let block_count = 5; - let mut blocks: Vec = Vec::with_capacity(5); - for i in 0..block_count { - let mut block = BeaconBlock::random_for_test(&mut rng); - - block.previous_block_root = parent_hashes[i]; - block.slot = slots[i]; - - let ssz = ssz_encode(&block); - db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap(); - - blocks.push(block); - } - - // Test that certain slots can be reached from certain hashes. - let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)]; - for (hashes_index, slot_index) in test_cases { - let (matched_block_hash, block) = bs - .block_at_slot(&hashes[hashes_index], slots[slot_index]) - .unwrap() - .unwrap(); - assert_eq!(matched_block_hash, hashes[slot_index]); - assert_eq!(block.slot, slots[slot_index]); - } - - let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap(); - assert_eq!(ssz, None); - - let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap(); - assert_eq!(ssz, None); - - let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2)); - assert_eq!( - ssz, - Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash)) - ); - } -} diff --git a/beacon_node/db/src/stores/beacon_state_store.rs b/beacon_node/db/src/stores/beacon_state_store.rs deleted file mode 100644 index 044290592..000000000 --- a/beacon_node/db/src/stores/beacon_state_store.rs +++ /dev/null @@ -1,65 +0,0 @@ -use super::STATES_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use ssz::Decode; -use 
std::sync::Arc; -use types::{BeaconState, EthSpec, Hash256}; - -pub struct BeaconStateStore -where - T: ClientDB, -{ - db: Arc, -} - -// Implements `put`, `get`, `exists` and `delete` for the store. -impl_crud_for_store!(BeaconStateStore, DB_COLUMN); - -impl BeaconStateStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn get_deserialized( - &self, - hash: &Hash256, - ) -> Result>, DBError> { - match self.get(&hash)? { - None => Ok(None), - Some(ssz) => { - let state = BeaconState::from_ssz_bytes(&ssz).map_err(|_| DBError { - message: "Bad State SSZ.".to_string(), - })?; - Ok(Some(state)) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - - use ssz::ssz_encode; - use std::sync::Arc; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::{FoundationBeaconState, Hash256}; - - test_crud_for_store!(BeaconStateStore, DB_COLUMN); - - #[test] - fn test_reader() { - let db = Arc::new(MemoryDB::open()); - let store = BeaconStateStore::new(db.clone()); - - let mut rng = XorShiftRng::from_seed([42; 16]); - let state: FoundationBeaconState = BeaconState::random_for_test(&mut rng); - let state_root = state.canonical_root(); - - store.put(&state_root, &ssz_encode(&state)).unwrap(); - - let decoded = store.get_deserialized(&state_root).unwrap().unwrap(); - - assert_eq!(state, decoded); - } -} diff --git a/beacon_node/db/src/stores/macros.rs b/beacon_node/db/src/stores/macros.rs deleted file mode 100644 index 6c53e40ee..000000000 --- a/beacon_node/db/src/stores/macros.rs +++ /dev/null @@ -1,103 +0,0 @@ -macro_rules! 
impl_crud_for_store { - ($store: ident, $db_column: expr) => { - impl $store { - pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> { - self.db.put($db_column, hash.as_bytes(), ssz) - } - - pub fn get(&self, hash: &Hash256) -> Result>, DBError> { - self.db.get($db_column, hash.as_bytes()) - } - - pub fn exists(&self, hash: &Hash256) -> Result { - self.db.exists($db_column, hash.as_bytes()) - } - - pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> { - self.db.delete($db_column, hash.as_bytes()) - } - } - }; -} - -#[cfg(test)] -macro_rules! test_crud_for_store { - ($store: ident, $db_column: expr) => { - #[test] - fn test_put() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - store.put(hash, ssz).unwrap(); - assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz); - } - - #[test] - fn test_get() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!(store.get(hash).unwrap().unwrap(), ssz); - } - - #[test] - fn test_get_unknown() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap(); - assert_eq!(store.get(hash).unwrap(), None); - } - - #[test] - fn test_exists() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(store.exists(hash).unwrap()); - } - - #[test] - fn test_block_does_not_exist() { - let db = Arc::new(MemoryDB::open()); - let store = 
$store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(!store.exists(other_hash).unwrap()); - } - - #[test] - fn test_delete() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); - - store.delete(hash).unwrap(); - assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); - } - }; -} diff --git a/beacon_node/db/src/stores/mod.rs b/beacon_node/db/src/stores/mod.rs deleted file mode 100644 index 44de7eed1..000000000 --- a/beacon_node/db/src/stores/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::{ClientDB, DBError}; - -#[macro_use] -mod macros; -mod beacon_block_store; -mod beacon_state_store; -mod pow_chain_store; -mod validator_store; - -pub use self::beacon_block_store::{BeaconBlockAtSlotError, BeaconBlockStore}; -pub use self::beacon_state_store::BeaconStateStore; -pub use self::pow_chain_store::PoWChainStore; -pub use self::validator_store::{ValidatorStore, ValidatorStoreError}; - -pub const BLOCKS_DB_COLUMN: &str = "blocks"; -pub const STATES_DB_COLUMN: &str = "states"; -pub const POW_CHAIN_DB_COLUMN: &str = "powchain"; -pub const VALIDATOR_DB_COLUMN: &str = "validator"; - -pub const COLUMNS: [&str; 4] = [ - BLOCKS_DB_COLUMN, - STATES_DB_COLUMN, - POW_CHAIN_DB_COLUMN, - VALIDATOR_DB_COLUMN, -]; diff --git a/beacon_node/db/src/stores/pow_chain_store.rs b/beacon_node/db/src/stores/pow_chain_store.rs deleted file mode 100644 index 5c8b97907..000000000 --- a/beacon_node/db/src/stores/pow_chain_store.rs +++ /dev/null @@ -1,68 +0,0 @@ -use super::POW_CHAIN_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use std::sync::Arc; - -pub struct PoWChainStore -where - T: ClientDB, -{ - db: 
Arc, -} - -impl PoWChainStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> { - self.db.put(DB_COLUMN, hash, &[0]) - } - - pub fn block_hash_exists(&self, hash: &[u8]) -> Result { - self.db.exists(DB_COLUMN, hash) - } -} - -#[cfg(test)] -mod tests { - extern crate types; - - use super::super::super::MemoryDB; - use super::*; - - use self::types::Hash256; - - #[test] - fn test_put_block_hash() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - store.put_block_hash(hash).unwrap(); - - assert!(db.exists(DB_COLUMN, hash).unwrap()); - } - - #[test] - fn test_block_hash_exists() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - db.put(DB_COLUMN, hash, &[0]).unwrap(); - - assert!(store.block_hash_exists(hash).unwrap()); - } - - #[test] - fn test_block_hash_does_not_exist() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec(); - db.put(DB_COLUMN, hash, &[0]).unwrap(); - - assert!(!store.block_hash_exists(other_hash).unwrap()); - } -} diff --git a/beacon_node/db/src/stores/validator_store.rs b/beacon_node/db/src/stores/validator_store.rs deleted file mode 100644 index f653c9f71..000000000 --- a/beacon_node/db/src/stores/validator_store.rs +++ /dev/null @@ -1,215 +0,0 @@ -extern crate bytes; - -use self::bytes::{BufMut, BytesMut}; -use super::VALIDATOR_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use bls::PublicKey; -use ssz::{Decode, Encode}; -use std::sync::Arc; - -#[derive(Debug, PartialEq)] -pub enum ValidatorStoreError { - DBError(String), - DecodeError, -} - -impl From for ValidatorStoreError { - fn from(error: DBError) -> 
Self { - ValidatorStoreError::DBError(error.message) - } -} - -#[derive(Debug, PartialEq)] -enum KeyPrefixes { - PublicKey, -} - -pub struct ValidatorStore -where - T: ClientDB, -{ - db: Arc, -} - -impl ValidatorStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - fn prefix_bytes(&self, key_prefix: &KeyPrefixes) -> Vec { - match key_prefix { - KeyPrefixes::PublicKey => b"pubkey".to_vec(), - } - } - - fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) -> Vec { - let mut buf = BytesMut::with_capacity(6 + 8); - buf.put(self.prefix_bytes(key_prefix)); - buf.put_u64_be(index as u64); - buf.take().to_vec() - } - - pub fn put_public_key_by_index( - &self, - index: usize, - public_key: &PublicKey, - ) -> Result<(), ValidatorStoreError> { - let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); - let val = public_key.as_ssz_bytes(); - self.db - .put(DB_COLUMN, &key[..], &val[..]) - .map_err(ValidatorStoreError::from) - } - - pub fn get_public_key_by_index( - &self, - index: usize, - ) -> Result, ValidatorStoreError> { - let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); - let val = self.db.get(DB_COLUMN, &key[..])?; - match val { - None => Ok(None), - Some(val) => match PublicKey::from_ssz_bytes(&val) { - Ok(key) => Ok(Some(key)), - Err(_) => Err(ValidatorStoreError::DecodeError), - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - use bls::Keypair; - - #[test] - fn test_prefix_bytes() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - assert_eq!( - store.prefix_bytes(&KeyPrefixes::PublicKey), - b"pubkey".to_vec() - ); - } - - #[test] - fn test_get_db_key_for_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let mut buf = BytesMut::with_capacity(6 + 8); - buf.put(b"pubkey".to_vec()); - buf.put_u64_be(42); - assert_eq!( - store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42), 
- buf.take().to_vec() - ) - } - - #[test] - fn test_put_public_key_by_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let index = 3; - let public_key = Keypair::random().pk; - - store.put_public_key_by_index(index, &public_key).unwrap(); - let public_key_at_index = db - .get( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], - ) - .unwrap() - .unwrap(); - - assert_eq!(public_key_at_index, public_key.as_ssz_bytes()); - } - - #[test] - fn test_get_public_key_by_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let index = 4; - let public_key = Keypair::random().pk; - - db.put( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], - &public_key.as_ssz_bytes(), - ) - .unwrap(); - - let public_key_at_index = store.get_public_key_by_index(index).unwrap().unwrap(); - assert_eq!(public_key_at_index, public_key); - } - - #[test] - fn test_get_public_key_by_unknown_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let public_key = Keypair::random().pk; - - db.put( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, 3)[..], - &public_key.as_ssz_bytes(), - ) - .unwrap(); - - let public_key_at_index = store.get_public_key_by_index(4).unwrap(); - assert_eq!(public_key_at_index, None); - } - - #[test] - fn test_get_invalid_public_key() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42); - db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap(); - - assert_eq!( - store.get_public_key_by_index(42), - Err(ValidatorStoreError::DecodeError) - ); - } - - #[test] - fn test_validator_store_put_get() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db); - - let keys = vec![ - Keypair::random(), - Keypair::random(), - Keypair::random(), - 
Keypair::random(), - Keypair::random(), - ]; - - for i in 0..keys.len() { - store.put_public_key_by_index(i, &keys[i].pk).unwrap(); - } - - /* - * Check all keys are retrieved correctly. - */ - for i in 0..keys.len() { - let retrieved = store.get_public_key_by_index(i).unwrap().unwrap(); - assert_eq!(retrieved, keys[i].pk); - } - - /* - * Check that an index that wasn't stored returns None. - */ - assert!(store - .get_public_key_by_index(keys.len() + 1) - .unwrap() - .is_none()); - } -} diff --git a/beacon_node/db/src/traits.rs b/beacon_node/db/src/traits.rs deleted file mode 100644 index 41be3e23d..000000000 --- a/beacon_node/db/src/traits.rs +++ /dev/null @@ -1,28 +0,0 @@ -pub type DBValue = Vec; - -#[derive(Debug)] -pub struct DBError { - pub message: String, -} - -impl DBError { - pub fn new(message: String) -> Self { - Self { message } - } -} - -/// A generic database to be used by the "client' (i.e., -/// the lighthouse blockchain client). -/// -/// The purpose of having this generic trait is to allow the -/// program to use a persistent on-disk database during production, -/// but use a transient database during tests. 
-pub trait ClientDB: Sync + Send { - fn get(&self, col: &str, key: &[u8]) -> Result, DBError>; - - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError>; - - fn exists(&self, col: &str, key: &[u8]) -> Result; - - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError>; -} diff --git a/beacon_node/http_server/Cargo.toml b/beacon_node/http_server/Cargo.toml new file mode 100644 index 000000000..fb8bf9f4b --- /dev/null +++ b/beacon_node/http_server/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "http_server" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +bls = { path = "../../eth2/utils/bls" } +beacon_chain = { path = "../beacon_chain" } +iron = "^0.6" +router = "^0.6" +network = { path = "../network" } +eth2-libp2p = { path = "../eth2-libp2p" } +version = { path = "../version" } +types = { path = "../../eth2/types" } +ssz = { path = "../../eth2/utils/ssz" } +slot_clock = { path = "../../eth2/utils/slot_clock" } +protos = { path = "../../protos" } +fork_choice = { path = "../../eth2/fork_choice" } +grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } +persistent = "^0.4" +protobuf = "2.0.2" +prometheus = "^0.6" +clap = "2.32.0" +store = { path = "../store" } +dirs = "1.0.3" +futures = "0.1.23" +serde = "1.0" +serde_derive = "1.0" +serde_json = "1.0" +slog = "^2.2.3" +slog-term = "^2.4.0" +slog-async = "^2.3.0" +tokio = "0.1.17" +exit-future = "0.1.4" +crossbeam-channel = "0.3.8" diff --git a/beacon_node/http_server/src/api.rs b/beacon_node/http_server/src/api.rs new file mode 100644 index 000000000..a91080899 --- /dev/null +++ b/beacon_node/http_server/src/api.rs @@ -0,0 +1,71 @@ +use crate::{key::BeaconChainKey, map_persistent_err_to_500}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use iron::prelude::*; +use iron::{ + headers::{CacheControl, CacheDirective, ContentType}, + status::Status, + AfterMiddleware, Handler, IronResult, Request, Response, +}; +use 
persistent::Read; +use router::Router; +use serde_json::json; +use std::sync::Arc; + +/// Yields a handler for the HTTP API. +pub fn build_handler( + beacon_chain: Arc>, +) -> impl Handler { + let mut router = Router::new(); + + router.get("/node/fork", handle_fork::, "fork"); + + let mut chain = Chain::new(router); + + // Insert `BeaconChain` so it may be accessed in a request. + chain.link(Read::>::both(beacon_chain.clone())); + // Set the content-type headers. + chain.link_after(SetJsonContentType); + // Set the cache headers. + chain.link_after(SetCacheDirectives); + + chain +} + +/// Sets the `cache-control` headers on _all_ responses, unless they are already set. +struct SetCacheDirectives; +impl AfterMiddleware for SetCacheDirectives { + fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult { + // This is run for every request, AFTER all handlers have been executed + if resp.headers.get::() == None { + resp.headers.set(CacheControl(vec![ + CacheDirective::NoCache, + CacheDirective::NoStore, + ])); + } + Ok(resp) + } +} + +/// Sets the `content-type` headers on _all_ responses, unless they are already set. 
+struct SetJsonContentType; +impl AfterMiddleware for SetJsonContentType { + fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult { + if resp.headers.get::() == None { + resp.headers.set(ContentType::json()); + } + Ok(resp) + } +} + +fn handle_fork(req: &mut Request) -> IronResult { + let beacon_chain = req + .get::>>() + .map_err(map_persistent_err_to_500)?; + + let response = json!({ + "fork": beacon_chain.head().beacon_state.fork, + "chain_id": beacon_chain.spec.chain_id + }); + + Ok(Response::with((Status::Ok, response.to_string()))) +} diff --git a/beacon_node/http_server/src/key.rs b/beacon_node/http_server/src/key.rs new file mode 100644 index 000000000..2d27ce9f0 --- /dev/null +++ b/beacon_node/http_server/src/key.rs @@ -0,0 +1,12 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use iron::typemap::Key; +use std::marker::PhantomData; +use std::sync::Arc; + +pub struct BeaconChainKey { + _phantom: PhantomData, +} + +impl Key for BeaconChainKey { + type Value = Arc>; +} diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs new file mode 100644 index 000000000..486badaff --- /dev/null +++ b/beacon_node/http_server/src/lib.rs @@ -0,0 +1,118 @@ +mod api; +mod key; +mod metrics; + +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use futures::Future; +use iron::prelude::*; +use network::NetworkMessage; +use router::Router; +use slog::{info, o, warn}; +use std::sync::Arc; +use tokio::runtime::TaskExecutor; + +#[derive(PartialEq, Clone, Debug)] +pub struct HttpServerConfig { + pub enabled: bool, + pub listen_address: String, +} + +impl Default for HttpServerConfig { + fn default() -> Self { + Self { + enabled: false, + listen_address: "127.0.0.1:5051".to_string(), + } + } +} + +/// Build the `iron` HTTP server, defining the core routes. +pub fn create_iron_http_server( + beacon_chain: Arc>, +) -> Iron { + let mut router = Router::new(); + + // A `GET` request to `/metrics` is handled by the `metrics` module. 
+ router.get( + "/metrics", + metrics::build_handler(beacon_chain.clone()), + "metrics", + ); + + // Any request to all other endpoints is handled by the `api` module. + router.any("/*", api::build_handler(beacon_chain.clone()), "api"); + + Iron::new(router) +} + +/// Start the HTTP service on the tokio `TaskExecutor`. +pub fn start_service( + config: &HttpServerConfig, + executor: &TaskExecutor, + _network_chan: crossbeam_channel::Sender, + beacon_chain: Arc>, + log: &slog::Logger, +) -> exit_future::Signal { + let log = log.new(o!("Service"=>"HTTP")); + + // Create: + // - `shutdown_trigger` a one-shot to shut down this service. + // - `wait_for_shutdown` a future that will wait until someone calls shutdown. + let (shutdown_trigger, wait_for_shutdown) = exit_future::signal(); + + // Create an `iron` HTTP server, without starting it yet. + let iron = create_iron_http_server(beacon_chain); + + // Create an HTTP server future. + // + // 1. Start the HTTP server + // 2. Build an exit future that will shutdown the server when requested. + // 3. Return the exit future, so the caller may shutdown the service when desired. + let http_service = { + // Start the HTTP server + let server_start_result = iron.http(config.listen_address.clone()); + + if server_start_result.is_ok() { + info!(log, "HTTP server running on {}", config.listen_address); + } else { + warn!( + log, + "HTTP server failed to start on {}", config.listen_address + ); + } + + // Build a future that will shutdown the HTTP server when the `shutdown_trigger` is + // triggered. + wait_for_shutdown.and_then(move |_| { + info!(log, "HTTP server shutting down"); + + if let Ok(mut server) = server_start_result { + // According to the documentation, `server.close()` "doesn't work" and the server + // keeps listening. + // + // It is being called anyway, because it seems like the right thing to do. If you + // know this has negative side-effects, please create an issue to discuss. 
+ // + // See: https://docs.rs/iron/0.6.0/iron/struct.Listening.html#impl + match server.close() { + _ => (), + }; + } + info!(log, "HTTP server shutdown complete."); + Ok(()) + }) + }; + + // Attach the HTTP server to the executor. + executor.spawn(http_service); + + shutdown_trigger +} + +/// Helper function for mapping a failure to read state to a 500 server error. +fn map_persistent_err_to_500(e: persistent::PersistentError) -> iron::error::IronError { + iron::error::IronError { + error: Box::new(e), + response: iron::Response::with(iron::status::Status::InternalServerError), + } +} diff --git a/beacon_node/http_server/src/metrics.rs b/beacon_node/http_server/src/metrics.rs new file mode 100644 index 000000000..eb7816d0e --- /dev/null +++ b/beacon_node/http_server/src/metrics.rs @@ -0,0 +1,60 @@ +use crate::{key::BeaconChainKey, map_persistent_err_to_500}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use iron::prelude::*; +use iron::{status::Status, Handler, IronResult, Request, Response}; +use persistent::Read; +use prometheus::{Encoder, IntCounter, Opts, Registry, TextEncoder}; +use slot_clock::SlotClock; +use std::sync::Arc; +use types::Slot; + +/// Yields a handler for the metrics endpoint. +pub fn build_handler( + beacon_chain: Arc>, +) -> impl Handler { + let mut chain = Chain::new(handle_metrics::); + + chain.link(Read::>::both(beacon_chain)); + + chain +} + +/// Handle a request for Prometheus metrics. +/// +/// Returns a text string containing all metrics. +fn handle_metrics(req: &mut Request) -> IronResult { + let beacon_chain = req + .get::>>() + .map_err(map_persistent_err_to_500)?; + + let r = Registry::new(); + + let present_slot = if let Ok(Some(slot)) = beacon_chain.slot_clock.present_slot() { + slot + } else { + Slot::new(0) + }; + register_and_set_slot( + &r, + "present_slot", + "direct_slock_clock_reading", + present_slot, + ); + + // Gather the metrics. 
+ let mut buffer = vec![]; + let encoder = TextEncoder::new(); + let metric_families = r.gather(); + encoder.encode(&metric_families, &mut buffer).unwrap(); + + let prom_string = String::from_utf8(buffer).unwrap(); + + Ok(Response::with((Status::Ok, prom_string))) +} + +fn register_and_set_slot(registry: &Registry, name: &str, help: &str, slot: Slot) { + let counter_opts = Opts::new(name, help); + let counter = IntCounter::with_opts(counter_opts).unwrap(); + registry.register(Box::new(counter.clone())).unwrap(); + counter.inc_by(slot.as_u64() as i64); +} diff --git a/beacon_node/network/src/beacon_chain.rs b/beacon_node/network/src/beacon_chain.rs index a98aa73de..6324e3a94 100644 --- a/beacon_node/network/src/beacon_chain.rs +++ b/beacon_node/network/src/beacon_chain.rs @@ -1,9 +1,6 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ - db::ClientDB, - fork_choice::ForkChoice, parking_lot::RwLockReadGuard, - slot_clock::SlotClock, types::{BeaconState, ChainSpec}, AttestationValidationError, CheckPoint, }; @@ -12,17 +9,17 @@ use types::{ Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, }; -pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome, InvalidBlock}; +pub use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock}; /// The network's API to the beacon chain. 
-pub trait BeaconChain: Send + Sync { +pub trait BeaconChain: Send + Sync { fn get_spec(&self) -> &ChainSpec; - fn get_state(&self) -> RwLockReadGuard>; + fn get_state(&self) -> RwLockReadGuard>; fn slot(&self) -> Slot; - fn head(&self) -> RwLockReadGuard>; + fn head(&self) -> RwLockReadGuard>; fn get_block(&self, block_root: &Hash256) -> Result, BeaconChainError>; @@ -30,7 +27,7 @@ pub trait BeaconChain: Send + Sync { fn best_block_root(&self) -> Hash256; - fn finalized_head(&self) -> RwLockReadGuard>; + fn finalized_head(&self) -> RwLockReadGuard>; fn finalized_epoch(&self) -> Epoch; @@ -64,18 +61,12 @@ pub trait BeaconChain: Send + Sync { fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result; } -impl BeaconChain for RawBeaconChain -where - T: ClientDB + Sized, - U: SlotClock, - F: ForkChoice, - E: EthSpec, -{ +impl BeaconChain for RawBeaconChain { fn get_spec(&self) -> &ChainSpec { &self.spec } - fn get_state(&self) -> RwLockReadGuard> { + fn get_state(&self) -> RwLockReadGuard> { self.state.read() } @@ -83,7 +74,7 @@ where self.get_state().slot } - fn head(&self) -> RwLockReadGuard> { + fn head(&self) -> RwLockReadGuard> { self.head() } @@ -95,7 +86,7 @@ where self.get_state().finalized_epoch } - fn finalized_head(&self) -> RwLockReadGuard> { + fn finalized_head(&self) -> RwLockReadGuard> { self.finalized_head() } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index a7d0ff2a1..f6a27ad60 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::BeaconChain; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::error; use crate::service::{NetworkMessage, OutgoingMessage}; use crate::sync::SimpleSync; @@ -13,7 +13,6 @@ use slog::{debug, warn}; use std::collections::HashMap; use std::sync::Arc; use std::time::Instant; -use types::EthSpec; /// Timeout for RPC requests. 
// const REQUEST_TIMEOUT: Duration = Duration::from_secs(30); @@ -21,11 +20,11 @@ use types::EthSpec; // const HELLO_TIMEOUT: Duration = Duration::from_secs(30); /// Handles messages received from the network and client and organises syncing. -pub struct MessageHandler { +pub struct MessageHandler { /// Currently loaded and initialised beacon chain. - _chain: Arc>, + _chain: Arc>, /// The syncing framework. - sync: SimpleSync, + sync: SimpleSync, /// The context required to send messages to, and process messages from peers. network_context: NetworkContext, /// The `MessageHandler` logger. @@ -45,10 +44,10 @@ pub enum HandlerMessage { PubsubMessage(PeerId, Box), } -impl MessageHandler { +impl MessageHandler { /// Initializes and runs the MessageHandler. pub fn spawn( - beacon_chain: Arc>, + beacon_chain: Arc>, network_send: crossbeam_channel::Sender, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 50454a875..d87b9e5a9 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::BeaconChain; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::NetworkConfig; @@ -13,20 +13,20 @@ use slog::{debug, info, o, trace}; use std::marker::PhantomData; use std::sync::Arc; use tokio::runtime::TaskExecutor; -use types::{EthSpec, Topic}; +use types::Topic; /// Service that handles communication between internal services and the eth2_libp2p network service. 
-pub struct Service { +pub struct Service { //libp2p_service: Arc>, _libp2p_exit: oneshot::Sender<()>, network_send: crossbeam_channel::Sender, - _phantom: PhantomData, //message_handler: MessageHandler, + _phantom: PhantomData, //message_handler: MessageHandler, //message_handler_send: Sender } -impl Service { +impl Service { pub fn new( - beacon_chain: Arc>, + beacon_chain: Arc>, config: &NetworkConfig, executor: &TaskExecutor, log: slog::Logger, diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs index 6c2fc33ee..793f4c395 100644 --- a/beacon_node/network/src/sync/import_queue.rs +++ b/beacon_node/network/src/sync/import_queue.rs @@ -1,11 +1,11 @@ -use crate::beacon_chain::BeaconChain; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::PeerId; use slog::{debug, error}; use std::sync::Arc; use std::time::{Duration, Instant}; use tree_hash::TreeHash; -use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot}; +use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot}; /// Provides a queue for fully and partially built `BeaconBlock`s. /// @@ -19,8 +19,8 @@ use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, S /// `BeaconBlockBody` as the key. /// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore /// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`. -pub struct ImportQueue { - pub chain: Arc>, +pub struct ImportQueue { + pub chain: Arc>, /// Partially imported blocks, keyed by the root of `BeaconBlockBody`. pub partials: Vec, /// Time before a queue entry is considered state. @@ -29,9 +29,9 @@ pub struct ImportQueue { log: slog::Logger, } -impl ImportQueue { +impl ImportQueue { /// Return a new, empty queue. 
- pub fn new(chain: Arc>, stale_time: Duration, log: slog::Logger) -> Self { + pub fn new(chain: Arc>, stale_time: Duration, log: slog::Logger) -> Self { Self { chain, partials: vec![], diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index d44ffd4b7..6ab8ea7d9 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,5 +1,5 @@ use super::import_queue::ImportQueue; -use crate::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock}; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock}; use crate::message_handler::NetworkContext; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; @@ -9,7 +9,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use tree_hash::TreeHash; -use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; +use types::{Attestation, BeaconBlock, Epoch, Hash256, Slot}; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: u64 = 100; @@ -88,8 +88,8 @@ impl From for PeerSyncInfo { } } -impl From<&Arc>> for PeerSyncInfo { - fn from(chain: &Arc>) -> PeerSyncInfo { +impl From<&Arc>> for PeerSyncInfo { + fn from(chain: &Arc>) -> PeerSyncInfo { Self::from(chain.hello_message()) } } @@ -103,22 +103,22 @@ pub enum SyncState { } /// Simple Syncing protocol. -pub struct SimpleSync { +pub struct SimpleSync { /// A reference to the underlying beacon chain. - chain: Arc>, + chain: Arc>, /// A mapping of Peers to their respective PeerSyncInfo. known_peers: HashMap, /// A queue to allow importing of blocks - import_queue: ImportQueue, + import_queue: ImportQueue, /// The current state of the syncing protocol. state: SyncState, /// Sync logger. 
log: slog::Logger, } -impl SimpleSync { +impl SimpleSync { /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. - pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS); diff --git a/beacon_node/rpc/Cargo.toml b/beacon_node/rpc/Cargo.toml index 3fc52c6b1..a361c94ab 100644 --- a/beacon_node/rpc/Cargo.toml +++ b/beacon_node/rpc/Cargo.toml @@ -17,7 +17,7 @@ protos = { path = "../../protos" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protobuf = "2.0.2" clap = "2.32.0" -db = { path = "../db" } +store = { path = "../store" } dirs = "1.0.3" futures = "0.1.23" slog = "^2.2.3" diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index e22715b55..6048e42b1 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::BeaconChain; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use protos::services::{ @@ -9,15 +9,15 @@ use protos::services_grpc::AttestationService; use slog::{error, info, trace, warn}; use ssz::{ssz_encode, Decode}; use std::sync::Arc; -use types::{Attestation, EthSpec}; +use types::Attestation; #[derive(Clone)] -pub struct AttestationServiceInstance { - pub chain: Arc>, +pub struct AttestationServiceInstance { + pub chain: Arc>, pub log: slog::Logger, } -impl AttestationService for AttestationServiceInstance { +impl AttestationService for AttestationServiceInstance { /// Produce the `AttestationData` for signing by a validator. 
fn produce_attestation_data( &mut self, diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index bbe6a8ee2..e553b79e7 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::BeaconChain; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use crossbeam_channel; use eth2_libp2p::PubsubMessage; use futures::Future; @@ -13,16 +13,16 @@ use slog::Logger; use slog::{error, info, trace, warn}; use ssz::{ssz_encode, Decode}; use std::sync::Arc; -use types::{BeaconBlock, EthSpec, Signature, Slot}; +use types::{BeaconBlock, Signature, Slot}; #[derive(Clone)] -pub struct BeaconBlockServiceInstance { - pub chain: Arc>, +pub struct BeaconBlockServiceInstance { + pub chain: Arc>, pub network_chan: crossbeam_channel::Sender, pub log: Logger, } -impl BeaconBlockService for BeaconBlockServiceInstance { +impl BeaconBlockService for BeaconBlockServiceInstance { /// Produce a `BeaconBlock` for signing by a validator. fn produce_beacon_block( &mut self, diff --git a/beacon_node/rpc/src/beacon_chain.rs b/beacon_node/rpc/src/beacon_chain.rs index 7e75b32ce..b0a490137 100644 --- a/beacon_node/rpc/src/beacon_chain.rs +++ b/beacon_node/rpc/src/beacon_chain.rs @@ -1,22 +1,19 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ - db::ClientDB, - fork_choice::ForkChoice, parking_lot::{RwLockReadGuard, RwLockWriteGuard}, - slot_clock::SlotClock, types::{BeaconState, ChainSpec, Signature}, AttestationValidationError, BlockProductionError, }; -pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome}; +pub use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessingOutcome}; use types::{Attestation, AttestationData, BeaconBlock, EthSpec}; /// The RPC's API to the beacon chain. 
-pub trait BeaconChain: Send + Sync { +pub trait BeaconChain: Send + Sync { fn get_spec(&self) -> &ChainSpec; - fn get_state(&self) -> RwLockReadGuard>; + fn get_state(&self) -> RwLockReadGuard>; - fn get_mut_state(&self) -> RwLockWriteGuard>; + fn get_mut_state(&self) -> RwLockWriteGuard>; fn process_block(&self, block: BeaconBlock) -> Result; @@ -24,7 +21,7 @@ pub trait BeaconChain: Send + Sync { fn produce_block( &self, randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError>; + ) -> Result<(BeaconBlock, BeaconState), BlockProductionError>; fn produce_attestation_data(&self, shard: u64) -> Result; @@ -34,22 +31,16 @@ pub trait BeaconChain: Send + Sync { ) -> Result<(), AttestationValidationError>; } -impl BeaconChain for RawBeaconChain -where - T: ClientDB + Sized, - U: SlotClock, - F: ForkChoice, - E: EthSpec, -{ +impl BeaconChain for RawBeaconChain { fn get_spec(&self) -> &ChainSpec { &self.spec } - fn get_state(&self) -> RwLockReadGuard> { + fn get_state(&self) -> RwLockReadGuard> { self.state.read() } - fn get_mut_state(&self) -> RwLockWriteGuard> { + fn get_mut_state(&self) -> RwLockWriteGuard> { self.state.write() } @@ -63,7 +54,7 @@ where fn produce_block( &self, randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { + ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { self.produce_block(randao_reveal) } diff --git a/beacon_node/rpc/src/beacon_node.rs b/beacon_node/rpc/src/beacon_node.rs index 2ca39ae51..a923bbb35 100644 --- a/beacon_node/rpc/src/beacon_node.rs +++ b/beacon_node/rpc/src/beacon_node.rs @@ -1,19 +1,18 @@ -use crate::beacon_chain::BeaconChain; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::Future; use grpcio::{RpcContext, UnarySink}; use protos::services::{Empty, Fork, NodeInfoResponse}; use protos::services_grpc::BeaconNodeService; use slog::{trace, warn}; use std::sync::Arc; -use types::EthSpec; #[derive(Clone)] -pub struct 
BeaconNodeServiceInstance { - pub chain: Arc>, +pub struct BeaconNodeServiceInstance { + pub chain: Arc>, pub log: slog::Logger, } -impl BeaconNodeService for BeaconNodeServiceInstance { +impl BeaconNodeService for BeaconNodeServiceInstance { /// Provides basic node information. fn info(&mut self, ctx: RpcContext, _req: Empty, sink: UnarySink) { trace!(self.log, "Node info requested via RPC"); diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index f1d5e9c88..9646135b6 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -7,7 +7,7 @@ mod validator; use self::attestation::AttestationServiceInstance; use self::beacon_block::BeaconBlockServiceInstance; -use self::beacon_chain::BeaconChain; +use self::beacon_chain::{BeaconChain, BeaconChainTypes}; use self::beacon_node::BeaconNodeServiceInstance; use self::validator::ValidatorServiceInstance; pub use config::Config as RPCConfig; @@ -21,13 +21,12 @@ use protos::services_grpc::{ use slog::{info, o, warn}; use std::sync::Arc; use tokio::runtime::TaskExecutor; -use types::EthSpec; -pub fn start_server( +pub fn start_server( config: &RPCConfig, executor: &TaskExecutor, network_chan: crossbeam_channel::Sender, - beacon_chain: Arc>, + beacon_chain: Arc>, log: &slog::Logger, ) -> exit_future::Signal { let log = log.new(o!("Service"=>"RPC")); diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 34fbba5c4..e58c202d6 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -1,4 +1,4 @@ -use crate::beacon_chain::BeaconChain; +use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -7,16 +7,16 @@ use protos::services_grpc::ValidatorService; use slog::{trace, warn}; use ssz::Decode; use std::sync::Arc; -use types::{Epoch, EthSpec, RelativeEpoch}; +use types::{Epoch, RelativeEpoch}; #[derive(Clone)] -pub struct 
ValidatorServiceInstance { - pub chain: Arc>, +pub struct ValidatorServiceInstance { + pub chain: Arc>, pub log: slog::Logger, } //TODO: Refactor Errors -impl ValidatorService for ValidatorServiceInstance { +impl ValidatorService for ValidatorServiceInstance { /// For a list of validator public keys, this function returns the slot at which each /// validator must propose a block, attest to a shard, their shard committee and the shard they /// need to attest to. diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 45aafb3ce..ef2121882 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -68,6 +68,15 @@ fn main() { .help("Listen port for RPC endpoint.") .takes_value(true), ) + .arg( + Arg::with_name("db") + .long("db") + .value_name("DB") + .help("Type of database to use.") + .takes_value(true) + .possible_values(&["disk", "memory"]) + .default_value("memory"), + ) .get_matches(); // invalid arguments, panic diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 1d9156124..6ec65a92d 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,15 +1,18 @@ -use client::client_types::TestingClientType; -use client::error; -use client::{notifier, Client, ClientConfig}; +use client::{ + error, notifier, BeaconChainTypes, Client, ClientConfig, DBType, TestnetDiskBeaconChainTypes, + TestnetMemoryBeaconChainTypes, +}; use futures::sync::oneshot; use futures::Future; use slog::info; use std::cell::RefCell; use tokio::runtime::Builder; +use tokio::runtime::Runtime; +use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Result<()> { - let mut runtime = Builder::new() + let runtime = Builder::new() .name_prefix("main-") .clock(Clock::system()) .build() @@ -20,8 +23,42 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul "data_dir" => &config.data_dir.to_str(), "port" => &config.net_conf.listen_port); + let 
executor = runtime.executor(); + + match config.db_type { + DBType::Disk => { + info!( + log, + "BeaconNode starting"; + "type" => "TestnetDiskBeaconChainTypes" + ); + let client: Client = + Client::new(config, log.clone(), &executor)?; + + run(client, executor, runtime, log) + } + DBType::Memory => { + info!( + log, + "BeaconNode starting"; + "type" => "TestnetMemoryBeaconChainTypes" + ); + let client: Client = + Client::new(config, log.clone(), &executor)?; + + run(client, executor, runtime, log) + } + } +} + +pub fn run( + client: Client, + executor: TaskExecutor, + mut runtime: Runtime, + log: &slog::Logger, +) -> error::Result<()> { // run service until ctrl-c - let (ctrlc_send, ctrlc) = oneshot::channel(); + let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); ctrlc::set_handler(move || { if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { @@ -32,14 +69,10 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul let (exit_signal, exit) = exit_future::signal(); - let executor = runtime.executor(); - - // currently testing - using TestingClientType - let client: Client = Client::new(config, log.clone(), &executor)?; notifier::run(&client, executor, exit); runtime - .block_on(ctrlc) + .block_on(ctrlc_oneshot) .map_err(|e| format!("Ctrlc oneshot failed: {:?}", e))?; // perform global shutdown operations. 
diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml new file mode 100644 index 000000000..a95dafa90 --- /dev/null +++ b/beacon_node/store/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "store" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dev-dependencies] +tempfile = "3" + +[dependencies] +blake2-rfc = "0.2.18" +bls = { path = "../../eth2/utils/bls" } +bytes = "0.4.10" +db-key = "0.0.5" +leveldb = "0.8.4" +parking_lot = "0.7" +ssz = { path = "../../eth2/utils/ssz" } +ssz_derive = { path = "../../eth2/utils/ssz_derive" } +tree_hash = { path = "../../eth2/utils/tree_hash" } +types = { path = "../../eth2/types" } diff --git a/beacon_node/store/src/block_at_slot.rs b/beacon_node/store/src/block_at_slot.rs new file mode 100644 index 000000000..4a8abaefd --- /dev/null +++ b/beacon_node/store/src/block_at_slot.rs @@ -0,0 +1,183 @@ +use super::*; +use ssz::{Decode, DecodeError}; + +fn get_block_bytes(store: &T, root: Hash256) -> Result>, Error> { + store.get_bytes(BeaconBlock::db_column().into(), &root[..]) +} + +fn read_slot_from_block_bytes(bytes: &[u8]) -> Result { + let end = std::cmp::min(Slot::ssz_fixed_len(), bytes.len()); + + Slot::from_ssz_bytes(&bytes[0..end]) +} + +fn read_previous_block_root_from_block_bytes(bytes: &[u8]) -> Result { + let previous_bytes = Slot::ssz_fixed_len(); + let slice = bytes + .get(previous_bytes..previous_bytes + Hash256::ssz_fixed_len()) + .ok_or_else(|| DecodeError::BytesInvalid("Not enough bytes.".to_string()))?; + + Hash256::from_ssz_bytes(slice) +} + +pub fn get_block_at_preceeding_slot( + store: &T, + slot: Slot, + start_root: Hash256, +) -> Result, Error> { + let mut root = start_root; + + loop { + if let Some(bytes) = get_block_bytes(store, root)? 
{ + let this_slot = read_slot_from_block_bytes(&bytes)?; + + if this_slot == slot { + let block = BeaconBlock::from_ssz_bytes(&bytes)?; + break Ok(Some((root, block))); + } else if this_slot < slot { + break Ok(None); + } else { + root = read_previous_block_root_from_block_bytes(&bytes)?; + } + } else { + break Ok(None); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz::Encode; + use tree_hash::TreeHash; + + #[test] + fn read_slot() { + let spec = FewValidatorsEthSpec::spec(); + + let test_slot = |slot: Slot| { + let mut block = BeaconBlock::empty(&spec); + block.slot = slot; + let bytes = block.as_ssz_bytes(); + assert_eq!(read_slot_from_block_bytes(&bytes).unwrap(), slot); + }; + + test_slot(Slot::new(0)); + test_slot(Slot::new(1)); + test_slot(Slot::new(42)); + test_slot(Slot::new(u64::max_value())); + } + + #[test] + fn bad_slot() { + for i in 0..8 { + assert!(read_slot_from_block_bytes(&vec![0; i]).is_err()); + } + } + + #[test] + fn read_previous_block_root() { + let spec = FewValidatorsEthSpec::spec(); + + let test_root = |root: Hash256| { + let mut block = BeaconBlock::empty(&spec); + block.previous_block_root = root; + let bytes = block.as_ssz_bytes(); + assert_eq!( + read_previous_block_root_from_block_bytes(&bytes).unwrap(), + root + ); + }; + + test_root(Hash256::random()); + test_root(Hash256::random()); + test_root(Hash256::random()); + } + + fn build_chain( + store: &impl Store, + slots: &[usize], + spec: &ChainSpec, + ) -> Vec<(Hash256, BeaconBlock)> { + let mut blocks_and_roots: Vec<(Hash256, BeaconBlock)> = vec![]; + + for (i, slot) in slots.iter().enumerate() { + let mut block = BeaconBlock::empty(spec); + block.slot = Slot::from(*slot); + + if i > 0 { + block.previous_block_root = blocks_and_roots[i - 1].0; + } + + let root = Hash256::from_slice(&block.tree_hash_root()); + + store.put(&root, &block).unwrap(); + blocks_and_roots.push((root, block)); + } + + blocks_and_roots + } + + #[test] + fn chain_without_skips() { + let n: 
usize = 10; + let store = MemoryStore::open(); + let spec = FewValidatorsEthSpec::spec(); + + let slots: Vec = (0..n).collect(); + let blocks_and_roots = build_chain(&store, &slots, &spec); + + for source in 1..n { + for target in 0..=source { + let (source_root, _source_block) = &blocks_and_roots[source]; + let (target_root, target_block) = &blocks_and_roots[target]; + + let (found_root, found_block) = store + .get_block_at_preceeding_slot(*source_root, target_block.slot) + .unwrap() + .unwrap(); + + assert_eq!(found_root, *target_root); + assert_eq!(found_block, *target_block); + } + } + } + + #[test] + fn chain_with_skips() { + let store = MemoryStore::open(); + let spec = FewValidatorsEthSpec::spec(); + + let slots = vec![0, 1, 2, 5]; + + let blocks_and_roots = build_chain(&store, &slots, &spec); + + // Valid slots + for target in 0..3 { + let (source_root, _source_block) = &blocks_and_roots[3]; + let (target_root, target_block) = &blocks_and_roots[target]; + + let (found_root, found_block) = store + .get_block_at_preceeding_slot(*source_root, target_block.slot) + .unwrap() + .unwrap(); + + assert_eq!(found_root, *target_root); + assert_eq!(found_block, *target_block); + } + + // Slot that doesn't exist + let (source_root, _source_block) = &blocks_and_roots[3]; + assert!(store + .get_block_at_preceeding_slot(*source_root, Slot::new(3)) + .unwrap() + .is_none()); + + // Slot too high + let (source_root, _source_block) = &blocks_and_roots[3]; + assert!(store + .get_block_at_preceeding_slot(*source_root, Slot::new(3)) + .unwrap() + .is_none()); + } +} diff --git a/beacon_node/db/src/disk_db.rs b/beacon_node/store/src/disk_db.rs similarity index 86% rename from beacon_node/db/src/disk_db.rs rename to beacon_node/store/src/disk_db.rs index 2d26315da..eb2b885c6 100644 --- a/beacon_node/db/src/disk_db.rs +++ b/beacon_node/store/src/disk_db.rs @@ -1,19 +1,20 @@ extern crate rocksdb; -use super::rocksdb::Error as RocksError; -use super::rocksdb::{Options, DB}; +// use 
super::stores::COLUMNS; use super::{ClientDB, DBError, DBValue}; +use rocksdb::Error as RocksError; +use rocksdb::{Options, DB}; use std::fs; use std::path::Path; /// A on-disk database which implements the ClientDB trait. /// /// This implementation uses RocksDB with default options. -pub struct DiskDB { +pub struct DiskStore { db: DB, } -impl DiskDB { +impl DiskStore { /// Open the RocksDB database, optionally supplying columns if required. /// /// The RocksDB database will be contained in a directory titled @@ -23,31 +24,32 @@ impl DiskDB { /// /// Panics if the database is unable to be created. pub fn open(path: &Path, columns: Option<&[&str]>) -> Self { - /* - * Initialise the options - */ + // Rocks options. let mut options = Options::default(); options.create_if_missing(true); - // TODO: ensure that columns are created (and remove - // the dead_code allow) - - /* - * Initialise the path - */ + // Ensure the path exists. fs::create_dir_all(&path).unwrap_or_else(|_| panic!("Unable to create {:?}", &path)); let db_path = path.join("database"); - /* - * Open the database - */ - let db = match columns { - None => DB::open(&options, db_path), - Some(columns) => DB::open_cf(&options, db_path, columns), - } - .expect("Unable to open local database");; + let columns = columns.unwrap_or(&COLUMNS); - Self { db } + if db_path.exists() { + Self { + db: DB::open_cf(&options, db_path, &COLUMNS) + .expect("Unable to open local database"), + } + } else { + let mut db = Self { + db: DB::open(&options, db_path).expect("Unable to open local database"), + }; + + for cf in columns { + db.create_col(cf).unwrap(); + } + + db + } } /// Create a RocksDB column family. Corresponds to the @@ -69,7 +71,7 @@ impl From for DBError { } } -impl ClientDB for DiskDB { +impl ClientDB for DiskStore { /// Get the value for some key on some column. /// /// Corresponds to the `get_cf()` method on the RocksDB API. 
@@ -97,7 +99,7 @@ impl ClientDB for DiskDB { None => Err(DBError { message: "Unknown column".to_string(), }), - Some(handle) => self.db.put_cf(handle, key, val).map_err(Into::into), + Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()), } } @@ -152,7 +154,7 @@ mod tests { let col_name: &str = "TestColumn"; let column_families = vec![col_name]; - let mut db = DiskDB::open(&path, None); + let mut db = DiskStore::open(&path, None); for cf in column_families { db.create_col(&cf).unwrap(); diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs new file mode 100644 index 000000000..815b35a8e --- /dev/null +++ b/beacon_node/store/src/errors.rs @@ -0,0 +1,30 @@ +use ssz::DecodeError; + +#[derive(Debug, PartialEq)] +pub enum Error { + SszDecodeError(DecodeError), + DBError { message: String }, +} + +impl From for Error { + fn from(e: DecodeError) -> Error { + Error::SszDecodeError(e) + } +} + +impl From for Error { + fn from(e: DBError) -> Error { + Error::DBError { message: e.message } + } +} + +#[derive(Debug)] +pub struct DBError { + pub message: String, +} + +impl DBError { + pub fn new(message: String) -> Self { + Self { message } + } +} diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs new file mode 100644 index 000000000..91f8d52de --- /dev/null +++ b/beacon_node/store/src/impls.rs @@ -0,0 +1,30 @@ +use crate::*; +use ssz::{Decode, Encode}; + +impl StoreItem for BeaconBlock { + fn db_column() -> DBColumn { + DBColumn::BeaconBlock + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &mut [u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +impl StoreItem for BeaconState { + fn db_column() -> DBColumn { + DBColumn::BeaconState + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &mut [u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } +} diff --git 
a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs new file mode 100644 index 000000000..09aec46fa --- /dev/null +++ b/beacon_node/store/src/leveldb_store.rs @@ -0,0 +1,100 @@ +use super::*; +use db_key::Key; +use leveldb::database::kv::KV; +use leveldb::database::Database; +use leveldb::error::Error as LevelDBError; +use leveldb::options::{Options, ReadOptions, WriteOptions}; +use std::path::Path; + +/// A wrapped leveldb database. +pub struct LevelDB { + db: Database, +} + +impl LevelDB { + /// Open a database at `path`, creating a new database if one does not already exist. + pub fn open(path: &Path) -> Result { + let mut options = Options::new(); + + options.create_if_missing = true; + + let db = Database::open(path, options)?; + + Ok(Self { db }) + } + + fn read_options(&self) -> ReadOptions { + ReadOptions::new() + } + + fn write_options(&self) -> WriteOptions { + WriteOptions::new() + } + + fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey { + let mut col = col.as_bytes().to_vec(); + col.append(&mut key.to_vec()); + BytesKey { key: col } + } +} + +/// Used for keying leveldb. +pub struct BytesKey { + key: Vec, +} + +impl Key for BytesKey { + fn from_u8(key: &[u8]) -> Self { + Self { key: key.to_vec() } + } + + fn as_slice T>(&self, f: F) -> T { + f(self.key.as_slice()) + } +} + +impl Store for LevelDB { + /// Retrieve some bytes in `column` with `key`. + fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { + let column_key = Self::get_key_for_col(col, key); + + self.db + .get(self.read_options(), column_key) + .map_err(Into::into) + } + + /// Store some `value` in `column`, indexed with `key`. + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + let column_key = Self::get_key_for_col(col, key); + + self.db + .put(self.write_options(), column_key, val) + .map_err(Into::into) + } + + /// Return `true` if `key` exists in `column`. 
+ fn key_exists(&self, col: &str, key: &[u8]) -> Result { + let column_key = Self::get_key_for_col(col, key); + + self.db + .get(self.read_options(), column_key) + .map_err(Into::into) + .and_then(|val| Ok(val.is_some())) + } + + /// Removes `key` from `column`. + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { + let column_key = Self::get_key_for_col(col, key); + self.db + .delete(self.write_options(), column_key) + .map_err(Into::into) + } +} + +impl From for Error { + fn from(e: LevelDBError) -> Error { + Error::DBError { + message: format!("{:?}", e), + } + } +} diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs new file mode 100644 index 000000000..59875601a --- /dev/null +++ b/beacon_node/store/src/lib.rs @@ -0,0 +1,223 @@ +//! Storage functionality for Lighthouse. +//! +//! Provides the following stores: +//! +//! - `DiskStore`: an on-disk store backed by leveldb. Used in production. +//! - `MemoryStore`: an in-memory store backed by a hash-map. Used for testing. +//! +//! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See +//! tests for implementation examples. + +mod block_at_slot; +mod errors; +mod impls; +mod leveldb_store; +mod memory_store; + +pub use self::leveldb_store::LevelDB as DiskStore; +pub use self::memory_store::MemoryStore; +pub use errors::Error; +pub use types::*; + +/// An object capable of storing and retrieving objects implementing `StoreItem`. +/// +/// A `Store` is fundamentally backed by a key-value database, however it provides support for +/// columns. A simple column implementation might involve prefixing a key with some bytes unique to +/// each column. +pub trait Store: Sync + Send + Sized { + /// Store an item in `Self`. + fn put(&self, key: &Hash256, item: &impl StoreItem) -> Result<(), Error> { + item.db_put(self, key) + } + + /// Retrieve an item from `Self`. 
+ fn get(&self, key: &Hash256) -> Result, Error> { + I::db_get(self, key) + } + + /// Returns `true` if the given key represents an item in `Self`. + fn exists(&self, key: &Hash256) -> Result { + I::db_exists(self, key) + } + + /// Remove an item from `Self`. + fn delete(&self, key: &Hash256) -> Result<(), Error> { + I::db_delete(self, key) + } + + /// Given the root of an existing block in the store (`start_block_root`), return a parent + /// block with the specified `slot`. + /// + /// Returns `None` if no parent block exists at that slot, or if `slot` is greater than the + /// slot of `start_block_root`. + fn get_block_at_preceeding_slot( + &self, + start_block_root: Hash256, + slot: Slot, + ) -> Result, Error> { + block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) + } + + /// Retrieve some bytes in `column` with `key`. + fn get_bytes(&self, column: &str, key: &[u8]) -> Result>, Error>; + + /// Store some `value` in `column`, indexed with `key`. + fn put_bytes(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error>; + + /// Return `true` if `key` exists in `column`. + fn key_exists(&self, column: &str, key: &[u8]) -> Result; + + /// Removes `key` from `column`. + fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>; +} + +/// A unique column identifier. +pub enum DBColumn { + BeaconBlock, + BeaconState, + BeaconChain, +} + +impl<'a> Into<&'a str> for DBColumn { + /// Returns a `&str` that can be used for keying a key-value data base. + fn into(self) -> &'a str { + match self { + DBColumn::BeaconBlock => &"blk", + DBColumn::BeaconState => &"ste", + DBColumn::BeaconChain => &"bch", + } + } +} + +/// An item that may be stored in a `Store`. +/// +/// Provides default methods that are suitable for most applications, however when overridden they +/// provide full customizability of `Store` operations. +pub trait StoreItem: Sized { + /// Identifies which column this item should be placed in. 
+ fn db_column() -> DBColumn; + + /// Serialize `self` as bytes. + fn as_store_bytes(&self) -> Vec; + + /// De-serialize `self` from bytes. + fn from_store_bytes(bytes: &mut [u8]) -> Result; + + /// Store `self`. + fn db_put(&self, store: &impl Store, key: &Hash256) -> Result<(), Error> { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + store + .put_bytes(column, key, &self.as_store_bytes()) + .map_err(Into::into) + } + + /// Retrieve an instance of `Self`. + fn db_get(store: &impl Store, key: &Hash256) -> Result, Error> { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + match store.get_bytes(column, key)? { + Some(mut bytes) => Ok(Some(Self::from_store_bytes(&mut bytes[..])?)), + None => Ok(None), + } + } + + /// Return `true` if an instance of `Self` exists in `Store`. + fn db_exists(store: &impl Store, key: &Hash256) -> Result { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + store.key_exists(column, key) + } + + /// Delete `self` from the `Store`. 
+ fn db_delete(store: &impl Store, key: &Hash256) -> Result<(), Error> { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + store.key_delete(column, key) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz::{Decode, Encode}; + use ssz_derive::{Decode, Encode}; + use tempfile::tempdir; + + #[derive(PartialEq, Debug, Encode, Decode)] + struct StorableThing { + a: u64, + b: u64, + } + + impl StoreItem for StorableThing { + fn db_column() -> DBColumn { + DBColumn::BeaconBlock + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &mut [u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } + } + + fn test_impl(store: impl Store) { + let key = Hash256::random(); + let item = StorableThing { a: 1, b: 42 }; + + assert_eq!(store.exists::(&key), Ok(false)); + + store.put(&key, &item).unwrap(); + + assert_eq!(store.exists::(&key), Ok(true)); + + let retrieved = store.get(&key).unwrap().unwrap(); + assert_eq!(item, retrieved); + + store.delete::(&key).unwrap(); + + assert_eq!(store.exists::(&key), Ok(false)); + + assert_eq!(store.get::(&key), Ok(None)); + } + + #[test] + fn diskdb() { + let dir = tempdir().unwrap(); + let path = dir.path(); + let store = DiskStore::open(&path).unwrap(); + + test_impl(store); + } + + #[test] + fn memorydb() { + let store = MemoryStore::open(); + + test_impl(store); + } + + #[test] + fn exists() { + let store = MemoryStore::open(); + let key = Hash256::random(); + let item = StorableThing { a: 1, b: 42 }; + + assert_eq!(store.exists::(&key).unwrap(), false); + + store.put(&key, &item).unwrap(); + + assert_eq!(store.exists::(&key).unwrap(), true); + + store.delete::(&key).unwrap(); + + assert_eq!(store.exists::(&key).unwrap(), false); + } +} diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs new file mode 100644 index 000000000..086a16c26 --- /dev/null +++ b/beacon_node/store/src/memory_store.rs @@ -0,0 +1,63 @@ 
+use super::{Error, Store}; +use parking_lot::RwLock; +use std::collections::HashMap; + +type DBHashMap = HashMap, Vec>; + +/// A thread-safe `HashMap` wrapper. +pub struct MemoryStore { + db: RwLock, +} + +impl MemoryStore { + /// Create a new, empty database. + pub fn open() -> Self { + Self { + db: RwLock::new(HashMap::new()), + } + } + + fn get_key_for_col(col: &str, key: &[u8]) -> Vec { + let mut col = col.as_bytes().to_vec(); + col.append(&mut key.to_vec()); + col + } +} + +impl Store for MemoryStore { + /// Get the value of some key from the database. Returns `None` if the key does not exist. + fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { + let column_key = MemoryStore::get_key_for_col(col, key); + + Ok(self + .db + .read() + .get(&column_key) + .and_then(|val| Some(val.clone()))) + } + + /// Puts a key in the database. + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + let column_key = MemoryStore::get_key_for_col(col, key); + + self.db.write().insert(column_key, val.to_vec()); + + Ok(()) + } + + /// Return true if some key exists in some column. + fn key_exists(&self, col: &str, key: &[u8]) -> Result { + let column_key = MemoryStore::get_key_for_col(col, key); + + Ok(self.db.read().contains_key(&column_key)) + } + + /// Delete some key from the database. 
+ fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { + let column_key = MemoryStore::get_key_for_col(col, key); + + self.db.write().remove(&column_key); + + Ok(()) + } +} diff --git a/beacon_node/store/src/store.rs b/beacon_node/store/src/store.rs new file mode 100644 index 000000000..5d18c7ba5 --- /dev/null +++ b/beacon_node/store/src/store.rs @@ -0,0 +1,37 @@ +use super::*; + +pub type Vec = Vec; + +pub trait Store: Sync + Send + Sized { + fn put(&self, key: &Hash256, item: &impl StoreItem) -> Result<(), Error> { + item.db_put(self, key) + } + + fn get(&self, key: &Hash256) -> Result, Error> { + I::db_get(self, key) + } + + fn exists(&self, key: &Hash256) -> Result { + I::db_exists(self, key) + } + + fn delete(&self, key: &Hash256) -> Result<(), Error> { + I::db_delete(self, key) + } + + fn get_block_at_preceeding_slot( + &self, + start_block_root: Hash256, + slot: Slot, + ) -> Result, Error> { + block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) + } + + fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error>; + + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error>; + + fn key_exists(&self, col: &str, key: &[u8]) -> Result; + + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; +} diff --git a/eth2/fork_choice/Cargo.toml b/eth2/fork_choice/Cargo.toml index 819b84055..f2e6825ed 100644 --- a/eth2/fork_choice/Cargo.toml +++ b/eth2/fork_choice/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Age Manning "] edition = "2018" [dependencies] -db = { path = "../../beacon_node/db" } +store = { path = "../../beacon_node/store" } ssz = { path = "../utils/ssz" } types = { path = "../types" } log = "0.4.6" diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index 0bbac6bb6..0e579c0b9 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -1,16 +1,11 @@ //! The optimised bitwise LMD-GHOST fork choice rule. 
-extern crate bit_vec; - use crate::{ForkChoice, ForkChoiceError}; use bit_vec::BitVec; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, -}; use log::{debug, trace}; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot, SlotHeight}; //TODO: Pruning - Children @@ -34,7 +29,7 @@ fn power_of_2_below(x: u64) -> u64 { } /// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm. -pub struct BitwiseLMDGhost { +pub struct BitwiseLMDGhost { /// A cache of known ancestors at given heights for a specific block. //TODO: Consider FnvHashMap cache: HashMap, Hash256>, @@ -46,30 +41,21 @@ pub struct BitwiseLMDGhost { /// The latest attestation targets as a map of validator index to block hash. //TODO: Could this be a fixed size vec latest_attestation_targets: HashMap, - /// Block storage access. - block_store: Arc>, - /// State storage access. - state_store: Arc>, + /// Block and state storage. + store: Arc, max_known_height: SlotHeight, _phantom: PhantomData, } -impl BitwiseLMDGhost -where - T: ClientDB + Sized, -{ - pub fn new( - block_store: Arc>, - state_store: Arc>, - ) -> Self { +impl BitwiseLMDGhost { + pub fn new(store: Arc) -> Self { BitwiseLMDGhost { cache: HashMap::new(), ancestors: vec![HashMap::new(); 16], latest_attestation_targets: HashMap::new(), children: HashMap::new(), max_known_height: SlotHeight::new(0), - block_store, - state_store, + store, _phantom: PhantomData, } } @@ -89,8 +75,8 @@ where let mut latest_votes: HashMap = HashMap::new(); // gets the current weighted votes let current_state: BeaconState = self - .state_store - .get_deserialized(&state_root)? + .store + .get(&state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = @@ -121,8 +107,8 @@ where // return None if we can't get the block from the db. 
let block_height = { let block_slot = self - .block_store - .get_deserialized(&block_hash) + .store + .get::(&block_hash) .ok()? .expect("Should have returned already if None") .slot; @@ -243,7 +229,7 @@ where } } -impl ForkChoice for BitwiseLMDGhost { +impl ForkChoice for BitwiseLMDGhost { fn add_block( &mut self, block: &BeaconBlock, @@ -252,8 +238,8 @@ impl ForkChoice for BitwiseLMDGhost { ) -> Result<(), ForkChoiceError> { // get the height of the parent let parent_height = self - .block_store - .get_deserialized(&block.previous_block_root)? + .store + .get::(&block.previous_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))? .slot .height(spec.genesis_slot); @@ -304,16 +290,16 @@ impl ForkChoice for BitwiseLMDGhost { trace!("Old attestation found: {:?}", attestation_target); // get the height of the target block let block_height = self - .block_store - .get_deserialized(&target_block_root)? + .store + .get::(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .slot .height(spec.genesis_slot); // get the height of the past target block let past_block_height = self - .block_store - .get_deserialized(&attestation_target)? + .store + .get::(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .slot .height(spec.genesis_slot); @@ -337,8 +323,8 @@ impl ForkChoice for BitwiseLMDGhost { justified_block_start ); let block = self - .block_store - .get_deserialized(&justified_block_start)? + .store + .get::(&justified_block_start)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; let block_slot = block.slot; @@ -429,8 +415,8 @@ impl ForkChoice for BitwiseLMDGhost { // didn't find head yet, proceed to next iteration // update block height block_height = self - .block_store - .get_deserialized(¤t_head)? + .store + .get::(¤t_head)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? 
.slot .height(spec.genesis_slot); diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index 016cd5dea..ffc40e6c6 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -16,17 +16,14 @@ //! [`slow_lmd_ghost`]: struct.SlowLmdGhost.html //! [`bitwise_lmd_ghost`]: struct.OptimisedLmdGhost.html -extern crate db; -extern crate ssz; -extern crate types; - pub mod bitwise_lmd_ghost; pub mod longest_chain; pub mod optimized_lmd_ghost; pub mod slow_lmd_ghost; -use db::stores::BeaconBlockAtSlotError; -use db::DBError; +// use store::stores::BeaconBlockAtSlotError; +// use store::DBError; +use store::Error as DBError; use types::{BeaconBlock, ChainSpec, Hash256}; pub use bitwise_lmd_ghost::BitwiseLMDGhost; @@ -77,10 +74,11 @@ pub enum ForkChoiceError { impl From for ForkChoiceError { fn from(e: DBError) -> ForkChoiceError { - ForkChoiceError::StorageError(e.message) + ForkChoiceError::StorageError(format!("{:?}", e)) } } +/* impl From for ForkChoiceError { fn from(e: BeaconBlockAtSlotError) -> ForkChoiceError { match e { @@ -94,6 +92,7 @@ impl From for ForkChoiceError { } } } +*/ /// Fork choice options that are currently implemented. #[derive(Debug, Clone)] diff --git a/eth2/fork_choice/src/longest_chain.rs b/eth2/fork_choice/src/longest_chain.rs index 423edc567..11453cf49 100644 --- a/eth2/fork_choice/src/longest_chain.rs +++ b/eth2/fork_choice/src/longest_chain.rs @@ -1,31 +1,25 @@ use crate::{ForkChoice, ForkChoiceError}; -use db::{stores::BeaconBlockStore, ClientDB}; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, ChainSpec, Hash256, Slot}; -pub struct LongestChain -where - T: ClientDB + Sized, -{ +pub struct LongestChain { /// List of head block hashes head_block_hashes: Vec, - /// Block storage access. - block_store: Arc>, + /// Block storage. 
+ store: Arc, } -impl LongestChain -where - T: ClientDB + Sized, -{ - pub fn new(block_store: Arc>) -> Self { +impl LongestChain { + pub fn new(store: Arc) -> Self { LongestChain { head_block_hashes: Vec::new(), - block_store, + store, } } } -impl ForkChoice for LongestChain { +impl ForkChoice for LongestChain { fn add_block( &mut self, block: &BeaconBlock, @@ -55,9 +49,9 @@ impl ForkChoice for LongestChain { * Load all the head_block hashes from the DB as SszBeaconBlocks. */ for (index, block_hash) in self.head_block_hashes.iter().enumerate() { - let block = self - .block_store - .get_deserialized(&block_hash)? + let block: BeaconBlock = self + .store + .get(&block_hash)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_hash))?; head_blocks.push((index, block)); } diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index 3f585e3c1..dba6e60da 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -1,16 +1,11 @@ //! The optimised bitwise LMD-GHOST fork choice rule. -extern crate bit_vec; - use crate::{ForkChoice, ForkChoiceError}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, -}; use log::{debug, trace}; use std::cmp::Ordering; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot, SlotHeight}; //TODO: Pruning - Children @@ -34,7 +29,7 @@ fn power_of_2_below(x: u64) -> u64 { } /// Stores the necessary data structures to run the optimised lmd ghost algorithm. -pub struct OptimizedLMDGhost { +pub struct OptimizedLMDGhost { /// A cache of known ancestors at given heights for a specific block. //TODO: Consider FnvHashMap cache: HashMap, Hash256>, @@ -46,30 +41,21 @@ pub struct OptimizedLMDGhost { /// The latest attestation targets as a map of validator index to block hash. 
//TODO: Could this be a fixed size vec latest_attestation_targets: HashMap, - /// Block storage access. - block_store: Arc>, - /// State storage access. - state_store: Arc>, + /// Block and state storage. + store: Arc, max_known_height: SlotHeight, _phantom: PhantomData, } -impl OptimizedLMDGhost -where - T: ClientDB + Sized, -{ - pub fn new( - block_store: Arc>, - state_store: Arc>, - ) -> Self { +impl OptimizedLMDGhost { + pub fn new(store: Arc) -> Self { OptimizedLMDGhost { cache: HashMap::new(), ancestors: vec![HashMap::new(); 16], latest_attestation_targets: HashMap::new(), children: HashMap::new(), max_known_height: SlotHeight::new(0), - block_store, - state_store, + store, _phantom: PhantomData, } } @@ -89,8 +75,8 @@ where let mut latest_votes: HashMap = HashMap::new(); // gets the current weighted votes let current_state: BeaconState = self - .state_store - .get_deserialized(&state_root)? + .store + .get(&state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = @@ -121,8 +107,8 @@ where // return None if we can't get the block from the db. let block_height = { let block_slot = self - .block_store - .get_deserialized(&block_hash) + .store + .get::(&block_hash) .ok()? .expect("Should have returned already if None") .slot; @@ -214,7 +200,7 @@ where } } -impl ForkChoice for OptimizedLMDGhost { +impl ForkChoice for OptimizedLMDGhost { fn add_block( &mut self, block: &BeaconBlock, @@ -223,8 +209,8 @@ impl ForkChoice for OptimizedLMDGhost { ) -> Result<(), ForkChoiceError> { // get the height of the parent let parent_height = self - .block_store - .get_deserialized(&block.previous_block_root)? + .store + .get::(&block.previous_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))? 
.slot .height(spec.genesis_slot); @@ -275,16 +261,16 @@ impl ForkChoice for OptimizedLMDGhost { trace!("Old attestation found: {:?}", attestation_target); // get the height of the target block let block_height = self - .block_store - .get_deserialized(&target_block_root)? + .store + .get::(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .slot .height(spec.genesis_slot); // get the height of the past target block let past_block_height = self - .block_store - .get_deserialized(&attestation_target)? + .store + .get::(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .slot .height(spec.genesis_slot); @@ -308,8 +294,8 @@ impl ForkChoice for OptimizedLMDGhost { justified_block_start ); let block = self - .block_store - .get_deserialized(&justified_block_start)? + .store + .get::(&justified_block_start)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; let block_slot = block.slot; @@ -400,8 +386,8 @@ impl ForkChoice for OptimizedLMDGhost { // didn't find head yet, proceed to next iteration // update block height block_height = self - .block_store - .get_deserialized(¤t_head)? + .store + .get::(¤t_head)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? 
.slot .height(spec.genesis_slot); diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index c9aaa70d1..888356417 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -1,44 +1,30 @@ -extern crate db; - use crate::{ForkChoice, ForkChoiceError}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, -}; use log::{debug, trace}; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot}; //TODO: Pruning and syncing -pub struct SlowLMDGhost { +pub struct SlowLMDGhost { /// The latest attestation targets as a map of validator index to block hash. //TODO: Could this be a fixed size vec latest_attestation_targets: HashMap, /// Stores the children for any given parent. children: HashMap>, - /// Block storage access. - block_store: Arc>, - /// State storage access. - state_store: Arc>, + /// Block and state storage. + store: Arc, _phantom: PhantomData, } -impl SlowLMDGhost -where - T: ClientDB + Sized, -{ - pub fn new( - block_store: Arc>, - state_store: Arc>, - ) -> Self { +impl SlowLMDGhost { + pub fn new(store: Arc) -> Self { SlowLMDGhost { latest_attestation_targets: HashMap::new(), children: HashMap::new(), - block_store, - state_store, + store, _phantom: PhantomData, } } @@ -58,8 +44,8 @@ where let mut latest_votes: HashMap = HashMap::new(); // gets the current weighted votes let current_state: BeaconState = self - .state_store - .get_deserialized(&state_root)? + .store + .get(state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = @@ -90,15 +76,15 @@ where ) -> Result { let mut count = 0; let block_slot = self - .block_store - .get_deserialized(&block_root)? + .store + .get::(&block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))? 
.slot; for (vote_hash, votes) in latest_votes.iter() { let (root_at_slot, _) = self - .block_store - .block_at_slot(&vote_hash, block_slot)? + .store + .get_block_at_preceeding_slot(*vote_hash, block_slot)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))?; if root_at_slot == *block_root { count += votes; @@ -108,7 +94,7 @@ where } } -impl ForkChoice for SlowLMDGhost { +impl ForkChoice for SlowLMDGhost { /// Process when a block is added fn add_block( &mut self, @@ -150,16 +136,16 @@ impl ForkChoice for SlowLMDGhost { trace!("Old attestation found: {:?}", attestation_target); // get the height of the target block let block_height = self - .block_store - .get_deserialized(&target_block_root)? + .store + .get::(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .slot .height(spec.genesis_slot); // get the height of the past target block let past_block_height = self - .block_store - .get_deserialized(&attestation_target)? + .store + .get::(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .slot .height(spec.genesis_slot); @@ -180,8 +166,8 @@ impl ForkChoice for SlowLMDGhost { ) -> Result { debug!("Running LMD Ghost Fork-choice rule"); let start = self - .block_store - .get_deserialized(&justified_block_start)? + .store + .get::(&justified_block_start)? 
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; let start_state_root = start.state_root; diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 067d39da4..0327e8cb3 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -1,26 +1,14 @@ #![cfg(not(debug_assertions))] // Tests the available fork-choice algorithms -extern crate beacon_chain; -extern crate bls; -extern crate db; -// extern crate env_logger; // for debugging -extern crate fork_choice; -extern crate hex; -extern crate log; -extern crate slot_clock; -extern crate types; -extern crate yaml_rust; - pub use beacon_chain::BeaconChain; use bls::Signature; -use db::stores::{BeaconBlockStore, BeaconStateStore}; -use db::MemoryDB; +use store::MemoryStore; +use store::Store; // use env_logger::{Builder, Env}; use fork_choice::{ BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost, }; -use ssz::ssz_encode; use std::collections::HashMap; use std::sync::Arc; use std::{fs::File, io::prelude::*, path::PathBuf}; @@ -106,7 +94,7 @@ fn test_yaml_vectors( // process the tests for test_case in test_cases { // setup a fresh test - let (mut fork_choice, block_store, state_root) = + let (mut fork_choice, store, state_root) = setup_inital_state(&fork_choice_algo, emulated_validators); // keep a hashmap of block_id's to block_hashes (random hashes to abstract block_id) @@ -149,9 +137,7 @@ fn test_yaml_vectors( }; // Store the block. 
- block_store - .put(&block_hash, &ssz_encode(&beacon_block)[..]) - .unwrap(); + store.put(&block_hash, &beacon_block).unwrap(); // run add block for fork choice if not genesis if parent_id != block_id { @@ -222,29 +208,26 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec { fn setup_inital_state( fork_choice_algo: &ForkChoiceAlgorithm, num_validators: usize, -) -> (Box, Arc>, Hash256) { - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); +) -> (Box, Arc, Hash256) { + let store = Arc::new(MemoryStore::open()); // the fork choice instantiation let fork_choice: Box = match fork_choice_algo { ForkChoiceAlgorithm::OptimizedLMDGhost => { - let f: OptimizedLMDGhost = - OptimizedLMDGhost::new(block_store.clone(), state_store.clone()); + let f: OptimizedLMDGhost = + OptimizedLMDGhost::new(store.clone()); Box::new(f) } ForkChoiceAlgorithm::BitwiseLMDGhost => { - let f: BitwiseLMDGhost = - BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + let f: BitwiseLMDGhost = + BitwiseLMDGhost::new(store.clone()); Box::new(f) } ForkChoiceAlgorithm::SlowLMDGhost => { - let f: SlowLMDGhost = - SlowLMDGhost::new(block_store.clone(), state_store.clone()); + let f: SlowLMDGhost = SlowLMDGhost::new(store.clone()); Box::new(f) } - ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())), + ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(store.clone())), }; let spec = FoundationEthSpec::spec(); @@ -255,12 +238,10 @@ fn setup_inital_state( let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); - state_store - .put(&state_root, &ssz_encode(&state)[..]) - .unwrap(); + store.put(&state_root, &state).unwrap(); // return initialised vars - (fork_choice, block_store, state_root) + (fork_choice, store, state_root) } // convert a block_id into a Hash256 -- assume input is hex encoded; diff 
--git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index d3e3526cf..fa42671d9 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -30,3 +30,6 @@ tree_hash = { path = "../utils/tree_hash" } tree_hash_derive = { path = "../utils/tree_hash_derive" } types = { path = "../types" } rayon = "1.0" + +[features] +fake_crypto = ["bls/fake_crypto"] \ No newline at end of file diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index e8b271f44..5f1a42033 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -22,7 +22,9 @@ pub use verify_transfer::{ execute_transfer, verify_transfer, verify_transfer_time_independent_only, }; +pub mod block_processing_builder; pub mod errors; +pub mod tests; mod validate_attestation; mod verify_attester_slashing; mod verify_deposit; diff --git a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs new file mode 100644 index 000000000..35e736d5f --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs @@ -0,0 +1,66 @@ +use tree_hash::SignedRoot; +use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; +use types::*; + +pub struct BlockProcessingBuilder { + pub state_builder: TestingBeaconStateBuilder, + pub block_builder: TestingBeaconBlockBuilder, + + pub num_validators: usize, +} + +impl BlockProcessingBuilder { + pub fn new(num_validators: usize, spec: &ChainSpec) -> Self { + let state_builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec); + let block_builder = TestingBeaconBlockBuilder::new(spec); + + Self { + state_builder, + block_builder, + num_validators: 0, + } + } + + pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) { + 
self.state_builder.teleport_to_slot(slot, &spec); + } + + pub fn build_caches(&mut self, spec: &ChainSpec) { + // Builds all caches; benches will not contain shuffling/committee building times. + self.state_builder.build_caches(&spec).unwrap(); + } + + pub fn build( + mut self, + randao_sk: Option, + previous_block_root: Option, + spec: &ChainSpec, + ) -> (BeaconBlock, BeaconState) { + let (state, keypairs) = self.state_builder.build(); + let builder = &mut self.block_builder; + + builder.set_slot(state.slot); + + match previous_block_root { + Some(root) => builder.set_previous_block_root(root), + None => builder.set_previous_block_root(Hash256::from_slice( + &state.latest_block_header.signed_root(), + )), + } + + let proposer_index = state + .get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec) + .unwrap(); + let keypair = &keypairs[proposer_index]; + + match randao_sk { + Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), + None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), + } + + let block = self.block_builder.build(&keypair.sk, &state.fork, spec); + + (block, state) + } +} diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs new file mode 100644 index 000000000..19418aba1 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -0,0 +1,112 @@ +#![cfg(all(test, not(feature = "fake_crypto")))] +use super::block_processing_builder::BlockProcessingBuilder; +use super::errors::*; +use crate::per_block_processing; +use tree_hash::SignedRoot; +use types::*; + +pub const VALIDATOR_COUNT: usize = 10; + +#[test] +fn valid_block_ok() { + let spec = FoundationEthSpec::spec(); + let builder = get_builder(&spec); + let (block, mut state) = builder.build(None, None, &spec); + + let result = per_block_processing(&mut state, &block, &spec); + + assert_eq!(result, Ok(())); +} + +#[test] +fn invalid_block_header_state_slot() { + let spec = 
FoundationEthSpec::spec(); + let builder = get_builder(&spec); + let (mut block, mut state) = builder.build(None, None, &spec); + + state.slot = Slot::new(133713); + block.slot = Slot::new(424242); + + let result = per_block_processing(&mut state, &block, &spec); + + assert_eq!( + result, + Err(BlockProcessingError::Invalid( + BlockInvalid::StateSlotMismatch + )) + ); +} + +#[test] +fn invalid_parent_block_root() { + let spec = FoundationEthSpec::spec(); + let builder = get_builder(&spec); + let invalid_parent_root = Hash256::from([0xAA; 32]); + let (block, mut state) = builder.build(None, Some(invalid_parent_root), &spec); + + let result = per_block_processing(&mut state, &block, &spec); + + assert_eq!( + result, + Err(BlockProcessingError::Invalid( + BlockInvalid::ParentBlockRootMismatch { + state: Hash256::from_slice(&state.latest_block_header.signed_root()), + block: block.previous_block_root + } + )) + ); +} + +#[test] +fn invalid_block_signature() { + let spec = FoundationEthSpec::spec(); + let builder = get_builder(&spec); + let (mut block, mut state) = builder.build(None, None, &spec); + + // sign the block with a keypair that is not the expected proposer + let keypair = Keypair::random(); + let message = block.signed_root(); + let epoch = block.slot.epoch(spec.slots_per_epoch); + let domain = spec.get_domain(epoch, Domain::BeaconBlock, &state.fork); + block.signature = Signature::new(&message, domain, &keypair.sk); + + // process block with invalid block signature + let result = per_block_processing(&mut state, &block, &spec); + + // should get a BadSignature error + assert_eq!( + result, + Err(BlockProcessingError::Invalid(BlockInvalid::BadSignature)) + ); +} + +#[test] +fn invalid_randao_reveal_signature() { + let spec = FoundationEthSpec::spec(); + let builder = get_builder(&spec); + + // sign randao reveal with random keypair + let keypair = Keypair::random(); + let (block, mut state) = builder.build(Some(keypair.sk), None, &spec); + + let result = 
per_block_processing(&mut state, &block, &spec); + + // should get a BadRandaoSignature error + assert_eq!( + result, + Err(BlockProcessingError::Invalid( + BlockInvalid::BadRandaoSignature + )) + ); +} + +fn get_builder(spec: &ChainSpec) -> (BlockProcessingBuilder) { + let mut builder = BlockProcessingBuilder::new(VALIDATOR_COUNT, &spec); + + // Set the state and block to be in the last slot of the 4th epoch. + let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + builder.set_slot(last_slot_of_epoch, &spec); + builder.build_caches(&spec); + + (builder) +} diff --git a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs index afaa7eebb..941ad8fdd 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs @@ -23,6 +23,11 @@ impl TestingBeaconBlockBuilder { } } + /// Set the previous block root + pub fn set_previous_block_root(&mut self, root: Hash256) { + self.block.previous_block_root = root; + } + /// Set the slot of the block. pub fn set_slot(&mut self, slot: Slot) { self.block.slot = slot; @@ -48,6 +53,11 @@ impl TestingBeaconBlockBuilder { self.block.body.randao_reveal = Signature::new(&message, domain, sk); } + /// Has the randao reveal been set? + pub fn randao_reveal_not_set(&mut self) -> bool { + self.block.body.randao_reveal.is_empty() + } + /// Inserts a signed, valid `ProposerSlashing` for the validator. 
pub fn insert_proposer_slashing( &mut self, diff --git a/eth2/utils/bls/src/fake_signature.rs b/eth2/utils/bls/src/fake_signature.rs index 0bf3d0a25..de16a05f3 100644 --- a/eth2/utils/bls/src/fake_signature.rs +++ b/eth2/utils/bls/src/fake_signature.rs @@ -14,6 +14,7 @@ use tree_hash::tree_hash_ssz_encoding_as_vector; #[derive(Debug, PartialEq, Clone, Eq)] pub struct FakeSignature { bytes: Vec, + is_empty: bool, } impl FakeSignature { @@ -26,6 +27,7 @@ impl FakeSignature { pub fn zero() -> Self { Self { bytes: vec![0; BLS_SIG_BYTE_SIZE], + is_empty: true, } } @@ -59,6 +61,7 @@ impl FakeSignature { } else { Ok(Self { bytes: bytes.to_vec(), + is_empty: false, }) } } @@ -71,6 +74,11 @@ impl FakeSignature { pub fn empty_signature() -> Self { FakeSignature::zero() } + + // Check for empty Signature + pub fn is_empty(&self) -> bool { + self.is_empty + } } impl_ssz!(FakeSignature, BLS_SIG_BYTE_SIZE, "FakeSignature");