Merge branch 'docker-env' into v0.6.1

Paul Hauner 2019-06-13 10:37:35 -04:00
commit a71f05066b
GPG Key ID: 303E4494BB28068C
142 changed files with 3186 additions and 2062 deletions

View File

@ -9,6 +9,7 @@ members = [
"eth2/utils/cached_tree_hash", "eth2/utils/cached_tree_hash",
"eth2/utils/compare_fields", "eth2/utils/compare_fields",
"eth2/utils/compare_fields_derive", "eth2/utils/compare_fields_derive",
"eth2/utils/eth2_config",
"eth2/utils/fixed_len_vec", "eth2/utils/fixed_len_vec",
"eth2/utils/hashing", "eth2/utils/hashing",
"eth2/utils/honey-badger-split", "eth2/utils/honey-badger-split",

View File

@ -12,3 +12,4 @@ slog-term = "^2.4.0"
slog-async = "^2.3.0" slog-async = "^2.3.0"
validator_client = { path = "../validator_client" } validator_client = { path = "../validator_client" }
types = { path = "../eth2/types" } types = { path = "../eth2/types" }
eth2_config = { path = "../eth2/utils/eth2_config" }

View File

@ -1,9 +1,13 @@
use bls::Keypair;
use clap::{App, Arg, SubCommand};
use slog::{debug, info, o, Drain}; use slog::{crit, debug, info, o, Drain};
use std::path::PathBuf;
use types::test_utils::generate_deterministic_keypair;
use validator_client::Config as ValidatorClientConfig;
use eth2_config::{get_data_dir};
pub const DEFAULT_DATA_DIR: &str = ".lighthouse-account-manager";
pub const CLIENT_CONFIG_FILENAME: &str = "account-manager-config.toml";
fn main() {
// Logging
@ -20,6 +24,7 @@ fn main() {
.arg(
Arg::with_name("datadir")
.long("datadir")
.short("d")
.value_name("DIR")
.help("Data directory for keys and databases.")
.takes_value(true),
@ -43,20 +48,98 @@ fn main() {
.help("The index of the validator, for which the test key is generated") .help("The index of the validator, for which the test key is generated")
.takes_value(true) .takes_value(true)
.required(true), .required(true),
), )
.arg(
Arg::with_name("validator count")
.long("validator_count")
.short("n")
.value_name("validator_count")
.help("If supplied along with `index`, generates keys `i..i + n`.")
.takes_value(true)
.default_value("1"),
)
) )
.get_matches();
let config = ValidatorClientConfig::parse_args(&matches, &log) let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) {
.expect("Unable to build a configuration for the account manager."); Ok(dir) => dir,
Err(e) => {
crit!(log, "Failed to initialize data dir"; "error" => format!("{:?}", e));
return
}
};
let mut client_config = ValidatorClientConfig::default();
if let Err(e) = client_config.apply_cli_args(&matches) {
crit!(log, "Failed to apply CLI args"; "error" => format!("{:?}", e));
return
};
// Ensure the `data_dir` in the config matches that supplied to the CLI.
client_config.data_dir = data_dir.clone();
// Update the client config with any CLI args.
match client_config.apply_cli_args(&matches) {
Ok(()) => (),
Err(s) => {
crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s);
return;
}
};
// Log configuration
info!(log, "";
"data_dir" => &config.data_dir.to_str()); "data_dir" => &client_config.data_dir.to_str());
match matches.subcommand() {
("generate", Some(_gen_m)) => { ("generate", Some(_)) => generate_random(&client_config, &log),
let keypair = Keypair::random(); ("generate_deterministic", Some(m)) => {
if let Some(string) = m.value_of("validator index") {
let i: usize = string.parse().expect("Invalid validator index");
if let Some(string) = m.value_of("validator count") {
let n: usize = string.parse().expect("Invalid end validator count");
let indices: Vec<usize> = (i..i + n).collect();
generate_deterministic_multiple(&indices, &client_config, &log)
} else {
generate_deterministic(i, &client_config, &log)
}
}
}
_ => panic!(
"The account manager must be run with a subcommand. See help for more information."
),
}
}
fn generate_random(config: &ValidatorClientConfig, log: &slog::Logger) {
save_key(&Keypair::random(), config, log)
}
fn generate_deterministic_multiple(
validator_indices: &[usize],
config: &ValidatorClientConfig,
log: &slog::Logger,
) {
for validator_index in validator_indices {
generate_deterministic(*validator_index, config, log)
}
}
fn generate_deterministic(
validator_index: usize,
config: &ValidatorClientConfig,
log: &slog::Logger,
) {
save_key(
&generate_deterministic_keypair(validator_index),
config,
log,
)
}
fn save_key(keypair: &Keypair, config: &ValidatorClientConfig, log: &slog::Logger) {
let key_path: PathBuf = config
.save_key(&keypair)
.expect("Unable to save newly generated private key.");
@ -67,25 +150,3 @@ fn main() {
key_path.to_string_lossy()
);
}
("generate_deterministic", Some(gen_d_matches)) => {
let validator_index = gen_d_matches
.value_of("validator index")
.expect("Validator index required.")
.parse::<u64>()
.expect("Invalid validator index.") as usize;
let keypair = generate_deterministic_keypair(validator_index);
let key_path: PathBuf = config
.save_key(&keypair)
.expect("Unable to save newly generated deterministic private key.");
debug!(
log,
"Deterministic Keypair generated {:?}, saved to: {:?}",
keypair.identifier(),
key_path.to_string_lossy()
);
}
_ => panic!(
"The account manager must be run with a subcommand. See help for more information."
),
}
}
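Note: the `generate_deterministic` subcommand above expands `--index i` and `--validator_count n` into the index range `i..i + n`. A minimal standalone sketch of that expansion (the helper name is hypothetical, not part of this diff):

// Hypothetical helper illustrating the index expansion used by the account manager:
// `--index 4 --validator_count 3` selects validators 4, 5 and 6.
fn expand_indices(index: usize, count: usize) -> Vec<usize> {
    (index..index + count).collect()
}

fn main() {
    assert_eq!(expand_indices(4, 3), vec![4, 5, 6]);
}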

View File

@ -5,11 +5,14 @@ authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com
edition = "2018" edition = "2018"
[dependencies] [dependencies]
eth2_config = { path = "../eth2/utils/eth2_config" }
types = { path = "../eth2/types" } types = { path = "../eth2/types" }
toml = "^0.5"
store = { path = "./store" } store = { path = "./store" }
client = { path = "client" } client = { path = "client" }
version = { path = "version" } version = { path = "version" }
clap = "2.32.0" clap = "2.32.0"
serde = "1.0"
slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] } slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] }
slog-term = "^2.4.0" slog-term = "^2.4.0"
slog-async = "^2.3.0" slog-async = "^2.3.0"

View File

@ -13,6 +13,7 @@ failure_derive = "0.1"
hashing = { path = "../../eth2/utils/hashing" }
fork_choice = { path = "../../eth2/fork_choice" }
parking_lot = "0.7"
prometheus = "^0.6"
log = "0.4"
operation_pool = { path = "../../eth2/operation_pool" }
env_logger = "0.6"
@ -21,6 +22,7 @@ serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
slot_clock = { path = "../../eth2/utils/slot_clock" } slot_clock = { path = "../../eth2/utils/slot_clock" }
ssz = { path = "../../eth2/utils/ssz" } ssz = { path = "../../eth2/utils/ssz" }
ssz_derive = { path = "../../eth2/utils/ssz_derive" }
state_processing = { path = "../../eth2/state_processing" } state_processing = { path = "../../eth2/state_processing" }
tree_hash = { path = "../../eth2/utils/tree_hash" } tree_hash = { path = "../../eth2/utils/tree_hash" }
types = { path = "../../eth2/types" } types = { path = "../../eth2/types" }

View File

@ -1,5 +1,8 @@
use crate::checkpoint::CheckPoint;
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use crate::iter::{BlockIterator, BlockRootsIterator};
use crate::metrics::Metrics;
use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY};
use fork_choice::{ForkChoice, ForkChoiceError};
use log::{debug, trace};
use operation_pool::DepositInsertStatus;
@ -12,7 +15,7 @@ use state_processing::per_block_processing::errors::{
};
use state_processing::{
per_block_processing, per_block_processing_without_verifying_block_signature,
per_slot_processing, BlockProcessingError, SlotProcessingError, per_slot_processing, BlockProcessingError,
};
use std::sync::Arc;
use store::{Error as DBError, Store};
@ -20,13 +23,11 @@ use tree_hash::TreeHash;
use types::*;
#[derive(Debug, PartialEq)]
pub enum ValidBlock { pub enum BlockProcessingOutcome {
/// The block was successfully processed. /// Block was valid and imported into the block graph.
Processed, Processed,
} /// The blocks parent_root is unknown.
ParentUnknown { parent: Hash256 },
#[derive(Debug, PartialEq)]
pub enum InvalidBlock {
/// The block slot is greater than the present slot. /// The block slot is greater than the present slot.
FutureSlot { FutureSlot {
present_slot: Slot, present_slot: Slot,
@ -34,68 +35,47 @@ pub enum InvalidBlock {
}, },
/// The block state_root does not match the generated state. /// The block state_root does not match the generated state.
StateRootMismatch, StateRootMismatch,
/// The blocks parent_root is unknown. /// The block was a genesis block, these blocks cannot be re-imported.
ParentUnknown, GenesisBlock,
/// There was an error whilst advancing the parent state to the present slot. This condition /// The slot is finalized, no need to import.
/// should not occur, it likely represents an internal error. FinalizedSlot,
SlotProcessingError(SlotProcessingError), /// Block is already known, no need to re-import.
BlockIsAlreadyKnown,
/// The block could not be applied to the state, it is invalid. /// The block could not be applied to the state, it is invalid.
PerBlockProcessingError(BlockProcessingError), PerBlockProcessingError(BlockProcessingError),
} }
#[derive(Debug, PartialEq)]
pub enum BlockProcessingOutcome {
/// The block was successfully validated.
ValidBlock(ValidBlock),
/// The block was not successfully validated.
InvalidBlock(InvalidBlock),
}
impl BlockProcessingOutcome {
/// Returns `true` if the block was objectively invalid and we should disregard the peer who
/// sent it.
pub fn is_invalid(&self) -> bool {
match self {
BlockProcessingOutcome::ValidBlock(_) => false,
BlockProcessingOutcome::InvalidBlock(r) => match r {
InvalidBlock::FutureSlot { .. } => true,
InvalidBlock::StateRootMismatch => true,
InvalidBlock::ParentUnknown => false,
InvalidBlock::SlotProcessingError(_) => false,
InvalidBlock::PerBlockProcessingError(e) => match e {
BlockProcessingError::Invalid(_) => true,
BlockProcessingError::BeaconStateError(_) => false,
},
},
}
}
/// Returns `true` if the block was successfully processed and can be removed from any import
/// queues or temporary storage.
pub fn sucessfully_processed(&self) -> bool {
match self {
BlockProcessingOutcome::ValidBlock(_) => true,
_ => false,
}
}
}
pub trait BeaconChainTypes {
type Store: store::Store;
type SlotClock: slot_clock::SlotClock;
type ForkChoice: fork_choice::ForkChoice; type ForkChoice: fork_choice::ForkChoice<Self::Store>;
type EthSpec: types::EthSpec;
}
/// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block
/// operations and chooses a canonical head.
pub struct BeaconChain<T: BeaconChainTypes> {
pub store: Arc<T::Store>,
pub slot_clock: T::SlotClock,
pub op_pool: OperationPool<T::EthSpec>,
canonical_head: RwLock<CheckPoint<T::EthSpec>>,
finalized_head: RwLock<CheckPoint<T::EthSpec>>,
pub state: RwLock<BeaconState<T::EthSpec>>,
pub spec: ChainSpec,
/// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB.
pub store: Arc<T::Store>,
/// Reports the current slot, typically based upon the system clock.
pub slot_clock: T::SlotClock,
/// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for
/// inclusion in a block.
pub op_pool: OperationPool<T::EthSpec>,
/// Stores a "snapshot" of the chain at the time the head-of-the-chain block was recieved.
canonical_head: RwLock<CheckPoint<T::EthSpec>>,
/// The same state from `self.canonical_head`, but updated at the start of each slot with a
/// skip slot if no block is received. This is effectively a cache that avoids repeating calls
/// to `per_slot_processing`.
state: RwLock<BeaconState<T::EthSpec>>,
/// The root of the genesis block.
genesis_block_root: Hash256,
/// A state-machine that is updated with information from the network and chooses a canonical
/// head block.
pub fork_choice: RwLock<T::ForkChoice>,
/// Stores metrics about this `BeaconChain`.
pub metrics: Metrics,
}
impl<T: BeaconChainTypes> BeaconChain<T> {
@ -111,18 +91,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let state_root = genesis_state.canonical_root();
store.put(&state_root, &genesis_state)?;
let block_root = genesis_block.block_header().canonical_root(); let genesis_block_root = genesis_block.block_header().canonical_root();
store.put(&block_root, &genesis_block)?; store.put(&genesis_block_root, &genesis_block)?;
// Also store the genesis block under the `ZERO_HASH` key.
let genesis_block_root = genesis_block.block_header().canonical_root();
store.put(&spec.zero_hash, &genesis_block)?;
let finalized_head = RwLock::new(CheckPoint::new(
genesis_block.clone(),
block_root,
genesis_state.clone(),
state_root,
));
let canonical_head = RwLock::new(CheckPoint::new(
genesis_block.clone(),
block_root, genesis_block_root,
genesis_state.clone(),
state_root,
));
@ -130,17 +108,65 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
genesis_state.build_all_caches(&spec)?;
Ok(Self {
spec,
store,
slot_clock,
op_pool: OperationPool::new(),
state: RwLock::new(genesis_state),
finalized_head,
canonical_head,
spec, genesis_block_root,
fork_choice: RwLock::new(fork_choice),
metrics: Metrics::new()?,
})
}
/// Attempt to load an existing instance from the given `store`.
pub fn from_store(
store: Arc<T::Store>,
spec: ChainSpec,
) -> Result<Option<BeaconChain<T>>, Error> {
let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
let p: PersistedBeaconChain<T> = match store.get(&key) {
Err(e) => return Err(e.into()),
Ok(None) => return Ok(None),
Ok(Some(p)) => p,
};
let slot_clock = T::SlotClock::new(
spec.genesis_slot,
p.state.genesis_time,
spec.seconds_per_slot,
);
let fork_choice = T::ForkChoice::new(store.clone());
Ok(Some(BeaconChain {
spec,
store,
slot_clock,
op_pool: OperationPool::default(),
canonical_head: RwLock::new(p.canonical_head),
state: RwLock::new(p.state),
fork_choice: RwLock::new(fork_choice),
genesis_block_root: p.genesis_block_root,
metrics: Metrics::new()?,
}))
}
/// Attempt to save this instance to `self.store`.
pub fn persist(&self) -> Result<(), Error> {
let p: PersistedBeaconChain<T> = PersistedBeaconChain {
canonical_head: self.canonical_head.read().clone(),
genesis_block_root: self.genesis_block_root,
state: self.state.read().clone(),
};
let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
self.store.put(&key, &p)?;
Ok(())
}
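Note: a sketch of how a caller might combine `from_store` and `persist`; the helper name and the genesis fallback are assumptions, not code from this commit.

// Sketch: resume a chain from the store if one was persisted earlier; otherwise the
// caller falls back to building a fresh chain from genesis (elided here).
fn load_or_genesis<T: BeaconChainTypes>(
    store: Arc<T::Store>,
    spec: ChainSpec,
) -> Result<Option<BeaconChain<T>>, Error> {
    // `from_store` returns `Ok(None)` when no `PersistedBeaconChain` exists yet.
    // The caller is expected to call `chain.persist()` before shutting down so the
    // head, state and genesis root survive a restart.
    BeaconChain::from_store(store, spec)
}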
/// Returns the beacon block body for each beacon block root in `roots`.
///
/// Fails if any root in `roots` does not have a corresponding block.
@ -149,7 +175,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.iter()
.map(|root| match self.get_block(root)? {
Some(block) => Ok(block.body),
None => Err(Error::DBInconsistent("Missing block".into())), None => Err(Error::DBInconsistent(format!("Missing block: {}", root))),
})
.collect();
@ -170,85 +196,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(headers?)
}
/// Iterate in reverse (highest to lowest slot) through all blocks from the block at `slot`
/// Returns `count `beacon block roots, starting from `start_slot` with an /// through to the genesis block.
/// interval of `skip` slots between each root.
/// ///
/// ## Errors: /// Returns `None` for headers prior to genesis or when there is an error reading from `Store`.
/// ///
/// - `SlotOutOfBounds`: Unable to return the full specified range. /// Contains duplicate headers when skip slots are encountered.
/// - `SlotOutOfBounds`: Unable to load a state from the DB. pub fn rev_iter_blocks(&self, slot: Slot) -> BlockIterator<T::EthSpec, T::Store> {
/// - `SlotOutOfBounds`: Start slot is higher than the first slot. BlockIterator::new(self.store.clone(), self.state.read().clone(), slot)
/// - Other: BeaconState` is inconsistent.
pub fn get_block_roots(
&self,
earliest_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<Hash256>, Error> {
let step_by = Slot::from(skip + 1);
let mut roots: Vec<Hash256> = vec![];
// The state for reading block roots. Will be updated with an older state if slots go too
// far back in history.
let mut state = self.state.read().clone();
// The final slot in this series, will be reduced by `skip` each loop iteration.
let mut slot = earliest_slot + Slot::from(count * (skip + 1)) - 1;
// If the highest slot requested is that of the current state insert the root of the
// head block, unless the head block's slot is not matching.
if slot == state.slot && self.head().beacon_block.slot == slot {
roots.push(self.head().beacon_block_root);
slot -= step_by;
} else if slot >= state.slot {
return Err(BeaconStateError::SlotOutOfBounds.into());
} }
loop { /// Iterates in reverse (highest to lowest slot) through all block roots from `slot` through to
// If the slot is within the range of the current state's block roots, append the root /// genesis.
// to the output vec. ///
// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
// If we get `SlotOutOfBounds` error, load the oldest available historic ///
// state from the DB. /// Contains duplicate roots when skip slots are encountered.
match state.get_block_root(slot) { pub fn rev_iter_block_roots(&self, slot: Slot) -> BlockRootsIterator<T::EthSpec, T::Store> {
Ok(root) => { BlockRootsIterator::new(self.store.clone(), self.state.read().clone(), slot)
if slot < earliest_slot {
break;
} else {
roots.push(*root);
slot -= step_by;
}
}
Err(BeaconStateError::SlotOutOfBounds) => {
// Read the earliest historic state in the current slot.
let earliest_historic_slot =
state.slot - Slot::from(T::EthSpec::slots_per_historical_root());
// Load the earlier state from disk.
let new_state_root = state.get_state_root(earliest_historic_slot)?;
// Break if the DB is unable to load the state.
state = match self.store.get(&new_state_root) {
Ok(Some(state)) => state,
_ => break,
}
}
Err(e) => return Err(e.into()),
};
}
// Return the results if they pass a sanity check.
if (slot <= earliest_slot) && (roots.len() == count) {
// Reverse the ordering of the roots. We extracted them in reverse order to make it
// simpler to lookup historic states.
//
// This is a potential optimisation target.
Ok(roots.iter().rev().cloned().collect())
} else {
Err(BeaconStateError::SlotOutOfBounds.into())
}
} }
/// Returns the block at the given root, if any. /// Returns the block at the given root, if any.
@ -260,44 +225,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(self.store.get(block_root)?)
}
/// Update the canonical head to some new values. /// Update the canonical head to `new_head`.
pub fn update_canonical_head( fn update_canonical_head(&self, new_head: CheckPoint<T::EthSpec>) -> Result<(), Error> {
&self, // Update the checkpoint that stores the head of the chain at the time it received the
new_beacon_block: BeaconBlock, // block.
new_beacon_block_root: Hash256, *self.canonical_head.write() = new_head;
new_beacon_state: BeaconState<T::EthSpec>,
new_beacon_state_root: Hash256,
) {
debug!(
"Updating canonical head with block at slot: {}",
new_beacon_block.slot
);
let mut head = self.canonical_head.write();
head.update(
new_beacon_block,
new_beacon_block_root,
new_beacon_state,
new_beacon_state_root,
);
}
/// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the // Update the always-at-the-present-slot state we keep around for performance gains.
/// fork-choice rule). *self.state.write() = {
/// let mut state = self.canonical_head.read().beacon_state.clone();
/// It is important to note that the `beacon_state` returned may not match the present slot. It
/// is the state as it was when the head block was received, which could be some slots prior to
/// now.
pub fn head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>> {
self.canonical_head.read()
}
/// Updates the canonical `BeaconState` with the supplied state.
///
/// Advances the chain forward to the present slot. This method is better than just setting
/// state and calling `catchup_state` as it will not result in an old state being installed and
/// then having it iteratively updated -- in such a case it's possible for another thread to
/// find the state at an old slot.
pub fn update_state(&self, mut state: BeaconState<T::EthSpec>) -> Result<(), Error> {
let present_slot = match self.slot_clock.present_slot() {
Ok(Some(slot)) => slot,
_ => return Err(Error::UnableToReadSlot),
@ -310,13 +247,40 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state.build_all_caches(&self.spec)?;
*self.state.write() = state; state
};
// Save `self` to `self.store`.
self.persist()?;
Ok(())
}
/// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been
/// updated to match the current slot clock.
pub fn current_state(&self) -> RwLockReadGuard<BeaconState<T::EthSpec>> {
self.state.read()
}
/// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the
/// fork-choice rule).
///
/// It is important to note that the `beacon_state` returned may not match the present slot. It
/// is the state as it was when the head block was received, which could be some slots prior to
/// now.
pub fn head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>> {
self.canonical_head.read()
}
/// Returns the slot of the highest block in the canonical chain.
pub fn best_slot(&self) -> Slot {
self.canonical_head.read().beacon_block.slot
}
/// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`.
pub fn catchup_state(&self) -> Result<(), Error> {
let spec = &self.spec;
let present_slot = match self.slot_clock.present_slot() {
Ok(Some(slot)) => slot,
_ => return Err(Error::UnableToReadSlot),
@ -327,12 +291,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If required, transition the new state to the present slot.
for _ in state.slot.as_u64()..present_slot.as_u64() {
// Ensure the next epoch state caches are built in case of an epoch transition.
state.build_committee_cache(RelativeEpoch::Next, &self.spec)?; state.build_committee_cache(RelativeEpoch::Next, spec)?;
per_slot_processing(&mut *state, &self.spec)?; per_slot_processing(&mut *state, spec)?;
}
state.build_all_caches(&self.spec)?; state.build_all_caches(spec)?;
Ok(())
}
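Note: a small sketch of how a per-slot task might drive `catchup_state`; the surrounding service, `chain` handle and logger are assumptions.

// Sketch: keep the cached state in step with the wall clock once per slot.
// A failure here usually means the slot clock could not be read (e.g. before genesis).
if let Err(e) = chain.catchup_state() {
    crit!(log, "State catch-up failed"; "error" => format!("{:?}", e));
}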
@ -346,29 +310,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(())
}
/// Update the justified head to some new values.
pub fn update_finalized_head(
&self,
new_beacon_block: BeaconBlock,
new_beacon_block_root: Hash256,
new_beacon_state: BeaconState<T::EthSpec>,
new_beacon_state_root: Hash256,
) {
let mut finalized_head = self.finalized_head.write();
finalized_head.update(
new_beacon_block,
new_beacon_block_root,
new_beacon_state,
new_beacon_state_root,
);
}
/// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen,
/// indirectly, by the fork-choice rule).
pub fn finalized_head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>> {
self.finalized_head.read()
}
/// Returns the validator index (if any) for the given public key.
///
/// Information is retrieved from the present `beacon_state.validator_registry`.
@ -407,13 +348,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// genesis.
pub fn slots_since_genesis(&self) -> Option<SlotHeight> {
let now = self.read_slot_clock()?;
let genesis_slot = self.spec.genesis_slot;
if now < self.spec.genesis_slot { if now < genesis_slot {
None None
} else { } else {
Some(SlotHeight::from( Some(SlotHeight::from(now.as_u64() - genesis_slot.as_u64()))
now.as_u64() - self.spec.genesis_slot.as_u64(),
))
} }
} }
@ -469,15 +409,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> { pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> {
trace!("BeaconChain::produce_attestation: shard: {}", shard); let slots_per_epoch = T::EthSpec::slots_per_epoch();
self.metrics.attestation_production_requests.inc();
let timer = self.metrics.attestation_production_times.start_timer();
let state = self.state.read();
let current_epoch_start_slot = self
.state
.read()
.slot
.epoch(self.spec.slots_per_epoch) .epoch(slots_per_epoch)
.start_slot(self.spec.slots_per_epoch); .start_slot(slots_per_epoch);
let target_root = if state.slot == current_epoch_start_slot {
// If we're on the first slot of the state's epoch.
@ -490,7 +434,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
*self
.state
.read()
.get_block_root(current_epoch_start_slot - self.spec.slots_per_epoch)? .get_block_root(current_epoch_start_slot - slots_per_epoch)?
}
} else {
// If we're not on the first slot of the epoch.
@ -500,6 +444,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let previous_crosslink_root =
Hash256::from_slice(&state.get_current_crosslink(shard)?.tree_hash_root());
self.metrics.attestation_production_successes.inc();
timer.observe_duration();
Ok(AttestationData {
beacon_block_root: self.head().beacon_block_root,
source_epoch: state.current_justified_epoch,
@ -520,8 +467,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError> {
self.op_pool self.metrics.attestation_processing_requests.inc();
.insert_attestation(attestation, &*self.state.read(), &self.spec) let timer = self.metrics.attestation_processing_times.start_timer();
let result = self
.op_pool
.insert_attestation(attestation, &*self.state.read(), &self.spec);
if result.is_ok() {
self.metrics.attestation_processing_successes.inc();
}
timer.observe_duration();
result
} }
/// Accept some deposit and queue it for inclusion in an appropriate block.
@ -567,19 +526,39 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
///
/// Will accept blocks from prior slots, however it will reject any block from a future slot.
pub fn process_block(&self, block: BeaconBlock) -> Result<BlockProcessingOutcome, Error> { pub fn process_block(&self, block: BeaconBlock) -> Result<BlockProcessingOutcome, Error> {
debug!("Processing block with slot {}...", block.slot); self.metrics.block_processing_requests.inc();
let timer = self.metrics.block_processing_times.start_timer();
let finalized_slot = self
.state
.read()
.finalized_epoch
.start_slot(T::EthSpec::slots_per_epoch());
if block.slot <= finalized_slot {
return Ok(BlockProcessingOutcome::FinalizedSlot);
}
if block.slot == 0 {
return Ok(BlockProcessingOutcome::GenesisBlock);
}
let block_root = block.block_header().canonical_root();
if block_root == self.genesis_block_root {
return Ok(BlockProcessingOutcome::GenesisBlock);
}
let present_slot = self.present_slot();
if block.slot > present_slot {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::FutureSlot {
InvalidBlock::FutureSlot {
present_slot, present_slot,
block_slot: block.slot, block_slot: block.slot,
}, });
)); }
if self.store.exists::<BeaconBlock>(&block_root)? {
return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown);
} }
// Load the block's parent block from the database, returning invalid if that block is not
@ -588,9 +567,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let parent_block: BeaconBlock = match self.store.get(&parent_block_root)? {
Some(previous_block_root) => previous_block_root,
None => {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::ParentUnknown {
InvalidBlock::ParentUnknown, parent: parent_block_root,
)); });
}
};
@ -608,50 +587,49 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Transition the parent state to the block slot.
let mut state: BeaconState<T::EthSpec> = parent_state;
for _ in state.slot.as_u64()..block.slot.as_u64() {
if let Err(e) = per_slot_processing(&mut state, &self.spec) { per_slot_processing(&mut state, &self.spec)?;
return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::SlotProcessingError(e),
));
}
} }
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
// Apply the received block to its parent state (which has been transitioned into this // Apply the received block to its parent state (which has been transitioned into this
// slot). // slot).
if let Err(e) = per_block_processing(&mut state, &block, &self.spec) { match per_block_processing(&mut state, &block, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock( Err(BlockProcessingError::BeaconStateError(e)) => {
InvalidBlock::PerBlockProcessingError(e), return Err(Error::BeaconStateError(e))
)); }
Err(e) => return Ok(BlockProcessingOutcome::PerBlockProcessingError(e)),
_ => {}
} }
let state_root = state.canonical_root();
if block.state_root != state_root {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::StateRootMismatch);
InvalidBlock::StateRootMismatch,
));
} }
// Store the block and state.
self.store.put(&block_root, &block)?;
self.store.put(&state_root, &state)?;
// run the fork_choice add_block logic // Register the new block with the fork choice service.
self.fork_choice
.write()
.add_block(&block, &block_root, &self.spec)?;
// If the parent block was the parent_block, automatically update the canonical head. // Execute the fork choice algorithm, enthroning a new head if discovered.
// //
// TODO: this is a first-in-best-dressed scenario that is not ideal; fork_choice should be // Note: in the future we may choose to run fork-choice less often, potentially based upon
// run instead. // some heuristic around number of attestations seen for the block.
if self.head().beacon_block_root == parent_block_root { self.fork_choice()?;
self.update_canonical_head(block.clone(), block_root, state.clone(), state_root);
// Update the canonical `BeaconState`. self.metrics.block_processing_successes.inc();
self.update_state(state)?; self.metrics
} .operations_per_block_attestation
.observe(block.body.attestations.len() as f64);
timer.observe_duration();
Ok(BlockProcessingOutcome::ValidBlock(ValidBlock::Processed)) Ok(BlockProcessingOutcome::Processed)
}
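Note: a sketch of how a consumer (e.g. the sync service) might branch on the flattened `BlockProcessingOutcome`; the reactions described in the comments are illustrative assumptions, and `chain` and `block` are assumed to be in scope.

match chain.process_block(block)? {
    // Imported into the block graph and handed to fork choice; nothing further to do.
    BlockProcessingOutcome::Processed => (),
    // Queue the block and request its missing ancestor from the network.
    BlockProcessingOutcome::ParentUnknown { parent: _ } => (),
    // Either our clock is behind or the peer sent a premature block.
    BlockProcessingOutcome::FutureSlot { present_slot: _, block_slot: _ } => (),
    // Safe to ignore; the block cannot change the canonical chain.
    BlockProcessingOutcome::FinalizedSlot
    | BlockProcessingOutcome::GenesisBlock
    | BlockProcessingOutcome::BlockIsAlreadyKnown => (),
    // Objectively invalid: a candidate for downscoring the sending peer.
    BlockProcessingOutcome::StateRootMismatch
    | BlockProcessingOutcome::PerBlockProcessingError(_) => (),
}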
/// Produce a new block at the present slot.
@ -663,6 +641,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState<T::EthSpec>), BlockProductionError> {
debug!("Producing block at slot {}...", self.state.read().slot);
self.metrics.block_production_requests.inc();
let timer = self.metrics.block_production_times.start_timer();
let mut state = self.state.read().clone();
@ -670,9 +650,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
trace!("Finding attestations for new block..."); trace!("Finding attestations for new block...");
let previous_block_root = *state let previous_block_root = if state.slot > 0 {
*state
.get_block_root(state.slot - 1) .get_block_root(state.slot - 1)
.map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?; .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?
} else {
state.latest_block_header.canonical_root()
};
let (proposer_slashings, attester_slashings) =
self.op_pool.get_slashings(&*self.state.read(), &self.spec);
@ -716,35 +700,63 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block.state_root = state_root;
self.metrics.block_production_successes.inc();
timer.observe_duration();
Ok((block, state))
}
// TODO: Left this as is, modify later /// Execute the fork choice algorithm and enthrone the result as the canonical head.
pub fn fork_choice(&self) -> Result<(), Error> { pub fn fork_choice(&self) -> Result<(), Error> {
let present_head = self.finalized_head().beacon_block_root; self.metrics.fork_choice_requests.inc();
let new_head = self // Start fork choice metrics timer.
let timer = self.metrics.fork_choice_times.start_timer();
let justified_root = {
let root = self.head().beacon_state.current_justified_root;
if root == self.spec.zero_hash {
self.genesis_block_root
} else {
root
}
};
// Determine the root of the block that is the head of the chain.
let beacon_block_root = self
.fork_choice .fork_choice
.write() .write()
.find_head(&present_head, &self.spec)?; .find_head(&justified_root, &self.spec)?;
if new_head != present_head { // End fork choice metrics timer.
let block: BeaconBlock = self timer.observe_duration();
// If a new head was chosen.
if beacon_block_root != self.head().beacon_block_root {
self.metrics.fork_choice_changed_head.inc();
let beacon_block: BeaconBlock = self
.store .store
.get(&new_head)? .get(&beacon_block_root)?
.ok_or_else(|| Error::MissingBeaconBlock(new_head))?; .ok_or_else(|| Error::MissingBeaconBlock(beacon_block_root))?;
let block_root = block.canonical_root();
let state: BeaconState<T::EthSpec> = self let beacon_state_root = beacon_block.state_root;
let beacon_state: BeaconState<T::EthSpec> = self
.store .store
.get(&block.state_root)? .get(&beacon_state_root)?
.ok_or_else(|| Error::MissingBeaconState(block.state_root))?; .ok_or_else(|| Error::MissingBeaconState(beacon_state_root))?;
let state_root = state.canonical_root();
self.update_canonical_head(block, block_root, state.clone(), state_root); // If we switched to a new chain (instead of building atop the present chain).
if self.head().beacon_block_root != beacon_block.previous_block_root {
self.metrics.fork_choice_reorg_count.inc();
};
// Update the canonical `BeaconState`. self.update_canonical_head(CheckPoint {
self.update_state(state)?; beacon_block,
beacon_block_root,
beacon_state,
beacon_state_root,
})?;
} }
Ok(()) Ok(())

View File

@ -1,9 +1,10 @@
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode};
use types::{BeaconBlock, BeaconState, EthSpec, Hash256};
/// Represents some block and its associated state. Generally, this will be used for tracking the
/// head, justified head and finalized head.
#[derive(Clone, Serialize, PartialEq, Debug)] #[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)]
pub struct CheckPoint<E: EthSpec> {
pub beacon_block: BeaconBlock,
pub beacon_block_root: Hash256,

View File

@ -1,3 +1,4 @@
use crate::metrics::Error as MetricsError;
use fork_choice::ForkChoiceError;
use state_processing::BlockProcessingError;
use state_processing::SlotProcessingError;
@ -25,10 +26,17 @@ pub enum BeaconChainError {
MissingBeaconBlock(Hash256),
MissingBeaconState(Hash256),
SlotProcessingError(SlotProcessingError),
MetricsError(String),
}
easy_from_to!(SlotProcessingError, BeaconChainError);
impl From<MetricsError> for BeaconChainError {
fn from(e: MetricsError) -> BeaconChainError {
BeaconChainError::MetricsError(format!("{:?}", e))
}
}
#[derive(Debug, PartialEq)]
pub enum BlockProductionError {
UnableToGetBlockRootFromState,

View File

@ -0,0 +1,133 @@
use std::sync::Arc;
use store::Store;
use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot};
/// Extends `BlockRootsIterator`, returning `BeaconBlock` instances, instead of their roots.
pub struct BlockIterator<T: EthSpec, U> {
roots: BlockRootsIterator<T, U>,
}
impl<T: EthSpec, U: Store> BlockIterator<T, U> {
/// Create a new iterator over all blocks in the given `beacon_state` and prior states.
pub fn new(store: Arc<U>, beacon_state: BeaconState<T>, start_slot: Slot) -> Self {
Self {
roots: BlockRootsIterator::new(store, beacon_state, start_slot),
}
}
}
impl<T: EthSpec, U: Store> Iterator for BlockIterator<T, U> {
type Item = BeaconBlock;
fn next(&mut self) -> Option<Self::Item> {
let root = self.roots.next()?;
self.roots.store.get(&root).ok()?
}
}
/// Iterates backwards through block roots.
///
/// Uses the `latest_block_roots` field of `BeaconState` as the source of block roots and will
/// perform a lookup on the `Store` for a prior `BeaconState` if `latest_block_roots` has been
/// exhausted.
///
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
pub struct BlockRootsIterator<T: EthSpec, U> {
store: Arc<U>,
beacon_state: BeaconState<T>,
slot: Slot,
}
impl<T: EthSpec, U: Store> BlockRootsIterator<T, U> {
/// Create a new iterator over all block roots in the given `beacon_state` and prior states.
pub fn new(store: Arc<U>, beacon_state: BeaconState<T>, start_slot: Slot) -> Self {
Self {
slot: start_slot,
beacon_state,
store,
}
}
}
impl<T: EthSpec, U: Store> Iterator for BlockRootsIterator<T, U> {
type Item = Hash256;
fn next(&mut self) -> Option<Self::Item> {
if (self.slot == 0) || (self.slot > self.beacon_state.slot) {
return None;
}
self.slot -= 1;
match self.beacon_state.get_block_root(self.slot) {
Ok(root) => Some(*root),
Err(BeaconStateError::SlotOutOfBounds) => {
// Read a `BeaconState` from the store that has access to prior historical root.
self.beacon_state = {
// Load the earlier state from disk. Skip forward one slot, because a state
// doesn't return its own state root.
let new_state_root = self.beacon_state.get_state_root(self.slot + 1).ok()?;
self.store.get(&new_state_root).ok()?
}?;
self.beacon_state.get_block_root(self.slot).ok().cloned()
}
_ => None,
}
}
}
#[cfg(test)]
mod test {
use super::*;
use store::MemoryStore;
use types::{test_utils::TestingBeaconStateBuilder, Keypair, MainnetEthSpec};
fn get_state<T: EthSpec>() -> BeaconState<T> {
let builder = TestingBeaconStateBuilder::from_single_keypair(
0,
&Keypair::random(),
&T::default_spec(),
);
let (state, _keypairs) = builder.build();
state
}
#[test]
fn root_iter() {
let store = Arc::new(MemoryStore::open());
let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();
let mut state_a: BeaconState<MainnetEthSpec> = get_state();
let mut state_b: BeaconState<MainnetEthSpec> = get_state();
state_a.slot = Slot::from(slots_per_historical_root);
state_b.slot = Slot::from(slots_per_historical_root * 2);
let mut hashes = (0..).into_iter().map(|i| Hash256::from(i));
for root in &mut state_a.latest_block_roots[..] {
*root = hashes.next().unwrap()
}
for root in &mut state_b.latest_block_roots[..] {
*root = hashes.next().unwrap()
}
let state_a_root = hashes.next().unwrap();
state_b.latest_state_roots[0] = state_a_root;
store.put(&state_a_root, &state_a).unwrap();
let iter = BlockRootsIterator::new(store.clone(), state_b.clone(), state_b.slot - 1);
let mut collected: Vec<Hash256> = iter.collect();
collected.reverse();
let expected_len = 2 * MainnetEthSpec::slots_per_historical_root() - 1;
assert_eq!(collected.len(), expected_len);
for i in 0..expected_len {
assert_eq!(collected[i], Hash256::from(i as u64));
}
}
}
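Note: a usage sketch of the new reverse iterators, which replace the removed `get_block_roots` range query; `chain` is an assumed `BeaconChain<T>` handle.

// Walk the canonical chain backwards from the head, newest root first. Skip slots
// repeat the previous root, mirroring `latest_block_roots` semantics.
let head_slot = chain.best_slot();
for root in chain.rev_iter_block_roots(head_slot).take(32) {
    println!("ancestor block root: {:?}", root);
}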

View File

@ -1,10 +1,11 @@
mod beacon_chain;
mod checkpoint;
mod errors;
pub mod iter;
mod metrics;
mod persisted_beacon_chain;
pub use self::beacon_chain::{ pub use self::beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock, ValidBlock,
};
pub use self::checkpoint::CheckPoint;
pub use self::errors::{BeaconChainError, BlockProductionError};
pub use fork_choice;

View File

@ -0,0 +1,143 @@
pub use prometheus::Error;
use prometheus::{Histogram, HistogramOpts, IntCounter, Opts, Registry};
pub struct Metrics {
pub block_processing_requests: IntCounter,
pub block_processing_successes: IntCounter,
pub block_processing_times: Histogram,
pub block_production_requests: IntCounter,
pub block_production_successes: IntCounter,
pub block_production_times: Histogram,
pub attestation_production_requests: IntCounter,
pub attestation_production_successes: IntCounter,
pub attestation_production_times: Histogram,
pub attestation_processing_requests: IntCounter,
pub attestation_processing_successes: IntCounter,
pub attestation_processing_times: Histogram,
pub fork_choice_requests: IntCounter,
pub fork_choice_changed_head: IntCounter,
pub fork_choice_reorg_count: IntCounter,
pub fork_choice_times: Histogram,
pub operations_per_block_attestation: Histogram,
}
impl Metrics {
pub fn new() -> Result<Self, Error> {
Ok(Self {
block_processing_requests: {
let opts = Opts::new("block_processing_requests", "total_blocks_processed");
IntCounter::with_opts(opts)?
},
block_processing_successes: {
let opts = Opts::new("block_processing_successes", "total_valid_blocks_processed");
IntCounter::with_opts(opts)?
},
block_processing_times: {
let opts = HistogramOpts::new("block_processing_times", "block_processing_time");
Histogram::with_opts(opts)?
},
block_production_requests: {
let opts = Opts::new("block_production_requests", "attempts_to_produce_new_block");
IntCounter::with_opts(opts)?
},
block_production_successes: {
let opts = Opts::new("block_production_successes", "blocks_successfully_produced");
IntCounter::with_opts(opts)?
},
block_production_times: {
let opts = HistogramOpts::new("block_production_times", "block_production_time");
Histogram::with_opts(opts)?
},
attestation_production_requests: {
let opts = Opts::new(
"attestation_production_requests",
"total_attestation_production_requests",
);
IntCounter::with_opts(opts)?
},
attestation_production_successes: {
let opts = Opts::new(
"attestation_production_successes",
"total_attestation_production_successes",
);
IntCounter::with_opts(opts)?
},
attestation_production_times: {
let opts = HistogramOpts::new(
"attestation_production_times",
"attestation_production_time",
);
Histogram::with_opts(opts)?
},
attestation_processing_requests: {
let opts = Opts::new(
"attestation_processing_requests",
"total_attestation_processing_requests",
);
IntCounter::with_opts(opts)?
},
attestation_processing_successes: {
let opts = Opts::new(
"attestation_processing_successes",
"total_attestation_processing_successes",
);
IntCounter::with_opts(opts)?
},
attestation_processing_times: {
let opts = HistogramOpts::new(
"attestation_processing_times",
"attestation_processing_time",
);
Histogram::with_opts(opts)?
},
fork_choice_requests: {
let opts = Opts::new("fork_choice_requests", "total_times_fork_choice_called");
IntCounter::with_opts(opts)?
},
fork_choice_changed_head: {
let opts = Opts::new(
"fork_choice_changed_head",
"total_times_fork_choice_chose_a_new_head",
);
IntCounter::with_opts(opts)?
},
fork_choice_reorg_count: {
let opts = Opts::new("fork_choice_reorg_count", "number_of_reorgs");
IntCounter::with_opts(opts)?
},
fork_choice_times: {
let opts = HistogramOpts::new("fork_choice_time", "total_time_to_run_fork_choice");
Histogram::with_opts(opts)?
},
operations_per_block_attestation: {
let opts = HistogramOpts::new(
"operations_per_block_attestation",
"count_of_attestations_per_block",
);
Histogram::with_opts(opts)?
},
})
}
pub fn register(&self, registry: &Registry) -> Result<(), Error> {
registry.register(Box::new(self.block_processing_requests.clone()))?;
registry.register(Box::new(self.block_processing_successes.clone()))?;
registry.register(Box::new(self.block_processing_times.clone()))?;
registry.register(Box::new(self.block_production_requests.clone()))?;
registry.register(Box::new(self.block_production_successes.clone()))?;
registry.register(Box::new(self.block_production_times.clone()))?;
registry.register(Box::new(self.attestation_production_requests.clone()))?;
registry.register(Box::new(self.attestation_production_successes.clone()))?;
registry.register(Box::new(self.attestation_production_times.clone()))?;
registry.register(Box::new(self.attestation_processing_requests.clone()))?;
registry.register(Box::new(self.attestation_processing_successes.clone()))?;
registry.register(Box::new(self.attestation_processing_times.clone()))?;
registry.register(Box::new(self.fork_choice_requests.clone()))?;
registry.register(Box::new(self.fork_choice_changed_head.clone()))?;
registry.register(Box::new(self.fork_choice_reorg_count.clone()))?;
registry.register(Box::new(self.fork_choice_times.clone()))?;
registry.register(Box::new(self.operations_per_block_attestation.clone()))?;
Ok(())
}
}
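Note: a sketch of how an HTTP endpoint might expose these metrics in the Prometheus text format; the function and its caller are assumptions, while `Registry`, `TextEncoder` and `gather` are standard `prometheus` crate APIs.

use prometheus::{Encoder, Registry, TextEncoder};

// Sketch: render every metric registered above as the Prometheus text exposition format.
fn render_metrics(metrics: &Metrics) -> Result<String, Error> {
    let registry = Registry::new();
    metrics.register(&registry)?;

    let mut buffer = vec![];
    TextEncoder::new()
        .encode(&registry.gather(), &mut buffer)
        .expect("encoding into an in-memory buffer should not fail");

    Ok(String::from_utf8(buffer).expect("Prometheus text output is valid UTF-8"))
}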

View File

@ -0,0 +1,30 @@
use crate::{BeaconChainTypes, CheckPoint};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use store::{DBColumn, Error as StoreError, StoreItem};
use types::{BeaconState, Hash256};
/// 32-byte key for accessing the `PersistedBeaconChain`.
pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA";
#[derive(Encode, Decode)]
pub struct PersistedBeaconChain<T: BeaconChainTypes> {
pub canonical_head: CheckPoint<T::EthSpec>,
// TODO: operations pool.
pub genesis_block_root: Hash256,
pub state: BeaconState<T::EthSpec>,
}
impl<T: BeaconChainTypes> StoreItem for PersistedBeaconChain<T> {
fn db_column() -> DBColumn {
DBColumn::BeaconChain
}
fn as_store_bytes(&self) -> Vec<u8> {
self.as_ssz_bytes()
}
fn from_store_bytes(bytes: &mut [u8]) -> Result<Self, StoreError> {
Self::from_ssz_bytes(bytes).map_err(Into::into)
}
}
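Note: `BEACON_CHAIN_DB_KEY` doubles as a fixed `Hash256` database key (see `BeaconChain::persist` and `from_store` above), so the string must be exactly 32 bytes. A tiny standalone check of that invariant:

// Standalone sanity check: the key string is reinterpreted as a 32-byte hash.
fn main() {
    const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA";
    assert_eq!(BEACON_CHAIN_DB_KEY.as_bytes().len(), 32);
}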

View File

@ -11,9 +11,13 @@ store = { path = "../store" }
http_server = { path = "../http_server" }
rpc = { path = "../rpc" }
fork_choice = { path = "../../eth2/fork_choice" }
prometheus = "^0.6"
types = { path = "../../eth2/types" }
tree_hash = { path = "../../eth2/utils/tree_hash" }
eth2_config = { path = "../../eth2/utils/eth2_config" }
slot_clock = { path = "../../eth2/utils/slot_clock" }
serde = "1.0"
serde_derive = "1.0"
error-chain = "0.12.0"
slog = "^2.2.3"
ssz = { path = "../../eth2/utils/ssz" }

View File

@ -1,99 +1,81 @@
use crate::ClientConfig;
use beacon_chain::{
fork_choice::BitwiseLMDGhost, fork_choice::OptimizedLMDGhost, slot_clock::SystemTimeSlotClock, store::Store, BeaconChain,
slot_clock::SystemTimeSlotClock, BeaconChainTypes,
store::{DiskStore, MemoryStore, Store},
BeaconChain, BeaconChainTypes,
}; };
use fork_choice::ForkChoice;
use slog::{info, Logger};
use slot_clock::SlotClock;
use std::marker::PhantomData;
use std::sync::Arc;
use tree_hash::TreeHash;
use types::{ use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, ChainSpec, EthSpec, Hash256};
test_utils::TestingBeaconStateBuilder, BeaconBlock, EthSpec, FewValidatorsEthSpec, Hash256,
}; /// The number of initial validators when starting the `Minimal` testnet.
const TESTNET_VALIDATOR_COUNT: usize = 16;
/// Provides a new, initialized `BeaconChain`
pub trait InitialiseBeaconChain<T: BeaconChainTypes> {
fn initialise_beacon_chain(config: &ClientConfig) -> BeaconChain<T>; fn initialise_beacon_chain(
store: Arc<T::Store>,
spec: ChainSpec,
log: Logger,
) -> BeaconChain<T> {
maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, spec, log)
}
} }
/// A testnet-suitable BeaconChainType, using `MemoryStore`.
#[derive(Clone)]
pub struct TestnetMemoryBeaconChainTypes; pub struct ClientType<S: Store, E: EthSpec> {
_phantom_t: PhantomData<S>,
_phantom_u: PhantomData<E>,
}
impl BeaconChainTypes for TestnetMemoryBeaconChainTypes { impl<S: Store, E: EthSpec + Clone> BeaconChainTypes for ClientType<S, E> {
type Store = MemoryStore; type Store = S;
type SlotClock = SystemTimeSlotClock; type SlotClock = SystemTimeSlotClock;
type ForkChoice = BitwiseLMDGhost<Self::Store, Self::EthSpec>; type ForkChoice = OptimizedLMDGhost<S, E>;
type EthSpec = FewValidatorsEthSpec; type EthSpec = E;
} }
impl<T: Store, E: EthSpec, X: BeaconChainTypes> InitialiseBeaconChain<X> for ClientType<T, E> {}
impl<T> InitialiseBeaconChain<T> for TestnetMemoryBeaconChainTypes /// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis.
fn maybe_load_from_store_for_testnet<T, U: Store, V: EthSpec>(
store: Arc<U>,
spec: ChainSpec,
log: Logger,
) -> BeaconChain<T>
where
T: BeaconChainTypes< T: BeaconChainTypes<Store = U>,
Store = MemoryStore, T::ForkChoice: ForkChoice<U>,
SlotClock = SystemTimeSlotClock,
ForkChoice = BitwiseLMDGhost<MemoryStore, FewValidatorsEthSpec>,
>,
{ {
fn initialise_beacon_chain(_config: &ClientConfig) -> BeaconChain<T> { if let Ok(Some(beacon_chain)) = BeaconChain::from_store(store.clone(), spec.clone()) {
initialize_chain(MemoryStore::open()) info!(
} log,
} "Loaded BeaconChain from store";
"slot" => beacon_chain.head().beacon_state.slot,
"best_slot" => beacon_chain.best_slot(),
);
/// A testnet-suitable BeaconChainType, using `DiskStore`. beacon_chain
#[derive(Clone)] } else {
pub struct TestnetDiskBeaconChainTypes; info!(log, "Initializing new BeaconChain from genesis");
let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(
impl BeaconChainTypes for TestnetDiskBeaconChainTypes { TESTNET_VALIDATOR_COUNT,
type Store = DiskStore; &spec,
type SlotClock = SystemTimeSlotClock; );
type ForkChoice = BitwiseLMDGhost<Self::Store, Self::EthSpec>;
type EthSpec = FewValidatorsEthSpec;
}
impl<T> InitialiseBeaconChain<T> for TestnetDiskBeaconChainTypes
where
T: BeaconChainTypes<
Store = DiskStore,
SlotClock = SystemTimeSlotClock,
ForkChoice = BitwiseLMDGhost<DiskStore, FewValidatorsEthSpec>,
>,
{
fn initialise_beacon_chain(config: &ClientConfig) -> BeaconChain<T> {
let store = DiskStore::open(&config.db_name).expect("Unable to open DB.");
initialize_chain(store)
}
}
/// Produces a `BeaconChain` given some pre-initialized `Store`.
fn initialize_chain<T, U: Store, V: EthSpec>(store: U) -> BeaconChain<T>
where
T: BeaconChainTypes<
Store = U,
SlotClock = SystemTimeSlotClock,
ForkChoice = BitwiseLMDGhost<U, V>,
>,
{
let spec = T::EthSpec::spec();
let store = Arc::new(store);
let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, &spec);
let (genesis_state, _keypairs) = state_builder.build();
let mut genesis_block = BeaconBlock::empty(&spec);
genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());
// Slot clock
let slot_clock = SystemTimeSlotClock::new( let slot_clock = T::SlotClock::new(
spec.genesis_slot,
genesis_state.genesis_time,
spec.seconds_per_slot,
) );
.expect("Unable to load SystemTimeSlotClock");
// Choose the fork choice
let fork_choice = BitwiseLMDGhost::new(store.clone()); let fork_choice = T::ForkChoice::new(store.clone());
// Genesis chain
//TODO: Handle error correctly
@ -102,8 +84,9 @@ where
slot_clock,
genesis_state,
genesis_block,
spec.clone(), spec,
fork_choice,
)
.expect("Terminate if beacon chain generation fails")
} }
}
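Note: a sketch of instantiating the new generic `ClientType`; the concrete store and spec chosen here are illustrative assumptions (the binary selects them from `ClientConfig` at runtime).

// Sketch: one concrete combination satisfying `BeaconChainTypes` via `ClientType`.
use store::MemoryStore;
use types::MainnetEthSpec;

type InMemoryClient = ClientType<MemoryStore, MainnetEthSpec>;
// `InMemoryClient::Store` is `MemoryStore`, its fork choice is
// `OptimizedLMDGhost<MemoryStore, MainnetEthSpec>`, and its slot clock is the
// system-time clock, as fixed by the `BeaconChainTypes` impl above.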

View File

@ -1,151 +1,67 @@
use clap::ArgMatches;
use fork_choice::ForkChoiceAlgorithm;
use http_server::HttpServerConfig;
use network::NetworkConfig; use serde_derive::{Deserialize, Serialize};
use slog::error; use serde_derive::{Deserialize, Serialize};
use std::fs;
use std::net::SocketAddr;
use std::net::{IpAddr, Ipv4Addr};
use std::path::PathBuf; use std::path::PathBuf;
use types::multiaddr::Protocol;
use types::multiaddr::ToMultiaddr;
use types::Multiaddr;
use types::{ChainSpec, EthSpec, LighthouseTestnetEthSpec};
#[derive(Debug, Clone)] /// The core configuration of a Lighthouse beacon node.
pub enum DBType { #[derive(Debug, Clone, Serialize, Deserialize)]
Memory,
Disk,
}
/// Stores the client configuration for this Lighthouse instance.
#[derive(Debug, Clone)]
pub struct ClientConfig { pub struct ClientConfig {
pub data_dir: PathBuf, pub data_dir: PathBuf,
pub spec: ChainSpec, pub db_type: String,
pub net_conf: network::NetworkConfig, db_name: String,
pub fork_choice: ForkChoiceAlgorithm, pub network: network::NetworkConfig,
pub db_type: DBType, pub rpc: rpc::RPCConfig,
pub db_name: PathBuf, pub http: HttpServerConfig,
pub rpc_conf: rpc::RPCConfig,
pub http_conf: HttpServerConfig, //pub ipc_conf:
} }
impl Default for ClientConfig { impl Default for ClientConfig {
/// Build a new lighthouse configuration from defaults.
fn default() -> Self { fn default() -> Self {
let data_dir = {
let home = dirs::home_dir().expect("Unable to determine home dir.");
home.join(".lighthouse/")
};
fs::create_dir_all(&data_dir)
.unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
let default_spec = LighthouseTestnetEthSpec::spec();
let default_net_conf = NetworkConfig::new(default_spec.boot_nodes.clone());
Self { Self {
data_dir: data_dir.clone(), data_dir: PathBuf::from(".lighthouse"),
// default to foundation for chain specs db_type: "disk".to_string(),
spec: default_spec, db_name: "chain_db".to_string(),
net_conf: default_net_conf, // Note: there are no default bootnodes specified.
// default to bitwise LMD Ghost // Once bootnodes are established, add them here.
fork_choice: ForkChoiceAlgorithm::BitwiseLMDGhost, network: NetworkConfig::new(vec![]),
// default to memory db for now rpc: rpc::RPCConfig::default(),
db_type: DBType::Memory, http: HttpServerConfig::default(),
// default db name for disk-based dbs
db_name: data_dir.join("chain_db"),
rpc_conf: rpc::RPCConfig::default(),
http_conf: HttpServerConfig::default(),
} }
} }
} }
impl ClientConfig { impl ClientConfig {
/// Parses the CLI arguments into a `Config` struct. /// Returns the path to which the client may initialize an on-disk database.
pub fn parse_args(args: ArgMatches, log: &slog::Logger) -> Result<Self, &'static str> { pub fn db_path(&self) -> Option<PathBuf> {
let mut config = ClientConfig::default(); self.data_dir()
.and_then(|path| Some(path.join(&self.db_name)))
/* Network related arguments */
// Custom p2p listen port
if let Some(port_str) = args.value_of("port") {
if let Ok(port) = port_str.parse::<u16>() {
config.net_conf.listen_port = port;
// update the listening multiaddrs
for address in &mut config.net_conf.listen_addresses {
address.pop();
address.append(Protocol::Tcp(port));
}
} else {
error!(log, "Invalid port"; "port" => port_str);
return Err("Invalid port");
}
}
// Custom listening address ipv4/ipv6
// TODO: Handle list of addresses
if let Some(listen_address_str) = args.value_of("listen-address") {
if let Ok(listen_address) = listen_address_str.parse::<IpAddr>() {
let multiaddr = SocketAddr::new(listen_address, config.net_conf.listen_port)
.to_multiaddr()
.expect("Invalid listen address format");
config.net_conf.listen_addresses = vec![multiaddr];
} else {
error!(log, "Invalid IP Address"; "Address" => listen_address_str);
return Err("Invalid IP Address");
}
} }
// Custom bootnodes /// Returns the core path for the client.
if let Some(boot_addresses_str) = args.value_of("boot-nodes") { pub fn data_dir(&self) -> Option<PathBuf> {
let mut boot_addresses_split = boot_addresses_str.split(","); let path = dirs::home_dir()?.join(&self.data_dir);
for boot_address in boot_addresses_split { fs::create_dir_all(&path).ok()?;
if let Ok(boot_address) = boot_address.parse::<Multiaddr>() { Some(path)
config.net_conf.boot_nodes.append(&mut vec![boot_address]);
} else {
error!(log, "Invalid Bootnode multiaddress"; "Multiaddr" => boot_addresses_str);
return Err("Invalid IP Address");
}
}
} }
/* Filesystem related arguments */ /// Apply the following arguments to `self`, replacing values if they are specified in `args`.
///
// Custom datadir /// Returns an error if arguments are obviously invalid. May succeed even if some values are
/// invalid.
pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
if let Some(dir) = args.value_of("datadir") { if let Some(dir) = args.value_of("datadir") {
config.data_dir = PathBuf::from(dir.to_string()); self.data_dir = PathBuf::from(dir);
}; };
/* RPC related arguments */ if let Some(dir) = args.value_of("db") {
self.db_type = dir.to_string();
if args.is_present("rpc") {
config.rpc_conf.enabled = true;
} }
if let Some(rpc_address) = args.value_of("rpc-address") { self.network.apply_cli_args(args)?;
if let Ok(listen_address) = rpc_address.parse::<Ipv4Addr>() { self.rpc.apply_cli_args(args)?;
config.rpc_conf.listen_address = listen_address; self.http.apply_cli_args(args)?;
} else {
error!(log, "Invalid RPC listen address"; "Address" => rpc_address);
return Err("Invalid RPC listen address");
}
}
if let Some(rpc_port) = args.value_of("rpc-port") { Ok(())
if let Ok(port) = rpc_port.parse::<u16>() {
config.rpc_conf.port = port;
} else {
error!(log, "Invalid RPC port"; "port" => rpc_port);
return Err("Invalid RPC port");
}
}
match args.value_of("db") {
Some("disk") => config.db_type = DBType::Disk,
Some("memory") => config.db_type = DBType::Memory,
_ => unreachable!(), // clap prevents this.
};
Ok(config)
} }
} }
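
The reworked `ClientConfig` starts from `Default` and lets `apply_cli_args` overwrite individual fields only when the corresponding flag was supplied. A small sketch of that layering with clap 2.x; the `Config` struct here is a simplified stand-in, only the `datadir`/`db` flag names mirror the ones above:

use clap::{App, Arg, ArgMatches};
use std::path::PathBuf;

#[derive(Debug)]
struct Config {
    data_dir: PathBuf,
    db_type: String,
}

impl Default for Config {
    fn default() -> Self {
        Self { data_dir: PathBuf::from(".lighthouse"), db_type: "disk".to_string() }
    }
}

impl Config {
    // Only overwrite a field if the flag is present; otherwise the default survives.
    fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
        if let Some(dir) = args.value_of("datadir") {
            self.data_dir = PathBuf::from(dir);
        }
        if let Some(db) = args.value_of("db") {
            self.db_type = db.to_string();
        }
        Ok(())
    }
}

fn main() {
    let matches = App::new("demo")
        .arg(Arg::with_name("datadir").long("datadir").takes_value(true))
        .arg(Arg::with_name("db").long("db").takes_value(true))
        .get_matches_from(vec!["demo", "--db", "memory"]);

    let mut config = Config::default();
    config.apply_cli_args(&matches).unwrap();
    // data_dir keeps its default; db_type becomes "memory".
    println!("{:?}", config);
}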

View File

@ -1,14 +1,9 @@
// generates error types
use network;
-use error_chain::{
-    error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed,
-    impl_extract_backtrace,
-};
+use error_chain::error_chain;

error_chain! {
    links {
        Network(network::error::Error, network::error::ErrorKind);
    }
}

View File

@ -6,10 +6,10 @@ pub mod error;
pub mod notifier; pub mod notifier;
use beacon_chain::BeaconChain; use beacon_chain::BeaconChain;
use beacon_chain_types::InitialiseBeaconChain;
use exit_future::Signal; use exit_future::Signal;
use futures::{future::Future, Stream}; use futures::{future::Future, Stream};
use network::Service as NetworkService; use network::Service as NetworkService;
use prometheus::Registry;
use slog::{error, info, o}; use slog::{error, info, o};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::marker::PhantomData; use std::marker::PhantomData;
@ -19,16 +19,18 @@ use tokio::runtime::TaskExecutor;
use tokio::timer::Interval; use tokio::timer::Interval;
pub use beacon_chain::BeaconChainTypes; pub use beacon_chain::BeaconChainTypes;
pub use beacon_chain_types::{TestnetDiskBeaconChainTypes, TestnetMemoryBeaconChainTypes}; pub use beacon_chain_types::ClientType;
pub use client_config::{ClientConfig, DBType}; pub use beacon_chain_types::InitialiseBeaconChain;
pub use client_config::ClientConfig;
pub use eth2_config::Eth2Config;
/// Main beacon node client service. This provides the connection and initialisation of the clients /// Main beacon node client service. This provides the connection and initialisation of the clients
/// sub-services in multiple threads. /// sub-services in multiple threads.
pub struct Client<T: BeaconChainTypes> { pub struct Client<T: BeaconChainTypes> {
/// Configuration for the lighthouse client. /// Configuration for the lighthouse client.
_config: ClientConfig, _client_config: ClientConfig,
/// The beacon chain for the running client. /// The beacon chain for the running client.
_beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
/// Reference to the network service. /// Reference to the network service.
pub network: Arc<NetworkService<T>>, pub network: Arc<NetworkService<T>>,
/// Signal to terminate the RPC server. /// Signal to terminate the RPC server.
@ -49,12 +51,27 @@ where
{ {
/// Generate an instance of the client. Spawn and link all internal sub-processes. /// Generate an instance of the client. Spawn and link all internal sub-processes.
pub fn new( pub fn new(
config: ClientConfig, client_config: ClientConfig,
eth2_config: Eth2Config,
store: T::Store,
log: slog::Logger, log: slog::Logger,
executor: &TaskExecutor, executor: &TaskExecutor,
) -> error::Result<Self> { ) -> error::Result<Self> {
// generate a beacon chain let metrics_registry = Registry::new();
let beacon_chain = Arc::new(T::initialise_beacon_chain(&config)); let store = Arc::new(store);
let seconds_per_slot = eth2_config.spec.seconds_per_slot;
// Load a `BeaconChain` from the store, or create a new one if it does not exist.
let beacon_chain = Arc::new(T::initialise_beacon_chain(
store,
eth2_config.spec.clone(),
log.clone(),
));
// Register all beacon chain metrics with the global registry.
beacon_chain
.metrics
.register(&metrics_registry)
.expect("Failed to registry metrics");
if beacon_chain.read_slot_clock().is_none() { if beacon_chain.read_slot_clock().is_none() {
panic!("Cannot start client before genesis!") panic!("Cannot start client before genesis!")
@ -65,7 +82,7 @@ where
// If we don't block here we create an initial scenario where we're unable to process any // If we don't block here we create an initial scenario where we're unable to process any
// blocks and we're basically useless. // blocks and we're basically useless.
{ {
let state_slot = beacon_chain.state.read().slot; let state_slot = beacon_chain.head().beacon_state.slot;
let wall_clock_slot = beacon_chain.read_slot_clock().unwrap(); let wall_clock_slot = beacon_chain.read_slot_clock().unwrap();
let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap();
info!( info!(
@ -81,13 +98,13 @@ where
info!( info!(
log, log,
"State initialized"; "State initialized";
"state_slot" => beacon_chain.state.read().slot, "state_slot" => beacon_chain.head().beacon_state.slot,
"wall_clock_slot" => beacon_chain.read_slot_clock().unwrap(), "wall_clock_slot" => beacon_chain.read_slot_clock().unwrap(),
); );
// Start the network service, libp2p and syncing threads // Start the network service, libp2p and syncing threads
// TODO: Add beacon_chain reference to network parameters // TODO: Add beacon_chain reference to network parameters
let network_config = &config.net_conf; let network_config = &client_config.network;
let network_logger = log.new(o!("Service" => "Network")); let network_logger = log.new(o!("Service" => "Network"));
let (network, network_send) = NetworkService::new( let (network, network_send) = NetworkService::new(
beacon_chain.clone(), beacon_chain.clone(),
@ -97,9 +114,9 @@ where
)?; )?;
// spawn the RPC server // spawn the RPC server
let rpc_exit_signal = if config.rpc_conf.enabled { let rpc_exit_signal = if client_config.rpc.enabled {
Some(rpc::start_server( Some(rpc::start_server(
&config.rpc_conf, &client_config.rpc,
executor, executor,
network_send.clone(), network_send.clone(),
beacon_chain.clone(), beacon_chain.clone(),
@ -112,20 +129,26 @@ where
// Start the `http_server` service. // Start the `http_server` service.
// //
// Note: presently we are ignoring the config and _always_ starting a HTTP server. // Note: presently we are ignoring the config and _always_ starting a HTTP server.
let http_exit_signal = Some(http_server::start_service( let http_exit_signal = if client_config.http.enabled {
&config.http_conf, Some(http_server::start_service(
&client_config.http,
executor, executor,
network_send, network_send,
beacon_chain.clone(), beacon_chain.clone(),
client_config.db_path().expect("unable to read datadir"),
metrics_registry,
&log, &log,
)); ))
} else {
None
};
let (slot_timer_exit_signal, exit) = exit_future::signal(); let (slot_timer_exit_signal, exit) = exit_future::signal();
if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() {
// set up the validator work interval - start at next slot and proceed every slot // set up the validator work interval - start at next slot and proceed every slot
let interval = { let interval = {
// Set the interval to start at the next slot, and every slot after // Set the interval to start at the next slot, and every slot after
let slot_duration = Duration::from_secs(config.spec.seconds_per_slot); let slot_duration = Duration::from_secs(seconds_per_slot);
//TODO: Handle checked add correctly //TODO: Handle checked add correctly
Interval::new(Instant::now() + duration_to_next_slot, slot_duration) Interval::new(Instant::now() + duration_to_next_slot, slot_duration)
}; };
@ -147,8 +170,8 @@ where
} }
Ok(Client { Ok(Client {
_config: config, _client_config: client_config,
_beacon_chain: beacon_chain, beacon_chain,
http_exit_signal, http_exit_signal,
rpc_exit_signal, rpc_exit_signal,
slot_timer_exit_signal: Some(slot_timer_exit_signal), slot_timer_exit_signal: Some(slot_timer_exit_signal),
@ -159,6 +182,14 @@ where
} }
} }
impl<T: BeaconChainTypes> Drop for Client<T> {
fn drop(&mut self) {
// Save the beacon chain to its store before dropping.
let _result = self.beacon_chain.persist();
dbg!("Saved BeaconChain to store");
}
}
fn do_state_catchup<T: BeaconChainTypes>(chain: &Arc<BeaconChain<T>>, log: &slog::Logger) { fn do_state_catchup<T: BeaconChainTypes>(chain: &Arc<BeaconChain<T>>, log: &slog::Logger) {
if let Some(genesis_height) = chain.slots_since_genesis() { if let Some(genesis_height) = chain.slots_since_genesis() {
let result = chain.catchup_state(); let result = chain.catchup_state();
@ -167,7 +198,7 @@ fn do_state_catchup<T: BeaconChainTypes>(chain: &Arc<BeaconChain<T>>, log: &slog
"best_slot" => chain.head().beacon_block.slot, "best_slot" => chain.head().beacon_block.slot,
"latest_block_root" => format!("{}", chain.head().beacon_block_root), "latest_block_root" => format!("{}", chain.head().beacon_block_root),
"wall_clock_slot" => chain.read_slot_clock().unwrap(), "wall_clock_slot" => chain.read_slot_clock().unwrap(),
"state_slot" => chain.state.read().slot, "state_slot" => chain.head().beacon_state.slot,
"slots_since_genesis" => genesis_height, "slots_since_genesis" => genesis_height,
); );
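
The RPC and HTTP servers above are now started only when their config says `enabled`, and the exit signal is kept as an `Option` so shutdown can skip services that never ran. A minimal sketch of that gating pattern, independent of the actual rpc/http_server crates (the types here are placeholders):

struct HttpConfig { enabled: bool }

// Stand-in for the exit signal returned by a started service.
struct ExitSignal(&'static str);

fn start_http_server(_config: &HttpConfig) -> ExitSignal {
    // In the real client this would spawn the server on the tokio executor.
    ExitSignal("http")
}

fn main() {
    let config = HttpConfig { enabled: false };

    // `None` means "service not running"; later shutdown code can ignore it.
    let http_exit_signal = if config.enabled {
        Some(start_http_server(&config))
    } else {
        None
    };

    assert!(http_exit_signal.is_none());
}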

View File

@ -6,9 +6,12 @@ edition = "2018"
[dependencies]
beacon_chain = { path = "../beacon_chain" }
+clap = "2.32.0"
# SigP repository until PR is merged
libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b3c32d9a821ae6cc89079499cc6e8a6bab0bffc3" }
types = { path = "../../eth2/types" }
+serde = "1.0"
+serde_derive = "1.0"
ssz = { path = "../../eth2/utils/ssz" }
ssz_derive = { path = "../../eth2/utils/ssz_derive" }
slog = "2.4.1"

View File

@ -261,7 +261,7 @@ mod test {
    #[test]
    fn ssz_encoding() {
-        let original = PubsubMessage::Block(BeaconBlock::empty(&FoundationEthSpec::spec()));
+        let original = PubsubMessage::Block(BeaconBlock::empty(&MainnetEthSpec::default_spec()));

        let encoded = ssz_encode(&original);
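
Tests like this typically round-trip the message: encode it, decode the bytes, and compare with the original. A sketch of the same pattern with a toy length-prefixed codec standing in for the real ssz functions:

use std::convert::TryInto;

fn encode(msg: &str) -> Vec<u8> {
    // Toy codec: 4-byte little-endian length prefix followed by the raw bytes.
    let mut out = (msg.len() as u32).to_le_bytes().to_vec();
    out.extend_from_slice(msg.as_bytes());
    out
}

fn decode(bytes: &[u8]) -> Option<String> {
    let len = u32::from_le_bytes(bytes.get(..4)?.try_into().ok()?) as usize;
    let body = bytes.get(4..4 + len)?;
    String::from_utf8(body.to_vec()).ok()
}

fn main() {
    let original = "beacon_block".to_string();
    let encoded = encode(&original);
    let decoded = decode(&encoded).expect("round trip should succeed");
    assert_eq!(original, decoded);
}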

View File

@ -1,20 +1,22 @@
-use crate::Multiaddr;
+use clap::ArgMatches;
use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder};
+use serde_derive::{Deserialize, Serialize};
+use types::multiaddr::{Error as MultiaddrError, Multiaddr};

-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(default)]
/// Network configuration for lighthouse.
pub struct Config {
-    //TODO: stubbing networking initial params, change in the future
    /// IP address to listen on.
-    pub listen_addresses: Vec<Multiaddr>,
-    /// Listen port UDP/TCP.
-    pub listen_port: u16,
+    listen_addresses: Vec<String>,
    /// Gossipsub configuration parameters.
+    #[serde(skip)]
    pub gs_config: GossipsubConfig,
    /// Configuration parameters for node identification protocol.
+    #[serde(skip)]
    pub identify_config: IdentifyConfig,
    /// List of nodes to initially connect to.
-    pub boot_nodes: Vec<Multiaddr>,
+    boot_nodes: Vec<String>,
    /// Client version
    pub client_version: String,
    /// List of topics to subscribe to as strings
@ -25,15 +27,12 @@ impl Default for Config {
    /// Generate a default network configuration.
    fn default() -> Self {
        Config {
-            listen_addresses: vec!["/ip4/127.0.0.1/tcp/9000"
-                .parse()
-                .expect("is a correct multi-address")],
-            listen_port: 9000,
+            listen_addresses: vec!["/ip4/127.0.0.1/tcp/9000".to_string()],
            gs_config: GossipsubConfigBuilder::new()
                .max_gossip_size(4_000_000)
                .build(),
            identify_config: IdentifyConfig::default(),
-            boot_nodes: Vec::new(),
+            boot_nodes: vec![],
            client_version: version::version(),
            topics: vec![String::from("beacon_chain")],
        }
@ -41,12 +40,34 @@ impl Default for Config {
}

impl Config {
-    pub fn new(boot_nodes: Vec<Multiaddr>) -> Self {
+    pub fn new(boot_nodes: Vec<String>) -> Self {
        let mut conf = Config::default();
        conf.boot_nodes = boot_nodes;

        conf
    }

+    pub fn listen_addresses(&self) -> Result<Vec<Multiaddr>, MultiaddrError> {
+        self.listen_addresses.iter().map(|s| s.parse()).collect()
+    }
+
+    pub fn boot_nodes(&self) -> Result<Vec<Multiaddr>, MultiaddrError> {
+        self.boot_nodes.iter().map(|s| s.parse()).collect()
+    }
+
+    pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
+        if let Some(listen_address_str) = args.value_of("listen-address") {
+            let listen_addresses = listen_address_str.split(',').map(Into::into).collect();
+            self.listen_addresses = listen_addresses;
+        }
+
+        if let Some(boot_addresses_str) = args.value_of("boot-nodes") {
+            let boot_addresses = boot_addresses_str.split(',').map(Into::into).collect();
+            self.boot_nodes = boot_addresses;
+        }
+
+        Ok(())
+    }
}

/// The configuration parameters for the Identify protocol
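
Storing the addresses as plain `String`s keeps the struct serde-friendly, while the `listen_addresses()`/`boot_nodes()` getters parse on demand and fail as a whole if any entry is malformed. The same collect-into-`Result` idiom, sketched with std's `SocketAddr` in place of `Multiaddr`:

use std::net::SocketAddr;

struct Config {
    listen_addresses: Vec<String>,
}

impl Config {
    // Parse every stored string; the first invalid entry turns the whole result into an Err.
    fn listen_addresses(&self) -> Result<Vec<SocketAddr>, std::net::AddrParseError> {
        self.listen_addresses.iter().map(|s| s.parse()).collect()
    }
}

fn main() {
    let good = Config { listen_addresses: vec!["127.0.0.1:9000".to_string()] };
    assert!(good.listen_addresses().is_ok());

    let bad = Config { listen_addresses: vec!["not-an-address".to_string()] };
    assert!(bad.listen_addresses().is_err());
}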

View File

@ -1,8 +1,5 @@
// generates error types
-use error_chain::{
-    error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed,
-    impl_extract_backtrace,
-};
+use error_chain::error_chain;

error_chain! {}

View File

@ -172,8 +172,8 @@ pub struct BeaconBlockRootsResponse {
impl BeaconBlockRootsResponse {
    /// Returns `true` if each `self.roots.slot[i]` is higher than the preceding `i`.
    pub fn slots_are_ascending(&self) -> bool {
-        for i in 1..self.roots.len() {
-            if self.roots[i - 1].slot >= self.roots[i].slot {
+        for window in self.roots.windows(2) {
+            if window[0].slot >= window[1].slot {
                return false;
            }
        }
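
The rewrite replaces manual indexing with `windows(2)`, which yields each adjacent pair and avoids off-by-one indexing. A standalone sketch of the same ascending-slots check over plain integers:

// Returns true when every element is strictly greater than the one before it.
fn slots_are_ascending(slots: &[u64]) -> bool {
    for window in slots.windows(2) {
        if window[0] >= window[1] {
            return false;
        }
    }
    true
}

fn main() {
    assert!(slots_are_ascending(&[1, 2, 5]));
    assert!(!slots_are_ascending(&[1, 3, 3]));
    // Slices with fewer than two elements produce no windows and pass trivially.
    assert!(slots_are_ascending(&[7]));
}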

View File

@ -1,6 +1,7 @@
use super::methods::*; use super::methods::*;
use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo};
use ssz::{impl_decode_via_from, impl_encode_via_from, ssz_encode, Decode, Encode}; use ssz::{impl_decode_via_from, impl_encode_via_from, ssz_encode, Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::io; use std::io;
use std::iter; use std::iter;
@ -31,7 +32,7 @@ impl Default for RPCProtocol {
} }
/// A monotonic counter for ordering `RPCRequest`s. /// A monotonic counter for ordering `RPCRequest`s.
#[derive(Debug, Clone, Default)] #[derive(Debug, Clone, Copy, Default)]
pub struct RequestId(u64); pub struct RequestId(u64);
impl RequestId { impl RequestId {
@ -41,7 +42,7 @@ impl RequestId {
} }
/// Return the previous id. /// Return the previous id.
pub fn previous(&self) -> Self { pub fn previous(self) -> Self {
Self(self.0 - 1) Self(self.0 - 1)
} }
} }
@ -115,65 +116,67 @@ where
} }
} }
/// A helper struct used to obtain SSZ serialization for RPC messages.
#[derive(Encode, Decode, Default)]
struct SszContainer {
/// Note: the `is_request` field is not included in the spec.
///
/// We are unable to determine a request from a response unless we add some flag to the
/// packet. Here we have added a bool (encoded as 1 byte) which is set to `1` if the
/// message is a request.
is_request: bool,
id: u64,
other: u16,
bytes: Vec<u8>,
}
// NOTE! // NOTE!
// //
// This code has not been tested, it is a placeholder until we can update to the new libp2p // This code has not been tested, it is a placeholder until we can update to the new libp2p
// spec. // spec.
fn decode(packet: Vec<u8>) -> Result<RPCEvent, DecodeError> { fn decode(packet: Vec<u8>) -> Result<RPCEvent, DecodeError> {
let mut builder = ssz::SszDecoderBuilder::new(&packet); let msg = SszContainer::from_ssz_bytes(&packet)?;
builder.register_type::<bool>()?; if msg.is_request {
builder.register_type::<RequestId>()?; let body = match RPCMethod::from(msg.other) {
builder.register_type::<u16>()?; RPCMethod::Hello => RPCRequest::Hello(HelloMessage::from_ssz_bytes(&msg.bytes)?),
builder.register_type::<Vec<u8>>()?; RPCMethod::Goodbye => RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(&msg.bytes)?),
let mut decoder = builder.build()?;
let request: bool = decoder.decode_next()?;
let id: RequestId = decoder.decode_next()?;
let method_id: u16 = decoder.decode_next()?;
let bytes: Vec<u8> = decoder.decode_next()?;
if request {
let body = match RPCMethod::from(method_id) {
RPCMethod::Hello => RPCRequest::Hello(HelloMessage::from_ssz_bytes(&bytes)?),
RPCMethod::Goodbye => RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes(&bytes)?),
RPCMethod::BeaconBlockRoots => { RPCMethod::BeaconBlockRoots => {
RPCRequest::BeaconBlockRoots(BeaconBlockRootsRequest::from_ssz_bytes(&bytes)?) RPCRequest::BeaconBlockRoots(BeaconBlockRootsRequest::from_ssz_bytes(&msg.bytes)?)
}
RPCMethod::BeaconBlockHeaders => {
RPCRequest::BeaconBlockHeaders(BeaconBlockHeadersRequest::from_ssz_bytes(&bytes)?)
} }
RPCMethod::BeaconBlockHeaders => RPCRequest::BeaconBlockHeaders(
BeaconBlockHeadersRequest::from_ssz_bytes(&msg.bytes)?,
),
RPCMethod::BeaconBlockBodies => { RPCMethod::BeaconBlockBodies => {
RPCRequest::BeaconBlockBodies(BeaconBlockBodiesRequest::from_ssz_bytes(&bytes)?) RPCRequest::BeaconBlockBodies(BeaconBlockBodiesRequest::from_ssz_bytes(&msg.bytes)?)
} }
RPCMethod::BeaconChainState => { RPCMethod::BeaconChainState => {
RPCRequest::BeaconChainState(BeaconChainStateRequest::from_ssz_bytes(&bytes)?) RPCRequest::BeaconChainState(BeaconChainStateRequest::from_ssz_bytes(&msg.bytes)?)
} }
RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod), RPCMethod::Unknown => return Err(DecodeError::UnknownRPCMethod),
}; };
Ok(RPCEvent::Request { Ok(RPCEvent::Request {
id, id: RequestId::from(msg.id),
method_id, method_id: msg.other,
body, body,
}) })
} }
// we have received a response // we have received a response
else { else {
let result = match RPCMethod::from(method_id) { let result = match RPCMethod::from(msg.other) {
RPCMethod::Hello => RPCResponse::Hello(HelloMessage::from_ssz_bytes(&bytes)?), RPCMethod::Hello => RPCResponse::Hello(HelloMessage::from_ssz_bytes(&msg.bytes)?),
RPCMethod::BeaconBlockRoots => { RPCMethod::BeaconBlockRoots => {
RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse::from_ssz_bytes(&bytes)?) RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse::from_ssz_bytes(&msg.bytes)?)
}
RPCMethod::BeaconBlockHeaders => {
RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse::from_ssz_bytes(&bytes)?)
}
RPCMethod::BeaconBlockBodies => {
RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse::from_ssz_bytes(&packet)?)
} }
RPCMethod::BeaconBlockHeaders => RPCResponse::BeaconBlockHeaders(
BeaconBlockHeadersResponse::from_ssz_bytes(&msg.bytes)?,
),
RPCMethod::BeaconBlockBodies => RPCResponse::BeaconBlockBodies(
BeaconBlockBodiesResponse::from_ssz_bytes(&msg.bytes)?,
),
RPCMethod::BeaconChainState => { RPCMethod::BeaconChainState => {
RPCResponse::BeaconChainState(BeaconChainStateResponse::from_ssz_bytes(&packet)?) RPCResponse::BeaconChainState(BeaconChainStateResponse::from_ssz_bytes(&msg.bytes)?)
} }
// We should never receive a goodbye response; it is invalid. // We should never receive a goodbye response; it is invalid.
RPCMethod::Goodbye => return Err(DecodeError::UnknownRPCMethod), RPCMethod::Goodbye => return Err(DecodeError::UnknownRPCMethod),
@ -181,8 +184,8 @@ fn decode(packet: Vec<u8>) -> Result<RPCEvent, DecodeError> {
}; };
Ok(RPCEvent::Response { Ok(RPCEvent::Response {
id, id: RequestId::from(msg.id),
method_id, method_id: msg.other,
result, result,
}) })
} }
@ -208,80 +211,44 @@ impl Encode for RPCEvent {
false false
} }
// NOTE!
//
// This code has not been tested, it is a placeholder until we can update to the new libp2p
// spec.
fn ssz_append(&self, buf: &mut Vec<u8>) { fn ssz_append(&self, buf: &mut Vec<u8>) {
let offset = <bool as Encode>::ssz_fixed_len() let container = match self {
+ <u16 as Encode>::ssz_fixed_len()
+ <Vec<u8> as Encode>::ssz_fixed_len();
let mut encoder = ssz::SszEncoder::container(buf, offset);
match self {
RPCEvent::Request { RPCEvent::Request {
id, id,
method_id, method_id,
body, body,
} => { } => SszContainer {
encoder.append(&true); is_request: true,
encoder.append(id); id: (*id).into(),
encoder.append(method_id); other: *method_id,
bytes: match body {
// Encode the `body` as a `Vec<u8>`. RPCRequest::Hello(body) => body.as_ssz_bytes(),
match body { RPCRequest::Goodbye(body) => body.as_ssz_bytes(),
RPCRequest::Hello(body) => { RPCRequest::BeaconBlockRoots(body) => body.as_ssz_bytes(),
encoder.append(&body.as_ssz_bytes()); RPCRequest::BeaconBlockHeaders(body) => body.as_ssz_bytes(),
} RPCRequest::BeaconBlockBodies(body) => body.as_ssz_bytes(),
RPCRequest::Goodbye(body) => { RPCRequest::BeaconChainState(body) => body.as_ssz_bytes(),
encoder.append(&body.as_ssz_bytes()); },
} },
RPCRequest::BeaconBlockRoots(body) => {
encoder.append(&body.as_ssz_bytes());
}
RPCRequest::BeaconBlockHeaders(body) => {
encoder.append(&body.as_ssz_bytes());
}
RPCRequest::BeaconBlockBodies(body) => {
encoder.append(&body.as_ssz_bytes());
}
RPCRequest::BeaconChainState(body) => {
encoder.append(&body.as_ssz_bytes());
}
}
}
RPCEvent::Response { RPCEvent::Response {
id, id,
method_id, method_id,
result, result,
} => { } => SszContainer {
encoder.append(&true); is_request: false,
encoder.append(id); id: (*id).into(),
encoder.append(method_id); other: *method_id,
bytes: match result {
RPCResponse::Hello(response) => response.as_ssz_bytes(),
RPCResponse::BeaconBlockRoots(response) => response.as_ssz_bytes(),
RPCResponse::BeaconBlockHeaders(response) => response.as_ssz_bytes(),
RPCResponse::BeaconBlockBodies(response) => response.as_ssz_bytes(),
RPCResponse::BeaconChainState(response) => response.as_ssz_bytes(),
},
},
};
match result { container.ssz_append(buf)
RPCResponse::Hello(response) => {
encoder.append(&response.as_ssz_bytes());
}
RPCResponse::BeaconBlockRoots(response) => {
encoder.append(&response.as_ssz_bytes());
}
RPCResponse::BeaconBlockHeaders(response) => {
encoder.append(&response.as_ssz_bytes());
}
RPCResponse::BeaconBlockBodies(response) => {
encoder.append(&response.as_ssz_bytes());
}
RPCResponse::BeaconChainState(response) => {
encoder.append(&response.as_ssz_bytes());
}
}
}
}
// Finalize the encoder, writing to `buf`.
encoder.finalize();
} }
} }
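
The `SszContainer` change collapses the hand-rolled encoder into a single envelope: a request/response flag, an id, a method discriminant, and the already-encoded body bytes. A simplified, self-contained sketch of dispatching on such an envelope, with a toy event enum and plain byte payloads rather than the real ssz types:

// A toy envelope mirroring the shape of the container described above.
struct Container {
    is_request: bool,
    id: u64,
    method: u16,
    bytes: Vec<u8>,
}

#[derive(Debug)]
enum Event {
    Request { id: u64, body: Vec<u8> },
    Response { id: u64, body: Vec<u8> },
}

fn decode(msg: Container) -> Result<Event, &'static str> {
    // Method 0 stands in for "hello"; anything else is unknown in this sketch.
    if msg.method != 0 {
        return Err("unknown method");
    }
    if msg.is_request {
        Ok(Event::Request { id: msg.id, body: msg.bytes })
    } else {
        Ok(Event::Response { id: msg.id, body: msg.bytes })
    }
}

fn main() {
    let msg = Container { is_request: true, id: 7, method: 0, bytes: vec![1, 2, 3] };
    println!("{:?}", decode(msg).unwrap());
}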

View File

@ -57,7 +57,10 @@ impl Service {
        };

        // listen on all addresses
-        for address in &config.listen_addresses {
+        for address in config
+            .listen_addresses()
+            .map_err(|e| format!("Invalid listen multiaddr: {}", e))?
+        {
            match Swarm::listen_on(&mut swarm, address.clone()) {
                Ok(mut listen_addr) => {
                    listen_addr.append(Protocol::P2p(local_peer_id.clone().into()));
@ -68,7 +71,10 @@ impl Service {
        }

        // connect to boot nodes - these are currently stored as multiaddrs
        // Once we have discovery, can set to peerId
-        for bootnode in config.boot_nodes {
+        for bootnode in config
+            .boot_nodes()
+            .map_err(|e| format!("Invalid boot node multiaddr: {:?}", e))?
+        {
            match Swarm::dial_addr(&mut swarm, bootnode.clone()) {
                Ok(()) => debug!(log, "Dialing bootnode: {}", bootnode),
                Err(err) => debug!(

View File

@ -20,7 +20,7 @@ fork_choice = { path = "../../eth2/fork_choice" }
grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
persistent = "^0.4"
protobuf = "2.0.2"
-prometheus = "^0.6"
+prometheus = { version = "^0.6", features = ["process"] }
clap = "2.32.0"
store = { path = "../store" }
dirs = "1.0.3"

View File

@ -1,6 +1,9 @@
+use crate::metrics::LocalMetrics;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use iron::typemap::Key;
+use prometheus::Registry;
use std::marker::PhantomData;
+use std::path::PathBuf;
use std::sync::Arc;

pub struct BeaconChainKey<T> {
@ -10,3 +13,21 @@ pub struct BeaconChainKey<T> {
impl<T: BeaconChainTypes + 'static> Key for BeaconChainKey<T> {
    type Value = Arc<BeaconChain<T>>;
}

+pub struct MetricsRegistryKey;
+
+impl Key for MetricsRegistryKey {
+    type Value = Registry;
+}
+
+pub struct LocalMetricsKey;
+
+impl Key for LocalMetricsKey {
+    type Value = LocalMetrics;
+}
+
+pub struct DBPathKey;
+
+impl Key for DBPathKey {
+    type Value = PathBuf;
+}
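
Each of these zero-sized key types pins one `Value` that iron's `persistent::Read` middleware can later fetch by type. The idea can be sketched without iron using a map keyed by `TypeId`; this is a rough approximation, not iron's actual typemap implementation:

use std::any::{Any, TypeId};
use std::collections::HashMap;
use std::path::PathBuf;

// Marker type playing the role of `DBPathKey` above.
struct DbPathKey;

#[derive(Default)]
struct SharedState {
    values: HashMap<TypeId, Box<dyn Any>>,
}

impl SharedState {
    fn insert<K: 'static, V: 'static>(&mut self, value: V) {
        self.values.insert(TypeId::of::<K>(), Box::new(value));
    }

    fn get<K: 'static, V: 'static>(&self) -> Option<&V> {
        self.values.get(&TypeId::of::<K>())?.downcast_ref::<V>()
    }
}

fn main() {
    let mut state = SharedState::default();
    state.insert::<DbPathKey, PathBuf>(PathBuf::from(".lighthouse/chain_db"));

    let db_path: &PathBuf = state.get::<DbPathKey, PathBuf>().expect("path was inserted");
    println!("{}", db_path.display());
}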

View File

@ -3,39 +3,65 @@ mod key;
mod metrics; mod metrics;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use clap::ArgMatches;
use futures::Future; use futures::Future;
use iron::prelude::*; use iron::prelude::*;
use network::NetworkMessage; use network::NetworkMessage;
use prometheus::Registry;
use router::Router; use router::Router;
use serde_derive::{Deserialize, Serialize};
use slog::{info, o, warn}; use slog::{info, o, warn};
use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use tokio::runtime::TaskExecutor; use tokio::runtime::TaskExecutor;
#[derive(PartialEq, Clone, Debug)] #[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct HttpServerConfig { pub struct HttpServerConfig {
pub enabled: bool, pub enabled: bool,
pub listen_address: String, pub listen_address: String,
pub listen_port: String,
} }
impl Default for HttpServerConfig { impl Default for HttpServerConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
enabled: false, enabled: false,
listen_address: "127.0.0.1:5051".to_string(), listen_address: "127.0.0.1".to_string(),
listen_port: "5052".to_string(),
} }
} }
} }
impl HttpServerConfig {
pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
if args.is_present("http") {
self.enabled = true;
}
if let Some(listen_address) = args.value_of("http-address") {
self.listen_address = listen_address.to_string();
}
if let Some(listen_port) = args.value_of("http-port") {
self.listen_port = listen_port.to_string();
}
Ok(())
}
}
/// Build the `iron` HTTP server, defining the core routes. /// Build the `iron` HTTP server, defining the core routes.
pub fn create_iron_http_server<T: BeaconChainTypes + 'static>( pub fn create_iron_http_server<T: BeaconChainTypes + 'static>(
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
db_path: PathBuf,
metrics_registry: Registry,
) -> Iron<Router> { ) -> Iron<Router> {
let mut router = Router::new(); let mut router = Router::new();
// A `GET` request to `/metrics` is handled by the `metrics` module. // A `GET` request to `/metrics` is handled by the `metrics` module.
router.get( router.get(
"/metrics", "/metrics",
metrics::build_handler(beacon_chain.clone()), metrics::build_handler(beacon_chain.clone(), db_path, metrics_registry),
"metrics", "metrics",
); );
@ -51,6 +77,8 @@ pub fn start_service<T: BeaconChainTypes + 'static>(
executor: &TaskExecutor, executor: &TaskExecutor,
_network_chan: crossbeam_channel::Sender<NetworkMessage>, _network_chan: crossbeam_channel::Sender<NetworkMessage>,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
db_path: PathBuf,
metrics_registry: Registry,
log: &slog::Logger, log: &slog::Logger,
) -> exit_future::Signal { ) -> exit_future::Signal {
let log = log.new(o!("Service"=>"HTTP")); let log = log.new(o!("Service"=>"HTTP"));
@ -61,7 +89,7 @@ pub fn start_service<T: BeaconChainTypes + 'static>(
let (shutdown_trigger, wait_for_shutdown) = exit_future::signal(); let (shutdown_trigger, wait_for_shutdown) = exit_future::signal();
// Create an `iron` http, without starting it yet. // Create an `iron` http, without starting it yet.
let iron = create_iron_http_server(beacon_chain); let iron = create_iron_http_server(beacon_chain, db_path, metrics_registry);
// Create a HTTP server future. // Create a HTTP server future.
// //
@ -69,16 +97,14 @@ pub fn start_service<T: BeaconChainTypes + 'static>(
// 2. Build an exit future that will shutdown the server when requested. // 2. Build an exit future that will shutdown the server when requested.
// 3. Return the exit future, so the caller may shutdown the service when desired. // 3. Return the exit future, so the caller may shutdown the service when desired.
let http_service = { let http_service = {
let listen_address = format!("{}:{}", config.listen_address, config.listen_port);
// Start the HTTP server // Start the HTTP server
let server_start_result = iron.http(config.listen_address.clone()); let server_start_result = iron.http(listen_address.clone());
if server_start_result.is_ok() { if server_start_result.is_ok() {
info!(log, "HTTP server running on {}", config.listen_address); info!(log, "HTTP server running on {}", listen_address);
} else { } else {
warn!( warn!(log, "HTTP server failed to start on {}", listen_address);
log,
"HTTP server failed to start on {}", config.listen_address
);
} }
// Build a future that will shutdown the HTTP server when the `shutdown_trigger` is // Build a future that will shutdown the HTTP server when the `shutdown_trigger` is

View File

@ -1,20 +1,34 @@
use crate::{key::BeaconChainKey, map_persistent_err_to_500}; use crate::{
key::{BeaconChainKey, DBPathKey, LocalMetricsKey, MetricsRegistryKey},
map_persistent_err_to_500,
};
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use iron::prelude::*; use iron::prelude::*;
use iron::{status::Status, Handler, IronResult, Request, Response}; use iron::{status::Status, Handler, IronResult, Request, Response};
use persistent::Read; use persistent::Read;
use prometheus::{Encoder, IntCounter, Opts, Registry, TextEncoder}; use prometheus::{Encoder, Registry, TextEncoder};
use slot_clock::SlotClock; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use types::Slot;
pub use local_metrics::LocalMetrics;
mod local_metrics;
/// Yields a handler for the metrics endpoint. /// Yields a handler for the metrics endpoint.
pub fn build_handler<T: BeaconChainTypes + 'static>( pub fn build_handler<T: BeaconChainTypes + 'static>(
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
db_path: PathBuf,
metrics_registry: Registry,
) -> impl Handler { ) -> impl Handler {
let mut chain = Chain::new(handle_metrics::<T>); let mut chain = Chain::new(handle_metrics::<T>);
let local_metrics = LocalMetrics::new().unwrap();
local_metrics.register(&metrics_registry).unwrap();
chain.link(Read::<BeaconChainKey<T>>::both(beacon_chain)); chain.link(Read::<BeaconChainKey<T>>::both(beacon_chain));
chain.link(Read::<MetricsRegistryKey>::both(metrics_registry));
chain.link(Read::<LocalMetricsKey>::both(local_metrics));
chain.link(Read::<DBPathKey>::both(db_path));
chain chain
} }
@ -27,23 +41,28 @@ fn handle_metrics<T: BeaconChainTypes + 'static>(req: &mut Request) -> IronResul
.get::<Read<BeaconChainKey<T>>>() .get::<Read<BeaconChainKey<T>>>()
.map_err(map_persistent_err_to_500)?; .map_err(map_persistent_err_to_500)?;
let r = Registry::new(); let r = req
.get::<Read<MetricsRegistryKey>>()
.map_err(map_persistent_err_to_500)?;
let present_slot = if let Ok(Some(slot)) = beacon_chain.slot_clock.present_slot() { let local_metrics = req
slot .get::<Read<LocalMetricsKey>>()
} else { .map_err(map_persistent_err_to_500)?;
Slot::new(0)
}; let db_path = req
register_and_set_slot( .get::<Read<DBPathKey>>()
&r, .map_err(map_persistent_err_to_500)?;
"present_slot",
"direct_slock_clock_reading", // Update metrics that are calculated on each scrape.
present_slot, local_metrics.update(&beacon_chain, &db_path);
);
// Gather the metrics.
let mut buffer = vec![]; let mut buffer = vec![];
let encoder = TextEncoder::new(); let encoder = TextEncoder::new();
// Gather `DEFAULT_REGISTRY` metrics.
encoder.encode(&prometheus::gather(), &mut buffer).unwrap();
// Gather metrics from our registry.
let metric_families = r.gather(); let metric_families = r.gather();
encoder.encode(&metric_families, &mut buffer).unwrap(); encoder.encode(&metric_families, &mut buffer).unwrap();
@ -51,10 +70,3 @@ fn handle_metrics<T: BeaconChainTypes + 'static>(req: &mut Request) -> IronResul
Ok(Response::with((Status::Ok, prom_string))) Ok(Response::with((Status::Ok, prom_string)))
} }
fn register_and_set_slot(registry: &Registry, name: &str, help: &str, slot: Slot) {
let counter_opts = Opts::new(name, help);
let counter = IntCounter::with_opts(counter_opts).unwrap();
registry.register(Box::new(counter.clone())).unwrap();
counter.inc_by(slot.as_u64() as i64);
}
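
The handler now encodes two sources into one scrape body: process-level metrics from prometheus's default registry and the chain metrics registered on the custom `Registry`. A minimal sketch of that double-gather, assuming roughly the 0.6-era `prometheus` API already used in this file; the gauge name is illustrative only:

use prometheus::{Encoder, IntGauge, Opts, Registry, TextEncoder};

fn main() {
    // Custom registry holding one example gauge.
    let registry = Registry::new();
    let best_slot = IntGauge::with_opts(Opts::new("best_slot", "slot_of_block_at_chain_head")).unwrap();
    registry.register(Box::new(best_slot.clone())).unwrap();
    best_slot.set(1234);

    let mut buffer = vec![];
    let encoder = TextEncoder::new();

    // First the global default registry (process metrics etc.), then our own.
    encoder.encode(&prometheus::gather(), &mut buffer).unwrap();
    encoder.encode(&registry.gather(), &mut buffer).unwrap();

    println!("{}", String::from_utf8(buffer).unwrap());
}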

View File

@ -0,0 +1,106 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use prometheus::{IntGauge, Opts, Registry};
use slot_clock::SlotClock;
use std::fs::File;
use std::path::PathBuf;
use types::{EthSpec, Slot};
// If set to `true` will iterate and sum the balances of all validators in the state for each
// scrape.
const SHOULD_SUM_VALIDATOR_BALANCES: bool = true;
pub struct LocalMetrics {
present_slot: IntGauge,
present_epoch: IntGauge,
best_slot: IntGauge,
validator_count: IntGauge,
justified_epoch: IntGauge,
finalized_epoch: IntGauge,
validator_balances_sum: IntGauge,
database_size: IntGauge,
}
impl LocalMetrics {
/// Create a new instance.
pub fn new() -> Result<Self, prometheus::Error> {
Ok(Self {
present_slot: {
let opts = Opts::new("present_slot", "slot_at_time_of_scrape");
IntGauge::with_opts(opts)?
},
present_epoch: {
let opts = Opts::new("present_epoch", "epoch_at_time_of_scrape");
IntGauge::with_opts(opts)?
},
best_slot: {
let opts = Opts::new("best_slot", "slot_of_block_at_chain_head");
IntGauge::with_opts(opts)?
},
validator_count: {
let opts = Opts::new("validator_count", "number_of_validators");
IntGauge::with_opts(opts)?
},
justified_epoch: {
let opts = Opts::new("justified_epoch", "state_justified_epoch");
IntGauge::with_opts(opts)?
},
finalized_epoch: {
let opts = Opts::new("finalized_epoch", "state_finalized_epoch");
IntGauge::with_opts(opts)?
},
validator_balances_sum: {
let opts = Opts::new("validator_balances_sum", "sum_of_all_validator_balances");
IntGauge::with_opts(opts)?
},
database_size: {
let opts = Opts::new("database_size", "size_of_on_disk_db_in_mb");
IntGauge::with_opts(opts)?
},
})
}
    /// Register this instance with the `registry`.
pub fn register(&self, registry: &Registry) -> Result<(), prometheus::Error> {
registry.register(Box::new(self.present_slot.clone()))?;
registry.register(Box::new(self.present_epoch.clone()))?;
registry.register(Box::new(self.best_slot.clone()))?;
registry.register(Box::new(self.validator_count.clone()))?;
registry.register(Box::new(self.finalized_epoch.clone()))?;
registry.register(Box::new(self.justified_epoch.clone()))?;
registry.register(Box::new(self.validator_balances_sum.clone()))?;
registry.register(Box::new(self.database_size.clone()))?;
Ok(())
}
/// Update the metrics in `self` to the latest values.
pub fn update<T: BeaconChainTypes>(&self, beacon_chain: &BeaconChain<T>, db_path: &PathBuf) {
let state = &beacon_chain.head().beacon_state;
let present_slot = beacon_chain
.slot_clock
.present_slot()
.unwrap_or_else(|_| None)
.unwrap_or_else(|| Slot::new(0));
self.present_slot.set(present_slot.as_u64() as i64);
self.present_epoch
.set(present_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64() as i64);
self.best_slot.set(state.slot.as_u64() as i64);
self.validator_count
.set(state.validator_registry.len() as i64);
self.justified_epoch
.set(state.current_justified_epoch.as_u64() as i64);
self.finalized_epoch
.set(state.finalized_epoch.as_u64() as i64);
if SHOULD_SUM_VALIDATOR_BALANCES {
self.validator_balances_sum
.set(state.balances.iter().sum::<u64>() as i64);
}
let db_size = File::open(db_path)
.and_then(|f| f.metadata())
.and_then(|m| Ok(m.len()))
.unwrap_or(0);
self.database_size.set(db_size as i64);
}
}
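
`LocalMetrics` holds plain gauges and recomputes them inside `update` on every scrape rather than incrementing them as events happen; the database size, for example, is re-read from the file's metadata each time. A compact sketch of that scrape-time update with a hypothetical single-gauge set, reusing the `prometheus` crate:

use prometheus::{IntGauge, Opts, Registry};
use std::fs;
use std::path::Path;

struct LocalMetrics {
    database_size: IntGauge,
}

impl LocalMetrics {
    fn new(registry: &Registry) -> Result<Self, prometheus::Error> {
        let database_size =
            IntGauge::with_opts(Opts::new("database_size", "size_of_on_disk_db_in_bytes"))?;
        registry.register(Box::new(database_size.clone()))?;
        Ok(Self { database_size })
    }

    // Called once per scrape; the gauge is overwritten, not incremented.
    fn update(&self, db_path: &Path) {
        let db_size = fs::metadata(db_path).map(|m| m.len()).unwrap_or(0);
        self.database_size.set(db_size as i64);
    }
}

fn main() {
    let registry = Registry::new();
    let metrics = LocalMetrics::new(&registry).unwrap();
    metrics.update(Path::new("Cargo.toml"));
    println!("{:?}", registry.gather());
}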

View File

@ -9,6 +9,7 @@ sloggers = "0.3.2"
[dependencies]
beacon_chain = { path = "../beacon_chain" }
+store = { path = "../store" }
eth2-libp2p = { path = "../eth2-libp2p" }
version = { path = "../version" }
types = { path = "../../eth2/types" }

View File

@ -1,157 +0,0 @@
use beacon_chain::BeaconChain as RawBeaconChain;
use beacon_chain::{
parking_lot::RwLockReadGuard,
types::{BeaconState, ChainSpec},
AttestationValidationError, CheckPoint,
};
use eth2_libp2p::rpc::HelloMessage;
use types::{
Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot,
};
pub use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock};
/// The network's API to the beacon chain.
pub trait BeaconChain<T: BeaconChainTypes>: Send + Sync {
fn get_spec(&self) -> &ChainSpec;
fn get_state(&self) -> RwLockReadGuard<BeaconState<T::EthSpec>>;
fn slot(&self) -> Slot;
fn head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>>;
fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, BeaconChainError>;
fn best_slot(&self) -> Slot;
fn best_block_root(&self) -> Hash256;
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>>;
fn finalized_epoch(&self) -> Epoch;
fn hello_message(&self) -> HelloMessage;
fn process_block(&self, block: BeaconBlock)
-> Result<BlockProcessingOutcome, BeaconChainError>;
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError>;
fn get_block_roots(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<Hash256>, BeaconChainError>;
fn get_block_headers(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<BeaconBlockHeader>, BeaconChainError>;
fn get_block_bodies(&self, roots: &[Hash256])
-> Result<Vec<BeaconBlockBody>, BeaconChainError>;
fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, BeaconChainError>;
}
impl<T: BeaconChainTypes> BeaconChain<T> for RawBeaconChain<T> {
fn get_spec(&self) -> &ChainSpec {
&self.spec
}
fn get_state(&self) -> RwLockReadGuard<BeaconState<T::EthSpec>> {
self.state.read()
}
fn slot(&self) -> Slot {
self.get_state().slot
}
fn head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>> {
self.head()
}
fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, BeaconChainError> {
self.get_block(block_root)
}
fn finalized_epoch(&self) -> Epoch {
self.get_state().finalized_epoch
}
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint<T::EthSpec>> {
self.finalized_head()
}
fn best_slot(&self) -> Slot {
self.head().beacon_block.slot
}
fn best_block_root(&self) -> Hash256 {
self.head().beacon_block_root
}
fn hello_message(&self) -> HelloMessage {
let spec = self.get_spec();
let state = self.get_state();
HelloMessage {
network_id: spec.chain_id,
latest_finalized_root: state.finalized_root,
latest_finalized_epoch: state.finalized_epoch,
best_root: self.best_block_root(),
best_slot: self.best_slot(),
}
}
fn process_block(
&self,
block: BeaconBlock,
) -> Result<BlockProcessingOutcome, BeaconChainError> {
self.process_block(block)
}
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError> {
self.process_attestation(attestation)
}
fn get_block_roots(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<Hash256>, BeaconChainError> {
self.get_block_roots(start_slot, count, skip)
}
fn get_block_headers(
&self,
start_slot: Slot,
count: usize,
skip: usize,
) -> Result<Vec<BeaconBlockHeader>, BeaconChainError> {
let roots = self.get_block_roots(start_slot, count, skip)?;
self.get_block_headers(&roots)
}
fn get_block_bodies(
&self,
roots: &[Hash256],
) -> Result<Vec<BeaconBlockBody>, BeaconChainError> {
self.get_block_bodies(roots)
}
fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, BeaconChainError> {
self.is_new_block_root(beacon_block_root)
}
}

View File

@ -1,10 +1,7 @@
// generates error types
use eth2_libp2p;
-use error_chain::{
-    error_chain, error_chain_processing, impl_error_chain_kind, impl_error_chain_processed,
-    impl_extract_backtrace,
-};
+use error_chain::error_chain;

error_chain! {
    links {

View File

@ -1,5 +1,4 @@
/// This crate provides the network server for Lighthouse.
-pub mod beacon_chain;
pub mod error;
pub mod message_handler;
pub mod service;

View File

@ -1,7 +1,7 @@
-use crate::beacon_chain::{BeaconChain, BeaconChainTypes};
use crate::error;
use crate::service::{NetworkMessage, OutgoingMessage};
use crate::sync::SimpleSync;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
use crossbeam_channel::{unbounded as channel, Sender};
use eth2_libp2p::{
    behaviour::PubsubMessage,
@ -155,7 +155,7 @@ impl<T: BeaconChainTypes + 'static> MessageHandler<T> {
        if self
            .network_context
            .outstanding_outgoing_request_ids
-            .remove(&(peer_id.clone(), id.clone()))
+            .remove(&(peer_id.clone(), id))
            .is_none()
        {
            warn!(
@ -250,7 +250,7 @@ impl NetworkContext {
        let id = self.generate_request_id(&peer_id);

        self.outstanding_outgoing_request_ids
-            .insert((peer_id.clone(), id.clone()), Instant::now());
+            .insert((peer_id.clone(), id), Instant::now());

        self.send_rpc_event(
            peer_id,

View File

@ -1,7 +1,7 @@
-use crate::beacon_chain::{BeaconChain, BeaconChainTypes};
use crate::error;
use crate::message_handler::{HandlerMessage, MessageHandler};
use crate::NetworkConfig;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
use crossbeam_channel::{unbounded as channel, Sender, TryRecvError};
use eth2_libp2p::Service as LibP2PService;
use eth2_libp2p::{Libp2pEvent, PeerId};

View File

@ -1,4 +1,4 @@
-use crate::beacon_chain::{BeaconChain, BeaconChainTypes};
+use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::PeerId;
use slog::{debug, error};
@ -166,7 +166,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
        let mut required_bodies: Vec<Hash256> = vec![];

        for header in headers {
-            let block_root = Hash256::from_slice(&header.tree_hash_root()[..]);
+            let block_root = Hash256::from_slice(&header.canonical_root()[..]);

            if self.chain_has_not_seen_block(&block_root) {
                self.insert_header(block_root, header, sender.clone());
@ -212,7 +212,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
        // Case 2: there was no partial with a matching block root.
        //
-        // A new partial is added. This case permits adding a header without already known the
-        // root -- this is not possible in the wire protocol however we support it anyway.
+        // A new partial is added. This case permits adding a header without already known the
+        // root.
        self.partials.push(PartialBeaconBlock {
            slot: header.slot,
            block_root,
@ -250,7 +250,7 @@ impl<T: BeaconChainTypes> ImportQueue<T> {
    ///
    /// If the partial already existed, the `inserted` time is set to `now`.
    fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) {
-        let block_root = Hash256::from_slice(&block.tree_hash_root()[..]);
+        let block_root = Hash256::from_slice(&block.canonical_root()[..]);

        let partial = PartialBeaconBlock {
            slot: block.slot,

View File

@ -1,6 +1,6 @@
use super::import_queue::ImportQueue; use super::import_queue::ImportQueue;
use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InvalidBlock};
use crate::message_handler::NetworkContext; use crate::message_handler::NetworkContext;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId};
use eth2_libp2p::PeerId; use eth2_libp2p::PeerId;
@ -8,8 +8,10 @@ use slog::{debug, error, info, o, warn};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tree_hash::TreeHash; use store::Store;
use types::{Attestation, BeaconBlock, Epoch, Hash256, Slot}; use types::{
Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot,
};
/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. /// The number of slots that we can import blocks ahead of us, before going into full Sync mode.
const SLOT_IMPORT_TOLERANCE: u64 = 100; const SLOT_IMPORT_TOLERANCE: u64 = 100;
@ -21,6 +23,9 @@ const QUEUE_STALE_SECS: u64 = 600;
/// Otherwise we queue it. /// Otherwise we queue it.
const FUTURE_SLOT_TOLERANCE: u64 = 1; const FUTURE_SLOT_TOLERANCE: u64 = 1;
const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true;
const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false;
/// Keeps track of syncing information for known connected peers. /// Keeps track of syncing information for known connected peers.
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
pub struct PeerSyncInfo { pub struct PeerSyncInfo {
@ -31,51 +36,6 @@ pub struct PeerSyncInfo {
best_slot: Slot, best_slot: Slot,
} }
impl PeerSyncInfo {
/// Returns `true` if the has a different network ID to `other`.
fn has_different_network_id_to(&self, other: Self) -> bool {
self.network_id != other.network_id
}
/// Returns `true` if the peer has a higher finalized epoch than `other`.
fn has_higher_finalized_epoch_than(&self, other: Self) -> bool {
self.latest_finalized_epoch > other.latest_finalized_epoch
}
/// Returns `true` if the peer has a higher best slot than `other`.
fn has_higher_best_slot_than(&self, other: Self) -> bool {
self.best_slot > other.best_slot
}
}
/// The status of a peers view on the chain, relative to some other view of the chain (presumably
/// our view).
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum PeerStatus {
/// The peer is on a completely different chain.
DifferentNetworkId,
/// The peer lists a finalized epoch for which we have a different root.
FinalizedEpochNotInChain,
/// The peer has a higher finalized epoch.
HigherFinalizedEpoch,
/// The peer has a higher best slot.
HigherBestSlot,
/// The peer has the same or lesser view of the chain. We have nothing to request of them.
NotInteresting,
}
impl PeerStatus {
pub fn should_handshake(self) -> bool {
match self {
PeerStatus::DifferentNetworkId => false,
PeerStatus::FinalizedEpochNotInChain => false,
PeerStatus::HigherFinalizedEpoch => true,
PeerStatus::HigherBestSlot => true,
PeerStatus::NotInteresting => true,
}
}
}
impl From<HelloMessage> for PeerSyncInfo { impl From<HelloMessage> for PeerSyncInfo {
fn from(hello: HelloMessage) -> PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo {
PeerSyncInfo { PeerSyncInfo {
@ -90,7 +50,7 @@ impl From<HelloMessage> for PeerSyncInfo {
impl<T: BeaconChainTypes> From<&Arc<BeaconChain<T>>> for PeerSyncInfo { impl<T: BeaconChainTypes> From<&Arc<BeaconChain<T>>> for PeerSyncInfo {
fn from(chain: &Arc<BeaconChain<T>>) -> PeerSyncInfo { fn from(chain: &Arc<BeaconChain<T>>) -> PeerSyncInfo {
Self::from(chain.hello_message()) Self::from(hello_message(chain))
} }
} }
@ -151,9 +111,9 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
/// ///
/// Sends a `Hello` message to the peer. /// Sends a `Hello` message to the peer.
pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) {
info!(self.log, "PeerConnect"; "peer" => format!("{:?}", peer_id)); info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id));
network.send_rpc_request(peer_id, RPCRequest::Hello(self.chain.hello_message())); network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain)));
} }
/// Handle a `Hello` request. /// Handle a `Hello` request.
@ -172,7 +132,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
network.send_rpc_response( network.send_rpc_response(
peer_id.clone(), peer_id.clone(),
request_id, request_id,
RPCResponse::Hello(self.chain.hello_message()), RPCResponse::Hello(hello_message(&self.chain)),
); );
self.process_hello(peer_id, hello, network); self.process_hello(peer_id, hello, network);
@ -191,51 +151,6 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
self.process_hello(peer_id, hello, network); self.process_hello(peer_id, hello, network);
} }
/// Returns a `PeerStatus` for some peer.
fn peer_status(&self, peer: PeerSyncInfo) -> PeerStatus {
let local = PeerSyncInfo::from(&self.chain);
if peer.has_different_network_id_to(local) {
return PeerStatus::DifferentNetworkId;
}
if local.has_higher_finalized_epoch_than(peer) {
let peer_finalized_slot = peer
.latest_finalized_epoch
.start_slot(self.chain.get_spec().slots_per_epoch);
let local_roots = self.chain.get_block_roots(peer_finalized_slot, 1, 0);
if let Ok(local_roots) = local_roots {
if let Some(local_root) = local_roots.get(0) {
if *local_root != peer.latest_finalized_root {
return PeerStatus::FinalizedEpochNotInChain;
}
} else {
error!(
self.log,
"Cannot get root for peer finalized slot.";
"error" => "empty roots"
);
}
} else {
error!(
self.log,
"Cannot get root for peer finalized slot.";
"error" => format!("{:?}", local_roots)
);
}
}
if peer.has_higher_finalized_epoch_than(local) {
PeerStatus::HigherFinalizedEpoch
} else if peer.has_higher_best_slot_than(local) {
PeerStatus::HigherBestSlot
} else {
PeerStatus::NotInteresting
}
}
/// Process a `Hello` message, requesting new blocks if appropriate. /// Process a `Hello` message, requesting new blocks if appropriate.
/// ///
/// Disconnects the peer if required. /// Disconnects the peer if required.
@ -245,31 +160,64 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
hello: HelloMessage, hello: HelloMessage,
network: &mut NetworkContext, network: &mut NetworkContext,
) { ) {
let spec = self.chain.get_spec(); let spec = &self.chain.spec;
let remote = PeerSyncInfo::from(hello); let remote = PeerSyncInfo::from(hello);
let local = PeerSyncInfo::from(&self.chain); let local = PeerSyncInfo::from(&self.chain);
let remote_status = self.peer_status(remote);
if remote_status.should_handshake() { // Disconnect nodes who are on a different network.
info!(self.log, "HandshakeSuccess"; "peer" => format!("{:?}", peer_id)); if local.network_id != remote.network_id {
self.known_peers.insert(peer_id.clone(), remote);
} else {
info!( info!(
self.log, "HandshakeFailure"; self.log, "HandshakeFailure";
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
"reason" => "network_id" "reason" => "network_id"
); );
network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork); network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork);
// Disconnect nodes if our finalized epoch is greater than theirs, and their finalized
// epoch is not in our chain. Viz., they are on another chain.
//
// If the local or remote has a `latest_finalized_root == ZERO_HASH`, skip the checks about
// the finalized_root. The logic is awkward and I think we're better off without it.
} else if (local.latest_finalized_epoch >= remote.latest_finalized_epoch)
&& (!self
.chain
.rev_iter_block_roots(local.best_slot)
.any(|root| root == remote.latest_finalized_root))
&& (local.latest_finalized_root != spec.zero_hash)
&& (remote.latest_finalized_root != spec.zero_hash)
{
info!(
self.log, "HandshakeFailure";
"peer" => format!("{:?}", peer_id),
"reason" => "wrong_finalized_chain"
);
network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork);
// Process handshakes from peers that seem to be on our chain.
} else {
info!(self.log, "HandshakeSuccess"; "peer" => format!("{:?}", peer_id));
self.known_peers.insert(peer_id.clone(), remote);
// If we have equal or better finalized epochs and best slots, we require nothing else from
// this peer.
//
// We make an exception when our best slot is 0. Best slot does not indicate whether or
// not there is a block at slot zero.
if (remote.latest_finalized_epoch <= local.latest_finalized_epoch)
&& (remote.best_slot <= local.best_slot)
&& (local.best_slot > 0)
{
debug!(self.log, "Peer is naive"; "peer" => format!("{:?}", peer_id));
return;
} }
// If required, send additional requests. // If the remote has a higher finalized epoch, request all block roots from our finalized
match remote_status { // epoch through to its best slot.
PeerStatus::HigherFinalizedEpoch => { if remote.latest_finalized_epoch > local.latest_finalized_epoch {
let start_slot = remote debug!(self.log, "Peer has high finalized epoch"; "peer" => format!("{:?}", peer_id));
let start_slot = local
.latest_finalized_epoch .latest_finalized_epoch
.start_slot(spec.slots_per_epoch); .start_slot(T::EthSpec::slots_per_epoch());
let required_slots = start_slot - local.best_slot; let required_slots = remote.best_slot - start_slot;
self.request_block_roots( self.request_block_roots(
peer_id, peer_id,
@ -279,22 +227,26 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
}, },
network, network,
); );
} // If the remote has a greater best slot, request the roots between our best slot and their
PeerStatus::HigherBestSlot => { // best slot.
let required_slots = remote.best_slot - local.best_slot; } else if remote.best_slot > local.best_slot {
debug!(self.log, "Peer has higher best slot"; "peer" => format!("{:?}", peer_id));
let start_slot = local
.latest_finalized_epoch
.start_slot(T::EthSpec::slots_per_epoch());
let required_slots = remote.best_slot - start_slot;
self.request_block_roots( self.request_block_roots(
peer_id, peer_id,
BeaconBlockRootsRequest { BeaconBlockRootsRequest {
start_slot: local.best_slot + 1, start_slot,
count: required_slots.into(), count: required_slots.into(),
}, },
network, network,
); );
} else {
debug!(self.log, "Nothing to request from peer"; "peer" => format!("{:?}", peer_id));
} }
PeerStatus::FinalizedEpochNotInChain => {}
PeerStatus::DifferentNetworkId => {}
PeerStatus::NotInteresting => {}
} }
} }
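As a worked example of the request arithmetic above, assuming the minimal spec's 8 slots per epoch: if our latest finalized epoch is 2 and the peer reports a best slot of 25, then start_slot = 2 * 8 = 16 and required_slots = 25 - 16 = 9, so we request 9 block roots from the peer beginning at slot 16.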
@ -311,34 +263,40 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
"BlockRootsRequest"; "BlockRootsRequest";
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
"count" => req.count, "count" => req.count,
"start_slot" => req.start_slot,
); );
let roots = match self let mut roots: Vec<Hash256> = self
.chain .chain
.get_block_roots(req.start_slot, req.count as usize, 0) .rev_iter_block_roots(req.start_slot + req.count)
{ .skip(1)
Ok(roots) => roots, .take(req.count as usize)
Err(e) => { .collect();
// TODO: return RPC error.
warn!(
self.log,
"RPCRequest"; "peer" => format!("{:?}", peer_id),
"req" => "BeaconBlockRoots",
"error" => format!("{:?}", e)
);
return;
}
};
let roots = roots if roots.len() as u64 != req.count {
debug!(
self.log,
"BlockRootsRequest";
"peer" => format!("{:?}", peer_id),
"msg" => "Failed to return all requested hashes",
"requested" => req.count,
"returned" => roots.len(),
);
}
roots.reverse();
let mut roots: Vec<BlockRootSlot> = roots
.iter() .iter()
.enumerate() .enumerate()
.map(|(i, &block_root)| BlockRootSlot { .map(|(i, block_root)| BlockRootSlot {
slot: req.start_slot + Slot::from(i), slot: req.start_slot + Slot::from(i),
block_root, block_root: *block_root,
}) })
.collect(); .collect();
roots.dedup_by_key(|brs| brs.block_root);
network.send_rpc_response( network.send_rpc_response(
peer_id, peer_id,
request_id, request_id,
@ -424,23 +382,29 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
"count" => req.max_headers, "count" => req.max_headers,
); );
let headers = match self.chain.get_block_headers( let count = req.max_headers;
req.start_slot,
req.max_headers as usize, // Collect the block roots.
req.skip_slots as usize, //
) { // Instead of using `chain.rev_iter_blocks` we collect the roots first. This avoids
Ok(headers) => headers, // unnecessary block deserialization when `req.skip_slots > 0`.
Err(e) => { let mut roots: Vec<Hash256> = self
// TODO: return RPC error. .chain
warn!( .rev_iter_block_roots(req.start_slot + (count - 1))
self.log, .take(count as usize)
"RPCRequest"; "peer" => format!("{:?}", peer_id), .collect();
"req" => "BeaconBlockHeaders",
"error" => format!("{:?}", e) roots.reverse();
); roots.dedup();
return;
} let headers: Vec<BeaconBlockHeader> = roots
}; .into_iter()
.step_by(req.skip_slots as usize + 1)
.filter_map(|root| {
let block = self.chain.store.get::<BeaconBlock>(&root).ok()?;
Some(block?.block_header())
})
.collect();
network.send_rpc_response( network.send_rpc_response(
peer_id, peer_id,
@ -488,27 +452,33 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
req: BeaconBlockBodiesRequest, req: BeaconBlockBodiesRequest,
network: &mut NetworkContext, network: &mut NetworkContext,
) { ) {
let block_bodies: Vec<BeaconBlockBody> = req
.block_roots
.iter()
.filter_map(|root| {
if let Ok(Some(block)) = self.chain.store.get::<BeaconBlock>(root) {
Some(block.body)
} else {
debug!(
self.log,
"Peer requested unknown block";
"peer" => format!("{:?}", peer_id),
"request_root" => format!("{:}", root),
);
None
}
})
.collect();
debug!( debug!(
self.log, self.log,
"BlockBodiesRequest"; "BlockBodiesRequest";
"peer" => format!("{:?}", peer_id), "peer" => format!("{:?}", peer_id),
"count" => req.block_roots.len(), "requested" => req.block_roots.len(),
"returned" => block_bodies.len(),
); );
let block_bodies = match self.chain.get_block_bodies(&req.block_roots) {
Ok(bodies) => bodies,
Err(e) => {
// TODO: return RPC error.
warn!(
self.log,
"RPCRequest"; "peer" => format!("{:?}", peer_id),
"req" => "BeaconBlockBodies",
"error" => format!("{:?}", e)
);
return;
}
};
network.send_rpc_response( network.send_rpc_response(
peer_id, peer_id,
request_id, request_id,
@ -542,6 +512,8 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
/// Process a gossip message declaring a new block. /// Process a gossip message declaring a new block.
/// ///
/// Attempts to apply the block to the beacon chain. May queue the block for later processing.
///
/// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers.
pub fn on_block_gossip( pub fn on_block_gossip(
&mut self, &mut self,
@ -549,140 +521,35 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
block: BeaconBlock, block: BeaconBlock,
network: &mut NetworkContext, network: &mut NetworkContext,
) -> bool { ) -> bool {
info!( if let Some(outcome) =
self.log, self.process_block(peer_id.clone(), block.clone(), network, &"gossip")
"NewGossipBlock"; {
"peer" => format!("{:?}", peer_id), match outcome {
); BlockProcessingOutcome::Processed => SHOULD_FORWARD_GOSSIP_BLOCK,
BlockProcessingOutcome::ParentUnknown { .. } => {
// Ignore any block from a finalized slot.
if self.slot_is_finalized(block.slot) {
warn!(
self.log, "NewGossipBlock";
"msg" => "new block slot is finalized.",
"block_slot" => block.slot,
);
return false;
}
let block_root = Hash256::from_slice(&block.tree_hash_root());
// Ignore any block that the chain already knows about.
if self.chain_has_seen_block(&block_root) {
println!("this happened");
// TODO: Age confirm that we shouldn't forward a block if we already know of it.
return false;
}
debug!(
self.log,
"NewGossipBlock";
"peer" => format!("{:?}", peer_id),
"msg" => "processing block",
);
match self.chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::ParentUnknown)) => {
// The block was valid and we processed it successfully.
debug!(
self.log, "NewGossipBlock";
"msg" => "parent block unknown",
"parent_root" => format!("{}", block.previous_block_root),
"peer" => format!("{:?}", peer_id),
);
// Queue the block for later processing.
self.import_queue self.import_queue
.enqueue_full_blocks(vec![block], peer_id.clone()); .enqueue_full_blocks(vec![block], peer_id.clone());
// Send a hello to learn of the clients best slot so we can then sync the require
// parent(s). SHOULD_FORWARD_GOSSIP_BLOCK
network.send_rpc_request(
peer_id.clone(),
RPCRequest::Hello(self.chain.hello_message()),
);
// Forward the block onto our peers.
//
// Note: this may need to be changed if we decide to only forward blocks if we have
// all required info.
true
} }
Ok(BlockProcessingOutcome::InvalidBlock(InvalidBlock::FutureSlot { BlockProcessingOutcome::FutureSlot {
present_slot, present_slot,
block_slot, block_slot,
})) => { } if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => {
if block_slot - present_slot > FUTURE_SLOT_TOLERANCE { self.import_queue
// The block is too far in the future, drop it. .enqueue_full_blocks(vec![block], peer_id.clone());
warn!(
self.log, "NewGossipBlock"; SHOULD_FORWARD_GOSSIP_BLOCK
"msg" => "future block rejected",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
// Do not forward the block around to peers.
false
} else {
// The block is in the future, but not too far.
warn!(
self.log, "NewGossipBlock";
"msg" => "queuing future block",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
// Queue the block for later processing.
self.import_queue.enqueue_full_blocks(vec![block], peer_id);
// Forward the block around to peers.
true
} }
} // Note: known blocks are forwarded on the gossip network.
Ok(outcome) => {
if outcome.is_invalid() {
// The peer has sent a block which is fundamentally invalid.
warn!(
self.log, "NewGossipBlock";
"msg" => "invalid block from peer",
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", peer_id),
);
// Disconnect the peer
network.disconnect(peer_id, GoodbyeReason::Fault);
// Do not forward the block to peers.
false
} else if outcome.sucessfully_processed() {
// The block was valid and we processed it successfully.
info!(
self.log, "NewGossipBlock";
"msg" => "block import successful",
"peer" => format!("{:?}", peer_id),
);
// Forward the block to peers
true
} else {
// The block wasn't necessarily invalid but we didn't process it successfully.
// This condition shouldn't be reached.
error!(
self.log, "NewGossipBlock";
"msg" => "unexpected condition in processing block.",
"outcome" => format!("{:?}", outcome),
);
// Do not forward the block on.
false
}
}
Err(e) => {
// We encountered an error whilst processing the block.
// //
// Blocks should not be able to trigger errors, instead they should be flagged as // We rely upon the lower layers (libp2p) to stop loops occurring from re-gossiped
// invalid. // blocks.
error!( BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK,
self.log, "NewGossipBlock"; _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK,
"msg" => "internal error in processing block.",
"error" => format!("{:?}", e),
);
// Do not forward the block to peers.
false
} }
} else {
SHOULD_NOT_FORWARD_GOSSIP_BLOCK
} }
} }
@ -691,19 +558,15 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
/// Not currently implemented. /// Not currently implemented.
pub fn on_attestation_gossip( pub fn on_attestation_gossip(
&mut self, &mut self,
peer_id: PeerId, _peer_id: PeerId,
msg: Attestation, msg: Attestation,
_network: &mut NetworkContext, _network: &mut NetworkContext,
) { ) {
info!(
self.log,
"NewAttestationGossip";
"peer" => format!("{:?}", peer_id),
);
match self.chain.process_attestation(msg) { match self.chain.process_attestation(msg) {
Ok(()) => info!(self.log, "ImportedAttestation"), Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"),
Err(e) => warn!(self.log, "InvalidAttestation"; "error" => format!("{:?}", e)), Err(e) => {
warn!(self.log, "InvalidAttestation"; "source" => "gossip", "error" => format!("{:?}", e))
}
} }
} }
@ -713,55 +576,32 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
/// the queue. /// the queue.
pub fn process_import_queue(&mut self, network: &mut NetworkContext) { pub fn process_import_queue(&mut self, network: &mut NetworkContext) {
let mut successful = 0; let mut successful = 0;
let mut invalid = 0;
let mut errored = 0;
// Loop through all of the complete blocks in the queue. // Loop through all of the complete blocks in the queue.
for (block_root, block, sender) in self.import_queue.complete_blocks() { for (block_root, block, sender) in self.import_queue.complete_blocks() {
match self.chain.process_block(block) { let processing_result = self.process_block(sender, block.clone(), network, &"gossip");
Ok(outcome) => {
if outcome.is_invalid() { let should_dequeue = match processing_result {
invalid += 1; Some(BlockProcessingOutcome::ParentUnknown { .. }) => false,
warn!( Some(BlockProcessingOutcome::FutureSlot {
self.log, present_slot,
"InvalidBlock"; block_slot,
"sender_peer_id" => format!("{:?}", sender), }) if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => false,
"reason" => format!("{:?}", outcome), _ => true,
); };
network.disconnect(sender, GoodbyeReason::Fault);
break; if processing_result == Some(BlockProcessingOutcome::Processed) {
successful += 1;
} }
// If this results to true, the item will be removed from the queue. if should_dequeue {
if outcome.sucessfully_processed() {
successful += 1;
self.import_queue.remove(block_root); self.import_queue.remove(block_root);
} else {
debug!(
self.log,
"ProcessImportQueue";
"msg" => "Block not imported",
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", sender),
);
}
}
Err(e) => {
errored += 1;
error!(self.log, "BlockProcessingError"; "error" => format!("{:?}", e));
}
} }
} }
if successful > 0 { if successful > 0 {
info!(self.log, "Imported {} blocks", successful) info!(self.log, "Imported {} blocks", successful)
} }
if invalid > 0 {
warn!(self.log, "Rejected {} invalid blocks", invalid)
}
if errored > 0 {
warn!(self.log, "Failed to process {} blocks", errored)
}
} }
/// Request some `BeaconBlockRoots` from the remote peer. /// Request some `BeaconBlockRoots` from the remote peer.
@ -833,17 +673,140 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
}) })
} }
/// Returns `true` if the given slot is finalized in our chain.
fn slot_is_finalized(&self, slot: Slot) -> bool {
slot <= self
.chain
.hello_message()
.latest_finalized_epoch
.start_slot(self.chain.get_spec().slots_per_epoch)
}
/// Generates our current state in the form of a HELLO RPC message. /// Generates our current state in the form of a HELLO RPC message.
pub fn generate_hello(&self) -> HelloMessage { pub fn generate_hello(&self) -> HelloMessage {
self.chain.hello_message() hello_message(&self.chain)
}
/// Processes the `block` that was received from `peer_id`.
///
/// If the block was submitted to the beacon chain without internal error, `Some(outcome)` is
/// returned, otherwise `None` is returned. Note: `Some(_)` does not necessarily indicate that
/// the block was successfully processed or valid.
///
/// This function performs the following duties:
///
/// - Attempting to import the block into the beacon chain.
/// - Logging
/// - Requesting unavailable blocks (e.g., if parent is unknown).
/// - Disconnecting faulty nodes.
///
/// This function does not remove processed blocks from the import queue.
fn process_block(
&mut self,
peer_id: PeerId,
block: BeaconBlock,
network: &mut NetworkContext,
source: &str,
) -> Option<BlockProcessingOutcome> {
let processing_result = self.chain.process_block(block.clone());
if let Ok(outcome) = processing_result {
match outcome {
BlockProcessingOutcome::Processed => {
info!(
self.log, "Imported block from network";
"source" => source,
"slot" => block.slot,
"peer" => format!("{:?}", peer_id),
);
}
BlockProcessingOutcome::ParentUnknown { parent } => {
// The block could not be processed because its parent is unknown to us.
debug!(
self.log, "ParentBlockUnknown";
"source" => source,
"parent_root" => format!("{}", parent),
"peer" => format!("{:?}", peer_id),
);
// Send a hello to learn of the client's best slot so we can then sync the required
// parent(s).
network.send_rpc_request(
peer_id.clone(),
RPCRequest::Hello(hello_message(&self.chain)),
);
// Explicitly request the parent block from the peer.
//
// It is likely that this is duplicate work, given we already send a hello
// request. However, I believe there are some edge-cases where the hello
// message doesn't suffice, so we perform this request as well.
self.request_block_headers(
peer_id,
BeaconBlockHeadersRequest {
start_root: parent,
start_slot: block.slot - 1,
max_headers: 1,
skip_slots: 0,
},
network,
)
}
BlockProcessingOutcome::FutureSlot {
present_slot,
block_slot,
} => {
if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot {
// The block is too far in the future, drop it.
warn!(
self.log, "FutureBlock";
"source" => source,
"msg" => "block for future slot rejected, check your time",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
network.disconnect(peer_id, GoodbyeReason::Fault);
} else {
// The block is in the future, but not too far.
debug!(
self.log, "QueuedFutureBlock";
"source" => source,
"msg" => "queuing future block, check your time",
"present_slot" => present_slot,
"block_slot" => block_slot,
"FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
"peer" => format!("{:?}", peer_id),
);
}
}
_ => {
debug!(
self.log, "InvalidBlock";
"source" => source,
"msg" => "peer sent invalid block",
"outcome" => format!("{:?}", outcome),
"peer" => format!("{:?}", peer_id),
);
}
}
Some(outcome)
} else {
error!(
self.log, "BlockProcessingFailure";
"source" => source,
"msg" => "unexpected condition in processing block.",
"outcome" => format!("{:?}", processing_result)
);
None
}
}
}
/// Build a `HelloMessage` representing the state of the given `beacon_chain`.
fn hello_message<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) -> HelloMessage {
let spec = &beacon_chain.spec;
let state = &beacon_chain.head().beacon_state;
HelloMessage {
network_id: spec.chain_id,
latest_finalized_root: state.finalized_root,
latest_finalized_epoch: state.finalized_epoch,
best_root: beacon_chain.head().beacon_block_root,
best_slot: state.slot,
} }
} }
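The handshake in `process_hello` now compares two of these messages field-by-field rather than going through a `PeerStatus`. A minimal sketch of that comparison, using only the fields built above (the helper function itself is hypothetical and not part of this change):

fn peer_looks_interesting(local: &HelloMessage, remote: &HelloMessage) -> bool {
    // Peers on a different network are disconnected rather than synced from.
    if local.network_id != remote.network_id {
        return false;
    }
    // A peer with a higher finalized epoch or a higher best slot may hold
    // blocks that we are missing.
    remote.latest_finalized_epoch > local.latest_finalized_epoch
        || remote.best_slot > local.best_slot
}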

View File

@ -20,6 +20,8 @@ clap = "2.32.0"
store = { path = "../store" } store = { path = "../store" }
dirs = "1.0.3" dirs = "1.0.3"
futures = "0.1.23" futures = "0.1.23"
serde = "1.0"
serde_derive = "1.0"
slog = "^2.2.3" slog = "^2.2.3"
slog-term = "^2.4.0" slog-term = "^2.4.0"
slog-async = "^2.3.0" slog-async = "^2.3.0"

View File

@ -1,6 +1,8 @@
use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::PubsubMessage;
use futures::Future; use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
use network::NetworkMessage;
use protos::services::{ use protos::services::{
AttestationData as AttestationDataProto, ProduceAttestationDataRequest, AttestationData as AttestationDataProto, ProduceAttestationDataRequest,
ProduceAttestationDataResponse, PublishAttestationRequest, PublishAttestationResponse, ProduceAttestationDataResponse, PublishAttestationRequest, PublishAttestationResponse,
@ -14,6 +16,7 @@ use types::Attestation;
#[derive(Clone)] #[derive(Clone)]
pub struct AttestationServiceInstance<T: BeaconChainTypes> { pub struct AttestationServiceInstance<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>, pub chain: Arc<BeaconChain<T>>,
pub network_chan: crossbeam_channel::Sender<NetworkMessage>,
pub log: slog::Logger, pub log: slog::Logger,
} }
@ -34,7 +37,7 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
// verify the slot, drop lock on state afterwards // verify the slot, drop lock on state afterwards
{ {
let slot_requested = req.get_slot(); let slot_requested = req.get_slot();
let state = self.chain.get_state(); let state = &self.chain.current_state();
// Start by performing some checks // Start by performing some checks
// Check that the AttestionData is for the current slot (otherwise it will not be valid) // Check that the AttestionData is for the current slot (otherwise it will not be valid)
@ -124,7 +127,7 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
} }
}; };
match self.chain.process_attestation(attestation) { match self.chain.process_attestation(attestation.clone()) {
Ok(_) => { Ok(_) => {
// Attestation was successfully processed. // Attestation was successfully processed.
info!( info!(
@ -133,6 +136,25 @@ impl<T: BeaconChainTypes> AttestationService for AttestationServiceInstance<T> {
"type" => "valid_attestation", "type" => "valid_attestation",
); );
// TODO: Obtain topics from the network service properly.
let topic = types::TopicBuilder::new("beacon_chain".to_string()).build();
let message = PubsubMessage::Attestation(attestation);
// Publish the attestation to the p2p network via gossipsub.
self.network_chan
.send(NetworkMessage::Publish {
topics: vec![topic],
message: Box::new(message),
})
.unwrap_or_else(|e| {
error!(
self.log,
"PublishAttestation";
"type" => "failed to publish to gossipsub",
"error" => format!("{:?}", e)
);
});
resp.set_success(true); resp.set_success(true);
} }
Err(e) => { Err(e) => {

View File

@ -1,4 +1,4 @@
use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome};
use crossbeam_channel; use crossbeam_channel;
use eth2_libp2p::PubsubMessage; use eth2_libp2p::PubsubMessage;
use futures::Future; use futures::Future;
@ -95,14 +95,12 @@ impl<T: BeaconChainTypes> BeaconBlockService for BeaconBlockServiceInstance<T> {
Ok(block) => { Ok(block) => {
match self.chain.process_block(block.clone()) { match self.chain.process_block(block.clone()) {
Ok(outcome) => { Ok(outcome) => {
if outcome.sucessfully_processed() { if outcome == BlockProcessingOutcome::Processed {
// Block was successfully processed. // Block was successfully processed.
info!( info!(
self.log, self.log,
"PublishBeaconBlock"; "Valid block from RPC";
"type" => "valid_block",
"block_slot" => block.slot, "block_slot" => block.slot,
"outcome" => format!("{:?}", outcome)
); );
// TODO: Obtain topics from the network service properly. // TODO: Obtain topics from the network service properly.
@ -126,12 +124,11 @@ impl<T: BeaconChainTypes> BeaconBlockService for BeaconBlockServiceInstance<T> {
}); });
resp.set_success(true); resp.set_success(true);
} else if outcome.is_invalid() { } else {
// Block was invalid. // Block was not successfully processed.
warn!( warn!(
self.log, self.log,
"PublishBeaconBlock"; "Invalid block from RPC";
"type" => "invalid_block",
"outcome" => format!("{:?}", outcome) "outcome" => format!("{:?}", outcome)
); );
@ -139,17 +136,6 @@ impl<T: BeaconChainTypes> BeaconBlockService for BeaconBlockServiceInstance<T> {
resp.set_msg( resp.set_msg(
format!("InvalidBlock: {:?}", outcome).as_bytes().to_vec(), format!("InvalidBlock: {:?}", outcome).as_bytes().to_vec(),
); );
} else {
// Some failure during processing.
warn!(
self.log,
"PublishBeaconBlock";
"type" => "unable_to_import",
"outcome" => format!("{:?}", outcome)
);
resp.set_success(false);
resp.set_msg(format!("other: {:?}", outcome).as_bytes().to_vec());
} }
} }
Err(e) => { Err(e) => {

View File

@ -1,71 +0,0 @@
use beacon_chain::BeaconChain as RawBeaconChain;
use beacon_chain::{
parking_lot::{RwLockReadGuard, RwLockWriteGuard},
types::{BeaconState, ChainSpec, Signature},
AttestationValidationError, BlockProductionError,
};
pub use beacon_chain::{BeaconChainError, BeaconChainTypes, BlockProcessingOutcome};
use types::{Attestation, AttestationData, BeaconBlock, EthSpec};
/// The RPC's API to the beacon chain.
pub trait BeaconChain<T: BeaconChainTypes>: Send + Sync {
fn get_spec(&self) -> &ChainSpec;
fn get_state(&self) -> RwLockReadGuard<BeaconState<T::EthSpec>>;
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState<T::EthSpec>>;
fn process_block(&self, block: BeaconBlock)
-> Result<BlockProcessingOutcome, BeaconChainError>;
fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState<T::EthSpec>), BlockProductionError>;
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError>;
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError>;
}
impl<T: BeaconChainTypes> BeaconChain<T> for RawBeaconChain<T> {
fn get_spec(&self) -> &ChainSpec {
&self.spec
}
fn get_state(&self) -> RwLockReadGuard<BeaconState<T::EthSpec>> {
self.state.read()
}
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState<T::EthSpec>> {
self.state.write()
}
fn process_block(
&self,
block: BeaconBlock,
) -> Result<BlockProcessingOutcome, BeaconChainError> {
self.process_block(block)
}
fn produce_block(
&self,
randao_reveal: Signature,
) -> Result<(BeaconBlock, BeaconState<T::EthSpec>), BlockProductionError> {
self.produce_block(randao_reveal)
}
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError> {
self.produce_attestation_data(shard)
}
fn process_attestation(
&self,
attestation: Attestation,
) -> Result<(), AttestationValidationError> {
self.process_attestation(attestation)
}
}

View File

@ -1,4 +1,4 @@
use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use futures::Future; use futures::Future;
use grpcio::{RpcContext, UnarySink}; use grpcio::{RpcContext, UnarySink};
use protos::services::{Empty, Fork, NodeInfoResponse}; use protos::services::{Empty, Fork, NodeInfoResponse};
@ -22,7 +22,7 @@ impl<T: BeaconChainTypes> BeaconNodeService for BeaconNodeServiceInstance<T> {
node_info.set_version(version::version()); node_info.set_version(version::version());
// get the chain state // get the chain state
let state = self.chain.get_state(); let state = &self.chain.head().beacon_state;
let state_fork = state.fork.clone(); let state_fork = state.fork.clone();
let genesis_time = state.genesis_time; let genesis_time = state.genesis_time;
@ -32,10 +32,12 @@ impl<T: BeaconChainTypes> BeaconNodeService for BeaconNodeServiceInstance<T> {
fork.set_current_version(state_fork.current_version.to_vec()); fork.set_current_version(state_fork.current_version.to_vec());
fork.set_epoch(state_fork.epoch.into()); fork.set_epoch(state_fork.epoch.into());
let spec = &self.chain.spec;
node_info.set_fork(fork); node_info.set_fork(fork);
node_info.set_genesis_time(genesis_time); node_info.set_genesis_time(genesis_time);
node_info.set_genesis_slot(self.chain.get_spec().genesis_slot.as_u64()); node_info.set_genesis_slot(spec.genesis_slot.as_u64());
node_info.set_chain_id(u32::from(self.chain.get_spec().chain_id)); node_info.set_chain_id(u32::from(spec.chain_id));
// send the node_info the requester // send the node_info the requester
let error_log = self.log.clone(); let error_log = self.log.clone();

View File

@ -1,7 +1,9 @@
use clap::ArgMatches;
use serde_derive::{Deserialize, Serialize};
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
/// RPC Configuration /// RPC Configuration
#[derive(Debug, Clone)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config { pub struct Config {
/// Enable the RPC server. /// Enable the RPC server.
pub enabled: bool, pub enabled: bool,
@ -20,3 +22,23 @@ impl Default for Config {
} }
} }
} }
impl Config {
pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
if args.is_present("rpc") {
self.enabled = true;
}
if let Some(rpc_address) = args.value_of("rpc-address") {
self.listen_address = rpc_address
.parse::<Ipv4Addr>()
.map_err(|_| "rpc-address is not IPv4 address")?;
}
if let Some(rpc_port) = args.value_of("rpc-port") {
self.port = rpc_port.parse::<u16>().map_err(|_| "rpc-port is not u16")?;
}
Ok(())
}
}
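For reference, a minimal sketch of wiring these flags through clap and into the config; the argument definitions below are assumptions that merely mirror the names parsed by `apply_cli_args` and are not part of this change:

fn rpc_config_from_cli() -> Result<Config, &'static str> {
    // Hypothetical flag definitions; only the names must match `apply_cli_args`.
    let matches = clap::App::new("beacon_node")
        .arg(clap::Arg::with_name("rpc").long("rpc"))
        .arg(clap::Arg::with_name("rpc-address").long("rpc-address").takes_value(true))
        .arg(clap::Arg::with_name("rpc-port").long("rpc-port").takes_value(true))
        .get_matches();
    let mut config = Config::default();
    config.apply_cli_args(&matches)?;
    Ok(config)
}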

View File

@ -1,15 +1,14 @@
mod attestation; mod attestation;
mod beacon_block; mod beacon_block;
pub mod beacon_chain;
mod beacon_node; mod beacon_node;
pub mod config; pub mod config;
mod validator; mod validator;
use self::attestation::AttestationServiceInstance; use self::attestation::AttestationServiceInstance;
use self::beacon_block::BeaconBlockServiceInstance; use self::beacon_block::BeaconBlockServiceInstance;
use self::beacon_chain::{BeaconChain, BeaconChainTypes};
use self::beacon_node::BeaconNodeServiceInstance; use self::beacon_node::BeaconNodeServiceInstance;
use self::validator::ValidatorServiceInstance; use self::validator::ValidatorServiceInstance;
use beacon_chain::{BeaconChain, BeaconChainTypes};
pub use config::Config as RPCConfig; pub use config::Config as RPCConfig;
use futures::Future; use futures::Future;
use grpcio::{Environment, ServerBuilder}; use grpcio::{Environment, ServerBuilder};
@ -28,7 +27,8 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
network_chan: crossbeam_channel::Sender<NetworkMessage>, network_chan: crossbeam_channel::Sender<NetworkMessage>,
beacon_chain: Arc<BeaconChain<T>>, beacon_chain: Arc<BeaconChain<T>>,
log: &slog::Logger, log: &slog::Logger,
) -> exit_future::Signal { ) -> exit_future::Signal
{
let log = log.new(o!("Service"=>"RPC")); let log = log.new(o!("Service"=>"RPC"));
let env = Arc::new(Environment::new(1)); let env = Arc::new(Environment::new(1));
@ -47,7 +47,7 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
let beacon_block_service = { let beacon_block_service = {
let instance = BeaconBlockServiceInstance { let instance = BeaconBlockServiceInstance {
chain: beacon_chain.clone(), chain: beacon_chain.clone(),
network_chan, network_chan: network_chan.clone(),
log: log.clone(), log: log.clone(),
}; };
create_beacon_block_service(instance) create_beacon_block_service(instance)
@ -62,6 +62,7 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
let attestation_service = { let attestation_service = {
let instance = AttestationServiceInstance { let instance = AttestationServiceInstance {
chain: beacon_chain.clone(), chain: beacon_chain.clone(),
network_chan,
log: log.clone(), log: log.clone(),
}; };
create_attestation_service(instance) create_attestation_service(instance)

View File

@ -1,4 +1,4 @@
use crate::beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use bls::PublicKey; use bls::PublicKey;
use futures::Future; use futures::Future;
use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink};
@ -7,14 +7,13 @@ use protos::services_grpc::ValidatorService;
use slog::{trace, warn}; use slog::{trace, warn};
use ssz::Decode; use ssz::Decode;
use std::sync::Arc; use std::sync::Arc;
use types::{Epoch, RelativeEpoch}; use types::{Epoch, EthSpec, RelativeEpoch};
#[derive(Clone)] #[derive(Clone)]
pub struct ValidatorServiceInstance<T: BeaconChainTypes> { pub struct ValidatorServiceInstance<T: BeaconChainTypes> {
pub chain: Arc<BeaconChain<T>>, pub chain: Arc<BeaconChain<T>>,
pub log: slog::Logger, pub log: slog::Logger,
} }
//TODO: Refactor Errors
impl<T: BeaconChainTypes> ValidatorService for ValidatorServiceInstance<T> { impl<T: BeaconChainTypes> ValidatorService for ValidatorServiceInstance<T> {
/// For a list of validator public keys, this function returns the slot at which each /// For a list of validator public keys, this function returns the slot at which each
@ -29,14 +28,15 @@ impl<T: BeaconChainTypes> ValidatorService for ValidatorServiceInstance<T> {
let validators = req.get_validators(); let validators = req.get_validators();
trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch());
let spec = self.chain.get_spec(); let spec = &self.chain.spec;
let state = self.chain.get_state(); let state = &self.chain.current_state();
let epoch = Epoch::from(req.get_epoch()); let epoch = Epoch::from(req.get_epoch());
let mut resp = GetDutiesResponse::new(); let mut resp = GetDutiesResponse::new();
let resp_validators = resp.mut_active_validators(); let resp_validators = resp.mut_active_validators();
let relative_epoch = let relative_epoch =
match RelativeEpoch::from_epoch(state.slot.epoch(spec.slots_per_epoch), epoch) { match RelativeEpoch::from_epoch(state.slot.epoch(T::EthSpec::slots_per_epoch()), epoch)
{
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
// incorrect epoch // incorrect epoch
@ -52,7 +52,7 @@ impl<T: BeaconChainTypes> ValidatorService for ValidatorServiceInstance<T> {
}; };
let validator_proposers: Result<Vec<usize>, _> = epoch let validator_proposers: Result<Vec<usize>, _> = epoch
.slot_iter(spec.slots_per_epoch) .slot_iter(T::EthSpec::slots_per_epoch())
.map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec)) .map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec))
.collect(); .collect();
let validator_proposers = match validator_proposers { let validator_proposers = match validator_proposers {
@ -148,7 +148,7 @@ impl<T: BeaconChainTypes> ValidatorService for ValidatorServiceInstance<T> {
// check if the validator needs to propose a block // check if the validator needs to propose a block
if let Some(slot) = validator_proposers.iter().position(|&v| val_index == v) { if let Some(slot) = validator_proposers.iter().position(|&v| val_index == v) {
duty.set_block_production_slot( duty.set_block_production_slot(
epoch.start_slot(spec.slots_per_epoch).as_u64() + slot as u64, epoch.start_slot(T::EthSpec::slots_per_epoch()).as_u64() + slot as u64,
); );
} else { } else {
// no blocks to propose this epoch // no blocks to propose this epoch

View File

@ -3,8 +3,15 @@ extern crate slog;
mod run; mod run;
use clap::{App, Arg}; use clap::{App, Arg};
use client::ClientConfig; use client::{ClientConfig, Eth2Config};
use slog::{error, o, Drain}; use eth2_config::{get_data_dir, read_from_file, write_to_file};
use slog::{crit, o, Drain};
use std::path::PathBuf;
pub const DEFAULT_DATA_DIR: &str = ".lighthouse";
pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml";
pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
fn main() { fn main() {
let decorator = slog_term::TermDecorator::new().build(); let decorator = slog_term::TermDecorator::new().build();
@ -22,28 +29,22 @@ fn main() {
.long("datadir") .long("datadir")
.value_name("DIR") .value_name("DIR")
.help("Data directory for keys and databases.") .help("Data directory for keys and databases.")
.takes_value(true), .takes_value(true)
.default_value(DEFAULT_DATA_DIR),
) )
// network related arguments // network related arguments
.arg( .arg(
Arg::with_name("listen-address") Arg::with_name("listen-address")
.long("listen-address") .long("listen-address")
.value_name("Listen Address") .value_name("Listen Address")
.help("The Network address to listen for p2p connections.") .help("One or more comma-delimited multi-addresses to listen for p2p connections.")
.takes_value(true),
)
.arg(
Arg::with_name("port")
.long("port")
.value_name("PORT")
.help("Network listen port for p2p connections.")
.takes_value(true), .takes_value(true),
) )
.arg( .arg(
Arg::with_name("boot-nodes") Arg::with_name("boot-nodes")
.long("boot-nodes") .long("boot-nodes")
.value_name("BOOTNODES") .value_name("BOOTNODES")
.help("A list of comma separated multi addresses representing bootnodes to connect to.") .help("One or more comma-delimited multi-addresses to bootstrap the p2p network.")
.takes_value(true), .takes_value(true),
) )
// rpc related arguments // rpc related arguments
@ -68,6 +69,28 @@ fn main() {
.help("Listen port for RPC endpoint.") .help("Listen port for RPC endpoint.")
.takes_value(true), .takes_value(true),
) )
// HTTP related arguments
.arg(
Arg::with_name("http")
.long("http")
.value_name("HTTP")
.help("Enable the HTTP server.")
.takes_value(false),
)
.arg(
Arg::with_name("http-address")
.long("http-address")
.value_name("HTTPADDRESS")
.help("Listen address for the HTTP server.")
.takes_value(true),
)
.arg(
Arg::with_name("http-port")
.long("http-port")
.value_name("HTTPPORT")
.help("Listen port for the HTTP server.")
.takes_value(true),
)
.arg( .arg(
Arg::with_name("db") Arg::with_name("db")
.long("db") .long("db")
@ -77,13 +100,101 @@ fn main() {
.possible_values(&["disk", "memory"]) .possible_values(&["disk", "memory"])
.default_value("memory"), .default_value("memory"),
) )
.arg(
Arg::with_name("spec-constants")
.long("spec-constants")
.value_name("TITLE")
.short("s")
.help("The title of the spec constants for chain config.")
.takes_value(true)
.possible_values(&["mainnet", "minimal"])
.default_value("minimal"),
)
.arg(
Arg::with_name("recent-genesis")
.long("recent-genesis")
.short("r")
.help("When present, genesis will be within 30 minutes prior. Only for testing"),
)
.get_matches(); .get_matches();
// invalid arguments, panic let data_dir = match get_data_dir(&matches, PathBuf::from(DEFAULT_DATA_DIR)) {
let config = ClientConfig::parse_args(matches, &logger).unwrap(); Ok(dir) => dir,
Err(e) => {
crit!(logger, "Failed to initialize data dir"; "error" => format!("{:?}", e));
return;
}
};
match run::run_beacon_node(config, &logger) { let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME);
// Attempt to load the `ClientConfig` from disk.
//
// If the file doesn't exist, create a new, default one.
let mut client_config = match read_from_file::<ClientConfig>(client_config_path.clone()) {
Ok(Some(c)) => c,
Ok(None) => {
let default = ClientConfig::default();
if let Err(e) = write_to_file(client_config_path, &default) {
crit!(logger, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e));
return;
}
default
}
Err(e) => {
crit!(logger, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e));
return;
}
};
// Ensure the `data_dir` in the config matches that supplied to the CLI.
client_config.data_dir = data_dir.clone();
// Update the client config with any CLI args.
match client_config.apply_cli_args(&matches) {
Ok(()) => (),
Err(s) => {
crit!(logger, "Failed to parse ClientConfig CLI arguments"; "error" => s);
return;
}
};
let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME);
// Attempt to load the `Eth2Config` from file.
//
// If the file doesn't exist, create a default one depending on the CLI flags.
let mut eth2_config = match read_from_file::<Eth2Config>(eth2_config_path.clone()) {
Ok(Some(c)) => c,
Ok(None) => {
let default = match matches.value_of("spec-constants") {
Some("mainnet") => Eth2Config::mainnet(),
Some("minimal") => Eth2Config::minimal(),
_ => unreachable!(), // Guarded by clap's `possible_values`.
};
if let Err(e) = write_to_file(eth2_config_path, &default) {
crit!(logger, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e));
return;
}
default
}
Err(e) => {
crit!(logger, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e));
return;
}
};
// Update the eth2 config with any CLI flags.
match eth2_config.apply_cli_args(&matches) {
Ok(()) => (),
Err(s) => {
crit!(logger, "Failed to parse Eth2Config CLI arguments"; "error" => s);
return;
}
};
match run::run_beacon_node(client_config, eth2_config, &logger) {
Ok(_) => {} Ok(_) => {}
Err(e) => error!(logger, "Beacon node failed because {:?}", e), Err(e) => crit!(logger, "Beacon node failed to start"; "reason" => format!("{:}", e)),
} }
} }
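`read_from_file` and `write_to_file` come from the new `eth2_config` crate and are not shown in this diff. A minimal sketch of what such TOML-backed helpers could look like, assuming serde plus the `toml` crate (the real implementation may differ):

use serde::{de::DeserializeOwned, Serialize};
use std::fs;
use std::path::PathBuf;

// Hypothetical helper: returns Ok(None) when the file does not exist yet.
fn read_from_file<T: DeserializeOwned>(path: PathBuf) -> Result<Option<T>, String> {
    if !path.exists() {
        return Ok(None);
    }
    let raw = fs::read_to_string(&path).map_err(|e| format!("{:?}", e))?;
    toml::from_str(&raw).map(Some).map_err(|e| format!("{:?}", e))
}

// Hypothetical helper: serializes `value` as TOML and writes it to `path`.
fn write_to_file<T: Serialize>(path: PathBuf, value: &T) -> Result<(), String> {
    let raw = toml::to_string(value).map_err(|e| format!("{:?}", e))?;
    fs::write(&path, raw).map_err(|e| format!("{:?}", e))
}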

View File

@ -1,62 +1,115 @@
use client::{ use client::{
error, notifier, BeaconChainTypes, Client, ClientConfig, DBType, TestnetDiskBeaconChainTypes, error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config,
TestnetMemoryBeaconChainTypes, InitialiseBeaconChain,
}; };
use futures::sync::oneshot; use futures::sync::oneshot;
use futures::Future; use futures::Future;
use slog::info; use slog::{error, info, warn};
use std::cell::RefCell; use std::cell::RefCell;
use std::path::Path;
use std::path::PathBuf;
use store::{DiskStore, MemoryStore};
use tokio::runtime::Builder; use tokio::runtime::Builder;
use tokio::runtime::Runtime; use tokio::runtime::Runtime;
use tokio::runtime::TaskExecutor; use tokio::runtime::TaskExecutor;
use tokio_timer::clock::Clock; use tokio_timer::clock::Clock;
use types::{MainnetEthSpec, MinimalEthSpec};
pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Result<()> { pub fn run_beacon_node(
client_config: ClientConfig,
eth2_config: Eth2Config,
log: &slog::Logger,
) -> error::Result<()> {
let runtime = Builder::new() let runtime = Builder::new()
.name_prefix("main-") .name_prefix("main-")
.clock(Clock::system()) .clock(Clock::system())
.build() .build()
.map_err(|e| format!("{:?}", e))?; .map_err(|e| format!("{:?}", e))?;
// Log configuration
info!(log, "Listening on {:?}", &config.net_conf.listen_addresses;
"data_dir" => &config.data_dir.to_str(),
"port" => &config.net_conf.listen_port);
let executor = runtime.executor(); let executor = runtime.executor();
match config.db_type { let db_path: PathBuf = client_config
DBType::Disk => { .db_path()
.ok_or_else::<error::Error, _>(|| "Unable to access database path".into())?;
let db_type = &client_config.db_type;
let spec_constants = eth2_config.spec_constants.clone();
let other_client_config = client_config.clone();
warn!(
log,
"This software is EXPERIMENTAL and provides no guarantees or warranties."
);
let result = match (db_type.as_str(), spec_constants.as_str()) {
("disk", "minimal") => run::<ClientType<DiskStore, MinimalEthSpec>>(
&db_path,
client_config,
eth2_config,
executor,
runtime,
log,
),
("memory", "minimal") => run::<ClientType<MemoryStore, MinimalEthSpec>>(
&db_path,
client_config,
eth2_config,
executor,
runtime,
log,
),
("disk", "mainnet") => run::<ClientType<DiskStore, MainnetEthSpec>>(
&db_path,
client_config,
eth2_config,
executor,
runtime,
log,
),
("memory", "mainnet") => run::<ClientType<MemoryStore, MainnetEthSpec>>(
&db_path,
client_config,
eth2_config,
executor,
runtime,
log,
),
(db_type, spec) => {
error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type);
Err("Unknown specification and/or db_type.".into())
}
};
if result.is_ok() {
info!( info!(
log, log,
"BeaconNode starting"; "Started beacon node";
"type" => "TestnetDiskBeaconChainTypes" "p2p_listen_addresses" => format!("{:?}", &other_client_config.network.listen_addresses()),
"data_dir" => format!("{:?}", other_client_config.data_dir()),
"spec_constants" => &spec_constants,
"db_type" => &other_client_config.db_type,
); );
let client: Client<TestnetDiskBeaconChainTypes> =
Client::new(config, log.clone(), &executor)?;
run(client, executor, runtime, log)
}
DBType::Memory => {
info!(
log,
"BeaconNode starting";
"type" => "TestnetMemoryBeaconChainTypes"
);
let client: Client<TestnetMemoryBeaconChainTypes> =
Client::new(config, log.clone(), &executor)?;
run(client, executor, runtime, log)
}
}
} }
pub fn run<T: BeaconChainTypes + Send + Sync + 'static>( result
client: Client<T>, }
pub fn run<T>(
db_path: &Path,
client_config: ClientConfig,
eth2_config: Eth2Config,
executor: TaskExecutor, executor: TaskExecutor,
mut runtime: Runtime, mut runtime: Runtime,
log: &slog::Logger, log: &slog::Logger,
) -> error::Result<()> { ) -> error::Result<()>
where
T: BeaconChainTypes + InitialiseBeaconChain<T> + Clone + Send + Sync + 'static,
T::Store: OpenDatabase,
{
let store = T::Store::open_database(&db_path)?;
let client: Client<T> = Client::new(client_config, eth2_config, store, log.clone(), &executor)?;
// run service until ctrl-c // run service until ctrl-c
let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); let (ctrlc_send, ctrlc_oneshot) = oneshot::channel();
let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); let ctrlc_send_c = RefCell::new(Some(ctrlc_send));
@ -84,3 +137,22 @@ pub fn run<T: BeaconChainTypes + Send + Sync + 'static>(
runtime.shutdown_on_idle().wait().unwrap(); runtime.shutdown_on_idle().wait().unwrap();
Ok(()) Ok(())
} }
/// A convenience trait, providing a method to open a database.
///
/// Returns an error if the database cannot be opened.
pub trait OpenDatabase: Sized {
fn open_database(path: &Path) -> error::Result<Self>;
}
impl OpenDatabase for MemoryStore {
fn open_database(_path: &Path) -> error::Result<Self> {
Ok(MemoryStore::open())
}
}
impl OpenDatabase for DiskStore {
fn open_database(path: &Path) -> error::Result<Self> {
DiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e).into())
}
}
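A short illustration of how a concrete store is obtained through this trait; `run` makes the same call generically via `T::Store::open_database`, and the paths below are hypothetical:

fn open_stores_example() -> error::Result<()> {
    // MemoryStore ignores the path entirely; DiskStore opens (or creates) it.
    let _memory = MemoryStore::open_database(Path::new("/tmp/unused"))?;
    let _disk = DiskStore::open_database(Path::new("/tmp/lighthouse-db"))?;
    Ok(())
}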

View File

@ -25,15 +25,23 @@ pub fn get_block_at_preceeding_slot<T: Store>(
slot: Slot, slot: Slot,
start_root: Hash256, start_root: Hash256,
) -> Result<Option<(Hash256, BeaconBlock)>, Error> { ) -> Result<Option<(Hash256, BeaconBlock)>, Error> {
let mut root = start_root; Ok(match get_at_preceeding_slot(store, slot, start_root)? {
Some((hash, bytes)) => Some((hash, BeaconBlock::from_ssz_bytes(&bytes)?)),
None => None,
})
}
fn get_at_preceeding_slot<T: Store>(
store: &T,
slot: Slot,
mut root: Hash256,
) -> Result<Option<(Hash256, Vec<u8>)>, Error> {
loop { loop {
if let Some(bytes) = get_block_bytes(store, root)? { if let Some(bytes) = get_block_bytes(store, root)? {
let this_slot = read_slot_from_block_bytes(&bytes)?; let this_slot = read_slot_from_block_bytes(&bytes)?;
if this_slot == slot { if this_slot == slot {
let block = BeaconBlock::from_ssz_bytes(&bytes)?; break Ok(Some((root, bytes)));
break Ok(Some((root, block)));
} else if this_slot < slot { } else if this_slot < slot {
break Ok(None); break Ok(None);
} else { } else {
@ -53,7 +61,7 @@ mod tests {
#[test] #[test]
fn read_slot() { fn read_slot() {
let spec = FewValidatorsEthSpec::spec(); let spec = MinimalEthSpec::default_spec();
let test_slot = |slot: Slot| { let test_slot = |slot: Slot| {
let mut block = BeaconBlock::empty(&spec); let mut block = BeaconBlock::empty(&spec);
@ -77,7 +85,7 @@ mod tests {
#[test] #[test]
fn read_previous_block_root() { fn read_previous_block_root() {
let spec = FewValidatorsEthSpec::spec(); let spec = MinimalEthSpec::default_spec();
let test_root = |root: Hash256| { let test_root = |root: Hash256| {
let mut block = BeaconBlock::empty(&spec); let mut block = BeaconBlock::empty(&spec);
@ -122,7 +130,7 @@ mod tests {
fn chain_without_skips() { fn chain_without_skips() {
let n: usize = 10; let n: usize = 10;
let store = MemoryStore::open(); let store = MemoryStore::open();
let spec = FewValidatorsEthSpec::spec(); let spec = MinimalEthSpec::default_spec();
let slots: Vec<usize> = (0..n).collect(); let slots: Vec<usize> = (0..n).collect();
let blocks_and_roots = build_chain(&store, &slots, &spec); let blocks_and_roots = build_chain(&store, &slots, &spec);
@ -146,7 +154,7 @@ mod tests {
#[test] #[test]
fn chain_with_skips() { fn chain_with_skips() {
let store = MemoryStore::open(); let store = MemoryStore::open();
let spec = FewValidatorsEthSpec::spec(); let spec = MinimalEthSpec::default_spec();
let slots = vec![0, 1, 2, 5]; let slots = vec![0, 1, 2, 5];

View File

@ -1,6 +1,5 @@
extern crate rocksdb; extern crate rocksdb;
// use super::stores::COLUMNS;
use super::{ClientDB, DBError, DBValue}; use super::{ClientDB, DBError, DBValue};
use rocksdb::Error as RocksError; use rocksdb::Error as RocksError;
use rocksdb::{Options, DB}; use rocksdb::{Options, DB};

View File

@ -1,6 +1,8 @@
use crate::*; use crate::*;
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
mod beacon_state;
impl StoreItem for BeaconBlock { impl StoreItem for BeaconBlock {
fn db_column() -> DBColumn { fn db_column() -> DBColumn {
DBColumn::BeaconBlock DBColumn::BeaconBlock
@ -14,17 +16,3 @@ impl StoreItem for BeaconBlock {
Self::from_ssz_bytes(bytes).map_err(Into::into) Self::from_ssz_bytes(bytes).map_err(Into::into)
} }
} }
impl<T: EthSpec> StoreItem for BeaconState<T> {
fn db_column() -> DBColumn {
DBColumn::BeaconState
}
fn as_store_bytes(&self) -> Vec<u8> {
self.as_ssz_bytes()
}
fn from_store_bytes(bytes: &mut [u8]) -> Result<Self, Error> {
Self::from_ssz_bytes(bytes).map_err(Into::into)
}
}

View File

@ -0,0 +1,64 @@
use crate::*;
use ssz::{Decode, DecodeError, Encode};
use ssz_derive::{Decode, Encode};
use std::convert::TryInto;
use types::beacon_state::{CommitteeCache, CACHED_EPOCHS};
/// A container for storing `BeaconState` components.
#[derive(Encode, Decode)]
struct StorageContainer {
state_bytes: Vec<u8>,
committee_caches_bytes: Vec<Vec<u8>>,
}
impl StorageContainer {
/// Create a new instance for storing a `BeaconState`.
pub fn new<T: EthSpec>(state: &BeaconState<T>) -> Self {
let mut committee_caches_bytes = vec![];
for cache in state.committee_caches[..].iter() {
committee_caches_bytes.push(cache.as_ssz_bytes());
}
Self {
state_bytes: state.as_ssz_bytes(),
committee_caches_bytes,
}
}
}
impl<T: EthSpec> TryInto<BeaconState<T>> for StorageContainer {
type Error = Error;
fn try_into(self) -> Result<BeaconState<T>, Error> {
let mut state: BeaconState<T> = BeaconState::from_ssz_bytes(&self.state_bytes)?;
for i in 0..CACHED_EPOCHS {
let bytes = &self.committee_caches_bytes.get(i).ok_or_else(|| {
Error::SszDecodeError(DecodeError::BytesInvalid(
"Insufficient committees for BeaconState".to_string(),
))
})?;
state.committee_caches[i] = CommitteeCache::from_ssz_bytes(bytes)?;
}
Ok(state)
}
}
impl<T: EthSpec> StoreItem for BeaconState<T> {
fn db_column() -> DBColumn {
DBColumn::BeaconState
}
fn as_store_bytes(&self) -> Vec<u8> {
let container = StorageContainer::new(self);
container.as_ssz_bytes()
}
fn from_store_bytes(bytes: &mut [u8]) -> Result<Self, Error> {
let container = StorageContainer::from_ssz_bytes(bytes)?;
container.try_into()
}
}
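A minimal round-trip sketch of the impl above; the helper is hypothetical and assumes the `StoreItem` trait is in scope:

// Serialize a state (including its committee caches) and decode it back.
fn roundtrip_state<T: EthSpec>(state: &BeaconState<T>) -> Result<BeaconState<T>, Error> {
    let mut bytes = state.as_store_bytes();
    BeaconState::from_store_bytes(&mut bytes)
}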

View File

@ -5,10 +5,14 @@ use leveldb::database::Database;
use leveldb::error::Error as LevelDBError; use leveldb::error::Error as LevelDBError;
use leveldb::options::{Options, ReadOptions, WriteOptions}; use leveldb::options::{Options, ReadOptions, WriteOptions};
use std::path::Path; use std::path::Path;
use std::sync::Arc;
/// A wrapped leveldb database. /// A wrapped leveldb database.
#[derive(Clone)]
pub struct LevelDB { pub struct LevelDB {
db: Database<BytesKey>, // Note: this `Arc` is only included because of an artificial constraint by gRPC. Hopefully we
// can remove this one day.
db: Arc<Database<BytesKey>>,
} }
impl LevelDB { impl LevelDB {
@ -18,7 +22,7 @@ impl LevelDB {
options.create_if_missing = true; options.create_if_missing = true;
let db = Database::open(path, options)?; let db = Arc::new(Database::open(path, options)?);
Ok(Self { db }) Ok(Self { db })
} }

View File

@ -1,19 +1,23 @@
use super::{Error, Store}; use super::{Error, Store};
use parking_lot::RwLock; use parking_lot::RwLock;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc;
type DBHashMap = HashMap<Vec<u8>, Vec<u8>>; type DBHashMap = HashMap<Vec<u8>, Vec<u8>>;
/// A thread-safe `HashMap` wrapper. /// A thread-safe `HashMap` wrapper.
#[derive(Clone)]
pub struct MemoryStore { pub struct MemoryStore {
db: RwLock<DBHashMap>, // Note: this `Arc` is only included because of an artificial constraint by gRPC. Hopefully we
// can remove this one day.
db: Arc<RwLock<DBHashMap>>,
} }
impl MemoryStore { impl MemoryStore {
/// Create a new, empty database. /// Create a new, empty database.
pub fn open() -> Self { pub fn open() -> Self {
Self { Self {
db: RwLock::new(HashMap::new()), db: Arc::new(RwLock::new(HashMap::new())),
} }
} }

View File

@ -4,6 +4,10 @@ version = "0.1.0"
authors = ["Age Manning <Age@AgeManning.com>"] authors = ["Age Manning <Age@AgeManning.com>"]
edition = "2018" edition = "2018"
[[bench]]
name = "benches"
harness = false
[dependencies] [dependencies]
store = { path = "../../beacon_node/store" } store = { path = "../../beacon_node/store" }
ssz = { path = "../utils/ssz" } ssz = { path = "../utils/ssz" }
@ -12,6 +16,7 @@ log = "0.4.6"
bit-vec = "0.5.0" bit-vec = "0.5.0"
[dev-dependencies] [dev-dependencies]
criterion = "0.2"
hex = "0.3.2" hex = "0.3.2"
yaml-rust = "0.4.2" yaml-rust = "0.4.2"
bls = { path = "../utils/bls" } bls = { path = "../utils/bls" }

View File

@ -0,0 +1,75 @@
use criterion::Criterion;
use criterion::{criterion_group, criterion_main, Benchmark};
use fork_choice::{test_utils::TestingForkChoiceBuilder, ForkChoice, OptimizedLMDGhost};
use std::sync::Arc;
use store::MemoryStore;
use types::{ChainSpec, EthSpec, MainnetEthSpec};
pub type TestedForkChoice<T, U> = OptimizedLMDGhost<T, U>;
pub type TestedEthSpec = MainnetEthSpec;
/// Helper function to set up a builder and spec.
fn setup(
validator_count: usize,
chain_length: usize,
) -> (
TestingForkChoiceBuilder<MemoryStore, TestedEthSpec>,
ChainSpec,
) {
let store = MemoryStore::open();
let builder: TestingForkChoiceBuilder<MemoryStore, TestedEthSpec> =
TestingForkChoiceBuilder::new(validator_count, chain_length, Arc::new(store));
let spec = TestedEthSpec::default_spec();
(builder, spec)
}
/// Benches adding blocks to fork_choice.
fn add_block(c: &mut Criterion) {
let validator_count = 16;
let chain_length = 100;
let (builder, spec) = setup(validator_count, chain_length);
c.bench(
&format!("{}_blocks", chain_length),
Benchmark::new("add_blocks", move |b| {
b.iter(|| {
let mut fc = builder.build::<TestedForkChoice<MemoryStore, TestedEthSpec>>();
for (root, block) in builder.chain.iter().skip(1) {
fc.add_block(block, root, &spec).unwrap();
}
})
})
.sample_size(10),
);
}
/// Benches fork choice head finding.
fn find_head(c: &mut Criterion) {
let validator_count = 16;
let chain_length = 64 * 2;
let (builder, spec) = setup(validator_count, chain_length);
let mut fc = builder.build::<TestedForkChoice<MemoryStore, TestedEthSpec>>();
for (root, block) in builder.chain.iter().skip(1) {
fc.add_block(block, root, &spec).unwrap();
}
let head_root = builder.chain.last().unwrap().0;
for i in 0..validator_count {
fc.add_attestation(i as u64, &head_root, &spec).unwrap();
}
c.bench(
&format!("{}_blocks", chain_length),
Benchmark::new("find_head", move |b| {
b.iter(|| fc.find_head(&builder.genesis_root(), &spec).unwrap())
})
.sample_size(10),
);
}
criterion_group!(benches, add_block, find_head);
criterion_main!(benches);

View File

@ -0,0 +1,40 @@
use fork_choice::{test_utils::TestingForkChoiceBuilder, ForkChoice, OptimizedLMDGhost};
use std::sync::Arc;
use store::{MemoryStore, Store};
use types::{BeaconBlock, ChainSpec, EthSpec, Hash256, MainnetEthSpec};
fn main() {
let validator_count = 16;
let chain_length = 100;
let repetitions = 50;
let store = MemoryStore::open();
let builder: TestingForkChoiceBuilder<MemoryStore, MainnetEthSpec> =
TestingForkChoiceBuilder::new(validator_count, chain_length, Arc::new(store));
let fork_choosers: Vec<OptimizedLMDGhost<MemoryStore, MainnetEthSpec>> = (0..repetitions)
.into_iter()
.map(|_| builder.build())
.collect();
let spec = &MainnetEthSpec::default_spec();
println!("Running {} times...", repetitions);
for fc in fork_choosers {
do_thing(fc, &builder.chain, builder.genesis_root(), spec);
}
}
#[inline(never)]
fn do_thing<F: ForkChoice<S>, S: Store>(
mut fc: F,
chain: &[(Hash256, BeaconBlock)],
genesis_root: Hash256,
spec: &ChainSpec,
) {
for (root, block) in chain.iter().skip(1) {
fc.add_block(block, root, spec).unwrap();
}
let _head = fc.find_head(&genesis_root, spec).unwrap();
}

View File

@ -48,18 +48,6 @@ pub struct BitwiseLMDGhost<T, E> {
} }
impl<T: Store, E: EthSpec> BitwiseLMDGhost<T, E> { impl<T: Store, E: EthSpec> BitwiseLMDGhost<T, E> {
pub fn new(store: Arc<T>) -> Self {
BitwiseLMDGhost {
cache: HashMap::new(),
ancestors: vec![HashMap::new(); 16],
latest_attestation_targets: HashMap::new(),
children: HashMap::new(),
max_known_height: SlotHeight::new(0),
store,
_phantom: PhantomData,
}
}
/// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to
/// weighted votes. /// weighted votes.
pub fn get_latest_votes( pub fn get_latest_votes(
@ -80,7 +68,7 @@ impl<T: Store, E: EthSpec> BitwiseLMDGhost<T, E> {
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
let active_validator_indices = let active_validator_indices =
current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); current_state.get_active_validator_indices(block_slot.epoch(E::slots_per_epoch()));
for index in active_validator_indices { for index in active_validator_indices {
let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance) let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance)
@ -130,12 +118,12 @@ impl<T: Store, E: EthSpec> BitwiseLMDGhost<T, E> {
// not in the cache recursively search for ancestors using a log-lookup // not in the cache recursively search for ancestors using a log-lookup
if let Some(ancestor) = { if let Some(ancestor) = {
let ancestor_lookup = self.ancestors let ancestor_lookup = *self.ancestors
[log2_int((block_height - target_height - 1u64).as_u64()) as usize] [log2_int((block_height - target_height - 1u64).as_u64()) as usize]
.get(&block_hash) .get(&block_hash)
//TODO: Panic if we can't lookup and fork choice fails //TODO: Panic if we can't lookup and fork choice fails
.expect("All blocks should be added to the ancestor log lookup table"); .expect("All blocks should be added to the ancestor log lookup table");
self.get_ancestor(*ancestor_lookup, target_height, &spec) self.get_ancestor(ancestor_lookup, target_height, &spec)
} { } {
// add the result to the cache // add the result to the cache
self.cache.insert(cache_key, ancestor); self.cache.insert(cache_key, ancestor);
@ -161,7 +149,7 @@ impl<T: Store, E: EthSpec> BitwiseLMDGhost<T, E> {
// these have already been weighted by balance // these have already been weighted by balance
for (hash, votes) in latest_votes.iter() { for (hash, votes) in latest_votes.iter() {
if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) { if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) {
let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); let current_vote_value = *current_votes.get(&ancestor).unwrap_or_else(|| &0);
current_votes.insert(ancestor, current_vote_value + *votes); current_votes.insert(ancestor, current_vote_value + *votes);
total_vote_count += votes; total_vote_count += votes;
} }
@ -227,7 +215,19 @@ impl<T: Store, E: EthSpec> BitwiseLMDGhost<T, E> {
} }
} }
impl<T: Store, E: EthSpec> ForkChoice for BitwiseLMDGhost<T, E> { impl<T: Store, E: EthSpec> ForkChoice<T> for BitwiseLMDGhost<T, E> {
fn new(store: Arc<T>) -> Self {
BitwiseLMDGhost {
cache: HashMap::new(),
ancestors: vec![HashMap::new(); 16],
latest_attestation_targets: HashMap::new(),
children: HashMap::new(),
max_known_height: SlotHeight::new(0),
store,
_phantom: PhantomData,
}
}
fn add_block( fn add_block(
&mut self, &mut self,
block: &BeaconBlock, block: &BeaconBlock,

View File

@ -20,9 +20,9 @@ pub mod bitwise_lmd_ghost;
pub mod longest_chain; pub mod longest_chain;
pub mod optimized_lmd_ghost; pub mod optimized_lmd_ghost;
pub mod slow_lmd_ghost; pub mod slow_lmd_ghost;
pub mod test_utils;
// use store::stores::BeaconBlockAtSlotError; use std::sync::Arc;
// use store::DBError;
use store::Error as DBError; use store::Error as DBError;
use types::{BeaconBlock, ChainSpec, Hash256}; use types::{BeaconBlock, ChainSpec, Hash256};
@ -34,7 +34,10 @@ pub use slow_lmd_ghost::SlowLMDGhost;
/// Defines the interface for Fork Choices. Each Fork choice will define their own data structures /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures
/// which can be built in block processing through the `add_block` and `add_attestation` functions. /// which can be built in block processing through the `add_block` and `add_attestation` functions.
/// The main fork choice algorithm is specified in `find_head /// The main fork choice algorithm is specified in `find_head
pub trait ForkChoice: Send + Sync { pub trait ForkChoice<T>: Send + Sync {
/// Create a new `ForkChoice` which reads from `store`.
fn new(store: Arc<T>) -> Self;
/// Called when a block has been added. Allows generic block-level data structures to be /// Called when a block has been added. Allows generic block-level data structures to be
/// built for a given fork-choice. /// built for a given fork-choice.
fn add_block( fn add_block(
@ -78,22 +81,6 @@ impl From<DBError> for ForkChoiceError {
} }
} }
/*
impl From<BeaconBlockAtSlotError> for ForkChoiceError {
fn from(e: BeaconBlockAtSlotError) -> ForkChoiceError {
match e {
BeaconBlockAtSlotError::UnknownBeaconBlock(hash) => {
ForkChoiceError::MissingBeaconBlock(hash)
}
BeaconBlockAtSlotError::InvalidBeaconBlock(hash) => {
ForkChoiceError::MissingBeaconBlock(hash)
}
BeaconBlockAtSlotError::DBError(string) => ForkChoiceError::StorageError(string),
}
}
}
*/
/// Fork choice options that are currently implemented. /// Fork choice options that are currently implemented.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum ForkChoiceAlgorithm { pub enum ForkChoiceAlgorithm {
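Annotation: the key change in this file is that construction (`fn new(store: Arc<T>)`) now lives on the `ForkChoice<T>` trait itself, which is what lets the new test utilities and benches build any algorithm generically. A reduced, self-contained illustration of why that works (simplified signatures, not the crate's real trait):

use std::sync::Arc;

// Reduced stand-in for the trait above: construction is part of the interface.
trait ForkChoice<S>: Sized {
    fn new(store: Arc<S>) -> Self;
}

struct InMemoryStore;

struct LongestChain<S> {
    _store: Arc<S>,
}

impl<S> ForkChoice<S> for LongestChain<S> {
    fn new(store: Arc<S>) -> Self {
        LongestChain { _store: store }
    }
}

// Generic setup: the caller picks the algorithm via a type parameter, which is what
// `builder.build::<TestedForkChoice<..>>()` relies on in the new benches.
fn setup<F: ForkChoice<InMemoryStore>>(store: Arc<InMemoryStore>) -> F {
    F::new(store)
}

fn main() {
    let store = Arc::new(InMemoryStore);
    let _fc: LongestChain<InMemoryStore> = setup(store);
}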

View File

@ -10,16 +10,14 @@ pub struct LongestChain<T> {
store: Arc<T>, store: Arc<T>,
} }
impl<T: Store> LongestChain<T> { impl<T: Store> ForkChoice<T> for LongestChain<T> {
pub fn new(store: Arc<T>) -> Self { fn new(store: Arc<T>) -> Self {
LongestChain { LongestChain {
head_block_hashes: Vec::new(), head_block_hashes: Vec::new(),
store, store,
} }
} }
}
impl<T: Store> ForkChoice for LongestChain<T> {
fn add_block( fn add_block(
&mut self, &mut self,
block: &BeaconBlock, block: &BeaconBlock,

View File

@ -48,18 +48,6 @@ pub struct OptimizedLMDGhost<T, E> {
} }
impl<T: Store, E: EthSpec> OptimizedLMDGhost<T, E> { impl<T: Store, E: EthSpec> OptimizedLMDGhost<T, E> {
pub fn new(store: Arc<T>) -> Self {
OptimizedLMDGhost {
cache: HashMap::new(),
ancestors: vec![HashMap::new(); 16],
latest_attestation_targets: HashMap::new(),
children: HashMap::new(),
max_known_height: SlotHeight::new(0),
store,
_phantom: PhantomData,
}
}
/// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to
/// weighted votes. /// weighted votes.
pub fn get_latest_votes( pub fn get_latest_votes(
@ -80,7 +68,7 @@ impl<T: Store, E: EthSpec> OptimizedLMDGhost<T, E> {
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
let active_validator_indices = let active_validator_indices =
current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); current_state.get_active_validator_indices(block_slot.epoch(E::slots_per_epoch()));
for index in active_validator_indices { for index in active_validator_indices {
let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance) let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance)
@ -130,12 +118,12 @@ impl<T: Store, E: EthSpec> OptimizedLMDGhost<T, E> {
// not in the cache recursively search for ancestors using a log-lookup // not in the cache recursively search for ancestors using a log-lookup
if let Some(ancestor) = { if let Some(ancestor) = {
let ancestor_lookup = self.ancestors let ancestor_lookup = *self.ancestors
[log2_int((block_height - target_height - 1u64).as_u64()) as usize] [log2_int((block_height - target_height - 1u64).as_u64()) as usize]
.get(&block_hash) .get(&block_hash)
//TODO: Panic if we can't lookup and fork choice fails //TODO: Panic if we can't lookup and fork choice fails
.expect("All blocks should be added to the ancestor log lookup table"); .expect("All blocks should be added to the ancestor log lookup table");
self.get_ancestor(*ancestor_lookup, target_height, &spec) self.get_ancestor(ancestor_lookup, target_height, &spec)
} { } {
// add the result to the cache // add the result to the cache
self.cache.insert(cache_key, ancestor); self.cache.insert(cache_key, ancestor);
@ -161,7 +149,7 @@ impl<T: Store, E: EthSpec> OptimizedLMDGhost<T, E> {
// these have already been weighted by balance // these have already been weighted by balance
for (hash, votes) in latest_votes.iter() { for (hash, votes) in latest_votes.iter() {
if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) { if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) {
let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); let current_vote_value = *current_votes.get(&ancestor).unwrap_or_else(|| &0);
current_votes.insert(ancestor, current_vote_value + *votes); current_votes.insert(ancestor, current_vote_value + *votes);
total_vote_count += votes; total_vote_count += votes;
} }
@ -198,7 +186,19 @@ impl<T: Store, E: EthSpec> OptimizedLMDGhost<T, E> {
} }
} }
impl<T: Store, E: EthSpec> ForkChoice for OptimizedLMDGhost<T, E> { impl<T: Store, E: EthSpec> ForkChoice<T> for OptimizedLMDGhost<T, E> {
fn new(store: Arc<T>) -> Self {
OptimizedLMDGhost {
cache: HashMap::new(),
ancestors: vec![HashMap::new(); 16],
latest_attestation_targets: HashMap::new(),
children: HashMap::new(),
max_known_height: SlotHeight::new(0),
store,
_phantom: PhantomData,
}
}
fn add_block( fn add_block(
&mut self, &mut self,
block: &BeaconBlock, block: &BeaconBlock,

View File

@ -20,15 +20,6 @@ pub struct SlowLMDGhost<T, E> {
} }
impl<T: Store, E: EthSpec> SlowLMDGhost<T, E> { impl<T: Store, E: EthSpec> SlowLMDGhost<T, E> {
pub fn new(store: Arc<T>) -> Self {
SlowLMDGhost {
latest_attestation_targets: HashMap::new(),
children: HashMap::new(),
store,
_phantom: PhantomData,
}
}
/// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to
/// weighted votes. /// weighted votes.
pub fn get_latest_votes( pub fn get_latest_votes(
@ -49,7 +40,7 @@ impl<T: Store, E: EthSpec> SlowLMDGhost<T, E> {
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
let active_validator_indices = let active_validator_indices =
current_state.get_active_validator_indices(block_slot.epoch(spec.slots_per_epoch)); current_state.get_active_validator_indices(block_slot.epoch(E::slots_per_epoch()));
for index in active_validator_indices { for index in active_validator_indices {
let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance) let balance = std::cmp::min(current_state.balances[index], spec.max_effective_balance)
@ -92,7 +83,16 @@ impl<T: Store, E: EthSpec> SlowLMDGhost<T, E> {
} }
} }
impl<T: Store, E: EthSpec> ForkChoice for SlowLMDGhost<T, E> { impl<T: Store, E: EthSpec> ForkChoice<T> for SlowLMDGhost<T, E> {
fn new(store: Arc<T>) -> Self {
SlowLMDGhost {
latest_attestation_targets: HashMap::new(),
children: HashMap::new(),
store,
_phantom: PhantomData,
}
}
/// Process when a block is added /// Process when a block is added
fn add_block( fn add_block(
&mut self, &mut self,

View File

@ -0,0 +1,91 @@
use crate::ForkChoice;
use std::marker::PhantomData;
use std::sync::Arc;
use store::Store;
use types::{
test_utils::{SeedableRng, TestRandom, TestingBeaconStateBuilder, XorShiftRng},
BeaconBlock, BeaconState, EthSpec, Hash256, Keypair, MainnetEthSpec,
};
/// Creates a chain of blocks and produces `ForkChoice` instances with pre-filled stores.
pub struct TestingForkChoiceBuilder<S, E> {
store: Arc<S>,
pub chain: Vec<(Hash256, BeaconBlock)>,
_phantom: PhantomData<E>,
}
impl<S: Store, E: EthSpec> TestingForkChoiceBuilder<S, E> {
pub fn new(validator_count: usize, chain_length: usize, store: Arc<S>) -> Self {
let chain =
get_chain_of_blocks::<MainnetEthSpec, S>(chain_length, validator_count, store.clone());
Self {
store,
chain,
_phantom: PhantomData,
}
}
pub fn genesis_root(&self) -> Hash256 {
self.chain[0].0
}
/// Return a new `ForkChoice` instance with a chain stored in its `Store`.
pub fn build<F: ForkChoice<S>>(&self) -> F {
F::new(self.store.clone())
}
}
fn get_state<T: EthSpec>(validator_count: usize) -> BeaconState<T> {
let spec = T::default_spec();
let builder: TestingBeaconStateBuilder<T> =
TestingBeaconStateBuilder::from_single_keypair(validator_count, &Keypair::random(), &spec);
let (state, _keypairs) = builder.build();
state
}
/// Generates a chain of blocks of length `len`.
///
/// Creates a `BeaconState` for the block and stores it in `Store`, along with the block.
///
/// Returns the chain of blocks.
fn get_chain_of_blocks<T: EthSpec, U: Store>(
len: usize,
validator_count: usize,
store: Arc<U>,
) -> Vec<(Hash256, BeaconBlock)> {
let spec = T::default_spec();
let mut blocks_and_roots: Vec<(Hash256, BeaconBlock)> = vec![];
let mut unique_hashes = (0..).map(Hash256::from);
let mut random_block = BeaconBlock::random_for_test(&mut XorShiftRng::from_seed([42; 16]));
random_block.previous_block_root = Hash256::zero();
let beacon_state = get_state::<T>(validator_count);
for i in 0..len {
let slot = spec.genesis_slot + i as u64;
// Generate and store the state.
let mut state = beacon_state.clone();
state.slot = slot;
let state_root = unique_hashes.next().unwrap();
store.put(&state_root, &state).unwrap();
// Generate the block.
let mut block = random_block.clone();
block.slot = slot;
block.state_root = state_root;
// Chain all the blocks to their parents.
if i > 0 {
block.previous_block_root = blocks_and_roots[i - 1].0;
}
// Store the block.
let block_root = unique_hashes.next().unwrap();
store.put(&block_root, &block).unwrap();
blocks_and_roots.push((block_root, block));
}
blocks_and_roots
}
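Annotation: putting the pieces together, a usage sketch assembled from the APIs added in this diff (crate paths and imports assumed to be available exactly as in benches/benches.rs above); it mirrors that bench in miniature:

use std::sync::Arc;

use fork_choice::{test_utils::TestingForkChoiceBuilder, ForkChoice, OptimizedLMDGhost};
use store::MemoryStore;
use types::{EthSpec, MainnetEthSpec};

fn main() {
    // Small chain: 2 validators, 8 blocks, all pre-filled into one MemoryStore.
    let store = Arc::new(MemoryStore::open());
    let builder: TestingForkChoiceBuilder<MemoryStore, MainnetEthSpec> =
        TestingForkChoiceBuilder::new(2, 8, store);
    let spec = MainnetEthSpec::default_spec();

    // Each `build` call yields a fresh fork-choice instance backed by the same store.
    let mut fc = builder.build::<OptimizedLMDGhost<MemoryStore, MainnetEthSpec>>();
    for (root, block) in builder.chain.iter().skip(1) {
        fc.add_block(block, root, &spec).unwrap();
    }
    let _head = fc.find_head(&builder.genesis_root(), &spec).unwrap();
}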

View File

@ -1,20 +1,17 @@
#![cfg(not(debug_assertions))] #![cfg(not(debug_assertions))]
// Tests the available fork-choice algorithms /// Tests the available fork-choice algorithms
pub use beacon_chain::BeaconChain; pub use beacon_chain::BeaconChain;
use bls::Signature; use bls::Signature;
use store::MemoryStore; use store::MemoryStore;
use store::Store; use store::Store;
// use env_logger::{Builder, Env}; // use env_logger::{Builder, Env};
use fork_choice::{ use fork_choice::{BitwiseLMDGhost, ForkChoice, LongestChain, OptimizedLMDGhost, SlowLMDGhost};
BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost,
};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::{fs::File, io::prelude::*, path::PathBuf}; use std::{fs::File, io::prelude::*, path::PathBuf};
use types::test_utils::TestingBeaconStateBuilder; use types::test_utils::TestingBeaconStateBuilder;
use types::{ use types::{
BeaconBlock, BeaconBlockBody, Eth1Data, EthSpec, FoundationEthSpec, Hash256, Keypair, Slot, BeaconBlock, BeaconBlockBody, Eth1Data, EthSpec, Hash256, Keypair, MainnetEthSpec, Slot,
}; };
use yaml_rust::yaml; use yaml_rust::yaml;
@ -25,8 +22,7 @@ fn test_optimized_lmd_ghost() {
// set up logging // set up logging
// Builder::from_env(Env::default().default_filter_or("trace")).init(); // Builder::from_env(Env::default().default_filter_or("trace")).init();
test_yaml_vectors( test_yaml_vectors::<OptimizedLMDGhost<MemoryStore, MainnetEthSpec>>(
ForkChoiceAlgorithm::OptimizedLMDGhost,
"tests/lmd_ghost_test_vectors.yaml", "tests/lmd_ghost_test_vectors.yaml",
100, 100,
); );
@ -37,8 +33,7 @@ fn test_bitwise_lmd_ghost() {
// set up logging // set up logging
//Builder::from_env(Env::default().default_filter_or("trace")).init(); //Builder::from_env(Env::default().default_filter_or("trace")).init();
test_yaml_vectors( test_yaml_vectors::<BitwiseLMDGhost<MemoryStore, MainnetEthSpec>>(
ForkChoiceAlgorithm::BitwiseLMDGhost,
"tests/bitwise_lmd_ghost_test_vectors.yaml", "tests/bitwise_lmd_ghost_test_vectors.yaml",
100, 100,
); );
@ -46,8 +41,7 @@ fn test_bitwise_lmd_ghost() {
#[test] #[test]
fn test_slow_lmd_ghost() { fn test_slow_lmd_ghost() {
test_yaml_vectors( test_yaml_vectors::<SlowLMDGhost<MemoryStore, MainnetEthSpec>>(
ForkChoiceAlgorithm::SlowLMDGhost,
"tests/lmd_ghost_test_vectors.yaml", "tests/lmd_ghost_test_vectors.yaml",
100, 100,
); );
@ -55,16 +49,11 @@ fn test_slow_lmd_ghost() {
#[test] #[test]
fn test_longest_chain() { fn test_longest_chain() {
test_yaml_vectors( test_yaml_vectors::<LongestChain<MemoryStore>>("tests/longest_chain_test_vectors.yaml", 100);
ForkChoiceAlgorithm::LongestChain,
"tests/longest_chain_test_vectors.yaml",
100,
);
} }
// run a generic test over given YAML test vectors // run a generic test over given YAML test vectors
fn test_yaml_vectors( fn test_yaml_vectors<T: ForkChoice<MemoryStore>>(
fork_choice_algo: ForkChoiceAlgorithm,
yaml_file_path: &str, yaml_file_path: &str,
emulated_validators: usize, // the number of validators used to give weights. emulated_validators: usize, // the number of validators used to give weights.
) { ) {
@ -72,7 +61,7 @@ fn test_yaml_vectors(
let test_cases = load_test_cases_from_yaml(yaml_file_path); let test_cases = load_test_cases_from_yaml(yaml_file_path);
// default vars // default vars
let spec = FoundationEthSpec::spec(); let spec = MainnetEthSpec::default_spec();
let zero_hash = Hash256::zero(); let zero_hash = Hash256::zero();
let eth1_data = Eth1Data { let eth1_data = Eth1Data {
deposit_count: 0, deposit_count: 0,
@ -96,8 +85,7 @@ fn test_yaml_vectors(
// process the tests // process the tests
for test_case in test_cases { for test_case in test_cases {
// setup a fresh test // setup a fresh test
let (mut fork_choice, store, state_root) = let (mut fork_choice, store, state_root) = setup_inital_state::<T>(emulated_validators);
setup_inital_state(&fork_choice_algo, emulated_validators);
// keep a hashmap of block_id's to block_hashes (random hashes to abstract block_id) // keep a hashmap of block_id's to block_hashes (random hashes to abstract block_id)
//let mut block_id_map: HashMap<String, Hash256> = HashMap::new(); //let mut block_id_map: HashMap<String, Hash256> = HashMap::new();
@ -206,35 +194,19 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec<yaml_rust::Yaml> {
doc["test_cases"].as_vec().unwrap().clone() doc["test_cases"].as_vec().unwrap().clone()
} }
// initialise a single validator and state. All blocks will reference this state root. fn setup_inital_state<T>(
fn setup_inital_state( // fork_choice_algo: &ForkChoiceAlgorithm,
fork_choice_algo: &ForkChoiceAlgorithm, num_validators: usize
num_validators: usize, ) -> (T, Arc<MemoryStore>, Hash256)
) -> (Box<ForkChoice>, Arc<MemoryStore>, Hash256) { where
T: ForkChoice<MemoryStore>,
{
let store = Arc::new(MemoryStore::open()); let store = Arc::new(MemoryStore::open());
// the fork choice instantiation let fork_choice = ForkChoice::new(store.clone());
let fork_choice: Box<ForkChoice> = match fork_choice_algo { let spec = MainnetEthSpec::default_spec();
ForkChoiceAlgorithm::OptimizedLMDGhost => {
let f: OptimizedLMDGhost<MemoryStore, FoundationEthSpec> =
OptimizedLMDGhost::new(store.clone());
Box::new(f)
}
ForkChoiceAlgorithm::BitwiseLMDGhost => {
let f: BitwiseLMDGhost<MemoryStore, FoundationEthSpec> =
BitwiseLMDGhost::new(store.clone());
Box::new(f)
}
ForkChoiceAlgorithm::SlowLMDGhost => {
let f: SlowLMDGhost<MemoryStore, FoundationEthSpec> = SlowLMDGhost::new(store.clone());
Box::new(f)
}
ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(store.clone())),
};
let spec = FoundationEthSpec::spec(); let mut state_builder: TestingBeaconStateBuilder<MainnetEthSpec> =
let mut state_builder: TestingBeaconStateBuilder<FoundationEthSpec> =
TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec); TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec);
state_builder.build_caches(&spec).unwrap(); state_builder.build_caches(&spec).unwrap();
let (state, _keypairs) = state_builder.build(); let (state, _keypairs) = state_builder.build();

View File

@ -675,12 +675,12 @@ mod tests {
.collect() .collect()
} }
fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState<FoundationEthSpec>) { fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState<MainnetEthSpec>) {
let spec = FoundationEthSpec::spec(); let spec = MainnetEthSpec::default_spec();
let mut state = BeaconState::random_for_test(rng); let mut state = BeaconState::random_for_test(rng);
state.fork = Fork::genesis(&spec); state.fork = Fork::genesis(MainnetEthSpec::genesis_epoch());
(spec, state) (spec, state)
} }
@ -721,27 +721,27 @@ mod tests {
fn attestation_test_state<E: EthSpec>( fn attestation_test_state<E: EthSpec>(
num_committees: usize, num_committees: usize,
) -> (BeaconState<E>, Vec<Keypair>, ChainSpec) { ) -> (BeaconState<E>, Vec<Keypair>, ChainSpec) {
let spec = E::spec(); let spec = E::default_spec();
let num_validators = let num_validators =
num_committees * spec.slots_per_epoch as usize * spec.target_committee_size; num_committees * E::slots_per_epoch() as usize * spec.target_committee_size;
let mut state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( let mut state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(
num_validators, num_validators,
&spec, &spec,
); );
let slot_offset = 1000 * spec.slots_per_epoch + spec.slots_per_epoch / 2; let slot_offset = 1000 * E::slots_per_epoch() + E::slots_per_epoch() / 2;
let slot = spec.genesis_slot + slot_offset; let slot = spec.genesis_slot + slot_offset;
state_builder.teleport_to_slot(slot, &spec); state_builder.teleport_to_slot(slot);
state_builder.build_caches(&spec).unwrap(); state_builder.build_caches(&spec).unwrap();
let (state, keypairs) = state_builder.build(); let (state, keypairs) = state_builder.build();
(state, keypairs, FoundationEthSpec::spec()) (state, keypairs, MainnetEthSpec::default_spec())
} }
#[test] #[test]
fn test_attestation_score() { fn test_attestation_score() {
let (ref mut state, ref keypairs, ref spec) = let (ref mut state, ref keypairs, ref spec) =
attestation_test_state::<FoundationEthSpec>(1); attestation_test_state::<MainnetEthSpec>(1);
let slot = state.slot - 1; let slot = state.slot - 1;
let committees = state let committees = state
@ -793,7 +793,7 @@ mod tests {
#[test] #[test]
fn attestation_aggregation_insert_get_prune() { fn attestation_aggregation_insert_get_prune() {
let (ref mut state, ref keypairs, ref spec) = let (ref mut state, ref keypairs, ref spec) =
attestation_test_state::<FoundationEthSpec>(1); attestation_test_state::<MainnetEthSpec>(1);
let op_pool = OperationPool::new(); let op_pool = OperationPool::new();
@ -852,7 +852,7 @@ mod tests {
// But once we advance to more than an epoch after the attestation, it should prune it // But once we advance to more than an epoch after the attestation, it should prune it
// out of existence. // out of existence.
state.slot += 2 * spec.slots_per_epoch; state.slot += 2 * MainnetEthSpec::slots_per_epoch();
op_pool.prune_attestations(state); op_pool.prune_attestations(state);
assert_eq!(op_pool.num_attestations(), 0); assert_eq!(op_pool.num_attestations(), 0);
} }
@ -861,7 +861,7 @@ mod tests {
#[test] #[test]
fn attestation_duplicate() { fn attestation_duplicate() {
let (ref mut state, ref keypairs, ref spec) = let (ref mut state, ref keypairs, ref spec) =
attestation_test_state::<FoundationEthSpec>(1); attestation_test_state::<MainnetEthSpec>(1);
let op_pool = OperationPool::new(); let op_pool = OperationPool::new();
@ -898,7 +898,7 @@ mod tests {
#[test] #[test]
fn attestation_pairwise_overlapping() { fn attestation_pairwise_overlapping() {
let (ref mut state, ref keypairs, ref spec) = let (ref mut state, ref keypairs, ref spec) =
attestation_test_state::<FoundationEthSpec>(1); attestation_test_state::<MainnetEthSpec>(1);
let op_pool = OperationPool::new(); let op_pool = OperationPool::new();
@ -946,7 +946,7 @@ mod tests {
let big_step_size = 4; let big_step_size = 4;
let (ref mut state, ref keypairs, ref spec) = let (ref mut state, ref keypairs, ref spec) =
attestation_test_state::<FoundationEthSpec>(big_step_size); attestation_test_state::<MainnetEthSpec>(big_step_size);
let op_pool = OperationPool::new(); let op_pool = OperationPool::new();

View File

@ -17,13 +17,13 @@ pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10;
/// Run the benchmarking suite on a foundation spec with 16,384 validators. /// Run the benchmarking suite on a foundation spec with 16,384 validators.
pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) { pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) {
let spec = ChainSpec::foundation(); let spec = ChainSpec::mainnet();
let mut builder = let mut builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
// Set the state to be just before an epoch transition. // Set the state to be just before an epoch transition.
let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); let target_slot = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
builder.teleport_to_slot(target_slot, &spec); builder.teleport_to_slot(target_slot, &spec);
// Builds all caches; benches will not contain shuffling/committee building times. // Builds all caches; benches will not contain shuffling/committee building times.
@ -38,10 +38,10 @@ pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: u
// Assert that the state has an attestation for each committee that is able to include an // Assert that the state has an attestation for each committee that is able to include an
// attestation in the state. // attestation in the state.
let committees_per_epoch = spec.get_epoch_committee_count(validator_count); let committees_per_epoch = spec.get_epoch_committee_count(validator_count);
let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; let committees_per_slot = committees_per_epoch / T::slots_per_epoch();
let previous_epoch_attestations = committees_per_epoch; let previous_epoch_attestations = committees_per_epoch;
let current_epoch_attestations = let current_epoch_attestations =
committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay); committees_per_slot * (T::slots_per_epoch() - spec.min_attestation_inclusion_delay);
assert_eq!( assert_eq!(
state.latest_attestations.len() as u64, state.latest_attestations.len() as u64,
previous_epoch_attestations + current_epoch_attestations, previous_epoch_attestations + current_epoch_attestations,

View File

@ -25,7 +25,7 @@ pub fn block_processing_worst_case(c: &mut Criterion) {
); );
// Use the specifications from the Eth2.0 spec. // Use the specifications from the Eth2.0 spec.
let spec = ChainSpec::foundation(); let spec = ChainSpec::mainnet();
// Create a builder for configuring the block and state for benching. // Create a builder for configuring the block and state for benching.
let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);
@ -34,7 +34,7 @@ pub fn block_processing_worst_case(c: &mut Criterion) {
bench_builder.maximize_block_operations(&spec); bench_builder.maximize_block_operations(&spec);
// Set the state and block to be in the last slot of the 4th epoch. // Set the state and block to be in the last slot of the 4th epoch.
let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
bench_builder.set_slot(last_slot_of_epoch, &spec); bench_builder.set_slot(last_slot_of_epoch, &spec);
// Build all the state caches so the build times aren't included in the benches. // Build all the state caches so the build times aren't included in the benches.
@ -59,7 +59,7 @@ pub fn block_processing_reasonable_case(c: &mut Criterion) {
); );
// Use the specifications from the Eth2.0 spec. // Use the specifications from the Eth2.0 spec.
let spec = ChainSpec::foundation(); let spec = ChainSpec::mainnet();
// Create a builder for configuring the block and state for benching. // Create a builder for configuring the block and state for benching.
let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);
@ -67,13 +67,13 @@ pub fn block_processing_reasonable_case(c: &mut Criterion) {
// Set the number of included operations to what we might expect normally. // Set the number of included operations to what we might expect normally.
bench_builder.num_proposer_slashings = 0; bench_builder.num_proposer_slashings = 0;
bench_builder.num_attester_slashings = 0; bench_builder.num_attester_slashings = 0;
bench_builder.num_attestations = (spec.shard_count / spec.slots_per_epoch) as usize; bench_builder.num_attestations = (spec.shard_count / T::slots_per_epoch()) as usize;
bench_builder.num_deposits = 2; bench_builder.num_deposits = 2;
bench_builder.num_exits = 2; bench_builder.num_exits = 2;
bench_builder.num_transfers = 2; bench_builder.num_transfers = 2;
// Set the state and block to be in the last slot of the 4th epoch. // Set the state and block to be in the last slot of the 4th epoch.
let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch());
bench_builder.set_slot(last_slot_of_epoch, &spec); bench_builder.set_slot(last_slot_of_epoch, &spec);
// Build all the state caches so the build times aren't included in the benches. // Build all the state caches so the build times aren't included in the benches.

View File

@ -25,8 +25,8 @@ pub fn get_genesis_beacon_state<T: EthSpec>(
// Process genesis activations. // Process genesis activations.
for validator in &mut state.validator_registry { for validator in &mut state.validator_registry {
if validator.effective_balance >= spec.max_effective_balance { if validator.effective_balance >= spec.max_effective_balance {
validator.activation_eligibility_epoch = spec.genesis_epoch; validator.activation_eligibility_epoch = T::genesis_epoch();
validator.activation_epoch = spec.genesis_epoch; validator.activation_epoch = T::genesis_epoch();
} }
} }

View File

@ -142,7 +142,7 @@ pub fn verify_block_signature<T: EthSpec>(
[state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];
let domain = spec.get_domain( let domain = spec.get_domain(
block.slot.epoch(spec.slots_per_epoch), block.slot.epoch(T::slots_per_epoch()),
Domain::BeaconProposer, Domain::BeaconProposer,
&state.fork, &state.fork,
); );
@ -174,7 +174,7 @@ pub fn process_randao<T: EthSpec>(
block.body.randao_reveal.verify( block.body.randao_reveal.verify(
&state.current_epoch().tree_hash_root()[..], &state.current_epoch().tree_hash_root()[..],
spec.get_domain( spec.get_domain(
block.slot.epoch(spec.slots_per_epoch), block.slot.epoch(T::slots_per_epoch()),
Domain::Randao, Domain::Randao,
&state.fork &state.fork
), ),

View File

@ -22,8 +22,8 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
} }
} }
pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) { pub fn set_slot(&mut self, slot: Slot) {
self.state_builder.teleport_to_slot(slot, &spec); self.state_builder.teleport_to_slot(slot);
} }
pub fn build_caches(&mut self, spec: &ChainSpec) { pub fn build_caches(&mut self, spec: &ChainSpec) {
@ -55,11 +55,13 @@ impl<T: EthSpec> BlockProcessingBuilder<T> {
let keypair = &keypairs[proposer_index]; let keypair = &keypairs[proposer_index];
match randao_sk { match randao_sk {
Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), Some(sk) => builder.set_randao_reveal::<T>(&sk, &state.fork, spec),
None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), None => builder.set_randao_reveal::<T>(&keypair.sk, &state.fork, spec),
} }
let block = self.block_builder.build(&keypair.sk, &state.fork, spec); let block = self
.block_builder
.build::<T>(&keypair.sk, &state.fork, spec);
(block, state) (block, state)
} }

View File

@ -9,7 +9,7 @@ pub const VALIDATOR_COUNT: usize = 10;
#[test] #[test]
fn valid_block_ok() { fn valid_block_ok() {
let spec = FoundationEthSpec::spec(); let spec = MainnetEthSpec::default_spec();
let builder = get_builder(&spec); let builder = get_builder(&spec);
let (block, mut state) = builder.build(None, None, &spec); let (block, mut state) = builder.build(None, None, &spec);
@ -20,7 +20,7 @@ fn valid_block_ok() {
#[test] #[test]
fn invalid_block_header_state_slot() { fn invalid_block_header_state_slot() {
let spec = FoundationEthSpec::spec(); let spec = MainnetEthSpec::default_spec();
let builder = get_builder(&spec); let builder = get_builder(&spec);
let (mut block, mut state) = builder.build(None, None, &spec); let (mut block, mut state) = builder.build(None, None, &spec);
@ -39,7 +39,7 @@ fn invalid_block_header_state_slot() {
#[test] #[test]
fn invalid_parent_block_root() { fn invalid_parent_block_root() {
let spec = FoundationEthSpec::spec(); let spec = MainnetEthSpec::default_spec();
let builder = get_builder(&spec); let builder = get_builder(&spec);
let invalid_parent_root = Hash256::from([0xAA; 32]); let invalid_parent_root = Hash256::from([0xAA; 32]);
let (block, mut state) = builder.build(None, Some(invalid_parent_root), &spec); let (block, mut state) = builder.build(None, Some(invalid_parent_root), &spec);
@ -59,14 +59,14 @@ fn invalid_parent_block_root() {
#[test] #[test]
fn invalid_block_signature() { fn invalid_block_signature() {
let spec = FoundationEthSpec::spec(); let spec = MainnetEthSpec::default_spec();
let builder = get_builder(&spec); let builder = get_builder(&spec);
let (mut block, mut state) = builder.build(None, None, &spec); let (mut block, mut state) = builder.build(None, None, &spec);
// sign the block with a keypair that is not the expected proposer // sign the block with a keypair that is not the expected proposer
let keypair = Keypair::random(); let keypair = Keypair::random();
let message = block.signed_root(); let message = block.signed_root();
let epoch = block.slot.epoch(spec.slots_per_epoch); let epoch = block.slot.epoch(MainnetEthSpec::slots_per_epoch());
let domain = spec.get_domain(epoch, Domain::BeaconProposer, &state.fork); let domain = spec.get_domain(epoch, Domain::BeaconProposer, &state.fork);
block.signature = Signature::new(&message, domain, &keypair.sk); block.signature = Signature::new(&message, domain, &keypair.sk);
@ -82,7 +82,7 @@ fn invalid_block_signature() {
#[test] #[test]
fn invalid_randao_reveal_signature() { fn invalid_randao_reveal_signature() {
let spec = FoundationEthSpec::spec(); let spec = MainnetEthSpec::default_spec();
let builder = get_builder(&spec); let builder = get_builder(&spec);
// sign randao reveal with random keypair // sign randao reveal with random keypair
@ -100,12 +100,13 @@ fn invalid_randao_reveal_signature() {
); );
} }
fn get_builder(spec: &ChainSpec) -> (BlockProcessingBuilder<FoundationEthSpec>) { fn get_builder(spec: &ChainSpec) -> (BlockProcessingBuilder<MainnetEthSpec>) {
let mut builder = BlockProcessingBuilder::new(VALIDATOR_COUNT, &spec); let mut builder = BlockProcessingBuilder::new(VALIDATOR_COUNT, &spec);
// Set the state and block to be in the last slot of the 4th epoch. // Set the state and block to be in the last slot of the 4th epoch.
let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); let last_slot_of_epoch =
builder.set_slot(last_slot_of_epoch, &spec); (MainnetEthSpec::genesis_epoch() + 4).end_slot(MainnetEthSpec::slots_per_epoch());
builder.set_slot(last_slot_of_epoch);
builder.build_caches(&spec); builder.build_caches(&spec);
(builder) (builder)

View File

@ -68,7 +68,7 @@ fn validate_attestation_parametric<T: EthSpec>(
} }
); );
verify!( verify!(
state.slot <= attestation_slot + spec.slots_per_epoch, state.slot <= attestation_slot + T::slots_per_epoch(),
Invalid::IncludedTooLate { Invalid::IncludedTooLate {
state: state.slot, state: state.slot,
attestation: attestation_slot attestation: attestation_slot

View File

@ -49,7 +49,7 @@ fn verify_indexed_attestation_parametric<T: EthSpec>(
); );
// Check that nobody signed with custody bit 1 (to be removed in phase 1) // Check that nobody signed with custody bit 1 (to be removed in phase 1)
if custody_bit_1_indices.len() > 0 { if !custody_bit_1_indices.is_empty() {
invalid!(Invalid::CustodyBitfieldHasSetBits); invalid!(Invalid::CustodyBitfieldHasSetBits);
} }
@ -96,7 +96,7 @@ where
state state
.validator_registry .validator_registry
.get(validator_idx as usize) .get(validator_idx as usize)
.ok_or(Error::Invalid(Invalid::UnknownValidator(validator_idx))) .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(validator_idx)))
.map(|validator| { .map(|validator| {
aggregate_pubkey.add(&validator.pubkey); aggregate_pubkey.add(&validator.pubkey);
aggregate_pubkey aggregate_pubkey

View File

@ -21,8 +21,8 @@ pub fn verify_proposer_slashing<T: EthSpec>(
})?; })?;
verify!( verify!(
proposer_slashing.header_1.slot.epoch(spec.slots_per_epoch) proposer_slashing.header_1.slot.epoch(T::slots_per_epoch())
== proposer_slashing.header_2.slot.epoch(spec.slots_per_epoch), == proposer_slashing.header_2.slot.epoch(T::slots_per_epoch()),
Invalid::ProposalEpochMismatch( Invalid::ProposalEpochMismatch(
proposer_slashing.header_1.slot, proposer_slashing.header_1.slot,
proposer_slashing.header_2.slot proposer_slashing.header_2.slot
@ -40,7 +40,7 @@ pub fn verify_proposer_slashing<T: EthSpec>(
); );
verify!( verify!(
verify_header_signature( verify_header_signature::<T>(
&proposer_slashing.header_1, &proposer_slashing.header_1,
&proposer.pubkey, &proposer.pubkey,
&state.fork, &state.fork,
@ -49,7 +49,7 @@ pub fn verify_proposer_slashing<T: EthSpec>(
Invalid::BadProposal1Signature Invalid::BadProposal1Signature
); );
verify!( verify!(
verify_header_signature( verify_header_signature::<T>(
&proposer_slashing.header_2, &proposer_slashing.header_2,
&proposer.pubkey, &proposer.pubkey,
&state.fork, &state.fork,
@ -66,7 +66,7 @@ pub fn verify_proposer_slashing<T: EthSpec>(
/// Returns `true` if the signature is valid. /// Returns `true` if the signature is valid.
/// ///
/// Spec v0.6.1 /// Spec v0.6.1
fn verify_header_signature( fn verify_header_signature<T: EthSpec>(
header: &BeaconBlockHeader, header: &BeaconBlockHeader,
pubkey: &PublicKey, pubkey: &PublicKey,
fork: &Fork, fork: &Fork,
@ -74,7 +74,7 @@ fn verify_header_signature(
) -> bool { ) -> bool {
let message = header.signed_root(); let message = header.signed_root();
let domain = spec.get_domain( let domain = spec.get_domain(
header.slot.epoch(spec.slots_per_epoch), header.slot.epoch(T::slots_per_epoch()),
Domain::BeaconProposer, Domain::BeaconProposer,
fork, fork,
); );

View File

@ -101,7 +101,7 @@ fn verify_transfer_parametric<T: EthSpec>(
.get(transfer.sender as usize) .get(transfer.sender as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?; .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
let epoch = state.slot.epoch(spec.slots_per_epoch); let epoch = state.slot.epoch(T::slots_per_epoch());
// Ensure one of the following is met: // Ensure one of the following is met:
// //
@ -136,7 +136,7 @@ fn verify_transfer_parametric<T: EthSpec>(
// Verify the transfer signature. // Verify the transfer signature.
let message = transfer.signed_root(); let message = transfer.signed_root();
let domain = spec.get_domain( let domain = spec.get_domain(
transfer.slot.epoch(spec.slots_per_epoch), transfer.slot.epoch(T::slots_per_epoch()),
Domain::Transfer, Domain::Transfer,
&state.fork, &state.fork,
); );

View File

@ -42,7 +42,7 @@ pub fn per_epoch_processing<T: EthSpec>(
validator_statuses.process_attestations(&state, spec)?; validator_statuses.process_attestations(&state, spec)?;
// Justification and finalization. // Justification and finalization.
process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; process_justification_and_finalization(state, &validator_statuses.total_balances)?;
// Crosslinks. // Crosslinks.
let winning_root_for_shards = process_crosslinks(state, spec)?; let winning_root_for_shards = process_crosslinks(state, spec)?;
@ -84,9 +84,8 @@ pub fn per_epoch_processing<T: EthSpec>(
pub fn process_justification_and_finalization<T: EthSpec>( pub fn process_justification_and_finalization<T: EthSpec>(
state: &mut BeaconState<T>, state: &mut BeaconState<T>,
total_balances: &TotalBalances, total_balances: &TotalBalances,
spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
if state.current_epoch() == spec.genesis_epoch { if state.current_epoch() == T::genesis_epoch() {
return Ok(()); return Ok(());
} }
@ -104,14 +103,14 @@ pub fn process_justification_and_finalization<T: EthSpec>(
if total_balances.previous_epoch_target_attesters * 3 >= total_balances.previous_epoch * 2 { if total_balances.previous_epoch_target_attesters * 3 >= total_balances.previous_epoch * 2 {
state.current_justified_epoch = previous_epoch; state.current_justified_epoch = previous_epoch;
state.current_justified_root = state.current_justified_root =
*state.get_block_root_at_epoch(state.current_justified_epoch, spec)?; *state.get_block_root_at_epoch(state.current_justified_epoch)?;
state.justification_bitfield |= 2; state.justification_bitfield |= 2;
} }
// If the current epoch gets justified, fill the last bit. // If the current epoch gets justified, fill the last bit.
if total_balances.current_epoch_target_attesters * 3 >= total_balances.current_epoch * 2 { if total_balances.current_epoch_target_attesters * 3 >= total_balances.current_epoch * 2 {
state.current_justified_epoch = current_epoch; state.current_justified_epoch = current_epoch;
state.current_justified_root = state.current_justified_root =
*state.get_block_root_at_epoch(state.current_justified_epoch, spec)?; *state.get_block_root_at_epoch(state.current_justified_epoch)?;
state.justification_bitfield |= 1; state.justification_bitfield |= 1;
} }
@ -120,22 +119,22 @@ pub fn process_justification_and_finalization<T: EthSpec>(
// The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source. // The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source.
if (bitfield >> 1) % 8 == 0b111 && old_previous_justified_epoch == current_epoch - 3 { if (bitfield >> 1) % 8 == 0b111 && old_previous_justified_epoch == current_epoch - 3 {
state.finalized_epoch = old_previous_justified_epoch; state.finalized_epoch = old_previous_justified_epoch;
state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?;
} }
// The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source. // The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source.
if (bitfield >> 1) % 4 == 0b11 && state.previous_justified_epoch == current_epoch - 2 { if (bitfield >> 1) % 4 == 0b11 && state.previous_justified_epoch == current_epoch - 2 {
state.finalized_epoch = old_previous_justified_epoch; state.finalized_epoch = old_previous_justified_epoch;
state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?;
} }
// The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 2nd as source. // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 2nd as source.
if bitfield % 8 == 0b111 && state.current_justified_epoch == current_epoch - 2 { if bitfield % 8 == 0b111 && state.current_justified_epoch == current_epoch - 2 {
state.finalized_epoch = old_current_justified_epoch; state.finalized_epoch = old_current_justified_epoch;
state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?;
} }
// The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source. // The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source.
if bitfield % 4 == 0b11 && state.current_justified_epoch == current_epoch - 1 { if bitfield % 4 == 0b11 && state.current_justified_epoch == current_epoch - 1 {
state.finalized_epoch = old_current_justified_epoch; state.finalized_epoch = old_current_justified_epoch;
state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch, spec)?; state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?;
} }
Ok(()) Ok(())
@ -157,7 +156,7 @@ pub fn process_crosslinks<T: EthSpec>(
state.previous_crosslinks = state.current_crosslinks.clone(); state.previous_crosslinks = state.current_crosslinks.clone();
for relative_epoch in vec![RelativeEpoch::Previous, RelativeEpoch::Current] { for &relative_epoch in &[RelativeEpoch::Previous, RelativeEpoch::Current] {
let epoch = relative_epoch.into_epoch(state.current_epoch()); let epoch = relative_epoch.into_epoch(state.current_epoch());
for offset in 0..state.get_epoch_committee_count(relative_epoch)? { for offset in 0..state.get_epoch_committee_count(relative_epoch)? {
let shard = let shard =
@ -212,7 +211,7 @@ pub fn process_final_updates<T: EthSpec>(
} }
// Update start shard. // Update start shard.
state.latest_start_shard = state.next_epoch_start_shard()?; state.latest_start_shard = state.next_epoch_start_shard(spec)?;
// This is a hack to allow us to update index roots and slashed balances for the next epoch. // This is a hack to allow us to update index roots and slashed balances for the next epoch.
// //
@ -241,7 +240,7 @@ pub fn process_final_updates<T: EthSpec>(
state.slot -= 1; state.slot -= 1;
} }
if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / spec.slots_per_epoch) == 0 { if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 {
let historical_batch = state.historical_batch(); let historical_batch = state.historical_batch();
state state
.historical_roots .historical_roots
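Annotation: the justification logic above encodes recent-epoch justification in a small bitfield: after the per-epoch left shift (done in surrounding code not shown in this hunk), bit 0 means the current epoch was justified and bit 1 the previous one, so a check like `(bitfield >> 1) % 8 == 0b111` asks whether bits 1 through 3 are all set. A tiny worked example of just the bit arithmetic (illustration only, not protocol code):

fn main() {
    let mut bitfield: u64 = 0;

    // Shift history by one epoch, then set bits for newly justified epochs,
    // as in `state.justification_bitfield |= 2` / `|= 1` above.
    bitfield <<= 1;
    bitfield |= 2; // previous epoch justified
    bitfield |= 1; // current epoch justified

    // Bits 1..=3 are not all set: the 2nd/3rd/4th most recent epochs were not all justified.
    assert_ne!((bitfield >> 1) % 8, 0b111);

    // Bits 0 and 1 are both set: the 1st/2nd most recent epochs were both justified.
    assert_eq!(bitfield % 4, 0b11);
}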

View File

@ -39,7 +39,7 @@ pub fn process_rewards_and_penalties<T: EthSpec>(
winning_root_for_shards: &WinningRootHashSet, winning_root_for_shards: &WinningRootHashSet,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
if state.current_epoch() == spec.genesis_epoch { if state.current_epoch() == T::genesis_epoch() {
return Ok(()); return Ok(());
} }

View File

@ -8,13 +8,14 @@ use types::*;
fn runs_without_error() { fn runs_without_error() {
Builder::from_env(Env::default().default_filter_or("error")).init(); Builder::from_env(Env::default().default_filter_or("error")).init();
let spec = FewValidatorsEthSpec::spec(); let spec = MinimalEthSpec::default_spec();
let mut builder: TestingBeaconStateBuilder<FewValidatorsEthSpec> = let mut builder: TestingBeaconStateBuilder<MinimalEthSpec> =
TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);
let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); let target_slot =
builder.teleport_to_slot(target_slot, &spec); (MinimalEthSpec::genesis_epoch() + 4).end_slot(MinimalEthSpec::slots_per_epoch());
builder.teleport_to_slot(target_slot);
let (mut state, _keypairs) = builder.build(); let (mut state, _keypairs) = builder.build();

View File

@ -223,7 +223,7 @@ impl ValidatorStatuses {
if is_from_epoch(a, state.current_epoch()) { if is_from_epoch(a, state.current_epoch()) {
status.is_current_epoch_attester = true; status.is_current_epoch_attester = true;
if target_matches_epoch_start_block(a, state, state.current_epoch(), spec)? { if target_matches_epoch_start_block(a, state, state.current_epoch())? {
status.is_current_epoch_target_attester = true; status.is_current_epoch_target_attester = true;
} }
} else if is_from_epoch(a, state.previous_epoch()) { } else if is_from_epoch(a, state.previous_epoch()) {
@ -233,7 +233,7 @@ impl ValidatorStatuses {
let attestation_slot = state.get_attestation_slot(&a.data)?; let attestation_slot = state.get_attestation_slot(&a.data)?;
let inclusion_slot = attestation_slot + a.inclusion_delay; let inclusion_slot = attestation_slot + a.inclusion_delay;
let relative_epoch = let relative_epoch =
RelativeEpoch::from_slot(state.slot, inclusion_slot, spec.slots_per_epoch)?; RelativeEpoch::from_slot(state.slot, inclusion_slot, T::slots_per_epoch())?;
status.inclusion_info = Some(InclusionInfo { status.inclusion_info = Some(InclusionInfo {
slot: inclusion_slot, slot: inclusion_slot,
distance: a.inclusion_delay, distance: a.inclusion_delay,
@ -244,7 +244,7 @@ impl ValidatorStatuses {
)?, )?,
}); });
if target_matches_epoch_start_block(a, state, state.previous_epoch(), spec)? { if target_matches_epoch_start_block(a, state, state.previous_epoch())? {
status.is_previous_epoch_target_attester = true; status.is_previous_epoch_target_attester = true;
} }
@ -297,7 +297,7 @@ impl ValidatorStatuses {
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), BeaconStateError> { ) -> Result<(), BeaconStateError> {
// Loop through each slot in the previous epoch. // Loop through each slot in the previous epoch.
for slot in state.previous_epoch().slot_iter(spec.slots_per_epoch) { for slot in state.previous_epoch().slot_iter(T::slots_per_epoch()) {
let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?; let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?;
// Loop through each committee in the slot. // Loop through each committee in the slot.
@ -336,9 +336,8 @@ fn target_matches_epoch_start_block<T: EthSpec>(
a: &PendingAttestation, a: &PendingAttestation,
state: &BeaconState<T>, state: &BeaconState<T>,
epoch: Epoch, epoch: Epoch,
spec: &ChainSpec,
) -> Result<bool, BeaconStateError> { ) -> Result<bool, BeaconStateError> {
let slot = epoch.start_slot(spec.slots_per_epoch); let slot = epoch.start_slot(T::slots_per_epoch());
let state_boundary_root = *state.get_block_root(slot)?; let state_boundary_root = *state.get_block_root(slot)?;
Ok(a.data.target_root == state_boundary_root) Ok(a.data.target_root == state_boundary_root)

View File

@ -1,5 +1,4 @@
use crate::*; use crate::*;
use tree_hash::SignedRoot;
use types::*; use types::*;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@ -17,7 +16,7 @@ pub fn per_slot_processing<T: EthSpec>(
) -> Result<(), Error> { ) -> Result<(), Error> {
cache_state(state, spec)?; cache_state(state, spec)?;
if (state.slot > spec.genesis_slot) && ((state.slot + 1) % spec.slots_per_epoch == 0) { if (state.slot > spec.genesis_slot) && ((state.slot + 1) % T::slots_per_epoch() == 0) {
per_epoch_processing(state, spec)?; per_epoch_processing(state, spec)?;
} }
@ -44,7 +43,7 @@ fn cache_state<T: EthSpec>(state: &mut BeaconState<T>, spec: &ChainSpec) -> Resu
// Store the previous slot's post state transition root. // Store the previous slot's post state transition root.
state.set_state_root(previous_slot, previous_slot_state_root)?; state.set_state_root(previous_slot, previous_slot_state_root)?;
let latest_block_root = Hash256::from_slice(&state.latest_block_header.signed_root()[..]); let latest_block_root = state.latest_block_header.canonical_root();
state.set_block_root(previous_slot, latest_block_root)?; state.set_block_root(previous_slot, latest_block_root)?;
// Set the state slot back to what it should be. // Set the state slot back to what it should be.

View File

@ -1,4 +1,4 @@
use self::committee_cache::{get_active_validator_indices, CommitteeCache}; use self::committee_cache::get_active_validator_indices;
use self::exit_cache::ExitCache; use self::exit_cache::ExitCache;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::*; use crate::*;
@ -15,6 +15,7 @@ use test_random_derive::TestRandom;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use tree_hash_derive::{CachedTreeHash, TreeHash}; use tree_hash_derive::{CachedTreeHash, TreeHash};
pub use self::committee_cache::CommitteeCache;
pub use beacon_state_types::*; pub use beacon_state_types::*;
mod beacon_state_types; mod beacon_state_types;
@ -111,7 +112,7 @@ where
pub previous_crosslinks: FixedLenVec<Crosslink, T::ShardCount>, pub previous_crosslinks: FixedLenVec<Crosslink, T::ShardCount>,
pub latest_block_roots: FixedLenVec<Hash256, T::SlotsPerHistoricalRoot>, pub latest_block_roots: FixedLenVec<Hash256, T::SlotsPerHistoricalRoot>,
#[compare_fields(as_slice)] #[compare_fields(as_slice)]
latest_state_roots: FixedLenVec<Hash256, T::SlotsPerHistoricalRoot>, pub latest_state_roots: FixedLenVec<Hash256, T::SlotsPerHistoricalRoot>,
#[compare_fields(as_slice)] #[compare_fields(as_slice)]
latest_active_index_roots: FixedLenVec<Hash256, T::LatestActiveIndexRootsLength>, latest_active_index_roots: FixedLenVec<Hash256, T::LatestActiveIndexRootsLength>,
latest_slashed_balances: FixedLenVec<u64, T::LatestSlashedExitLength>, latest_slashed_balances: FixedLenVec<u64, T::LatestSlashedExitLength>,
@ -163,7 +164,7 @@ impl<T: EthSpec> BeaconState<T> {
spec: &ChainSpec, spec: &ChainSpec,
) -> BeaconState<T> { ) -> BeaconState<T> {
let initial_crosslink = Crosslink { let initial_crosslink = Crosslink {
epoch: spec.genesis_epoch, epoch: T::genesis_epoch(),
previous_crosslink_root: spec.zero_hash, previous_crosslink_root: spec.zero_hash,
crosslink_data_root: spec.zero_hash, crosslink_data_root: spec.zero_hash,
}; };
@ -172,7 +173,7 @@ impl<T: EthSpec> BeaconState<T> {
// Misc // Misc
slot: spec.genesis_slot, slot: spec.genesis_slot,
genesis_time, genesis_time,
fork: Fork::genesis(spec), fork: Fork::genesis(T::genesis_epoch()),
// Validator registry // Validator registry
validator_registry: vec![], // Set later in the function. validator_registry: vec![], // Set later in the function.
@ -188,12 +189,12 @@ impl<T: EthSpec> BeaconState<T> {
// Finality // Finality
previous_epoch_attestations: vec![], previous_epoch_attestations: vec![],
current_epoch_attestations: vec![], current_epoch_attestations: vec![],
previous_justified_epoch: spec.genesis_epoch, previous_justified_epoch: T::genesis_epoch(),
current_justified_epoch: spec.genesis_epoch, current_justified_epoch: T::genesis_epoch(),
previous_justified_root: spec.zero_hash, previous_justified_root: spec.zero_hash,
current_justified_root: spec.zero_hash, current_justified_root: spec.zero_hash,
justification_bitfield: 0, justification_bitfield: 0,
finalized_epoch: spec.genesis_epoch, finalized_epoch: T::genesis_epoch(),
finalized_root: spec.zero_hash, finalized_root: spec.zero_hash,
// Recent state // Recent state
@ -300,10 +301,10 @@ impl<T: EthSpec> BeaconState<T> {
Ok(cache.epoch_start_shard()) Ok(cache.epoch_start_shard())
} }
pub fn next_epoch_start_shard(&self) -> Result<u64, Error> { pub fn next_epoch_start_shard(&self, spec: &ChainSpec) -> Result<u64, Error> {
let cache = self.cache(RelativeEpoch::Current)?; let cache = self.cache(RelativeEpoch::Current)?;
let active_validator_count = cache.active_validator_count(); let active_validator_count = cache.active_validator_count();
let shard_delta = T::get_shard_delta(active_validator_count); let shard_delta = T::get_shard_delta(active_validator_count, spec.target_committee_size);
Ok((self.latest_start_shard + shard_delta) % T::ShardCount::to_u64()) Ok((self.latest_start_shard + shard_delta) % T::ShardCount::to_u64())
} }
@ -422,7 +423,7 @@ impl<T: EthSpec> BeaconState<T> {
}; };
let effective_balance = self.validator_registry[candidate_index].effective_balance; let effective_balance = self.validator_registry[candidate_index].effective_balance;
if (effective_balance * MAX_RANDOM_BYTE) if (effective_balance * MAX_RANDOM_BYTE)
>= (spec.max_effective_balance * random_byte as u64) >= (spec.max_effective_balance * u64::from(random_byte))
{ {
break candidate_index; break candidate_index;
} }
@ -453,12 +454,8 @@ impl<T: EthSpec> BeaconState<T> {
/// ///
/// Spec v0.6.0 /// Spec v0.6.0
// FIXME(sproul): name swap with get_block_root // FIXME(sproul): name swap with get_block_root
pub fn get_block_root_at_epoch( pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> {
&self, self.get_block_root(epoch.start_slot(T::slots_per_epoch()))
epoch: Epoch,
spec: &ChainSpec,
) -> Result<&Hash256, BeaconStateError> {
self.get_block_root(epoch.start_slot(spec.slots_per_epoch))
} }
/// Sets the block root for some given slot. /// Sets the block root for some given slot.
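A minimal sketch of the new call shape: `get_block_root_at_epoch` drops its `&ChainSpec` argument because slots-per-epoch now comes from the `EthSpec` type. The helper below is hypothetical, and it assumes `BeaconState`, `BeaconStateError`, `Epoch` and `Hash256` are re-exported from the `types` crate.

use types::{BeaconState, BeaconStateError, Epoch, EthSpec, Hash256};

// Hypothetical helper: no `&ChainSpec` needs to be threaded through to look
// up a block root by epoch any more.
fn block_root_at_epoch<T: EthSpec>(
    state: &BeaconState<T>,
    epoch: Epoch,
) -> Result<&Hash256, BeaconStateError> {
    // Internally this uses `T::slots_per_epoch()` instead of `spec.slots_per_epoch`.
    state.get_block_root_at_epoch(epoch)
}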

View File

@ -1,5 +1,5 @@
use crate::*; use crate::*;
use fixed_len_vec::typenum::{Unsigned, U1024, U8, U8192}; use fixed_len_vec::typenum::{Unsigned, U0, U1024, U64, U8, U8192};
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use std::fmt::Debug; use std::fmt::Debug;
@ -9,14 +9,24 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
type LatestRandaoMixesLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; type LatestRandaoMixesLength: Unsigned + Clone + Sync + Send + Debug + PartialEq;
type LatestActiveIndexRootsLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; type LatestActiveIndexRootsLength: Unsigned + Clone + Sync + Send + Debug + PartialEq;
type LatestSlashedExitLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; type LatestSlashedExitLength: Unsigned + Clone + Sync + Send + Debug + PartialEq;
/// Note: `SlotsPerEpoch` is not necessarily required to be a compile-time constant. We include
/// it here just for the convenience of not passing `slots_per_epoch` around all the time.
type SlotsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq;
type GenesisEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq;
fn spec() -> ChainSpec; fn default_spec() -> ChainSpec;
fn genesis_epoch() -> Epoch {
Epoch::new(Self::GenesisEpoch::to_u64())
}
/// Return the number of committees in one epoch. /// Return the number of committees in one epoch.
/// ///
/// Spec v0.6.1 /// Spec v0.6.1
fn get_epoch_committee_count(active_validator_count: usize) -> usize { fn get_epoch_committee_count(
let target_committee_size = Self::spec().target_committee_size; active_validator_count: usize,
target_committee_size: usize,
) -> usize {
let shard_count = Self::shard_count(); let shard_count = Self::shard_count();
let slots_per_epoch = Self::slots_per_epoch() as usize; let slots_per_epoch = Self::slots_per_epoch() as usize;
@ -32,10 +42,10 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
/// Return the number of shards to increment `state.latest_start_shard` by in a given epoch. /// Return the number of shards to increment `state.latest_start_shard` by in a given epoch.
/// ///
/// Spec v0.6.3 /// Spec v0.6.3
fn get_shard_delta(active_validator_count: usize) -> u64 { fn get_shard_delta(active_validator_count: usize, target_committee_size: usize) -> u64 {
std::cmp::min( std::cmp::min(
Self::get_epoch_committee_count(active_validator_count) as u64, Self::get_epoch_committee_count(active_validator_count, target_committee_size) as u64,
Self::ShardCount::to_u64() - Self::ShardCount::to_u64() / Self::spec().slots_per_epoch, Self::ShardCount::to_u64() - Self::ShardCount::to_u64() / Self::slots_per_epoch(),
) )
} }
@ -45,21 +55,14 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
/// basic sense. This count is not required to provide any security guarantees regarding /// basic sense. This count is not required to provide any security guarantees regarding
/// decentralization, entropy, etc. /// decentralization, entropy, etc.
fn minimum_validator_count() -> usize { fn minimum_validator_count() -> usize {
Self::slots_per_epoch() as usize Self::SlotsPerEpoch::to_usize()
} }
/// Returns the `SLOTS_PER_EPOCH` constant for this specification. /// Returns the `SLOTS_PER_EPOCH` constant for this specification.
/// ///
/// Spec v0.6.1 /// Spec v0.6.1
fn slots_per_epoch() -> u64 { fn slots_per_epoch() -> u64 {
Self::spec().slots_per_epoch Self::SlotsPerEpoch::to_u64()
}
/// Returns the `SLOTS_PER_EPOCH` constant for this specification.
///
/// Spec v0.6.1
fn genesis_epoch() -> Epoch {
Self::spec().genesis_epoch
} }
/// Returns the `SHARD_COUNT` constant for this specification. /// Returns the `SHARD_COUNT` constant for this specification.
@ -102,54 +105,40 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq {
/// ///
/// Spec v0.6.1 /// Spec v0.6.1
#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct FoundationEthSpec; pub struct MainnetEthSpec;
impl EthSpec for FoundationEthSpec { impl EthSpec for MainnetEthSpec {
type ShardCount = U1024; type ShardCount = U1024;
type SlotsPerHistoricalRoot = U8192; type SlotsPerHistoricalRoot = U8192;
type LatestRandaoMixesLength = U8192; type LatestRandaoMixesLength = U8192;
type LatestActiveIndexRootsLength = U8192; type LatestActiveIndexRootsLength = U8192;
type LatestSlashedExitLength = U8192; type LatestSlashedExitLength = U8192;
type SlotsPerEpoch = U64;
type GenesisEpoch = U0;
fn spec() -> ChainSpec { fn default_spec() -> ChainSpec {
ChainSpec::foundation() ChainSpec::mainnet()
} }
} }
pub type FoundationBeaconState = BeaconState<FoundationEthSpec>; pub type FoundationBeaconState = BeaconState<MainnetEthSpec>;
/// Ethereum Foundation specifications, modified to be suitable for < 1000 validators. /// Ethereum Foundation specifications, modified to be suitable for < 1000 validators.
#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct FewValidatorsEthSpec; pub struct MinimalEthSpec;
impl EthSpec for FewValidatorsEthSpec { impl EthSpec for MinimalEthSpec {
type ShardCount = U8; type ShardCount = U8;
type SlotsPerHistoricalRoot = U8192; type SlotsPerHistoricalRoot = U8192;
type LatestRandaoMixesLength = U8192; type LatestRandaoMixesLength = U8192;
type LatestActiveIndexRootsLength = U8192; type LatestActiveIndexRootsLength = U8192;
type LatestSlashedExitLength = U8192; type LatestSlashedExitLength = U8192;
type SlotsPerEpoch = U8;
type GenesisEpoch = U0;
fn spec() -> ChainSpec { fn default_spec() -> ChainSpec {
ChainSpec::few_validators() ChainSpec::minimal()
} }
} }
pub type FewValidatorsBeaconState = BeaconState<FewValidatorsEthSpec>; pub type MinimalBeaconState = BeaconState<MinimalEthSpec>;
/// Specifications suitable for a small-scale (< 1000 validators) lighthouse testnet.
#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)]
pub struct LighthouseTestnetEthSpec;
impl EthSpec for LighthouseTestnetEthSpec {
type ShardCount = U8;
type SlotsPerHistoricalRoot = U8192;
type LatestRandaoMixesLength = U8192;
type LatestActiveIndexRootsLength = U8192;
type LatestSlashedExitLength = U8192;
fn spec() -> ChainSpec {
ChainSpec::lighthouse_testnet()
}
}
pub type LighthouseTestnetBeaconState = BeaconState<LighthouseTestnetEthSpec>;
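A short sketch of how the renamed presets behave after this change. The numeric values follow from the associated types above (`U64`/`U8` for `SlotsPerEpoch`, `U0` for `GenesisEpoch`, and `target_committee_size: 1` in the minimal spec); the import paths from the `types` crate and `Epoch::as_u64` are assumptions.

use types::{EthSpec, MainnetEthSpec, MinimalEthSpec};

fn main() {
    // `slots_per_epoch` and `genesis_epoch` are now type-level constants,
    // so no `ChainSpec` value has to be passed around for them.
    assert_eq!(MainnetEthSpec::slots_per_epoch(), 64);
    assert_eq!(MinimalEthSpec::slots_per_epoch(), 8);
    assert_eq!(MinimalEthSpec::genesis_epoch().as_u64(), 0);

    // Runtime parameters still live in the (renamed) `ChainSpec` presets.
    let spec = MinimalEthSpec::default_spec();
    assert_eq!(spec.target_committee_size, 1);
}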

View File

@ -2,6 +2,7 @@ use super::BeaconState;
use crate::*; use crate::*;
use core::num::NonZeroUsize; use core::num::NonZeroUsize;
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use std::ops::Range; use std::ops::Range;
use swap_or_not_shuffle::shuffle_list; use swap_or_not_shuffle::shuffle_list;
@ -9,7 +10,7 @@ mod tests;
/// Computes and stores the shuffling for an epoch. Provides various getters to allow callers to /// Computes and stores the shuffling for an epoch. Provides various getters to allow callers to
/// read the committees for the given epoch. /// read the committees for the given epoch.
#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)] #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)]
pub struct CommitteeCache { pub struct CommitteeCache {
initialized_epoch: Option<Epoch>, initialized_epoch: Option<Epoch>,
shuffling: Vec<usize>, shuffling: Vec<usize>,
@ -44,12 +45,16 @@ impl CommitteeCache {
return Err(Error::InsufficientValidators); return Err(Error::InsufficientValidators);
} }
let committee_count = T::get_epoch_committee_count(active_validator_indices.len()) as usize; let committee_count = T::get_epoch_committee_count(
active_validator_indices.len(),
spec.target_committee_size,
) as usize;
let shuffling_start_shard = match relative_epoch { let shuffling_start_shard = match relative_epoch {
RelativeEpoch::Current => state.latest_start_shard, RelativeEpoch::Current => state.latest_start_shard,
RelativeEpoch::Previous => { RelativeEpoch::Previous => {
let shard_delta = T::get_shard_delta(active_validator_indices.len()); let shard_delta =
T::get_shard_delta(active_validator_indices.len(), spec.target_committee_size);
(state.latest_start_shard + T::ShardCount::to_u64() - shard_delta) (state.latest_start_shard + T::ShardCount::to_u64() - shard_delta)
% T::ShardCount::to_u64() % T::ShardCount::to_u64()
@ -57,7 +62,8 @@ impl CommitteeCache {
RelativeEpoch::Next => { RelativeEpoch::Next => {
let current_active_validators = let current_active_validators =
get_active_validator_count(&state.validator_registry, state.current_epoch()); get_active_validator_count(&state.validator_registry, state.current_epoch());
let shard_delta = T::get_shard_delta(current_active_validators); let shard_delta =
T::get_shard_delta(current_active_validators, spec.target_committee_size);
(state.latest_start_shard + shard_delta) % T::ShardCount::to_u64() (state.latest_start_shard + shard_delta) % T::ShardCount::to_u64()
} }
@ -152,7 +158,6 @@ impl CommitteeCache {
let i = self.shuffled_position(validator_index)?; let i = self.shuffled_position(validator_index)?;
(0..self.committee_count) (0..self.committee_count)
.into_iter()
.map(|nth_committee| (nth_committee, self.compute_committee_range(nth_committee))) .map(|nth_committee| (nth_committee, self.compute_committee_range(nth_committee)))
.find(|(_, range)| { .find(|(_, range)| {
if let Some(range) = range { if let Some(range) = range {
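A sketch of the explicit-parameter form used above: both helpers now receive `target_committee_size` from a `ChainSpec` instead of reading it through the removed `Self::spec()`. The import paths and the validator count are illustrative assumptions.

use types::{ChainSpec, EthSpec, MinimalEthSpec};

fn main() {
    let spec: ChainSpec = MinimalEthSpec::default_spec();
    let active_validators = 64; // arbitrary example value

    // The committee size is passed in explicitly rather than looked up implicitly.
    let committees = MinimalEthSpec::get_epoch_committee_count(
        active_validators,
        spec.target_committee_size,
    );
    let shard_delta = MinimalEthSpec::get_shard_delta(
        active_validators,
        spec.target_committee_size,
    );

    println!("committees per epoch: {}, shard delta: {}", committees, shard_delta);
}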

View File

@ -20,12 +20,12 @@ fn default_values() {
} }
fn new_state<T: EthSpec>(validator_count: usize, slot: Slot) -> BeaconState<T> { fn new_state<T: EthSpec>(validator_count: usize, slot: Slot) -> BeaconState<T> {
let spec = &T::spec(); let spec = &T::default_spec();
let mut builder = let mut builder =
TestingBeaconStateBuilder::from_single_keypair(validator_count, &Keypair::random(), spec); TestingBeaconStateBuilder::from_single_keypair(validator_count, &Keypair::random(), spec);
builder.teleport_to_slot(slot, spec); builder.teleport_to_slot(slot);
let (state, _keypairs) = builder.build(); let (state, _keypairs) = builder.build();
@ -34,8 +34,8 @@ fn new_state<T: EthSpec>(validator_count: usize, slot: Slot) -> BeaconState<T> {
#[test] #[test]
fn fails_without_validators() { fn fails_without_validators() {
let state = new_state::<FewValidatorsEthSpec>(0, Slot::new(0)); let state = new_state::<MinimalEthSpec>(0, Slot::new(0));
let spec = &FewValidatorsEthSpec::spec(); let spec = &MinimalEthSpec::default_spec();
assert_eq!( assert_eq!(
CommitteeCache::initialized(&state, state.current_epoch(), &spec), CommitteeCache::initialized(&state, state.current_epoch(), &spec),
@ -45,8 +45,8 @@ fn fails_without_validators() {
#[test] #[test]
fn initializes_with_the_right_epoch() { fn initializes_with_the_right_epoch() {
let state = new_state::<FewValidatorsEthSpec>(16, Slot::new(0)); let state = new_state::<MinimalEthSpec>(16, Slot::new(0));
let spec = &FewValidatorsEthSpec::spec(); let spec = &MinimalEthSpec::default_spec();
let cache = CommitteeCache::default(); let cache = CommitteeCache::default();
assert_eq!(cache.initialized_epoch, None); assert_eq!(cache.initialized_epoch, None);
@ -63,14 +63,14 @@ fn initializes_with_the_right_epoch() {
#[test] #[test]
fn shuffles_for_the_right_epoch() { fn shuffles_for_the_right_epoch() {
let num_validators = FewValidatorsEthSpec::minimum_validator_count() * 2; let num_validators = MinimalEthSpec::minimum_validator_count() * 2;
let epoch = Epoch::new(100_000_000); let epoch = Epoch::new(100_000_000);
let slot = epoch.start_slot(FewValidatorsEthSpec::slots_per_epoch()); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch());
let mut state = new_state::<FewValidatorsEthSpec>(num_validators, slot); let mut state = new_state::<MinimalEthSpec>(num_validators, slot);
let spec = &FewValidatorsEthSpec::spec(); let spec = &MinimalEthSpec::default_spec();
let distinct_hashes: Vec<Hash256> = (0..FewValidatorsEthSpec::latest_randao_mixes_length()) let distinct_hashes: Vec<Hash256> = (0..MinimalEthSpec::latest_randao_mixes_length())
.into_iter() .into_iter()
.map(|i| Hash256::from(i as u64)) .map(|i| Hash256::from(i as u64))
.collect(); .collect();
@ -118,17 +118,19 @@ fn shuffles_for_the_right_epoch() {
#[test] #[test]
fn can_start_on_any_shard() { fn can_start_on_any_shard() {
let num_validators = FewValidatorsEthSpec::minimum_validator_count() * 2; let num_validators = MinimalEthSpec::minimum_validator_count() * 2;
let epoch = Epoch::new(100_000_000); let epoch = Epoch::new(100_000_000);
let slot = epoch.start_slot(FewValidatorsEthSpec::slots_per_epoch()); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch());
let mut state = new_state::<FewValidatorsEthSpec>(num_validators, slot); let mut state = new_state::<MinimalEthSpec>(num_validators, slot);
let spec = &FewValidatorsEthSpec::spec(); let spec = &MinimalEthSpec::default_spec();
let shard_delta = FewValidatorsEthSpec::get_shard_delta(num_validators); let target_committee_size = MinimalEthSpec::default_spec().target_committee_size;
let shard_count = FewValidatorsEthSpec::shard_count() as u64;
for i in 0..FewValidatorsEthSpec::shard_count() as u64 { let shard_delta = MinimalEthSpec::get_shard_delta(num_validators, target_committee_size);
let shard_count = MinimalEthSpec::shard_count() as u64;
for i in 0..MinimalEthSpec::shard_count() as u64 {
state.latest_start_shard = i; state.latest_start_shard = i;
let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap();
@ -156,15 +158,17 @@ impl EthSpec for ExcessShardsEthSpec {
type LatestRandaoMixesLength = U8192; type LatestRandaoMixesLength = U8192;
type LatestActiveIndexRootsLength = U8192; type LatestActiveIndexRootsLength = U8192;
type LatestSlashedExitLength = U8192; type LatestSlashedExitLength = U8192;
type SlotsPerEpoch = U8;
type GenesisEpoch = U0;
fn spec() -> ChainSpec { fn default_spec() -> ChainSpec {
ChainSpec::few_validators() ChainSpec::minimal()
} }
} }
#[test] #[test]
fn starts_on_the_correct_shard() { fn starts_on_the_correct_shard() {
let spec = &ExcessShardsEthSpec::spec(); let spec = &ExcessShardsEthSpec::default_spec();
let num_validators = ExcessShardsEthSpec::shard_count(); let num_validators = ExcessShardsEthSpec::shard_count();
@ -206,14 +210,16 @@ fn starts_on_the_correct_shard() {
let previous_shards = ExcessShardsEthSpec::get_epoch_committee_count( let previous_shards = ExcessShardsEthSpec::get_epoch_committee_count(
get_active_validator_count(&state.validator_registry, previous_epoch), get_active_validator_count(&state.validator_registry, previous_epoch),
spec.target_committee_size,
); );
let current_shards = ExcessShardsEthSpec::get_epoch_committee_count( let current_shards = ExcessShardsEthSpec::get_epoch_committee_count(
get_active_validator_count(&state.validator_registry, current_epoch), get_active_validator_count(&state.validator_registry, current_epoch),
spec.target_committee_size,
);
let next_shards = ExcessShardsEthSpec::get_epoch_committee_count(
get_active_validator_count(&state.validator_registry, next_epoch),
spec.target_committee_size,
); );
let next_shards = ExcessShardsEthSpec::get_epoch_committee_count(get_active_validator_count(
&state.validator_registry,
next_epoch,
));
assert_eq!( assert_eq!(
previous_shards as usize, previous_shards as usize,

View File

@ -7,7 +7,7 @@ ssz_tests!(FoundationBeaconState);
cached_tree_hash_tests!(FoundationBeaconState); cached_tree_hash_tests!(FoundationBeaconState);
fn test_beacon_proposer_index<T: EthSpec>() { fn test_beacon_proposer_index<T: EthSpec>() {
let spec = T::spec(); let spec = T::default_spec();
let relative_epoch = RelativeEpoch::Current; let relative_epoch = RelativeEpoch::Current;
// Build a state for testing. // Build a state for testing.
@ -53,7 +53,7 @@ fn test_beacon_proposer_index<T: EthSpec>() {
#[test] #[test]
fn beacon_proposer_index() { fn beacon_proposer_index() {
test_beacon_proposer_index::<FewValidatorsEthSpec>(); test_beacon_proposer_index::<MinimalEthSpec>();
} }
/// Should produce (note the set notation brackets): /// Should produce (note the set notation brackets):
@ -61,7 +61,7 @@ fn beacon_proposer_index() {
/// (current_epoch - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY, current_epoch + /// (current_epoch - LATEST_ACTIVE_INDEX_ROOTS_LENGTH + ACTIVATION_EXIT_DELAY, current_epoch +
/// ACTIVATION_EXIT_DELAY] /// ACTIVATION_EXIT_DELAY]
fn active_index_range<T: EthSpec>(current_epoch: Epoch) -> RangeInclusive<Epoch> { fn active_index_range<T: EthSpec>(current_epoch: Epoch) -> RangeInclusive<Epoch> {
let delay = T::spec().activation_exit_delay; let delay = T::default_spec().activation_exit_delay;
let start: i32 = let start: i32 =
current_epoch.as_u64() as i32 - T::latest_active_index_roots() as i32 + delay as i32; current_epoch.as_u64() as i32 - T::latest_active_index_roots() as i32 + delay as i32;
@ -79,7 +79,7 @@ fn active_index_range<T: EthSpec>(current_epoch: Epoch) -> RangeInclusive<Epoch>
/// Test getting an active index root at the start and end of the valid range, and one either side /// Test getting an active index root at the start and end of the valid range, and one either side
/// of that range. /// of that range.
fn test_active_index<T: EthSpec>(state_slot: Slot) { fn test_active_index<T: EthSpec>(state_slot: Slot) {
let spec = T::spec(); let spec = T::default_spec();
let builder: TestingBeaconStateBuilder<T> = let builder: TestingBeaconStateBuilder<T> =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec); TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec);
let (mut state, _keypairs) = builder.build(); let (mut state, _keypairs) = builder.build();
@ -115,11 +115,11 @@ fn test_active_index<T: EthSpec>(state_slot: Slot) {
#[test] #[test]
fn get_active_index_root_index() { fn get_active_index_root_index() {
test_active_index::<FoundationEthSpec>(Slot::new(0)); test_active_index::<MainnetEthSpec>(Slot::new(0));
let epoch = Epoch::from(FoundationEthSpec::latest_active_index_roots() * 4); let epoch = Epoch::from(MainnetEthSpec::latest_active_index_roots() * 4);
let slot = epoch.start_slot(FoundationEthSpec::slots_per_epoch()); let slot = epoch.start_slot(MainnetEthSpec::slots_per_epoch());
test_active_index::<FoundationEthSpec>(slot); test_active_index::<MainnetEthSpec>(slot);
} }
/// Test that /// Test that
@ -133,8 +133,8 @@ fn test_cache_initialization<'a, T: EthSpec>(
spec: &ChainSpec, spec: &ChainSpec,
) { ) {
let slot = relative_epoch let slot = relative_epoch
.into_epoch(state.slot.epoch(spec.slots_per_epoch)) .into_epoch(state.slot.epoch(T::slots_per_epoch()))
.start_slot(spec.slots_per_epoch); .start_slot(T::slots_per_epoch());
// Assuming the cache isn't already built, assert that a call to a cache-using function fails. // Assuming the cache isn't already built, assert that a call to a cache-using function fails.
assert_eq!( assert_eq!(
@ -166,13 +166,14 @@ fn test_cache_initialization<'a, T: EthSpec>(
#[test] #[test]
fn cache_initialization() { fn cache_initialization() {
let spec = FewValidatorsEthSpec::spec(); let spec = MinimalEthSpec::default_spec();
let builder: TestingBeaconStateBuilder<FewValidatorsEthSpec> = let builder: TestingBeaconStateBuilder<MinimalEthSpec> =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec); TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec);
let (mut state, _keypairs) = builder.build(); let (mut state, _keypairs) = builder.build();
state.slot = (spec.genesis_epoch + 1).start_slot(spec.slots_per_epoch); state.slot =
(MinimalEthSpec::genesis_epoch() + 1).start_slot(MinimalEthSpec::slots_per_epoch());
test_cache_initialization(&mut state, RelativeEpoch::Previous, &spec); test_cache_initialization(&mut state, RelativeEpoch::Previous, &spec);
test_cache_initialization(&mut state, RelativeEpoch::Current, &spec); test_cache_initialization(&mut state, RelativeEpoch::Current, &spec);
@ -202,7 +203,7 @@ fn tree_hash_cache() {
#[cfg(test)] #[cfg(test)]
mod committees { mod committees {
use super::*; use super::*;
use crate::beacon_state::FewValidatorsEthSpec; use crate::beacon_state::MinimalEthSpec;
use swap_or_not_shuffle::shuffle_list; use swap_or_not_shuffle::shuffle_list;
fn execute_committee_consistency_test<T: EthSpec>( fn execute_committee_consistency_test<T: EthSpec>(
@ -234,7 +235,7 @@ mod committees {
(start_shard..start_shard + T::shard_count() as u64).into_iter(); (start_shard..start_shard + T::shard_count() as u64).into_iter();
// Loop through all slots in the epoch being tested. // Loop through all slots in the epoch being tested.
for slot in epoch.slot_iter(spec.slots_per_epoch) { for slot in epoch.slot_iter(T::slots_per_epoch()) {
let crosslink_committees = state.get_crosslink_committees_at_slot(slot).unwrap(); let crosslink_committees = state.get_crosslink_committees_at_slot(slot).unwrap();
// Assert that the number of committees in this slot is consistent with the reported number // Assert that the number of committees in this slot is consistent with the reported number
@ -290,7 +291,7 @@ mod committees {
state_epoch: Epoch, state_epoch: Epoch,
cache_epoch: RelativeEpoch, cache_epoch: RelativeEpoch,
) { ) {
let spec = &T::spec(); let spec = &T::default_spec();
let mut builder = TestingBeaconStateBuilder::from_single_keypair( let mut builder = TestingBeaconStateBuilder::from_single_keypair(
validator_count, validator_count,
@ -298,8 +299,8 @@ mod committees {
spec, spec,
); );
let slot = state_epoch.start_slot(spec.slots_per_epoch); let slot = state_epoch.start_slot(T::slots_per_epoch());
builder.teleport_to_slot(slot, spec); builder.teleport_to_slot(slot);
let (mut state, _keypairs): (BeaconState<T>, _) = builder.build(); let (mut state, _keypairs): (BeaconState<T>, _) = builder.build();
@ -325,7 +326,7 @@ mod committees {
} }
fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) { fn committee_consistency_test_suite<T: EthSpec>(cached_epoch: RelativeEpoch) {
let spec = T::spec(); let spec = T::default_spec();
let validator_count = (T::shard_count() * spec.target_committee_size) + 1; let validator_count = (T::shard_count() * spec.target_committee_size) + 1;
@ -333,29 +334,29 @@ mod committees {
committee_consistency_test::<T>( committee_consistency_test::<T>(
validator_count as usize, validator_count as usize,
spec.genesis_epoch + 4, T::genesis_epoch() + 4,
cached_epoch, cached_epoch,
); );
committee_consistency_test::<T>( committee_consistency_test::<T>(
validator_count as usize, validator_count as usize,
spec.genesis_epoch + T::slots_per_historical_root() as u64 * T::slots_per_epoch() * 4, T::genesis_epoch() + T::slots_per_historical_root() as u64 * T::slots_per_epoch() * 4,
cached_epoch, cached_epoch,
); );
} }
#[test] #[test]
fn current_epoch_committee_consistency() { fn current_epoch_committee_consistency() {
committee_consistency_test_suite::<FewValidatorsEthSpec>(RelativeEpoch::Current); committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Current);
} }
#[test] #[test]
fn previous_epoch_committee_consistency() { fn previous_epoch_committee_consistency() {
committee_consistency_test_suite::<FewValidatorsEthSpec>(RelativeEpoch::Previous); committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Previous);
} }
#[test] #[test]
fn next_epoch_committee_consistency() { fn next_epoch_committee_consistency() {
committee_consistency_test_suite::<FewValidatorsEthSpec>(RelativeEpoch::Next); committee_consistency_test_suite::<MinimalEthSpec>(RelativeEpoch::Next);
} }
} }

View File

@ -1,7 +1,7 @@
use crate::*; use crate::*;
use int_to_bytes::int_to_bytes4; use int_to_bytes::int_to_bytes4;
use serde_derive::Deserialize; use serde_derive::{Deserialize, Serialize};
use test_utils::u8_from_hex_str; use test_utils::{u8_from_hex_str, u8_to_hex_str};
/// Each of the BLS signature domains. /// Each of the BLS signature domains.
/// ///
@ -18,7 +18,7 @@ pub enum Domain {
/// Holds all the "constants" for a BeaconChain. /// Holds all the "constants" for a BeaconChain.
/// ///
/// Spec v0.6.1 /// Spec v0.6.1
#[derive(PartialEq, Debug, Clone, Deserialize)] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
#[serde(default)] #[serde(default)]
pub struct ChainSpec { pub struct ChainSpec {
/* /*
@ -48,18 +48,19 @@ pub struct ChainSpec {
* Initial Values * Initial Values
*/ */
pub genesis_slot: Slot, pub genesis_slot: Slot,
pub genesis_epoch: Epoch, // Skipped because serde TOML can't handle u64::max_value, the typical value for this field.
#[serde(skip_serializing)]
pub far_future_epoch: Epoch, pub far_future_epoch: Epoch,
pub zero_hash: Hash256, pub zero_hash: Hash256,
#[serde(deserialize_with = "u8_from_hex_str")] #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")]
pub bls_withdrawal_prefix_byte: u8, pub bls_withdrawal_prefix_byte: u8,
/* /*
* Time parameters * Time parameters
*/ */
pub genesis_time: u64,
pub seconds_per_slot: u64, pub seconds_per_slot: u64,
pub min_attestation_inclusion_delay: u64, pub min_attestation_inclusion_delay: u64,
pub slots_per_epoch: u64,
pub min_seed_lookahead: Epoch, pub min_seed_lookahead: Epoch,
pub activation_exit_delay: u64, pub activation_exit_delay: u64,
pub slots_per_eth1_voting_period: u64, pub slots_per_eth1_voting_period: u64,
@ -137,7 +138,7 @@ impl ChainSpec {
/// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification.
/// ///
/// Spec v0.6.1 /// Spec v0.6.1
pub(crate) fn foundation() -> Self { pub fn mainnet() -> Self {
Self { Self {
/* /*
* Misc * Misc
@ -166,7 +167,6 @@ impl ChainSpec {
* Initial Values * Initial Values
*/ */
genesis_slot: Slot::new(0), genesis_slot: Slot::new(0),
genesis_epoch: Epoch::new(0),
far_future_epoch: Epoch::new(u64::max_value()), far_future_epoch: Epoch::new(u64::max_value()),
zero_hash: Hash256::zero(), zero_hash: Hash256::zero(),
bls_withdrawal_prefix_byte: 0, bls_withdrawal_prefix_byte: 0,
@ -174,9 +174,9 @@ impl ChainSpec {
/* /*
* Time parameters * Time parameters
*/ */
genesis_time: u64::from(u32::max_value()),
seconds_per_slot: 6, seconds_per_slot: 6,
min_attestation_inclusion_delay: 4, min_attestation_inclusion_delay: 4,
slots_per_epoch: 64,
min_seed_lookahead: Epoch::new(1), min_seed_lookahead: Epoch::new(1),
activation_exit_delay: 4, activation_exit_delay: 4,
slots_per_eth1_voting_period: 1_024, slots_per_eth1_voting_period: 1_024,
@ -219,47 +219,35 @@ impl ChainSpec {
* Boot nodes * Boot nodes
*/ */
boot_nodes: vec![], boot_nodes: vec![],
chain_id: 1, // foundation chain id chain_id: 1, // mainnet chain id
} }
} }
/// Returns a `ChainSpec` compatible with the Lighthouse testnet specification. /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators.
/// pub fn minimal() -> Self {
/// Spec v0.4.0 let genesis_slot = Slot::new(0);
pub(crate) fn lighthouse_testnet() -> Self {
/* // Note: these bootnodes are placeholders.
* Lighthouse testnet bootnodes //
*/ // Should be updated once static bootnodes exist.
let boot_nodes = vec!["/ip4/127.0.0.1/tcp/9000" let boot_nodes = vec!["/ip4/127.0.0.1/tcp/9000"
.parse() .parse()
.expect("correct multiaddr")]; .expect("correct multiaddr")];
Self { Self {
boot_nodes, boot_nodes,
chain_id: 2, // lighthouse testnet chain id
..ChainSpec::few_validators()
}
}
/// Returns a `ChainSpec` compatible with the specification suitable for 8 validators.
pub(crate) fn few_validators() -> Self {
let genesis_slot = Slot::new(0);
let slots_per_epoch = 8;
let genesis_epoch = genesis_slot.epoch(slots_per_epoch);
Self {
target_committee_size: 1, target_committee_size: 1,
chain_id: 2, // lighthouse testnet chain id
genesis_slot, genesis_slot,
genesis_epoch, shuffle_round_count: 10,
slots_per_epoch, ..ChainSpec::mainnet()
..ChainSpec::foundation()
} }
} }
} }
impl Default for ChainSpec { impl Default for ChainSpec {
fn default() -> Self { fn default() -> Self {
Self::foundation() Self::mainnet()
} }
} }
@ -269,12 +257,12 @@ mod tests {
use int_to_bytes::int_to_bytes8; use int_to_bytes::int_to_bytes8;
#[test] #[test]
fn test_foundation_spec_can_be_constructed() { fn test_mainnet_spec_can_be_constructed() {
let _ = ChainSpec::foundation(); let _ = ChainSpec::mainnet();
} }
fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) { fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) {
let fork = Fork::genesis(&spec); let fork = Fork::genesis(Epoch::new(0));
let epoch = Epoch::new(0); let epoch = Epoch::new(0);
let domain = spec.get_domain(epoch, domain_type, &fork); let domain = spec.get_domain(epoch, domain_type, &fork);
@ -287,7 +275,7 @@ mod tests {
#[test] #[test]
fn test_get_domain() { fn test_get_domain() {
let spec = ChainSpec::foundation(); let spec = ChainSpec::mainnet();
test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec);
test_domain(Domain::Randao, spec.domain_randao, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec);
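Because `ChainSpec` now derives `Serialize` (with `far_future_epoch` skipped, since TOML cannot represent `u64::max_value`), a preset can be written out as TOML. A rough sketch, assuming a `toml` dependency is available alongside the `types` crate:

use types::ChainSpec;

fn main() {
    let spec = ChainSpec::minimal();

    // Relies on the new `Serialize` derive plus `#[serde(skip_serializing)]`
    // on `far_future_epoch`.
    let toml_string = toml::to_string(&spec).expect("spec should serialize to TOML");
    println!("{}", toml_string);
}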

View File

@ -1,6 +1,6 @@
use crate::{ use crate::{
test_utils::{fork_from_hex_str, TestRandom}, test_utils::{fork_from_hex_str, TestRandom},
ChainSpec, Epoch, Epoch,
}; };
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
@ -36,11 +36,11 @@ impl Fork {
/// Initialize the `Fork` from the genesis parameters in the `spec`. /// Initialize the `Fork` from the genesis parameters in the `spec`.
/// ///
/// Spec v0.6.1 /// Spec v0.6.1
pub fn genesis(spec: &ChainSpec) -> Self { pub fn genesis(genesis_epoch: Epoch) -> Self {
Self { Self {
previous_version: [0; 4], previous_version: [0; 4],
current_version: [0; 4], current_version: [0; 4],
epoch: spec.genesis_epoch, epoch: genesis_epoch,
} }
} }
@ -63,13 +63,9 @@ mod tests {
cached_tree_hash_tests!(Fork); cached_tree_hash_tests!(Fork);
fn test_genesis(epoch: Epoch) { fn test_genesis(epoch: Epoch) {
let mut spec = ChainSpec::foundation(); let fork = Fork::genesis(epoch);
spec.genesis_epoch = epoch; assert_eq!(fork.epoch, epoch, "epoch incorrect");
let fork = Fork::genesis(&spec);
assert_eq!(fork.epoch, spec.genesis_epoch, "epoch incorrect");
assert_eq!( assert_eq!(
fork.previous_version, fork.current_version, fork.previous_version, fork.current_version,
"previous and current are not identical" "previous and current are not identical"

View File

@ -31,7 +31,7 @@ pub struct HistoricalBatch<T: EthSpec> {
mod tests { mod tests {
use super::*; use super::*;
pub type FoundationHistoricalBatch = HistoricalBatch<FoundationEthSpec>; pub type FoundationHistoricalBatch = HistoricalBatch<MainnetEthSpec>;
ssz_tests!(FoundationHistoricalBatch); ssz_tests!(FoundationHistoricalBatch);
cached_tree_hash_tests!(FoundationHistoricalBatch); cached_tree_hash_tests!(FoundationHistoricalBatch);

View File

@ -23,6 +23,7 @@ use std::iter::Iterator;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign};
#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Slot(u64); pub struct Slot(u64);
#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
@ -76,7 +77,7 @@ impl Epoch {
/// Position of some slot inside an epoch, if any. /// Position of some slot inside an epoch, if any.
/// ///
/// E.g., the first `slot` in `epoch` is at position `0`. /// E.g., the first `slot` in `epoch` is at position `0`.
pub fn position(&self, slot: Slot, slots_per_epoch: u64) -> Option<usize> { pub fn position(self, slot: Slot, slots_per_epoch: u64) -> Option<usize> {
let start = self.start_slot(slots_per_epoch); let start = self.start_slot(slots_per_epoch);
let end = self.end_slot(slots_per_epoch); let end = self.end_slot(slots_per_epoch);
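A quick sketch of the by-value `position` call; the arithmetic follows from `start_slot`/`end_slot`, and the import path from `types` is an assumption.

use types::{Epoch, Slot};

fn main() {
    let slots_per_epoch = 8;
    let epoch = Epoch::new(3);
    let slot = Slot::new(3 * slots_per_epoch + 5); // slot 29, the 6th slot of epoch 3

    // `position` now takes `self` by value; `Slot` and `Epoch` are small Copy types.
    assert_eq!(epoch.position(slot, slots_per_epoch), Some(5));

    // With `#[serde(transparent)]`, a `Slot` (de)serializes as a bare integer
    // rather than as a single-field struct.
}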

View File

@ -184,7 +184,7 @@ macro_rules! impl_display {
key: slog::Key, key: slog::Key,
serializer: &mut slog::Serializer, serializer: &mut slog::Serializer,
) -> slog::Result { ) -> slog::Result {
self.0.serialize(record, key, serializer) slog::Value::serialize(&self.0, record, key, serializer)
} }
} }
}; };

View File

@ -21,7 +21,7 @@ impl TestingAttestationDataBuilder {
let previous_epoch = state.previous_epoch(); let previous_epoch = state.previous_epoch();
let is_previous_epoch = let is_previous_epoch =
state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); state.slot.epoch(T::slots_per_epoch()) != slot.epoch(T::slots_per_epoch());
let source_epoch = if is_previous_epoch { let source_epoch = if is_previous_epoch {
state.previous_justified_epoch state.previous_justified_epoch
@ -37,11 +37,11 @@ impl TestingAttestationDataBuilder {
let target_root = if is_previous_epoch { let target_root = if is_previous_epoch {
*state *state
.get_block_root(previous_epoch.start_slot(spec.slots_per_epoch)) .get_block_root(previous_epoch.start_slot(T::slots_per_epoch()))
.unwrap() .unwrap()
} else { } else {
*state *state
.get_block_root(current_epoch.start_slot(spec.slots_per_epoch)) .get_block_root(current_epoch.start_slot(T::slots_per_epoch()))
.unwrap() .unwrap()
}; };
@ -57,7 +57,7 @@ impl TestingAttestationDataBuilder {
}; };
let source_root = *state let source_root = *state
.get_block_root(source_epoch.start_slot(spec.slots_per_epoch)) .get_block_root(source_epoch.start_slot(T::slots_per_epoch()))
.unwrap(); .unwrap();
let data = AttestationData { let data = AttestationData {

View File

@ -36,9 +36,9 @@ impl TestingBeaconBlockBuilder {
/// Signs the block. /// Signs the block.
/// ///
/// Modifying the block after signing may invalidate the signature. /// Modifying the block after signing may invalidate the signature.
pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { pub fn sign<T: EthSpec>(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) {
let message = self.block.signed_root(); let message = self.block.signed_root();
let epoch = self.block.slot.epoch(spec.slots_per_epoch); let epoch = self.block.slot.epoch(T::slots_per_epoch());
let domain = spec.get_domain(epoch, Domain::BeaconProposer, fork); let domain = spec.get_domain(epoch, Domain::BeaconProposer, fork);
self.block.signature = Signature::new(&message, domain, sk); self.block.signature = Signature::new(&message, domain, sk);
} }
@ -46,8 +46,8 @@ impl TestingBeaconBlockBuilder {
/// Sets the randao to be a signature across the blocks epoch. /// Sets the randao to be a signature across the blocks epoch.
/// ///
/// Modifying the block's slot after signing may invalidate the signature. /// Modifying the block's slot after signing may invalidate the signature.
pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { pub fn set_randao_reveal<T: EthSpec>(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) {
let epoch = self.block.slot.epoch(spec.slots_per_epoch); let epoch = self.block.slot.epoch(T::slots_per_epoch());
let message = epoch.tree_hash_root(); let message = epoch.tree_hash_root();
let domain = spec.get_domain(epoch, Domain::Randao, fork); let domain = spec.get_domain(epoch, Domain::Randao, fork);
self.block.body.randao_reveal = Signature::new(&message, domain, sk); self.block.body.randao_reveal = Signature::new(&message, domain, sk);
@ -59,14 +59,15 @@ impl TestingBeaconBlockBuilder {
} }
/// Inserts a signed, valid `ProposerSlashing` for the validator. /// Inserts a signed, valid `ProposerSlashing` for the validator.
pub fn insert_proposer_slashing( pub fn insert_proposer_slashing<T: EthSpec>(
&mut self, &mut self,
validator_index: u64, validator_index: u64,
secret_key: &SecretKey, secret_key: &SecretKey,
fork: &Fork, fork: &Fork,
spec: &ChainSpec, spec: &ChainSpec,
) { ) {
let proposer_slashing = build_proposer_slashing(validator_index, secret_key, fork, spec); let proposer_slashing =
build_proposer_slashing::<T>(validator_index, secret_key, fork, spec);
self.block.body.proposer_slashings.push(proposer_slashing); self.block.body.proposer_slashings.push(proposer_slashing);
} }
@ -115,7 +116,7 @@ impl TestingBeaconBlockBuilder {
// - The slot is too old to be included in a block at this slot. // - The slot is too old to be included in a block at this slot.
// - The `MAX_ATTESTATIONS`. // - The `MAX_ATTESTATIONS`.
loop { loop {
if state.slot >= slot + spec.slots_per_epoch { if state.slot >= slot + T::slots_per_epoch() {
break; break;
} }
@ -194,7 +195,7 @@ impl TestingBeaconBlockBuilder {
builder.set_index(index); builder.set_index(index);
builder.sign( builder.sign(
&keypair, &keypair,
state.slot.epoch(spec.slots_per_epoch), state.slot.epoch(T::slots_per_epoch()),
&state.fork, &state.fork,
spec, spec,
); );
@ -211,7 +212,7 @@ impl TestingBeaconBlockBuilder {
spec: &ChainSpec, spec: &ChainSpec,
) { ) {
let mut builder = TestingVoluntaryExitBuilder::new( let mut builder = TestingVoluntaryExitBuilder::new(
state.slot.epoch(spec.slots_per_epoch), state.slot.epoch(T::slots_per_epoch()),
validator_index, validator_index,
); );
@ -234,14 +235,19 @@ impl TestingBeaconBlockBuilder {
spec: &ChainSpec, spec: &ChainSpec,
) { ) {
let mut builder = TestingTransferBuilder::new(from, to, amount, state.slot); let mut builder = TestingTransferBuilder::new(from, to, amount, state.slot);
builder.sign(keypair, &state.fork, spec); builder.sign::<T>(keypair, &state.fork, spec);
self.block.body.transfers.push(builder.build()) self.block.body.transfers.push(builder.build())
} }
/// Signs and returns the block, consuming the builder. /// Signs and returns the block, consuming the builder.
pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock { pub fn build<T: EthSpec>(
self.sign(sk, fork, spec); mut self,
sk: &SecretKey,
fork: &Fork,
spec: &ChainSpec,
) -> BeaconBlock {
self.sign::<T>(sk, fork, spec);
self.block self.block
} }
@ -254,7 +260,7 @@ impl TestingBeaconBlockBuilder {
/// Builds a `ProposerSlashing` for some `validator_index`. /// Builds a `ProposerSlashing` for some `validator_index`.
/// ///
/// Signs the message using a `BeaconChainHarness`. /// Signs the message using a `BeaconChainHarness`.
fn build_proposer_slashing( fn build_proposer_slashing<T: EthSpec>(
validator_index: u64, validator_index: u64,
secret_key: &SecretKey, secret_key: &SecretKey,
fork: &Fork, fork: &Fork,
@ -265,7 +271,7 @@ fn build_proposer_slashing(
Signature::new(message, domain, secret_key) Signature::new(message, domain, secret_key)
}; };
TestingProposerSlashingBuilder::double_vote(validator_index, signer, spec) TestingProposerSlashingBuilder::double_vote::<T, _>(validator_index, signer)
} }
/// Builds an `AttesterSlashing` for some `validator_indices`. /// Builds an `AttesterSlashing` for some `validator_indices`.

View File

@ -6,7 +6,6 @@ use dirs;
use log::debug; use log::debug;
use rayon::prelude::*; use rayon::prelude::*;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::time::SystemTime;
pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs";
@ -113,8 +112,8 @@ impl<T: EthSpec> TestingBeaconStateBuilder<T> {
pubkey: keypair.pk.clone(), pubkey: keypair.pk.clone(),
withdrawal_credentials, withdrawal_credentials,
// All validators start active. // All validators start active.
activation_eligibility_epoch: spec.genesis_epoch, activation_eligibility_epoch: T::genesis_epoch(),
activation_epoch: spec.genesis_epoch, activation_epoch: T::genesis_epoch(),
exit_epoch: spec.far_future_epoch, exit_epoch: spec.far_future_epoch,
withdrawable_epoch: spec.far_future_epoch, withdrawable_epoch: spec.far_future_epoch,
slashed: false, slashed: false,
@ -123,20 +122,8 @@ impl<T: EthSpec> TestingBeaconStateBuilder<T> {
}) })
.collect(); .collect();
// TODO: Testing only. Burn with fire later.
// set genesis to the last 30 minute block.
// this is used for testing only. Allows multiple nodes to connect within a 30min window
// and agree on a genesis
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs();
let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0);
// genesis is now the last 30 minute block.
let genesis_time = now - secs_after_last_period;
let mut state = BeaconState::genesis( let mut state = BeaconState::genesis(
genesis_time, spec.genesis_time,
Eth1Data { Eth1Data {
deposit_root: Hash256::zero(), deposit_root: Hash256::zero(),
deposit_count: 0, deposit_count: 0,
@ -172,8 +159,8 @@ impl<T: EthSpec> TestingBeaconStateBuilder<T> {
} }
/// Sets the `BeaconState` to be in a slot, calling `teleport_to_epoch` to update the epoch. /// Sets the `BeaconState` to be in a slot, calling `teleport_to_epoch` to update the epoch.
pub fn teleport_to_slot(&mut self, slot: Slot, spec: &ChainSpec) { pub fn teleport_to_slot(&mut self, slot: Slot) {
self.teleport_to_epoch(slot.epoch(spec.slots_per_epoch), spec); self.teleport_to_epoch(slot.epoch(T::slots_per_epoch()));
self.state.slot = slot; self.state.slot = slot;
} }
@ -181,10 +168,10 @@ impl<T: EthSpec> TestingBeaconStateBuilder<T> {
/// ///
/// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e.,
/// highest justified and finalized slots, full justification bitfield, etc). /// highest justified and finalized slots, full justification bitfield, etc).
fn teleport_to_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { fn teleport_to_epoch(&mut self, epoch: Epoch) {
let state = &mut self.state; let state = &mut self.state;
let slot = epoch.start_slot(spec.slots_per_epoch); let slot = epoch.start_slot(T::slots_per_epoch());
state.slot = slot; state.slot = slot;
@ -214,8 +201,8 @@ impl<T: EthSpec> TestingBeaconStateBuilder<T> {
let current_epoch = state.current_epoch(); let current_epoch = state.current_epoch();
let previous_epoch = state.previous_epoch(); let previous_epoch = state.previous_epoch();
let first_slot = previous_epoch.start_slot(spec.slots_per_epoch).as_u64(); let first_slot = previous_epoch.start_slot(T::slots_per_epoch()).as_u64();
let last_slot = current_epoch.end_slot(spec.slots_per_epoch).as_u64() let last_slot = current_epoch.end_slot(T::slots_per_epoch()).as_u64()
- spec.min_attestation_inclusion_delay; - spec.min_attestation_inclusion_delay;
let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); let last_slot = std::cmp::min(state.slot.as_u64(), last_slot);
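A sketch of the simplified builder flow: `teleport_to_slot` no longer needs a `&ChainSpec` because slots-per-epoch is a type-level constant. The `types::test_utils` import path is assumed.

use types::test_utils::TestingBeaconStateBuilder;
use types::{EthSpec, MinimalEthSpec, Slot};

fn main() {
    let spec = MinimalEthSpec::default_spec();
    let mut builder: TestingBeaconStateBuilder<MinimalEthSpec> =
        TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec);

    // The epoch is derived from `MinimalEthSpec::slots_per_epoch()` internally.
    let target_slot = Slot::new(MinimalEthSpec::slots_per_epoch() * 3);
    builder.teleport_to_slot(target_slot);

    let (state, _keypairs) = builder.build();
    assert_eq!(state.slot, target_slot);
}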

View File

@ -17,8 +17,9 @@ impl TestingProposerSlashingBuilder {
/// - `domain: Domain` /// - `domain: Domain`
/// ///
/// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`).
pub fn double_vote<F>(proposer_index: u64, signer: F, spec: &ChainSpec) -> ProposerSlashing pub fn double_vote<T, F>(proposer_index: u64, signer: F) -> ProposerSlashing
where where
T: EthSpec,
F: Fn(u64, &[u8], Epoch, Domain) -> Signature, F: Fn(u64, &[u8], Epoch, Domain) -> Signature,
{ {
let slot = Slot::new(0); let slot = Slot::new(0);
@ -40,13 +41,13 @@ impl TestingProposerSlashingBuilder {
header_1.signature = { header_1.signature = {
let message = header_1.signed_root(); let message = header_1.signed_root();
let epoch = slot.epoch(spec.slots_per_epoch); let epoch = slot.epoch(T::slots_per_epoch());
signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) signer(proposer_index, &message[..], epoch, Domain::BeaconProposer)
}; };
header_2.signature = { header_2.signature = {
let message = header_2.signed_root(); let message = header_2.signed_root();
let epoch = slot.epoch(spec.slots_per_epoch); let epoch = slot.epoch(T::slots_per_epoch());
signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) signer(proposer_index, &message[..], epoch, Domain::BeaconProposer)
}; };
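A sketch of the new turbofish call: the `EthSpec` arrives as a type parameter instead of a `&ChainSpec` argument. The wrapper function and the import paths are illustrative assumptions.

use types::test_utils::TestingProposerSlashingBuilder;
use types::{Domain, Epoch, MinimalEthSpec, ProposerSlashing, Signature};

// Hypothetical wrapper: the slots-per-epoch constant comes from `MinimalEthSpec`,
// not from a spec argument.
fn build_double_vote<F>(proposer_index: u64, signer: F) -> ProposerSlashing
where
    F: Fn(u64, &[u8], Epoch, Domain) -> Signature,
{
    TestingProposerSlashingBuilder::double_vote::<MinimalEthSpec, _>(proposer_index, signer)
}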

View File

@ -29,10 +29,10 @@ impl TestingTransferBuilder {
/// Signs the transfer. /// Signs the transfer.
/// ///
/// The keypair must match that of the `from` validator index. /// The keypair must match that of the `from` validator index.
pub fn sign(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) { pub fn sign<T: EthSpec>(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) {
self.transfer.pubkey = keypair.pk; self.transfer.pubkey = keypair.pk;
let message = self.transfer.signed_root(); let message = self.transfer.signed_root();
let epoch = self.transfer.slot.epoch(spec.slots_per_epoch); let epoch = self.transfer.slot.epoch(T::slots_per_epoch());
let domain = spec.get_domain(epoch, Domain::Transfer, fork); let domain = spec.get_domain(epoch, Domain::Transfer, fork);
self.transfer.signature = Signature::new(&message, domain, &keypair.sk); self.transfer.signature = Signature::new(&message, domain, &keypair.sk);

View File

@ -14,5 +14,5 @@ pub use rand::{
RngCore, RngCore,
{prng::XorShiftRng, SeedableRng}, {prng::XorShiftRng, SeedableRng},
}; };
pub use serde_utils::{fork_from_hex_str, graffiti_from_hex_str, u8_from_hex_str}; pub use serde_utils::{fork_from_hex_str, graffiti_from_hex_str, u8_from_hex_str, u8_to_hex_str};
pub use test_random::TestRandom; pub use test_random::TestRandom;

View File

@ -1,5 +1,5 @@
use serde::de::Error; use serde::de::Error;
use serde::{Deserialize, Deserializer}; use serde::{Deserialize, Deserializer, Serializer};
pub const FORK_BYTES_LEN: usize = 4; pub const FORK_BYTES_LEN: usize = 4;
pub const GRAFFITI_BYTES_LEN: usize = 32; pub const GRAFFITI_BYTES_LEN: usize = 32;
@ -13,6 +13,17 @@ where
u8::from_str_radix(&s.as_str()[2..], 16).map_err(D::Error::custom) u8::from_str_radix(&s.as_str()[2..], 16).map_err(D::Error::custom)
} }
#[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref.
pub fn u8_to_hex_str<S>(byte: &u8, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut hex: String = "0x".to_string();
hex.push_str(&hex::encode(&[*byte]));
serializer.serialize_str(&hex)
}
pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error> pub fn fork_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; FORK_BYTES_LEN], D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
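A sketch of the symmetric hex handling this enables on byte fields, mirroring the `bls_withdrawal_prefix_byte` attributes added to `ChainSpec`. The struct and the `types::test_utils` re-export path are assumptions.

use serde_derive::{Deserialize, Serialize};
use types::test_utils::{u8_from_hex_str, u8_to_hex_str};

// Hypothetical config struct: the byte round-trips as "0x00"-style hex in both directions.
#[derive(Serialize, Deserialize)]
struct WithdrawalPrefix {
    #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")]
    prefix_byte: u8,
}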

View File

@ -0,0 +1,13 @@
[package]
name = "eth2_config"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
clap = "2.32.0"
dirs = "1.0.3"
serde = "1.0"
serde_derive = "1.0"
toml = "^0.5"
types = { path = "../../types" }

View File

@ -0,0 +1,119 @@
use clap::ArgMatches;
use serde_derive::{Deserialize, Serialize};
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::path::PathBuf;
use std::time::SystemTime;
use types::ChainSpec;
/// The core configuration of a Lighthouse beacon node.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct Eth2Config {
pub spec_constants: String,
pub spec: ChainSpec,
}
impl Default for Eth2Config {
fn default() -> Self {
Self {
spec_constants: "minimal".to_string(),
spec: ChainSpec::minimal(),
}
}
}
impl Eth2Config {
pub fn mainnet() -> Self {
Self {
spec_constants: "mainnet".to_string(),
spec: ChainSpec::mainnet(),
}
}
pub fn minimal() -> Self {
Self {
spec_constants: "minimal".to_string(),
spec: ChainSpec::minimal(),
}
}
}
impl Eth2Config {
/// Apply the following arguments to `self`, replacing values if they are specified in `args`.
///
/// Returns an error if arguments are obviously invalid. May succeed even if some values are
/// invalid.
pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
if args.is_present("recent-genesis") {
self.spec.genesis_time = recent_genesis_time()
}
Ok(())
}
}
/// Returns the system time, mod 30 minutes.
///
/// Used for easily creating testnets.
fn recent_genesis_time() -> u64 {
let now = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs();
let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0);
// genesis is now the last 30 minute block.
now - secs_after_last_period
}
/// Write a configuration to file.
pub fn write_to_file<T>(path: PathBuf, config: &T) -> Result<(), String>
where
T: Default + serde::de::DeserializeOwned + serde::Serialize,
{
if let Ok(mut file) = File::create(path.clone()) {
let toml_encoded = toml::to_string(&config).map_err(|e| {
format!(
"Failed to write configuration to {:?}. Error: {:?}",
path, e
)
})?;
file.write_all(toml_encoded.as_bytes())
.unwrap_or_else(|_| panic!("Unable to write to {:?}", path));
}
Ok(())
}
/// Loads a `ClientConfig` from file. If unable to load from file, generates a default
/// configuration and saves that as a sample file.
pub fn read_from_file<T>(path: PathBuf) -> Result<Option<T>, String>
where
T: Default + serde::de::DeserializeOwned + serde::Serialize,
{
if let Ok(mut file) = File::open(path.clone()) {
let mut contents = String::new();
file.read_to_string(&mut contents)
.map_err(|e| format!("Unable to read {:?}. Error: {:?}", path, e))?;
let config = toml::from_str(&contents)
.map_err(|e| format!("Unable to parse {:?}: {:?}", path, e))?;
Ok(Some(config))
} else {
Ok(None)
}
}
pub fn get_data_dir(args: &ArgMatches, default_data_dir: PathBuf) -> Result<PathBuf, &'static str> {
if let Some(data_dir) = args.value_of("data_dir") {
Ok(PathBuf::from(data_dir))
} else {
let path = dirs::home_dir()
.ok_or_else(|| "Unable to locate home directory")?
.join(&default_data_dir);
fs::create_dir_all(&path).map_err(|_| "Unable to create data_dir")?;
Ok(path)
}
}
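A sketch of how the new crate's helpers fit together for a config round-trip; the file name is an arbitrary example and error handling is kept minimal.

use eth2_config::{read_from_file, write_to_file, Eth2Config};
use std::path::PathBuf;

fn main() -> Result<(), String> {
    let path = PathBuf::from("eth2-config.toml"); // arbitrary example path

    // Defaults to the "minimal" spec constants, per the `Default` impl above.
    let config = Eth2Config::default();
    write_to_file(path.clone(), &config)?;

    // `read_from_file` returns `Ok(None)` if the file does not exist.
    let loaded: Option<Eth2Config> = read_from_file(path)?;
    assert_eq!(loaded.map(|c| c.spec_constants), Some("minimal".to_string()));
    Ok(())
}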

View File

@ -100,7 +100,7 @@ where
} }
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> { fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
if bytes.len() == 0 { if bytes.is_empty() {
Ok(FixedLenVec::from(vec![])) Ok(FixedLenVec::from(vec![]))
} else if T::is_ssz_fixed_len() { } else if T::is_ssz_fixed_len() {
bytes bytes

View File

@ -6,9 +6,14 @@ pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotC
use std::time::Duration; use std::time::Duration;
pub use types::Slot; pub use types::Slot;
pub trait SlotClock: Send + Sync { pub trait SlotClock: Send + Sync + Sized {
type Error; type Error;
/// Create a new `SlotClock`.
///
/// Returns an Error if `slot_duration_seconds == 0`.
fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self;
fn present_slot(&self) -> Result<Option<Slot>, Self::Error>; fn present_slot(&self) -> Result<Option<Slot>, Self::Error>;
fn duration_to_next_slot(&self) -> Result<Option<Duration>, Self::Error>; fn duration_to_next_slot(&self) -> Result<Option<Duration>, Self::Error>;
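A sketch of what the added constructor enables: code generic over `SlotClock` can now build a clock from genesis parameters. The crate name `slot_clock` and the helper function are assumptions.

use slot_clock::{Slot, SlotClock};

// Hypothetical helper: any implementation can be constructed generically now
// that `new` is part of the trait.
fn clock_from_genesis<C: SlotClock>(genesis_seconds: u64, slot_duration_seconds: u64) -> C {
    C::new(Slot::new(0), genesis_seconds, slot_duration_seconds)
}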

Some files were not shown because too many files have changed in this diff.