diff --git a/.gitignore b/.gitignore
index 346ef9afa..6b8d4ab21 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,5 @@ Cargo.lock
*.pk
*.sk
*.raw_keypairs
+flamegraph.svg
+perf.data*
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 000000000..b21fd7ba9
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,36 @@
+# Adapted from https://users.rust-lang.org/t/my-gitlab-config-docs-tests/16396
+
+image: 'sigp/lighthouse:latest'
+
+stages:
+ - test
+ - document
+
+variables:
+ CARGO_HOME: /cache/cargocache
+
+check-fmt:
+ stage: test
+ script:
+ - cargo build --manifest-path protos/Cargo.toml
+ - cargo fmt --all -- --check
+
+test-dev:
+ stage: test
+ script:
+ - cargo test --verbose --all
+
+test-release:
+ stage: test
+ script:
+ - cargo test --verbose --all --release
+
+documentation:
+ stage: document
+ script:
+ - cargo doc --no-deps
+ - aws s3 sync target/doc/ s3://lighthouse-docs.sigmaprime.io/ --exclude '.lock' --delete
+ # Configure the below when we want to have a default page (and update S3 bucket index).
+ # - echo '' > public/index.html
+ only:
+ - master
diff --git a/.travis.yml b/.travis.yml
index e725aa0ba..3662e17cf 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,7 @@
language: rust
+cache:
+ directories:
+ - /home/travis/.cargo
before_install:
- curl -OL https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-linux-x86_64.zip
- unzip protoc-3.4.0-linux-x86_64.zip -d protoc3
@@ -7,15 +10,8 @@ before_install:
- sudo chown $USER /usr/local/bin/protoc
- sudo chown -R $USER /usr/local/include/google
script:
- - cargo build --verbose --all
- - cargo build --verbose --release --all
- - cargo test --verbose --all
- - cargo test --verbose --release --all
- - cargo fmt --all -- --check
- # No clippy until later...
- #- cargo clippy
+ - cargo build --verbose --all --release
rust:
- - stable
- beta
- nightly
matrix:
@@ -24,4 +20,3 @@ matrix:
fast_finish: true
install:
- rustup component add rustfmt
- - rustup component add clippy
diff --git a/Cargo.toml b/Cargo.toml
index 5c9593f5a..893189941 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,14 +1,13 @@
[workspace]
members = [
- "eth2/attester",
- "eth2/block_proposer",
"eth2/fork_choice",
"eth2/operation_pool",
"eth2/state_processing",
- "eth2/state_processing/yaml_utils",
"eth2/types",
"eth2/utils/bls",
"eth2/utils/boolean-bitfield",
+ "eth2/utils/cached_tree_hash",
+ "eth2/utils/fixed_len_vec",
"eth2/utils/hashing",
"eth2/utils/honey-badger-split",
"eth2/utils/merkle_proof",
@@ -18,6 +17,8 @@ members = [
"eth2/utils/ssz",
"eth2/utils/ssz_derive",
"eth2/utils/swap_or_not_shuffle",
+ "eth2/utils/tree_hash",
+ "eth2/utils/tree_hash_derive",
"eth2/utils/fisher_yates_shuffle",
"eth2/utils/test_random_derive",
"beacon_node",
@@ -28,7 +29,6 @@ members = [
"beacon_node/rpc",
"beacon_node/version",
"beacon_node/beacon_chain",
- "beacon_node/beacon_chain/test_harness",
"protos",
"validator_client",
"account_manager",
diff --git a/Dockerfile b/Dockerfile
index 6691efa97..57f677b78 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,6 +1,6 @@
FROM rust:latest
-RUN apt-get update && apt-get install -y clang libclang-dev cmake build-essential git unzip autoconf libtool
+RUN apt-get update && apt-get install -y clang libclang-dev cmake build-essential git unzip autoconf libtool awscli
RUN git clone https://github.com/google/protobuf.git && \
cd protobuf && \
@@ -14,8 +14,8 @@ RUN git clone https://github.com/google/protobuf.git && \
rm -r protobuf
-RUN mkdir /cargocache && chmod -R ugo+rwX /cargocache
+RUN mkdir -p /cache/cargocache && chmod -R ugo+rwX /cache/cargocache
-ENV CARGO_HOME /cargocache
+ENV CARGO_HOME /cache/cargocache
RUN rustup component add rustfmt clippy
diff --git a/Jenkinsfile b/Jenkinsfile
deleted file mode 100644
index d12189941..000000000
--- a/Jenkinsfile
+++ /dev/null
@@ -1,29 +0,0 @@
-pipeline {
- agent {
- dockerfile {
- filename 'Dockerfile'
- args '-v cargo-cache:/cargocache:rw -e "CARGO_HOME=/cargocache"'
- }
- }
- stages {
- stage('Build') {
- steps {
- sh 'cargo build --verbose --all'
- sh 'cargo build --verbose --all --release'
- }
- }
- stage('Check') {
- steps {
- sh 'cargo fmt --all -- --check'
- // No clippy until later...
- //sh 'cargo clippy'
- }
- }
- stage('Test') {
- steps {
- sh 'cargo test --verbose --all'
- sh 'cargo test --verbose --all --release'
- }
- }
- }
-}
diff --git a/README.md b/README.md
index 7727154e7..879f9b8fe 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,13 @@
# Lighthouse: an Ethereum Serenity client
-[![Build Status](https://travis-ci.org/sigp/lighthouse.svg?branch=master)](https://travis-ci.org/sigp/lighthouse) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sigp/lighthouse?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Gitter Badge]][Gitter Link]
+
+[Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg
+[Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines
+[Gitter Badge]: https://badges.gitter.im/Join%20Chat.svg
+[Gitter Link]: https://gitter.im/sigp/lighthouse
+[Doc Status]: https://img.shields.io/badge/docs-master-blue.svg
+[Doc Link]: http://lighthouse-docs.sigmaprime.io/
A work-in-progress, open-source implementation of the Serenity Beacon
Chain, maintained by Sigma Prime.
@@ -24,6 +31,7 @@ present-Ethereum functionality.
- [About Lighthouse](docs/lighthouse.md): Goals, Ideology and Ethos surrounding
this implementation.
- [What is Ethereum Serenity](docs/serenity.md): an introduction to Ethereum Serenity.
+- [Lighthouse Technical Documentation](http://lighthouse-docs.sigmaprime.io/): The Rust generated documentation, updated regularly.
If you'd like some background on Sigma Prime, please see the [Lighthouse Update
\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or the
diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml
index c26d4b70a..7b561869a 100644
--- a/account_manager/Cargo.toml
+++ b/account_manager/Cargo.toml
@@ -11,3 +11,4 @@ slog = "^2.2.3"
slog-term = "^2.4.0"
slog-async = "^2.3.0"
validator_client = { path = "../validator_client" }
+types = { path = "../eth2/types" }
diff --git a/account_manager/README.md b/account_manager/README.md
index bf8891f40..6762b937f 100644
--- a/account_manager/README.md
+++ b/account_manager/README.md
@@ -1,6 +1,6 @@
-# Lighthouse Accounts Manager
+# Lighthouse Account Manager
-The accounts manager (AM) is a stand-alone binary which allows
+The account manager (AM) is a stand-alone binary which allows
users to generate and manage the cryptographic keys necessary to
interact with Ethereum Serenity.
@@ -21,4 +21,14 @@ staking on Ethereum 1.x (TPD)
The AM is not a service, and does not run continuously, nor does it
interact with any running services.
It is intended to be executed separately from other Lighthouse binaries
-and produce files which can be consumed by them.
\ No newline at end of file
+and produce files which can be consumed by them.
+
+## Usage
+
+Simply run `./account_manager generate` to generate a new random private key,
+which will be automatically saved to the correct directory.
+
+If you prefer to use our "deterministic" keys for testing purposes, simply
+run `./account_manager generate_deterministic -i <index>`, where `<index>` is
+the validator index for the key. This will reliably produce the same key each time
+and save it to the directory.
\ No newline at end of file
diff --git a/account_manager/src/main.rs b/account_manager/src/main.rs
index 42c78aaea..c30b5b103 100644
--- a/account_manager/src/main.rs
+++ b/account_manager/src/main.rs
@@ -2,6 +2,7 @@ use bls::Keypair;
use clap::{App, Arg, SubCommand};
use slog::{debug, info, o, Drain};
use std::path::PathBuf;
+use types::test_utils::generate_deterministic_keypair;
use validator_client::Config as ValidatorClientConfig;
fn main() {
@@ -29,6 +30,21 @@ fn main() {
.version("0.0.1")
.author("Sigma Prime "),
)
+ .subcommand(
+ SubCommand::with_name("generate_deterministic")
+ .about("Generates a deterministic validator private key FOR TESTING")
+ .version("0.0.1")
+ .author("Sigma Prime <sigp@sigmaprime.io>")
+ .arg(
+ Arg::with_name("validator index")
+ .long("index")
+ .short("i")
+ .value_name("index")
+ .help("The index of the validator, for which the test key is generated")
+ .takes_value(true)
+ .required(true),
+ ),
+ )
.get_matches();
let config = ValidatorClientConfig::parse_args(&matches, &log)
@@ -51,6 +67,23 @@ fn main() {
key_path.to_string_lossy()
);
}
+ ("generate_deterministic", Some(gen_d_matches)) => {
+ let validator_index = gen_d_matches
+ .value_of("validator index")
+ .expect("Validator index required.")
+ .parse::()
+ .expect("Invalid validator index.") as usize;
+ let keypair = generate_deterministic_keypair(validator_index);
+ let key_path: PathBuf = config
+ .save_key(&keypair)
+ .expect("Unable to save newly generated deterministic private key.");
+ debug!(
+ log,
+ "Deterministic Keypair generated {:?}, saved to: {:?}",
+ keypair.identifier(),
+ key_path.to_string_lossy()
+ );
+ }
_ => panic!(
"The account manager must be run with a subcommand. See help for more information."
),
diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml
index 55d4bacfd..34b6e11c6 100644
--- a/beacon_node/beacon_chain/Cargo.toml
+++ b/beacon_node/beacon_chain/Cargo.toml
@@ -5,7 +5,6 @@ authors = ["Paul Hauner ", "Age Manning {
+pub struct BeaconChain {
pub block_store: Arc>,
pub state_store: Arc>,
pub slot_clock: U,
- pub op_pool: OperationPool,
- canonical_head: RwLock,
- finalized_head: RwLock,
- pub state: RwLock,
+ pub op_pool: OperationPool,
+ canonical_head: RwLock>,
+ finalized_head: RwLock>,
+ pub state: RwLock>,
pub spec: ChainSpec,
pub fork_choice: RwLock,
}
-impl BeaconChain
+impl BeaconChain
where
T: ClientDB,
U: SlotClock,
F: ForkChoice,
+ E: EthSpec,
{
/// Instantiate a new Beacon Chain, from genesis.
pub fn from_genesis(
state_store: Arc>,
block_store: Arc>,
slot_clock: U,
- mut genesis_state: BeaconState,
+ mut genesis_state: BeaconState,
genesis_block: BeaconBlock,
spec: ChainSpec,
fork_choice: F,
@@ -190,7 +191,6 @@ where
count: usize,
skip: usize,
) -> Result, Error> {
- let spec = &self.spec;
let step_by = Slot::from(skip + 1);
let mut roots: Vec = vec![];
@@ -218,7 +218,7 @@ where
//
// If we get `SlotOutOfBounds` error, load the oldest available historic
// state from the DB.
- match state.get_block_root(slot, spec) {
+ match state.get_block_root(slot) {
Ok(root) => {
if slot < earliest_slot {
break;
@@ -230,9 +230,9 @@ where
Err(BeaconStateError::SlotOutOfBounds) => {
// Read the earliest historic state in the current slot.
let earliest_historic_slot =
- state.slot - Slot::from(spec.slots_per_historical_root);
+ state.slot - Slot::from(E::SlotsPerHistoricalRoot::to_usize());
// Load the earlier state from disk.
- let new_state_root = state.get_state_root(earliest_historic_slot, spec)?;
+ let new_state_root = state.get_state_root(earliest_historic_slot)?;
// Break if the DB is unable to load the state.
state = match self.state_store.get_deserialized(&new_state_root) {
@@ -270,7 +270,7 @@ where
&self,
new_beacon_block: BeaconBlock,
new_beacon_block_root: Hash256,
- new_beacon_state: BeaconState,
+ new_beacon_state: BeaconState,
new_beacon_state_root: Hash256,
) {
debug!(
@@ -292,7 +292,7 @@ where
/// It is important to note that the `beacon_state` returned may not match the present slot. It
/// is the state as it was when the head block was received, which could be some slots prior to
/// now.
- pub fn head(&self) -> RwLockReadGuard {
+ pub fn head(&self) -> RwLockReadGuard> {
self.canonical_head.read()
}
@@ -302,9 +302,7 @@ where
/// state and calling `catchup_state` as it will not result in an old state being installed and
/// then having it iteratively updated -- in such a case it's possible for another thread to
/// find the state at an old slot.
- pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> {
- let latest_block_header = self.head().beacon_block.block_header();
-
+ pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> {
let present_slot = match self.slot_clock.present_slot() {
Ok(Some(slot)) => slot,
_ => return Err(Error::UnableToReadSlot),
@@ -312,7 +310,7 @@ where
// If required, transition the new state to the present slot.
for _ in state.slot.as_u64()..present_slot.as_u64() {
- per_slot_processing(&mut state, &latest_block_header, &self.spec)?;
+ per_slot_processing(&mut state, &self.spec)?;
}
state.build_all_caches(&self.spec)?;
@@ -324,8 +322,6 @@ where
/// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`.
pub fn catchup_state(&self) -> Result<(), Error> {
- let latest_block_header = self.head().beacon_block.block_header();
-
let present_slot = match self.slot_clock.present_slot() {
Ok(Some(slot)) => slot,
_ => return Err(Error::UnableToReadSlot),
@@ -339,7 +335,7 @@ where
state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &self.spec)?;
state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &self.spec)?;
- per_slot_processing(&mut *state, &latest_block_header, &self.spec)?;
+ per_slot_processing(&mut *state, &self.spec)?;
}
state.build_all_caches(&self.spec)?;
@@ -361,7 +357,7 @@ where
&self,
new_beacon_block: BeaconBlock,
new_beacon_block_root: Hash256,
- new_beacon_state: BeaconState,
+ new_beacon_state: BeaconState,
new_beacon_state_root: Hash256,
) {
let mut finalized_head = self.finalized_head.write();
@@ -375,7 +371,7 @@ where
/// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen,
/// indirectly, by the fork-choice rule).
- pub fn finalized_head(&self) -> RwLockReadGuard {
+ pub fn finalized_head(&self) -> RwLockReadGuard> {
self.finalized_head.read()
}
@@ -497,17 +493,14 @@ where
} else {
// If the current head block is not from this slot, use the slot from the previous
// epoch.
- *self.state.read().get_block_root(
- current_epoch_start_slot - self.spec.slots_per_epoch,
- &self.spec,
- )?
+ *self
+ .state
+ .read()
+ .get_block_root(current_epoch_start_slot - self.spec.slots_per_epoch)?
}
} else {
// If we're not on the first slot of the epoch.
- *self
- .state
- .read()
- .get_block_root(current_epoch_start_slot, &self.spec)?
+ *self.state.read().get_block_root(current_epoch_start_slot)?
};
Ok(AttestationData {
@@ -617,9 +610,8 @@ where
// Transition the parent state to the block slot.
let mut state = parent_state;
- let previous_block_header = parent_block.block_header();
for _ in state.slot.as_u64()..block.slot.as_u64() {
- if let Err(e) = per_slot_processing(&mut state, &previous_block_header, &self.spec) {
+ if let Err(e) = per_slot_processing(&mut state, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::SlotProcessingError(e),
));
@@ -672,7 +664,7 @@ where
pub fn produce_block(
&self,
randao_reveal: Signature,
- ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
+ ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
debug!("Producing block at slot {}...", self.state.read().slot);
let mut state = self.state.read().clone();
@@ -682,7 +674,7 @@ where
trace!("Finding attestations for new block...");
let previous_block_root = *state
- .get_block_root(state.slot - 1, &self.spec)
+ .get_block_root(state.slot - 1)
.map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?;
let (proposer_slashings, attester_slashings) =
@@ -767,7 +759,7 @@ where
///
/// This could be a very expensive operation and should only be done in testing/analysis
/// activities.
- pub fn chain_dump(&self) -> Result, Error> {
+ pub fn chain_dump(&self) -> Result>, Error> {
let mut dump = vec![];
let mut last_slot = CheckPoint {
diff --git a/beacon_node/beacon_chain/src/checkpoint.rs b/beacon_node/beacon_chain/src/checkpoint.rs
index 78227e5c8..c069ac104 100644
--- a/beacon_node/beacon_chain/src/checkpoint.rs
+++ b/beacon_node/beacon_chain/src/checkpoint.rs
@@ -1,22 +1,22 @@
use serde_derive::Serialize;
-use types::{BeaconBlock, BeaconState, Hash256};
+use types::{BeaconBlock, BeaconState, EthSpec, Hash256};
/// Represents some block and it's associated state. Generally, this will be used for tracking the
/// head, justified head and finalized head.
#[derive(Clone, Serialize, PartialEq, Debug)]
-pub struct CheckPoint {
+pub struct CheckPoint {
pub beacon_block: BeaconBlock,
pub beacon_block_root: Hash256,
- pub beacon_state: BeaconState,
+ pub beacon_state: BeaconState,
pub beacon_state_root: Hash256,
}
-impl CheckPoint {
+impl CheckPoint {
/// Create a new checkpoint.
pub fn new(
beacon_block: BeaconBlock,
beacon_block_root: Hash256,
- beacon_state: BeaconState,
+ beacon_state: BeaconState,
beacon_state_root: Hash256,
) -> Self {
Self {
@@ -32,7 +32,7 @@ impl CheckPoint {
&mut self,
beacon_block: BeaconBlock,
beacon_block_root: Hash256,
- beacon_state: BeaconState,
+ beacon_state: BeaconState,
beacon_state_root: Hash256,
) {
self.beacon_block = beacon_block;
diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs
index 0951e06fb..83b60a4f7 100644
--- a/beacon_node/beacon_chain/src/initialise.rs
+++ b/beacon_node/beacon_chain/src/initialise.rs
@@ -7,18 +7,25 @@ use db::stores::{BeaconBlockStore, BeaconStateStore};
use db::{DiskDB, MemoryDB};
use fork_choice::BitwiseLMDGhost;
use slot_clock::SystemTimeSlotClock;
-use ssz::TreeHash;
use std::path::PathBuf;
use std::sync::Arc;
+use tree_hash::TreeHash;
use types::test_utils::TestingBeaconStateBuilder;
-use types::{BeaconBlock, ChainSpec, Hash256};
+use types::{BeaconBlock, ChainSpec, FewValidatorsEthSpec, FoundationEthSpec, Hash256};
//TODO: Correct this for prod
//TODO: Account for historical db
pub fn initialise_beacon_chain(
spec: &ChainSpec,
db_name: Option<&PathBuf>,
-) -> Arc>> {
+) -> Arc<
+ BeaconChain<
+ DiskDB,
+ SystemTimeSlotClock,
+ BitwiseLMDGhost,
+ FoundationEthSpec,
+ >,
+> {
// set up the db
let db = Arc::new(DiskDB::open(
db_name.expect("Database directory must be included"),
@@ -32,7 +39,7 @@ pub fn initialise_beacon_chain(
let (genesis_state, _keypairs) = state_builder.build();
let mut genesis_block = BeaconBlock::empty(&spec);
- genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
+ genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());
// Slot clock
let slot_clock = SystemTimeSlotClock::new(
@@ -64,7 +71,14 @@ pub fn initialise_beacon_chain(
pub fn initialise_test_beacon_chain(
spec: &ChainSpec,
_db_name: Option<&PathBuf>,
-) -> Arc>> {
+) -> Arc<
+ BeaconChain<
+ MemoryDB,
+ SystemTimeSlotClock,
+ BitwiseLMDGhost,
+ FewValidatorsEthSpec,
+ >,
+> {
let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
@@ -73,7 +87,7 @@ pub fn initialise_test_beacon_chain(
let (genesis_state, _keypairs) = state_builder.build();
let mut genesis_block = BeaconBlock::empty(spec);
- genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
+ genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());
// Slot clock
let slot_clock = SystemTimeSlotClock::new(
diff --git a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs
index 5c5477e55..f7ff3cdae 100644
--- a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs
+++ b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs
@@ -5,19 +5,20 @@ use db::{
};
use fork_choice::BitwiseLMDGhost;
use slot_clock::TestingSlotClock;
-use ssz::TreeHash;
use std::sync::Arc;
-use types::test_utils::TestingBeaconStateBuilder;
+use tree_hash::TreeHash;
use types::*;
+use types::{test_utils::TestingBeaconStateBuilder, EthSpec, FewValidatorsEthSpec};
-type TestingBeaconChain = BeaconChain>;
+type TestingBeaconChain =
+ BeaconChain, E>;
-pub struct TestingBeaconChainBuilder {
- state_builder: TestingBeaconStateBuilder,
+pub struct TestingBeaconChainBuilder {
+ state_builder: TestingBeaconStateBuilder,
}
-impl TestingBeaconChainBuilder {
- pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain {
+impl TestingBeaconChainBuilder {
+ pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain {
let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
@@ -27,7 +28,7 @@ impl TestingBeaconChainBuilder {
let (genesis_state, _keypairs) = self.state_builder.build();
let mut genesis_block = BeaconBlock::empty(&spec);
- genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
+ genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());
// Create the Beacon Chain
BeaconChain::from_genesis(
@@ -43,8 +44,8 @@ impl TestingBeaconChainBuilder {
}
}
-impl From for TestingBeaconChainBuilder {
- fn from(state_builder: TestingBeaconStateBuilder) -> TestingBeaconChainBuilder {
+impl From> for TestingBeaconChainBuilder {
+ fn from(state_builder: TestingBeaconStateBuilder) -> TestingBeaconChainBuilder {
TestingBeaconChainBuilder { state_builder }
}
}
diff --git a/beacon_node/beacon_chain/test_harness/Cargo.toml b/beacon_node/beacon_chain/test_harness/Cargo.toml
deleted file mode 100644
index 50d154732..000000000
--- a/beacon_node/beacon_chain/test_harness/Cargo.toml
+++ /dev/null
@@ -1,42 +0,0 @@
-[package]
-name = "test_harness"
-version = "0.1.0"
-authors = ["Paul Hauner "]
-edition = "2018"
-
-[[bin]]
-name = "test_harness"
-path = "src/bin.rs"
-
-[lib]
-name = "test_harness"
-path = "src/lib.rs"
-
-[dev-dependencies]
-state_processing = { path = "../../../eth2/state_processing" }
-
-[dependencies]
-attester = { path = "../../../eth2/attester" }
-beacon_chain = { path = "../../beacon_chain" }
-block_proposer = { path = "../../../eth2/block_proposer" }
-bls = { path = "../../../eth2/utils/bls" }
-boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" }
-clap = "2.32.0"
-db = { path = "../../db" }
-parking_lot = "0.7"
-failure = "0.1"
-failure_derive = "0.1"
-fork_choice = { path = "../../../eth2/fork_choice" }
-hashing = { path = "../../../eth2/utils/hashing" }
-int_to_bytes = { path = "../../../eth2/utils/int_to_bytes" }
-log = "0.4"
-env_logger = "0.6.0"
-rayon = "1.0"
-serde = "1.0"
-serde_derive = "1.0"
-serde_json = "1.0"
-serde_yaml = "0.8"
-slot_clock = { path = "../../../eth2/utils/slot_clock" }
-ssz = { path = "../../../eth2/utils/ssz" }
-types = { path = "../../../eth2/types" }
-yaml-rust = "0.4.2"
diff --git a/beacon_node/beacon_chain/test_harness/README.md b/beacon_node/beacon_chain/test_harness/README.md
deleted file mode 100644
index 9dfd90d60..000000000
--- a/beacon_node/beacon_chain/test_harness/README.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# Test Harness
-
-Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
-
-This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
-directly to the `BeaconChain` via an `Arc`.
-
-The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
-instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
-producing blocks and attestations.
-
-The crate consists of a library and binary, examples for using both are
-described below.
-
-## YAML
-
-Both the library and the binary are capable of parsing tests from a YAML file,
-in fact this is the sole purpose of the binary.
-
-You can find YAML test cases [here](specs/). An example is included below:
-
-```yaml
-title: Validator Registry Tests
-summary: Tests deposit and slashing effects on validator registry.
-test_suite: validator_registry
-fork: tchaikovsky
-version: 1.0
-test_cases:
- - config:
- slots_per_epoch: 64
- deposits_for_chain_start: 1000
- num_slots: 64
- skip_slots: [2, 3]
- deposits:
- # At slot 1, create a new validator deposit of 32 ETH.
- - slot: 1
- amount: 32
- # Trigger more deposits...
- - slot: 3
- amount: 32
- - slot: 5
- amount: 32
- proposer_slashings:
- # At slot 2, trigger a proposer slashing for validator #42.
- - slot: 2
- validator_index: 42
- # Trigger another slashing...
- - slot: 8
- validator_index: 13
- attester_slashings:
- # At slot 2, trigger an attester slashing for validators #11 and #12.
- - slot: 2
- validator_indices: [11, 12]
- # Trigger another slashing...
- - slot: 5
- validator_indices: [14]
- results:
- num_skipped_slots: 2
- states:
- - slot: 63
- num_validators: 1003
- slashed_validators: [11, 12, 13, 14, 42]
- exited_validators: []
-
-```
-
-Thanks to [prsym](http://github.com/prysmaticlabs/prysm) for coming up with the
-base YAML format.
-
-### Notes
-
-Wherever `slot` is used, it is actually the "slot height", or slots since
-genesis. This allows the tests to disregard the `GENESIS_EPOCH`.
-
-### Differences from Prysmatic's format
-
-1. The detail for `deposits`, `proposer_slashings` and `attester_slashings` is
- ommitted from the test specification. It assumed they should be valid
- objects.
-2. There is a `states` list in `results` that runs checks against any state
- specified by a `slot` number. This is in contrast to the variables in
- `results` that assume the last (highest) state should be inspected.
-
-#### Reasoning
-
-Respective reasonings for above changes:
-
-1. This removes the concerns of the actual object structure from the tests.
- This allows for more variation in the deposits/slashings objects without
- needing to update the tests. Also, it makes it makes it easier to create
- tests.
-2. This gives more fine-grained control over the tests. It allows for checking
- that certain events happened at certain times whilst making the tests only
- slightly more verbose.
-
-_Notes: it may be useful to add an extra field to each slashing type to
-indicate if it should be valid or not. It also may be useful to add an option
-for double-vote/surround-vote attester slashings. The `amount` field was left
-on `deposits` as it changes the behaviour of state significantly._
-
-## Binary Usage Example
-
-Follow these steps to run as a binary:
-
-1. Navigate to the root of this crate (where this readme is located)
-2. Run `$ cargo run --release -- --yaml examples/validator_registry.yaml`
-
-_Note: the `--release` flag builds the binary without all the debugging
-instrumentation. The test is much faster built using `--release`. As is
-customary in cargo, the flags before `--` are passed to cargo and the flags
-after are passed to the binary._
-
-### CLI Options
-
-```
-Lighthouse Test Harness Runner 0.0.1
-Sigma Prime
-Runs `test_harness` using a YAML test_case.
-
-USAGE:
- test_harness --log-level --yaml
-
-FLAGS:
- -h, --help Prints help information
- -V, --version Prints version information
-
-OPTIONS:
- --log-level Logging level. [default: debug] [possible values: error, warn, info, debug, trace]
- --yaml YAML file test_case.
-```
-
-
-## Library Usage Example
-
-```rust
-use test_harness::BeaconChainHarness;
-use types::ChainSpec;
-
-let validator_count = 8;
-let spec = ChainSpec::few_validators();
-
-let mut harness = BeaconChainHarness::new(spec, validator_count);
-
-harness.advance_chain_with_block();
-
-let chain = harness.chain_dump().unwrap();
-
-// One block should have been built on top of the genesis block.
-assert_eq!(chain.len(), 2);
-```
diff --git a/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml b/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml
deleted file mode 100644
index ad9c899cf..000000000
--- a/beacon_node/beacon_chain/test_harness/specs/validator_registry.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
-title: Validator Registry Tests
-summary: Tests deposit and slashing effects on validator registry.
-test_suite: validator_registry
-fork: tchaikovsky
-version: 1.0
-test_cases:
- - config:
- slots_per_epoch: 64
- deposits_for_chain_start: 1000
- num_slots: 64
- skip_slots: [2, 3]
- persistent_committee_period: 0
- deposits:
- # At slot 1, create a new validator deposit of 5 ETH.
- - slot: 1
- amount: 5000000000
- # Trigger more deposits...
- - slot: 3
- amount: 5000000000
- - slot: 5
- amount: 32000000000
- exits:
- # At slot 10, submit an exit for validator #50.
- - slot: 10
- validator_index: 50
- transfers:
- - slot: 6
- from: 1000
- to: 1001
- amount: 5000000000
- proposer_slashings:
- # At slot 2, trigger a proposer slashing for validator #42.
- - slot: 2
- validator_index: 42
- # Trigger another slashing...
- - slot: 8
- validator_index: 13
- attester_slashings:
- # At slot 2, trigger an attester slashing for validators #11 and #12.
- - slot: 2
- validator_indices: [11, 12]
- # Trigger another slashing...
- - slot: 5
- validator_indices: [14]
- results:
- num_skipped_slots: 2
- states:
- - slot: 63
- num_validators: 1003
- num_previous_epoch_attestations: 0
- # slots_per_epoch - attestation_inclusion_delay - skip_slots
- num_current_epoch_attestations: 57
- slashed_validators: [11, 12, 13, 14, 42]
- exited_validators: []
- exit_initiated_validators: [50]
- balances:
- - validator_index: 1000
- comparison: "eq"
- balance: 0
- - validator_index: 1001
- comparison: "eq"
- balance: 10000000000
-
diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs
deleted file mode 100644
index aeb734a4e..000000000
--- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs
+++ /dev/null
@@ -1,350 +0,0 @@
-use super::ValidatorHarness;
-use beacon_chain::{BeaconChain, BlockProcessingOutcome};
-pub use beacon_chain::{BeaconChainError, CheckPoint};
-use db::{
- stores::{BeaconBlockStore, BeaconStateStore},
- MemoryDB,
-};
-use fork_choice::BitwiseLMDGhost;
-use log::debug;
-use rayon::prelude::*;
-use slot_clock::TestingSlotClock;
-use ssz::TreeHash;
-use std::sync::Arc;
-use types::{test_utils::TestingBeaconStateBuilder, *};
-
-type TestingBeaconChain = BeaconChain>;
-
-/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
-/// to it. Each validator is provided a borrow to the beacon chain, where it may read
-/// information and submit blocks/attestations for processing.
-///
-/// This test harness is useful for testing validator and internal state transition logic. It
-/// is not useful for testing that multiple beacon nodes can reach consensus.
-pub struct BeaconChainHarness {
- pub db: Arc,
- pub beacon_chain: Arc,
- pub block_store: Arc>,
- pub state_store: Arc>,
- pub validators: Vec,
- pub spec: Arc,
-}
-
-impl BeaconChainHarness {
- /// Create a new harness with:
- ///
- /// - A keypair, `BlockProducer` and `Attester` for each validator.
- /// - A new BeaconChain struct where the given validators are in the genesis.
- pub fn new(spec: ChainSpec, validator_count: usize) -> Self {
- let state_builder =
- TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
- Self::from_beacon_state_builder(state_builder, spec)
- }
-
- pub fn from_beacon_state_builder(
- state_builder: TestingBeaconStateBuilder,
- spec: ChainSpec,
- ) -> Self {
- let db = Arc::new(MemoryDB::open());
- let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
- let state_store = Arc::new(BeaconStateStore::new(db.clone()));
- let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
- let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
-
- let (mut genesis_state, keypairs) = state_builder.build();
-
- let mut genesis_block = BeaconBlock::empty(&spec);
- genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
-
- genesis_state
- .build_epoch_cache(RelativeEpoch::Previous, &spec)
- .unwrap();
- genesis_state
- .build_epoch_cache(RelativeEpoch::Current, &spec)
- .unwrap();
- genesis_state
- .build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)
- .unwrap();
- genesis_state
- .build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)
- .unwrap();
-
- // Create the Beacon Chain
- let beacon_chain = Arc::new(
- BeaconChain::from_genesis(
- state_store.clone(),
- block_store.clone(),
- slot_clock,
- genesis_state,
- genesis_block,
- spec.clone(),
- fork_choice,
- )
- .unwrap(),
- );
-
- let spec = Arc::new(spec);
-
- debug!("Creating validator producer and attester instances...");
-
- // Spawn the test validator instances.
- let validators: Vec = keypairs
- .iter()
- .map(|keypair| {
- ValidatorHarness::new(keypair.clone(), beacon_chain.clone(), spec.clone())
- })
- .collect();
-
- debug!("Created {} ValidatorHarnesss", validators.len());
-
- Self {
- db,
- beacon_chain,
- block_store,
- state_store,
- validators,
- spec,
- }
- }
-
- /// Move the `slot_clock` for the `BeaconChain` forward one slot.
- ///
- /// This is the equivalent of advancing a system clock forward one `SLOT_DURATION`.
- ///
- /// Returns the new slot.
- pub fn increment_beacon_chain_slot(&mut self) -> Slot {
- let slot = self.beacon_chain.present_slot() + 1;
-
- let nth_slot = slot
- - slot
- .epoch(self.spec.slots_per_epoch)
- .start_slot(self.spec.slots_per_epoch);
- let nth_epoch = slot.epoch(self.spec.slots_per_epoch) - self.spec.genesis_epoch;
- debug!(
- "Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch.).",
- slot,
- slot.epoch(self.spec.slots_per_epoch),
- nth_epoch,
- nth_slot
- );
-
- self.beacon_chain.slot_clock.set_slot(slot.as_u64());
- self.beacon_chain
- .catchup_state()
- .expect("Failed to catch state");
- slot
- }
-
- pub fn gather_attesations(&mut self) -> Vec {
- let present_slot = self.beacon_chain.present_slot();
- let state = self.beacon_chain.state.read();
-
- let mut attestations = vec![];
-
- for committee in state
- .get_crosslink_committees_at_slot(present_slot, &self.spec)
- .unwrap()
- {
- for &validator in &committee.committee {
- let duties = state
- .get_attestation_duties(validator, &self.spec)
- .unwrap()
- .expect("Attesting validators by definition have duties");
-
- // Obtain `AttestationData` from the beacon chain.
- let data = self
- .beacon_chain
- .produce_attestation_data(duties.shard)
- .unwrap();
-
- // Produce an aggregate signature with a single signature.
- let aggregate_signature = {
- let message = AttestationDataAndCustodyBit {
- data: data.clone(),
- custody_bit: false,
- }
- .hash_tree_root();
- let domain = self.spec.get_domain(
- state.slot.epoch(self.spec.slots_per_epoch),
- Domain::Attestation,
- &state.fork,
- );
- let sig =
- Signature::new(&message, domain, &self.validators[validator].keypair.sk);
-
- let mut agg_sig = AggregateSignature::new();
- agg_sig.add(&sig);
-
- agg_sig
- };
-
- let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len);
- let custody_bitfield = Bitfield::with_capacity(duties.committee_len);
-
- aggregation_bitfield.set(duties.committee_index, true);
-
- attestations.push(Attestation {
- aggregation_bitfield,
- data,
- custody_bitfield,
- aggregate_signature,
- })
- }
- }
-
- attestations
- }
-
- /// Get the block from the proposer for the slot.
- ///
- /// Note: the validator will only produce it _once per slot_. So, if you call this twice you'll
- /// only get a block once.
- pub fn produce_block(&mut self) -> BeaconBlock {
- let present_slot = self.beacon_chain.present_slot();
-
- let proposer = self.beacon_chain.block_proposer(present_slot).unwrap();
-
- debug!(
- "Producing block from validator #{} for slot {}.",
- proposer, present_slot
- );
-
- // Ensure the validators slot clock is accurate.
- self.validators[proposer].set_slot(present_slot);
-
- self.validators[proposer].produce_block().unwrap()
- }
-
- /// Advances the chain with a BeaconBlock and attestations from all validators.
- ///
- /// This is the ideal scenario for the Beacon Chain, 100% honest participation from
- /// validators.
- pub fn advance_chain_with_block(&mut self) -> BeaconBlock {
- self.increment_beacon_chain_slot();
-
- // Produce a new block.
- let block = self.produce_block();
- debug!("Submitting block for processing...");
- match self.beacon_chain.process_block(block.clone()) {
- Ok(BlockProcessingOutcome::ValidBlock(_)) => {}
- other => panic!("block processing failed with {:?}", other),
- };
- debug!("...block processed by BeaconChain.");
-
- debug!("Producing attestations...");
-
- // Produce new attestations.
- let attestations = self.gather_attesations();
-
- debug!("Processing {} attestations...", attestations.len());
-
- attestations
- .par_iter()
- .enumerate()
- .for_each(|(i, attestation)| {
- self.beacon_chain
- .process_attestation(attestation.clone())
- .unwrap_or_else(|_| panic!("Attestation {} invalid: {:?}", i, attestation));
- });
-
- debug!("Attestations processed.");
-
- block
- }
-
- /// Signs a message using some validators secret key with the `Fork` info from the latest state
- /// of the `BeaconChain`.
- ///
- /// Useful for producing slashable messages and other objects that `BeaconChainHarness` does
- /// not produce naturally.
- pub fn validator_sign(
- &self,
- validator_index: usize,
- message: &[u8],
- epoch: Epoch,
- domain_type: Domain,
- ) -> Option {
- let validator = self.validators.get(validator_index)?;
-
- let domain = self
- .spec
- .get_domain(epoch, domain_type, &self.beacon_chain.state.read().fork);
-
- Some(Signature::new(message, domain, &validator.keypair.sk))
- }
-
- /// Returns the current `Fork` of the `beacon_chain`.
- pub fn fork(&self) -> Fork {
- self.beacon_chain.state.read().fork.clone()
- }
-
- /// Returns the current `epoch` of the `beacon_chain`.
- pub fn epoch(&self) -> Epoch {
- self.beacon_chain
- .state
- .read()
- .slot
- .epoch(self.spec.slots_per_epoch)
- }
-
- /// Returns the keypair for some validator index.
- pub fn validator_keypair(&self, validator_index: usize) -> Option<&Keypair> {
- self.validators
- .get(validator_index)
- .and_then(|v| Some(&v.keypair))
- }
-
- /// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new
- /// `ValidatorHarness` instance for this validator.
- ///
- /// If a new `ValidatorHarness` was created, the validator should become fully operational as
- /// if the validator were created during `BeaconChainHarness` instantiation.
- pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option) {
- self.beacon_chain.process_deposit(deposit).unwrap();
-
- // If a keypair is present, add a new `ValidatorHarness` to the rig.
- if let Some(keypair) = keypair {
- let validator =
- ValidatorHarness::new(keypair, self.beacon_chain.clone(), self.spec.clone());
- self.validators.push(validator);
- }
- }
-
- /// Submit an exit to the `BeaconChain` for inclusion in some block.
- ///
- /// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it
- /// will stop receiving duties from the beacon chain and just do nothing when prompted to
- /// produce/attest.
- pub fn add_exit(&mut self, exit: VoluntaryExit) {
- self.beacon_chain.process_voluntary_exit(exit).unwrap();
- }
-
- /// Submit an transfer to the `BeaconChain` for inclusion in some block.
- pub fn add_transfer(&mut self, transfer: Transfer) {
- self.beacon_chain.process_transfer(transfer).unwrap();
- }
-
- /// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
- pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
- self.beacon_chain
- .process_proposer_slashing(proposer_slashing)
- .unwrap();
- }
-
- /// Submit an attester slashing to the `BeaconChain` for inclusion in some block.
- pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) {
- self.beacon_chain
- .process_attester_slashing(attester_slashing)
- .unwrap();
- }
-
- /// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head.
- pub fn run_fork_choice(&mut self) {
- self.beacon_chain.fork_choice().unwrap()
- }
-
- /// Dump all blocks and states from the canonical beacon chain.
- pub fn chain_dump(&self) -> Result, BeaconChainError> {
- self.beacon_chain.chain_dump()
- }
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/bin.rs b/beacon_node/beacon_chain/test_harness/src/bin.rs
deleted file mode 100644
index 3afc921de..000000000
--- a/beacon_node/beacon_chain/test_harness/src/bin.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use clap::{App, Arg, SubCommand};
-use env_logger::{Builder, Env};
-use gen_keys::gen_keys;
-use run_test::run_test;
-use std::fs;
-use types::test_utils::keypairs_path;
-use types::ChainSpec;
-
-mod beacon_chain_harness;
-mod gen_keys;
-mod run_test;
-mod test_case;
-mod validator_harness;
-
-use validator_harness::ValidatorHarness;
-
-fn main() {
- let validator_file_path = keypairs_path();
-
- let _ = fs::create_dir(validator_file_path.parent().unwrap());
-
- let matches = App::new("Lighthouse Test Harness Runner")
- .version("0.0.1")
- .author("Sigma Prime ")
- .about("Runs `test_harness` using a YAML test_case.")
- .arg(
- Arg::with_name("log")
- .long("log-level")
- .short("l")
- .value_name("LOG_LEVEL")
- .help("Logging level.")
- .possible_values(&["error", "warn", "info", "debug", "trace"])
- .default_value("debug")
- .required(true),
- )
- .arg(
- Arg::with_name("spec")
- .long("spec")
- .short("s")
- .value_name("SPECIFICATION")
- .help("ChainSpec instantiation.")
- .possible_values(&["foundation", "few_validators"])
- .default_value("foundation"),
- )
- .subcommand(
- SubCommand::with_name("run_test")
- .about("Executes a YAML test specification")
- .arg(
- Arg::with_name("yaml")
- .long("yaml")
- .value_name("FILE")
- .help("YAML file test_case.")
- .required(true),
- )
- .arg(
- Arg::with_name("validators_dir")
- .long("validators-dir")
- .short("v")
- .value_name("VALIDATORS_DIR")
- .help("A directory with validator deposits and keypair YAML."),
- ),
- )
- .subcommand(
- SubCommand::with_name("gen_keys")
- .about("Builds a file of BLS keypairs for faster tests.")
- .arg(
- Arg::with_name("validator_count")
- .long("validator_count")
- .short("n")
- .value_name("VALIDATOR_COUNT")
- .help("Number of validators to generate.")
- .required(true),
- )
- .arg(
- Arg::with_name("output_file")
- .long("output_file")
- .short("d")
- .value_name("GENESIS_TIME")
- .help("Output directory for generated YAML.")
- .default_value(validator_file_path.to_str().unwrap()),
- ),
- )
- .get_matches();
-
- if let Some(log_level) = matches.value_of("log") {
- Builder::from_env(Env::default().default_filter_or(log_level)).init();
- }
-
- let _spec = match matches.value_of("spec") {
- Some("foundation") => ChainSpec::foundation(),
- Some("few_validators") => ChainSpec::few_validators(),
- _ => unreachable!(), // Has a default value, should always exist.
- };
-
- if let Some(matches) = matches.subcommand_matches("run_test") {
- run_test(matches);
- }
-
- if let Some(matches) = matches.subcommand_matches("gen_keys") {
- gen_keys(matches);
- }
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/gen_keys.rs b/beacon_node/beacon_chain/test_harness/src/gen_keys.rs
deleted file mode 100644
index abd512423..000000000
--- a/beacon_node/beacon_chain/test_harness/src/gen_keys.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use clap::{value_t, ArgMatches};
-use log::debug;
-use std::path::Path;
-use types::test_utils::{generate_deterministic_keypairs, KeypairsFile};
-
-/// Creates a file containing BLS keypairs.
-pub fn gen_keys(matches: &ArgMatches) {
- let validator_count = value_t!(matches.value_of("validator_count"), usize)
- .expect("Validator count is required argument");
- let output_file = matches
- .value_of("output_file")
- .expect("Output file has a default value.");
-
- let keypairs = generate_deterministic_keypairs(validator_count);
-
- debug!("Writing keypairs to file...");
-
- let keypairs_path = Path::new(output_file);
-
- keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap();
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/lib.rs b/beacon_node/beacon_chain/test_harness/src/lib.rs
deleted file mode 100644
index 0703fd4a5..000000000
--- a/beacon_node/beacon_chain/test_harness/src/lib.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-//! Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
-//!
-//! This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
-//! directly to the `BeaconChain` via an `Arc`.
-//!
-//! The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
-//! instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
-//! producing blocks and attestations.
-//!
-//! Example:
-//! ```
-//! use test_harness::BeaconChainHarness;
-//! use types::ChainSpec;
-//!
-//! let validator_count = 8;
-//! let spec = ChainSpec::few_validators();
-//!
-//! let mut harness = BeaconChainHarness::new(spec, validator_count);
-//!
-//! harness.advance_chain_with_block();
-//!
-//! let chain = harness.chain_dump().unwrap();
-//!
-//! // One block should have been built on top of the genesis block.
-//! assert_eq!(chain.len(), 2);
-//! ```
-
-mod beacon_chain_harness;
-pub mod test_case;
-mod validator_harness;
-
-pub use self::beacon_chain_harness::BeaconChainHarness;
-pub use self::validator_harness::ValidatorHarness;
diff --git a/beacon_node/beacon_chain/test_harness/src/run_test.rs b/beacon_node/beacon_chain/test_harness/src/run_test.rs
deleted file mode 100644
index 4caa299d6..000000000
--- a/beacon_node/beacon_chain/test_harness/src/run_test.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-use crate::test_case::TestCase;
-use clap::ArgMatches;
-use std::{fs::File, io::prelude::*};
-use yaml_rust::YamlLoader;
-
-/// Runs a YAML-specified test case.
-pub fn run_test(matches: &ArgMatches) {
- if let Some(yaml_file) = matches.value_of("yaml") {
- let docs = {
- let mut file = File::open(yaml_file).unwrap();
-
- let mut yaml_str = String::new();
- file.read_to_string(&mut yaml_str).unwrap();
-
- YamlLoader::load_from_str(&yaml_str).unwrap()
- };
-
- for doc in &docs {
- // For each `test_cases` YAML in the document, build a `TestCase`, execute it and
- // assert that the execution result matches the test_case description.
- //
- // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
- // and a new `BeaconChain` is built as per the test_case.
- //
- // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
- // and states in the chain is obtained and checked against the `results` specified in
- // the `test_case`.
- //
- // If any of the expectations in the results are not met, the process
- // panics with a message.
- for test_case in doc["test_cases"].as_vec().unwrap() {
- let test_case = TestCase::from_yaml(test_case);
- test_case.assert_result_valid(test_case.execute())
- }
- }
- }
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs
deleted file mode 100644
index f65b45505..000000000
--- a/beacon_node/beacon_chain/test_harness/src/test_case.rs
+++ /dev/null
@@ -1,312 +0,0 @@
-//! Defines execution and testing specs for a `BeaconChainHarness` instance. Supports loading from
-//! a YAML file.
-
-use crate::beacon_chain_harness::BeaconChainHarness;
-use beacon_chain::CheckPoint;
-use log::{info, warn};
-use ssz::SignedRoot;
-use types::*;
-
-use types::test_utils::*;
-use yaml_rust::Yaml;
-
-mod config;
-mod results;
-mod state_check;
-mod yaml_helpers;
-
-pub use config::Config;
-pub use results::Results;
-pub use state_check::StateCheck;
-
-/// Defines the execution and testing of a `BeaconChainHarness` instantiation.
-///
-/// Typical workflow is:
-///
-/// 1. Instantiate the `TestCase` from YAML: `let test_case = TestCase::from_yaml(&my_yaml);`
-/// 2. Execute the test_case: `let result = test_case.execute();`
-/// 3. Test the results against the test_case: `test_case.assert_result_valid(result);`
-#[derive(Debug)]
-pub struct TestCase {
- /// Defines the execution.
- pub config: Config,
- /// Defines tests to run against the execution result.
- pub results: Results,
-}
-
-/// The result of executing a `TestCase`.
-///
-pub struct ExecutionResult {
- /// The canonical beacon chain generated from the execution.
- pub chain: Vec,
- /// The spec used for execution.
- pub spec: ChainSpec,
-}
-
-impl TestCase {
- /// Load the test case from a YAML document.
- pub fn from_yaml(test_case: &Yaml) -> Self {
- Self {
- results: Results::from_yaml(&test_case["results"]),
- config: Config::from_yaml(&test_case["config"]),
- }
- }
-
- /// Return a `ChainSpec::foundation()`.
- ///
- /// If specified in `config`, returns it with a modified `slots_per_epoch`.
- fn spec(&self) -> ChainSpec {
- let mut spec = ChainSpec::foundation();
-
- if let Some(n) = self.config.slots_per_epoch {
- spec.slots_per_epoch = n;
- }
-
- if let Some(n) = self.config.persistent_committee_period {
- spec.persistent_committee_period = n;
- }
-
- spec
- }
-
- /// Executes the test case, returning an `ExecutionResult`.
- #[allow(clippy::cyclomatic_complexity)]
- pub fn execute(&self) -> ExecutionResult {
- let spec = self.spec();
- let validator_count = self.config.deposits_for_chain_start;
- let slots = self.config.num_slots;
-
- info!(
- "Building BeaconChainHarness with {} validators...",
- validator_count
- );
-
- let mut harness = BeaconChainHarness::new(spec, validator_count);
-
- info!("Starting simulation across {} slots...", slots);
-
- // Start at 1 because genesis counts as a slot.
- for slot_height in 1..slots {
- // Used to ensure that deposits in the same slot have incremental deposit indices.
- let mut deposit_index_offset = 0;
-
- // Feed deposits to the BeaconChain.
- if let Some(ref deposits) = self.config.deposits {
- for (slot, amount) in deposits {
- if *slot == slot_height {
- info!("Including deposit at slot height {}.", slot_height);
- let (deposit, keypair) =
- build_deposit(&harness, *amount, deposit_index_offset);
- harness.add_deposit(deposit, Some(keypair.clone()));
- deposit_index_offset += 1;
- }
- }
- }
-
- // Feed proposer slashings to the BeaconChain.
- if let Some(ref slashings) = self.config.proposer_slashings {
- for (slot, validator_index) in slashings {
- if *slot == slot_height {
- info!(
- "Including proposer slashing at slot height {} for validator #{}.",
- slot_height, validator_index
- );
- let slashing = build_proposer_slashing(&harness, *validator_index);
- harness.add_proposer_slashing(slashing);
- }
- }
- }
-
- // Feed attester slashings to the BeaconChain.
- if let Some(ref slashings) = self.config.attester_slashings {
- for (slot, validator_indices) in slashings {
- if *slot == slot_height {
- info!(
- "Including attester slashing at slot height {} for validators {:?}.",
- slot_height, validator_indices
- );
- let slashing =
- build_double_vote_attester_slashing(&harness, &validator_indices[..]);
- harness.add_attester_slashing(slashing);
- }
- }
- }
-
- // Feed exits to the BeaconChain.
- if let Some(ref exits) = self.config.exits {
- for (slot, validator_index) in exits {
- if *slot == slot_height {
- info!(
- "Including exit at slot height {} for validator {}.",
- slot_height, validator_index
- );
- let exit = build_exit(&harness, *validator_index);
- harness.add_exit(exit);
- }
- }
- }
-
- // Feed transfers to the BeaconChain.
- if let Some(ref transfers) = self.config.transfers {
- for (slot, from, to, amount) in transfers {
- if *slot == slot_height {
- info!(
- "Including transfer at slot height {} from validator {}.",
- slot_height, from
- );
- let transfer = build_transfer(&harness, *from, *to, *amount);
- harness.add_transfer(transfer);
- }
- }
- }
-
- // Build a block or skip a slot.
- match self.config.skip_slots {
- Some(ref skip_slots) if skip_slots.contains(&slot_height) => {
- warn!("Skipping slot at height {}.", slot_height);
- harness.increment_beacon_chain_slot();
- }
- _ => {
- info!("Producing block at slot height {}.", slot_height);
- harness.advance_chain_with_block();
- }
- }
- }
-
- harness.run_fork_choice();
-
- info!("Test execution complete!");
-
- info!("Building chain dump for analysis...");
-
- ExecutionResult {
- chain: harness.chain_dump().expect("Chain dump failed."),
- spec: (*harness.spec).clone(),
- }
- }
-
- /// Checks that the `ExecutionResult` is consistent with the specifications in `self.results`.
- ///
- /// # Panics
- ///
- /// Panics with a message if any result does not match exepectations.
- pub fn assert_result_valid(&self, execution_result: ExecutionResult) {
- info!("Verifying test results...");
- let spec = &execution_result.spec;
-
- if let Some(num_skipped_slots) = self.results.num_skipped_slots {
- assert_eq!(
- execution_result.chain.len(),
- self.config.num_slots as usize - num_skipped_slots,
- "actual skipped slots != expected."
- );
- info!(
- "OK: Chain length is {} ({} skipped slots).",
- execution_result.chain.len(),
- num_skipped_slots
- );
- }
-
- if let Some(ref state_checks) = self.results.state_checks {
- for checkpoint in &execution_result.chain {
- let state = &checkpoint.beacon_state;
-
- for state_check in state_checks {
- let adjusted_state_slot =
- state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch);
-
- if state_check.slot == adjusted_state_slot {
- state_check.assert_valid(state, spec);
- }
- }
- }
- }
- }
-}
-
-/// Builds a `Deposit` this is valid for the given `BeaconChainHarness` at its next slot.
-fn build_transfer(
- harness: &BeaconChainHarness,
- sender: u64,
- recipient: u64,
- amount: u64,
-) -> Transfer {
- let slot = harness.beacon_chain.state.read().slot + 1;
-
- let mut builder = TestingTransferBuilder::new(sender, recipient, amount, slot);
-
- let keypair = harness.validator_keypair(sender as usize).unwrap();
- builder.sign(keypair.clone(), &harness.fork(), &harness.spec);
-
- builder.build()
-}
-
-/// Builds a `Deposit` this is valid for the given `BeaconChainHarness`.
-///
-/// `index_offset` is used to ensure that `deposit.index == state.index` when adding multiple
-/// deposits.
-fn build_deposit(
- harness: &BeaconChainHarness,
- amount: u64,
- index_offset: u64,
-) -> (Deposit, Keypair) {
- let keypair = Keypair::random();
-
- let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount);
- builder.set_index(harness.beacon_chain.state.read().deposit_index + index_offset);
- builder.sign(&keypair, harness.epoch(), &harness.fork(), &harness.spec);
-
- (builder.build(), keypair)
-}
-
-/// Builds a `VoluntaryExit` this is valid for the given `BeaconChainHarness`.
-fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> VoluntaryExit {
- let epoch = harness
- .beacon_chain
- .state
- .read()
- .current_epoch(&harness.spec);
-
- let mut exit = VoluntaryExit {
- epoch,
- validator_index,
- signature: Signature::empty_signature(),
- };
-
- let message = exit.signed_root();
-
- exit.signature = harness
- .validator_sign(validator_index as usize, &message[..], epoch, Domain::Exit)
- .expect("Unable to sign VoluntaryExit");
-
- exit
-}
-
-/// Builds an `AttesterSlashing` for some `validator_indices`.
-///
-/// Signs the message using a `BeaconChainHarness`.
-fn build_double_vote_attester_slashing(
- harness: &BeaconChainHarness,
- validator_indices: &[u64],
-) -> AttesterSlashing {
- let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
- harness
- .validator_sign(validator_index as usize, message, epoch, domain)
- .expect("Unable to sign AttesterSlashing")
- };
-
- TestingAttesterSlashingBuilder::double_vote(validator_indices, signer)
-}
-
-/// Builds an `ProposerSlashing` for some `validator_index`.
-///
-/// Signs the message using a `BeaconChainHarness`.
-fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -> ProposerSlashing {
- let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
- harness
- .validator_sign(validator_index as usize, message, epoch, domain)
- .expect("Unable to sign AttesterSlashing")
- };
-
- TestingProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec)
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/config.rs b/beacon_node/beacon_chain/test_harness/src/test_case/config.rs
deleted file mode 100644
index 12d5da2d7..000000000
--- a/beacon_node/beacon_chain/test_harness/src/test_case/config.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
-use types::*;
-use yaml_rust::Yaml;
-
-pub type ValidatorIndex = u64;
-pub type ValidatorIndices = Vec;
-pub type GweiAmount = u64;
-
-pub type DepositTuple = (SlotHeight, GweiAmount);
-pub type ExitTuple = (SlotHeight, ValidatorIndex);
-pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
-pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
-/// (slot_height, from, to, amount)
-pub type TransferTuple = (SlotHeight, ValidatorIndex, ValidatorIndex, GweiAmount);
-
-/// Defines the execution of a `BeaconStateHarness` across a series of slots.
-#[derive(Debug)]
-pub struct Config {
- /// Initial validators.
- pub deposits_for_chain_start: usize,
- /// Number of slots in an epoch.
- pub slots_per_epoch: Option,
- /// Affects the number of epochs a validator must be active before they can withdraw.
- pub persistent_committee_period: Option,
- /// Number of slots to build before ending execution.
- pub num_slots: u64,
- /// Number of slots that should be skipped due to inactive validator.
- pub skip_slots: Option>,
- /// Deposits to be included during execution.
- pub deposits: Option>,
- /// Proposer slashings to be included during execution.
- pub proposer_slashings: Option>,
- /// Attester slashings to be including during execution.
- pub attester_slashings: Option>,
- /// Exits to be including during execution.
- pub exits: Option>,
- /// Transfers to be including during execution.
- pub transfers: Option>,
-}
-
-impl Config {
- /// Load from a YAML document.
- ///
- /// Expects to receive the `config` section of the document.
- pub fn from_yaml(yaml: &Yaml) -> Self {
- Self {
- deposits_for_chain_start: as_usize(&yaml, "deposits_for_chain_start")
- .expect("Must specify validator count"),
- slots_per_epoch: as_u64(&yaml, "slots_per_epoch"),
- persistent_committee_period: as_u64(&yaml, "persistent_committee_period"),
- num_slots: as_u64(&yaml, "num_slots").expect("Must specify `config.num_slots`"),
- skip_slots: as_vec_u64(yaml, "skip_slots"),
- deposits: parse_deposits(&yaml),
- proposer_slashings: parse_proposer_slashings(&yaml),
- attester_slashings: parse_attester_slashings(&yaml),
- exits: parse_exits(&yaml),
- transfers: parse_transfers(&yaml),
- }
- }
-}
-
-/// Parse the `transfers` section of the YAML document.
-fn parse_transfers(yaml: &Yaml) -> Option> {
- let mut tuples = vec![];
-
- for exit in yaml["transfers"].as_vec()? {
- let slot = as_u64(exit, "slot").expect("Incomplete transfer (slot)");
- let from = as_u64(exit, "from").expect("Incomplete transfer (from)");
- let to = as_u64(exit, "to").expect("Incomplete transfer (to)");
- let amount = as_u64(exit, "amount").expect("Incomplete transfer (amount)");
-
- tuples.push((SlotHeight::from(slot), from, to, amount));
- }
-
- Some(tuples)
-}
-
-/// Parse the `attester_slashings` section of the YAML document.
-fn parse_exits(yaml: &Yaml) -> Option> {
- let mut tuples = vec![];
-
- for exit in yaml["exits"].as_vec()? {
- let slot = as_u64(exit, "slot").expect("Incomplete exit (slot)");
- let validator_index =
- as_u64(exit, "validator_index").expect("Incomplete exit (validator_index)");
-
- tuples.push((SlotHeight::from(slot), validator_index));
- }
-
- Some(tuples)
-}
-
-/// Parse the `attester_slashings` section of the YAML document.
-fn parse_attester_slashings(yaml: &Yaml) -> Option> {
- let mut slashings = vec![];
-
- for slashing in yaml["attester_slashings"].as_vec()? {
- let slot = as_u64(slashing, "slot").expect("Incomplete attester_slashing (slot)");
- let validator_indices = as_vec_u64(slashing, "validator_indices")
- .expect("Incomplete attester_slashing (validator_indices)");
-
- slashings.push((SlotHeight::from(slot), validator_indices));
- }
-
- Some(slashings)
-}
-
-/// Parse the `proposer_slashings` section of the YAML document.
-fn parse_proposer_slashings(yaml: &Yaml) -> Option> {
- let mut slashings = vec![];
-
- for slashing in yaml["proposer_slashings"].as_vec()? {
- let slot = as_u64(slashing, "slot").expect("Incomplete proposer slashing (slot)_");
- let validator_index = as_u64(slashing, "validator_index")
- .expect("Incomplete proposer slashing (validator_index)");
-
- slashings.push((SlotHeight::from(slot), validator_index));
- }
-
- Some(slashings)
-}
-
-/// Parse the `deposits` section of the YAML document.
-fn parse_deposits(yaml: &Yaml) -> Option> {
- let mut deposits = vec![];
-
- for deposit in yaml["deposits"].as_vec()? {
- let slot = as_u64(deposit, "slot").expect("Incomplete deposit (slot)");
- let amount = as_u64(deposit, "amount").expect("Incomplete deposit (amount)");
-
- deposits.push((SlotHeight::from(slot), amount))
- }
-
- Some(deposits)
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/results.rs b/beacon_node/beacon_chain/test_harness/src/test_case/results.rs
deleted file mode 100644
index 596418c0f..000000000
--- a/beacon_node/beacon_chain/test_harness/src/test_case/results.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-use super::state_check::StateCheck;
-use super::yaml_helpers::as_usize;
-use yaml_rust::Yaml;
-
-/// A series of tests to be carried out upon an `ExecutionResult`, returned from executing a
-/// `TestCase`.
-#[derive(Debug)]
-pub struct Results {
- pub num_skipped_slots: Option,
- pub state_checks: Option>,
-}
-
-impl Results {
- /// Load from a YAML document.
- ///
- /// Expects the `results` section of the YAML document.
- pub fn from_yaml(yaml: &Yaml) -> Self {
- Self {
- num_skipped_slots: as_usize(yaml, "num_skipped_slots"),
- state_checks: parse_state_checks(yaml),
- }
- }
-}
-
-/// Parse the `state_checks` section of the YAML document.
-fn parse_state_checks(yaml: &Yaml) -> Option> {
- let mut states = vec![];
-
- for state_yaml in yaml["states"].as_vec()? {
- states.push(StateCheck::from_yaml(state_yaml));
- }
-
- Some(states)
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs b/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs
deleted file mode 100644
index c6bdf8978..000000000
--- a/beacon_node/beacon_chain/test_harness/src/test_case/state_check.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
-use log::info;
-use types::*;
-use yaml_rust::Yaml;
-
-type ValidatorIndex = u64;
-type BalanceGwei = u64;
-
-type BalanceCheckTuple = (ValidatorIndex, String, BalanceGwei);
-
-/// Tests to be conducted upon a `BeaconState` object generated during the execution of a
-/// `TestCase`.
-#[derive(Debug)]
-pub struct StateCheck {
- /// Checked against `beacon_state.slot`.
- pub slot: Slot,
- /// Checked against `beacon_state.validator_registry.len()`.
- pub num_validators: Option,
- /// The number of pending attestations from the previous epoch that should be in the state.
- pub num_previous_epoch_attestations: Option,
- /// The number of pending attestations from the current epoch that should be in the state.
- pub num_current_epoch_attestations: Option,
- /// A list of validator indices which have been penalized. Must be in ascending order.
- pub slashed_validators: Option>,
- /// A list of validator indices which have been fully exited. Must be in ascending order.
- pub exited_validators: Option>,
- /// A list of validator indices which have had an exit initiated. Must be in ascending order.
- pub exit_initiated_validators: Option>,
- /// A list of balances to check.
- pub balances: Option>,
-}
-
-impl StateCheck {
- /// Load from a YAML document.
- ///
- /// Expects the `state_check` section of the YAML document.
- pub fn from_yaml(yaml: &Yaml) -> Self {
- Self {
- slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")),
- num_validators: as_usize(&yaml, "num_validators"),
- num_previous_epoch_attestations: as_usize(&yaml, "num_previous_epoch_attestations"),
- num_current_epoch_attestations: as_usize(&yaml, "num_current_epoch_attestations"),
- slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
- exited_validators: as_vec_u64(&yaml, "exited_validators"),
- exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
- balances: parse_balances(&yaml),
- }
- }
-
- /// Performs all checks against a `BeaconState`
- ///
- /// # Panics
- ///
- /// Panics with an error message if any test fails.
- #[allow(clippy::cyclomatic_complexity)]
- pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
- let state_epoch = state.slot.epoch(spec.slots_per_epoch);
-
- info!("Running state check for slot height {}.", self.slot);
-
- // Check the state slot.
- assert_eq!(
- self.slot,
- state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch),
- "State slot is invalid."
- );
-
- // Check the validator count
- if let Some(num_validators) = self.num_validators {
- assert_eq!(
- state.validator_registry.len(),
- num_validators,
- "State validator count != expected."
- );
- info!("OK: num_validators = {}.", num_validators);
- }
-
- // Check the previous epoch attestations
- if let Some(n) = self.num_previous_epoch_attestations {
- assert_eq!(
- state.previous_epoch_attestations.len(),
- n,
- "previous epoch attestations count != expected."
- );
- info!("OK: num_previous_epoch_attestations = {}.", n);
- }
-
- // Check the current epoch attestations
- if let Some(n) = self.num_current_epoch_attestations {
- assert_eq!(
- state.current_epoch_attestations.len(),
- n,
- "current epoch attestations count != expected."
- );
- info!("OK: num_current_epoch_attestations = {}.", n);
- }
-
- // Check for slashed validators.
- if let Some(ref slashed_validators) = self.slashed_validators {
- let actually_slashed_validators: Vec = state
- .validator_registry
- .iter()
- .enumerate()
- .filter_map(|(i, validator)| {
- if validator.slashed {
- Some(i as u64)
- } else {
- None
- }
- })
- .collect();
- assert_eq!(
- actually_slashed_validators, *slashed_validators,
- "Slashed validators != expected."
- );
- info!("OK: slashed_validators = {:?}.", slashed_validators);
- }
-
- // Check for exited validators.
- if let Some(ref exited_validators) = self.exited_validators {
- let actually_exited_validators: Vec = state
- .validator_registry
- .iter()
- .enumerate()
- .filter_map(|(i, validator)| {
- if validator.is_exited_at(state_epoch) {
- Some(i as u64)
- } else {
- None
- }
- })
- .collect();
- assert_eq!(
- actually_exited_validators, *exited_validators,
- "Exited validators != expected."
- );
- info!("OK: exited_validators = {:?}.", exited_validators);
- }
-
- // Check for validators that have initiated exit.
- if let Some(ref exit_initiated_validators) = self.exit_initiated_validators {
- let actual: Vec = state
- .validator_registry
- .iter()
- .enumerate()
- .filter_map(|(i, validator)| {
- if validator.initiated_exit {
- Some(i as u64)
- } else {
- None
- }
- })
- .collect();
- assert_eq!(
- actual, *exit_initiated_validators,
- "Exit initiated validators != expected."
- );
- info!(
- "OK: exit_initiated_validators = {:?}.",
- exit_initiated_validators
- );
- }
-
- // Check validator balances.
- if let Some(ref balances) = self.balances {
- for (index, comparison, expected) in balances {
- let actual = *state
- .validator_balances
- .get(*index as usize)
- .expect("Balance check specifies unknown validator");
-
- let result = match comparison.as_ref() {
- "eq" => actual == *expected,
- _ => panic!("Unknown balance comparison (use `eq`)"),
- };
- assert!(
- result,
- format!(
- "Validator balance for {}: {} !{} {}.",
- index, actual, comparison, expected
- )
- );
- info!("OK: validator balance for {:?}.", index);
- }
- }
- }
-}
-
-/// Parse the `transfers` section of the YAML document.
-fn parse_balances(yaml: &Yaml) -> Option> {
- let mut tuples = vec![];
-
- for exit in yaml["balances"].as_vec()? {
- let from =
- as_u64(exit, "validator_index").expect("Incomplete balance check (validator_index)");
- let comparison = exit["comparison"]
- .clone()
- .into_string()
- .expect("Incomplete balance check (amount)");
- let balance = as_u64(exit, "balance").expect("Incomplete balance check (balance)");
-
- tuples.push((from, comparison, balance));
- }
-
- Some(tuples)
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/test_case/yaml_helpers.rs b/beacon_node/beacon_chain/test_harness/src/test_case/yaml_helpers.rs
deleted file mode 100644
index c499b3c0f..000000000
--- a/beacon_node/beacon_chain/test_harness/src/test_case/yaml_helpers.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-use yaml_rust::Yaml;
-
-pub fn as_usize(yaml: &Yaml, key: &str) -> Option {
- yaml[key].as_i64().and_then(|n| Some(n as usize))
-}
-
-pub fn as_u64(yaml: &Yaml, key: &str) -> Option {
- yaml[key].as_i64().and_then(|n| Some(n as u64))
-}
-
-pub fn as_vec_u64(yaml: &Yaml, key: &str) -> Option> {
- yaml[key].clone().into_vec().and_then(|vec| {
- Some(
- vec.iter()
- .map(|item| item.as_i64().unwrap() as u64)
- .collect(),
- )
- })
-}
diff --git a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs b/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs
deleted file mode 100644
index d47fd44b9..000000000
--- a/beacon_node/beacon_chain/test_harness/src/validator_harness/direct_beacon_node.rs
+++ /dev/null
@@ -1,100 +0,0 @@
-use attester::{
- BeaconNode as AttesterBeaconNode, BeaconNodeError as NodeError,
- PublishOutcome as AttestationPublishOutcome,
-};
-use beacon_chain::BeaconChain;
-use block_proposer::{
- BeaconNode as BeaconBlockNode, BeaconNodeError as BeaconBlockNodeError,
- PublishOutcome as BlockPublishOutcome,
-};
-use db::ClientDB;
-use fork_choice::ForkChoice;
-use parking_lot::RwLock;
-use slot_clock::SlotClock;
-use std::sync::Arc;
-use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot};
-
-/// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit
-/// blocks/attestations.
-///
-/// `BeaconBlock`s and `FreeAttestation`s are not actually published to the `BeaconChain`, instead
-/// they are stored inside this struct. This is to allow one to benchmark the submission of the
-/// block/attestation directly, or modify it before submission.
-pub struct DirectBeaconNode {
- beacon_chain: Arc>,
- published_blocks: RwLock>,
- published_attestations: RwLock>,
-}
-
-impl DirectBeaconNode {
- pub fn new(beacon_chain: Arc>) -> Self {
- Self {
- beacon_chain,
- published_blocks: RwLock::new(vec![]),
- published_attestations: RwLock::new(vec![]),
- }
- }
-
- /// Get the last published block (if any).
- pub fn last_published_block(&self) -> Option {
- Some(self.published_blocks.read().last()?.clone())
- }
-}
-
-impl AttesterBeaconNode for DirectBeaconNode {
- fn produce_attestation_data(
- &self,
- _slot: Slot,
- shard: u64,
- ) -> Result