Remove attester, block_producer & test_harness

Paul Hauner 2019-05-09 11:48:14 +10:00
parent 4c0c93f0c9
commit 49c92ef167
No known key found for this signature in database
GPG Key ID: D362883A9218FCC6
37 changed files with 0 additions and 3391 deletions

View File

@ -1,7 +1,5 @@
[workspace]
members = [
"eth2/attester",
"eth2/block_proposer",
"eth2/fork_choice",
"eth2/operation_pool",
"eth2/state_processing",
@ -32,7 +30,6 @@ members = [
"beacon_node/rpc",
"beacon_node/version",
"beacon_node/beacon_chain",
"beacon_node/beacon_chain/test_harness",
"protos",
"validator_client",
"account_manager",

View File

@ -5,7 +5,6 @@ authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com
edition = "2018"
[dependencies]
block_proposer = { path = "../../eth2/block_proposer" }
bls = { path = "../../eth2/utils/bls" }
boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" }
db = { path = "../db" }

View File

@ -1,43 +0,0 @@
[package]
name = "test_harness"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[[bin]]
name = "test_harness"
path = "src/bin.rs"
[lib]
name = "test_harness"
path = "src/lib.rs"
[dev-dependencies]
state_processing = { path = "../../../eth2/state_processing" }
[dependencies]
attester = { path = "../../../eth2/attester" }
beacon_chain = { path = "../../beacon_chain" }
block_proposer = { path = "../../../eth2/block_proposer" }
bls = { path = "../../../eth2/utils/bls" }
boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" }
clap = "2.32.0"
db = { path = "../../db" }
parking_lot = "0.7"
failure = "0.1"
failure_derive = "0.1"
fork_choice = { path = "../../../eth2/fork_choice" }
hashing = { path = "../../../eth2/utils/hashing" }
int_to_bytes = { path = "../../../eth2/utils/int_to_bytes" }
log = "0.4"
env_logger = "0.6.0"
rayon = "1.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
serde_yaml = "0.8"
slot_clock = { path = "../../../eth2/utils/slot_clock" }
ssz = { path = "../../../eth2/utils/ssz" }
tree_hash = { path = "../../../eth2/utils/tree_hash" }
types = { path = "../../../eth2/types" }
yaml-rust = "0.4.2"

View File

@ -1,150 +0,0 @@
# Test Harness
Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
directly to the `BeaconChain` via an `Arc`.
The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
producing blocks and attestations.
The crate consists of a library and a binary; examples for using both are
described below.
## YAML
Both the library and the binary are capable of parsing tests from a YAML file;
in fact, this is the sole purpose of the binary.
You can find YAML test cases [here](specs/). An example is included below:
```yaml
title: Validator Registry Tests
summary: Tests deposit and slashing effects on validator registry.
test_suite: validator_registry
fork: tchaikovsky
version: 1.0
test_cases:
- config:
slots_per_epoch: 64
deposits_for_chain_start: 1000
num_slots: 64
skip_slots: [2, 3]
deposits:
# At slot 1, create a new validator deposit of 32 ETH.
- slot: 1
amount: 32
# Trigger more deposits...
- slot: 3
amount: 32
- slot: 5
amount: 32
proposer_slashings:
# At slot 2, trigger a proposer slashing for validator #42.
- slot: 2
validator_index: 42
# Trigger another slashing...
- slot: 8
validator_index: 13
attester_slashings:
# At slot 2, trigger an attester slashing for validators #11 and #12.
- slot: 2
validator_indices: [11, 12]
# Trigger another slashing...
- slot: 5
validator_indices: [14]
results:
num_skipped_slots: 2
states:
- slot: 63
num_validators: 1003
slashed_validators: [11, 12, 13, 14, 42]
exited_validators: []
```
Thanks to [Prysm](http://github.com/prysmaticlabs/prysm) for coming up with the
base YAML format.
### Notes
Wherever `slot` is used, it is actually the "slot height", or slots since
genesis. This allows the tests to disregard the `GENESIS_EPOCH`.
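A rough sketch of that convention (the genesis slot value below is purely
illustrative, not a spec constant): the harness subtracts the genesis slot from
a state's absolute slot before comparing it against a test case, much as the
state checks do.

```rust
/// Sketch only: YAML `slot` values count slots since genesis ("slot height"),
/// so an absolute state slot is offset by the genesis slot before comparison.
fn slot_height(state_slot: u64, genesis_slot: u64) -> u64 {
    state_slot - genesis_slot
}

fn main() {
    // With a hypothetical genesis slot of 100, a state at absolute slot 163
    // sits at slot height 63, matching the `slot: 63` check in the YAML above.
    assert_eq!(slot_height(163, 100), 63);
}
```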
### Differences from Prysmatic's format
1. The detail for `deposits`, `proposer_slashings` and `attester_slashings` is
omitted from the test specification. It is assumed that they are valid
objects.
2. There is a `states` list in `results` that runs checks against any state
specified by a `slot` number. This is in contrast to the variables in
`results` that assume the last (highest) state should be inspected.
#### Reasoning
Respective reasonings for above changes:
1. This removes concerns about the actual object structure from the tests.
This allows for more variation in the deposit/slashing objects without
needing to update the tests. It also makes it easier to create tests.
2. This gives more fine-grained control over the tests. It allows for checking
that certain events happened at certain times whilst making the tests only
slightly more verbose.
_Notes: it may be useful to add an extra field to each slashing type to
indicate whether it should be valid. It may also be useful to add an option
for double-vote/surround-vote attester slashings. The `amount` field was left
on `deposits` as it changes the behaviour of the state significantly._
## Binary Usage Example
Follow these steps to run as a binary:
1. Navigate to the root of this crate (where this readme is located)
2. Run `$ cargo run --release -- --yaml examples/validator_registry.yaml`
_Note: the `--release` flag builds the binary without all the debugging
instrumentation. The test runs much faster when built with `--release`. As is
customary with cargo, the flags before `--` are passed to cargo and the flags
after are passed to the binary._
### CLI Options
```
Lighthouse Test Harness Runner 0.0.1
Sigma Prime <contact@sigmaprime.io>
Runs `test_harness` using a YAML test_case.
USAGE:
test_harness --log-level <LOG_LEVEL> --yaml <FILE>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
--log-level <LOG_LEVEL> Logging level. [default: debug] [possible values: error, warn, info, debug, trace]
--yaml <FILE> YAML file test_case.
```
## Library Usage Example
```rust
use test_harness::BeaconChainHarness;
use types::ChainSpec;
let validator_count = 8;
let spec = ChainSpec::few_validators();
let mut harness = BeaconChainHarness::new(spec, validator_count);
harness.advance_chain_with_block();
let chain = harness.chain_dump().unwrap();
// One block should have been built on top of the genesis block.
assert_eq!(chain.len(), 2);
```
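As a further sketch, a YAML test case can also be driven through the library
directly, mirroring what the binary's `run_test` subcommand does (see
`src/run_test.rs`). The file path is illustrative and the example assumes
`yaml-rust` is available as a dependency.

```rust
use std::fs::File;
use std::io::prelude::*;
use test_harness::test_case::TestCase;
use yaml_rust::YamlLoader;

fn main() {
    // Read the YAML test specification from disk (path is illustrative).
    let mut yaml_str = String::new();
    File::open("examples/validator_registry.yaml")
        .unwrap()
        .read_to_string(&mut yaml_str)
        .unwrap();

    // Each document may contain several test cases; build a fresh harness from
    // genesis for each, execute it, then check the chain dump against `results`.
    for doc in &YamlLoader::load_from_str(&yaml_str).unwrap() {
        for test_case_yaml in doc["test_cases"].as_vec().unwrap() {
            let test_case = TestCase::from_yaml(test_case_yaml);
            test_case.assert_result_valid(test_case.execute());
        }
    }
}
```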

View File

@ -1,63 +0,0 @@
title: Validator Registry Tests
summary: Tests deposit and slashing effects on validator registry.
test_suite: validator_registry
fork: tchaikovsky
version: 1.0
test_cases:
- config:
slots_per_epoch: 64
deposits_for_chain_start: 1000
num_slots: 64
skip_slots: [2, 3]
persistent_committee_period: 0
deposits:
# At slot 1, create a new validator deposit of 5 ETH.
- slot: 1
amount: 5000000000
# Trigger more deposits...
- slot: 3
amount: 5000000000
- slot: 5
amount: 32000000000
exits:
# At slot 10, submit an exit for validator #50.
- slot: 10
validator_index: 50
transfers:
- slot: 6
from: 1000
to: 1001
amount: 5000000000
proposer_slashings:
# At slot 2, trigger a proposer slashing for validator #42.
- slot: 2
validator_index: 42
# Trigger another slashing...
- slot: 8
validator_index: 13
attester_slashings:
# At slot 2, trigger an attester slashing for validators #11 and #12.
- slot: 2
validator_indices: [11, 12]
# Trigger another slashing...
- slot: 5
validator_indices: [14]
results:
num_skipped_slots: 2
states:
- slot: 63
num_validators: 1003
num_previous_epoch_attestations: 0
# slots_per_epoch - attestation_inclusion_delay - skip_slots
num_current_epoch_attestations: 57
slashed_validators: [11, 12, 13, 14, 42]
exited_validators: []
exit_initiated_validators: [50]
balances:
- validator_index: 1000
comparison: "eq"
balance: 0
- validator_index: 1001
comparison: "eq"
balance: 10000000000

View File

@ -1,350 +0,0 @@
use super::ValidatorHarness;
use beacon_chain::{BeaconChain, BlockProcessingOutcome};
pub use beacon_chain::{BeaconChainError, CheckPoint};
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
MemoryDB,
};
use fork_choice::BitwiseLMDGhost;
use log::debug;
use rayon::prelude::*;
use slot_clock::TestingSlotClock;
use std::sync::Arc;
use tree_hash::TreeHash;
use types::{test_utils::TestingBeaconStateBuilder, *};
type TestingBeaconChain = BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>;
/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
/// to it. Each validator is provided a borrow to the beacon chain, where it may read
/// information and submit blocks/attestations for processing.
///
/// This test harness is useful for testing validator and internal state transition logic. It
/// is not useful for testing that multiple beacon nodes can reach consensus.
pub struct BeaconChainHarness {
pub db: Arc<MemoryDB>,
pub beacon_chain: Arc<TestingBeaconChain>,
pub block_store: Arc<BeaconBlockStore<MemoryDB>>,
pub state_store: Arc<BeaconStateStore<MemoryDB>>,
pub validators: Vec<ValidatorHarness>,
pub spec: Arc<ChainSpec>,
}
impl BeaconChainHarness {
/// Create a new harness with:
///
/// - A keypair, `BlockProducer` and `Attester` for each validator.
/// - A new BeaconChain struct where the given validators are in the genesis.
pub fn new(spec: ChainSpec, validator_count: usize) -> Self {
let state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
Self::from_beacon_state_builder(state_builder, spec)
}
pub fn from_beacon_state_builder(
state_builder: TestingBeaconStateBuilder,
spec: ChainSpec,
) -> Self {
let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
let (mut genesis_state, keypairs) = state_builder.build();
let mut genesis_block = BeaconBlock::empty(&spec);
genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());
genesis_state
.build_epoch_cache(RelativeEpoch::Previous, &spec)
.unwrap();
genesis_state
.build_epoch_cache(RelativeEpoch::Current, &spec)
.unwrap();
genesis_state
.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)
.unwrap();
genesis_state
.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)
.unwrap();
// Create the Beacon Chain
let beacon_chain = Arc::new(
BeaconChain::from_genesis(
state_store.clone(),
block_store.clone(),
slot_clock,
genesis_state,
genesis_block,
spec.clone(),
fork_choice,
)
.unwrap(),
);
let spec = Arc::new(spec);
debug!("Creating validator producer and attester instances...");
// Spawn the test validator instances.
let validators: Vec<ValidatorHarness> = keypairs
.iter()
.map(|keypair| {
ValidatorHarness::new(keypair.clone(), beacon_chain.clone(), spec.clone())
})
.collect();
debug!("Created {} ValidatorHarnesses", validators.len());
Self {
db,
beacon_chain,
block_store,
state_store,
validators,
spec,
}
}
/// Move the `slot_clock` for the `BeaconChain` forward one slot.
///
/// This is the equivalent of advancing a system clock forward one `SLOT_DURATION`.
///
/// Returns the new slot.
pub fn increment_beacon_chain_slot(&mut self) -> Slot {
let slot = self.beacon_chain.present_slot() + 1;
let nth_slot = slot
- slot
.epoch(self.spec.slots_per_epoch)
.start_slot(self.spec.slots_per_epoch);
let nth_epoch = slot.epoch(self.spec.slots_per_epoch) - self.spec.genesis_epoch;
debug!(
"Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch).",
slot,
slot.epoch(self.spec.slots_per_epoch),
nth_epoch,
nth_slot
);
self.beacon_chain.slot_clock.set_slot(slot.as_u64());
self.beacon_chain
.catchup_state()
.expect("Failed to catch state");
slot
}
pub fn gather_attestations(&mut self) -> Vec<Attestation> {
let present_slot = self.beacon_chain.present_slot();
let state = self.beacon_chain.state.read();
let mut attestations = vec![];
for committee in state
.get_crosslink_committees_at_slot(present_slot, &self.spec)
.unwrap()
{
for &validator in &committee.committee {
let duties = state
.get_attestation_duties(validator, &self.spec)
.unwrap()
.expect("Attesting validators by definition have duties");
// Obtain `AttestationData` from the beacon chain.
let data = self
.beacon_chain
.produce_attestation_data(duties.shard)
.unwrap();
// Produce an aggregate signature with a single signature.
let aggregate_signature = {
let message = AttestationDataAndCustodyBit {
data: data.clone(),
custody_bit: false,
}
.tree_hash_root();
let domain = self.spec.get_domain(
state.slot.epoch(self.spec.slots_per_epoch),
Domain::Attestation,
&state.fork,
);
let sig =
Signature::new(&message, domain, &self.validators[validator].keypair.sk);
let mut agg_sig = AggregateSignature::new();
agg_sig.add(&sig);
agg_sig
};
let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len);
let custody_bitfield = Bitfield::with_capacity(duties.committee_len);
aggregation_bitfield.set(duties.committee_index, true);
attestations.push(Attestation {
aggregation_bitfield,
data,
custody_bitfield,
aggregate_signature,
})
}
}
attestations
}
/// Get the block from the proposer for the slot.
///
/// Note: the validator will only produce a block _once per slot_. So, if you call this twice you'll
/// only get a block once.
pub fn produce_block(&mut self) -> BeaconBlock {
let present_slot = self.beacon_chain.present_slot();
let proposer = self.beacon_chain.block_proposer(present_slot).unwrap();
debug!(
"Producing block from validator #{} for slot {}.",
proposer, present_slot
);
// Ensure the validator's slot clock is accurate.
self.validators[proposer].set_slot(present_slot);
self.validators[proposer].produce_block().unwrap()
}
/// Advances the chain with a BeaconBlock and attestations from all validators.
///
/// This is the ideal scenario for the Beacon Chain: 100% honest participation from
/// validators.
pub fn advance_chain_with_block(&mut self) -> BeaconBlock {
self.increment_beacon_chain_slot();
// Produce a new block.
let block = self.produce_block();
debug!("Submitting block for processing...");
match self.beacon_chain.process_block(block.clone()) {
Ok(BlockProcessingOutcome::ValidBlock(_)) => {}
other => panic!("block processing failed with {:?}", other),
};
debug!("...block processed by BeaconChain.");
debug!("Producing attestations...");
// Produce new attestations.
let attestations = self.gather_attestations();
debug!("Processing {} attestations...", attestations.len());
attestations
.par_iter()
.enumerate()
.for_each(|(i, attestation)| {
self.beacon_chain
.process_attestation(attestation.clone())
.unwrap_or_else(|_| panic!("Attestation {} invalid: {:?}", i, attestation));
});
debug!("Attestations processed.");
block
}
/// Signs a message using some validator's secret key with the `Fork` info from the latest state
/// of the `BeaconChain`.
///
/// Useful for producing slashable messages and other objects that `BeaconChainHarness` does
/// not produce naturally.
pub fn validator_sign(
&self,
validator_index: usize,
message: &[u8],
epoch: Epoch,
domain_type: Domain,
) -> Option<Signature> {
let validator = self.validators.get(validator_index)?;
let domain = self
.spec
.get_domain(epoch, domain_type, &self.beacon_chain.state.read().fork);
Some(Signature::new(message, domain, &validator.keypair.sk))
}
/// Returns the current `Fork` of the `beacon_chain`.
pub fn fork(&self) -> Fork {
self.beacon_chain.state.read().fork.clone()
}
/// Returns the current `epoch` of the `beacon_chain`.
pub fn epoch(&self) -> Epoch {
self.beacon_chain
.state
.read()
.slot
.epoch(self.spec.slots_per_epoch)
}
/// Returns the keypair for some validator index.
pub fn validator_keypair(&self, validator_index: usize) -> Option<&Keypair> {
self.validators
.get(validator_index)
.and_then(|v| Some(&v.keypair))
}
/// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new
/// `ValidatorHarness` instance for this validator.
///
/// If a new `ValidatorHarness` was created, the validator should become fully operational as
/// if the validator were created during `BeaconChainHarness` instantiation.
pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option<Keypair>) {
self.beacon_chain.process_deposit(deposit).unwrap();
// If a keypair is present, add a new `ValidatorHarness` to the rig.
if let Some(keypair) = keypair {
let validator =
ValidatorHarness::new(keypair, self.beacon_chain.clone(), self.spec.clone());
self.validators.push(validator);
}
}
/// Submit an exit to the `BeaconChain` for inclusion in some block.
///
/// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it
/// will stop receiving duties from the beacon chain and just do nothing when prompted to
/// produce/attest.
pub fn add_exit(&mut self, exit: VoluntaryExit) {
self.beacon_chain.process_voluntary_exit(exit).unwrap();
}
/// Submit a transfer to the `BeaconChain` for inclusion in some block.
pub fn add_transfer(&mut self, transfer: Transfer) {
self.beacon_chain.process_transfer(transfer).unwrap();
}
/// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
self.beacon_chain
.process_proposer_slashing(proposer_slashing)
.unwrap();
}
/// Submit an attester slashing to the `BeaconChain` for inclusion in some block.
pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) {
self.beacon_chain
.process_attester_slashing(attester_slashing)
.unwrap();
}
/// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head.
pub fn run_fork_choice(&mut self) {
self.beacon_chain.fork_choice().unwrap()
}
/// Dump all blocks and states from the canonical beacon chain.
pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, BeaconChainError> {
self.beacon_chain.chain_dump()
}
}

View File

@ -1,102 +0,0 @@
use clap::{App, Arg, SubCommand};
use env_logger::{Builder, Env};
use gen_keys::gen_keys;
use run_test::run_test;
use std::fs;
use types::test_utils::keypairs_path;
use types::ChainSpec;
mod beacon_chain_harness;
mod gen_keys;
mod run_test;
mod test_case;
mod validator_harness;
use validator_harness::ValidatorHarness;
fn main() {
let validator_file_path = keypairs_path();
let _ = fs::create_dir(validator_file_path.parent().unwrap());
let matches = App::new("Lighthouse Test Harness Runner")
.version("0.0.1")
.author("Sigma Prime <contact@sigmaprime.io>")
.about("Runs `test_harness` using a YAML test_case.")
.arg(
Arg::with_name("log")
.long("log-level")
.short("l")
.value_name("LOG_LEVEL")
.help("Logging level.")
.possible_values(&["error", "warn", "info", "debug", "trace"])
.default_value("debug")
.required(true),
)
.arg(
Arg::with_name("spec")
.long("spec")
.short("s")
.value_name("SPECIFICATION")
.help("ChainSpec instantiation.")
.possible_values(&["foundation", "few_validators"])
.default_value("foundation"),
)
.subcommand(
SubCommand::with_name("run_test")
.about("Executes a YAML test specification")
.arg(
Arg::with_name("yaml")
.long("yaml")
.value_name("FILE")
.help("YAML file test_case.")
.required(true),
)
.arg(
Arg::with_name("validators_dir")
.long("validators-dir")
.short("v")
.value_name("VALIDATORS_DIR")
.help("A directory with validator deposits and keypair YAML."),
),
)
.subcommand(
SubCommand::with_name("gen_keys")
.about("Builds a file of BLS keypairs for faster tests.")
.arg(
Arg::with_name("validator_count")
.long("validator_count")
.short("n")
.value_name("VALIDATOR_COUNT")
.help("Number of validators to generate.")
.required(true),
)
.arg(
Arg::with_name("output_file")
.long("output_file")
.short("d")
.value_name("OUTPUT_FILE")
.help("Output file for generated YAML.")
.default_value(validator_file_path.to_str().unwrap()),
),
)
.get_matches();
if let Some(log_level) = matches.value_of("log") {
Builder::from_env(Env::default().default_filter_or(log_level)).init();
}
let _spec = match matches.value_of("spec") {
Some("foundation") => ChainSpec::foundation(),
Some("few_validators") => ChainSpec::few_validators(),
_ => unreachable!(), // Has a default value, should always exist.
};
if let Some(matches) = matches.subcommand_matches("run_test") {
run_test(matches);
}
if let Some(matches) = matches.subcommand_matches("gen_keys") {
gen_keys(matches);
}
}

View File

@ -1,21 +0,0 @@
use clap::{value_t, ArgMatches};
use log::debug;
use std::path::Path;
use types::test_utils::{generate_deterministic_keypairs, KeypairsFile};
/// Creates a file containing BLS keypairs.
pub fn gen_keys(matches: &ArgMatches) {
let validator_count = value_t!(matches.value_of("validator_count"), usize)
.expect("Validator count is a required argument");
let output_file = matches
.value_of("output_file")
.expect("Output file has a default value.");
let keypairs = generate_deterministic_keypairs(validator_count);
debug!("Writing keypairs to file...");
let keypairs_path = Path::new(output_file);
keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap();
}

View File

@ -1,33 +0,0 @@
//! Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
//!
//! This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
//! directly to the `BeaconChain` via an `Arc`.
//!
//! The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
//! instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
//! producing blocks and attestations.
//!
//! Example:
//! ```rust,no_run
//! use test_harness::BeaconChainHarness;
//! use types::ChainSpec;
//!
//! let validator_count = 8;
//! let spec = ChainSpec::few_validators();
//!
//! let mut harness = BeaconChainHarness::new(spec, validator_count);
//!
//! harness.advance_chain_with_block();
//!
//! let chain = harness.chain_dump().unwrap();
//!
//! // One block should have been built on top of the genesis block.
//! assert_eq!(chain.len(), 2);
//! ```
mod beacon_chain_harness;
pub mod test_case;
mod validator_harness;
pub use self::beacon_chain_harness::BeaconChainHarness;
pub use self::validator_harness::ValidatorHarness;

View File

@ -1,37 +0,0 @@
use crate::test_case::TestCase;
use clap::ArgMatches;
use std::{fs::File, io::prelude::*};
use yaml_rust::YamlLoader;
/// Runs a YAML-specified test case.
pub fn run_test(matches: &ArgMatches) {
if let Some(yaml_file) = matches.value_of("yaml") {
let docs = {
let mut file = File::open(yaml_file).unwrap();
let mut yaml_str = String::new();
file.read_to_string(&mut yaml_str).unwrap();
YamlLoader::load_from_str(&yaml_str).unwrap()
};
for doc in &docs {
// For each `test_case` in the document's `test_cases` list, build a `TestCase`, execute it and
// assert that the execution result matches the test_case description.
//
// In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
// and a new `BeaconChain` is built as per the test_case.
//
// After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
// and states in the chain is obtained and checked against the `results` specified in
// the `test_case`.
//
// If any of the expectations in the results are not met, the process
// panics with a message.
for test_case in doc["test_cases"].as_vec().unwrap() {
let test_case = TestCase::from_yaml(test_case);
test_case.assert_result_valid(test_case.execute())
}
}
}
}

View File

@ -1,312 +0,0 @@
//! Defines execution and testing specs for a `BeaconChainHarness` instance. Supports loading from
//! a YAML file.
use crate::beacon_chain_harness::BeaconChainHarness;
use beacon_chain::CheckPoint;
use log::{info, warn};
use tree_hash::SignedRoot;
use types::*;
use types::test_utils::*;
use yaml_rust::Yaml;
mod config;
mod results;
mod state_check;
mod yaml_helpers;
pub use config::Config;
pub use results::Results;
pub use state_check::StateCheck;
/// Defines the execution and testing of a `BeaconChainHarness` instantiation.
///
/// Typical workflow is:
///
/// 1. Instantiate the `TestCase` from YAML: `let test_case = TestCase::from_yaml(&my_yaml);`
/// 2. Execute the test_case: `let result = test_case.execute();`
/// 3. Test the results against the test_case: `test_case.assert_result_valid(result);`
#[derive(Debug)]
pub struct TestCase {
/// Defines the execution.
pub config: Config,
/// Defines tests to run against the execution result.
pub results: Results,
}
/// The result of executing a `TestCase`.
///
pub struct ExecutionResult {
/// The canonical beacon chain generated from the execution.
pub chain: Vec<CheckPoint>,
/// The spec used for execution.
pub spec: ChainSpec,
}
impl TestCase {
/// Load the test case from a YAML document.
pub fn from_yaml(test_case: &Yaml) -> Self {
Self {
results: Results::from_yaml(&test_case["results"]),
config: Config::from_yaml(&test_case["config"]),
}
}
/// Return a `ChainSpec::foundation()`.
///
/// If specified in `config`, returns it with a modified `slots_per_epoch` and/or `persistent_committee_period`.
fn spec(&self) -> ChainSpec {
let mut spec = ChainSpec::foundation();
if let Some(n) = self.config.slots_per_epoch {
spec.slots_per_epoch = n;
}
if let Some(n) = self.config.persistent_committee_period {
spec.persistent_committee_period = n;
}
spec
}
/// Executes the test case, returning an `ExecutionResult`.
#[allow(clippy::cyclomatic_complexity)]
pub fn execute(&self) -> ExecutionResult {
let spec = self.spec();
let validator_count = self.config.deposits_for_chain_start;
let slots = self.config.num_slots;
info!(
"Building BeaconChainHarness with {} validators...",
validator_count
);
let mut harness = BeaconChainHarness::new(spec, validator_count);
info!("Starting simulation across {} slots...", slots);
// Start at 1 because genesis counts as a slot.
for slot_height in 1..slots {
// Used to ensure that deposits in the same slot have incremental deposit indices.
let mut deposit_index_offset = 0;
// Feed deposits to the BeaconChain.
if let Some(ref deposits) = self.config.deposits {
for (slot, amount) in deposits {
if *slot == slot_height {
info!("Including deposit at slot height {}.", slot_height);
let (deposit, keypair) =
build_deposit(&harness, *amount, deposit_index_offset);
harness.add_deposit(deposit, Some(keypair.clone()));
deposit_index_offset += 1;
}
}
}
// Feed proposer slashings to the BeaconChain.
if let Some(ref slashings) = self.config.proposer_slashings {
for (slot, validator_index) in slashings {
if *slot == slot_height {
info!(
"Including proposer slashing at slot height {} for validator #{}.",
slot_height, validator_index
);
let slashing = build_proposer_slashing(&harness, *validator_index);
harness.add_proposer_slashing(slashing);
}
}
}
// Feed attester slashings to the BeaconChain.
if let Some(ref slashings) = self.config.attester_slashings {
for (slot, validator_indices) in slashings {
if *slot == slot_height {
info!(
"Including attester slashing at slot height {} for validators {:?}.",
slot_height, validator_indices
);
let slashing =
build_double_vote_attester_slashing(&harness, &validator_indices[..]);
harness.add_attester_slashing(slashing);
}
}
}
// Feed exits to the BeaconChain.
if let Some(ref exits) = self.config.exits {
for (slot, validator_index) in exits {
if *slot == slot_height {
info!(
"Including exit at slot height {} for validator {}.",
slot_height, validator_index
);
let exit = build_exit(&harness, *validator_index);
harness.add_exit(exit);
}
}
}
// Feed transfers to the BeaconChain.
if let Some(ref transfers) = self.config.transfers {
for (slot, from, to, amount) in transfers {
if *slot == slot_height {
info!(
"Including transfer at slot height {} from validator {}.",
slot_height, from
);
let transfer = build_transfer(&harness, *from, *to, *amount);
harness.add_transfer(transfer);
}
}
}
// Build a block or skip a slot.
match self.config.skip_slots {
Some(ref skip_slots) if skip_slots.contains(&slot_height) => {
warn!("Skipping slot at height {}.", slot_height);
harness.increment_beacon_chain_slot();
}
_ => {
info!("Producing block at slot height {}.", slot_height);
harness.advance_chain_with_block();
}
}
}
harness.run_fork_choice();
info!("Test execution complete!");
info!("Building chain dump for analysis...");
ExecutionResult {
chain: harness.chain_dump().expect("Chain dump failed."),
spec: (*harness.spec).clone(),
}
}
/// Checks that the `ExecutionResult` is consistent with the specifications in `self.results`.
///
/// # Panics
///
/// Panics with a message if any result does not match expectations.
pub fn assert_result_valid(&self, execution_result: ExecutionResult) {
info!("Verifying test results...");
let spec = &execution_result.spec;
if let Some(num_skipped_slots) = self.results.num_skipped_slots {
assert_eq!(
execution_result.chain.len(),
self.config.num_slots as usize - num_skipped_slots,
"actual skipped slots != expected."
);
info!(
"OK: Chain length is {} ({} skipped slots).",
execution_result.chain.len(),
num_skipped_slots
);
}
if let Some(ref state_checks) = self.results.state_checks {
for checkpoint in &execution_result.chain {
let state = &checkpoint.beacon_state;
for state_check in state_checks {
let adjusted_state_slot =
state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch);
if state_check.slot == adjusted_state_slot {
state_check.assert_valid(state, spec);
}
}
}
}
}
}
/// Builds a `Transfer` that is valid for the given `BeaconChainHarness` at its next slot.
fn build_transfer(
harness: &BeaconChainHarness,
sender: u64,
recipient: u64,
amount: u64,
) -> Transfer {
let slot = harness.beacon_chain.state.read().slot + 1;
let mut builder = TestingTransferBuilder::new(sender, recipient, amount, slot);
let keypair = harness.validator_keypair(sender as usize).unwrap();
builder.sign(keypair.clone(), &harness.fork(), &harness.spec);
builder.build()
}
/// Builds a `Deposit` that is valid for the given `BeaconChainHarness`.
///
/// `index_offset` is used to ensure that `deposit.index == state.index` when adding multiple
/// deposits.
fn build_deposit(
harness: &BeaconChainHarness,
amount: u64,
index_offset: u64,
) -> (Deposit, Keypair) {
let keypair = Keypair::random();
let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount);
builder.set_index(harness.beacon_chain.state.read().deposit_index + index_offset);
builder.sign(&keypair, harness.epoch(), &harness.fork(), &harness.spec);
(builder.build(), keypair)
}
/// Builds a `VoluntaryExit` that is valid for the given `BeaconChainHarness`.
fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> VoluntaryExit {
let epoch = harness
.beacon_chain
.state
.read()
.current_epoch(&harness.spec);
let mut exit = VoluntaryExit {
epoch,
validator_index,
signature: Signature::empty_signature(),
};
let message = exit.signed_root();
exit.signature = harness
.validator_sign(validator_index as usize, &message[..], epoch, Domain::Exit)
.expect("Unable to sign VoluntaryExit");
exit
}
/// Builds an `AttesterSlashing` for some `validator_indices`.
///
/// Signs the message using a `BeaconChainHarness`.
fn build_double_vote_attester_slashing(
harness: &BeaconChainHarness,
validator_indices: &[u64],
) -> AttesterSlashing {
let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
harness
.validator_sign(validator_index as usize, message, epoch, domain)
.expect("Unable to sign AttesterSlashing")
};
TestingAttesterSlashingBuilder::double_vote(validator_indices, signer)
}
/// Builds a `ProposerSlashing` for some `validator_index`.
///
/// Signs the message using a `BeaconChainHarness`.
fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -> ProposerSlashing {
let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
harness
.validator_sign(validator_index as usize, message, epoch, domain)
.expect("Unable to sign ProposerSlashing")
};
TestingProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec)
}

View File

@ -1,135 +0,0 @@
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
use types::*;
use yaml_rust::Yaml;
pub type ValidatorIndex = u64;
pub type ValidatorIndices = Vec<u64>;
pub type GweiAmount = u64;
pub type DepositTuple = (SlotHeight, GweiAmount);
pub type ExitTuple = (SlotHeight, ValidatorIndex);
pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
/// (slot_height, from, to, amount)
pub type TransferTuple = (SlotHeight, ValidatorIndex, ValidatorIndex, GweiAmount);
/// Defines the execution of a `BeaconChainHarness` across a series of slots.
#[derive(Debug)]
pub struct Config {
/// Initial validators.
pub deposits_for_chain_start: usize,
/// Number of slots in an epoch.
pub slots_per_epoch: Option<u64>,
/// Affects the number of epochs a validator must be active before they can withdraw.
pub persistent_committee_period: Option<u64>,
/// Number of slots to build before ending execution.
pub num_slots: u64,
/// Number of slots that should be skipped due to an inactive validator.
pub skip_slots: Option<Vec<u64>>,
/// Deposits to be included during execution.
pub deposits: Option<Vec<DepositTuple>>,
/// Proposer slashings to be included during execution.
pub proposer_slashings: Option<Vec<ProposerSlashingTuple>>,
/// Attester slashings to be included during execution.
pub attester_slashings: Option<Vec<AttesterSlashingTuple>>,
/// Exits to be included during execution.
pub exits: Option<Vec<ExitTuple>>,
/// Transfers to be included during execution.
pub transfers: Option<Vec<TransferTuple>>,
}
impl Config {
/// Load from a YAML document.
///
/// Expects to receive the `config` section of the document.
pub fn from_yaml(yaml: &Yaml) -> Self {
Self {
deposits_for_chain_start: as_usize(&yaml, "deposits_for_chain_start")
.expect("Must specify validator count"),
slots_per_epoch: as_u64(&yaml, "slots_per_epoch"),
persistent_committee_period: as_u64(&yaml, "persistent_committee_period"),
num_slots: as_u64(&yaml, "num_slots").expect("Must specify `config.num_slots`"),
skip_slots: as_vec_u64(yaml, "skip_slots"),
deposits: parse_deposits(&yaml),
proposer_slashings: parse_proposer_slashings(&yaml),
attester_slashings: parse_attester_slashings(&yaml),
exits: parse_exits(&yaml),
transfers: parse_transfers(&yaml),
}
}
}
/// Parse the `transfers` section of the YAML document.
fn parse_transfers(yaml: &Yaml) -> Option<Vec<TransferTuple>> {
let mut tuples = vec![];
for transfer in yaml["transfers"].as_vec()? {
let slot = as_u64(transfer, "slot").expect("Incomplete transfer (slot)");
let from = as_u64(transfer, "from").expect("Incomplete transfer (from)");
let to = as_u64(transfer, "to").expect("Incomplete transfer (to)");
let amount = as_u64(transfer, "amount").expect("Incomplete transfer (amount)");
tuples.push((SlotHeight::from(slot), from, to, amount));
}
Some(tuples)
}
/// Parse the `exits` section of the YAML document.
fn parse_exits(yaml: &Yaml) -> Option<Vec<ExitTuple>> {
let mut tuples = vec![];
for exit in yaml["exits"].as_vec()? {
let slot = as_u64(exit, "slot").expect("Incomplete exit (slot)");
let validator_index =
as_u64(exit, "validator_index").expect("Incomplete exit (validator_index)");
tuples.push((SlotHeight::from(slot), validator_index));
}
Some(tuples)
}
/// Parse the `attester_slashings` section of the YAML document.
fn parse_attester_slashings(yaml: &Yaml) -> Option<Vec<AttesterSlashingTuple>> {
let mut slashings = vec![];
for slashing in yaml["attester_slashings"].as_vec()? {
let slot = as_u64(slashing, "slot").expect("Incomplete attester_slashing (slot)");
let validator_indices = as_vec_u64(slashing, "validator_indices")
.expect("Incomplete attester_slashing (validator_indices)");
slashings.push((SlotHeight::from(slot), validator_indices));
}
Some(slashings)
}
/// Parse the `proposer_slashings` section of the YAML document.
fn parse_proposer_slashings(yaml: &Yaml) -> Option<Vec<ProposerSlashingTuple>> {
let mut slashings = vec![];
for slashing in yaml["proposer_slashings"].as_vec()? {
let slot = as_u64(slashing, "slot").expect("Incomplete proposer slashing (slot)");
let validator_index = as_u64(slashing, "validator_index")
.expect("Incomplete proposer slashing (validator_index)");
slashings.push((SlotHeight::from(slot), validator_index));
}
Some(slashings)
}
/// Parse the `deposits` section of the YAML document.
fn parse_deposits(yaml: &Yaml) -> Option<Vec<DepositTuple>> {
let mut deposits = vec![];
for deposit in yaml["deposits"].as_vec()? {
let slot = as_u64(deposit, "slot").expect("Incomplete deposit (slot)");
let amount = as_u64(deposit, "amount").expect("Incomplete deposit (amount)");
deposits.push((SlotHeight::from(slot), amount))
}
Some(deposits)
}

View File

@ -1,34 +0,0 @@
use super::state_check::StateCheck;
use super::yaml_helpers::as_usize;
use yaml_rust::Yaml;
/// A series of tests to be carried out upon an `ExecutionResult`, returned from executing a
/// `TestCase`.
#[derive(Debug)]
pub struct Results {
pub num_skipped_slots: Option<usize>,
pub state_checks: Option<Vec<StateCheck>>,
}
impl Results {
/// Load from a YAML document.
///
/// Expects the `results` section of the YAML document.
pub fn from_yaml(yaml: &Yaml) -> Self {
Self {
num_skipped_slots: as_usize(yaml, "num_skipped_slots"),
state_checks: parse_state_checks(yaml),
}
}
}
/// Parse the `state_checks` section of the YAML document.
fn parse_state_checks(yaml: &Yaml) -> Option<Vec<StateCheck>> {
let mut states = vec![];
for state_yaml in yaml["states"].as_vec()? {
states.push(StateCheck::from_yaml(state_yaml));
}
Some(states)
}

View File

@ -1,206 +0,0 @@
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
use log::info;
use types::*;
use yaml_rust::Yaml;
type ValidatorIndex = u64;
type BalanceGwei = u64;
type BalanceCheckTuple = (ValidatorIndex, String, BalanceGwei);
/// Tests to be conducted upon a `BeaconState` object generated during the execution of a
/// `TestCase`.
#[derive(Debug)]
pub struct StateCheck {
/// Checked against `beacon_state.slot`.
pub slot: Slot,
/// Checked against `beacon_state.validator_registry.len()`.
pub num_validators: Option<usize>,
/// The number of pending attestations from the previous epoch that should be in the state.
pub num_previous_epoch_attestations: Option<usize>,
/// The number of pending attestations from the current epoch that should be in the state.
pub num_current_epoch_attestations: Option<usize>,
/// A list of validator indices which have been penalized. Must be in ascending order.
pub slashed_validators: Option<Vec<u64>>,
/// A list of validator indices which have been fully exited. Must be in ascending order.
pub exited_validators: Option<Vec<u64>>,
/// A list of validator indices which have had an exit initiated. Must be in ascending order.
pub exit_initiated_validators: Option<Vec<u64>>,
/// A list of balances to check.
pub balances: Option<Vec<BalanceCheckTuple>>,
}
impl StateCheck {
/// Load from a YAML document.
///
/// Expects the `state_check` section of the YAML document.
pub fn from_yaml(yaml: &Yaml) -> Self {
Self {
slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")),
num_validators: as_usize(&yaml, "num_validators"),
num_previous_epoch_attestations: as_usize(&yaml, "num_previous_epoch_attestations"),
num_current_epoch_attestations: as_usize(&yaml, "num_current_epoch_attestations"),
slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
exited_validators: as_vec_u64(&yaml, "exited_validators"),
exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
balances: parse_balances(&yaml),
}
}
/// Performs all checks against a `BeaconState`
///
/// # Panics
///
/// Panics with an error message if any test fails.
#[allow(clippy::cyclomatic_complexity)]
pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
let state_epoch = state.slot.epoch(spec.slots_per_epoch);
info!("Running state check for slot height {}.", self.slot);
// Check the state slot.
assert_eq!(
self.slot,
state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch),
"State slot is invalid."
);
// Check the validator count
if let Some(num_validators) = self.num_validators {
assert_eq!(
state.validator_registry.len(),
num_validators,
"State validator count != expected."
);
info!("OK: num_validators = {}.", num_validators);
}
// Check the previous epoch attestations
if let Some(n) = self.num_previous_epoch_attestations {
assert_eq!(
state.previous_epoch_attestations.len(),
n,
"previous epoch attestations count != expected."
);
info!("OK: num_previous_epoch_attestations = {}.", n);
}
// Check the current epoch attestations
if let Some(n) = self.num_current_epoch_attestations {
assert_eq!(
state.current_epoch_attestations.len(),
n,
"current epoch attestations count != expected."
);
info!("OK: num_current_epoch_attestations = {}.", n);
}
// Check for slashed validators.
if let Some(ref slashed_validators) = self.slashed_validators {
let actually_slashed_validators: Vec<u64> = state
.validator_registry
.iter()
.enumerate()
.filter_map(|(i, validator)| {
if validator.slashed {
Some(i as u64)
} else {
None
}
})
.collect();
assert_eq!(
actually_slashed_validators, *slashed_validators,
"Slashed validators != expected."
);
info!("OK: slashed_validators = {:?}.", slashed_validators);
}
// Check for exited validators.
if let Some(ref exited_validators) = self.exited_validators {
let actually_exited_validators: Vec<u64> = state
.validator_registry
.iter()
.enumerate()
.filter_map(|(i, validator)| {
if validator.is_exited_at(state_epoch) {
Some(i as u64)
} else {
None
}
})
.collect();
assert_eq!(
actually_exited_validators, *exited_validators,
"Exited validators != expected."
);
info!("OK: exited_validators = {:?}.", exited_validators);
}
// Check for validators that have initiated exit.
if let Some(ref exit_initiated_validators) = self.exit_initiated_validators {
let actual: Vec<u64> = state
.validator_registry
.iter()
.enumerate()
.filter_map(|(i, validator)| {
if validator.initiated_exit {
Some(i as u64)
} else {
None
}
})
.collect();
assert_eq!(
actual, *exit_initiated_validators,
"Exit initiated validators != expected."
);
info!(
"OK: exit_initiated_validators = {:?}.",
exit_initiated_validators
);
}
// Check validator balances.
if let Some(ref balances) = self.balances {
for (index, comparison, expected) in balances {
let actual = *state
.validator_balances
.get(*index as usize)
.expect("Balance check specifies unknown validator");
let result = match comparison.as_ref() {
"eq" => actual == *expected,
_ => panic!("Unknown balance comparison (use `eq`)"),
};
assert!(
result,
format!(
"Validator balance for {}: {} !{} {}.",
index, actual, comparison, expected
)
);
info!("OK: validator balance for {:?}.", index);
}
}
}
}
/// Parse the `balances` section of the YAML document.
fn parse_balances(yaml: &Yaml) -> Option<Vec<BalanceCheckTuple>> {
let mut tuples = vec![];
for check in yaml["balances"].as_vec()? {
let validator_index = as_u64(check, "validator_index")
.expect("Incomplete balance check (validator_index)");
let comparison = check["comparison"]
.clone()
.into_string()
.expect("Incomplete balance check (comparison)");
let balance = as_u64(check, "balance").expect("Incomplete balance check (balance)");
tuples.push((validator_index, comparison, balance));
}
Some(tuples)
}

View File

@ -1,19 +0,0 @@
use yaml_rust::Yaml;
pub fn as_usize(yaml: &Yaml, key: &str) -> Option<usize> {
yaml[key].as_i64().and_then(|n| Some(n as usize))
}
pub fn as_u64(yaml: &Yaml, key: &str) -> Option<u64> {
yaml[key].as_i64().and_then(|n| Some(n as u64))
}
pub fn as_vec_u64(yaml: &Yaml, key: &str) -> Option<Vec<u64>> {
yaml[key].clone().into_vec().and_then(|vec| {
Some(
vec.iter()
.map(|item| item.as_i64().unwrap() as u64)
.collect(),
)
})
}

View File

@ -1,100 +0,0 @@
use attester::{
BeaconNode as AttesterBeaconNode, BeaconNodeError as NodeError,
PublishOutcome as AttestationPublishOutcome,
};
use beacon_chain::BeaconChain;
use block_proposer::{
BeaconNode as BeaconBlockNode, BeaconNodeError as BeaconBlockNodeError,
PublishOutcome as BlockPublishOutcome,
};
use db::ClientDB;
use fork_choice::ForkChoice;
use parking_lot::RwLock;
use slot_clock::SlotClock;
use std::sync::Arc;
use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot};
/// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit
/// blocks/attestations.
///
/// `BeaconBlock`s and `FreeAttestation`s are not actually published to the `BeaconChain`; instead
/// they are stored inside this struct. This is to allow one to benchmark the submission of the
/// block/attestation directly, or modify it before submission.
pub struct DirectBeaconNode<T: ClientDB, U: SlotClock, F: ForkChoice> {
beacon_chain: Arc<BeaconChain<T, U, F>>,
published_blocks: RwLock<Vec<BeaconBlock>>,
published_attestations: RwLock<Vec<FreeAttestation>>,
}
impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectBeaconNode<T, U, F> {
pub fn new(beacon_chain: Arc<BeaconChain<T, U, F>>) -> Self {
Self {
beacon_chain,
published_blocks: RwLock::new(vec![]),
published_attestations: RwLock::new(vec![]),
}
}
/// Get the last published block (if any).
pub fn last_published_block(&self) -> Option<BeaconBlock> {
Some(self.published_blocks.read().last()?.clone())
}
}
impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterBeaconNode for DirectBeaconNode<T, U, F> {
fn produce_attestation_data(
&self,
_slot: Slot,
shard: u64,
) -> Result<Option<AttestationData>, NodeError> {
match self.beacon_chain.produce_attestation_data(shard) {
Ok(attestation_data) => Ok(Some(attestation_data)),
Err(e) => Err(NodeError::RemoteFailure(format!("{:?}", e))),
}
}
fn publish_attestation(
&self,
free_attestation: FreeAttestation,
) -> Result<AttestationPublishOutcome, NodeError> {
self.published_attestations.write().push(free_attestation);
Ok(AttestationPublishOutcome::ValidAttestation)
}
}
impl<T: ClientDB, U: SlotClock, F: ForkChoice> BeaconBlockNode for DirectBeaconNode<T, U, F> {
/// Requests a new `BeaconBlock` from the `BeaconChain`.
fn produce_beacon_block(
&self,
slot: Slot,
randao_reveal: &Signature,
) -> Result<Option<BeaconBlock>, BeaconBlockNodeError> {
let (block, _state) = self
.beacon_chain
.produce_block(randao_reveal.clone())
.map_err(|e| {
BeaconBlockNodeError::RemoteFailure(format!("Did not produce block: {:?}", e))
})?;
if block.slot == slot {
Ok(Some(block))
} else {
Err(BeaconBlockNodeError::RemoteFailure(
"Unable to produce at non-current slot.".to_string(),
))
}
}
/// A block is not _actually_ published to the `BeaconChain`; instead it is stored in
/// `published_blocks` and a successful `ValidBlock` is returned to the caller.
///
/// The block may be retrieved and then applied to the `BeaconChain` manually, potentially in a
/// benchmarking scenario.
fn publish_beacon_block(
&self,
block: BeaconBlock,
) -> Result<BlockPublishOutcome, BeaconBlockNodeError> {
self.published_blocks.write().push(block);
Ok(BlockPublishOutcome::ValidBlock)
}
}

View File

@ -1,74 +0,0 @@
use attester::{
DutiesReader as AttesterDutiesReader, DutiesReaderError as AttesterDutiesReaderError,
};
use beacon_chain::BeaconChain;
use block_proposer::{
DutiesReader as ProducerDutiesReader, DutiesReaderError as ProducerDutiesReaderError,
};
use db::ClientDB;
use fork_choice::ForkChoice;
use slot_clock::SlotClock;
use std::sync::Arc;
use types::{Fork, PublicKey, Slot};
/// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from
/// it.
pub struct DirectDuties<T: ClientDB, U: SlotClock, F: ForkChoice> {
beacon_chain: Arc<BeaconChain<T, U, F>>,
pubkey: PublicKey,
}
impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectDuties<T, U, F> {
pub fn new(pubkey: PublicKey, beacon_chain: Arc<BeaconChain<T, U, F>>) -> Self {
Self {
beacon_chain,
pubkey,
}
}
}
impl<T: ClientDB, U: SlotClock, F: ForkChoice> ProducerDutiesReader for DirectDuties<T, U, F> {
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, ProducerDutiesReaderError> {
let validator_index = self
.beacon_chain
.validator_index(&self.pubkey)
.ok_or_else(|| ProducerDutiesReaderError::UnknownValidator)?;
match self.beacon_chain.block_proposer(slot) {
Ok(proposer) if proposer == validator_index => Ok(true),
Ok(_) => Ok(false),
Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch),
}
}
fn fork(&self) -> Result<Fork, ProducerDutiesReaderError> {
Ok(self.beacon_chain.state.read().fork.clone())
}
}
impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterDutiesReader for DirectDuties<T, U, F> {
fn validator_index(&self) -> Option<u64> {
match self.beacon_chain.validator_index(&self.pubkey) {
Some(index) => Some(index as u64),
None => None,
}
}
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, AttesterDutiesReaderError> {
if let Some(validator_index) = self.validator_index() {
match self
.beacon_chain
.validator_attestion_slot_and_shard(validator_index as usize)
{
Ok(Some((attest_slot, attest_shard))) if attest_slot == slot => {
Ok(Some(attest_shard))
}
Ok(Some(_)) => Ok(None),
Ok(None) => Err(AttesterDutiesReaderError::UnknownEpoch),
Err(_) => unreachable!("Error when getting validator attestation shard."),
}
} else {
Err(AttesterDutiesReaderError::UnknownValidator)
}
}
}

View File

@ -1,36 +0,0 @@
use attester::Signer as AttesterSigner;
use block_proposer::Signer as BlockProposerSigner;
use types::{Keypair, Signature};
/// A test-only struct used to perform signing for a proposer or attester.
pub struct LocalSigner {
keypair: Keypair,
}
impl LocalSigner {
/// Produce a new `LocalSigner` that signs with the given keypair.
pub fn new(keypair: Keypair) -> Self {
Self { keypair }
}
/// Sign some message.
fn bls_sign(&self, message: &[u8], domain: u64) -> Option<Signature> {
Some(Signature::new(message, domain, &self.keypair.sk))
}
}
impl BlockProposerSigner for LocalSigner {
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
self.bls_sign(message, domain)
}
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
self.bls_sign(message, domain)
}
}
impl AttesterSigner for LocalSigner {
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
self.bls_sign(message, domain)
}
}

View File

@ -1,119 +0,0 @@
mod direct_beacon_node;
mod direct_duties;
mod local_signer;
use attester::Attester;
use beacon_chain::BeaconChain;
use block_proposer::PollOutcome as BlockPollOutcome;
use block_proposer::{BlockProducer, Error as BlockPollError};
use db::MemoryDB;
use direct_beacon_node::DirectBeaconNode;
use direct_duties::DirectDuties;
use fork_choice::BitwiseLMDGhost;
use local_signer::LocalSigner;
use slot_clock::TestingSlotClock;
use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, Keypair, Slot};
#[derive(Debug, PartialEq)]
pub enum BlockProduceError {
DidNotProduce(BlockPollOutcome),
PollError(BlockPollError),
}
type TestingBlockProducer = BlockProducer<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner,
>;
type TestingAttester = Attester<
TestingSlotClock,
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
LocalSigner,
>;
/// A `BlockProducer` and `Attester` which sign using a common keypair.
///
/// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
/// testing that the core proposer and attester logic is functioning, and for supporting beacon
/// chain tests.
pub struct ValidatorHarness {
pub block_producer: TestingBlockProducer,
pub attester: TestingAttester,
pub spec: Arc<ChainSpec>,
pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
pub keypair: Keypair,
pub beacon_node: Arc<DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
pub slot_clock: Arc<TestingSlotClock>,
pub signer: Arc<LocalSigner>,
}
impl ValidatorHarness {
/// Create a new `ValidatorHarness` that signs with the given keypair, operates per the given spec, and connects to the
/// supplied beacon node.
///
/// A `BlockProducer` and an `Attester` are created.
pub fn new(
keypair: Keypair,
beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
spec: Arc<ChainSpec>,
) -> Self {
let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot.as_u64()));
let signer = Arc::new(LocalSigner::new(keypair.clone()));
let beacon_node = Arc::new(DirectBeaconNode::new(beacon_chain.clone()));
let epoch_map = Arc::new(DirectDuties::new(keypair.pk.clone(), beacon_chain.clone()));
let block_producer = BlockProducer::new(
spec.clone(),
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
let attester = Attester::new(
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
Self {
block_producer,
attester,
spec,
epoch_map,
keypair,
beacon_node,
slot_clock,
signer,
}
}
/// Run the `poll` function on the `BlockProducer` and produce a block.
///
/// An error is returned if the producer refuses to produce.
pub fn produce_block(&mut self) -> Result<BeaconBlock, BlockProduceError> {
// Using `DirectBeaconNode`, the validator will always return successfully if it tries to
// publish a block.
match self.block_producer.poll() {
Ok(BlockPollOutcome::BlockProduced(_)) => {}
Ok(outcome) => return Err(BlockProduceError::DidNotProduce(outcome)),
Err(error) => return Err(BlockProduceError::PollError(error)),
};
Ok(self
.beacon_node
.last_published_block()
.expect("Unable to obtain produced block."))
}
/// Set the validator's slot clock to the specified slot.
///
/// The validator's slot clock will always read this value until it is set to something else.
pub fn set_slot(&mut self, slot: Slot) {
self.slot_clock.set_slot(slot.as_u64())
}
}

View File

@ -1,46 +0,0 @@
#![cfg(not(debug_assertions))]
use env_logger::{Builder, Env};
use log::debug;
use test_harness::BeaconChainHarness;
use types::ChainSpec;
#[test]
fn it_can_build_on_genesis_block() {
Builder::from_env(Env::default().default_filter_or("info")).init();
let spec = ChainSpec::few_validators();
let validator_count = 8;
let mut harness = BeaconChainHarness::new(spec, validator_count as usize);
harness.advance_chain_with_block();
}
#[test]
#[ignore]
fn it_can_produce_past_first_epoch_boundary() {
Builder::from_env(Env::default().default_filter_or("info")).init();
let spec = ChainSpec::few_validators();
let validator_count = 8;
debug!("Starting harness build...");
let mut harness = BeaconChainHarness::new(spec, validator_count);
debug!("Harness built, tests starting..");
let blocks = harness.spec.slots_per_epoch * 2 + 1;
for i in 0..blocks {
harness.advance_chain_with_block();
debug!("Produced block {}/{}.", i + 1, blocks);
}
harness.run_fork_choice();
let dump = harness.chain_dump().expect("Chain dump failed.");
assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block.
}

View File

@ -5,7 +5,6 @@ authors = ["Age Manning <Age@AgeManning.com>"]
edition = "2018"
[dev-dependencies]
test_harness = { path = "../beacon_chain/test_harness" }
sloggers = "0.3.2"
[dependencies]

View File

@ -1,570 +0,0 @@
use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender};
use eth2_libp2p::rpc::methods::*;
use eth2_libp2p::rpc::{RPCMethod, RPCRequest, RPCResponse, RequestId};
use eth2_libp2p::{PeerId, RPCEvent};
use network::beacon_chain::BeaconChain as NetworkBeaconChain;
use network::message_handler::{HandlerMessage, MessageHandler};
use network::service::{NetworkMessage, OutgoingMessage};
use sloggers::terminal::{Destination, TerminalLoggerBuilder};
use sloggers::types::Severity;
use sloggers::Build;
use std::time::Duration;
use test_harness::BeaconChainHarness;
use tokio::runtime::TaskExecutor;
use types::{test_utils::TestingBeaconStateBuilder, *};
pub struct SyncNode {
pub id: usize,
sender: Sender<HandlerMessage>,
receiver: Receiver<NetworkMessage>,
peer_id: PeerId,
harness: BeaconChainHarness,
}
impl SyncNode {
fn from_beacon_state_builder(
id: usize,
executor: &TaskExecutor,
state_builder: TestingBeaconStateBuilder,
spec: &ChainSpec,
logger: slog::Logger,
) -> Self {
let harness = BeaconChainHarness::from_beacon_state_builder(state_builder, spec.clone());
let (network_sender, network_receiver) = unbounded();
let message_handler_sender = MessageHandler::spawn(
harness.beacon_chain.clone(),
network_sender,
executor,
logger,
)
.unwrap();
Self {
id,
sender: message_handler_sender,
receiver: network_receiver,
peer_id: PeerId::random(),
harness,
}
}
fn increment_beacon_chain_slot(&mut self) {
self.harness.increment_beacon_chain_slot();
}
fn send(&self, message: HandlerMessage) {
self.sender.send(message).unwrap();
}
fn recv(&self) -> Result<NetworkMessage, RecvTimeoutError> {
self.receiver.recv_timeout(Duration::from_millis(500))
}
fn hello_message(&self) -> HelloMessage {
self.harness.beacon_chain.hello_message()
}
pub fn connect_to(&mut self, node: &SyncNode) {
let message = HandlerMessage::PeerDialed(self.peer_id.clone());
node.send(message);
}
/// Reads the receive queue from one node and passes the message to the other. Also returns a
/// copy of the message.
///
/// self -----> node
/// |
/// us
///
/// Named after the unix `tee` command.
fn tee(&mut self, node: &SyncNode) -> NetworkMessage {
let network_message = self.recv().expect("Timeout on tee");
let handler_message = match network_message.clone() {
NetworkMessage::Send(_to_peer_id, OutgoingMessage::RPC(event)) => {
HandlerMessage::RPC(self.peer_id.clone(), event)
}
_ => panic!("tee cannot parse {:?}", network_message),
};
node.send(handler_message);
network_message
}
fn tee_hello_request(&mut self, node: &SyncNode) -> HelloMessage {
let request = self.tee_rpc_request(node);
match request {
RPCRequest::Hello(message) => message,
_ => panic!("tee_hello_request got: {:?}", request),
}
}
fn tee_hello_response(&mut self, node: &SyncNode) -> HelloMessage {
let response = self.tee_rpc_response(node);
match response {
RPCResponse::Hello(message) => message,
_ => panic!("tee_hello_response got: {:?}", response),
}
}
fn tee_block_root_request(&mut self, node: &SyncNode) -> BeaconBlockRootsRequest {
let msg = self.tee_rpc_request(node);
match msg {
RPCRequest::BeaconBlockRoots(data) => data,
_ => panic!("tee_block_root_request got: {:?}", msg),
}
}
fn tee_block_root_response(&mut self, node: &SyncNode) -> BeaconBlockRootsResponse {
let msg = self.tee_rpc_response(node);
match msg {
RPCResponse::BeaconBlockRoots(data) => data,
_ => panic!("tee_block_root_response got: {:?}", msg),
}
}
fn tee_block_header_request(&mut self, node: &SyncNode) -> BeaconBlockHeadersRequest {
let msg = self.tee_rpc_request(node);
match msg {
RPCRequest::BeaconBlockHeaders(data) => data,
_ => panic!("tee_block_header_request got: {:?}", msg),
}
}
fn tee_block_header_response(&mut self, node: &SyncNode) -> BeaconBlockHeadersResponse {
let msg = self.tee_rpc_response(node);
match msg {
RPCResponse::BeaconBlockHeaders(data) => data,
_ => panic!("tee_block_header_response got: {:?}", msg),
}
}
fn tee_block_body_request(&mut self, node: &SyncNode) -> BeaconBlockBodiesRequest {
let msg = self.tee_rpc_request(node);
match msg {
RPCRequest::BeaconBlockBodies(data) => data,
_ => panic!("tee_block_body_request got: {:?}", msg),
}
}
fn tee_block_body_response(&mut self, node: &SyncNode) -> BeaconBlockBodiesResponse {
let msg = self.tee_rpc_response(node);
match msg {
RPCResponse::BeaconBlockBodies(data) => data,
_ => panic!("tee_block_body_response got: {:?}", msg),
}
}
fn tee_rpc_request(&mut self, node: &SyncNode) -> RPCRequest {
let network_message = self.tee(node);
match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Request {
id: _,
method_id: _,
body,
}),
) => body,
_ => panic!("tee_rpc_request failed! got {:?}", network_message),
}
}
fn tee_rpc_response(&mut self, node: &SyncNode) -> RPCResponse {
let network_message = self.tee(node);
match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Response {
id: _,
method_id: _,
result,
}),
) => result,
_ => panic!("tee_rpc_response failed! got {:?}", network_message),
}
}
pub fn get_block_root_request(&self) -> BeaconBlockRootsRequest {
let request = self.recv_rpc_request().expect("No block root request");
match request {
RPCRequest::BeaconBlockRoots(request) => request,
_ => panic!("Did not get block root request"),
}
}
pub fn get_block_headers_request(&self) -> BeaconBlockHeadersRequest {
let request = self.recv_rpc_request().expect("No block headers request");
match request {
RPCRequest::BeaconBlockHeaders(request) => request,
_ => panic!("Did not get block headers request"),
}
}
pub fn get_block_bodies_request(&self) -> BeaconBlockBodiesRequest {
let request = self.recv_rpc_request().expect("No block bodies request");
match request {
RPCRequest::BeaconBlockBodies(request) => request,
_ => panic!("Did not get block bodies request"),
}
}
fn _recv_rpc_response(&self) -> Result<RPCResponse, RecvTimeoutError> {
let network_message = self.recv()?;
Ok(match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Response {
id: _,
method_id: _,
result,
}),
) => result,
_ => panic!("get_rpc_response failed! got {:?}", network_message),
})
}
fn recv_rpc_request(&self) -> Result<RPCRequest, RecvTimeoutError> {
let network_message = self.recv()?;
Ok(match network_message {
NetworkMessage::Send(
_peer_id,
OutgoingMessage::RPC(RPCEvent::Request {
id: _,
method_id: _,
body,
}),
) => body,
_ => panic!("get_rpc_request failed! got {:?}", network_message),
})
}
}
fn get_logger() -> slog::Logger {
let mut builder = TerminalLoggerBuilder::new();
builder.level(Severity::Debug);
builder.destination(Destination::Stderr);
builder.build().unwrap()
}
pub struct SyncMaster {
harness: BeaconChainHarness,
peer_id: PeerId,
response_ids: Vec<RequestId>,
}
impl SyncMaster {
fn from_beacon_state_builder(
state_builder: TestingBeaconStateBuilder,
node_count: usize,
spec: &ChainSpec,
) -> Self {
let harness = BeaconChainHarness::from_beacon_state_builder(state_builder, spec.clone());
let peer_id = PeerId::random();
let response_ids = vec![RequestId::from(0); node_count];
Self {
harness,
peer_id,
response_ids,
}
}
pub fn response_id(&mut self, node: &SyncNode) -> RequestId {
let id = self.response_ids[node.id].clone();
self.response_ids[node.id].increment();
id
}
pub fn do_hello_with(&mut self, node: &SyncNode) {
let message = HandlerMessage::PeerDialed(self.peer_id.clone());
node.send(message);
let request = node.recv_rpc_request().expect("No hello response");
match request {
RPCRequest::Hello(_hello) => {
let hello = self.harness.beacon_chain.hello_message();
let response = self.rpc_response(node, RPCResponse::Hello(hello));
node.send(response);
}
_ => panic!("Got message other than hello from node."),
}
}
pub fn respond_to_block_roots_request(
&mut self,
node: &SyncNode,
request: BeaconBlockRootsRequest,
) {
let roots = self
.harness
.beacon_chain
.get_block_roots(request.start_slot, request.count as usize, 0)
.expect("Beacon chain did not give block roots")
.iter()
.enumerate()
.map(|(i, root)| BlockRootSlot {
block_root: *root,
slot: Slot::from(i) + request.start_slot,
})
.collect();
let response = RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots });
self.send_rpc_response(node, response)
}
pub fn respond_to_block_headers_request(
&mut self,
node: &SyncNode,
request: BeaconBlockHeadersRequest,
) {
let roots = self
.harness
.beacon_chain
.get_block_roots(
request.start_slot,
request.max_headers as usize,
request.skip_slots as usize,
)
.expect("Beacon chain did not give blocks");
if roots.is_empty() {
panic!("Roots was empty when trying to get headers.")
}
assert_eq!(
roots[0], request.start_root,
"Got the wrong start root when getting headers"
);
let headers: Vec<BeaconBlockHeader> = roots
.iter()
.map(|root| {
let block = self
.harness
.beacon_chain
.get_block(root)
.expect("Failed to load block")
.expect("Block did not exist");
block.block_header()
})
.collect();
let response = RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers });
self.send_rpc_response(node, response)
}
pub fn respond_to_block_bodies_request(
&mut self,
node: &SyncNode,
request: BeaconBlockBodiesRequest,
) {
let block_bodies: Vec<BeaconBlockBody> = request
.block_roots
.iter()
.map(|root| {
let block = self
.harness
.beacon_chain
.get_block(root)
.expect("Failed to load block")
.expect("Block did not exist");
block.body
})
.collect();
let response = RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { block_bodies });
self.send_rpc_response(node, response)
}
fn send_rpc_response(&mut self, node: &SyncNode, rpc_response: RPCResponse) {
node.send(self.rpc_response(node, rpc_response));
}
fn rpc_response(&mut self, node: &SyncNode, rpc_response: RPCResponse) -> HandlerMessage {
HandlerMessage::RPC(
self.peer_id.clone(),
RPCEvent::Response {
id: self.response_id(node),
method_id: RPCMethod::Hello.into(),
result: rpc_response,
},
)
}
}
fn test_setup(
state_builder: TestingBeaconStateBuilder,
node_count: usize,
spec: &ChainSpec,
logger: slog::Logger,
) -> (tokio::runtime::Runtime, SyncMaster, Vec<SyncNode>) {
let runtime = tokio::runtime::Runtime::new().unwrap();
let mut nodes = Vec::with_capacity(node_count);
for id in 0..node_count {
let node = SyncNode::from_beacon_state_builder(
id,
&runtime.executor(),
state_builder.clone(),
&spec,
logger.clone(),
);
nodes.push(node);
}
let master = SyncMaster::from_beacon_state_builder(state_builder, node_count, &spec);
(runtime, master, nodes)
}
pub fn build_blocks(blocks: usize, master: &mut SyncMaster, nodes: &mut Vec<SyncNode>) {
for _ in 0..blocks {
master.harness.advance_chain_with_block();
for i in 0..nodes.len() {
nodes[i].increment_beacon_chain_slot();
}
}
master.harness.run_fork_choice();
for i in 0..nodes.len() {
nodes[i].harness.run_fork_choice();
}
}
#[test]
#[ignore]
fn sync_node_with_master() {
let logger = get_logger();
let spec = ChainSpec::few_validators();
let validator_count = 8;
let node_count = 1;
let state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
let (runtime, mut master, mut nodes) =
test_setup(state_builder, node_count, &spec, logger.clone());
let original_node_slot = nodes[0].hello_message().best_slot;
build_blocks(2, &mut master, &mut nodes);
master.do_hello_with(&nodes[0]);
let roots_request = nodes[0].get_block_root_request();
assert_eq!(roots_request.start_slot, original_node_slot + 1);
assert_eq!(roots_request.count, 2);
master.respond_to_block_roots_request(&nodes[0], roots_request);
let headers_request = nodes[0].get_block_headers_request();
assert_eq!(headers_request.start_slot, original_node_slot + 1);
assert_eq!(headers_request.max_headers, 2);
assert_eq!(headers_request.skip_slots, 0);
master.respond_to_block_headers_request(&nodes[0], headers_request);
let bodies_request = nodes[0].get_block_bodies_request();
assert_eq!(bodies_request.block_roots.len(), 2);
master.respond_to_block_bodies_request(&nodes[0], bodies_request);
std::thread::sleep(Duration::from_millis(10000));
runtime.shutdown_now();
}
#[test]
#[ignore]
fn sync_two_nodes() {
let logger = get_logger();
let spec = ChainSpec::few_validators();
let validator_count = 8;
let node_count = 2;
let state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
let (runtime, _master, mut nodes) =
test_setup(state_builder, node_count, &spec, logger.clone());
// let original_node_slot = nodes[0].hello_message().best_slot;
let mut node_a = nodes.remove(0);
let mut node_b = nodes.remove(0);
let blocks = 2;
// Node A builds out a longer, better chain.
for _ in 0..blocks {
// Node A should build a block.
node_a.harness.advance_chain_with_block();
// Node B should just increment its slot without a block.
node_b.harness.increment_beacon_chain_slot();
}
node_a.harness.run_fork_choice();
// A connects to B.
node_a.connect_to(&node_b);
// B says hello to A.
node_b.tee_hello_request(&node_a);
// A says hello back.
node_a.tee_hello_response(&node_b);
// B requests block roots from A.
node_b.tee_block_root_request(&node_a);
// A provides block roots to B.
node_a.tee_block_root_response(&node_b);
// B requests block headers from A.
node_b.tee_block_header_request(&node_a);
// A provides block headers to B.
node_a.tee_block_header_response(&node_b);
// B requests block bodies from A.
node_b.tee_block_body_request(&node_a);
// A provides block bodies to B.
node_a.tee_block_body_response(&node_b);
std::thread::sleep(Duration::from_secs(20));
node_b.harness.run_fork_choice();
let node_a_chain = node_a
.harness
.beacon_chain
.chain_dump()
.expect("Can't dump node a chain");
let node_b_chain = node_b
.harness
.beacon_chain
.chain_dump()
.expect("Can't dump node b chain");
assert_eq!(
node_a_chain.len(),
node_b_chain.len(),
"Chains should be equal length"
);
assert_eq!(node_a_chain, node_b_chain, "Chains should be identical");
runtime.shutdown_now();
}
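
The hello/roots/headers/bodies ping-pong in the test above could be wrapped in a helper built from the `tee_*` methods on `SyncNode`; a sketch, with a hypothetical helper name, assuming `a` holds the longer chain.

```rust
// Hypothetical helper, not part of the removed file: replay the full sync
// exchange between two SyncNodes, assuming `a` has the longer, better chain.
fn sync_b_from_a(a: &mut SyncNode, b: &mut SyncNode) {
    // A dials B, prompting B to say hello.
    a.connect_to(b);
    b.tee_hello_request(a);
    a.tee_hello_response(b);
    // B then walks the roots -> headers -> bodies sequence against A.
    b.tee_block_root_request(a);
    a.tee_block_root_response(b);
    b.tee_block_header_request(a);
    a.tee_block_header_response(b);
    b.tee_block_body_request(a);
    a.tee_block_body_response(b);
}
```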


@ -1,11 +0,0 @@
[package]
name = "attester"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
slot_clock = { path = "../../eth2/utils/slot_clock" }
ssz = { path = "../../eth2/utils/ssz" }
tree_hash = { path = "../../eth2/utils/tree_hash" }
types = { path = "../../eth2/types" }


@ -1,257 +0,0 @@
pub mod test_utils;
mod traits;
use slot_clock::SlotClock;
use std::sync::Arc;
use tree_hash::TreeHash;
use types::{AttestationData, AttestationDataAndCustodyBit, FreeAttestation, Signature, Slot};
pub use self::traits::{
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
};
const PHASE_0_CUSTODY_BIT: bool = false;
const DOMAIN_ATTESTATION: u64 = 1;
#[derive(Debug, PartialEq)]
pub enum PollOutcome {
AttestationProduced(Slot),
AttestationNotRequired(Slot),
SlashableAttestationNotProduced(Slot),
BeaconNodeUnableToProduceAttestation(Slot),
ProducerDutiesUnknown(Slot),
SlotAlreadyProcessed(Slot),
SignerRejection(Slot),
ValidatorIsUnknown(Slot),
}
#[derive(Debug, PartialEq)]
pub enum Error {
SlotClockError,
SlotUnknowable,
EpochMapPoisoned,
SlotClockPoisoned,
EpochLengthIsZero,
BeaconNodeError(BeaconNodeError),
}
/// A polling state machine which performs attestation production duties, based upon some epoch duties
/// (`EpochDutiesMap`) and a concept of time (`SlotClock`).
///
/// Ensures that messages are not slashable.
///
/// Relies upon an external service to keep the `EpochDutiesMap` updated.
pub struct Attester<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
pub last_processed_slot: Option<Slot>,
duties: Arc<V>,
slot_clock: Arc<T>,
beacon_node: Arc<U>,
signer: Arc<W>,
}
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V, W> {
/// Returns a new instance where `last_processed_slot == None`.
pub fn new(duties: Arc<V>, slot_clock: Arc<T>, beacon_node: Arc<U>, signer: Arc<W>) -> Self {
Self {
last_processed_slot: None,
duties,
slot_clock,
beacon_node,
signer,
}
}
}
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V, W> {
/// Poll the `BeaconNode` and produce an attestation if required.
pub fn poll(&mut self) -> Result<PollOutcome, Error> {
let slot = self
.slot_clock
.present_slot()
.map_err(|_| Error::SlotClockError)?
.ok_or(Error::SlotUnknowable)?;
if !self.is_processed_slot(slot) {
self.last_processed_slot = Some(slot);
let shard = match self.duties.attestation_shard(slot) {
Ok(Some(result)) => result,
Ok(None) => return Ok(PollOutcome::AttestationNotRequired(slot)),
Err(DutiesReaderError::UnknownEpoch) => {
return Ok(PollOutcome::ProducerDutiesUnknown(slot));
}
Err(DutiesReaderError::UnknownValidator) => {
return Ok(PollOutcome::ValidatorIsUnknown(slot));
}
Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero),
Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned),
};
self.produce_attestation(slot, shard)
} else {
Ok(PollOutcome::SlotAlreadyProcessed(slot))
}
}
fn produce_attestation(&mut self, slot: Slot, shard: u64) -> Result<PollOutcome, Error> {
let attestation_data = match self.beacon_node.produce_attestation_data(slot, shard)? {
Some(attestation_data) => attestation_data,
None => return Ok(PollOutcome::BeaconNodeUnableToProduceAttestation(slot)),
};
if !self.safe_to_produce(&attestation_data) {
return Ok(PollOutcome::SlashableAttestationNotProduced(slot));
}
let signature = match self.sign_attestation_data(&attestation_data) {
Some(signature) => signature,
None => return Ok(PollOutcome::SignerRejection(slot)),
};
let validator_index = match self.duties.validator_index() {
Some(validator_index) => validator_index,
None => return Ok(PollOutcome::ValidatorIsUnknown(slot)),
};
let free_attestation = FreeAttestation {
data: attestation_data,
signature,
validator_index,
};
self.beacon_node.publish_attestation(free_attestation)?;
Ok(PollOutcome::AttestationProduced(slot))
}
fn is_processed_slot(&self, slot: Slot) -> bool {
match self.last_processed_slot {
Some(processed_slot) if slot <= processed_slot => true,
_ => false,
}
}
/// Signs the given attestation data with the validator's private key, returning the signature.
///
/// Important: this function will not check to ensure the attestation is not slashable. This
/// must be done upstream.
fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option<Signature> {
self.store_produce(attestation_data);
let message = AttestationDataAndCustodyBit {
data: attestation_data.clone(),
custody_bit: PHASE_0_CUSTODY_BIT,
}
.tree_hash_root();
self.signer
.sign_attestation_message(&message[..], DOMAIN_ATTESTATION)
}
/// Returns `true` if signing some attestation_data is safe (non-slashable).
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn safe_to_produce(&self, _attestation_data: &AttestationData) -> bool {
// TODO: ensure the producer doesn't produce slashable blocks.
// https://github.com/sigp/lighthouse/issues/160
true
}
/// Record that an attestation was produced so that slashable votes may not be made in the future.
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn store_produce(&mut self, _block: &AttestationData) {
// TODO: record this block production to prevent future slashings.
// https://github.com/sigp/lighthouse/issues/160
}
}
impl From<BeaconNodeError> for Error {
fn from(e: BeaconNodeError) -> Error {
Error::BeaconNodeError(e)
}
}
#[cfg(test)]
mod tests {
use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode};
use super::*;
use slot_clock::TestingSlotClock;
use types::{
test_utils::{SeedableRng, TestRandom, XorShiftRng},
ChainSpec, Keypair,
};
// TODO: implement more thorough testing.
// https://github.com/sigp/lighthouse/issues/160
//
// These tests should serve as a good example for future tests.
#[test]
pub fn polling() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let spec = Arc::new(ChainSpec::foundation());
let slot_clock = Arc::new(TestingSlotClock::new(0));
let beacon_node = Arc::new(SimulatedBeaconNode::default());
let signer = Arc::new(LocalSigner::new(Keypair::random()));
let mut duties = EpochMap::new(spec.slots_per_epoch);
let attest_slot = Slot::new(100);
let attest_epoch = attest_slot / spec.slots_per_epoch;
let attest_shard = 12;
duties.insert_attestation_shard(attest_slot, attest_shard);
duties.set_validator_index(Some(2));
let duties = Arc::new(duties);
let mut attester = Attester::new(
duties.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
// Configure responses from the BeaconNode.
beacon_node.set_next_produce_result(Ok(Some(AttestationData::random_for_test(&mut rng))));
beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidAttestation));
// One slot before attestation slot...
slot_clock.set_slot(attest_slot.as_u64() - 1);
assert_eq!(
attester.poll(),
Ok(PollOutcome::AttestationNotRequired(attest_slot - 1))
);
// On the attest slot...
slot_clock.set_slot(attest_slot.as_u64());
assert_eq!(
attester.poll(),
Ok(PollOutcome::AttestationProduced(attest_slot))
);
// Trying the same attest slot again...
slot_clock.set_slot(attest_slot.as_u64());
assert_eq!(
attester.poll(),
Ok(PollOutcome::SlotAlreadyProcessed(attest_slot))
);
// One slot after the attest slot...
slot_clock.set_slot(attest_slot.as_u64() + 1);
assert_eq!(
attester.poll(),
Ok(PollOutcome::AttestationNotRequired(attest_slot + 1))
);
// In an epoch without known duties...
let slot = (attest_epoch + 1) * spec.slots_per_epoch;
slot_clock.set_slot(slot.into());
assert_eq!(
attester.poll(),
Ok(PollOutcome::ProducerDutiesUnknown(slot))
);
}
}
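
A sketch of how a driving service might consume the `PollOutcome` values above; everything here is illustrative, and it assumes the items defined in this file (plus `SlotClock`) are in scope.

```rust
// Illustrative only, not part of the removed crate: poll the attester once
// per slot tick and react to the outcome.
fn poll_attester_once<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer>(
    attester: &mut Attester<T, U, V, W>,
) {
    match attester.poll() {
        // An attestation was signed and published via the `BeaconNode`.
        Ok(PollOutcome::AttestationProduced(slot)) => {
            println!("attestation produced at slot {:?}", slot)
        }
        // All other outcomes mean "nothing to do right now".
        Ok(outcome) => println!("no attestation produced: {:?}", outcome),
        // Genuine errors (e.g., a poisoned duties map) should be surfaced.
        Err(error) => eprintln!("attester poll failed: {:?}", error),
    }
}
```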


@ -1,44 +0,0 @@
use crate::{DutiesReader, DutiesReaderError};
use std::collections::HashMap;
use types::{Epoch, Slot};
pub struct EpochMap {
slots_per_epoch: u64,
validator_index: Option<u64>,
map: HashMap<Epoch, (Slot, u64)>,
}
impl EpochMap {
pub fn new(slots_per_epoch: u64) -> Self {
Self {
slots_per_epoch,
validator_index: None,
map: HashMap::new(),
}
}
pub fn insert_attestation_shard(&mut self, slot: Slot, shard: u64) {
let epoch = slot.epoch(self.slots_per_epoch);
self.map.insert(epoch, (slot, shard));
}
pub fn set_validator_index(&mut self, index: Option<u64>) {
self.validator_index = index;
}
}
impl DutiesReader for EpochMap {
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError> {
let epoch = slot.epoch(self.slots_per_epoch);
match self.map.get(&epoch) {
Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)),
Some((attest_slot, _attest_shard)) if *attest_slot != slot => Ok(None),
_ => Err(DutiesReaderError::UnknownEpoch),
}
}
fn validator_index(&self) -> Option<u64> {
self.validator_index
}
}


@ -1,31 +0,0 @@
use crate::traits::Signer;
use std::sync::RwLock;
use types::{Keypair, Signature};
/// A test-only struct used to simulate a signer.
pub struct LocalSigner {
keypair: Keypair,
should_sign: RwLock<bool>,
}
impl LocalSigner {
/// Produce a new LocalSigner with signing enabled by default.
pub fn new(keypair: Keypair) -> Self {
Self {
keypair,
should_sign: RwLock::new(true),
}
}
/// If set to `false`, the signer will refuse to sign any messages. Otherwise, all messages
/// will be signed.
pub fn enable_signing(&self, enabled: bool) {
*self.should_sign.write().unwrap() = enabled;
}
}
impl Signer for LocalSigner {
    fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
        // Refuse to sign if signing has been disabled via `enable_signing(false)`.
        if !*self.should_sign.read().unwrap() {
            return None;
        }
        Some(Signature::new(message, domain, &self.keypair.sk))
    }
}


@ -1,7 +0,0 @@
mod epoch_map;
mod local_signer;
mod simulated_beacon_node;
pub use self::epoch_map::EpochMap;
pub use self::local_signer::LocalSigner;
pub use self::simulated_beacon_node::SimulatedBeaconNode;


@ -1,44 +0,0 @@
use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome};
use std::sync::RwLock;
use types::{AttestationData, FreeAttestation, Slot};
type ProduceResult = Result<Option<AttestationData>, BeaconNodeError>;
type PublishResult = Result<PublishOutcome, BeaconNodeError>;
/// A test-only struct used to simulate a Beacon Node.
#[derive(Default)]
pub struct SimulatedBeaconNode {
pub produce_input: RwLock<Option<(Slot, u64)>>,
pub produce_result: RwLock<Option<ProduceResult>>,
pub publish_input: RwLock<Option<FreeAttestation>>,
pub publish_result: RwLock<Option<PublishResult>>,
}
impl SimulatedBeaconNode {
pub fn set_next_produce_result(&self, result: ProduceResult) {
*self.produce_result.write().unwrap() = Some(result);
}
pub fn set_next_publish_result(&self, result: PublishResult) {
*self.publish_result.write().unwrap() = Some(result);
}
}
impl BeaconNode for SimulatedBeaconNode {
fn produce_attestation_data(&self, slot: Slot, shard: u64) -> ProduceResult {
*self.produce_input.write().unwrap() = Some((slot, shard));
match *self.produce_result.read().unwrap() {
Some(ref r) => r.clone(),
None => panic!("TestBeaconNode: produce_result == None"),
}
}
fn publish_attestation(&self, free_attestation: FreeAttestation) -> PublishResult {
*self.publish_input.write().unwrap() = Some(free_attestation.clone());
match *self.publish_result.read().unwrap() {
Some(ref r) => r.clone(),
None => panic!("TestBeaconNode: publish_result == None"),
}
}
}


@ -1,49 +0,0 @@
use types::{AttestationData, FreeAttestation, Signature, Slot};
#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeError {
RemoteFailure(String),
DecodeFailure,
}
#[derive(Debug, PartialEq, Clone)]
pub enum PublishOutcome {
ValidAttestation,
InvalidAttestation(String),
}
/// Defines the methods required to produce and publish attestations on a Beacon Node.
pub trait BeaconNode: Send + Sync {
fn produce_attestation_data(
&self,
slot: Slot,
shard: u64,
) -> Result<Option<AttestationData>, BeaconNodeError>;
fn publish_attestation(
&self,
free_attestation: FreeAttestation,
) -> Result<PublishOutcome, BeaconNodeError>;
}
#[derive(Debug, PartialEq, Clone)]
pub enum DutiesReaderError {
UnknownValidator,
UnknownEpoch,
EpochLengthIsZero,
Poisoned,
}
/// Informs a validator of their duties (e.g., attestation production).
pub trait DutiesReader: Send + Sync {
/// Returns `Some(shard)` if this slot is an attestation slot. Otherwise, returns `None`.
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError>;
/// Returns the validator's index in the validator registry, if known.
fn validator_index(&self) -> Option<u64>;
}
/// Signs message using an internally-maintained private key.
pub trait Signer {
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature>;
}


@ -1,12 +0,0 @@
[package]
name = "block_proposer"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
int_to_bytes = { path = "../utils/int_to_bytes" }
slot_clock = { path = "../utils/slot_clock" }
ssz = { path = "../utils/ssz" }
tree_hash = { path = "../../eth2/utils/tree_hash" }
types = { path = "../types" }


@ -1,303 +0,0 @@
pub mod test_utils;
mod traits;
use slot_clock::SlotClock;
use std::sync::Arc;
use tree_hash::{SignedRoot, TreeHash};
use types::{BeaconBlock, ChainSpec, Domain, Slot};
pub use self::traits::{
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
};
#[derive(Debug, PartialEq)]
pub enum PollOutcome {
/// A new block was produced.
BlockProduced(Slot),
/// A block was not produced as it would have been slashable.
SlashableBlockNotProduced(Slot),
/// The validator duties did not require a block to be produced.
BlockProductionNotRequired(Slot),
/// The duties for the present epoch were not found.
ProducerDutiesUnknown(Slot),
/// The slot has already been processed, execution was skipped.
SlotAlreadyProcessed(Slot),
/// The Beacon Node was unable to produce a block at that slot.
BeaconNodeUnableToProduceBlock(Slot),
/// The signer failed to sign the message.
SignerRejection(Slot),
/// The public key for this validator is not an active validator.
ValidatorIsUnknown(Slot),
/// Unable to determine a `Fork` for signature domain generation.
UnableToGetFork(Slot),
}
#[derive(Debug, PartialEq)]
pub enum Error {
SlotClockError,
SlotUnknowable,
EpochMapPoisoned,
SlotClockPoisoned,
EpochLengthIsZero,
BeaconNodeError(BeaconNodeError),
}
/// A polling state machine which performs block production duties, based upon some epoch duties
/// (`EpochDutiesMap`) and a concept of time (`SlotClock`).
///
/// Ensures that messages are not slashable.
///
/// Relies upon an external service to keep the `EpochDutiesMap` updated.
pub struct BlockProducer<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
pub last_processed_slot: Option<Slot>,
spec: Arc<ChainSpec>,
epoch_map: Arc<V>,
slot_clock: Arc<T>,
beacon_node: Arc<U>,
signer: Arc<W>,
}
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U, V, W> {
/// Returns a new instance where `last_processed_slot == None`.
pub fn new(
spec: Arc<ChainSpec>,
epoch_map: Arc<V>,
slot_clock: Arc<T>,
beacon_node: Arc<U>,
signer: Arc<W>,
) -> Self {
Self {
last_processed_slot: None,
spec,
epoch_map,
slot_clock,
beacon_node,
signer,
}
}
}
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U, V, W> {
/// "Poll" to see if the validator is required to take any action.
///
/// The slot clock will be read and any new actions undertaken.
pub fn poll(&mut self) -> Result<PollOutcome, Error> {
let slot = self
.slot_clock
.present_slot()
.map_err(|_| Error::SlotClockError)?
.ok_or(Error::SlotUnknowable)?;
// If this is a new slot.
if !self.is_processed_slot(slot) {
let is_block_production_slot = match self.epoch_map.is_block_production_slot(slot) {
Ok(result) => result,
Err(DutiesReaderError::UnknownEpoch) => {
return Ok(PollOutcome::ProducerDutiesUnknown(slot));
}
Err(DutiesReaderError::UnknownValidator) => {
return Ok(PollOutcome::ValidatorIsUnknown(slot));
}
Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero),
Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned),
};
if is_block_production_slot {
self.last_processed_slot = Some(slot);
self.produce_block(slot)
} else {
Ok(PollOutcome::BlockProductionNotRequired(slot))
}
} else {
Ok(PollOutcome::SlotAlreadyProcessed(slot))
}
}
fn is_processed_slot(&self, slot: Slot) -> bool {
match self.last_processed_slot {
Some(processed_slot) if processed_slot >= slot => true,
_ => false,
}
}
/// Produce a block at some slot.
///
/// Assumes that a block is required at this slot (does not check the duties).
///
/// Ensures the message is not slashable.
///
/// !!! UNSAFE !!!
///
/// The slash-protection code is not yet implemented. There is zero protection against
/// slashing.
fn produce_block(&mut self, slot: Slot) -> Result<PollOutcome, Error> {
let fork = match self.epoch_map.fork() {
Ok(fork) => fork,
Err(_) => return Ok(PollOutcome::UnableToGetFork(slot)),
};
let randao_reveal = {
// TODO: add domain, etc. to this message. Also ensure the result matches `int_to_bytes32`.
let message = slot.epoch(self.spec.slots_per_epoch).tree_hash_root();
match self.signer.sign_randao_reveal(
&message,
self.spec
.get_domain(slot.epoch(self.spec.slots_per_epoch), Domain::Randao, &fork),
) {
None => return Ok(PollOutcome::SignerRejection(slot)),
Some(signature) => signature,
}
};
if let Some(block) = self
.beacon_node
.produce_beacon_block(slot, &randao_reveal)?
{
if self.safe_to_produce(&block) {
let domain = self.spec.get_domain(
slot.epoch(self.spec.slots_per_epoch),
Domain::BeaconBlock,
&fork,
);
if let Some(block) = self.sign_block(block, domain) {
self.beacon_node.publish_beacon_block(block)?;
Ok(PollOutcome::BlockProduced(slot))
} else {
Ok(PollOutcome::SignerRejection(slot))
}
} else {
Ok(PollOutcome::SlashableBlockNotProduced(slot))
}
} else {
Ok(PollOutcome::BeaconNodeUnableToProduceBlock(slot))
}
}
/// Consumes a block, returning that block signed by the validator's private key.
///
/// Important: this function will not check to ensure the block is not slashable. This must be
/// done upstream.
fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option<BeaconBlock> {
self.store_produce(&block);
match self
.signer
.sign_block_proposal(&block.signed_root()[..], domain)
{
None => None,
Some(signature) => {
block.signature = signature;
Some(block)
}
}
}
/// Returns `true` if signing a block is safe (non-slashable).
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn safe_to_produce(&self, _block: &BeaconBlock) -> bool {
// TODO: ensure the producer doesn't produce slashable blocks.
// https://github.com/sigp/lighthouse/issues/160
true
}
/// Record that a block was produced so that slashable votes may not be made in the future.
///
/// !!! UNSAFE !!!
///
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
fn store_produce(&mut self, _block: &BeaconBlock) {
// TODO: record this block production to prevent future slashings.
// https://github.com/sigp/lighthouse/issues/160
}
}
impl From<BeaconNodeError> for Error {
fn from(e: BeaconNodeError) -> Error {
Error::BeaconNodeError(e)
}
}
#[cfg(test)]
mod tests {
use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode};
use super::*;
use slot_clock::TestingSlotClock;
use types::{
test_utils::{SeedableRng, TestRandom, XorShiftRng},
Keypair,
};
// TODO: implement more thorough testing.
// https://github.com/sigp/lighthouse/issues/160
//
// These tests should serve as a good example for future tests.
#[test]
pub fn polling() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let spec = Arc::new(ChainSpec::foundation());
let slot_clock = Arc::new(TestingSlotClock::new(0));
let beacon_node = Arc::new(SimulatedBeaconNode::default());
let signer = Arc::new(LocalSigner::new(Keypair::random()));
let mut epoch_map = EpochMap::new(spec.slots_per_epoch);
let produce_slot = Slot::new(100);
let produce_epoch = produce_slot.epoch(spec.slots_per_epoch);
epoch_map.map.insert(produce_epoch, produce_slot);
let epoch_map = Arc::new(epoch_map);
let mut block_proposer = BlockProducer::new(
spec.clone(),
epoch_map.clone(),
slot_clock.clone(),
beacon_node.clone(),
signer.clone(),
);
// Configure responses from the BeaconNode.
beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng))));
beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock));
// One slot before production slot...
slot_clock.set_slot(produce_slot.as_u64() - 1);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1))
);
// On the produce slot...
slot_clock.set_slot(produce_slot.as_u64());
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProduced(produce_slot.into()))
);
// Trying the same produce slot again...
slot_clock.set_slot(produce_slot.as_u64());
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::SlotAlreadyProcessed(produce_slot))
);
// One slot after the produce slot...
slot_clock.set_slot(produce_slot.as_u64() + 1);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1))
);
// In an epoch without known duties...
let slot = (produce_epoch.as_u64() + 1) * spec.slots_per_epoch;
slot_clock.set_slot(slot);
assert_eq!(
block_proposer.poll(),
Ok(PollOutcome::ProducerDutiesUnknown(Slot::new(slot)))
);
}
}
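
For completeness, a sketch showing how a `BlockProducer` can be assembled from the test utilities defined in this crate and polled once; the function name is hypothetical and the outcome noted in the final comment is an expectation, not a guarantee.

```rust
// Illustrative only, not part of the removed crate.
fn demo_block_producer_poll() -> Result<PollOutcome, Error> {
    use crate::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode};
    use slot_clock::TestingSlotClock;
    use std::sync::Arc;
    use types::{ChainSpec, Keypair};

    let spec = Arc::new(ChainSpec::foundation());
    // No production slots are registered, so no duties are known yet.
    let epoch_map = Arc::new(EpochMap::new(spec.slots_per_epoch));
    let slot_clock = Arc::new(TestingSlotClock::new(0));
    let beacon_node = Arc::new(SimulatedBeaconNode::default());
    let signer = Arc::new(LocalSigner::new(Keypair::random()));

    let mut producer = BlockProducer::new(spec, epoch_map, slot_clock, beacon_node, signer);

    // With an empty EpochMap this should report `ProducerDutiesUnknown`.
    producer.poll()
}
```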


@ -1,36 +0,0 @@
use crate::{DutiesReader, DutiesReaderError};
use std::collections::HashMap;
use types::{Epoch, Fork, Slot};
pub struct EpochMap {
slots_per_epoch: u64,
pub map: HashMap<Epoch, Slot>,
}
impl EpochMap {
pub fn new(slots_per_epoch: u64) -> Self {
Self {
slots_per_epoch,
map: HashMap::new(),
}
}
}
impl DutiesReader for EpochMap {
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> {
let epoch = slot.epoch(self.slots_per_epoch);
match self.map.get(&epoch) {
Some(s) if *s == slot => Ok(true),
Some(s) if *s != slot => Ok(false),
_ => Err(DutiesReaderError::UnknownEpoch),
}
}
fn fork(&self) -> Result<Fork, DutiesReaderError> {
Ok(Fork {
previous_version: [0; 4],
current_version: [0; 4],
epoch: Epoch::new(0),
})
}
}


@ -1,35 +0,0 @@
use crate::traits::Signer;
use std::sync::RwLock;
use types::{Keypair, Signature};
/// A test-only struct used to simulate a signer.
pub struct LocalSigner {
keypair: Keypair,
should_sign: RwLock<bool>,
}
impl LocalSigner {
/// Produce a new LocalSigner with signing enabled by default.
pub fn new(keypair: Keypair) -> Self {
Self {
keypair,
should_sign: RwLock::new(true),
}
}
/// If set to `false`, the signer will refuse to sign any messages. Otherwise, all messages
/// will be signed.
pub fn enable_signing(&self, enabled: bool) {
*self.should_sign.write().unwrap() = enabled;
}
}
impl Signer for LocalSigner {
    fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
        // Refuse to sign if signing has been disabled via `enable_signing(false)`.
        if !*self.should_sign.read().unwrap() {
            return None;
        }
        Some(Signature::new(message, domain, &self.keypair.sk))
    }
    fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
        if !*self.should_sign.read().unwrap() {
            return None;
        }
        Some(Signature::new(message, domain, &self.keypair.sk))
    }
}


@ -1,7 +0,0 @@
mod epoch_map;
mod local_signer;
mod simulated_beacon_node;
pub use self::epoch_map::EpochMap;
pub use self::local_signer::LocalSigner;
pub use self::simulated_beacon_node::SimulatedBeaconNode;


@ -1,48 +0,0 @@
use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome};
use std::sync::RwLock;
use types::{BeaconBlock, Signature, Slot};
type ProduceResult = Result<Option<BeaconBlock>, BeaconNodeError>;
type PublishResult = Result<PublishOutcome, BeaconNodeError>;
/// A test-only struct used to simulate a Beacon Node.
#[derive(Default)]
pub struct SimulatedBeaconNode {
pub produce_input: RwLock<Option<(Slot, Signature)>>,
pub produce_result: RwLock<Option<ProduceResult>>,
pub publish_input: RwLock<Option<BeaconBlock>>,
pub publish_result: RwLock<Option<PublishResult>>,
}
impl SimulatedBeaconNode {
/// Set the result to be returned when `produce_beacon_block` is called.
pub fn set_next_produce_result(&self, result: ProduceResult) {
*self.produce_result.write().unwrap() = Some(result);
}
/// Set the result to be returned when `publish_beacon_block` is called.
pub fn set_next_publish_result(&self, result: PublishResult) {
*self.publish_result.write().unwrap() = Some(result);
}
}
impl BeaconNode for SimulatedBeaconNode {
/// Returns the value specified by the `set_next_produce_result`.
fn produce_beacon_block(&self, slot: Slot, randao_reveal: &Signature) -> ProduceResult {
*self.produce_input.write().unwrap() = Some((slot, randao_reveal.clone()));
match *self.produce_result.read().unwrap() {
Some(ref r) => r.clone(),
None => panic!("SimulatedBeaconNode: produce_result == None"),
}
}
/// Returns the value specified by the `set_next_publish_result`.
fn publish_beacon_block(&self, block: BeaconBlock) -> PublishResult {
*self.publish_input.write().unwrap() = Some(block);
match *self.publish_result.read().unwrap() {
Some(ref r) => r.clone(),
None => panic!("SimulatedBeaconNode: publish_result == None"),
}
}
}


@ -1,50 +0,0 @@
use types::{BeaconBlock, Fork, Signature, Slot};
#[derive(Debug, PartialEq, Clone)]
pub enum BeaconNodeError {
RemoteFailure(String),
DecodeFailure,
}
#[derive(Debug, PartialEq, Clone)]
pub enum PublishOutcome {
ValidBlock,
InvalidBlock(String),
}
/// Defines the methods required to produce and publish blocks on a Beacon Node.
pub trait BeaconNode: Send + Sync {
/// Request that the node produces a block.
///
/// Returns Ok(None) if the Beacon Node is unable to produce at the given slot.
fn produce_beacon_block(
&self,
slot: Slot,
randao_reveal: &Signature,
) -> Result<Option<BeaconBlock>, BeaconNodeError>;
/// Request that the node publishes a block.
///
/// Returns `PublishOutcome::ValidBlock` if the publish was successful.
fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError>;
}
#[derive(Debug, PartialEq, Clone)]
pub enum DutiesReaderError {
UnknownValidator,
UnknownEpoch,
EpochLengthIsZero,
Poisoned,
}
/// Informs a validator of their duties (e.g., block production).
pub trait DutiesReader: Send + Sync {
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError>;
fn fork(&self) -> Result<Fork, DutiesReaderError>;
}
/// Signs message using an internally-maintained private key.
pub trait Signer {
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature>;
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature>;
}


@ -13,8 +13,6 @@ name = "validator_client"
path = "src/lib.rs"
[dependencies]
block_proposer = { path = "../eth2/block_proposer" }
attester = { path = "../eth2/attester" }
bls = { path = "../eth2/utils/bls" }
ssz = { path = "../eth2/utils/ssz" }
tree_hash = { path = "../eth2/utils/tree_hash" }