Merge branch 'fixed-vec' into sos
This commit is contained in:
commit
3ef46c03d1
36
.gitlab-ci.yml
Normal file
36
.gitlab-ci.yml
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
#Adapted from https://users.rust-lang.org/t/my-gitlab-config-docs-tests/16396
|
||||||
|
|
||||||
|
image: 'sigp/lighthouse:latest'
|
||||||
|
|
||||||
|
stages:
|
||||||
|
- test
|
||||||
|
- document
|
||||||
|
|
||||||
|
variables:
|
||||||
|
CARGO_HOME: /cache/cargocache
|
||||||
|
|
||||||
|
check-fmt:
|
||||||
|
stage: test
|
||||||
|
script:
|
||||||
|
- cargo build --manifest-path protos/Cargo.toml
|
||||||
|
- cargo fmt --all -- --check
|
||||||
|
|
||||||
|
test-dev:
|
||||||
|
stage: test
|
||||||
|
script:
|
||||||
|
- cargo test --verbose --all
|
||||||
|
|
||||||
|
test-release:
|
||||||
|
stage: test
|
||||||
|
script:
|
||||||
|
- cargo test --verbose --all --release
|
||||||
|
|
||||||
|
documentation:
|
||||||
|
stage: document
|
||||||
|
script:
|
||||||
|
- cargo doc --no-deps
|
||||||
|
- aws s3 sync target/doc/ s3://lighthouse-docs.sigmaprime.io/ --exclude '.lock' --delete
|
||||||
|
# Configure the below when we want to have a default page (and update S3 bucket index).
|
||||||
|
# - echo '<meta http-equiv="refresh" content="0; url={{ LIBRARY NAME }}">' > public/index.html
|
||||||
|
only:
|
||||||
|
- master
|
23
.travis.yml
23
.travis.yml
@ -2,8 +2,6 @@ language: rust
|
|||||||
cache:
|
cache:
|
||||||
directories:
|
directories:
|
||||||
- /home/travis/.cargo
|
- /home/travis/.cargo
|
||||||
before_cache:
|
|
||||||
- rm -rf /home/travis/.cargo/registry
|
|
||||||
before_install:
|
before_install:
|
||||||
- curl -OL https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-linux-x86_64.zip
|
- curl -OL https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-linux-x86_64.zip
|
||||||
- unzip protoc-3.4.0-linux-x86_64.zip -d protoc3
|
- unzip protoc-3.4.0-linux-x86_64.zip -d protoc3
|
||||||
@ -11,33 +9,14 @@ before_install:
|
|||||||
- sudo mv protoc3/include/* /usr/local/include/
|
- sudo mv protoc3/include/* /usr/local/include/
|
||||||
- sudo chown $USER /usr/local/bin/protoc
|
- sudo chown $USER /usr/local/bin/protoc
|
||||||
- sudo chown -R $USER /usr/local/include/google
|
- sudo chown -R $USER /usr/local/include/google
|
||||||
env:
|
|
||||||
- BUILD=--all
|
|
||||||
- BUILD=--release --all
|
|
||||||
- BUILD= --manifest-path eth2/state_processing/Cargo.toml --release --features fake_crypto
|
|
||||||
script:
|
script:
|
||||||
- cargo build --verbose $BUILD
|
- cargo build --verbose --all --release
|
||||||
- cargo test --verbose $BUILD
|
|
||||||
- cargo fmt --all -- --check
|
|
||||||
# No clippy until later...
|
|
||||||
#- cargo clippy
|
|
||||||
rust:
|
rust:
|
||||||
- stable
|
|
||||||
- beta
|
- beta
|
||||||
- nightly
|
- nightly
|
||||||
matrix:
|
matrix:
|
||||||
allow_failures:
|
allow_failures:
|
||||||
- rust: nightly
|
- rust: nightly
|
||||||
fast_finish: true
|
fast_finish: true
|
||||||
exclude:
|
|
||||||
- rust: beta
|
|
||||||
env: BUILD=--release --all
|
|
||||||
- rust: beta
|
|
||||||
env: BUILD= --manifest-path eth2/state_processing/Cargo.toml --release --features fake_crypto
|
|
||||||
- rust: nightly
|
|
||||||
env: BUILD=--release --all
|
|
||||||
- rust: nightly
|
|
||||||
env: BUILD= --manifest-path eth2/state_processing/Cargo.toml --release --features fake_crypto
|
|
||||||
install:
|
install:
|
||||||
- rustup component add rustfmt
|
- rustup component add rustfmt
|
||||||
- rustup component add clippy
|
|
||||||
|
@ -1,15 +1,13 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
members = [
|
members = [
|
||||||
"eth2/attester",
|
|
||||||
"eth2/block_proposer",
|
|
||||||
"eth2/fork_choice",
|
"eth2/fork_choice",
|
||||||
"eth2/operation_pool",
|
"eth2/operation_pool",
|
||||||
"eth2/state_processing",
|
"eth2/state_processing",
|
||||||
"eth2/state_processing/yaml_utils",
|
|
||||||
"eth2/types",
|
"eth2/types",
|
||||||
"eth2/utils/bls",
|
"eth2/utils/bls",
|
||||||
"eth2/utils/boolean-bitfield",
|
"eth2/utils/boolean-bitfield",
|
||||||
"eth2/utils/cached_tree_hash",
|
"eth2/utils/cached_tree_hash",
|
||||||
|
"eth2/utils/fixed_len_vec",
|
||||||
"eth2/utils/hashing",
|
"eth2/utils/hashing",
|
||||||
"eth2/utils/honey-badger-split",
|
"eth2/utils/honey-badger-split",
|
||||||
"eth2/utils/merkle_proof",
|
"eth2/utils/merkle_proof",
|
||||||
@ -31,7 +29,6 @@ members = [
|
|||||||
"beacon_node/rpc",
|
"beacon_node/rpc",
|
||||||
"beacon_node/version",
|
"beacon_node/version",
|
||||||
"beacon_node/beacon_chain",
|
"beacon_node/beacon_chain",
|
||||||
"beacon_node/beacon_chain/test_harness",
|
|
||||||
"protos",
|
"protos",
|
||||||
"validator_client",
|
"validator_client",
|
||||||
"account_manager",
|
"account_manager",
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
FROM rust:latest
|
FROM rust:latest
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y clang libclang-dev cmake build-essential git unzip autoconf libtool
|
RUN apt-get update && apt-get install -y clang libclang-dev cmake build-essential git unzip autoconf libtool awscli
|
||||||
|
|
||||||
RUN git clone https://github.com/google/protobuf.git && \
|
RUN git clone https://github.com/google/protobuf.git && \
|
||||||
cd protobuf && \
|
cd protobuf && \
|
||||||
@ -14,8 +14,8 @@ RUN git clone https://github.com/google/protobuf.git && \
|
|||||||
rm -r protobuf
|
rm -r protobuf
|
||||||
|
|
||||||
|
|
||||||
RUN mkdir /cargocache && chmod -R ugo+rwX /cargocache
|
RUN mkdir -p /cache/cargocache && chmod -R ugo+rwX /cache/cargocache
|
||||||
|
|
||||||
ENV CARGO_HOME /cargocache
|
ENV CARGO_HOME /cache/cargocache
|
||||||
|
|
||||||
RUN rustup component add rustfmt clippy
|
RUN rustup component add rustfmt clippy
|
||||||
|
2
Jenkinsfile
vendored
2
Jenkinsfile
vendored
@ -2,7 +2,7 @@ pipeline {
|
|||||||
agent {
|
agent {
|
||||||
dockerfile {
|
dockerfile {
|
||||||
filename 'Dockerfile'
|
filename 'Dockerfile'
|
||||||
args '-v cargo-cache:/cargocache:rw -e "CARGO_HOME=/cargocache"'
|
args '-v cargo-cache:/cache/cargocache:rw -e "CARGO_HOME=/cache/cargocache"'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stages {
|
stages {
|
||||||
|
@ -24,6 +24,7 @@ present-Ethereum functionality.
|
|||||||
- [About Lighthouse](docs/lighthouse.md): Goals, Ideology and Ethos surrounding
|
- [About Lighthouse](docs/lighthouse.md): Goals, Ideology and Ethos surrounding
|
||||||
this implementation.
|
this implementation.
|
||||||
- [What is Ethereum Serenity](docs/serenity.md): an introduction to Ethereum Serenity.
|
- [What is Ethereum Serenity](docs/serenity.md): an introduction to Ethereum Serenity.
|
||||||
|
- [Lighthouse Technical Documentation](http://lighthouse-docs.sigmaprime.io/): The Rust generated documentation, updated regularly.
|
||||||
|
|
||||||
If you'd like some background on Sigma Prime, please see the [Lighthouse Update
|
If you'd like some background on Sigma Prime, please see the [Lighthouse Update
|
||||||
\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or the
|
\#00](https://lighthouse.sigmaprime.io/update-00.html) blog post or the
|
||||||
|
@ -11,3 +11,4 @@ slog = "^2.2.3"
|
|||||||
slog-term = "^2.4.0"
|
slog-term = "^2.4.0"
|
||||||
slog-async = "^2.3.0"
|
slog-async = "^2.3.0"
|
||||||
validator_client = { path = "../validator_client" }
|
validator_client = { path = "../validator_client" }
|
||||||
|
types = { path = "../eth2/types" }
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
# Lighthouse Accounts Manager
|
# Lighthouse Account Manager
|
||||||
|
|
||||||
The accounts manager (AM) is a stand-alone binary which allows
|
The account manager (AM) is a stand-alone binary which allows
|
||||||
users to generate and manage the cryptographic keys necessary to
|
users to generate and manage the cryptographic keys necessary to
|
||||||
interact with Ethereum Serenity.
|
interact with Ethereum Serenity.
|
||||||
|
|
||||||
@ -21,4 +21,14 @@ staking on Ethereum 1.x (TPD)
|
|||||||
The AM is not a service, and does not run continuously, nor does it
|
The AM is not a service, and does not run continuously, nor does it
|
||||||
interact with any running services.
|
interact with any running services.
|
||||||
It is intended to be executed separately from other Lighthouse binaries
|
It is intended to be executed separately from other Lighthouse binaries
|
||||||
and produce files which can be consumed by them.
|
and produce files which can be consumed by them.&
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Simply run `./account_manager generate` to generate a new random private key,
|
||||||
|
which will be automatically saved to the correct directory.
|
||||||
|
|
||||||
|
If you prefer to use our "deterministic" keys for testing purposes, simply
|
||||||
|
run `./accounts_manager generate_deterministic -i <index>`, where `index` is
|
||||||
|
the validator index for the key. This will reliably produce the same key each time
|
||||||
|
and save it to the directory.
|
@ -2,6 +2,7 @@ use bls::Keypair;
|
|||||||
use clap::{App, Arg, SubCommand};
|
use clap::{App, Arg, SubCommand};
|
||||||
use slog::{debug, info, o, Drain};
|
use slog::{debug, info, o, Drain};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
use types::test_utils::generate_deterministic_keypair;
|
||||||
use validator_client::Config as ValidatorClientConfig;
|
use validator_client::Config as ValidatorClientConfig;
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
@ -29,6 +30,21 @@ fn main() {
|
|||||||
.version("0.0.1")
|
.version("0.0.1")
|
||||||
.author("Sigma Prime <contact@sigmaprime.io>"),
|
.author("Sigma Prime <contact@sigmaprime.io>"),
|
||||||
)
|
)
|
||||||
|
.subcommand(
|
||||||
|
SubCommand::with_name("generate_deterministic")
|
||||||
|
.about("Generates a deterministic validator private key FOR TESTING")
|
||||||
|
.version("0.0.1")
|
||||||
|
.author("Sigma Prime <contact@sigmaprime.io>")
|
||||||
|
.arg(
|
||||||
|
Arg::with_name("validator index")
|
||||||
|
.long("index")
|
||||||
|
.short("i")
|
||||||
|
.value_name("index")
|
||||||
|
.help("The index of the validator, for which the test key is generated")
|
||||||
|
.takes_value(true)
|
||||||
|
.required(true),
|
||||||
|
),
|
||||||
|
)
|
||||||
.get_matches();
|
.get_matches();
|
||||||
|
|
||||||
let config = ValidatorClientConfig::parse_args(&matches, &log)
|
let config = ValidatorClientConfig::parse_args(&matches, &log)
|
||||||
@ -51,6 +67,23 @@ fn main() {
|
|||||||
key_path.to_string_lossy()
|
key_path.to_string_lossy()
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
("generate_deterministic", Some(gen_d_matches)) => {
|
||||||
|
let validator_index = gen_d_matches
|
||||||
|
.value_of("validator index")
|
||||||
|
.expect("Validator index required.")
|
||||||
|
.parse::<u64>()
|
||||||
|
.expect("Invalid validator index.") as usize;
|
||||||
|
let keypair = generate_deterministic_keypair(validator_index);
|
||||||
|
let key_path: PathBuf = config
|
||||||
|
.save_key(&keypair)
|
||||||
|
.expect("Unable to save newly generated deterministic private key.");
|
||||||
|
debug!(
|
||||||
|
log,
|
||||||
|
"Deterministic Keypair generated {:?}, saved to: {:?}",
|
||||||
|
keypair.identifier(),
|
||||||
|
key_path.to_string_lossy()
|
||||||
|
);
|
||||||
|
}
|
||||||
_ => panic!(
|
_ => panic!(
|
||||||
"The account manager must be run with a subcommand. See help for more information."
|
"The account manager must be run with a subcommand. See help for more information."
|
||||||
),
|
),
|
||||||
|
@ -5,7 +5,6 @@ authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
block_proposer = { path = "../../eth2/block_proposer" }
|
|
||||||
bls = { path = "../../eth2/utils/bls" }
|
bls = { path = "../../eth2/utils/bls" }
|
||||||
boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" }
|
boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" }
|
||||||
db = { path = "../db" }
|
db = { path = "../db" }
|
||||||
|
@ -83,30 +83,31 @@ impl BlockProcessingOutcome {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice> {
|
pub struct BeaconChain<T: ClientDB + Sized, U: SlotClock, F: ForkChoice, B: EthSpec> {
|
||||||
pub block_store: Arc<BeaconBlockStore<T>>,
|
pub block_store: Arc<BeaconBlockStore<T>>,
|
||||||
pub state_store: Arc<BeaconStateStore<T>>,
|
pub state_store: Arc<BeaconStateStore<T>>,
|
||||||
pub slot_clock: U,
|
pub slot_clock: U,
|
||||||
pub op_pool: OperationPool,
|
pub op_pool: OperationPool<B>,
|
||||||
canonical_head: RwLock<CheckPoint>,
|
canonical_head: RwLock<CheckPoint<B>>,
|
||||||
finalized_head: RwLock<CheckPoint>,
|
finalized_head: RwLock<CheckPoint<B>>,
|
||||||
pub state: RwLock<BeaconState>,
|
pub state: RwLock<BeaconState<B>>,
|
||||||
pub spec: ChainSpec,
|
pub spec: ChainSpec,
|
||||||
pub fork_choice: RwLock<F>,
|
pub fork_choice: RwLock<F>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, U, F> BeaconChain<T, U, F>
|
impl<T, U, F, B> BeaconChain<T, U, F, B>
|
||||||
where
|
where
|
||||||
T: ClientDB,
|
T: ClientDB,
|
||||||
U: SlotClock,
|
U: SlotClock,
|
||||||
F: ForkChoice,
|
F: ForkChoice,
|
||||||
|
B: EthSpec,
|
||||||
{
|
{
|
||||||
/// Instantiate a new Beacon Chain, from genesis.
|
/// Instantiate a new Beacon Chain, from genesis.
|
||||||
pub fn from_genesis(
|
pub fn from_genesis(
|
||||||
state_store: Arc<BeaconStateStore<T>>,
|
state_store: Arc<BeaconStateStore<T>>,
|
||||||
block_store: Arc<BeaconBlockStore<T>>,
|
block_store: Arc<BeaconBlockStore<T>>,
|
||||||
slot_clock: U,
|
slot_clock: U,
|
||||||
mut genesis_state: BeaconState,
|
mut genesis_state: BeaconState<B>,
|
||||||
genesis_block: BeaconBlock,
|
genesis_block: BeaconBlock,
|
||||||
spec: ChainSpec,
|
spec: ChainSpec,
|
||||||
fork_choice: F,
|
fork_choice: F,
|
||||||
@ -190,7 +191,6 @@ where
|
|||||||
count: usize,
|
count: usize,
|
||||||
skip: usize,
|
skip: usize,
|
||||||
) -> Result<Vec<Hash256>, Error> {
|
) -> Result<Vec<Hash256>, Error> {
|
||||||
let spec = &self.spec;
|
|
||||||
let step_by = Slot::from(skip + 1);
|
let step_by = Slot::from(skip + 1);
|
||||||
|
|
||||||
let mut roots: Vec<Hash256> = vec![];
|
let mut roots: Vec<Hash256> = vec![];
|
||||||
@ -218,7 +218,7 @@ where
|
|||||||
//
|
//
|
||||||
// If we get `SlotOutOfBounds` error, load the oldest available historic
|
// If we get `SlotOutOfBounds` error, load the oldest available historic
|
||||||
// state from the DB.
|
// state from the DB.
|
||||||
match state.get_block_root(slot, spec) {
|
match state.get_block_root(slot) {
|
||||||
Ok(root) => {
|
Ok(root) => {
|
||||||
if slot < earliest_slot {
|
if slot < earliest_slot {
|
||||||
break;
|
break;
|
||||||
@ -230,9 +230,9 @@ where
|
|||||||
Err(BeaconStateError::SlotOutOfBounds) => {
|
Err(BeaconStateError::SlotOutOfBounds) => {
|
||||||
// Read the earliest historic state in the current slot.
|
// Read the earliest historic state in the current slot.
|
||||||
let earliest_historic_slot =
|
let earliest_historic_slot =
|
||||||
state.slot - Slot::from(spec.slots_per_historical_root);
|
state.slot - Slot::from(B::SlotsPerHistoricalRoot::to_usize());
|
||||||
// Load the earlier state from disk.
|
// Load the earlier state from disk.
|
||||||
let new_state_root = state.get_state_root(earliest_historic_slot, spec)?;
|
let new_state_root = state.get_state_root(earliest_historic_slot)?;
|
||||||
|
|
||||||
// Break if the DB is unable to load the state.
|
// Break if the DB is unable to load the state.
|
||||||
state = match self.state_store.get_deserialized(&new_state_root) {
|
state = match self.state_store.get_deserialized(&new_state_root) {
|
||||||
@ -270,7 +270,7 @@ where
|
|||||||
&self,
|
&self,
|
||||||
new_beacon_block: BeaconBlock,
|
new_beacon_block: BeaconBlock,
|
||||||
new_beacon_block_root: Hash256,
|
new_beacon_block_root: Hash256,
|
||||||
new_beacon_state: BeaconState,
|
new_beacon_state: BeaconState<B>,
|
||||||
new_beacon_state_root: Hash256,
|
new_beacon_state_root: Hash256,
|
||||||
) {
|
) {
|
||||||
debug!(
|
debug!(
|
||||||
@ -292,7 +292,7 @@ where
|
|||||||
/// It is important to note that the `beacon_state` returned may not match the present slot. It
|
/// It is important to note that the `beacon_state` returned may not match the present slot. It
|
||||||
/// is the state as it was when the head block was received, which could be some slots prior to
|
/// is the state as it was when the head block was received, which could be some slots prior to
|
||||||
/// now.
|
/// now.
|
||||||
pub fn head(&self) -> RwLockReadGuard<CheckPoint> {
|
pub fn head(&self) -> RwLockReadGuard<CheckPoint<B>> {
|
||||||
self.canonical_head.read()
|
self.canonical_head.read()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -302,7 +302,7 @@ where
|
|||||||
/// state and calling `catchup_state` as it will not result in an old state being installed and
|
/// state and calling `catchup_state` as it will not result in an old state being installed and
|
||||||
/// then having it iteratively updated -- in such a case it's possible for another thread to
|
/// then having it iteratively updated -- in such a case it's possible for another thread to
|
||||||
/// find the state at an old slot.
|
/// find the state at an old slot.
|
||||||
pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> {
|
pub fn update_state(&self, mut state: BeaconState<B>) -> Result<(), Error> {
|
||||||
let present_slot = match self.slot_clock.present_slot() {
|
let present_slot = match self.slot_clock.present_slot() {
|
||||||
Ok(Some(slot)) => slot,
|
Ok(Some(slot)) => slot,
|
||||||
_ => return Err(Error::UnableToReadSlot),
|
_ => return Err(Error::UnableToReadSlot),
|
||||||
@ -357,7 +357,7 @@ where
|
|||||||
&self,
|
&self,
|
||||||
new_beacon_block: BeaconBlock,
|
new_beacon_block: BeaconBlock,
|
||||||
new_beacon_block_root: Hash256,
|
new_beacon_block_root: Hash256,
|
||||||
new_beacon_state: BeaconState,
|
new_beacon_state: BeaconState<B>,
|
||||||
new_beacon_state_root: Hash256,
|
new_beacon_state_root: Hash256,
|
||||||
) {
|
) {
|
||||||
let mut finalized_head = self.finalized_head.write();
|
let mut finalized_head = self.finalized_head.write();
|
||||||
@ -371,7 +371,7 @@ where
|
|||||||
|
|
||||||
/// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen,
|
/// Returns a read-lock guarded `CheckPoint` struct for reading the justified head (as chosen,
|
||||||
/// indirectly, by the fork-choice rule).
|
/// indirectly, by the fork-choice rule).
|
||||||
pub fn finalized_head(&self) -> RwLockReadGuard<CheckPoint> {
|
pub fn finalized_head(&self) -> RwLockReadGuard<CheckPoint<B>> {
|
||||||
self.finalized_head.read()
|
self.finalized_head.read()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -493,17 +493,14 @@ where
|
|||||||
} else {
|
} else {
|
||||||
// If the current head block is not from this slot, use the slot from the previous
|
// If the current head block is not from this slot, use the slot from the previous
|
||||||
// epoch.
|
// epoch.
|
||||||
*self.state.read().get_block_root(
|
|
||||||
current_epoch_start_slot - self.spec.slots_per_epoch,
|
|
||||||
&self.spec,
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// If we're not on the first slot of the epoch.
|
|
||||||
*self
|
*self
|
||||||
.state
|
.state
|
||||||
.read()
|
.read()
|
||||||
.get_block_root(current_epoch_start_slot, &self.spec)?
|
.get_block_root(current_epoch_start_slot - self.spec.slots_per_epoch)?
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If we're not on the first slot of the epoch.
|
||||||
|
*self.state.read().get_block_root(current_epoch_start_slot)?
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(AttestationData {
|
Ok(AttestationData {
|
||||||
@ -667,7 +664,7 @@ where
|
|||||||
pub fn produce_block(
|
pub fn produce_block(
|
||||||
&self,
|
&self,
|
||||||
randao_reveal: Signature,
|
randao_reveal: Signature,
|
||||||
) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
|
) -> Result<(BeaconBlock, BeaconState<B>), BlockProductionError> {
|
||||||
debug!("Producing block at slot {}...", self.state.read().slot);
|
debug!("Producing block at slot {}...", self.state.read().slot);
|
||||||
|
|
||||||
let mut state = self.state.read().clone();
|
let mut state = self.state.read().clone();
|
||||||
@ -677,7 +674,7 @@ where
|
|||||||
trace!("Finding attestations for new block...");
|
trace!("Finding attestations for new block...");
|
||||||
|
|
||||||
let previous_block_root = *state
|
let previous_block_root = *state
|
||||||
.get_block_root(state.slot - 1, &self.spec)
|
.get_block_root(state.slot - 1)
|
||||||
.map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?;
|
.map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?;
|
||||||
|
|
||||||
let (proposer_slashings, attester_slashings) =
|
let (proposer_slashings, attester_slashings) =
|
||||||
@ -762,7 +759,7 @@ where
|
|||||||
///
|
///
|
||||||
/// This could be a very expensive operation and should only be done in testing/analysis
|
/// This could be a very expensive operation and should only be done in testing/analysis
|
||||||
/// activities.
|
/// activities.
|
||||||
pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, Error> {
|
pub fn chain_dump(&self) -> Result<Vec<CheckPoint<B>>, Error> {
|
||||||
let mut dump = vec![];
|
let mut dump = vec![];
|
||||||
|
|
||||||
let mut last_slot = CheckPoint {
|
let mut last_slot = CheckPoint {
|
||||||
|
@ -1,22 +1,22 @@
|
|||||||
use serde_derive::Serialize;
|
use serde_derive::Serialize;
|
||||||
use types::{BeaconBlock, BeaconState, Hash256};
|
use types::{BeaconBlock, BeaconState, EthSpec, Hash256};
|
||||||
|
|
||||||
/// Represents some block and it's associated state. Generally, this will be used for tracking the
|
/// Represents some block and it's associated state. Generally, this will be used for tracking the
|
||||||
/// head, justified head and finalized head.
|
/// head, justified head and finalized head.
|
||||||
#[derive(Clone, Serialize, PartialEq, Debug)]
|
#[derive(Clone, Serialize, PartialEq, Debug)]
|
||||||
pub struct CheckPoint {
|
pub struct CheckPoint<B: EthSpec> {
|
||||||
pub beacon_block: BeaconBlock,
|
pub beacon_block: BeaconBlock,
|
||||||
pub beacon_block_root: Hash256,
|
pub beacon_block_root: Hash256,
|
||||||
pub beacon_state: BeaconState,
|
pub beacon_state: BeaconState<B>,
|
||||||
pub beacon_state_root: Hash256,
|
pub beacon_state_root: Hash256,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl CheckPoint {
|
impl<B: EthSpec> CheckPoint<B> {
|
||||||
/// Create a new checkpoint.
|
/// Create a new checkpoint.
|
||||||
pub fn new(
|
pub fn new(
|
||||||
beacon_block: BeaconBlock,
|
beacon_block: BeaconBlock,
|
||||||
beacon_block_root: Hash256,
|
beacon_block_root: Hash256,
|
||||||
beacon_state: BeaconState,
|
beacon_state: BeaconState<B>,
|
||||||
beacon_state_root: Hash256,
|
beacon_state_root: Hash256,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
@ -32,7 +32,7 @@ impl CheckPoint {
|
|||||||
&mut self,
|
&mut self,
|
||||||
beacon_block: BeaconBlock,
|
beacon_block: BeaconBlock,
|
||||||
beacon_block_root: Hash256,
|
beacon_block_root: Hash256,
|
||||||
beacon_state: BeaconState,
|
beacon_state: BeaconState<B>,
|
||||||
beacon_state_root: Hash256,
|
beacon_state_root: Hash256,
|
||||||
) {
|
) {
|
||||||
self.beacon_block = beacon_block;
|
self.beacon_block = beacon_block;
|
||||||
|
@ -11,14 +11,21 @@ use std::path::PathBuf;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::test_utils::TestingBeaconStateBuilder;
|
use types::test_utils::TestingBeaconStateBuilder;
|
||||||
use types::{BeaconBlock, ChainSpec, Hash256};
|
use types::{BeaconBlock, ChainSpec, FewValidatorsEthSpec, FoundationEthSpec, Hash256};
|
||||||
|
|
||||||
//TODO: Correct this for prod
|
//TODO: Correct this for prod
|
||||||
//TODO: Account for historical db
|
//TODO: Account for historical db
|
||||||
pub fn initialise_beacon_chain(
|
pub fn initialise_beacon_chain(
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
db_name: Option<&PathBuf>,
|
db_name: Option<&PathBuf>,
|
||||||
) -> Arc<BeaconChain<DiskDB, SystemTimeSlotClock, BitwiseLMDGhost<DiskDB>>> {
|
) -> Arc<
|
||||||
|
BeaconChain<
|
||||||
|
DiskDB,
|
||||||
|
SystemTimeSlotClock,
|
||||||
|
BitwiseLMDGhost<DiskDB, FoundationEthSpec>,
|
||||||
|
FoundationEthSpec,
|
||||||
|
>,
|
||||||
|
> {
|
||||||
// set up the db
|
// set up the db
|
||||||
let db = Arc::new(DiskDB::open(
|
let db = Arc::new(DiskDB::open(
|
||||||
db_name.expect("Database directory must be included"),
|
db_name.expect("Database directory must be included"),
|
||||||
@ -64,7 +71,14 @@ pub fn initialise_beacon_chain(
|
|||||||
pub fn initialise_test_beacon_chain(
|
pub fn initialise_test_beacon_chain(
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
_db_name: Option<&PathBuf>,
|
_db_name: Option<&PathBuf>,
|
||||||
) -> Arc<BeaconChain<MemoryDB, SystemTimeSlotClock, BitwiseLMDGhost<MemoryDB>>> {
|
) -> Arc<
|
||||||
|
BeaconChain<
|
||||||
|
MemoryDB,
|
||||||
|
SystemTimeSlotClock,
|
||||||
|
BitwiseLMDGhost<MemoryDB, FewValidatorsEthSpec>,
|
||||||
|
FewValidatorsEthSpec,
|
||||||
|
>,
|
||||||
|
> {
|
||||||
let db = Arc::new(MemoryDB::open());
|
let db = Arc::new(MemoryDB::open());
|
||||||
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
|
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
|
||||||
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
|
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
|
||||||
|
@ -7,17 +7,18 @@ use fork_choice::BitwiseLMDGhost;
|
|||||||
use slot_clock::TestingSlotClock;
|
use slot_clock::TestingSlotClock;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::test_utils::TestingBeaconStateBuilder;
|
|
||||||
use types::*;
|
use types::*;
|
||||||
|
use types::{test_utils::TestingBeaconStateBuilder, EthSpec, FewValidatorsEthSpec};
|
||||||
|
|
||||||
type TestingBeaconChain = BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>;
|
type TestingBeaconChain<B> =
|
||||||
|
BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB, FewValidatorsEthSpec>, B>;
|
||||||
|
|
||||||
pub struct TestingBeaconChainBuilder {
|
pub struct TestingBeaconChainBuilder<B: EthSpec> {
|
||||||
state_builder: TestingBeaconStateBuilder,
|
state_builder: TestingBeaconStateBuilder<B>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TestingBeaconChainBuilder {
|
impl<B: EthSpec> TestingBeaconChainBuilder<B> {
|
||||||
pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain {
|
pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain<B> {
|
||||||
let db = Arc::new(MemoryDB::open());
|
let db = Arc::new(MemoryDB::open());
|
||||||
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
|
let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
|
||||||
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
|
let state_store = Arc::new(BeaconStateStore::new(db.clone()));
|
||||||
@ -43,8 +44,8 @@ impl TestingBeaconChainBuilder {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<TestingBeaconStateBuilder> for TestingBeaconChainBuilder {
|
impl<B: EthSpec> From<TestingBeaconStateBuilder<B>> for TestingBeaconChainBuilder<B> {
|
||||||
fn from(state_builder: TestingBeaconStateBuilder) -> TestingBeaconChainBuilder {
|
fn from(state_builder: TestingBeaconStateBuilder<B>) -> TestingBeaconChainBuilder<B> {
|
||||||
TestingBeaconChainBuilder { state_builder }
|
TestingBeaconChainBuilder { state_builder }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,43 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "test_harness"
|
|
||||||
version = "0.1.0"
|
|
||||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
|
||||||
edition = "2018"
|
|
||||||
|
|
||||||
[[bin]]
|
|
||||||
name = "test_harness"
|
|
||||||
path = "src/bin.rs"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
name = "test_harness"
|
|
||||||
path = "src/lib.rs"
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
state_processing = { path = "../../../eth2/state_processing" }
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
attester = { path = "../../../eth2/attester" }
|
|
||||||
beacon_chain = { path = "../../beacon_chain" }
|
|
||||||
block_proposer = { path = "../../../eth2/block_proposer" }
|
|
||||||
bls = { path = "../../../eth2/utils/bls" }
|
|
||||||
boolean-bitfield = { path = "../../../eth2/utils/boolean-bitfield" }
|
|
||||||
clap = "2.32.0"
|
|
||||||
db = { path = "../../db" }
|
|
||||||
parking_lot = "0.7"
|
|
||||||
failure = "0.1"
|
|
||||||
failure_derive = "0.1"
|
|
||||||
fork_choice = { path = "../../../eth2/fork_choice" }
|
|
||||||
hashing = { path = "../../../eth2/utils/hashing" }
|
|
||||||
int_to_bytes = { path = "../../../eth2/utils/int_to_bytes" }
|
|
||||||
log = "0.4"
|
|
||||||
env_logger = "0.6.0"
|
|
||||||
rayon = "1.0"
|
|
||||||
serde = "1.0"
|
|
||||||
serde_derive = "1.0"
|
|
||||||
serde_json = "1.0"
|
|
||||||
serde_yaml = "0.8"
|
|
||||||
slot_clock = { path = "../../../eth2/utils/slot_clock" }
|
|
||||||
ssz = { path = "../../../eth2/utils/ssz" }
|
|
||||||
tree_hash = { path = "../../../eth2/utils/tree_hash" }
|
|
||||||
types = { path = "../../../eth2/types" }
|
|
||||||
yaml-rust = "0.4.2"
|
|
@ -1,150 +0,0 @@
|
|||||||
# Test Harness
|
|
||||||
|
|
||||||
Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
|
|
||||||
|
|
||||||
This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
|
|
||||||
directly to the `BeaconChain` via an `Arc`.
|
|
||||||
|
|
||||||
The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
|
|
||||||
instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
|
|
||||||
producing blocks and attestations.
|
|
||||||
|
|
||||||
The crate consists of a library and binary, examples for using both are
|
|
||||||
described below.
|
|
||||||
|
|
||||||
## YAML
|
|
||||||
|
|
||||||
Both the library and the binary are capable of parsing tests from a YAML file,
|
|
||||||
in fact this is the sole purpose of the binary.
|
|
||||||
|
|
||||||
You can find YAML test cases [here](specs/). An example is included below:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
title: Validator Registry Tests
|
|
||||||
summary: Tests deposit and slashing effects on validator registry.
|
|
||||||
test_suite: validator_registry
|
|
||||||
fork: tchaikovsky
|
|
||||||
version: 1.0
|
|
||||||
test_cases:
|
|
||||||
- config:
|
|
||||||
slots_per_epoch: 64
|
|
||||||
deposits_for_chain_start: 1000
|
|
||||||
num_slots: 64
|
|
||||||
skip_slots: [2, 3]
|
|
||||||
deposits:
|
|
||||||
# At slot 1, create a new validator deposit of 32 ETH.
|
|
||||||
- slot: 1
|
|
||||||
amount: 32
|
|
||||||
# Trigger more deposits...
|
|
||||||
- slot: 3
|
|
||||||
amount: 32
|
|
||||||
- slot: 5
|
|
||||||
amount: 32
|
|
||||||
proposer_slashings:
|
|
||||||
# At slot 2, trigger a proposer slashing for validator #42.
|
|
||||||
- slot: 2
|
|
||||||
validator_index: 42
|
|
||||||
# Trigger another slashing...
|
|
||||||
- slot: 8
|
|
||||||
validator_index: 13
|
|
||||||
attester_slashings:
|
|
||||||
# At slot 2, trigger an attester slashing for validators #11 and #12.
|
|
||||||
- slot: 2
|
|
||||||
validator_indices: [11, 12]
|
|
||||||
# Trigger another slashing...
|
|
||||||
- slot: 5
|
|
||||||
validator_indices: [14]
|
|
||||||
results:
|
|
||||||
num_skipped_slots: 2
|
|
||||||
states:
|
|
||||||
- slot: 63
|
|
||||||
num_validators: 1003
|
|
||||||
slashed_validators: [11, 12, 13, 14, 42]
|
|
||||||
exited_validators: []
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
Thanks to [prsym](http://github.com/prysmaticlabs/prysm) for coming up with the
|
|
||||||
base YAML format.
|
|
||||||
|
|
||||||
### Notes
|
|
||||||
|
|
||||||
Wherever `slot` is used, it is actually the "slot height", or slots since
|
|
||||||
genesis. This allows the tests to disregard the `GENESIS_EPOCH`.
|
|
||||||
|
|
||||||
### Differences from Prysmatic's format
|
|
||||||
|
|
||||||
1. The detail for `deposits`, `proposer_slashings` and `attester_slashings` is
|
|
||||||
ommitted from the test specification. It assumed they should be valid
|
|
||||||
objects.
|
|
||||||
2. There is a `states` list in `results` that runs checks against any state
|
|
||||||
specified by a `slot` number. This is in contrast to the variables in
|
|
||||||
`results` that assume the last (highest) state should be inspected.
|
|
||||||
|
|
||||||
#### Reasoning
|
|
||||||
|
|
||||||
Respective reasonings for above changes:
|
|
||||||
|
|
||||||
1. This removes the concerns of the actual object structure from the tests.
|
|
||||||
This allows for more variation in the deposits/slashings objects without
|
|
||||||
needing to update the tests. Also, it makes it makes it easier to create
|
|
||||||
tests.
|
|
||||||
2. This gives more fine-grained control over the tests. It allows for checking
|
|
||||||
that certain events happened at certain times whilst making the tests only
|
|
||||||
slightly more verbose.
|
|
||||||
|
|
||||||
_Notes: it may be useful to add an extra field to each slashing type to
|
|
||||||
indicate if it should be valid or not. It also may be useful to add an option
|
|
||||||
for double-vote/surround-vote attester slashings. The `amount` field was left
|
|
||||||
on `deposits` as it changes the behaviour of state significantly._
|
|
||||||
|
|
||||||
## Binary Usage Example
|
|
||||||
|
|
||||||
Follow these steps to run as a binary:
|
|
||||||
|
|
||||||
1. Navigate to the root of this crate (where this readme is located)
|
|
||||||
2. Run `$ cargo run --release -- --yaml examples/validator_registry.yaml`
|
|
||||||
|
|
||||||
_Note: the `--release` flag builds the binary without all the debugging
|
|
||||||
instrumentation. The test is much faster built using `--release`. As is
|
|
||||||
customary in cargo, the flags before `--` are passed to cargo and the flags
|
|
||||||
after are passed to the binary._
|
|
||||||
|
|
||||||
### CLI Options
|
|
||||||
|
|
||||||
```
|
|
||||||
Lighthouse Test Harness Runner 0.0.1
|
|
||||||
Sigma Prime <contact@sigmaprime.io>
|
|
||||||
Runs `test_harness` using a YAML test_case.
|
|
||||||
|
|
||||||
USAGE:
|
|
||||||
test_harness --log-level <LOG_LEVEL> --yaml <FILE>
|
|
||||||
|
|
||||||
FLAGS:
|
|
||||||
-h, --help Prints help information
|
|
||||||
-V, --version Prints version information
|
|
||||||
|
|
||||||
OPTIONS:
|
|
||||||
--log-level <LOG_LEVEL> Logging level. [default: debug] [possible values: error, warn, info, debug, trace]
|
|
||||||
--yaml <FILE> YAML file test_case.
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
## Library Usage Example
|
|
||||||
|
|
||||||
```rust
|
|
||||||
use test_harness::BeaconChainHarness;
|
|
||||||
use types::ChainSpec;
|
|
||||||
|
|
||||||
let validator_count = 8;
|
|
||||||
let spec = ChainSpec::few_validators();
|
|
||||||
|
|
||||||
let mut harness = BeaconChainHarness::new(spec, validator_count);
|
|
||||||
|
|
||||||
harness.advance_chain_with_block();
|
|
||||||
|
|
||||||
let chain = harness.chain_dump().unwrap();
|
|
||||||
|
|
||||||
// One block should have been built on top of the genesis block.
|
|
||||||
assert_eq!(chain.len(), 2);
|
|
||||||
```
|
|
@ -1,63 +0,0 @@
|
|||||||
title: Validator Registry Tests
|
|
||||||
summary: Tests deposit and slashing effects on validator registry.
|
|
||||||
test_suite: validator_registry
|
|
||||||
fork: tchaikovsky
|
|
||||||
version: 1.0
|
|
||||||
test_cases:
|
|
||||||
- config:
|
|
||||||
slots_per_epoch: 64
|
|
||||||
deposits_for_chain_start: 1000
|
|
||||||
num_slots: 64
|
|
||||||
skip_slots: [2, 3]
|
|
||||||
persistent_committee_period: 0
|
|
||||||
deposits:
|
|
||||||
# At slot 1, create a new validator deposit of 5 ETH.
|
|
||||||
- slot: 1
|
|
||||||
amount: 5000000000
|
|
||||||
# Trigger more deposits...
|
|
||||||
- slot: 3
|
|
||||||
amount: 5000000000
|
|
||||||
- slot: 5
|
|
||||||
amount: 32000000000
|
|
||||||
exits:
|
|
||||||
# At slot 10, submit an exit for validator #50.
|
|
||||||
- slot: 10
|
|
||||||
validator_index: 50
|
|
||||||
transfers:
|
|
||||||
- slot: 6
|
|
||||||
from: 1000
|
|
||||||
to: 1001
|
|
||||||
amount: 5000000000
|
|
||||||
proposer_slashings:
|
|
||||||
# At slot 2, trigger a proposer slashing for validator #42.
|
|
||||||
- slot: 2
|
|
||||||
validator_index: 42
|
|
||||||
# Trigger another slashing...
|
|
||||||
- slot: 8
|
|
||||||
validator_index: 13
|
|
||||||
attester_slashings:
|
|
||||||
# At slot 2, trigger an attester slashing for validators #11 and #12.
|
|
||||||
- slot: 2
|
|
||||||
validator_indices: [11, 12]
|
|
||||||
# Trigger another slashing...
|
|
||||||
- slot: 5
|
|
||||||
validator_indices: [14]
|
|
||||||
results:
|
|
||||||
num_skipped_slots: 2
|
|
||||||
states:
|
|
||||||
- slot: 63
|
|
||||||
num_validators: 1003
|
|
||||||
num_previous_epoch_attestations: 0
|
|
||||||
# slots_per_epoch - attestation_inclusion_delay - skip_slots
|
|
||||||
num_current_epoch_attestations: 57
|
|
||||||
slashed_validators: [11, 12, 13, 14, 42]
|
|
||||||
exited_validators: []
|
|
||||||
exit_initiated_validators: [50]
|
|
||||||
balances:
|
|
||||||
- validator_index: 1000
|
|
||||||
comparison: "eq"
|
|
||||||
balance: 0
|
|
||||||
- validator_index: 1001
|
|
||||||
comparison: "eq"
|
|
||||||
balance: 10000000000
|
|
||||||
|
|
@ -1,350 +0,0 @@
|
|||||||
use super::ValidatorHarness;
|
|
||||||
use beacon_chain::{BeaconChain, BlockProcessingOutcome};
|
|
||||||
pub use beacon_chain::{BeaconChainError, CheckPoint};
|
|
||||||
use db::{
|
|
||||||
stores::{BeaconBlockStore, BeaconStateStore},
|
|
||||||
MemoryDB,
|
|
||||||
};
|
|
||||||
use fork_choice::BitwiseLMDGhost;
|
|
||||||
use log::debug;
|
|
||||||
use rayon::prelude::*;
|
|
||||||
use slot_clock::TestingSlotClock;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use tree_hash::TreeHash;
|
|
||||||
use types::{test_utils::TestingBeaconStateBuilder, *};
|
|
||||||
|
|
||||||
type TestingBeaconChain = BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>;
|
|
||||||
|
|
||||||
/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
/// to it. Each validator is provided a borrow to the beacon chain, where it may read
/// information and submit blocks/attestations for processing.
///
/// This test harness is useful for testing validator and internal state transition logic. It
/// is not useful for testing that multiple beacon nodes can reach consensus.
pub struct BeaconChainHarness {
    /// Shared in-memory database backing both stores and the chain.
    pub db: Arc<MemoryDB>,
    /// The single simulated beacon node.
    pub beacon_chain: Arc<TestingBeaconChain>,
    /// Block storage, backed by `db`.
    pub block_store: Arc<BeaconBlockStore<MemoryDB>>,
    /// State storage, backed by `db`.
    pub state_store: Arc<BeaconStateStore<MemoryDB>>,
    /// One harness per validator; all share the `beacon_chain` via `Arc`.
    pub validators: Vec<ValidatorHarness>,
    /// Chain specification used for all operations.
    pub spec: Arc<ChainSpec>,
}
|
|
||||||
|
|
||||||
impl BeaconChainHarness {
|
|
||||||
/// Create a new harness with:
|
|
||||||
///
|
|
||||||
/// - A keypair, `BlockProducer` and `Attester` for each validator.
|
|
||||||
/// - A new BeaconChain struct where the given validators are in the genesis.
|
|
||||||
pub fn new(spec: ChainSpec, validator_count: usize) -> Self {
|
|
||||||
let state_builder =
|
|
||||||
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
|
|
||||||
Self::from_beacon_state_builder(state_builder, spec)
|
|
||||||
}
|
|
||||||
|
|
||||||
    /// Create a harness from a pre-configured `TestingBeaconStateBuilder`.
    ///
    /// Builds the genesis state and block, primes all epoch caches, starts a
    /// `BeaconChain` backed by an in-memory database and spawns one
    /// `ValidatorHarness` per keypair in the builder.
    pub fn from_beacon_state_builder(
        state_builder: TestingBeaconStateBuilder,
        spec: ChainSpec,
    ) -> Self {
        // All storage is in-memory; nothing persists beyond the harness.
        let db = Arc::new(MemoryDB::open());
        let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
        let state_store = Arc::new(BeaconStateStore::new(db.clone()));
        // The clock starts at the genesis slot; tests advance it manually.
        let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
        let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());

        let (mut genesis_state, keypairs) = state_builder.build();

        // The genesis block commits to the tree-hash root of the genesis state.
        let mut genesis_block = BeaconBlock::empty(&spec);
        genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root());

        // Pre-build every epoch cache so later committee lookups do not fail.
        genesis_state
            .build_epoch_cache(RelativeEpoch::Previous, &spec)
            .unwrap();
        genesis_state
            .build_epoch_cache(RelativeEpoch::Current, &spec)
            .unwrap();
        genesis_state
            .build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)
            .unwrap();
        genesis_state
            .build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)
            .unwrap();

        // Create the Beacon Chain
        let beacon_chain = Arc::new(
            BeaconChain::from_genesis(
                state_store.clone(),
                block_store.clone(),
                slot_clock,
                genesis_state,
                genesis_block,
                spec.clone(),
                fork_choice,
            )
            .unwrap(),
        );

        let spec = Arc::new(spec);

        debug!("Creating validator producer and attester instances...");

        // Spawn the test validator instances.
        let validators: Vec<ValidatorHarness> = keypairs
            .iter()
            .map(|keypair| {
                ValidatorHarness::new(keypair.clone(), beacon_chain.clone(), spec.clone())
            })
            .collect();

        debug!("Created {} ValidatorHarnesss", validators.len());

        Self {
            db,
            beacon_chain,
            block_store,
            state_store,
            validators,
            spec,
        }
    }
|
|
||||||
|
|
||||||
    /// Move the `slot_clock` for the `BeaconChain` forward one slot.
    ///
    /// This is the equivalent of advancing a system clock forward one `SLOT_DURATION`.
    ///
    /// Returns the new slot.
    pub fn increment_beacon_chain_slot(&mut self) -> Slot {
        let slot = self.beacon_chain.present_slot() + 1;

        // Position of the new slot within its epoch — for logging only.
        let nth_slot = slot
            - slot
                .epoch(self.spec.slots_per_epoch)
                .start_slot(self.spec.slots_per_epoch);
        // Number of epochs elapsed since genesis — for logging only.
        let nth_epoch = slot.epoch(self.spec.slots_per_epoch) - self.spec.genesis_epoch;
        debug!(
            "Advancing BeaconChain to slot {}, epoch {} (epoch height: {}, slot {} in epoch.).",
            slot,
            slot.epoch(self.spec.slots_per_epoch),
            nth_epoch,
            nth_slot
        );

        self.beacon_chain.slot_clock.set_slot(slot.as_u64());
        // Bring the canonical state up to date with the new clock value.
        self.beacon_chain
            .catchup_state()
            .expect("Failed to catch state");
        slot
    }
|
|
||||||
|
|
||||||
    /// Collects an `Attestation` from every member of every crosslink committee at the
    /// present slot.
    ///
    /// Each attestation is signed by its single validator, so every aggregate signature
    /// contains exactly one signature and only that validator's aggregation bit is set.
    //
    // NOTE(review): method name is misspelled ("attesations"); renaming would touch callers.
    pub fn gather_attesations(&mut self) -> Vec<Attestation> {
        let present_slot = self.beacon_chain.present_slot();
        let state = self.beacon_chain.state.read();

        let mut attestations = vec![];

        for committee in state
            .get_crosslink_committees_at_slot(present_slot, &self.spec)
            .unwrap()
        {
            for &validator in &committee.committee {
                let duties = state
                    .get_attestation_duties(validator, &self.spec)
                    .unwrap()
                    .expect("Attesting validators by definition have duties");

                // Obtain `AttestationData` from the beacon chain.
                let data = self
                    .beacon_chain
                    .produce_attestation_data(duties.shard)
                    .unwrap();

                // Produce an aggregate signature with a single signature.
                let aggregate_signature = {
                    let message = AttestationDataAndCustodyBit {
                        data: data.clone(),
                        custody_bit: false,
                    }
                    .tree_hash_root();
                    let domain = self.spec.get_domain(
                        state.slot.epoch(self.spec.slots_per_epoch),
                        Domain::Attestation,
                        &state.fork,
                    );
                    let sig =
                        Signature::new(&message, domain, &self.validators[validator].keypair.sk);

                    let mut agg_sig = AggregateSignature::new();
                    agg_sig.add(&sig);

                    agg_sig
                };

                // Only this validator's bit is set; the custody bitfield stays all-zero.
                let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len);
                let custody_bitfield = Bitfield::with_capacity(duties.committee_len);

                aggregation_bitfield.set(duties.committee_index, true);

                attestations.push(Attestation {
                    aggregation_bitfield,
                    data,
                    custody_bitfield,
                    aggregate_signature,
                })
            }
        }

        attestations
    }
|
|
||||||
|
|
||||||
    /// Get the block from the proposer for the slot.
    ///
    /// Note: the validator will only produce it _once per slot_. So, if you call this twice you'll
    /// only get a block once.
    pub fn produce_block(&mut self) -> BeaconBlock {
        let present_slot = self.beacon_chain.present_slot();

        // The chain decides which validator proposes at this slot.
        let proposer = self.beacon_chain.block_proposer(present_slot).unwrap();

        debug!(
            "Producing block from validator #{} for slot {}.",
            proposer, present_slot
        );

        // Ensure the validators slot clock is accurate.
        self.validators[proposer].set_slot(present_slot);

        self.validators[proposer].produce_block().unwrap()
    }
|
|
||||||
|
|
||||||
    /// Advances the chain with a BeaconBlock and attestations from all validators.
    ///
    /// This is the ideal scenario for the Beacon Chain, 100% honest participation from
    /// validators.
    pub fn advance_chain_with_block(&mut self) -> BeaconBlock {
        self.increment_beacon_chain_slot();

        // Produce a new block.
        let block = self.produce_block();
        debug!("Submitting block for processing...");
        // Anything other than a valid-block outcome is a test failure.
        match self.beacon_chain.process_block(block.clone()) {
            Ok(BlockProcessingOutcome::ValidBlock(_)) => {}
            other => panic!("block processing failed with {:?}", other),
        };
        debug!("...block processed by BeaconChain.");

        debug!("Producing attestations...");

        // Produce new attestations.
        let attestations = self.gather_attesations();

        debug!("Processing {} attestations...", attestations.len());

        // Attestations are processed in parallel via rayon.
        attestations
            .par_iter()
            .enumerate()
            .for_each(|(i, attestation)| {
                self.beacon_chain
                    .process_attestation(attestation.clone())
                    .unwrap_or_else(|_| panic!("Attestation {} invalid: {:?}", i, attestation));
            });

        debug!("Attestations processed.");

        block
    }
|
|
||||||
|
|
||||||
/// Signs a message using some validators secret key with the `Fork` info from the latest state
|
|
||||||
/// of the `BeaconChain`.
|
|
||||||
///
|
|
||||||
/// Useful for producing slashable messages and other objects that `BeaconChainHarness` does
|
|
||||||
/// not produce naturally.
|
|
||||||
pub fn validator_sign(
|
|
||||||
&self,
|
|
||||||
validator_index: usize,
|
|
||||||
message: &[u8],
|
|
||||||
epoch: Epoch,
|
|
||||||
domain_type: Domain,
|
|
||||||
) -> Option<Signature> {
|
|
||||||
let validator = self.validators.get(validator_index)?;
|
|
||||||
|
|
||||||
let domain = self
|
|
||||||
.spec
|
|
||||||
.get_domain(epoch, domain_type, &self.beacon_chain.state.read().fork);
|
|
||||||
|
|
||||||
Some(Signature::new(message, domain, &validator.keypair.sk))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the current `Fork` of the `beacon_chain`.
|
|
||||||
pub fn fork(&self) -> Fork {
|
|
||||||
self.beacon_chain.state.read().fork.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the current `epoch` of the `beacon_chain`.
|
|
||||||
pub fn epoch(&self) -> Epoch {
|
|
||||||
self.beacon_chain
|
|
||||||
.state
|
|
||||||
.read()
|
|
||||||
.slot
|
|
||||||
.epoch(self.spec.slots_per_epoch)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the keypair for some validator index.
|
|
||||||
pub fn validator_keypair(&self, validator_index: usize) -> Option<&Keypair> {
|
|
||||||
self.validators
|
|
||||||
.get(validator_index)
|
|
||||||
.and_then(|v| Some(&v.keypair))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new
|
|
||||||
/// `ValidatorHarness` instance for this validator.
|
|
||||||
///
|
|
||||||
/// If a new `ValidatorHarness` was created, the validator should become fully operational as
|
|
||||||
/// if the validator were created during `BeaconChainHarness` instantiation.
|
|
||||||
pub fn add_deposit(&mut self, deposit: Deposit, keypair: Option<Keypair>) {
|
|
||||||
self.beacon_chain.process_deposit(deposit).unwrap();
|
|
||||||
|
|
||||||
// If a keypair is present, add a new `ValidatorHarness` to the rig.
|
|
||||||
if let Some(keypair) = keypair {
|
|
||||||
let validator =
|
|
||||||
ValidatorHarness::new(keypair, self.beacon_chain.clone(), self.spec.clone());
|
|
||||||
self.validators.push(validator);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Submit an exit to the `BeaconChain` for inclusion in some block.
|
|
||||||
///
|
|
||||||
/// Note: the `ValidatorHarness` for this validator continues to exist. Once it is exited it
|
|
||||||
/// will stop receiving duties from the beacon chain and just do nothing when prompted to
|
|
||||||
/// produce/attest.
|
|
||||||
pub fn add_exit(&mut self, exit: VoluntaryExit) {
|
|
||||||
self.beacon_chain.process_voluntary_exit(exit).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Submit an transfer to the `BeaconChain` for inclusion in some block.
|
|
||||||
pub fn add_transfer(&mut self, transfer: Transfer) {
|
|
||||||
self.beacon_chain.process_transfer(transfer).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Submit a proposer slashing to the `BeaconChain` for inclusion in some block.
|
|
||||||
pub fn add_proposer_slashing(&mut self, proposer_slashing: ProposerSlashing) {
|
|
||||||
self.beacon_chain
|
|
||||||
.process_proposer_slashing(proposer_slashing)
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Submit an attester slashing to the `BeaconChain` for inclusion in some block.
|
|
||||||
pub fn add_attester_slashing(&mut self, attester_slashing: AttesterSlashing) {
|
|
||||||
self.beacon_chain
|
|
||||||
.process_attester_slashing(attester_slashing)
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Executes the fork choice rule on the `BeaconChain`, selecting a new canonical head.
|
|
||||||
pub fn run_fork_choice(&mut self) {
|
|
||||||
self.beacon_chain.fork_choice().unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Dump all blocks and states from the canonical beacon chain.
|
|
||||||
pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, BeaconChainError> {
|
|
||||||
self.beacon_chain.chain_dump()
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,102 +0,0 @@
|
|||||||
use clap::{App, Arg, SubCommand};
|
|
||||||
use env_logger::{Builder, Env};
|
|
||||||
use gen_keys::gen_keys;
|
|
||||||
use run_test::run_test;
|
|
||||||
use std::fs;
|
|
||||||
use types::test_utils::keypairs_path;
|
|
||||||
use types::ChainSpec;
|
|
||||||
|
|
||||||
mod beacon_chain_harness;
|
|
||||||
mod gen_keys;
|
|
||||||
mod run_test;
|
|
||||||
mod test_case;
|
|
||||||
mod validator_harness;
|
|
||||||
|
|
||||||
use validator_harness::ValidatorHarness;
|
|
||||||
|
|
||||||
// Entry point: parses CLI arguments, initialises logging, then dispatches to either the
// `run_test` or `gen_keys` subcommand.
fn main() {
    // Default location for the cached BLS keypair file.
    let validator_file_path = keypairs_path();

    // Best-effort: the parent directory may already exist, in which case this fails harmlessly.
    let _ = fs::create_dir(validator_file_path.parent().unwrap());

    let matches = App::new("Lighthouse Test Harness Runner")
        .version("0.0.1")
        .author("Sigma Prime <contact@sigmaprime.io>")
        .about("Runs `test_harness` using a YAML test_case.")
        .arg(
            Arg::with_name("log")
                .long("log-level")
                .short("l")
                .value_name("LOG_LEVEL")
                .help("Logging level.")
                .possible_values(&["error", "warn", "info", "debug", "trace"])
                .default_value("debug")
                .required(true),
        )
        .arg(
            Arg::with_name("spec")
                .long("spec")
                .short("s")
                .value_name("SPECIFICATION")
                .help("ChainSpec instantiation.")
                .possible_values(&["foundation", "few_validators"])
                .default_value("foundation"),
        )
        .subcommand(
            SubCommand::with_name("run_test")
                .about("Executes a YAML test specification")
                .arg(
                    Arg::with_name("yaml")
                        .long("yaml")
                        .value_name("FILE")
                        .help("YAML file test_case.")
                        .required(true),
                )
                .arg(
                    Arg::with_name("validators_dir")
                        .long("validators-dir")
                        .short("v")
                        .value_name("VALIDATORS_DIR")
                        .help("A directory with validator deposits and keypair YAML."),
                ),
        )
        .subcommand(
            SubCommand::with_name("gen_keys")
                .about("Builds a file of BLS keypairs for faster tests.")
                .arg(
                    Arg::with_name("validator_count")
                        .long("validator_count")
                        .short("n")
                        .value_name("VALIDATOR_COUNT")
                        .help("Number of validators to generate.")
                        .required(true),
                )
                .arg(
                    Arg::with_name("output_file")
                        .long("output_file")
                        .short("d")
                        .value_name("GENESIS_TIME")
                        .help("Output directory for generated YAML.")
                        .default_value(validator_file_path.to_str().unwrap()),
                ),
        )
        .get_matches();

    // Initialise env_logger at the requested level (always present due to default).
    if let Some(log_level) = matches.value_of("log") {
        Builder::from_env(Env::default().default_filter_or(log_level)).init();
    }

    // NOTE(review): the parsed spec is currently unused by the subcommands.
    let _spec = match matches.value_of("spec") {
        Some("foundation") => ChainSpec::foundation(),
        Some("few_validators") => ChainSpec::few_validators(),
        _ => unreachable!(), // Has a default value, should always exist.
    };

    if let Some(matches) = matches.subcommand_matches("run_test") {
        run_test(matches);
    }

    if let Some(matches) = matches.subcommand_matches("gen_keys") {
        gen_keys(matches);
    }
}
|
|
@ -1,21 +0,0 @@
|
|||||||
use clap::{value_t, ArgMatches};
|
|
||||||
use log::debug;
|
|
||||||
use std::path::Path;
|
|
||||||
use types::test_utils::{generate_deterministic_keypairs, KeypairsFile};
|
|
||||||
|
|
||||||
/// Creates a file containing BLS keypairs.
///
/// Reads `validator_count` and `output_file` from the CLI matches, deterministically
/// generates that many keypairs and writes them to the output file in raw form.
pub fn gen_keys(matches: &ArgMatches) {
    let validator_count = value_t!(matches.value_of("validator_count"), usize)
        .expect("Validator count is required argument");
    let output_file = matches
        .value_of("output_file")
        .expect("Output file has a default value.");

    // Deterministic generation keeps test runs reproducible across machines.
    let keypairs = generate_deterministic_keypairs(validator_count);

    debug!("Writing keypairs to file...");

    let keypairs_path = Path::new(output_file);

    // NOTE(review): `to_raw_file` is called on `keypairs` yet also receives `&keypairs`
    // as an argument — presumably the `KeypairsFile` trait's signature; confirm.
    keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap();
}
|
|
@ -1,33 +0,0 @@
|
|||||||
//! Provides a testing environment for the `BeaconChain`, `Attester` and `BlockProposer` objects.
|
|
||||||
//!
|
|
||||||
//! This environment bypasses networking and client run-times and connects the `Attester` and `Proposer`
|
|
||||||
//! directly to the `BeaconChain` via an `Arc`.
|
|
||||||
//!
|
|
||||||
//! The `BeaconChainHarness` contains a single `BeaconChain` instance and many `ValidatorHarness`
|
|
||||||
//! instances. All of the `ValidatorHarness` instances work to advance the `BeaconChain` by
|
|
||||||
//! producing blocks and attestations.
|
|
||||||
//!
|
|
||||||
//! Example:
|
|
||||||
//! ```rust,no_run
|
|
||||||
//! use test_harness::BeaconChainHarness;
|
|
||||||
//! use types::ChainSpec;
|
|
||||||
//!
|
|
||||||
//! let validator_count = 8;
|
|
||||||
//! let spec = ChainSpec::few_validators();
|
|
||||||
//!
|
|
||||||
//! let mut harness = BeaconChainHarness::new(spec, validator_count);
|
|
||||||
//!
|
|
||||||
//! harness.advance_chain_with_block();
|
|
||||||
//!
|
|
||||||
//! let chain = harness.chain_dump().unwrap();
|
|
||||||
//!
|
|
||||||
//! // One block should have been built on top of the genesis block.
|
|
||||||
//! assert_eq!(chain.len(), 2);
|
|
||||||
//! ```
|
|
||||||
|
|
||||||
mod beacon_chain_harness;
|
|
||||||
pub mod test_case;
|
|
||||||
mod validator_harness;
|
|
||||||
|
|
||||||
pub use self::beacon_chain_harness::BeaconChainHarness;
|
|
||||||
pub use self::validator_harness::ValidatorHarness;
|
|
@ -1,37 +0,0 @@
|
|||||||
use crate::test_case::TestCase;
|
|
||||||
use clap::ArgMatches;
|
|
||||||
use std::{fs::File, io::prelude::*};
|
|
||||||
use yaml_rust::YamlLoader;
|
|
||||||
|
|
||||||
/// Runs a YAML-specified test case.
|
|
||||||
pub fn run_test(matches: &ArgMatches) {
|
|
||||||
if let Some(yaml_file) = matches.value_of("yaml") {
|
|
||||||
let docs = {
|
|
||||||
let mut file = File::open(yaml_file).unwrap();
|
|
||||||
|
|
||||||
let mut yaml_str = String::new();
|
|
||||||
file.read_to_string(&mut yaml_str).unwrap();
|
|
||||||
|
|
||||||
YamlLoader::load_from_str(&yaml_str).unwrap()
|
|
||||||
};
|
|
||||||
|
|
||||||
for doc in &docs {
|
|
||||||
// For each `test_cases` YAML in the document, build a `TestCase`, execute it and
|
|
||||||
// assert that the execution result matches the test_case description.
|
|
||||||
//
|
|
||||||
// In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
|
|
||||||
// and a new `BeaconChain` is built as per the test_case.
|
|
||||||
//
|
|
||||||
// After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
|
|
||||||
// and states in the chain is obtained and checked against the `results` specified in
|
|
||||||
// the `test_case`.
|
|
||||||
//
|
|
||||||
// If any of the expectations in the results are not met, the process
|
|
||||||
// panics with a message.
|
|
||||||
for test_case in doc["test_cases"].as_vec().unwrap() {
|
|
||||||
let test_case = TestCase::from_yaml(test_case);
|
|
||||||
test_case.assert_result_valid(test_case.execute())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,312 +0,0 @@
|
|||||||
//! Defines execution and testing specs for a `BeaconChainHarness` instance. Supports loading from
|
|
||||||
//! a YAML file.
|
|
||||||
|
|
||||||
use crate::beacon_chain_harness::BeaconChainHarness;
|
|
||||||
use beacon_chain::CheckPoint;
|
|
||||||
use log::{info, warn};
|
|
||||||
use tree_hash::SignedRoot;
|
|
||||||
use types::*;
|
|
||||||
|
|
||||||
use types::test_utils::*;
|
|
||||||
use yaml_rust::Yaml;
|
|
||||||
|
|
||||||
mod config;
|
|
||||||
mod results;
|
|
||||||
mod state_check;
|
|
||||||
mod yaml_helpers;
|
|
||||||
|
|
||||||
pub use config::Config;
|
|
||||||
pub use results::Results;
|
|
||||||
pub use state_check::StateCheck;
|
|
||||||
|
|
||||||
/// Defines the execution and testing of a `BeaconChainHarness` instantiation.
///
/// Typical workflow is:
///
/// 1. Instantiate the `TestCase` from YAML: `let test_case = TestCase::from_yaml(&my_yaml);`
/// 2. Execute the test_case: `let result = test_case.execute();`
/// 3. Test the results against the test_case: `test_case.assert_result_valid(result);`
#[derive(Debug)]
pub struct TestCase {
    /// Defines the execution (slots, deposits, slashings, exits, transfers).
    pub config: Config,
    /// Defines tests to run against the execution result.
    pub results: Results,
}
|
|
||||||
|
|
||||||
/// The result of executing a `TestCase`.
pub struct ExecutionResult {
    /// The canonical beacon chain generated from the execution.
    pub chain: Vec<CheckPoint>,
    /// The spec used for execution.
    pub spec: ChainSpec,
}
|
|
||||||
|
|
||||||
impl TestCase {
|
|
||||||
/// Load the test case from a YAML document.
|
|
||||||
pub fn from_yaml(test_case: &Yaml) -> Self {
|
|
||||||
Self {
|
|
||||||
results: Results::from_yaml(&test_case["results"]),
|
|
||||||
config: Config::from_yaml(&test_case["config"]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return a `ChainSpec::foundation()`.
|
|
||||||
///
|
|
||||||
/// If specified in `config`, returns it with a modified `slots_per_epoch`.
|
|
||||||
fn spec(&self) -> ChainSpec {
|
|
||||||
let mut spec = ChainSpec::foundation();
|
|
||||||
|
|
||||||
if let Some(n) = self.config.slots_per_epoch {
|
|
||||||
spec.slots_per_epoch = n;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(n) = self.config.persistent_committee_period {
|
|
||||||
spec.persistent_committee_period = n;
|
|
||||||
}
|
|
||||||
|
|
||||||
spec
|
|
||||||
}
|
|
||||||
|
|
||||||
    /// Executes the test case, returning an `ExecutionResult`.
    ///
    /// Drives a fresh `BeaconChainHarness` for `config.num_slots` slots, injecting the
    /// configured deposits, slashings, exits and transfers at their scheduled slots.
    #[allow(clippy::cyclomatic_complexity)]
    pub fn execute(&self) -> ExecutionResult {
        let spec = self.spec();
        let validator_count = self.config.deposits_for_chain_start;
        let slots = self.config.num_slots;

        info!(
            "Building BeaconChainHarness with {} validators...",
            validator_count
        );

        let mut harness = BeaconChainHarness::new(spec, validator_count);

        info!("Starting simulation across {} slots...", slots);

        // Start at 1 because genesis counts as a slot.
        for slot_height in 1..slots {
            // Used to ensure that deposits in the same slot have incremental deposit indices.
            let mut deposit_index_offset = 0;

            // Feed deposits to the BeaconChain.
            if let Some(ref deposits) = self.config.deposits {
                for (slot, amount) in deposits {
                    if *slot == slot_height {
                        info!("Including deposit at slot height {}.", slot_height);
                        let (deposit, keypair) =
                            build_deposit(&harness, *amount, deposit_index_offset);
                        harness.add_deposit(deposit, Some(keypair.clone()));
                        deposit_index_offset += 1;
                    }
                }
            }

            // Feed proposer slashings to the BeaconChain.
            if let Some(ref slashings) = self.config.proposer_slashings {
                for (slot, validator_index) in slashings {
                    if *slot == slot_height {
                        info!(
                            "Including proposer slashing at slot height {} for validator #{}.",
                            slot_height, validator_index
                        );
                        let slashing = build_proposer_slashing(&harness, *validator_index);
                        harness.add_proposer_slashing(slashing);
                    }
                }
            }

            // Feed attester slashings to the BeaconChain.
            if let Some(ref slashings) = self.config.attester_slashings {
                for (slot, validator_indices) in slashings {
                    if *slot == slot_height {
                        info!(
                            "Including attester slashing at slot height {} for validators {:?}.",
                            slot_height, validator_indices
                        );
                        let slashing =
                            build_double_vote_attester_slashing(&harness, &validator_indices[..]);
                        harness.add_attester_slashing(slashing);
                    }
                }
            }

            // Feed exits to the BeaconChain.
            if let Some(ref exits) = self.config.exits {
                for (slot, validator_index) in exits {
                    if *slot == slot_height {
                        info!(
                            "Including exit at slot height {} for validator {}.",
                            slot_height, validator_index
                        );
                        let exit = build_exit(&harness, *validator_index);
                        harness.add_exit(exit);
                    }
                }
            }

            // Feed transfers to the BeaconChain.
            if let Some(ref transfers) = self.config.transfers {
                for (slot, from, to, amount) in transfers {
                    if *slot == slot_height {
                        info!(
                            "Including transfer at slot height {} from validator {}.",
                            slot_height, from
                        );
                        let transfer = build_transfer(&harness, *from, *to, *amount);
                        harness.add_transfer(transfer);
                    }
                }
            }

            // Build a block or skip a slot.
            match self.config.skip_slots {
                Some(ref skip_slots) if skip_slots.contains(&slot_height) => {
                    warn!("Skipping slot at height {}.", slot_height);
                    harness.increment_beacon_chain_slot();
                }
                _ => {
                    info!("Producing block at slot height {}.", slot_height);
                    harness.advance_chain_with_block();
                }
            }
        }

        // Settle on a canonical head before dumping the chain.
        harness.run_fork_choice();

        info!("Test execution complete!");

        info!("Building chain dump for analysis...");

        ExecutionResult {
            chain: harness.chain_dump().expect("Chain dump failed."),
            spec: (*harness.spec).clone(),
        }
    }
|
|
||||||
|
|
||||||
    /// Checks that the `ExecutionResult` is consistent with the specifications in `self.results`.
    ///
    /// # Panics
    ///
    /// Panics with a message if any result does not match expectations.
    pub fn assert_result_valid(&self, execution_result: ExecutionResult) {
        info!("Verifying test results...");
        let spec = &execution_result.spec;

        // Chain-length check: one checkpoint per non-skipped slot.
        if let Some(num_skipped_slots) = self.results.num_skipped_slots {
            assert_eq!(
                execution_result.chain.len(),
                self.config.num_slots as usize - num_skipped_slots,
                "actual skipped slots != expected."
            );
            info!(
                "OK: Chain length is {} ({} skipped slots).",
                execution_result.chain.len(),
                num_skipped_slots
            );
        }

        // Run each state check against every checkpoint whose (genesis-relative) slot matches.
        if let Some(ref state_checks) = self.results.state_checks {
            for checkpoint in &execution_result.chain {
                let state = &checkpoint.beacon_state;

                for state_check in state_checks {
                    // Check slots are specified relative to the genesis epoch's start slot.
                    let adjusted_state_slot =
                        state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch);

                    if state_check.slot == adjusted_state_slot {
                        state_check.assert_valid(state, spec);
                    }
                }
            }
        }
    }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds a `Deposit` this is valid for the given `BeaconChainHarness` at its next slot.
|
|
||||||
fn build_transfer(
|
|
||||||
harness: &BeaconChainHarness,
|
|
||||||
sender: u64,
|
|
||||||
recipient: u64,
|
|
||||||
amount: u64,
|
|
||||||
) -> Transfer {
|
|
||||||
let slot = harness.beacon_chain.state.read().slot + 1;
|
|
||||||
|
|
||||||
let mut builder = TestingTransferBuilder::new(sender, recipient, amount, slot);
|
|
||||||
|
|
||||||
let keypair = harness.validator_keypair(sender as usize).unwrap();
|
|
||||||
builder.sign(keypair.clone(), &harness.fork(), &harness.spec);
|
|
||||||
|
|
||||||
builder.build()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds a `Deposit` this is valid for the given `BeaconChainHarness`.
|
|
||||||
///
|
|
||||||
/// `index_offset` is used to ensure that `deposit.index == state.index` when adding multiple
|
|
||||||
/// deposits.
|
|
||||||
fn build_deposit(
|
|
||||||
harness: &BeaconChainHarness,
|
|
||||||
amount: u64,
|
|
||||||
index_offset: u64,
|
|
||||||
) -> (Deposit, Keypair) {
|
|
||||||
let keypair = Keypair::random();
|
|
||||||
|
|
||||||
let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount);
|
|
||||||
builder.set_index(harness.beacon_chain.state.read().deposit_index + index_offset);
|
|
||||||
builder.sign(&keypair, harness.epoch(), &harness.fork(), &harness.spec);
|
|
||||||
|
|
||||||
(builder.build(), keypair)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds a `VoluntaryExit` this is valid for the given `BeaconChainHarness`.
|
|
||||||
fn build_exit(harness: &BeaconChainHarness, validator_index: u64) -> VoluntaryExit {
|
|
||||||
let epoch = harness
|
|
||||||
.beacon_chain
|
|
||||||
.state
|
|
||||||
.read()
|
|
||||||
.current_epoch(&harness.spec);
|
|
||||||
|
|
||||||
let mut exit = VoluntaryExit {
|
|
||||||
epoch,
|
|
||||||
validator_index,
|
|
||||||
signature: Signature::empty_signature(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let message = exit.signed_root();
|
|
||||||
|
|
||||||
exit.signature = harness
|
|
||||||
.validator_sign(validator_index as usize, &message[..], epoch, Domain::Exit)
|
|
||||||
.expect("Unable to sign VoluntaryExit");
|
|
||||||
|
|
||||||
exit
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds an `AttesterSlashing` for some `validator_indices`.
|
|
||||||
///
|
|
||||||
/// Signs the message using a `BeaconChainHarness`.
|
|
||||||
fn build_double_vote_attester_slashing(
|
|
||||||
harness: &BeaconChainHarness,
|
|
||||||
validator_indices: &[u64],
|
|
||||||
) -> AttesterSlashing {
|
|
||||||
let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
|
|
||||||
harness
|
|
||||||
.validator_sign(validator_index as usize, message, epoch, domain)
|
|
||||||
.expect("Unable to sign AttesterSlashing")
|
|
||||||
};
|
|
||||||
|
|
||||||
TestingAttesterSlashingBuilder::double_vote(validator_indices, signer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds an `ProposerSlashing` for some `validator_index`.
|
|
||||||
///
|
|
||||||
/// Signs the message using a `BeaconChainHarness`.
|
|
||||||
fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -> ProposerSlashing {
|
|
||||||
let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
|
|
||||||
harness
|
|
||||||
.validator_sign(validator_index as usize, message, epoch, domain)
|
|
||||||
.expect("Unable to sign AttesterSlashing")
|
|
||||||
};
|
|
||||||
|
|
||||||
TestingProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec)
|
|
||||||
}
|
|
@ -1,135 +0,0 @@
|
|||||||
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
|
|
||||||
use types::*;
|
|
||||||
use yaml_rust::Yaml;
|
|
||||||
|
|
||||||
pub type ValidatorIndex = u64;
|
|
||||||
pub type ValidatorIndices = Vec<u64>;
|
|
||||||
pub type GweiAmount = u64;
|
|
||||||
|
|
||||||
pub type DepositTuple = (SlotHeight, GweiAmount);
|
|
||||||
pub type ExitTuple = (SlotHeight, ValidatorIndex);
|
|
||||||
pub type ProposerSlashingTuple = (SlotHeight, ValidatorIndex);
|
|
||||||
pub type AttesterSlashingTuple = (SlotHeight, ValidatorIndices);
|
|
||||||
/// (slot_height, from, to, amount)
|
|
||||||
pub type TransferTuple = (SlotHeight, ValidatorIndex, ValidatorIndex, GweiAmount);
|
|
||||||
|
|
||||||
/// Defines the execution of a `BeaconStateHarness` across a series of slots.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Config {
|
|
||||||
/// Initial validators.
|
|
||||||
pub deposits_for_chain_start: usize,
|
|
||||||
/// Number of slots in an epoch.
|
|
||||||
pub slots_per_epoch: Option<u64>,
|
|
||||||
/// Affects the number of epochs a validator must be active before they can withdraw.
|
|
||||||
pub persistent_committee_period: Option<u64>,
|
|
||||||
/// Number of slots to build before ending execution.
|
|
||||||
pub num_slots: u64,
|
|
||||||
/// Number of slots that should be skipped due to inactive validator.
|
|
||||||
pub skip_slots: Option<Vec<u64>>,
|
|
||||||
/// Deposits to be included during execution.
|
|
||||||
pub deposits: Option<Vec<DepositTuple>>,
|
|
||||||
/// Proposer slashings to be included during execution.
|
|
||||||
pub proposer_slashings: Option<Vec<ProposerSlashingTuple>>,
|
|
||||||
/// Attester slashings to be including during execution.
|
|
||||||
pub attester_slashings: Option<Vec<AttesterSlashingTuple>>,
|
|
||||||
/// Exits to be including during execution.
|
|
||||||
pub exits: Option<Vec<ExitTuple>>,
|
|
||||||
/// Transfers to be including during execution.
|
|
||||||
pub transfers: Option<Vec<TransferTuple>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Config {
|
|
||||||
/// Load from a YAML document.
|
|
||||||
///
|
|
||||||
/// Expects to receive the `config` section of the document.
|
|
||||||
pub fn from_yaml(yaml: &Yaml) -> Self {
|
|
||||||
Self {
|
|
||||||
deposits_for_chain_start: as_usize(&yaml, "deposits_for_chain_start")
|
|
||||||
.expect("Must specify validator count"),
|
|
||||||
slots_per_epoch: as_u64(&yaml, "slots_per_epoch"),
|
|
||||||
persistent_committee_period: as_u64(&yaml, "persistent_committee_period"),
|
|
||||||
num_slots: as_u64(&yaml, "num_slots").expect("Must specify `config.num_slots`"),
|
|
||||||
skip_slots: as_vec_u64(yaml, "skip_slots"),
|
|
||||||
deposits: parse_deposits(&yaml),
|
|
||||||
proposer_slashings: parse_proposer_slashings(&yaml),
|
|
||||||
attester_slashings: parse_attester_slashings(&yaml),
|
|
||||||
exits: parse_exits(&yaml),
|
|
||||||
transfers: parse_transfers(&yaml),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the `transfers` section of the YAML document.
|
|
||||||
fn parse_transfers(yaml: &Yaml) -> Option<Vec<TransferTuple>> {
|
|
||||||
let mut tuples = vec![];
|
|
||||||
|
|
||||||
for exit in yaml["transfers"].as_vec()? {
|
|
||||||
let slot = as_u64(exit, "slot").expect("Incomplete transfer (slot)");
|
|
||||||
let from = as_u64(exit, "from").expect("Incomplete transfer (from)");
|
|
||||||
let to = as_u64(exit, "to").expect("Incomplete transfer (to)");
|
|
||||||
let amount = as_u64(exit, "amount").expect("Incomplete transfer (amount)");
|
|
||||||
|
|
||||||
tuples.push((SlotHeight::from(slot), from, to, amount));
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(tuples)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the `attester_slashings` section of the YAML document.
|
|
||||||
fn parse_exits(yaml: &Yaml) -> Option<Vec<ExitTuple>> {
|
|
||||||
let mut tuples = vec![];
|
|
||||||
|
|
||||||
for exit in yaml["exits"].as_vec()? {
|
|
||||||
let slot = as_u64(exit, "slot").expect("Incomplete exit (slot)");
|
|
||||||
let validator_index =
|
|
||||||
as_u64(exit, "validator_index").expect("Incomplete exit (validator_index)");
|
|
||||||
|
|
||||||
tuples.push((SlotHeight::from(slot), validator_index));
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(tuples)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the `attester_slashings` section of the YAML document.
|
|
||||||
fn parse_attester_slashings(yaml: &Yaml) -> Option<Vec<AttesterSlashingTuple>> {
|
|
||||||
let mut slashings = vec![];
|
|
||||||
|
|
||||||
for slashing in yaml["attester_slashings"].as_vec()? {
|
|
||||||
let slot = as_u64(slashing, "slot").expect("Incomplete attester_slashing (slot)");
|
|
||||||
let validator_indices = as_vec_u64(slashing, "validator_indices")
|
|
||||||
.expect("Incomplete attester_slashing (validator_indices)");
|
|
||||||
|
|
||||||
slashings.push((SlotHeight::from(slot), validator_indices));
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(slashings)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the `proposer_slashings` section of the YAML document.
|
|
||||||
fn parse_proposer_slashings(yaml: &Yaml) -> Option<Vec<ProposerSlashingTuple>> {
|
|
||||||
let mut slashings = vec![];
|
|
||||||
|
|
||||||
for slashing in yaml["proposer_slashings"].as_vec()? {
|
|
||||||
let slot = as_u64(slashing, "slot").expect("Incomplete proposer slashing (slot)_");
|
|
||||||
let validator_index = as_u64(slashing, "validator_index")
|
|
||||||
.expect("Incomplete proposer slashing (validator_index)");
|
|
||||||
|
|
||||||
slashings.push((SlotHeight::from(slot), validator_index));
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(slashings)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the `deposits` section of the YAML document.
|
|
||||||
fn parse_deposits(yaml: &Yaml) -> Option<Vec<DepositTuple>> {
|
|
||||||
let mut deposits = vec![];
|
|
||||||
|
|
||||||
for deposit in yaml["deposits"].as_vec()? {
|
|
||||||
let slot = as_u64(deposit, "slot").expect("Incomplete deposit (slot)");
|
|
||||||
let amount = as_u64(deposit, "amount").expect("Incomplete deposit (amount)");
|
|
||||||
|
|
||||||
deposits.push((SlotHeight::from(slot), amount))
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(deposits)
|
|
||||||
}
|
|
@ -1,34 +0,0 @@
|
|||||||
use super::state_check::StateCheck;
|
|
||||||
use super::yaml_helpers::as_usize;
|
|
||||||
use yaml_rust::Yaml;
|
|
||||||
|
|
||||||
/// A series of tests to be carried out upon an `ExecutionResult`, returned from executing a
|
|
||||||
/// `TestCase`.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Results {
|
|
||||||
pub num_skipped_slots: Option<usize>,
|
|
||||||
pub state_checks: Option<Vec<StateCheck>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Results {
|
|
||||||
/// Load from a YAML document.
|
|
||||||
///
|
|
||||||
/// Expects the `results` section of the YAML document.
|
|
||||||
pub fn from_yaml(yaml: &Yaml) -> Self {
|
|
||||||
Self {
|
|
||||||
num_skipped_slots: as_usize(yaml, "num_skipped_slots"),
|
|
||||||
state_checks: parse_state_checks(yaml),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the `state_checks` section of the YAML document.
|
|
||||||
fn parse_state_checks(yaml: &Yaml) -> Option<Vec<StateCheck>> {
|
|
||||||
let mut states = vec![];
|
|
||||||
|
|
||||||
for state_yaml in yaml["states"].as_vec()? {
|
|
||||||
states.push(StateCheck::from_yaml(state_yaml));
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(states)
|
|
||||||
}
|
|
@ -1,206 +0,0 @@
|
|||||||
use super::yaml_helpers::{as_u64, as_usize, as_vec_u64};
|
|
||||||
use log::info;
|
|
||||||
use types::*;
|
|
||||||
use yaml_rust::Yaml;
|
|
||||||
|
|
||||||
/// Index of a validator within the registry.
type ValidatorIndex = u64;
/// A balance denominated in Gwei.
type BalanceGwei = u64;

/// (validator_index, comparison, balance)
type BalanceCheckTuple = (ValidatorIndex, String, BalanceGwei);
|
|
||||||
|
|
||||||
/// Tests to be conducted upon a `BeaconState` object generated during the execution of a
|
|
||||||
/// `TestCase`.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct StateCheck {
|
|
||||||
/// Checked against `beacon_state.slot`.
|
|
||||||
pub slot: Slot,
|
|
||||||
/// Checked against `beacon_state.validator_registry.len()`.
|
|
||||||
pub num_validators: Option<usize>,
|
|
||||||
/// The number of pending attestations from the previous epoch that should be in the state.
|
|
||||||
pub num_previous_epoch_attestations: Option<usize>,
|
|
||||||
/// The number of pending attestations from the current epoch that should be in the state.
|
|
||||||
pub num_current_epoch_attestations: Option<usize>,
|
|
||||||
/// A list of validator indices which have been penalized. Must be in ascending order.
|
|
||||||
pub slashed_validators: Option<Vec<u64>>,
|
|
||||||
/// A list of validator indices which have been fully exited. Must be in ascending order.
|
|
||||||
pub exited_validators: Option<Vec<u64>>,
|
|
||||||
/// A list of validator indices which have had an exit initiated. Must be in ascending order.
|
|
||||||
pub exit_initiated_validators: Option<Vec<u64>>,
|
|
||||||
/// A list of balances to check.
|
|
||||||
pub balances: Option<Vec<BalanceCheckTuple>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl StateCheck {
|
|
||||||
/// Load from a YAML document.
|
|
||||||
///
|
|
||||||
/// Expects the `state_check` section of the YAML document.
|
|
||||||
pub fn from_yaml(yaml: &Yaml) -> Self {
|
|
||||||
Self {
|
|
||||||
slot: Slot::from(as_u64(&yaml, "slot").expect("State must specify slot")),
|
|
||||||
num_validators: as_usize(&yaml, "num_validators"),
|
|
||||||
num_previous_epoch_attestations: as_usize(&yaml, "num_previous_epoch_attestations"),
|
|
||||||
num_current_epoch_attestations: as_usize(&yaml, "num_current_epoch_attestations"),
|
|
||||||
slashed_validators: as_vec_u64(&yaml, "slashed_validators"),
|
|
||||||
exited_validators: as_vec_u64(&yaml, "exited_validators"),
|
|
||||||
exit_initiated_validators: as_vec_u64(&yaml, "exit_initiated_validators"),
|
|
||||||
balances: parse_balances(&yaml),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Performs all checks against a `BeaconState`
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// Panics with an error message if any test fails.
|
|
||||||
#[allow(clippy::cyclomatic_complexity)]
|
|
||||||
pub fn assert_valid(&self, state: &BeaconState, spec: &ChainSpec) {
|
|
||||||
let state_epoch = state.slot.epoch(spec.slots_per_epoch);
|
|
||||||
|
|
||||||
info!("Running state check for slot height {}.", self.slot);
|
|
||||||
|
|
||||||
// Check the state slot.
|
|
||||||
assert_eq!(
|
|
||||||
self.slot,
|
|
||||||
state.slot - spec.genesis_epoch.start_slot(spec.slots_per_epoch),
|
|
||||||
"State slot is invalid."
|
|
||||||
);
|
|
||||||
|
|
||||||
// Check the validator count
|
|
||||||
if let Some(num_validators) = self.num_validators {
|
|
||||||
assert_eq!(
|
|
||||||
state.validator_registry.len(),
|
|
||||||
num_validators,
|
|
||||||
"State validator count != expected."
|
|
||||||
);
|
|
||||||
info!("OK: num_validators = {}.", num_validators);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the previous epoch attestations
|
|
||||||
if let Some(n) = self.num_previous_epoch_attestations {
|
|
||||||
assert_eq!(
|
|
||||||
state.previous_epoch_attestations.len(),
|
|
||||||
n,
|
|
||||||
"previous epoch attestations count != expected."
|
|
||||||
);
|
|
||||||
info!("OK: num_previous_epoch_attestations = {}.", n);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the current epoch attestations
|
|
||||||
if let Some(n) = self.num_current_epoch_attestations {
|
|
||||||
assert_eq!(
|
|
||||||
state.current_epoch_attestations.len(),
|
|
||||||
n,
|
|
||||||
"current epoch attestations count != expected."
|
|
||||||
);
|
|
||||||
info!("OK: num_current_epoch_attestations = {}.", n);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for slashed validators.
|
|
||||||
if let Some(ref slashed_validators) = self.slashed_validators {
|
|
||||||
let actually_slashed_validators: Vec<u64> = state
|
|
||||||
.validator_registry
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.filter_map(|(i, validator)| {
|
|
||||||
if validator.slashed {
|
|
||||||
Some(i as u64)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
assert_eq!(
|
|
||||||
actually_slashed_validators, *slashed_validators,
|
|
||||||
"Slashed validators != expected."
|
|
||||||
);
|
|
||||||
info!("OK: slashed_validators = {:?}.", slashed_validators);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for exited validators.
|
|
||||||
if let Some(ref exited_validators) = self.exited_validators {
|
|
||||||
let actually_exited_validators: Vec<u64> = state
|
|
||||||
.validator_registry
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.filter_map(|(i, validator)| {
|
|
||||||
if validator.is_exited_at(state_epoch) {
|
|
||||||
Some(i as u64)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
assert_eq!(
|
|
||||||
actually_exited_validators, *exited_validators,
|
|
||||||
"Exited validators != expected."
|
|
||||||
);
|
|
||||||
info!("OK: exited_validators = {:?}.", exited_validators);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for validators that have initiated exit.
|
|
||||||
if let Some(ref exit_initiated_validators) = self.exit_initiated_validators {
|
|
||||||
let actual: Vec<u64> = state
|
|
||||||
.validator_registry
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.filter_map(|(i, validator)| {
|
|
||||||
if validator.initiated_exit {
|
|
||||||
Some(i as u64)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
assert_eq!(
|
|
||||||
actual, *exit_initiated_validators,
|
|
||||||
"Exit initiated validators != expected."
|
|
||||||
);
|
|
||||||
info!(
|
|
||||||
"OK: exit_initiated_validators = {:?}.",
|
|
||||||
exit_initiated_validators
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check validator balances.
|
|
||||||
if let Some(ref balances) = self.balances {
|
|
||||||
for (index, comparison, expected) in balances {
|
|
||||||
let actual = *state
|
|
||||||
.validator_balances
|
|
||||||
.get(*index as usize)
|
|
||||||
.expect("Balance check specifies unknown validator");
|
|
||||||
|
|
||||||
let result = match comparison.as_ref() {
|
|
||||||
"eq" => actual == *expected,
|
|
||||||
_ => panic!("Unknown balance comparison (use `eq`)"),
|
|
||||||
};
|
|
||||||
assert!(
|
|
||||||
result,
|
|
||||||
format!(
|
|
||||||
"Validator balance for {}: {} !{} {}.",
|
|
||||||
index, actual, comparison, expected
|
|
||||||
)
|
|
||||||
);
|
|
||||||
info!("OK: validator balance for {:?}.", index);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse the `transfers` section of the YAML document.
|
|
||||||
fn parse_balances(yaml: &Yaml) -> Option<Vec<BalanceCheckTuple>> {
|
|
||||||
let mut tuples = vec![];
|
|
||||||
|
|
||||||
for exit in yaml["balances"].as_vec()? {
|
|
||||||
let from =
|
|
||||||
as_u64(exit, "validator_index").expect("Incomplete balance check (validator_index)");
|
|
||||||
let comparison = exit["comparison"]
|
|
||||||
.clone()
|
|
||||||
.into_string()
|
|
||||||
.expect("Incomplete balance check (amount)");
|
|
||||||
let balance = as_u64(exit, "balance").expect("Incomplete balance check (balance)");
|
|
||||||
|
|
||||||
tuples.push((from, comparison, balance));
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(tuples)
|
|
||||||
}
|
|
@ -1,19 +0,0 @@
|
|||||||
use yaml_rust::Yaml;
|
|
||||||
|
|
||||||
pub fn as_usize(yaml: &Yaml, key: &str) -> Option<usize> {
|
|
||||||
yaml[key].as_i64().and_then(|n| Some(n as usize))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn as_u64(yaml: &Yaml, key: &str) -> Option<u64> {
|
|
||||||
yaml[key].as_i64().and_then(|n| Some(n as u64))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn as_vec_u64(yaml: &Yaml, key: &str) -> Option<Vec<u64>> {
|
|
||||||
yaml[key].clone().into_vec().and_then(|vec| {
|
|
||||||
Some(
|
|
||||||
vec.iter()
|
|
||||||
.map(|item| item.as_i64().unwrap() as u64)
|
|
||||||
.collect(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
@ -1,100 +0,0 @@
|
|||||||
use attester::{
|
|
||||||
BeaconNode as AttesterBeaconNode, BeaconNodeError as NodeError,
|
|
||||||
PublishOutcome as AttestationPublishOutcome,
|
|
||||||
};
|
|
||||||
use beacon_chain::BeaconChain;
|
|
||||||
use block_proposer::{
|
|
||||||
BeaconNode as BeaconBlockNode, BeaconNodeError as BeaconBlockNodeError,
|
|
||||||
PublishOutcome as BlockPublishOutcome,
|
|
||||||
};
|
|
||||||
use db::ClientDB;
|
|
||||||
use fork_choice::ForkChoice;
|
|
||||||
use parking_lot::RwLock;
|
|
||||||
use slot_clock::SlotClock;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use types::{AttestationData, BeaconBlock, FreeAttestation, Signature, Slot};
|
|
||||||
|
|
||||||
/// Connect directly to a borrowed `BeaconChain` instance so an attester/producer can request/submit
|
|
||||||
/// blocks/attestations.
|
|
||||||
///
|
|
||||||
/// `BeaconBlock`s and `FreeAttestation`s are not actually published to the `BeaconChain`, instead
|
|
||||||
/// they are stored inside this struct. This is to allow one to benchmark the submission of the
|
|
||||||
/// block/attestation directly, or modify it before submission.
|
|
||||||
pub struct DirectBeaconNode<T: ClientDB, U: SlotClock, F: ForkChoice> {
|
|
||||||
beacon_chain: Arc<BeaconChain<T, U, F>>,
|
|
||||||
published_blocks: RwLock<Vec<BeaconBlock>>,
|
|
||||||
published_attestations: RwLock<Vec<FreeAttestation>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectBeaconNode<T, U, F> {
|
|
||||||
pub fn new(beacon_chain: Arc<BeaconChain<T, U, F>>) -> Self {
|
|
||||||
Self {
|
|
||||||
beacon_chain,
|
|
||||||
published_blocks: RwLock::new(vec![]),
|
|
||||||
published_attestations: RwLock::new(vec![]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the last published block (if any).
|
|
||||||
pub fn last_published_block(&self) -> Option<BeaconBlock> {
|
|
||||||
Some(self.published_blocks.read().last()?.clone())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterBeaconNode for DirectBeaconNode<T, U, F> {
|
|
||||||
fn produce_attestation_data(
|
|
||||||
&self,
|
|
||||||
_slot: Slot,
|
|
||||||
shard: u64,
|
|
||||||
) -> Result<Option<AttestationData>, NodeError> {
|
|
||||||
match self.beacon_chain.produce_attestation_data(shard) {
|
|
||||||
Ok(attestation_data) => Ok(Some(attestation_data)),
|
|
||||||
Err(e) => Err(NodeError::RemoteFailure(format!("{:?}", e))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn publish_attestation(
|
|
||||||
&self,
|
|
||||||
free_attestation: FreeAttestation,
|
|
||||||
) -> Result<AttestationPublishOutcome, NodeError> {
|
|
||||||
self.published_attestations.write().push(free_attestation);
|
|
||||||
Ok(AttestationPublishOutcome::ValidAttestation)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: ClientDB, U: SlotClock, F: ForkChoice> BeaconBlockNode for DirectBeaconNode<T, U, F> {
|
|
||||||
/// Requests a new `BeaconBlock from the `BeaconChain`.
|
|
||||||
fn produce_beacon_block(
|
|
||||||
&self,
|
|
||||||
slot: Slot,
|
|
||||||
randao_reveal: &Signature,
|
|
||||||
) -> Result<Option<BeaconBlock>, BeaconBlockNodeError> {
|
|
||||||
let (block, _state) = self
|
|
||||||
.beacon_chain
|
|
||||||
.produce_block(randao_reveal.clone())
|
|
||||||
.map_err(|e| {
|
|
||||||
BeaconBlockNodeError::RemoteFailure(format!("Did not produce block: {:?}", e))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if block.slot == slot {
|
|
||||||
Ok(Some(block))
|
|
||||||
} else {
|
|
||||||
Err(BeaconBlockNodeError::RemoteFailure(
|
|
||||||
"Unable to produce at non-current slot.".to_string(),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A block is not _actually_ published to the `BeaconChain`, instead it is stored in the
|
|
||||||
/// `published_block_vec` and a successful `ValidBlock` is returned to the caller.
|
|
||||||
///
|
|
||||||
/// The block may be retrieved and then applied to the `BeaconChain` manually, potentially in a
|
|
||||||
/// benchmarking scenario.
|
|
||||||
fn publish_beacon_block(
|
|
||||||
&self,
|
|
||||||
block: BeaconBlock,
|
|
||||||
) -> Result<BlockPublishOutcome, BeaconBlockNodeError> {
|
|
||||||
self.published_blocks.write().push(block);
|
|
||||||
Ok(BlockPublishOutcome::ValidBlock)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,74 +0,0 @@
|
|||||||
use attester::{
|
|
||||||
DutiesReader as AttesterDutiesReader, DutiesReaderError as AttesterDutiesReaderError,
|
|
||||||
};
|
|
||||||
use beacon_chain::BeaconChain;
|
|
||||||
use block_proposer::{
|
|
||||||
DutiesReader as ProducerDutiesReader, DutiesReaderError as ProducerDutiesReaderError,
|
|
||||||
};
|
|
||||||
use db::ClientDB;
|
|
||||||
use fork_choice::ForkChoice;
|
|
||||||
use slot_clock::SlotClock;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use types::{Fork, PublicKey, Slot};
|
|
||||||
|
|
||||||
/// Connects directly to a borrowed `BeaconChain` and reads attester/proposer duties directly from
|
|
||||||
/// it.
|
|
||||||
pub struct DirectDuties<T: ClientDB, U: SlotClock, F: ForkChoice> {
|
|
||||||
beacon_chain: Arc<BeaconChain<T, U, F>>,
|
|
||||||
pubkey: PublicKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: ClientDB, U: SlotClock, F: ForkChoice> DirectDuties<T, U, F> {
|
|
||||||
pub fn new(pubkey: PublicKey, beacon_chain: Arc<BeaconChain<T, U, F>>) -> Self {
|
|
||||||
Self {
|
|
||||||
beacon_chain,
|
|
||||||
pubkey,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: ClientDB, U: SlotClock, F: ForkChoice> ProducerDutiesReader for DirectDuties<T, U, F> {
|
|
||||||
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, ProducerDutiesReaderError> {
|
|
||||||
let validator_index = self
|
|
||||||
.beacon_chain
|
|
||||||
.validator_index(&self.pubkey)
|
|
||||||
.ok_or_else(|| ProducerDutiesReaderError::UnknownValidator)?;
|
|
||||||
|
|
||||||
match self.beacon_chain.block_proposer(slot) {
|
|
||||||
Ok(proposer) if proposer == validator_index => Ok(true),
|
|
||||||
Ok(_) => Ok(false),
|
|
||||||
Err(_) => Err(ProducerDutiesReaderError::UnknownEpoch),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn fork(&self) -> Result<Fork, ProducerDutiesReaderError> {
|
|
||||||
Ok(self.beacon_chain.state.read().fork.clone())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: ClientDB, U: SlotClock, F: ForkChoice> AttesterDutiesReader for DirectDuties<T, U, F> {
|
|
||||||
fn validator_index(&self) -> Option<u64> {
|
|
||||||
match self.beacon_chain.validator_index(&self.pubkey) {
|
|
||||||
Some(index) => Some(index as u64),
|
|
||||||
None => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, AttesterDutiesReaderError> {
|
|
||||||
if let Some(validator_index) = self.validator_index() {
|
|
||||||
match self
|
|
||||||
.beacon_chain
|
|
||||||
.validator_attestion_slot_and_shard(validator_index as usize)
|
|
||||||
{
|
|
||||||
Ok(Some((attest_slot, attest_shard))) if attest_slot == slot => {
|
|
||||||
Ok(Some(attest_shard))
|
|
||||||
}
|
|
||||||
Ok(Some(_)) => Ok(None),
|
|
||||||
Ok(None) => Err(AttesterDutiesReaderError::UnknownEpoch),
|
|
||||||
Err(_) => unreachable!("Error when getting validator attestation shard."),
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Err(AttesterDutiesReaderError::UnknownValidator)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,36 +0,0 @@
|
|||||||
use attester::Signer as AttesterSigner;
|
|
||||||
use block_proposer::Signer as BlockProposerSigner;
|
|
||||||
use types::{Keypair, Signature};
|
|
||||||
|
|
||||||
/// A test-only struct used to perform signing for a proposer or attester.
|
|
||||||
pub struct LocalSigner {
|
|
||||||
keypair: Keypair,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl LocalSigner {
|
|
||||||
/// Produce a new TestSigner with signing enabled by default.
|
|
||||||
pub fn new(keypair: Keypair) -> Self {
|
|
||||||
Self { keypair }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Sign some message.
|
|
||||||
fn bls_sign(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
|
||||||
Some(Signature::new(message, domain, &self.keypair.sk))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BlockProposerSigner for LocalSigner {
|
|
||||||
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
|
||||||
self.bls_sign(message, domain)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
|
||||||
self.bls_sign(message, domain)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AttesterSigner for LocalSigner {
|
|
||||||
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
|
||||||
self.bls_sign(message, domain)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,119 +0,0 @@
|
|||||||
mod direct_beacon_node;
|
|
||||||
mod direct_duties;
|
|
||||||
mod local_signer;
|
|
||||||
|
|
||||||
use attester::Attester;
|
|
||||||
use beacon_chain::BeaconChain;
|
|
||||||
use block_proposer::PollOutcome as BlockPollOutcome;
|
|
||||||
use block_proposer::{BlockProducer, Error as BlockPollError};
|
|
||||||
use db::MemoryDB;
|
|
||||||
use direct_beacon_node::DirectBeaconNode;
|
|
||||||
use direct_duties::DirectDuties;
|
|
||||||
use fork_choice::BitwiseLMDGhost;
|
|
||||||
use local_signer::LocalSigner;
|
|
||||||
use slot_clock::TestingSlotClock;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use types::{BeaconBlock, ChainSpec, Keypair, Slot};
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum BlockProduceError {
|
|
||||||
DidNotProduce(BlockPollOutcome),
|
|
||||||
PollError(BlockPollError),
|
|
||||||
}
|
|
||||||
|
|
||||||
type TestingBlockProducer = BlockProducer<
|
|
||||||
TestingSlotClock,
|
|
||||||
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
|
|
||||||
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
|
|
||||||
LocalSigner,
|
|
||||||
>;
|
|
||||||
|
|
||||||
type TestingAttester = Attester<
|
|
||||||
TestingSlotClock,
|
|
||||||
DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
|
|
||||||
DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>,
|
|
||||||
LocalSigner,
|
|
||||||
>;
|
|
||||||
|
|
||||||
/// A `BlockProducer` and `Attester` which sign using a common keypair.
|
|
||||||
///
|
|
||||||
/// The test validator connects directly to a borrowed `BeaconChain` struct. It is useful for
|
|
||||||
/// testing that the core proposer and attester logic is functioning. Also for supporting beacon
|
|
||||||
/// chain tests.
|
|
||||||
pub struct ValidatorHarness {
|
|
||||||
pub block_producer: TestingBlockProducer,
|
|
||||||
pub attester: TestingAttester,
|
|
||||||
pub spec: Arc<ChainSpec>,
|
|
||||||
pub epoch_map: Arc<DirectDuties<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
|
|
||||||
pub keypair: Keypair,
|
|
||||||
pub beacon_node: Arc<DirectBeaconNode<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
|
|
||||||
pub slot_clock: Arc<TestingSlotClock>,
|
|
||||||
pub signer: Arc<LocalSigner>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ValidatorHarness {
|
|
||||||
/// Create a new ValidatorHarness that signs with the given keypair, operates per the given spec and connects to the
|
|
||||||
/// supplied beacon node.
|
|
||||||
///
|
|
||||||
/// A `BlockProducer` and `Attester` is created..
|
|
||||||
pub fn new(
|
|
||||||
keypair: Keypair,
|
|
||||||
beacon_chain: Arc<BeaconChain<MemoryDB, TestingSlotClock, BitwiseLMDGhost<MemoryDB>>>,
|
|
||||||
spec: Arc<ChainSpec>,
|
|
||||||
) -> Self {
|
|
||||||
let slot_clock = Arc::new(TestingSlotClock::new(spec.genesis_slot.as_u64()));
|
|
||||||
let signer = Arc::new(LocalSigner::new(keypair.clone()));
|
|
||||||
let beacon_node = Arc::new(DirectBeaconNode::new(beacon_chain.clone()));
|
|
||||||
let epoch_map = Arc::new(DirectDuties::new(keypair.pk.clone(), beacon_chain.clone()));
|
|
||||||
|
|
||||||
let block_producer = BlockProducer::new(
|
|
||||||
spec.clone(),
|
|
||||||
epoch_map.clone(),
|
|
||||||
slot_clock.clone(),
|
|
||||||
beacon_node.clone(),
|
|
||||||
signer.clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let attester = Attester::new(
|
|
||||||
epoch_map.clone(),
|
|
||||||
slot_clock.clone(),
|
|
||||||
beacon_node.clone(),
|
|
||||||
signer.clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Self {
|
|
||||||
block_producer,
|
|
||||||
attester,
|
|
||||||
spec,
|
|
||||||
epoch_map,
|
|
||||||
keypair,
|
|
||||||
beacon_node,
|
|
||||||
slot_clock,
|
|
||||||
signer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Run the `poll` function on the `BlockProducer` and produce a block.
|
|
||||||
///
|
|
||||||
/// An error is returned if the producer refuses to produce.
|
|
||||||
pub fn produce_block(&mut self) -> Result<BeaconBlock, BlockProduceError> {
|
|
||||||
// Using `DirectBeaconNode`, the validator will always return sucessufully if it tries to
|
|
||||||
// publish a block.
|
|
||||||
match self.block_producer.poll() {
|
|
||||||
Ok(BlockPollOutcome::BlockProduced(_)) => {}
|
|
||||||
Ok(outcome) => return Err(BlockProduceError::DidNotProduce(outcome)),
|
|
||||||
Err(error) => return Err(BlockProduceError::PollError(error)),
|
|
||||||
};
|
|
||||||
Ok(self
|
|
||||||
.beacon_node
|
|
||||||
.last_published_block()
|
|
||||||
.expect("Unable to obtain produced block."))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the validators slot clock to the specified slot.
|
|
||||||
///
|
|
||||||
/// The validators slot clock will always read this value until it is set to something else.
|
|
||||||
pub fn set_slot(&mut self, slot: Slot) {
|
|
||||||
self.slot_clock.set_slot(slot.as_u64())
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,46 +0,0 @@
|
|||||||
#![cfg(not(debug_assertions))]
|
|
||||||
|
|
||||||
use env_logger::{Builder, Env};
|
|
||||||
use log::debug;
|
|
||||||
use test_harness::BeaconChainHarness;
|
|
||||||
use types::ChainSpec;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn it_can_build_on_genesis_block() {
|
|
||||||
Builder::from_env(Env::default().default_filter_or("info")).init();
|
|
||||||
|
|
||||||
let spec = ChainSpec::few_validators();
|
|
||||||
let validator_count = 8;
|
|
||||||
|
|
||||||
let mut harness = BeaconChainHarness::new(spec, validator_count as usize);
|
|
||||||
|
|
||||||
harness.advance_chain_with_block();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[ignore]
|
|
||||||
fn it_can_produce_past_first_epoch_boundary() {
|
|
||||||
Builder::from_env(Env::default().default_filter_or("info")).init();
|
|
||||||
|
|
||||||
let spec = ChainSpec::few_validators();
|
|
||||||
let validator_count = 8;
|
|
||||||
|
|
||||||
debug!("Starting harness build...");
|
|
||||||
|
|
||||||
let mut harness = BeaconChainHarness::new(spec, validator_count);
|
|
||||||
|
|
||||||
debug!("Harness built, tests starting..");
|
|
||||||
|
|
||||||
let blocks = harness.spec.slots_per_epoch * 2 + 1;
|
|
||||||
|
|
||||||
for i in 0..blocks {
|
|
||||||
harness.advance_chain_with_block();
|
|
||||||
debug!("Produced block {}/{}.", i + 1, blocks);
|
|
||||||
}
|
|
||||||
|
|
||||||
harness.run_fork_choice();
|
|
||||||
|
|
||||||
let dump = harness.chain_dump().expect("Chain dump failed.");
|
|
||||||
|
|
||||||
assert_eq!(dump.len() as u64, blocks + 1); // + 1 for genesis block.
|
|
||||||
}
|
|
@ -9,8 +9,8 @@ use std::net::{IpAddr, Ipv4Addr};
|
|||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use types::multiaddr::Protocol;
|
use types::multiaddr::Protocol;
|
||||||
use types::multiaddr::ToMultiaddr;
|
use types::multiaddr::ToMultiaddr;
|
||||||
use types::ChainSpec;
|
|
||||||
use types::Multiaddr;
|
use types::Multiaddr;
|
||||||
|
use types::{ChainSpec, EthSpec, LighthouseTestnetEthSpec};
|
||||||
|
|
||||||
/// Stores the client configuration for this Lighthouse instance.
|
/// Stores the client configuration for this Lighthouse instance.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone)]
|
||||||
@ -35,7 +35,7 @@ impl Default for ClientConfig {
|
|||||||
fs::create_dir_all(&data_dir)
|
fs::create_dir_all(&data_dir)
|
||||||
.unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
|
.unwrap_or_else(|_| panic!("Unable to create {:?}", &data_dir));
|
||||||
|
|
||||||
let default_spec = ChainSpec::lighthouse_testnet();
|
let default_spec = LighthouseTestnetEthSpec::spec();
|
||||||
let default_net_conf = NetworkConfig::new(default_spec.boot_nodes.clone());
|
let default_net_conf = NetworkConfig::new(default_spec.boot_nodes.clone());
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
|
@ -1,23 +1,22 @@
|
|||||||
use crate::ClientConfig;
|
use crate::{ArcBeaconChain, ClientConfig};
|
||||||
use beacon_chain::{
|
use beacon_chain::{
|
||||||
db::{ClientDB, DiskDB, MemoryDB},
|
db::{ClientDB, DiskDB, MemoryDB},
|
||||||
fork_choice::BitwiseLMDGhost,
|
fork_choice::BitwiseLMDGhost,
|
||||||
initialise,
|
initialise,
|
||||||
slot_clock::{SlotClock, SystemTimeSlotClock},
|
slot_clock::{SlotClock, SystemTimeSlotClock},
|
||||||
BeaconChain,
|
|
||||||
};
|
};
|
||||||
use fork_choice::ForkChoice;
|
use fork_choice::ForkChoice;
|
||||||
|
use types::{EthSpec, FewValidatorsEthSpec, FoundationEthSpec};
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
pub trait ClientTypes {
|
pub trait ClientTypes {
|
||||||
type DB: ClientDB + 'static;
|
type DB: ClientDB + 'static;
|
||||||
type SlotClock: SlotClock + 'static;
|
type SlotClock: SlotClock + 'static;
|
||||||
type ForkChoice: ForkChoice + 'static;
|
type ForkChoice: ForkChoice + 'static;
|
||||||
|
type EthSpec: EthSpec + 'static;
|
||||||
|
|
||||||
fn initialise_beacon_chain(
|
fn initialise_beacon_chain(
|
||||||
config: &ClientConfig,
|
config: &ClientConfig,
|
||||||
) -> Arc<BeaconChain<Self::DB, Self::SlotClock, Self::ForkChoice>>;
|
) -> ArcBeaconChain<Self::DB, Self::SlotClock, Self::ForkChoice, Self::EthSpec>;
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct StandardClientType;
|
pub struct StandardClientType;
|
||||||
@ -25,11 +24,12 @@ pub struct StandardClientType;
|
|||||||
impl ClientTypes for StandardClientType {
|
impl ClientTypes for StandardClientType {
|
||||||
type DB = DiskDB;
|
type DB = DiskDB;
|
||||||
type SlotClock = SystemTimeSlotClock;
|
type SlotClock = SystemTimeSlotClock;
|
||||||
type ForkChoice = BitwiseLMDGhost<DiskDB>;
|
type ForkChoice = BitwiseLMDGhost<DiskDB, Self::EthSpec>;
|
||||||
|
type EthSpec = FoundationEthSpec;
|
||||||
|
|
||||||
fn initialise_beacon_chain(
|
fn initialise_beacon_chain(
|
||||||
config: &ClientConfig,
|
config: &ClientConfig,
|
||||||
) -> Arc<BeaconChain<Self::DB, Self::SlotClock, Self::ForkChoice>> {
|
) -> ArcBeaconChain<Self::DB, Self::SlotClock, Self::ForkChoice, Self::EthSpec> {
|
||||||
initialise::initialise_beacon_chain(&config.spec, Some(&config.db_name))
|
initialise::initialise_beacon_chain(&config.spec, Some(&config.db_name))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -39,11 +39,12 @@ pub struct TestingClientType;
|
|||||||
impl ClientTypes for TestingClientType {
|
impl ClientTypes for TestingClientType {
|
||||||
type DB = MemoryDB;
|
type DB = MemoryDB;
|
||||||
type SlotClock = SystemTimeSlotClock;
|
type SlotClock = SystemTimeSlotClock;
|
||||||
type ForkChoice = BitwiseLMDGhost<MemoryDB>;
|
type ForkChoice = BitwiseLMDGhost<MemoryDB, Self::EthSpec>;
|
||||||
|
type EthSpec = FewValidatorsEthSpec;
|
||||||
|
|
||||||
fn initialise_beacon_chain(
|
fn initialise_beacon_chain(
|
||||||
config: &ClientConfig,
|
config: &ClientConfig,
|
||||||
) -> Arc<BeaconChain<Self::DB, Self::SlotClock, Self::ForkChoice>> {
|
) -> ArcBeaconChain<Self::DB, Self::SlotClock, Self::ForkChoice, Self::EthSpec> {
|
||||||
initialise::initialise_test_beacon_chain(&config.spec, None)
|
initialise::initialise_test_beacon_chain(&config.spec, None)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -20,6 +20,9 @@ use std::sync::Arc;
|
|||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
use tokio::runtime::TaskExecutor;
|
use tokio::runtime::TaskExecutor;
|
||||||
use tokio::timer::Interval;
|
use tokio::timer::Interval;
|
||||||
|
use types::EthSpec;
|
||||||
|
|
||||||
|
type ArcBeaconChain<D, S, F, B> = Arc<BeaconChain<D, S, F, B>>;
|
||||||
|
|
||||||
/// Main beacon node client service. This provides the connection and initialisation of the clients
|
/// Main beacon node client service. This provides the connection and initialisation of the clients
|
||||||
/// sub-services in multiple threads.
|
/// sub-services in multiple threads.
|
||||||
@ -27,9 +30,9 @@ pub struct Client<T: ClientTypes> {
|
|||||||
/// Configuration for the lighthouse client.
|
/// Configuration for the lighthouse client.
|
||||||
_config: ClientConfig,
|
_config: ClientConfig,
|
||||||
/// The beacon chain for the running client.
|
/// The beacon chain for the running client.
|
||||||
_beacon_chain: Arc<BeaconChain<T::DB, T::SlotClock, T::ForkChoice>>,
|
_beacon_chain: ArcBeaconChain<T::DB, T::SlotClock, T::ForkChoice, T::EthSpec>,
|
||||||
/// Reference to the network service.
|
/// Reference to the network service.
|
||||||
pub network: Arc<NetworkService>,
|
pub network: Arc<NetworkService<T::EthSpec>>,
|
||||||
/// Signal to terminate the RPC server.
|
/// Signal to terminate the RPC server.
|
||||||
pub rpc_exit_signal: Option<Signal>,
|
pub rpc_exit_signal: Option<Signal>,
|
||||||
/// Signal to terminate the slot timer.
|
/// Signal to terminate the slot timer.
|
||||||
@ -141,11 +144,12 @@ impl<TClientType: ClientTypes> Client<TClientType> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn do_state_catchup<T, U, F>(chain: &Arc<BeaconChain<T, U, F>>, log: &slog::Logger)
|
fn do_state_catchup<T, U, F, B>(chain: &Arc<BeaconChain<T, U, F, B>>, log: &slog::Logger)
|
||||||
where
|
where
|
||||||
T: ClientDB,
|
T: ClientDB,
|
||||||
U: SlotClock,
|
U: SlotClock,
|
||||||
F: ForkChoice,
|
F: ForkChoice,
|
||||||
|
B: EthSpec,
|
||||||
{
|
{
|
||||||
if let Some(genesis_height) = chain.slots_since_genesis() {
|
if let Some(genesis_height) = chain.slots_since_genesis() {
|
||||||
let result = chain.catchup_state();
|
let result = chain.catchup_state();
|
||||||
|
@ -97,7 +97,7 @@ impl ClientDB for DiskDB {
|
|||||||
None => Err(DBError {
|
None => Err(DBError {
|
||||||
message: "Unknown column".to_string(),
|
message: "Unknown column".to_string(),
|
||||||
}),
|
}),
|
||||||
Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()),
|
Some(handle) => self.db.put_cf(handle, key, val).map_err(Into::into),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2,7 +2,7 @@ use super::STATES_DB_COLUMN as DB_COLUMN;
|
|||||||
use super::{ClientDB, DBError};
|
use super::{ClientDB, DBError};
|
||||||
use ssz::decode;
|
use ssz::decode;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{BeaconState, Hash256};
|
use types::{BeaconState, EthSpec, Hash256};
|
||||||
|
|
||||||
pub struct BeaconStateStore<T>
|
pub struct BeaconStateStore<T>
|
||||||
where
|
where
|
||||||
@ -19,11 +19,14 @@ impl<T: ClientDB> BeaconStateStore<T> {
|
|||||||
Self { db }
|
Self { db }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_deserialized(&self, hash: &Hash256) -> Result<Option<BeaconState>, DBError> {
|
pub fn get_deserialized<B: EthSpec>(
|
||||||
|
&self,
|
||||||
|
hash: &Hash256,
|
||||||
|
) -> Result<Option<BeaconState<B>>, DBError> {
|
||||||
match self.get(&hash)? {
|
match self.get(&hash)? {
|
||||||
None => Ok(None),
|
None => Ok(None),
|
||||||
Some(ssz) => {
|
Some(ssz) => {
|
||||||
let state = decode::<BeaconState>(&ssz).map_err(|_| DBError {
|
let state = decode::<BeaconState<B>>(&ssz).map_err(|_| DBError {
|
||||||
message: "Bad State SSZ.".to_string(),
|
message: "Bad State SSZ.".to_string(),
|
||||||
})?;
|
})?;
|
||||||
Ok(Some(state))
|
Ok(Some(state))
|
||||||
@ -40,7 +43,7 @@ mod tests {
|
|||||||
use ssz::ssz_encode;
|
use ssz::ssz_encode;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
|
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
|
||||||
use types::Hash256;
|
use types::{FoundationBeaconState, Hash256};
|
||||||
|
|
||||||
test_crud_for_store!(BeaconStateStore, DB_COLUMN);
|
test_crud_for_store!(BeaconStateStore, DB_COLUMN);
|
||||||
|
|
||||||
@ -50,7 +53,7 @@ mod tests {
|
|||||||
let store = BeaconStateStore::new(db.clone());
|
let store = BeaconStateStore::new(db.clone());
|
||||||
|
|
||||||
let mut rng = XorShiftRng::from_seed([42; 16]);
|
let mut rng = XorShiftRng::from_seed([42; 16]);
|
||||||
let state = BeaconState::random_for_test(&mut rng);
|
let state: FoundationBeaconState = BeaconState::random_for_test(&mut rng);
|
||||||
let state_root = state.canonical_root();
|
let state_root = state.canonical_root();
|
||||||
|
|
||||||
store.put(&state_root, &ssz_encode(&state)).unwrap();
|
store.put(&state_root, &ssz_encode(&state)).unwrap();
|
||||||
|
@ -236,7 +236,7 @@ mod test {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn ssz_encoding() {
|
fn ssz_encoding() {
|
||||||
let original = PubsubMessage::Block(BeaconBlock::empty(&ChainSpec::foundation()));
|
let original = PubsubMessage::Block(BeaconBlock::empty(&FoundationEthSpec::spec()));
|
||||||
|
|
||||||
let encoded = ssz_encode(&original);
|
let encoded = ssz_encode(&original);
|
||||||
|
|
||||||
|
@ -5,7 +5,6 @@ authors = ["Age Manning <Age@AgeManning.com>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
test_harness = { path = "../beacon_chain/test_harness" }
|
|
||||||
sloggers = "0.3.2"
|
sloggers = "0.3.2"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
@ -8,19 +8,21 @@ use beacon_chain::{
|
|||||||
AttestationValidationError, CheckPoint,
|
AttestationValidationError, CheckPoint,
|
||||||
};
|
};
|
||||||
use eth2_libp2p::rpc::HelloMessage;
|
use eth2_libp2p::rpc::HelloMessage;
|
||||||
use types::{Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, Hash256, Slot};
|
use types::{
|
||||||
|
Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot,
|
||||||
|
};
|
||||||
|
|
||||||
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome, InvalidBlock};
|
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome, InvalidBlock};
|
||||||
|
|
||||||
/// The network's API to the beacon chain.
|
/// The network's API to the beacon chain.
|
||||||
pub trait BeaconChain: Send + Sync {
|
pub trait BeaconChain<B: EthSpec>: Send + Sync {
|
||||||
fn get_spec(&self) -> &ChainSpec;
|
fn get_spec(&self) -> &ChainSpec;
|
||||||
|
|
||||||
fn get_state(&self) -> RwLockReadGuard<BeaconState>;
|
fn get_state(&self) -> RwLockReadGuard<BeaconState<B>>;
|
||||||
|
|
||||||
fn slot(&self) -> Slot;
|
fn slot(&self) -> Slot;
|
||||||
|
|
||||||
fn head(&self) -> RwLockReadGuard<CheckPoint>;
|
fn head(&self) -> RwLockReadGuard<CheckPoint<B>>;
|
||||||
|
|
||||||
fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, BeaconChainError>;
|
fn get_block(&self, block_root: &Hash256) -> Result<Option<BeaconBlock>, BeaconChainError>;
|
||||||
|
|
||||||
@ -28,7 +30,7 @@ pub trait BeaconChain: Send + Sync {
|
|||||||
|
|
||||||
fn best_block_root(&self) -> Hash256;
|
fn best_block_root(&self) -> Hash256;
|
||||||
|
|
||||||
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint>;
|
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint<B>>;
|
||||||
|
|
||||||
fn finalized_epoch(&self) -> Epoch;
|
fn finalized_epoch(&self) -> Epoch;
|
||||||
|
|
||||||
@ -62,17 +64,18 @@ pub trait BeaconChain: Send + Sync {
|
|||||||
fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, BeaconChainError>;
|
fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, BeaconChainError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, U, F> BeaconChain for RawBeaconChain<T, U, F>
|
impl<T, U, F, B> BeaconChain<B> for RawBeaconChain<T, U, F, B>
|
||||||
where
|
where
|
||||||
T: ClientDB + Sized,
|
T: ClientDB + Sized,
|
||||||
U: SlotClock,
|
U: SlotClock,
|
||||||
F: ForkChoice,
|
F: ForkChoice,
|
||||||
|
B: EthSpec,
|
||||||
{
|
{
|
||||||
fn get_spec(&self) -> &ChainSpec {
|
fn get_spec(&self) -> &ChainSpec {
|
||||||
&self.spec
|
&self.spec
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_state(&self) -> RwLockReadGuard<BeaconState> {
|
fn get_state(&self) -> RwLockReadGuard<BeaconState<B>> {
|
||||||
self.state.read()
|
self.state.read()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -80,7 +83,7 @@ where
|
|||||||
self.get_state().slot
|
self.get_state().slot
|
||||||
}
|
}
|
||||||
|
|
||||||
fn head(&self) -> RwLockReadGuard<CheckPoint> {
|
fn head(&self) -> RwLockReadGuard<CheckPoint<B>> {
|
||||||
self.head()
|
self.head()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -92,7 +95,7 @@ where
|
|||||||
self.get_state().finalized_epoch
|
self.get_state().finalized_epoch
|
||||||
}
|
}
|
||||||
|
|
||||||
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint> {
|
fn finalized_head(&self) -> RwLockReadGuard<CheckPoint<B>> {
|
||||||
self.finalized_head()
|
self.finalized_head()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -13,6 +13,7 @@ use slog::{debug, warn};
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Instant;
|
use std::time::Instant;
|
||||||
|
use types::EthSpec;
|
||||||
|
|
||||||
/// Timeout for RPC requests.
|
/// Timeout for RPC requests.
|
||||||
// const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
|
// const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
|
||||||
@ -20,11 +21,11 @@ use std::time::Instant;
|
|||||||
// const HELLO_TIMEOUT: Duration = Duration::from_secs(30);
|
// const HELLO_TIMEOUT: Duration = Duration::from_secs(30);
|
||||||
|
|
||||||
/// Handles messages received from the network and client and organises syncing.
|
/// Handles messages received from the network and client and organises syncing.
|
||||||
pub struct MessageHandler {
|
pub struct MessageHandler<B: EthSpec> {
|
||||||
/// Currently loaded and initialised beacon chain.
|
/// Currently loaded and initialised beacon chain.
|
||||||
_chain: Arc<BeaconChain>,
|
_chain: Arc<BeaconChain<B>>,
|
||||||
/// The syncing framework.
|
/// The syncing framework.
|
||||||
sync: SimpleSync,
|
sync: SimpleSync<B>,
|
||||||
/// The context required to send messages to, and process messages from peers.
|
/// The context required to send messages to, and process messages from peers.
|
||||||
network_context: NetworkContext,
|
network_context: NetworkContext,
|
||||||
/// The `MessageHandler` logger.
|
/// The `MessageHandler` logger.
|
||||||
@ -44,10 +45,10 @@ pub enum HandlerMessage {
|
|||||||
PubsubMessage(PeerId, Box<PubsubMessage>),
|
PubsubMessage(PeerId, Box<PubsubMessage>),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MessageHandler {
|
impl<B: EthSpec> MessageHandler<B> {
|
||||||
/// Initializes and runs the MessageHandler.
|
/// Initializes and runs the MessageHandler.
|
||||||
pub fn spawn(
|
pub fn spawn(
|
||||||
beacon_chain: Arc<BeaconChain>,
|
beacon_chain: Arc<BeaconChain<B>>,
|
||||||
network_send: crossbeam_channel::Sender<NetworkMessage>,
|
network_send: crossbeam_channel::Sender<NetworkMessage>,
|
||||||
executor: &tokio::runtime::TaskExecutor,
|
executor: &tokio::runtime::TaskExecutor,
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
@ -299,7 +300,7 @@ impl NetworkContext {
|
|||||||
let next_id = self
|
let next_id = self
|
||||||
.outgoing_request_ids
|
.outgoing_request_ids
|
||||||
.entry(peer_id.clone())
|
.entry(peer_id.clone())
|
||||||
.and_modify(|id| id.increment())
|
.and_modify(RequestId::increment)
|
||||||
.or_insert_with(|| RequestId::from(1));
|
.or_insert_with(|| RequestId::from(1));
|
||||||
|
|
||||||
next_id.previous()
|
next_id.previous()
|
||||||
|
@ -10,22 +10,23 @@ use futures::prelude::*;
|
|||||||
use futures::sync::oneshot;
|
use futures::sync::oneshot;
|
||||||
use futures::Stream;
|
use futures::Stream;
|
||||||
use slog::{debug, info, o, trace};
|
use slog::{debug, info, o, trace};
|
||||||
|
use std::marker::PhantomData;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tokio::runtime::TaskExecutor;
|
use tokio::runtime::TaskExecutor;
|
||||||
use types::Topic;
|
use types::{EthSpec, Topic};
|
||||||
|
|
||||||
/// Service that handles communication between internal services and the eth2_libp2p network service.
|
/// Service that handles communication between internal services and the eth2_libp2p network service.
|
||||||
pub struct Service {
|
pub struct Service<B: EthSpec> {
|
||||||
//libp2p_service: Arc<Mutex<LibP2PService>>,
|
//libp2p_service: Arc<Mutex<LibP2PService>>,
|
||||||
_libp2p_exit: oneshot::Sender<()>,
|
_libp2p_exit: oneshot::Sender<()>,
|
||||||
network_send: crossbeam_channel::Sender<NetworkMessage>,
|
network_send: crossbeam_channel::Sender<NetworkMessage>,
|
||||||
//message_handler: MessageHandler,
|
_phantom: PhantomData<B>, //message_handler: MessageHandler,
|
||||||
//message_handler_send: Sender<HandlerMessage>,
|
//message_handler_send: Sender<HandlerMessage>
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Service {
|
impl<B: EthSpec> Service<B> {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
beacon_chain: Arc<BeaconChain>,
|
beacon_chain: Arc<BeaconChain<B>>,
|
||||||
config: &NetworkConfig,
|
config: &NetworkConfig,
|
||||||
executor: &TaskExecutor,
|
executor: &TaskExecutor,
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
@ -56,6 +57,7 @@ impl Service {
|
|||||||
let network_service = Service {
|
let network_service = Service {
|
||||||
_libp2p_exit: libp2p_exit,
|
_libp2p_exit: libp2p_exit,
|
||||||
network_send: network_send.clone(),
|
network_send: network_send.clone(),
|
||||||
|
_phantom: PhantomData,
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok((Arc::new(network_service), network_send))
|
Ok((Arc::new(network_service), network_send))
|
||||||
|
@ -5,7 +5,7 @@ use slog::{debug, error};
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot};
|
use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot};
|
||||||
|
|
||||||
/// Provides a queue for fully and partially built `BeaconBlock`s.
|
/// Provides a queue for fully and partially built `BeaconBlock`s.
|
||||||
///
|
///
|
||||||
@ -19,8 +19,8 @@ use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot};
|
|||||||
/// `BeaconBlockBody` as the key.
|
/// `BeaconBlockBody` as the key.
|
||||||
/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore
|
/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore
|
||||||
/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`.
|
/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`.
|
||||||
pub struct ImportQueue {
|
pub struct ImportQueue<B: EthSpec> {
|
||||||
pub chain: Arc<BeaconChain>,
|
pub chain: Arc<BeaconChain<B>>,
|
||||||
/// Partially imported blocks, keyed by the root of `BeaconBlockBody`.
|
/// Partially imported blocks, keyed by the root of `BeaconBlockBody`.
|
||||||
pub partials: Vec<PartialBeaconBlock>,
|
pub partials: Vec<PartialBeaconBlock>,
|
||||||
/// Time before a queue entry is considered state.
|
/// Time before a queue entry is considered state.
|
||||||
@ -29,9 +29,9 @@ pub struct ImportQueue {
|
|||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ImportQueue {
|
impl<B: EthSpec> ImportQueue<B> {
|
||||||
/// Return a new, empty queue.
|
/// Return a new, empty queue.
|
||||||
pub fn new(chain: Arc<BeaconChain>, stale_time: Duration, log: slog::Logger) -> Self {
|
pub fn new(chain: Arc<BeaconChain<B>>, stale_time: Duration, log: slog::Logger) -> Self {
|
||||||
Self {
|
Self {
|
||||||
chain,
|
chain,
|
||||||
partials: vec![],
|
partials: vec![],
|
||||||
|
@ -9,7 +9,7 @@ use std::collections::HashMap;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::{Attestation, BeaconBlock, Epoch, Hash256, Slot};
|
use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot};
|
||||||
|
|
||||||
/// The number of slots that we can import blocks ahead of us, before going into full Sync mode.
|
/// The number of slots that we can import blocks ahead of us, before going into full Sync mode.
|
||||||
const SLOT_IMPORT_TOLERANCE: u64 = 100;
|
const SLOT_IMPORT_TOLERANCE: u64 = 100;
|
||||||
@ -88,8 +88,8 @@ impl From<HelloMessage> for PeerSyncInfo {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<&Arc<BeaconChain>> for PeerSyncInfo {
|
impl<B: EthSpec> From<&Arc<BeaconChain<B>>> for PeerSyncInfo {
|
||||||
fn from(chain: &Arc<BeaconChain>) -> PeerSyncInfo {
|
fn from(chain: &Arc<BeaconChain<B>>) -> PeerSyncInfo {
|
||||||
Self::from(chain.hello_message())
|
Self::from(chain.hello_message())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -103,22 +103,22 @@ pub enum SyncState {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Simple Syncing protocol.
|
/// Simple Syncing protocol.
|
||||||
pub struct SimpleSync {
|
pub struct SimpleSync<B: EthSpec> {
|
||||||
/// A reference to the underlying beacon chain.
|
/// A reference to the underlying beacon chain.
|
||||||
chain: Arc<BeaconChain>,
|
chain: Arc<BeaconChain<B>>,
|
||||||
/// A mapping of Peers to their respective PeerSyncInfo.
|
/// A mapping of Peers to their respective PeerSyncInfo.
|
||||||
known_peers: HashMap<PeerId, PeerSyncInfo>,
|
known_peers: HashMap<PeerId, PeerSyncInfo>,
|
||||||
/// A queue to allow importing of blocks
|
/// A queue to allow importing of blocks
|
||||||
import_queue: ImportQueue,
|
import_queue: ImportQueue<B>,
|
||||||
/// The current state of the syncing protocol.
|
/// The current state of the syncing protocol.
|
||||||
state: SyncState,
|
state: SyncState,
|
||||||
/// Sync logger.
|
/// Sync logger.
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SimpleSync {
|
impl<B: EthSpec> SimpleSync<B> {
|
||||||
/// Instantiate a `SimpleSync` instance, with no peers and an empty queue.
|
/// Instantiate a `SimpleSync` instance, with no peers and an empty queue.
|
||||||
pub fn new(beacon_chain: Arc<BeaconChain>, log: &slog::Logger) -> Self {
|
pub fn new(beacon_chain: Arc<BeaconChain<B>>, log: &slog::Logger) -> Self {
|
||||||
let sync_logger = log.new(o!("Service"=> "Sync"));
|
let sync_logger = log.new(o!("Service"=> "Sync"));
|
||||||
|
|
||||||
let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS);
|
let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS);
|
||||||
|
@ -1,570 +0,0 @@
|
|||||||
use crossbeam_channel::{unbounded, Receiver, RecvTimeoutError, Sender};
|
|
||||||
use eth2_libp2p::rpc::methods::*;
|
|
||||||
use eth2_libp2p::rpc::{RPCMethod, RPCRequest, RPCResponse, RequestId};
|
|
||||||
use eth2_libp2p::{PeerId, RPCEvent};
|
|
||||||
use network::beacon_chain::BeaconChain as NetworkBeaconChain;
|
|
||||||
use network::message_handler::{HandlerMessage, MessageHandler};
|
|
||||||
use network::service::{NetworkMessage, OutgoingMessage};
|
|
||||||
use sloggers::terminal::{Destination, TerminalLoggerBuilder};
|
|
||||||
use sloggers::types::Severity;
|
|
||||||
use sloggers::Build;
|
|
||||||
use std::time::Duration;
|
|
||||||
use test_harness::BeaconChainHarness;
|
|
||||||
use tokio::runtime::TaskExecutor;
|
|
||||||
use types::{test_utils::TestingBeaconStateBuilder, *};
|
|
||||||
|
|
||||||
pub struct SyncNode {
|
|
||||||
pub id: usize,
|
|
||||||
sender: Sender<HandlerMessage>,
|
|
||||||
receiver: Receiver<NetworkMessage>,
|
|
||||||
peer_id: PeerId,
|
|
||||||
harness: BeaconChainHarness,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SyncNode {
|
|
||||||
fn from_beacon_state_builder(
|
|
||||||
id: usize,
|
|
||||||
executor: &TaskExecutor,
|
|
||||||
state_builder: TestingBeaconStateBuilder,
|
|
||||||
spec: &ChainSpec,
|
|
||||||
logger: slog::Logger,
|
|
||||||
) -> Self {
|
|
||||||
let harness = BeaconChainHarness::from_beacon_state_builder(state_builder, spec.clone());
|
|
||||||
|
|
||||||
let (network_sender, network_receiver) = unbounded();
|
|
||||||
let message_handler_sender = MessageHandler::spawn(
|
|
||||||
harness.beacon_chain.clone(),
|
|
||||||
network_sender,
|
|
||||||
executor,
|
|
||||||
logger,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
Self {
|
|
||||||
id,
|
|
||||||
sender: message_handler_sender,
|
|
||||||
receiver: network_receiver,
|
|
||||||
peer_id: PeerId::random(),
|
|
||||||
harness,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn increment_beacon_chain_slot(&mut self) {
|
|
||||||
self.harness.increment_beacon_chain_slot();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn send(&self, message: HandlerMessage) {
|
|
||||||
self.sender.send(message).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn recv(&self) -> Result<NetworkMessage, RecvTimeoutError> {
|
|
||||||
self.receiver.recv_timeout(Duration::from_millis(500))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn hello_message(&self) -> HelloMessage {
|
|
||||||
self.harness.beacon_chain.hello_message()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn connect_to(&mut self, node: &SyncNode) {
|
|
||||||
let message = HandlerMessage::PeerDialed(self.peer_id.clone());
|
|
||||||
node.send(message);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Reads the receive queue from one node and passes the message to the other. Also returns a
|
|
||||||
/// copy of the message.
|
|
||||||
///
|
|
||||||
/// self -----> node
|
|
||||||
/// |
|
|
||||||
/// us
|
|
||||||
///
|
|
||||||
/// Named after the unix `tee` command.
|
|
||||||
fn tee(&mut self, node: &SyncNode) -> NetworkMessage {
|
|
||||||
let network_message = self.recv().expect("Timeout on tee");
|
|
||||||
|
|
||||||
let handler_message = match network_message.clone() {
|
|
||||||
NetworkMessage::Send(_to_peer_id, OutgoingMessage::RPC(event)) => {
|
|
||||||
HandlerMessage::RPC(self.peer_id.clone(), event)
|
|
||||||
}
|
|
||||||
_ => panic!("tee cannot parse {:?}", network_message),
|
|
||||||
};
|
|
||||||
|
|
||||||
node.send(handler_message);
|
|
||||||
|
|
||||||
network_message
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_hello_request(&mut self, node: &SyncNode) -> HelloMessage {
|
|
||||||
let request = self.tee_rpc_request(node);
|
|
||||||
|
|
||||||
match request {
|
|
||||||
RPCRequest::Hello(message) => message,
|
|
||||||
_ => panic!("tee_hello_request got: {:?}", request),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_hello_response(&mut self, node: &SyncNode) -> HelloMessage {
|
|
||||||
let response = self.tee_rpc_response(node);
|
|
||||||
|
|
||||||
match response {
|
|
||||||
RPCResponse::Hello(message) => message,
|
|
||||||
_ => panic!("tee_hello_response got: {:?}", response),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_block_root_request(&mut self, node: &SyncNode) -> BeaconBlockRootsRequest {
|
|
||||||
let msg = self.tee_rpc_request(node);
|
|
||||||
|
|
||||||
match msg {
|
|
||||||
RPCRequest::BeaconBlockRoots(data) => data,
|
|
||||||
_ => panic!("tee_block_root_request got: {:?}", msg),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_block_root_response(&mut self, node: &SyncNode) -> BeaconBlockRootsResponse {
|
|
||||||
let msg = self.tee_rpc_response(node);
|
|
||||||
|
|
||||||
match msg {
|
|
||||||
RPCResponse::BeaconBlockRoots(data) => data,
|
|
||||||
_ => panic!("tee_block_root_response got: {:?}", msg),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_block_header_request(&mut self, node: &SyncNode) -> BeaconBlockHeadersRequest {
|
|
||||||
let msg = self.tee_rpc_request(node);
|
|
||||||
|
|
||||||
match msg {
|
|
||||||
RPCRequest::BeaconBlockHeaders(data) => data,
|
|
||||||
_ => panic!("tee_block_header_request got: {:?}", msg),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_block_header_response(&mut self, node: &SyncNode) -> BeaconBlockHeadersResponse {
|
|
||||||
let msg = self.tee_rpc_response(node);
|
|
||||||
|
|
||||||
match msg {
|
|
||||||
RPCResponse::BeaconBlockHeaders(data) => data,
|
|
||||||
_ => panic!("tee_block_header_response got: {:?}", msg),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_block_body_request(&mut self, node: &SyncNode) -> BeaconBlockBodiesRequest {
|
|
||||||
let msg = self.tee_rpc_request(node);
|
|
||||||
|
|
||||||
match msg {
|
|
||||||
RPCRequest::BeaconBlockBodies(data) => data,
|
|
||||||
_ => panic!("tee_block_body_request got: {:?}", msg),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_block_body_response(&mut self, node: &SyncNode) -> BeaconBlockBodiesResponse {
|
|
||||||
let msg = self.tee_rpc_response(node);
|
|
||||||
|
|
||||||
match msg {
|
|
||||||
RPCResponse::BeaconBlockBodies(data) => data,
|
|
||||||
_ => panic!("tee_block_body_response got: {:?}", msg),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_rpc_request(&mut self, node: &SyncNode) -> RPCRequest {
|
|
||||||
let network_message = self.tee(node);
|
|
||||||
|
|
||||||
match network_message {
|
|
||||||
NetworkMessage::Send(
|
|
||||||
_peer_id,
|
|
||||||
OutgoingMessage::RPC(RPCEvent::Request {
|
|
||||||
id: _,
|
|
||||||
method_id: _,
|
|
||||||
body,
|
|
||||||
}),
|
|
||||||
) => body,
|
|
||||||
_ => panic!("tee_rpc_request failed! got {:?}", network_message),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn tee_rpc_response(&mut self, node: &SyncNode) -> RPCResponse {
|
|
||||||
let network_message = self.tee(node);
|
|
||||||
|
|
||||||
match network_message {
|
|
||||||
NetworkMessage::Send(
|
|
||||||
_peer_id,
|
|
||||||
OutgoingMessage::RPC(RPCEvent::Response {
|
|
||||||
id: _,
|
|
||||||
method_id: _,
|
|
||||||
result,
|
|
||||||
}),
|
|
||||||
) => result,
|
|
||||||
_ => panic!("tee_rpc_response failed! got {:?}", network_message),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_block_root_request(&self) -> BeaconBlockRootsRequest {
|
|
||||||
let request = self.recv_rpc_request().expect("No block root request");
|
|
||||||
|
|
||||||
match request {
|
|
||||||
RPCRequest::BeaconBlockRoots(request) => request,
|
|
||||||
_ => panic!("Did not get block root request"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_block_headers_request(&self) -> BeaconBlockHeadersRequest {
|
|
||||||
let request = self.recv_rpc_request().expect("No block headers request");
|
|
||||||
|
|
||||||
match request {
|
|
||||||
RPCRequest::BeaconBlockHeaders(request) => request,
|
|
||||||
_ => panic!("Did not get block headers request"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_block_bodies_request(&self) -> BeaconBlockBodiesRequest {
|
|
||||||
let request = self.recv_rpc_request().expect("No block bodies request");
|
|
||||||
|
|
||||||
match request {
|
|
||||||
RPCRequest::BeaconBlockBodies(request) => request,
|
|
||||||
_ => panic!("Did not get block bodies request"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn _recv_rpc_response(&self) -> Result<RPCResponse, RecvTimeoutError> {
|
|
||||||
let network_message = self.recv()?;
|
|
||||||
Ok(match network_message {
|
|
||||||
NetworkMessage::Send(
|
|
||||||
_peer_id,
|
|
||||||
OutgoingMessage::RPC(RPCEvent::Response {
|
|
||||||
id: _,
|
|
||||||
method_id: _,
|
|
||||||
result,
|
|
||||||
}),
|
|
||||||
) => result,
|
|
||||||
_ => panic!("get_rpc_response failed! got {:?}", network_message),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn recv_rpc_request(&self) -> Result<RPCRequest, RecvTimeoutError> {
|
|
||||||
let network_message = self.recv()?;
|
|
||||||
Ok(match network_message {
|
|
||||||
NetworkMessage::Send(
|
|
||||||
_peer_id,
|
|
||||||
OutgoingMessage::RPC(RPCEvent::Request {
|
|
||||||
id: _,
|
|
||||||
method_id: _,
|
|
||||||
body,
|
|
||||||
}),
|
|
||||||
) => body,
|
|
||||||
_ => panic!("get_rpc_request failed! got {:?}", network_message),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_logger() -> slog::Logger {
|
|
||||||
let mut builder = TerminalLoggerBuilder::new();
|
|
||||||
builder.level(Severity::Debug);
|
|
||||||
builder.destination(Destination::Stderr);
|
|
||||||
builder.build().unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct SyncMaster {
|
|
||||||
harness: BeaconChainHarness,
|
|
||||||
peer_id: PeerId,
|
|
||||||
response_ids: Vec<RequestId>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SyncMaster {
|
|
||||||
fn from_beacon_state_builder(
|
|
||||||
state_builder: TestingBeaconStateBuilder,
|
|
||||||
node_count: usize,
|
|
||||||
spec: &ChainSpec,
|
|
||||||
) -> Self {
|
|
||||||
let harness = BeaconChainHarness::from_beacon_state_builder(state_builder, spec.clone());
|
|
||||||
let peer_id = PeerId::random();
|
|
||||||
let response_ids = vec![RequestId::from(0); node_count];
|
|
||||||
|
|
||||||
Self {
|
|
||||||
harness,
|
|
||||||
peer_id,
|
|
||||||
response_ids,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn response_id(&mut self, node: &SyncNode) -> RequestId {
|
|
||||||
let id = self.response_ids[node.id].clone();
|
|
||||||
self.response_ids[node.id].increment();
|
|
||||||
id
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn do_hello_with(&mut self, node: &SyncNode) {
|
|
||||||
let message = HandlerMessage::PeerDialed(self.peer_id.clone());
|
|
||||||
node.send(message);
|
|
||||||
|
|
||||||
let request = node.recv_rpc_request().expect("No hello response");
|
|
||||||
|
|
||||||
match request {
|
|
||||||
RPCRequest::Hello(_hello) => {
|
|
||||||
let hello = self.harness.beacon_chain.hello_message();
|
|
||||||
let response = self.rpc_response(node, RPCResponse::Hello(hello));
|
|
||||||
node.send(response);
|
|
||||||
}
|
|
||||||
_ => panic!("Got message other than hello from node."),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn respond_to_block_roots_request(
|
|
||||||
&mut self,
|
|
||||||
node: &SyncNode,
|
|
||||||
request: BeaconBlockRootsRequest,
|
|
||||||
) {
|
|
||||||
let roots = self
|
|
||||||
.harness
|
|
||||||
.beacon_chain
|
|
||||||
.get_block_roots(request.start_slot, request.count as usize, 0)
|
|
||||||
.expect("Beacon chain did not give block roots")
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.map(|(i, root)| BlockRootSlot {
|
|
||||||
block_root: *root,
|
|
||||||
slot: Slot::from(i) + request.start_slot,
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let response = RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots });
|
|
||||||
self.send_rpc_response(node, response)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn respond_to_block_headers_request(
|
|
||||||
&mut self,
|
|
||||||
node: &SyncNode,
|
|
||||||
request: BeaconBlockHeadersRequest,
|
|
||||||
) {
|
|
||||||
let roots = self
|
|
||||||
.harness
|
|
||||||
.beacon_chain
|
|
||||||
.get_block_roots(
|
|
||||||
request.start_slot,
|
|
||||||
request.max_headers as usize,
|
|
||||||
request.skip_slots as usize,
|
|
||||||
)
|
|
||||||
.expect("Beacon chain did not give blocks");
|
|
||||||
|
|
||||||
if roots.is_empty() {
|
|
||||||
panic!("Roots was empty when trying to get headers.")
|
|
||||||
}
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
roots[0], request.start_root,
|
|
||||||
"Got the wrong start root when getting headers"
|
|
||||||
);
|
|
||||||
|
|
||||||
let headers: Vec<BeaconBlockHeader> = roots
|
|
||||||
.iter()
|
|
||||||
.map(|root| {
|
|
||||||
let block = self
|
|
||||||
.harness
|
|
||||||
.beacon_chain
|
|
||||||
.get_block(root)
|
|
||||||
.expect("Failed to load block")
|
|
||||||
.expect("Block did not exist");
|
|
||||||
block.block_header()
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let response = RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers });
|
|
||||||
self.send_rpc_response(node, response)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn respond_to_block_bodies_request(
|
|
||||||
&mut self,
|
|
||||||
node: &SyncNode,
|
|
||||||
request: BeaconBlockBodiesRequest,
|
|
||||||
) {
|
|
||||||
let block_bodies: Vec<BeaconBlockBody> = request
|
|
||||||
.block_roots
|
|
||||||
.iter()
|
|
||||||
.map(|root| {
|
|
||||||
let block = self
|
|
||||||
.harness
|
|
||||||
.beacon_chain
|
|
||||||
.get_block(root)
|
|
||||||
.expect("Failed to load block")
|
|
||||||
.expect("Block did not exist");
|
|
||||||
block.body
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let response = RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { block_bodies });
|
|
||||||
self.send_rpc_response(node, response)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn send_rpc_response(&mut self, node: &SyncNode, rpc_response: RPCResponse) {
|
|
||||||
node.send(self.rpc_response(node, rpc_response));
|
|
||||||
}
|
|
||||||
|
|
||||||
fn rpc_response(&mut self, node: &SyncNode, rpc_response: RPCResponse) -> HandlerMessage {
|
|
||||||
HandlerMessage::RPC(
|
|
||||||
self.peer_id.clone(),
|
|
||||||
RPCEvent::Response {
|
|
||||||
id: self.response_id(node),
|
|
||||||
method_id: RPCMethod::Hello.into(),
|
|
||||||
result: rpc_response,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn test_setup(
|
|
||||||
state_builder: TestingBeaconStateBuilder,
|
|
||||||
node_count: usize,
|
|
||||||
spec: &ChainSpec,
|
|
||||||
logger: slog::Logger,
|
|
||||||
) -> (tokio::runtime::Runtime, SyncMaster, Vec<SyncNode>) {
|
|
||||||
let runtime = tokio::runtime::Runtime::new().unwrap();
|
|
||||||
|
|
||||||
let mut nodes = Vec::with_capacity(node_count);
|
|
||||||
for id in 0..node_count {
|
|
||||||
let node = SyncNode::from_beacon_state_builder(
|
|
||||||
id,
|
|
||||||
&runtime.executor(),
|
|
||||||
state_builder.clone(),
|
|
||||||
&spec,
|
|
||||||
logger.clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
nodes.push(node);
|
|
||||||
}
|
|
||||||
|
|
||||||
let master = SyncMaster::from_beacon_state_builder(state_builder, node_count, &spec);
|
|
||||||
|
|
||||||
(runtime, master, nodes)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn build_blocks(blocks: usize, master: &mut SyncMaster, nodes: &mut Vec<SyncNode>) {
|
|
||||||
for _ in 0..blocks {
|
|
||||||
master.harness.advance_chain_with_block();
|
|
||||||
for i in 0..nodes.len() {
|
|
||||||
nodes[i].increment_beacon_chain_slot();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
master.harness.run_fork_choice();
|
|
||||||
|
|
||||||
for i in 0..nodes.len() {
|
|
||||||
nodes[i].harness.run_fork_choice();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[ignore]
|
|
||||||
fn sync_node_with_master() {
|
|
||||||
let logger = get_logger();
|
|
||||||
let spec = ChainSpec::few_validators();
|
|
||||||
let validator_count = 8;
|
|
||||||
let node_count = 1;
|
|
||||||
|
|
||||||
let state_builder =
|
|
||||||
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
|
|
||||||
|
|
||||||
let (runtime, mut master, mut nodes) =
|
|
||||||
test_setup(state_builder, node_count, &spec, logger.clone());
|
|
||||||
|
|
||||||
let original_node_slot = nodes[0].hello_message().best_slot;
|
|
||||||
|
|
||||||
build_blocks(2, &mut master, &mut nodes);
|
|
||||||
|
|
||||||
master.do_hello_with(&nodes[0]);
|
|
||||||
|
|
||||||
let roots_request = nodes[0].get_block_root_request();
|
|
||||||
assert_eq!(roots_request.start_slot, original_node_slot + 1);
|
|
||||||
assert_eq!(roots_request.count, 2);
|
|
||||||
|
|
||||||
master.respond_to_block_roots_request(&nodes[0], roots_request);
|
|
||||||
|
|
||||||
let headers_request = nodes[0].get_block_headers_request();
|
|
||||||
assert_eq!(headers_request.start_slot, original_node_slot + 1);
|
|
||||||
assert_eq!(headers_request.max_headers, 2);
|
|
||||||
assert_eq!(headers_request.skip_slots, 0);
|
|
||||||
|
|
||||||
master.respond_to_block_headers_request(&nodes[0], headers_request);
|
|
||||||
|
|
||||||
let bodies_request = nodes[0].get_block_bodies_request();
|
|
||||||
assert_eq!(bodies_request.block_roots.len(), 2);
|
|
||||||
|
|
||||||
master.respond_to_block_bodies_request(&nodes[0], bodies_request);
|
|
||||||
|
|
||||||
std::thread::sleep(Duration::from_millis(10000));
|
|
||||||
runtime.shutdown_now();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[ignore]
|
|
||||||
fn sync_two_nodes() {
|
|
||||||
let logger = get_logger();
|
|
||||||
let spec = ChainSpec::few_validators();
|
|
||||||
let validator_count = 8;
|
|
||||||
let node_count = 2;
|
|
||||||
|
|
||||||
let state_builder =
|
|
||||||
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
|
|
||||||
|
|
||||||
let (runtime, _master, mut nodes) =
|
|
||||||
test_setup(state_builder, node_count, &spec, logger.clone());
|
|
||||||
|
|
||||||
// let original_node_slot = nodes[0].hello_message().best_slot;
|
|
||||||
let mut node_a = nodes.remove(0);
|
|
||||||
let mut node_b = nodes.remove(0);
|
|
||||||
|
|
||||||
let blocks = 2;
|
|
||||||
|
|
||||||
// Node A builds out a longer, better chain.
|
|
||||||
for _ in 0..blocks {
|
|
||||||
// Node A should build a block.
|
|
||||||
node_a.harness.advance_chain_with_block();
|
|
||||||
// Node B should just increment it's slot without a block.
|
|
||||||
node_b.harness.increment_beacon_chain_slot();
|
|
||||||
}
|
|
||||||
node_a.harness.run_fork_choice();
|
|
||||||
|
|
||||||
// A connects to B.
|
|
||||||
node_a.connect_to(&node_b);
|
|
||||||
|
|
||||||
// B says hello to A.
|
|
||||||
node_b.tee_hello_request(&node_a);
|
|
||||||
// A says hello back.
|
|
||||||
node_a.tee_hello_response(&node_b);
|
|
||||||
|
|
||||||
// B requests block roots from A.
|
|
||||||
node_b.tee_block_root_request(&node_a);
|
|
||||||
// A provides block roots to A.
|
|
||||||
node_a.tee_block_root_response(&node_b);
|
|
||||||
|
|
||||||
// B requests block headers from A.
|
|
||||||
node_b.tee_block_header_request(&node_a);
|
|
||||||
// A provides block headers to B.
|
|
||||||
node_a.tee_block_header_response(&node_b);
|
|
||||||
|
|
||||||
// B requests block bodies from A.
|
|
||||||
node_b.tee_block_body_request(&node_a);
|
|
||||||
// A provides block bodies to B.
|
|
||||||
node_a.tee_block_body_response(&node_b);
|
|
||||||
|
|
||||||
std::thread::sleep(Duration::from_secs(20));
|
|
||||||
|
|
||||||
node_b.harness.run_fork_choice();
|
|
||||||
|
|
||||||
let node_a_chain = node_a
|
|
||||||
.harness
|
|
||||||
.beacon_chain
|
|
||||||
.chain_dump()
|
|
||||||
.expect("Can't dump node a chain");
|
|
||||||
|
|
||||||
let node_b_chain = node_b
|
|
||||||
.harness
|
|
||||||
.beacon_chain
|
|
||||||
.chain_dump()
|
|
||||||
.expect("Can't dump node b chain");
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
node_a_chain.len(),
|
|
||||||
node_b_chain.len(),
|
|
||||||
"Chains should be equal length"
|
|
||||||
);
|
|
||||||
assert_eq!(node_a_chain, node_b_chain, "Chains should be identical");
|
|
||||||
|
|
||||||
runtime.shutdown_now();
|
|
||||||
}
|
|
@ -9,15 +9,15 @@ use protos::services_grpc::AttestationService;
|
|||||||
use slog::{error, info, trace, warn};
|
use slog::{error, info, trace, warn};
|
||||||
use ssz::{ssz_encode, Decodable};
|
use ssz::{ssz_encode, Decodable};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::Attestation;
|
use types::{Attestation, EthSpec};
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct AttestationServiceInstance {
|
pub struct AttestationServiceInstance<B: EthSpec> {
|
||||||
pub chain: Arc<BeaconChain>,
|
pub chain: Arc<BeaconChain<B>>,
|
||||||
pub log: slog::Logger,
|
pub log: slog::Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AttestationService for AttestationServiceInstance {
|
impl<B: EthSpec> AttestationService for AttestationServiceInstance<B> {
|
||||||
/// Produce the `AttestationData` for signing by a validator.
|
/// Produce the `AttestationData` for signing by a validator.
|
||||||
fn produce_attestation_data(
|
fn produce_attestation_data(
|
||||||
&mut self,
|
&mut self,
|
||||||
|
@ -13,16 +13,16 @@ use slog::Logger;
|
|||||||
use slog::{error, info, trace, warn};
|
use slog::{error, info, trace, warn};
|
||||||
use ssz::{ssz_encode, Decodable};
|
use ssz::{ssz_encode, Decodable};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{BeaconBlock, Signature, Slot};
|
use types::{BeaconBlock, EthSpec, Signature, Slot};
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct BeaconBlockServiceInstance {
|
pub struct BeaconBlockServiceInstance<B: EthSpec> {
|
||||||
pub chain: Arc<BeaconChain>,
|
pub chain: Arc<BeaconChain<B>>,
|
||||||
pub network_chan: crossbeam_channel::Sender<NetworkMessage>,
|
pub network_chan: crossbeam_channel::Sender<NetworkMessage>,
|
||||||
pub log: Logger,
|
pub log: Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BeaconBlockService for BeaconBlockServiceInstance {
|
impl<B: EthSpec> BeaconBlockService for BeaconBlockServiceInstance<B> {
|
||||||
/// Produce a `BeaconBlock` for signing by a validator.
|
/// Produce a `BeaconBlock` for signing by a validator.
|
||||||
fn produce_beacon_block(
|
fn produce_beacon_block(
|
||||||
&mut self,
|
&mut self,
|
||||||
|
@ -8,15 +8,15 @@ use beacon_chain::{
|
|||||||
AttestationValidationError, BlockProductionError,
|
AttestationValidationError, BlockProductionError,
|
||||||
};
|
};
|
||||||
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome};
|
pub use beacon_chain::{BeaconChainError, BlockProcessingOutcome};
|
||||||
use types::{Attestation, AttestationData, BeaconBlock};
|
use types::{Attestation, AttestationData, BeaconBlock, EthSpec};
|
||||||
|
|
||||||
/// The RPC's API to the beacon chain.
|
/// The RPC's API to the beacon chain.
|
||||||
pub trait BeaconChain: Send + Sync {
|
pub trait BeaconChain<B: EthSpec>: Send + Sync {
|
||||||
fn get_spec(&self) -> &ChainSpec;
|
fn get_spec(&self) -> &ChainSpec;
|
||||||
|
|
||||||
fn get_state(&self) -> RwLockReadGuard<BeaconState>;
|
fn get_state(&self) -> RwLockReadGuard<BeaconState<B>>;
|
||||||
|
|
||||||
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState>;
|
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState<B>>;
|
||||||
|
|
||||||
fn process_block(&self, block: BeaconBlock)
|
fn process_block(&self, block: BeaconBlock)
|
||||||
-> Result<BlockProcessingOutcome, BeaconChainError>;
|
-> Result<BlockProcessingOutcome, BeaconChainError>;
|
||||||
@ -24,7 +24,7 @@ pub trait BeaconChain: Send + Sync {
|
|||||||
fn produce_block(
|
fn produce_block(
|
||||||
&self,
|
&self,
|
||||||
randao_reveal: Signature,
|
randao_reveal: Signature,
|
||||||
) -> Result<(BeaconBlock, BeaconState), BlockProductionError>;
|
) -> Result<(BeaconBlock, BeaconState<B>), BlockProductionError>;
|
||||||
|
|
||||||
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError>;
|
fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, BeaconChainError>;
|
||||||
|
|
||||||
@ -34,21 +34,22 @@ pub trait BeaconChain: Send + Sync {
|
|||||||
) -> Result<(), AttestationValidationError>;
|
) -> Result<(), AttestationValidationError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, U, F> BeaconChain for RawBeaconChain<T, U, F>
|
impl<T, U, F, B> BeaconChain<B> for RawBeaconChain<T, U, F, B>
|
||||||
where
|
where
|
||||||
T: ClientDB + Sized,
|
T: ClientDB + Sized,
|
||||||
U: SlotClock,
|
U: SlotClock,
|
||||||
F: ForkChoice,
|
F: ForkChoice,
|
||||||
|
B: EthSpec,
|
||||||
{
|
{
|
||||||
fn get_spec(&self) -> &ChainSpec {
|
fn get_spec(&self) -> &ChainSpec {
|
||||||
&self.spec
|
&self.spec
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_state(&self) -> RwLockReadGuard<BeaconState> {
|
fn get_state(&self) -> RwLockReadGuard<BeaconState<B>> {
|
||||||
self.state.read()
|
self.state.read()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState> {
|
fn get_mut_state(&self) -> RwLockWriteGuard<BeaconState<B>> {
|
||||||
self.state.write()
|
self.state.write()
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -62,7 +63,7 @@ where
|
|||||||
fn produce_block(
|
fn produce_block(
|
||||||
&self,
|
&self,
|
||||||
randao_reveal: Signature,
|
randao_reveal: Signature,
|
||||||
) -> Result<(BeaconBlock, BeaconState), BlockProductionError> {
|
) -> Result<(BeaconBlock, BeaconState<B>), BlockProductionError> {
|
||||||
self.produce_block(randao_reveal)
|
self.produce_block(randao_reveal)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -5,14 +5,15 @@ use protos::services::{Empty, Fork, NodeInfoResponse};
|
|||||||
use protos::services_grpc::BeaconNodeService;
|
use protos::services_grpc::BeaconNodeService;
|
||||||
use slog::{trace, warn};
|
use slog::{trace, warn};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use types::EthSpec;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct BeaconNodeServiceInstance {
|
pub struct BeaconNodeServiceInstance<B: EthSpec> {
|
||||||
pub chain: Arc<BeaconChain>,
|
pub chain: Arc<BeaconChain<B>>,
|
||||||
pub log: slog::Logger,
|
pub log: slog::Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BeaconNodeService for BeaconNodeServiceInstance {
|
impl<B: EthSpec> BeaconNodeService for BeaconNodeServiceInstance<B> {
|
||||||
/// Provides basic node information.
|
/// Provides basic node information.
|
||||||
fn info(&mut self, ctx: RpcContext, _req: Empty, sink: UnarySink<NodeInfoResponse>) {
|
fn info(&mut self, ctx: RpcContext, _req: Empty, sink: UnarySink<NodeInfoResponse>) {
|
||||||
trace!(self.log, "Node info requested via RPC");
|
trace!(self.log, "Node info requested via RPC");
|
||||||
|
@ -21,12 +21,13 @@ use protos::services_grpc::{
|
|||||||
use slog::{info, o, warn};
|
use slog::{info, o, warn};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tokio::runtime::TaskExecutor;
|
use tokio::runtime::TaskExecutor;
|
||||||
|
use types::EthSpec;
|
||||||
|
|
||||||
pub fn start_server(
|
pub fn start_server<B: EthSpec>(
|
||||||
config: &RPCConfig,
|
config: &RPCConfig,
|
||||||
executor: &TaskExecutor,
|
executor: &TaskExecutor,
|
||||||
network_chan: crossbeam_channel::Sender<NetworkMessage>,
|
network_chan: crossbeam_channel::Sender<NetworkMessage>,
|
||||||
beacon_chain: Arc<BeaconChain>,
|
beacon_chain: Arc<BeaconChain<B>>,
|
||||||
log: &slog::Logger,
|
log: &slog::Logger,
|
||||||
) -> exit_future::Signal {
|
) -> exit_future::Signal {
|
||||||
let log = log.new(o!("Service"=>"RPC"));
|
let log = log.new(o!("Service"=>"RPC"));
|
||||||
|
@ -7,16 +7,16 @@ use protos::services_grpc::ValidatorService;
|
|||||||
use slog::{trace, warn};
|
use slog::{trace, warn};
|
||||||
use ssz::decode;
|
use ssz::decode;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{Epoch, RelativeEpoch};
|
use types::{Epoch, EthSpec, RelativeEpoch};
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct ValidatorServiceInstance {
|
pub struct ValidatorServiceInstance<B: EthSpec> {
|
||||||
pub chain: Arc<BeaconChain>,
|
pub chain: Arc<BeaconChain<B>>,
|
||||||
pub log: slog::Logger,
|
pub log: slog::Logger,
|
||||||
}
|
}
|
||||||
//TODO: Refactor Errors
|
//TODO: Refactor Errors
|
||||||
|
|
||||||
impl ValidatorService for ValidatorServiceInstance {
|
impl<B: EthSpec> ValidatorService for ValidatorServiceInstance<B> {
|
||||||
/// For a list of validator public keys, this function returns the slot at which each
|
/// For a list of validator public keys, this function returns the slot at which each
|
||||||
/// validator must propose a block, attest to a shard, their shard committee and the shard they
|
/// validator must propose a block, attest to a shard, their shard committee and the shard they
|
||||||
/// need to attest to.
|
/// need to attest to.
|
||||||
|
14
docs/documentation.md
Normal file
14
docs/documentation.md
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
# Lighthouse Technical Documentation
|
||||||
|
|
||||||
|
The technical documentation, as generated by Rust, is available at [lighthouse-docs.sigmaprime.io](http://lighthouse-docs.sigmaprime.io/).
|
||||||
|
|
||||||
|
This documentation is generated from Lighthouse and updated regularly.
|
||||||
|
|
||||||
|
|
||||||
|
### How to update:
|
||||||
|
|
||||||
|
- `cargo doc`: builds the docs inside the `target/doc/` directory.
|
||||||
|
- `aws s3 sync target/doc/ s3://lighthouse-docs.sigmaprime.io/`: Uploads all of the docs, as generated with `cargo doc`, to the S3 bucket.
|
||||||
|
|
||||||
|
**Note**: You will need appropriate credentials to make the upload.
|
||||||
|
|
@ -1,11 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "attester"
|
|
||||||
version = "0.1.0"
|
|
||||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
|
||||||
edition = "2018"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
slot_clock = { path = "../../eth2/utils/slot_clock" }
|
|
||||||
ssz = { path = "../../eth2/utils/ssz" }
|
|
||||||
tree_hash = { path = "../../eth2/utils/tree_hash" }
|
|
||||||
types = { path = "../../eth2/types" }
|
|
@ -1,257 +0,0 @@
|
|||||||
pub mod test_utils;
|
|
||||||
mod traits;
|
|
||||||
|
|
||||||
use slot_clock::SlotClock;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use tree_hash::TreeHash;
|
|
||||||
use types::{AttestationData, AttestationDataAndCustodyBit, FreeAttestation, Signature, Slot};
|
|
||||||
|
|
||||||
pub use self::traits::{
|
|
||||||
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
|
|
||||||
};
|
|
||||||
|
|
||||||
const PHASE_0_CUSTODY_BIT: bool = false;
|
|
||||||
const DOMAIN_ATTESTATION: u64 = 1;
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum PollOutcome {
|
|
||||||
AttestationProduced(Slot),
|
|
||||||
AttestationNotRequired(Slot),
|
|
||||||
SlashableAttestationNotProduced(Slot),
|
|
||||||
BeaconNodeUnableToProduceAttestation(Slot),
|
|
||||||
ProducerDutiesUnknown(Slot),
|
|
||||||
SlotAlreadyProcessed(Slot),
|
|
||||||
SignerRejection(Slot),
|
|
||||||
ValidatorIsUnknown(Slot),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum Error {
|
|
||||||
SlotClockError,
|
|
||||||
SlotUnknowable,
|
|
||||||
EpochMapPoisoned,
|
|
||||||
SlotClockPoisoned,
|
|
||||||
EpochLengthIsZero,
|
|
||||||
BeaconNodeError(BeaconNodeError),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A polling state machine which performs block production duties, based upon some epoch duties
|
|
||||||
/// (`EpochDutiesMap`) and a concept of time (`SlotClock`).
|
|
||||||
///
|
|
||||||
/// Ensures that messages are not slashable.
|
|
||||||
///
|
|
||||||
/// Relies upon an external service to keep the `EpochDutiesMap` updated.
|
|
||||||
pub struct Attester<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
|
|
||||||
pub last_processed_slot: Option<Slot>,
|
|
||||||
duties: Arc<V>,
|
|
||||||
slot_clock: Arc<T>,
|
|
||||||
beacon_node: Arc<U>,
|
|
||||||
signer: Arc<W>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V, W> {
|
|
||||||
/// Returns a new instance where `last_processed_slot == 0`.
|
|
||||||
pub fn new(duties: Arc<V>, slot_clock: Arc<T>, beacon_node: Arc<U>, signer: Arc<W>) -> Self {
|
|
||||||
Self {
|
|
||||||
last_processed_slot: None,
|
|
||||||
duties,
|
|
||||||
slot_clock,
|
|
||||||
beacon_node,
|
|
||||||
signer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> Attester<T, U, V, W> {
|
|
||||||
/// Poll the `BeaconNode` and produce an attestation if required.
|
|
||||||
pub fn poll(&mut self) -> Result<PollOutcome, Error> {
|
|
||||||
let slot = self
|
|
||||||
.slot_clock
|
|
||||||
.present_slot()
|
|
||||||
.map_err(|_| Error::SlotClockError)?
|
|
||||||
.ok_or(Error::SlotUnknowable)?;
|
|
||||||
|
|
||||||
if !self.is_processed_slot(slot) {
|
|
||||||
self.last_processed_slot = Some(slot);
|
|
||||||
|
|
||||||
let shard = match self.duties.attestation_shard(slot) {
|
|
||||||
Ok(Some(result)) => result,
|
|
||||||
Ok(None) => return Ok(PollOutcome::AttestationNotRequired(slot)),
|
|
||||||
Err(DutiesReaderError::UnknownEpoch) => {
|
|
||||||
return Ok(PollOutcome::ProducerDutiesUnknown(slot));
|
|
||||||
}
|
|
||||||
Err(DutiesReaderError::UnknownValidator) => {
|
|
||||||
return Ok(PollOutcome::ValidatorIsUnknown(slot));
|
|
||||||
}
|
|
||||||
Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero),
|
|
||||||
Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned),
|
|
||||||
};
|
|
||||||
|
|
||||||
self.produce_attestation(slot, shard)
|
|
||||||
} else {
|
|
||||||
Ok(PollOutcome::SlotAlreadyProcessed(slot))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn produce_attestation(&mut self, slot: Slot, shard: u64) -> Result<PollOutcome, Error> {
|
|
||||||
let attestation_data = match self.beacon_node.produce_attestation_data(slot, shard)? {
|
|
||||||
Some(attestation_data) => attestation_data,
|
|
||||||
None => return Ok(PollOutcome::BeaconNodeUnableToProduceAttestation(slot)),
|
|
||||||
};
|
|
||||||
|
|
||||||
if !self.safe_to_produce(&attestation_data) {
|
|
||||||
return Ok(PollOutcome::SlashableAttestationNotProduced(slot));
|
|
||||||
}
|
|
||||||
|
|
||||||
let signature = match self.sign_attestation_data(&attestation_data) {
|
|
||||||
Some(signature) => signature,
|
|
||||||
None => return Ok(PollOutcome::SignerRejection(slot)),
|
|
||||||
};
|
|
||||||
|
|
||||||
let validator_index = match self.duties.validator_index() {
|
|
||||||
Some(validator_index) => validator_index,
|
|
||||||
None => return Ok(PollOutcome::ValidatorIsUnknown(slot)),
|
|
||||||
};
|
|
||||||
|
|
||||||
let free_attestation = FreeAttestation {
|
|
||||||
data: attestation_data,
|
|
||||||
signature,
|
|
||||||
validator_index,
|
|
||||||
};
|
|
||||||
|
|
||||||
self.beacon_node.publish_attestation(free_attestation)?;
|
|
||||||
Ok(PollOutcome::AttestationProduced(slot))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_processed_slot(&self, slot: Slot) -> bool {
|
|
||||||
match self.last_processed_slot {
|
|
||||||
Some(processed_slot) if slot <= processed_slot => true,
|
|
||||||
_ => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Consumes a block, returning that block signed by the validators private key.
|
|
||||||
///
|
|
||||||
/// Important: this function will not check to ensure the block is not slashable. This must be
|
|
||||||
/// done upstream.
|
|
||||||
fn sign_attestation_data(&mut self, attestation_data: &AttestationData) -> Option<Signature> {
|
|
||||||
self.store_produce(attestation_data);
|
|
||||||
|
|
||||||
let message = AttestationDataAndCustodyBit {
|
|
||||||
data: attestation_data.clone(),
|
|
||||||
custody_bit: PHASE_0_CUSTODY_BIT,
|
|
||||||
}
|
|
||||||
.tree_hash_root();
|
|
||||||
|
|
||||||
self.signer
|
|
||||||
.sign_attestation_message(&message[..], DOMAIN_ATTESTATION)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns `true` if signing some attestation_data is safe (non-slashable).
|
|
||||||
///
|
|
||||||
/// !!! UNSAFE !!!
|
|
||||||
///
|
|
||||||
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
|
|
||||||
fn safe_to_produce(&self, _attestation_data: &AttestationData) -> bool {
|
|
||||||
// TODO: ensure the producer doesn't produce slashable blocks.
|
|
||||||
// https://github.com/sigp/lighthouse/issues/160
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Record that a block was produced so that slashable votes may not be made in the future.
|
|
||||||
///
|
|
||||||
/// !!! UNSAFE !!!
|
|
||||||
///
|
|
||||||
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
|
|
||||||
fn store_produce(&mut self, _block: &AttestationData) {
|
|
||||||
// TODO: record this block production to prevent future slashings.
|
|
||||||
// https://github.com/sigp/lighthouse/issues/160
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<BeaconNodeError> for Error {
|
|
||||||
fn from(e: BeaconNodeError) -> Error {
|
|
||||||
Error::BeaconNodeError(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode};
|
|
||||||
use super::*;
|
|
||||||
use slot_clock::TestingSlotClock;
|
|
||||||
use types::{
|
|
||||||
test_utils::{SeedableRng, TestRandom, XorShiftRng},
|
|
||||||
ChainSpec, Keypair,
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: implement more thorough testing.
|
|
||||||
// https://github.com/sigp/lighthouse/issues/160
|
|
||||||
//
|
|
||||||
// These tests should serve as a good example for future tests.
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
pub fn polling() {
|
|
||||||
let mut rng = XorShiftRng::from_seed([42; 16]);
|
|
||||||
|
|
||||||
let spec = Arc::new(ChainSpec::foundation());
|
|
||||||
let slot_clock = Arc::new(TestingSlotClock::new(0));
|
|
||||||
let beacon_node = Arc::new(SimulatedBeaconNode::default());
|
|
||||||
let signer = Arc::new(LocalSigner::new(Keypair::random()));
|
|
||||||
|
|
||||||
let mut duties = EpochMap::new(spec.slots_per_epoch);
|
|
||||||
let attest_slot = Slot::new(100);
|
|
||||||
let attest_epoch = attest_slot / spec.slots_per_epoch;
|
|
||||||
let attest_shard = 12;
|
|
||||||
duties.insert_attestation_shard(attest_slot, attest_shard);
|
|
||||||
duties.set_validator_index(Some(2));
|
|
||||||
let duties = Arc::new(duties);
|
|
||||||
|
|
||||||
let mut attester = Attester::new(
|
|
||||||
duties.clone(),
|
|
||||||
slot_clock.clone(),
|
|
||||||
beacon_node.clone(),
|
|
||||||
signer.clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
// Configure responses from the BeaconNode.
|
|
||||||
beacon_node.set_next_produce_result(Ok(Some(AttestationData::random_for_test(&mut rng))));
|
|
||||||
beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidAttestation));
|
|
||||||
|
|
||||||
// One slot before attestation slot...
|
|
||||||
slot_clock.set_slot(attest_slot.as_u64() - 1);
|
|
||||||
assert_eq!(
|
|
||||||
attester.poll(),
|
|
||||||
Ok(PollOutcome::AttestationNotRequired(attest_slot - 1))
|
|
||||||
);
|
|
||||||
|
|
||||||
// On the attest slot...
|
|
||||||
slot_clock.set_slot(attest_slot.as_u64());
|
|
||||||
assert_eq!(
|
|
||||||
attester.poll(),
|
|
||||||
Ok(PollOutcome::AttestationProduced(attest_slot))
|
|
||||||
);
|
|
||||||
|
|
||||||
// Trying the same attest slot again...
|
|
||||||
slot_clock.set_slot(attest_slot.as_u64());
|
|
||||||
assert_eq!(
|
|
||||||
attester.poll(),
|
|
||||||
Ok(PollOutcome::SlotAlreadyProcessed(attest_slot))
|
|
||||||
);
|
|
||||||
|
|
||||||
// One slot after the attest slot...
|
|
||||||
slot_clock.set_slot(attest_slot.as_u64() + 1);
|
|
||||||
assert_eq!(
|
|
||||||
attester.poll(),
|
|
||||||
Ok(PollOutcome::AttestationNotRequired(attest_slot + 1))
|
|
||||||
);
|
|
||||||
|
|
||||||
// In an epoch without known duties...
|
|
||||||
let slot = (attest_epoch + 1) * spec.slots_per_epoch;
|
|
||||||
slot_clock.set_slot(slot.into());
|
|
||||||
assert_eq!(
|
|
||||||
attester.poll(),
|
|
||||||
Ok(PollOutcome::ProducerDutiesUnknown(slot))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,44 +0,0 @@
|
|||||||
use crate::{DutiesReader, DutiesReaderError};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use types::{Epoch, Slot};
|
|
||||||
|
|
||||||
pub struct EpochMap {
|
|
||||||
slots_per_epoch: u64,
|
|
||||||
validator_index: Option<u64>,
|
|
||||||
map: HashMap<Epoch, (Slot, u64)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EpochMap {
|
|
||||||
pub fn new(slots_per_epoch: u64) -> Self {
|
|
||||||
Self {
|
|
||||||
slots_per_epoch,
|
|
||||||
validator_index: None,
|
|
||||||
map: HashMap::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn insert_attestation_shard(&mut self, slot: Slot, shard: u64) {
|
|
||||||
let epoch = slot.epoch(self.slots_per_epoch);
|
|
||||||
self.map.insert(epoch, (slot, shard));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn set_validator_index(&mut self, index: Option<u64>) {
|
|
||||||
self.validator_index = index;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DutiesReader for EpochMap {
|
|
||||||
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError> {
|
|
||||||
let epoch = slot.epoch(self.slots_per_epoch);
|
|
||||||
|
|
||||||
match self.map.get(&epoch) {
|
|
||||||
Some((attest_slot, attest_shard)) if *attest_slot == slot => Ok(Some(*attest_shard)),
|
|
||||||
Some((attest_slot, _attest_shard)) if *attest_slot != slot => Ok(None),
|
|
||||||
_ => Err(DutiesReaderError::UnknownEpoch),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn validator_index(&self) -> Option<u64> {
|
|
||||||
self.validator_index
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,31 +0,0 @@
|
|||||||
use crate::traits::Signer;
|
|
||||||
use std::sync::RwLock;
|
|
||||||
use types::{Keypair, Signature};
|
|
||||||
|
|
||||||
/// A test-only struct used to simulate a Beacon Node.
|
|
||||||
pub struct LocalSigner {
|
|
||||||
keypair: Keypair,
|
|
||||||
should_sign: RwLock<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl LocalSigner {
|
|
||||||
/// Produce a new LocalSigner with signing enabled by default.
|
|
||||||
pub fn new(keypair: Keypair) -> Self {
|
|
||||||
Self {
|
|
||||||
keypair,
|
|
||||||
should_sign: RwLock::new(true),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages
|
|
||||||
/// will be signed.
|
|
||||||
pub fn enable_signing(&self, enabled: bool) {
|
|
||||||
*self.should_sign.write().unwrap() = enabled;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Signer for LocalSigner {
|
|
||||||
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
|
||||||
Some(Signature::new(message, domain, &self.keypair.sk))
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,7 +0,0 @@
|
|||||||
mod epoch_map;
|
|
||||||
mod local_signer;
|
|
||||||
mod simulated_beacon_node;
|
|
||||||
|
|
||||||
pub use self::epoch_map::EpochMap;
|
|
||||||
pub use self::local_signer::LocalSigner;
|
|
||||||
pub use self::simulated_beacon_node::SimulatedBeaconNode;
|
|
@ -1,44 +0,0 @@
|
|||||||
use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome};
|
|
||||||
use std::sync::RwLock;
|
|
||||||
use types::{AttestationData, FreeAttestation, Slot};
|
|
||||||
|
|
||||||
type ProduceResult = Result<Option<AttestationData>, BeaconNodeError>;
|
|
||||||
type PublishResult = Result<PublishOutcome, BeaconNodeError>;
|
|
||||||
|
|
||||||
/// A test-only struct used to simulate a Beacon Node.
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct SimulatedBeaconNode {
|
|
||||||
pub produce_input: RwLock<Option<(Slot, u64)>>,
|
|
||||||
pub produce_result: RwLock<Option<ProduceResult>>,
|
|
||||||
|
|
||||||
pub publish_input: RwLock<Option<FreeAttestation>>,
|
|
||||||
pub publish_result: RwLock<Option<PublishResult>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SimulatedBeaconNode {
|
|
||||||
pub fn set_next_produce_result(&self, result: ProduceResult) {
|
|
||||||
*self.produce_result.write().unwrap() = Some(result);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn set_next_publish_result(&self, result: PublishResult) {
|
|
||||||
*self.publish_result.write().unwrap() = Some(result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BeaconNode for SimulatedBeaconNode {
|
|
||||||
fn produce_attestation_data(&self, slot: Slot, shard: u64) -> ProduceResult {
|
|
||||||
*self.produce_input.write().unwrap() = Some((slot, shard));
|
|
||||||
match *self.produce_result.read().unwrap() {
|
|
||||||
Some(ref r) => r.clone(),
|
|
||||||
None => panic!("TestBeaconNode: produce_result == None"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn publish_attestation(&self, free_attestation: FreeAttestation) -> PublishResult {
|
|
||||||
*self.publish_input.write().unwrap() = Some(free_attestation.clone());
|
|
||||||
match *self.publish_result.read().unwrap() {
|
|
||||||
Some(ref r) => r.clone(),
|
|
||||||
None => panic!("TestBeaconNode: publish_result == None"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,49 +0,0 @@
|
|||||||
use types::{AttestationData, FreeAttestation, Signature, Slot};
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum BeaconNodeError {
|
|
||||||
RemoteFailure(String),
|
|
||||||
DecodeFailure,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum PublishOutcome {
|
|
||||||
ValidAttestation,
|
|
||||||
InvalidAttestation(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Defines the methods required to produce and publish blocks on a Beacon Node.
|
|
||||||
pub trait BeaconNode: Send + Sync {
|
|
||||||
fn produce_attestation_data(
|
|
||||||
&self,
|
|
||||||
slot: Slot,
|
|
||||||
shard: u64,
|
|
||||||
) -> Result<Option<AttestationData>, BeaconNodeError>;
|
|
||||||
|
|
||||||
fn publish_attestation(
|
|
||||||
&self,
|
|
||||||
free_attestation: FreeAttestation,
|
|
||||||
) -> Result<PublishOutcome, BeaconNodeError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum DutiesReaderError {
|
|
||||||
UnknownValidator,
|
|
||||||
UnknownEpoch,
|
|
||||||
EpochLengthIsZero,
|
|
||||||
Poisoned,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Informs a validator of their duties (e.g., block production).
|
|
||||||
pub trait DutiesReader: Send + Sync {
|
|
||||||
/// Returns `Some(shard)` if this slot is an attestation slot. Otherwise, returns `None.`
|
|
||||||
fn attestation_shard(&self, slot: Slot) -> Result<Option<u64>, DutiesReaderError>;
|
|
||||||
|
|
||||||
/// Returns `Some(shard)` if this slot is an attestation slot. Otherwise, returns `None.`
|
|
||||||
fn validator_index(&self) -> Option<u64>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Signs message using an internally-maintained private key.
|
|
||||||
pub trait Signer {
|
|
||||||
fn sign_attestation_message(&self, message: &[u8], domain: u64) -> Option<Signature>;
|
|
||||||
}
|
|
@ -1,12 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "block_proposer"
|
|
||||||
version = "0.1.0"
|
|
||||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
|
||||||
edition = "2018"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
int_to_bytes = { path = "../utils/int_to_bytes" }
|
|
||||||
slot_clock = { path = "../utils/slot_clock" }
|
|
||||||
ssz = { path = "../utils/ssz" }
|
|
||||||
tree_hash = { path = "../../eth2/utils/tree_hash" }
|
|
||||||
types = { path = "../types" }
|
|
@ -1,303 +0,0 @@
|
|||||||
pub mod test_utils;
|
|
||||||
mod traits;
|
|
||||||
|
|
||||||
use slot_clock::SlotClock;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use tree_hash::{SignedRoot, TreeHash};
|
|
||||||
use types::{BeaconBlock, ChainSpec, Domain, Slot};
|
|
||||||
|
|
||||||
pub use self::traits::{
|
|
||||||
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum PollOutcome {
|
|
||||||
/// A new block was produced.
|
|
||||||
BlockProduced(Slot),
|
|
||||||
/// A block was not produced as it would have been slashable.
|
|
||||||
SlashableBlockNotProduced(Slot),
|
|
||||||
/// The validator duties did not require a block to be produced.
|
|
||||||
BlockProductionNotRequired(Slot),
|
|
||||||
/// The duties for the present epoch were not found.
|
|
||||||
ProducerDutiesUnknown(Slot),
|
|
||||||
/// The slot has already been processed, execution was skipped.
|
|
||||||
SlotAlreadyProcessed(Slot),
|
|
||||||
/// The Beacon Node was unable to produce a block at that slot.
|
|
||||||
BeaconNodeUnableToProduceBlock(Slot),
|
|
||||||
/// The signer failed to sign the message.
|
|
||||||
SignerRejection(Slot),
|
|
||||||
/// The public key for this validator is not an active validator.
|
|
||||||
ValidatorIsUnknown(Slot),
|
|
||||||
/// Unable to determine a `Fork` for signature domain generation.
|
|
||||||
UnableToGetFork(Slot),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum Error {
|
|
||||||
SlotClockError,
|
|
||||||
SlotUnknowable,
|
|
||||||
EpochMapPoisoned,
|
|
||||||
SlotClockPoisoned,
|
|
||||||
EpochLengthIsZero,
|
|
||||||
BeaconNodeError(BeaconNodeError),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A polling state machine which performs block production duties, based upon some epoch duties
|
|
||||||
/// (`EpochDutiesMap`) and a concept of time (`SlotClock`).
|
|
||||||
///
|
|
||||||
/// Ensures that messages are not slashable.
|
|
||||||
///
|
|
||||||
/// Relies upon an external service to keep the `EpochDutiesMap` updated.
|
|
||||||
pub struct BlockProducer<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> {
|
|
||||||
pub last_processed_slot: Option<Slot>,
|
|
||||||
spec: Arc<ChainSpec>,
|
|
||||||
epoch_map: Arc<V>,
|
|
||||||
slot_clock: Arc<T>,
|
|
||||||
beacon_node: Arc<U>,
|
|
||||||
signer: Arc<W>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U, V, W> {
|
|
||||||
/// Returns a new instance where `last_processed_slot == 0`.
|
|
||||||
pub fn new(
|
|
||||||
spec: Arc<ChainSpec>,
|
|
||||||
epoch_map: Arc<V>,
|
|
||||||
slot_clock: Arc<T>,
|
|
||||||
beacon_node: Arc<U>,
|
|
||||||
signer: Arc<W>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
last_processed_slot: None,
|
|
||||||
spec,
|
|
||||||
epoch_map,
|
|
||||||
slot_clock,
|
|
||||||
beacon_node,
|
|
||||||
signer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U, V, W> {
|
|
||||||
/// "Poll" to see if the validator is required to take any action.
|
|
||||||
///
|
|
||||||
/// The slot clock will be read and any new actions undertaken.
|
|
||||||
pub fn poll(&mut self) -> Result<PollOutcome, Error> {
|
|
||||||
let slot = self
|
|
||||||
.slot_clock
|
|
||||||
.present_slot()
|
|
||||||
.map_err(|_| Error::SlotClockError)?
|
|
||||||
.ok_or(Error::SlotUnknowable)?;
|
|
||||||
|
|
||||||
// If this is a new slot.
|
|
||||||
if !self.is_processed_slot(slot) {
|
|
||||||
let is_block_production_slot = match self.epoch_map.is_block_production_slot(slot) {
|
|
||||||
Ok(result) => result,
|
|
||||||
Err(DutiesReaderError::UnknownEpoch) => {
|
|
||||||
return Ok(PollOutcome::ProducerDutiesUnknown(slot));
|
|
||||||
}
|
|
||||||
Err(DutiesReaderError::UnknownValidator) => {
|
|
||||||
return Ok(PollOutcome::ValidatorIsUnknown(slot));
|
|
||||||
}
|
|
||||||
Err(DutiesReaderError::EpochLengthIsZero) => return Err(Error::EpochLengthIsZero),
|
|
||||||
Err(DutiesReaderError::Poisoned) => return Err(Error::EpochMapPoisoned),
|
|
||||||
};
|
|
||||||
|
|
||||||
if is_block_production_slot {
|
|
||||||
self.last_processed_slot = Some(slot);
|
|
||||||
|
|
||||||
self.produce_block(slot)
|
|
||||||
} else {
|
|
||||||
Ok(PollOutcome::BlockProductionNotRequired(slot))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(PollOutcome::SlotAlreadyProcessed(slot))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn is_processed_slot(&self, slot: Slot) -> bool {
|
|
||||||
match self.last_processed_slot {
|
|
||||||
Some(processed_slot) if processed_slot >= slot => true,
|
|
||||||
_ => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Produce a block at some slot.
|
|
||||||
///
|
|
||||||
/// Assumes that a block is required at this slot (does not check the duties).
|
|
||||||
///
|
|
||||||
/// Ensures the message is not slashable.
|
|
||||||
///
|
|
||||||
/// !!! UNSAFE !!!
|
|
||||||
///
|
|
||||||
/// The slash-protection code is not yet implemented. There is zero protection against
|
|
||||||
/// slashing.
|
|
||||||
fn produce_block(&mut self, slot: Slot) -> Result<PollOutcome, Error> {
|
|
||||||
let fork = match self.epoch_map.fork() {
|
|
||||||
Ok(fork) => fork,
|
|
||||||
Err(_) => return Ok(PollOutcome::UnableToGetFork(slot)),
|
|
||||||
};
|
|
||||||
|
|
||||||
let randao_reveal = {
|
|
||||||
// TODO: add domain, etc to this message. Also ensure result matches `into_to_bytes32`.
|
|
||||||
let message = slot.epoch(self.spec.slots_per_epoch).tree_hash_root();
|
|
||||||
|
|
||||||
match self.signer.sign_randao_reveal(
|
|
||||||
&message,
|
|
||||||
self.spec
|
|
||||||
.get_domain(slot.epoch(self.spec.slots_per_epoch), Domain::Randao, &fork),
|
|
||||||
) {
|
|
||||||
None => return Ok(PollOutcome::SignerRejection(slot)),
|
|
||||||
Some(signature) => signature,
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(block) = self
|
|
||||||
.beacon_node
|
|
||||||
.produce_beacon_block(slot, &randao_reveal)?
|
|
||||||
{
|
|
||||||
if self.safe_to_produce(&block) {
|
|
||||||
let domain = self.spec.get_domain(
|
|
||||||
slot.epoch(self.spec.slots_per_epoch),
|
|
||||||
Domain::BeaconBlock,
|
|
||||||
&fork,
|
|
||||||
);
|
|
||||||
if let Some(block) = self.sign_block(block, domain) {
|
|
||||||
self.beacon_node.publish_beacon_block(block)?;
|
|
||||||
Ok(PollOutcome::BlockProduced(slot))
|
|
||||||
} else {
|
|
||||||
Ok(PollOutcome::SignerRejection(slot))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(PollOutcome::SlashableBlockNotProduced(slot))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(PollOutcome::BeaconNodeUnableToProduceBlock(slot))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Consumes a block, returning that block signed by the validators private key.
|
|
||||||
///
|
|
||||||
/// Important: this function will not check to ensure the block is not slashable. This must be
|
|
||||||
/// done upstream.
|
|
||||||
fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option<BeaconBlock> {
|
|
||||||
self.store_produce(&block);
|
|
||||||
|
|
||||||
match self
|
|
||||||
.signer
|
|
||||||
.sign_block_proposal(&block.signed_root()[..], domain)
|
|
||||||
{
|
|
||||||
None => None,
|
|
||||||
Some(signature) => {
|
|
||||||
block.signature = signature;
|
|
||||||
Some(block)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns `true` if signing a block is safe (non-slashable).
|
|
||||||
///
|
|
||||||
/// !!! UNSAFE !!!
|
|
||||||
///
|
|
||||||
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
|
|
||||||
fn safe_to_produce(&self, _block: &BeaconBlock) -> bool {
|
|
||||||
// TODO: ensure the producer doesn't produce slashable blocks.
|
|
||||||
// https://github.com/sigp/lighthouse/issues/160
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Record that a block was produced so that slashable votes may not be made in the future.
|
|
||||||
///
|
|
||||||
/// !!! UNSAFE !!!
|
|
||||||
///
|
|
||||||
/// Important: this function is presently stubbed-out. It provides ZERO SAFETY.
|
|
||||||
fn store_produce(&mut self, _block: &BeaconBlock) {
|
|
||||||
// TODO: record this block production to prevent future slashings.
|
|
||||||
// https://github.com/sigp/lighthouse/issues/160
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<BeaconNodeError> for Error {
|
|
||||||
fn from(e: BeaconNodeError) -> Error {
|
|
||||||
Error::BeaconNodeError(e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::test_utils::{EpochMap, LocalSigner, SimulatedBeaconNode};
|
|
||||||
use super::*;
|
|
||||||
use slot_clock::TestingSlotClock;
|
|
||||||
use types::{
|
|
||||||
test_utils::{SeedableRng, TestRandom, XorShiftRng},
|
|
||||||
Keypair,
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: implement more thorough testing.
|
|
||||||
// https://github.com/sigp/lighthouse/issues/160
|
|
||||||
//
|
|
||||||
// These tests should serve as a good example for future tests.
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
pub fn polling() {
|
|
||||||
let mut rng = XorShiftRng::from_seed([42; 16]);
|
|
||||||
|
|
||||||
let spec = Arc::new(ChainSpec::foundation());
|
|
||||||
let slot_clock = Arc::new(TestingSlotClock::new(0));
|
|
||||||
let beacon_node = Arc::new(SimulatedBeaconNode::default());
|
|
||||||
let signer = Arc::new(LocalSigner::new(Keypair::random()));
|
|
||||||
|
|
||||||
let mut epoch_map = EpochMap::new(spec.slots_per_epoch);
|
|
||||||
let produce_slot = Slot::new(100);
|
|
||||||
let produce_epoch = produce_slot.epoch(spec.slots_per_epoch);
|
|
||||||
epoch_map.map.insert(produce_epoch, produce_slot);
|
|
||||||
let epoch_map = Arc::new(epoch_map);
|
|
||||||
|
|
||||||
let mut block_proposer = BlockProducer::new(
|
|
||||||
spec.clone(),
|
|
||||||
epoch_map.clone(),
|
|
||||||
slot_clock.clone(),
|
|
||||||
beacon_node.clone(),
|
|
||||||
signer.clone(),
|
|
||||||
);
|
|
||||||
|
|
||||||
// Configure responses from the BeaconNode.
|
|
||||||
beacon_node.set_next_produce_result(Ok(Some(BeaconBlock::random_for_test(&mut rng))));
|
|
||||||
beacon_node.set_next_publish_result(Ok(PublishOutcome::ValidBlock));
|
|
||||||
|
|
||||||
// One slot before production slot...
|
|
||||||
slot_clock.set_slot(produce_slot.as_u64() - 1);
|
|
||||||
assert_eq!(
|
|
||||||
block_proposer.poll(),
|
|
||||||
Ok(PollOutcome::BlockProductionNotRequired(produce_slot - 1))
|
|
||||||
);
|
|
||||||
|
|
||||||
// On the produce slot...
|
|
||||||
slot_clock.set_slot(produce_slot.as_u64());
|
|
||||||
assert_eq!(
|
|
||||||
block_proposer.poll(),
|
|
||||||
Ok(PollOutcome::BlockProduced(produce_slot.into()))
|
|
||||||
);
|
|
||||||
|
|
||||||
// Trying the same produce slot again...
|
|
||||||
slot_clock.set_slot(produce_slot.as_u64());
|
|
||||||
assert_eq!(
|
|
||||||
block_proposer.poll(),
|
|
||||||
Ok(PollOutcome::SlotAlreadyProcessed(produce_slot))
|
|
||||||
);
|
|
||||||
|
|
||||||
// One slot after the produce slot...
|
|
||||||
slot_clock.set_slot(produce_slot.as_u64() + 1);
|
|
||||||
assert_eq!(
|
|
||||||
block_proposer.poll(),
|
|
||||||
Ok(PollOutcome::BlockProductionNotRequired(produce_slot + 1))
|
|
||||||
);
|
|
||||||
|
|
||||||
// In an epoch without known duties...
|
|
||||||
let slot = (produce_epoch.as_u64() + 1) * spec.slots_per_epoch;
|
|
||||||
slot_clock.set_slot(slot);
|
|
||||||
assert_eq!(
|
|
||||||
block_proposer.poll(),
|
|
||||||
Ok(PollOutcome::ProducerDutiesUnknown(Slot::new(slot)))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,36 +0,0 @@
|
|||||||
use crate::{DutiesReader, DutiesReaderError};
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use types::{Epoch, Fork, Slot};
|
|
||||||
|
|
||||||
pub struct EpochMap {
|
|
||||||
slots_per_epoch: u64,
|
|
||||||
pub map: HashMap<Epoch, Slot>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl EpochMap {
|
|
||||||
pub fn new(slots_per_epoch: u64) -> Self {
|
|
||||||
Self {
|
|
||||||
slots_per_epoch,
|
|
||||||
map: HashMap::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DutiesReader for EpochMap {
|
|
||||||
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError> {
|
|
||||||
let epoch = slot.epoch(self.slots_per_epoch);
|
|
||||||
match self.map.get(&epoch) {
|
|
||||||
Some(s) if *s == slot => Ok(true),
|
|
||||||
Some(s) if *s != slot => Ok(false),
|
|
||||||
_ => Err(DutiesReaderError::UnknownEpoch),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn fork(&self) -> Result<Fork, DutiesReaderError> {
|
|
||||||
Ok(Fork {
|
|
||||||
previous_version: [0; 4],
|
|
||||||
current_version: [0; 4],
|
|
||||||
epoch: Epoch::new(0),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,35 +0,0 @@
|
|||||||
use crate::traits::Signer;
|
|
||||||
use std::sync::RwLock;
|
|
||||||
use types::{Keypair, Signature};
|
|
||||||
|
|
||||||
/// A test-only struct used to simulate a Beacon Node.
|
|
||||||
pub struct LocalSigner {
|
|
||||||
keypair: Keypair,
|
|
||||||
should_sign: RwLock<bool>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl LocalSigner {
|
|
||||||
/// Produce a new LocalSigner with signing enabled by default.
|
|
||||||
pub fn new(keypair: Keypair) -> Self {
|
|
||||||
Self {
|
|
||||||
keypair,
|
|
||||||
should_sign: RwLock::new(true),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// If set to `false`, the service will refuse to sign all messages. Otherwise, all messages
|
|
||||||
/// will be signed.
|
|
||||||
pub fn enable_signing(&self, enabled: bool) {
|
|
||||||
*self.should_sign.write().unwrap() = enabled;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Signer for LocalSigner {
|
|
||||||
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
|
||||||
Some(Signature::new(message, domain, &self.keypair.sk))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature> {
|
|
||||||
Some(Signature::new(message, domain, &self.keypair.sk))
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,7 +0,0 @@
|
|||||||
mod epoch_map;
|
|
||||||
mod local_signer;
|
|
||||||
mod simulated_beacon_node;
|
|
||||||
|
|
||||||
pub use self::epoch_map::EpochMap;
|
|
||||||
pub use self::local_signer::LocalSigner;
|
|
||||||
pub use self::simulated_beacon_node::SimulatedBeaconNode;
|
|
@ -1,48 +0,0 @@
|
|||||||
use crate::traits::{BeaconNode, BeaconNodeError, PublishOutcome};
|
|
||||||
use std::sync::RwLock;
|
|
||||||
use types::{BeaconBlock, Signature, Slot};
|
|
||||||
|
|
||||||
type ProduceResult = Result<Option<BeaconBlock>, BeaconNodeError>;
|
|
||||||
type PublishResult = Result<PublishOutcome, BeaconNodeError>;
|
|
||||||
|
|
||||||
/// A test-only struct used to simulate a Beacon Node.
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct SimulatedBeaconNode {
|
|
||||||
pub produce_input: RwLock<Option<(Slot, Signature)>>,
|
|
||||||
pub produce_result: RwLock<Option<ProduceResult>>,
|
|
||||||
|
|
||||||
pub publish_input: RwLock<Option<BeaconBlock>>,
|
|
||||||
pub publish_result: RwLock<Option<PublishResult>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SimulatedBeaconNode {
|
|
||||||
/// Set the result to be returned when `produce_beacon_block` is called.
|
|
||||||
pub fn set_next_produce_result(&self, result: ProduceResult) {
|
|
||||||
*self.produce_result.write().unwrap() = Some(result);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the result to be returned when `publish_beacon_block` is called.
|
|
||||||
pub fn set_next_publish_result(&self, result: PublishResult) {
|
|
||||||
*self.publish_result.write().unwrap() = Some(result);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl BeaconNode for SimulatedBeaconNode {
|
|
||||||
/// Returns the value specified by the `set_next_produce_result`.
|
|
||||||
fn produce_beacon_block(&self, slot: Slot, randao_reveal: &Signature) -> ProduceResult {
|
|
||||||
*self.produce_input.write().unwrap() = Some((slot, randao_reveal.clone()));
|
|
||||||
match *self.produce_result.read().unwrap() {
|
|
||||||
Some(ref r) => r.clone(),
|
|
||||||
None => panic!("SimulatedBeaconNode: produce_result == None"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the value specified by the `set_next_publish_result`.
|
|
||||||
fn publish_beacon_block(&self, block: BeaconBlock) -> PublishResult {
|
|
||||||
*self.publish_input.write().unwrap() = Some(block);
|
|
||||||
match *self.publish_result.read().unwrap() {
|
|
||||||
Some(ref r) => r.clone(),
|
|
||||||
None => panic!("SimulatedBeaconNode: publish_result == None"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,50 +0,0 @@
|
|||||||
use types::{BeaconBlock, Fork, Signature, Slot};
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum BeaconNodeError {
|
|
||||||
RemoteFailure(String),
|
|
||||||
DecodeFailure,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum PublishOutcome {
|
|
||||||
ValidBlock,
|
|
||||||
InvalidBlock(String),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Defines the methods required to produce and publish blocks on a Beacon Node.
|
|
||||||
pub trait BeaconNode: Send + Sync {
|
|
||||||
/// Request that the node produces a block.
|
|
||||||
///
|
|
||||||
/// Returns Ok(None) if the Beacon Node is unable to produce at the given slot.
|
|
||||||
fn produce_beacon_block(
|
|
||||||
&self,
|
|
||||||
slot: Slot,
|
|
||||||
randao_reveal: &Signature,
|
|
||||||
) -> Result<Option<BeaconBlock>, BeaconNodeError>;
|
|
||||||
|
|
||||||
/// Request that the node publishes a block.
|
|
||||||
///
|
|
||||||
/// Returns `true` if the publish was sucessful.
|
|
||||||
fn publish_beacon_block(&self, block: BeaconBlock) -> Result<PublishOutcome, BeaconNodeError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Clone)]
|
|
||||||
pub enum DutiesReaderError {
|
|
||||||
UnknownValidator,
|
|
||||||
UnknownEpoch,
|
|
||||||
EpochLengthIsZero,
|
|
||||||
Poisoned,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Informs a validator of their duties (e.g., block production).
|
|
||||||
pub trait DutiesReader: Send + Sync {
|
|
||||||
fn is_block_production_slot(&self, slot: Slot) -> Result<bool, DutiesReaderError>;
|
|
||||||
fn fork(&self) -> Result<Fork, DutiesReaderError>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Signs message using an internally-maintained private key.
|
|
||||||
pub trait Signer {
|
|
||||||
fn sign_block_proposal(&self, message: &[u8], domain: u64) -> Option<Signature>;
|
|
||||||
fn sign_randao_reveal(&self, message: &[u8], domain: u64) -> Option<Signature>;
|
|
||||||
}
|
|
@ -9,8 +9,9 @@ use db::{
|
|||||||
};
|
};
|
||||||
use log::{debug, trace};
|
use log::{debug, trace};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
use std::marker::PhantomData;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{BeaconBlock, ChainSpec, Hash256, Slot, SlotHeight};
|
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot, SlotHeight};
|
||||||
|
|
||||||
//TODO: Pruning - Children
|
//TODO: Pruning - Children
|
||||||
//TODO: Handle Syncing
|
//TODO: Handle Syncing
|
||||||
@ -33,7 +34,7 @@ fn power_of_2_below(x: u64) -> u64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm.
|
/// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm.
|
||||||
pub struct BitwiseLMDGhost<T: ClientDB + Sized> {
|
pub struct BitwiseLMDGhost<T: ClientDB + Sized, B> {
|
||||||
/// A cache of known ancestors at given heights for a specific block.
|
/// A cache of known ancestors at given heights for a specific block.
|
||||||
//TODO: Consider FnvHashMap
|
//TODO: Consider FnvHashMap
|
||||||
cache: HashMap<CacheKey<u64>, Hash256>,
|
cache: HashMap<CacheKey<u64>, Hash256>,
|
||||||
@ -50,9 +51,10 @@ pub struct BitwiseLMDGhost<T: ClientDB + Sized> {
|
|||||||
/// State storage access.
|
/// State storage access.
|
||||||
state_store: Arc<BeaconStateStore<T>>,
|
state_store: Arc<BeaconStateStore<T>>,
|
||||||
max_known_height: SlotHeight,
|
max_known_height: SlotHeight,
|
||||||
|
_phantom: PhantomData<B>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> BitwiseLMDGhost<T>
|
impl<T, B: EthSpec> BitwiseLMDGhost<T, B>
|
||||||
where
|
where
|
||||||
T: ClientDB + Sized,
|
T: ClientDB + Sized,
|
||||||
{
|
{
|
||||||
@ -68,6 +70,7 @@ where
|
|||||||
max_known_height: SlotHeight::new(0),
|
max_known_height: SlotHeight::new(0),
|
||||||
block_store,
|
block_store,
|
||||||
state_store,
|
state_store,
|
||||||
|
_phantom: PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -85,7 +88,7 @@ where
|
|||||||
// build a hashmap of block_hash to weighted votes
|
// build a hashmap of block_hash to weighted votes
|
||||||
let mut latest_votes: HashMap<Hash256, u64> = HashMap::new();
|
let mut latest_votes: HashMap<Hash256, u64> = HashMap::new();
|
||||||
// gets the current weighted votes
|
// gets the current weighted votes
|
||||||
let current_state = self
|
let current_state: BeaconState<B> = self
|
||||||
.state_store
|
.state_store
|
||||||
.get_deserialized(&state_root)?
|
.get_deserialized(&state_root)?
|
||||||
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
|
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
|
||||||
@ -240,7 +243,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
|
impl<T: ClientDB + Sized, B: EthSpec> ForkChoice for BitwiseLMDGhost<T, B> {
|
||||||
fn add_block(
|
fn add_block(
|
||||||
&mut self,
|
&mut self,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
|
@ -9,8 +9,9 @@ use db::{
|
|||||||
use log::{debug, trace};
|
use log::{debug, trace};
|
||||||
use std::cmp::Ordering;
|
use std::cmp::Ordering;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
use std::marker::PhantomData;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{BeaconBlock, ChainSpec, Hash256, Slot, SlotHeight};
|
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot, SlotHeight};
|
||||||
|
|
||||||
//TODO: Pruning - Children
|
//TODO: Pruning - Children
|
||||||
//TODO: Handle Syncing
|
//TODO: Handle Syncing
|
||||||
@ -33,7 +34,7 @@ fn power_of_2_below(x: u64) -> u64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Stores the necessary data structures to run the optimised lmd ghost algorithm.
|
/// Stores the necessary data structures to run the optimised lmd ghost algorithm.
|
||||||
pub struct OptimizedLMDGhost<T: ClientDB + Sized> {
|
pub struct OptimizedLMDGhost<T: ClientDB + Sized, B> {
|
||||||
/// A cache of known ancestors at given heights for a specific block.
|
/// A cache of known ancestors at given heights for a specific block.
|
||||||
//TODO: Consider FnvHashMap
|
//TODO: Consider FnvHashMap
|
||||||
cache: HashMap<CacheKey<u64>, Hash256>,
|
cache: HashMap<CacheKey<u64>, Hash256>,
|
||||||
@ -50,9 +51,10 @@ pub struct OptimizedLMDGhost<T: ClientDB + Sized> {
|
|||||||
/// State storage access.
|
/// State storage access.
|
||||||
state_store: Arc<BeaconStateStore<T>>,
|
state_store: Arc<BeaconStateStore<T>>,
|
||||||
max_known_height: SlotHeight,
|
max_known_height: SlotHeight,
|
||||||
|
_phantom: PhantomData<B>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> OptimizedLMDGhost<T>
|
impl<T, B: EthSpec> OptimizedLMDGhost<T, B>
|
||||||
where
|
where
|
||||||
T: ClientDB + Sized,
|
T: ClientDB + Sized,
|
||||||
{
|
{
|
||||||
@ -68,6 +70,7 @@ where
|
|||||||
max_known_height: SlotHeight::new(0),
|
max_known_height: SlotHeight::new(0),
|
||||||
block_store,
|
block_store,
|
||||||
state_store,
|
state_store,
|
||||||
|
_phantom: PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -85,7 +88,7 @@ where
|
|||||||
// build a hashmap of block_hash to weighted votes
|
// build a hashmap of block_hash to weighted votes
|
||||||
let mut latest_votes: HashMap<Hash256, u64> = HashMap::new();
|
let mut latest_votes: HashMap<Hash256, u64> = HashMap::new();
|
||||||
// gets the current weighted votes
|
// gets the current weighted votes
|
||||||
let current_state = self
|
let current_state: BeaconState<B> = self
|
||||||
.state_store
|
.state_store
|
||||||
.get_deserialized(&state_root)?
|
.get_deserialized(&state_root)?
|
||||||
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
|
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
|
||||||
@ -211,7 +214,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: ClientDB + Sized> ForkChoice for OptimizedLMDGhost<T> {
|
impl<T: ClientDB + Sized, B: EthSpec> ForkChoice for OptimizedLMDGhost<T, B> {
|
||||||
fn add_block(
|
fn add_block(
|
||||||
&mut self,
|
&mut self,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
|
@ -7,12 +7,13 @@ use db::{
|
|||||||
};
|
};
|
||||||
use log::{debug, trace};
|
use log::{debug, trace};
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
use std::marker::PhantomData;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use types::{BeaconBlock, ChainSpec, Hash256, Slot};
|
use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot};
|
||||||
|
|
||||||
//TODO: Pruning and syncing
|
//TODO: Pruning and syncing
|
||||||
|
|
||||||
pub struct SlowLMDGhost<T: ClientDB + Sized> {
|
pub struct SlowLMDGhost<T: ClientDB + Sized, B> {
|
||||||
/// The latest attestation targets as a map of validator index to block hash.
|
/// The latest attestation targets as a map of validator index to block hash.
|
||||||
//TODO: Could this be a fixed size vec
|
//TODO: Could this be a fixed size vec
|
||||||
latest_attestation_targets: HashMap<u64, Hash256>,
|
latest_attestation_targets: HashMap<u64, Hash256>,
|
||||||
@ -22,9 +23,10 @@ pub struct SlowLMDGhost<T: ClientDB + Sized> {
|
|||||||
block_store: Arc<BeaconBlockStore<T>>,
|
block_store: Arc<BeaconBlockStore<T>>,
|
||||||
/// State storage access.
|
/// State storage access.
|
||||||
state_store: Arc<BeaconStateStore<T>>,
|
state_store: Arc<BeaconStateStore<T>>,
|
||||||
|
_phantom: PhantomData<B>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> SlowLMDGhost<T>
|
impl<T, B: EthSpec> SlowLMDGhost<T, B>
|
||||||
where
|
where
|
||||||
T: ClientDB + Sized,
|
T: ClientDB + Sized,
|
||||||
{
|
{
|
||||||
@ -37,6 +39,7 @@ where
|
|||||||
children: HashMap::new(),
|
children: HashMap::new(),
|
||||||
block_store,
|
block_store,
|
||||||
state_store,
|
state_store,
|
||||||
|
_phantom: PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -54,7 +57,7 @@ where
|
|||||||
// build a hashmap of block_hash to weighted votes
|
// build a hashmap of block_hash to weighted votes
|
||||||
let mut latest_votes: HashMap<Hash256, u64> = HashMap::new();
|
let mut latest_votes: HashMap<Hash256, u64> = HashMap::new();
|
||||||
// gets the current weighted votes
|
// gets the current weighted votes
|
||||||
let current_state = self
|
let current_state: BeaconState<B> = self
|
||||||
.state_store
|
.state_store
|
||||||
.get_deserialized(&state_root)?
|
.get_deserialized(&state_root)?
|
||||||
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
|
.ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
|
||||||
@ -105,7 +108,7 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
|
impl<T: ClientDB + Sized, B: EthSpec> ForkChoice for SlowLMDGhost<T, B> {
|
||||||
/// Process when a block is added
|
/// Process when a block is added
|
||||||
fn add_block(
|
fn add_block(
|
||||||
&mut self,
|
&mut self,
|
||||||
|
@ -25,7 +25,9 @@ use std::collections::HashMap;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::{fs::File, io::prelude::*, path::PathBuf};
|
use std::{fs::File, io::prelude::*, path::PathBuf};
|
||||||
use types::test_utils::TestingBeaconStateBuilder;
|
use types::test_utils::TestingBeaconStateBuilder;
|
||||||
use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Keypair, Slot};
|
use types::{
|
||||||
|
BeaconBlock, BeaconBlockBody, Eth1Data, EthSpec, FoundationEthSpec, Hash256, Keypair, Slot,
|
||||||
|
};
|
||||||
use yaml_rust::yaml;
|
use yaml_rust::yaml;
|
||||||
|
|
||||||
// Note: We Assume the block Id's are hex-encoded.
|
// Note: We Assume the block Id's are hex-encoded.
|
||||||
@ -82,7 +84,7 @@ fn test_yaml_vectors(
|
|||||||
let test_cases = load_test_cases_from_yaml(yaml_file_path);
|
let test_cases = load_test_cases_from_yaml(yaml_file_path);
|
||||||
|
|
||||||
// default vars
|
// default vars
|
||||||
let spec = ChainSpec::foundation();
|
let spec = FoundationEthSpec::spec();
|
||||||
let zero_hash = Hash256::zero();
|
let zero_hash = Hash256::zero();
|
||||||
let eth1_data = Eth1Data {
|
let eth1_data = Eth1Data {
|
||||||
deposit_root: zero_hash.clone(),
|
deposit_root: zero_hash.clone(),
|
||||||
@ -227,23 +229,27 @@ fn setup_inital_state(
|
|||||||
|
|
||||||
// the fork choice instantiation
|
// the fork choice instantiation
|
||||||
let fork_choice: Box<ForkChoice> = match fork_choice_algo {
|
let fork_choice: Box<ForkChoice> = match fork_choice_algo {
|
||||||
ForkChoiceAlgorithm::OptimizedLMDGhost => Box::new(OptimizedLMDGhost::new(
|
ForkChoiceAlgorithm::OptimizedLMDGhost => {
|
||||||
block_store.clone(),
|
let f: OptimizedLMDGhost<MemoryDB, FoundationEthSpec> =
|
||||||
state_store.clone(),
|
OptimizedLMDGhost::new(block_store.clone(), state_store.clone());
|
||||||
)),
|
Box::new(f)
|
||||||
ForkChoiceAlgorithm::BitwiseLMDGhost => Box::new(BitwiseLMDGhost::new(
|
}
|
||||||
block_store.clone(),
|
ForkChoiceAlgorithm::BitwiseLMDGhost => {
|
||||||
state_store.clone(),
|
let f: BitwiseLMDGhost<MemoryDB, FoundationEthSpec> =
|
||||||
)),
|
BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
|
||||||
|
Box::new(f)
|
||||||
|
}
|
||||||
ForkChoiceAlgorithm::SlowLMDGhost => {
|
ForkChoiceAlgorithm::SlowLMDGhost => {
|
||||||
Box::new(SlowLMDGhost::new(block_store.clone(), state_store.clone()))
|
let f: SlowLMDGhost<MemoryDB, FoundationEthSpec> =
|
||||||
|
SlowLMDGhost::new(block_store.clone(), state_store.clone());
|
||||||
|
Box::new(f)
|
||||||
}
|
}
|
||||||
ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())),
|
ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())),
|
||||||
};
|
};
|
||||||
|
|
||||||
let spec = ChainSpec::foundation();
|
let spec = FoundationEthSpec::spec();
|
||||||
|
|
||||||
let mut state_builder =
|
let mut state_builder: TestingBeaconStateBuilder<FoundationEthSpec> =
|
||||||
TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec);
|
TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec);
|
||||||
state_builder.build_caches(&spec).unwrap();
|
state_builder.build_caches(&spec).unwrap();
|
||||||
let (state, _keypairs) = state_builder.build();
|
let (state, _keypairs) = state_builder.build();
|
||||||
|
@ -13,10 +13,11 @@ use state_processing::per_block_processing::{
|
|||||||
verify_transfer_time_independent_only,
|
verify_transfer_time_independent_only,
|
||||||
};
|
};
|
||||||
use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet};
|
use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet};
|
||||||
|
use std::marker::PhantomData;
|
||||||
use types::chain_spec::Domain;
|
use types::chain_spec::Domain;
|
||||||
use types::{
|
use types::{
|
||||||
Attestation, AttestationData, AttesterSlashing, BeaconState, ChainSpec, Deposit, Epoch,
|
Attestation, AttestationData, AttesterSlashing, BeaconState, ChainSpec, Deposit, Epoch,
|
||||||
ProposerSlashing, Transfer, Validator, VoluntaryExit,
|
EthSpec, ProposerSlashing, Transfer, Validator, VoluntaryExit,
|
||||||
};
|
};
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
@ -25,7 +26,7 @@ const VERIFY_DEPOSIT_PROOFS: bool = false;
|
|||||||
const VERIFY_DEPOSIT_PROOFS: bool = false; // TODO: enable this
|
const VERIFY_DEPOSIT_PROOFS: bool = false; // TODO: enable this
|
||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
pub struct OperationPool {
|
pub struct OperationPool<T: EthSpec + Default> {
|
||||||
/// Map from attestation ID (see below) to vectors of attestations.
|
/// Map from attestation ID (see below) to vectors of attestations.
|
||||||
attestations: RwLock<HashMap<AttestationId, Vec<Attestation>>>,
|
attestations: RwLock<HashMap<AttestationId, Vec<Attestation>>>,
|
||||||
/// Map from deposit index to deposit data.
|
/// Map from deposit index to deposit data.
|
||||||
@ -42,6 +43,7 @@ pub struct OperationPool {
|
|||||||
voluntary_exits: RwLock<HashMap<u64, VoluntaryExit>>,
|
voluntary_exits: RwLock<HashMap<u64, VoluntaryExit>>,
|
||||||
/// Set of transfers.
|
/// Set of transfers.
|
||||||
transfers: RwLock<HashSet<Transfer>>,
|
transfers: RwLock<HashSet<Transfer>>,
|
||||||
|
_phantom: PhantomData<T>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Serialized `AttestationData` augmented with a domain to encode the fork info.
|
/// Serialized `AttestationData` augmented with a domain to encode the fork info.
|
||||||
@ -52,14 +54,22 @@ struct AttestationId(Vec<u8>);
|
|||||||
const DOMAIN_BYTES_LEN: usize = 8;
|
const DOMAIN_BYTES_LEN: usize = 8;
|
||||||
|
|
||||||
impl AttestationId {
|
impl AttestationId {
|
||||||
fn from_data(attestation: &AttestationData, state: &BeaconState, spec: &ChainSpec) -> Self {
|
fn from_data<T: EthSpec>(
|
||||||
|
attestation: &AttestationData,
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Self {
|
||||||
let mut bytes = ssz_encode(attestation);
|
let mut bytes = ssz_encode(attestation);
|
||||||
let epoch = attestation.slot.epoch(spec.slots_per_epoch);
|
let epoch = attestation.slot.epoch(spec.slots_per_epoch);
|
||||||
bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec));
|
bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec));
|
||||||
AttestationId(bytes)
|
AttestationId(bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn compute_domain_bytes(epoch: Epoch, state: &BeaconState, spec: &ChainSpec) -> Vec<u8> {
|
fn compute_domain_bytes<T: EthSpec>(
|
||||||
|
epoch: Epoch,
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Vec<u8> {
|
||||||
int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork))
|
int_to_bytes8(spec.get_domain(epoch, Domain::Attestation, &state.fork))
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -75,7 +85,11 @@ impl AttestationId {
|
|||||||
/// receive for including it in a block.
|
/// receive for including it in a block.
|
||||||
// TODO: this could be optimised with a map from validator index to whether that validator has
|
// TODO: this could be optimised with a map from validator index to whether that validator has
|
||||||
// attested in each of the current and previous epochs. Currently quadractic in number of validators.
|
// attested in each of the current and previous epochs. Currently quadractic in number of validators.
|
||||||
fn attestation_score(attestation: &Attestation, state: &BeaconState, spec: &ChainSpec) -> usize {
|
fn attestation_score<T: EthSpec>(
|
||||||
|
attestation: &Attestation,
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> usize {
|
||||||
// Bitfield of validators whose attestations are new/fresh.
|
// Bitfield of validators whose attestations are new/fresh.
|
||||||
let mut new_validators = attestation.aggregation_bitfield.clone();
|
let mut new_validators = attestation.aggregation_bitfield.clone();
|
||||||
|
|
||||||
@ -113,7 +127,7 @@ pub enum DepositInsertStatus {
|
|||||||
Replaced(Box<Deposit>),
|
Replaced(Box<Deposit>),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl OperationPool {
|
impl<T: EthSpec> OperationPool<T> {
|
||||||
/// Create a new operation pool.
|
/// Create a new operation pool.
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self::default()
|
Self::default()
|
||||||
@ -123,7 +137,7 @@ impl OperationPool {
|
|||||||
pub fn insert_attestation(
|
pub fn insert_attestation(
|
||||||
&self,
|
&self,
|
||||||
attestation: Attestation,
|
attestation: Attestation,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), AttestationValidationError> {
|
) -> Result<(), AttestationValidationError> {
|
||||||
// Check that attestation signatures are valid.
|
// Check that attestation signatures are valid.
|
||||||
@ -161,15 +175,11 @@ impl OperationPool {
|
|||||||
|
|
||||||
/// Total number of attestations in the pool, including attestations for the same data.
|
/// Total number of attestations in the pool, including attestations for the same data.
|
||||||
pub fn num_attestations(&self) -> usize {
|
pub fn num_attestations(&self) -> usize {
|
||||||
self.attestations
|
self.attestations.read().values().map(Vec::len).sum()
|
||||||
.read()
|
|
||||||
.values()
|
|
||||||
.map(|atts| atts.len())
|
|
||||||
.sum()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get a list of attestations for inclusion in a block.
|
/// Get a list of attestations for inclusion in a block.
|
||||||
pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Attestation> {
|
pub fn get_attestations(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Vec<Attestation> {
|
||||||
// Attestations for the current fork, which may be from the current or previous epoch.
|
// Attestations for the current fork, which may be from the current or previous epoch.
|
||||||
let prev_epoch = state.previous_epoch(spec);
|
let prev_epoch = state.previous_epoch(spec);
|
||||||
let current_epoch = state.current_epoch(spec);
|
let current_epoch = state.current_epoch(spec);
|
||||||
@ -204,7 +214,7 @@ impl OperationPool {
|
|||||||
// TODO: we could probably prune other attestations here:
|
// TODO: we could probably prune other attestations here:
|
||||||
// - ones that are completely covered by attestations included in the state
|
// - ones that are completely covered by attestations included in the state
|
||||||
// - maybe ones invalidated by the confirmation of one fork over another
|
// - maybe ones invalidated by the confirmation of one fork over another
|
||||||
pub fn prune_attestations(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
|
pub fn prune_attestations(&self, finalized_state: &BeaconState<T>, spec: &ChainSpec) {
|
||||||
self.attestations.write().retain(|_, attestations| {
|
self.attestations.write().retain(|_, attestations| {
|
||||||
// All the attestations in this bucket have the same data, so we only need to
|
// All the attestations in this bucket have the same data, so we only need to
|
||||||
// check the first one.
|
// check the first one.
|
||||||
@ -220,7 +230,7 @@ impl OperationPool {
|
|||||||
pub fn insert_deposit(
|
pub fn insert_deposit(
|
||||||
&self,
|
&self,
|
||||||
deposit: Deposit,
|
deposit: Deposit,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<DepositInsertStatus, DepositValidationError> {
|
) -> Result<DepositInsertStatus, DepositValidationError> {
|
||||||
use DepositInsertStatus::*;
|
use DepositInsertStatus::*;
|
||||||
@ -245,7 +255,7 @@ impl OperationPool {
|
|||||||
/// Get an ordered list of deposits for inclusion in a block.
|
/// Get an ordered list of deposits for inclusion in a block.
|
||||||
///
|
///
|
||||||
/// Take at most the maximum number of deposits, beginning from the current deposit index.
|
/// Take at most the maximum number of deposits, beginning from the current deposit index.
|
||||||
pub fn get_deposits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Deposit> {
|
pub fn get_deposits(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Vec<Deposit> {
|
||||||
let start_idx = state.deposit_index;
|
let start_idx = state.deposit_index;
|
||||||
(start_idx..start_idx + spec.max_deposits)
|
(start_idx..start_idx + spec.max_deposits)
|
||||||
.map(|idx| self.deposits.read().get(&idx).cloned())
|
.map(|idx| self.deposits.read().get(&idx).cloned())
|
||||||
@ -255,7 +265,7 @@ impl OperationPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Remove all deposits with index less than the deposit index of the latest finalised block.
|
/// Remove all deposits with index less than the deposit index of the latest finalised block.
|
||||||
pub fn prune_deposits(&self, state: &BeaconState) -> BTreeMap<u64, Deposit> {
|
pub fn prune_deposits(&self, state: &BeaconState<T>) -> BTreeMap<u64, Deposit> {
|
||||||
let deposits_keep = self.deposits.write().split_off(&state.deposit_index);
|
let deposits_keep = self.deposits.write().split_off(&state.deposit_index);
|
||||||
std::mem::replace(&mut self.deposits.write(), deposits_keep)
|
std::mem::replace(&mut self.deposits.write(), deposits_keep)
|
||||||
}
|
}
|
||||||
@ -269,7 +279,7 @@ impl OperationPool {
|
|||||||
pub fn insert_proposer_slashing(
|
pub fn insert_proposer_slashing(
|
||||||
&self,
|
&self,
|
||||||
slashing: ProposerSlashing,
|
slashing: ProposerSlashing,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), ProposerSlashingValidationError> {
|
) -> Result<(), ProposerSlashingValidationError> {
|
||||||
// TODO: should maybe insert anyway if the proposer is unknown in the validator index,
|
// TODO: should maybe insert anyway if the proposer is unknown in the validator index,
|
||||||
@ -286,7 +296,7 @@ impl OperationPool {
|
|||||||
/// Depends on the fork field of the state, but not on the state's epoch.
|
/// Depends on the fork field of the state, but not on the state's epoch.
|
||||||
fn attester_slashing_id(
|
fn attester_slashing_id(
|
||||||
slashing: &AttesterSlashing,
|
slashing: &AttesterSlashing,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> (AttestationId, AttestationId) {
|
) -> (AttestationId, AttestationId) {
|
||||||
(
|
(
|
||||||
@ -299,7 +309,7 @@ impl OperationPool {
|
|||||||
pub fn insert_attester_slashing(
|
pub fn insert_attester_slashing(
|
||||||
&self,
|
&self,
|
||||||
slashing: AttesterSlashing,
|
slashing: AttesterSlashing,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), AttesterSlashingValidationError> {
|
) -> Result<(), AttesterSlashingValidationError> {
|
||||||
verify_attester_slashing(state, &slashing, true, spec)?;
|
verify_attester_slashing(state, &slashing, true, spec)?;
|
||||||
@ -315,7 +325,7 @@ impl OperationPool {
|
|||||||
/// earlier in the block.
|
/// earlier in the block.
|
||||||
pub fn get_slashings(
|
pub fn get_slashings(
|
||||||
&self,
|
&self,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing>) {
|
) -> (Vec<ProposerSlashing>, Vec<AttesterSlashing>) {
|
||||||
let proposer_slashings = filter_limit_operations(
|
let proposer_slashings = filter_limit_operations(
|
||||||
@ -370,7 +380,7 @@ impl OperationPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Prune proposer slashings for all slashed or withdrawn validators.
|
/// Prune proposer slashings for all slashed or withdrawn validators.
|
||||||
pub fn prune_proposer_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
|
pub fn prune_proposer_slashings(&self, finalized_state: &BeaconState<T>, spec: &ChainSpec) {
|
||||||
prune_validator_hash_map(
|
prune_validator_hash_map(
|
||||||
&mut self.proposer_slashings.write(),
|
&mut self.proposer_slashings.write(),
|
||||||
|validator| {
|
|validator| {
|
||||||
@ -383,7 +393,7 @@ impl OperationPool {
|
|||||||
|
|
||||||
/// Prune attester slashings for all slashed or withdrawn validators, or attestations on another
|
/// Prune attester slashings for all slashed or withdrawn validators, or attestations on another
|
||||||
/// fork.
|
/// fork.
|
||||||
pub fn prune_attester_slashings(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
|
pub fn prune_attester_slashings(&self, finalized_state: &BeaconState<T>, spec: &ChainSpec) {
|
||||||
self.attester_slashings.write().retain(|id, slashing| {
|
self.attester_slashings.write().retain(|id, slashing| {
|
||||||
let fork_ok = &Self::attester_slashing_id(slashing, finalized_state, spec) == id;
|
let fork_ok = &Self::attester_slashing_id(slashing, finalized_state, spec) == id;
|
||||||
let curr_epoch = finalized_state.current_epoch(spec);
|
let curr_epoch = finalized_state.current_epoch(spec);
|
||||||
@ -402,7 +412,7 @@ impl OperationPool {
|
|||||||
pub fn insert_voluntary_exit(
|
pub fn insert_voluntary_exit(
|
||||||
&self,
|
&self,
|
||||||
exit: VoluntaryExit,
|
exit: VoluntaryExit,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), ExitValidationError> {
|
) -> Result<(), ExitValidationError> {
|
||||||
verify_exit_time_independent_only(state, &exit, spec)?;
|
verify_exit_time_independent_only(state, &exit, spec)?;
|
||||||
@ -413,7 +423,11 @@ impl OperationPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Get a list of voluntary exits for inclusion in a block.
|
/// Get a list of voluntary exits for inclusion in a block.
|
||||||
pub fn get_voluntary_exits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<VoluntaryExit> {
|
pub fn get_voluntary_exits(
|
||||||
|
&self,
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Vec<VoluntaryExit> {
|
||||||
filter_limit_operations(
|
filter_limit_operations(
|
||||||
self.voluntary_exits.read().values(),
|
self.voluntary_exits.read().values(),
|
||||||
|exit| verify_exit(state, exit, spec).is_ok(),
|
|exit| verify_exit(state, exit, spec).is_ok(),
|
||||||
@ -422,7 +436,7 @@ impl OperationPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Prune if validator has already exited at the last finalized state.
|
/// Prune if validator has already exited at the last finalized state.
|
||||||
pub fn prune_voluntary_exits(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
|
pub fn prune_voluntary_exits(&self, finalized_state: &BeaconState<T>, spec: &ChainSpec) {
|
||||||
prune_validator_hash_map(
|
prune_validator_hash_map(
|
||||||
&mut self.voluntary_exits.write(),
|
&mut self.voluntary_exits.write(),
|
||||||
|validator| validator.is_exited_at(finalized_state.current_epoch(spec)),
|
|validator| validator.is_exited_at(finalized_state.current_epoch(spec)),
|
||||||
@ -434,7 +448,7 @@ impl OperationPool {
|
|||||||
pub fn insert_transfer(
|
pub fn insert_transfer(
|
||||||
&self,
|
&self,
|
||||||
transfer: Transfer,
|
transfer: Transfer,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), TransferValidationError> {
|
) -> Result<(), TransferValidationError> {
|
||||||
// The signature of the transfer isn't hashed, but because we check
|
// The signature of the transfer isn't hashed, but because we check
|
||||||
@ -448,7 +462,7 @@ impl OperationPool {
|
|||||||
/// Get a list of transfers for inclusion in a block.
|
/// Get a list of transfers for inclusion in a block.
|
||||||
// TODO: improve the economic optimality of this function by accounting for
|
// TODO: improve the economic optimality of this function by accounting for
|
||||||
// dependencies between transfers in the same block e.g. A pays B, B pays C
|
// dependencies between transfers in the same block e.g. A pays B, B pays C
|
||||||
pub fn get_transfers(&self, state: &BeaconState, spec: &ChainSpec) -> Vec<Transfer> {
|
pub fn get_transfers(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Vec<Transfer> {
|
||||||
self.transfers
|
self.transfers
|
||||||
.read()
|
.read()
|
||||||
.iter()
|
.iter()
|
||||||
@ -460,14 +474,14 @@ impl OperationPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Prune the set of transfers by removing all those whose slot has already passed.
|
/// Prune the set of transfers by removing all those whose slot has already passed.
|
||||||
pub fn prune_transfers(&self, finalized_state: &BeaconState) {
|
pub fn prune_transfers(&self, finalized_state: &BeaconState<T>) {
|
||||||
self.transfers
|
self.transfers
|
||||||
.write()
|
.write()
|
||||||
.retain(|transfer| transfer.slot > finalized_state.slot)
|
.retain(|transfer| transfer.slot > finalized_state.slot)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Prune all types of transactions given the latest finalized state.
|
/// Prune all types of transactions given the latest finalized state.
|
||||||
pub fn prune_all(&self, finalized_state: &BeaconState, spec: &ChainSpec) {
|
pub fn prune_all(&self, finalized_state: &BeaconState<T>, spec: &ChainSpec) {
|
||||||
self.prune_attestations(finalized_state, spec);
|
self.prune_attestations(finalized_state, spec);
|
||||||
self.prune_deposits(finalized_state);
|
self.prune_deposits(finalized_state);
|
||||||
self.prune_proposer_slashings(finalized_state, spec);
|
self.prune_proposer_slashings(finalized_state, spec);
|
||||||
@ -487,7 +501,10 @@ impl OperationPool {
|
|||||||
///
|
///
|
||||||
/// - Their `AttestationData` is equal.
|
/// - Their `AttestationData` is equal.
|
||||||
/// - `attestation` does not contain any signatures that `PendingAttestation` does not have.
|
/// - `attestation` does not contain any signatures that `PendingAttestation` does not have.
|
||||||
fn superior_attestation_exists_in_state(state: &BeaconState, attestation: &Attestation) -> bool {
|
fn superior_attestation_exists_in_state<T: EthSpec>(
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
attestation: &Attestation,
|
||||||
|
) -> bool {
|
||||||
state
|
state
|
||||||
.current_epoch_attestations
|
.current_epoch_attestations
|
||||||
.iter()
|
.iter()
|
||||||
@ -522,10 +539,10 @@ where
|
|||||||
/// The keys in the map should be validator indices, which will be looked up
|
/// The keys in the map should be validator indices, which will be looked up
|
||||||
/// in the state's validator registry and then passed to `prune_if`.
|
/// in the state's validator registry and then passed to `prune_if`.
|
||||||
/// Entries for unknown validators will be kept.
|
/// Entries for unknown validators will be kept.
|
||||||
fn prune_validator_hash_map<T, F>(
|
fn prune_validator_hash_map<T, F, B: EthSpec>(
|
||||||
map: &mut HashMap<u64, T>,
|
map: &mut HashMap<u64, T>,
|
||||||
prune_if: F,
|
prune_if: F,
|
||||||
finalized_state: &BeaconState,
|
finalized_state: &BeaconState<B>,
|
||||||
) where
|
) where
|
||||||
F: Fn(&Validator) -> bool,
|
F: Fn(&Validator) -> bool,
|
||||||
{
|
{
|
||||||
@ -649,7 +666,11 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create a random deposit (with a valid proof of posession)
|
// Create a random deposit (with a valid proof of posession)
|
||||||
fn make_deposit(rng: &mut XorShiftRng, state: &BeaconState, spec: &ChainSpec) -> Deposit {
|
fn make_deposit<T: EthSpec>(
|
||||||
|
rng: &mut XorShiftRng,
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Deposit {
|
||||||
let keypair = Keypair::random();
|
let keypair = Keypair::random();
|
||||||
let mut deposit = Deposit::random_for_test(rng);
|
let mut deposit = Deposit::random_for_test(rng);
|
||||||
let mut deposit_input = DepositInput {
|
let mut deposit_input = DepositInput {
|
||||||
@ -668,9 +689,9 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create `count` dummy deposits with sequential deposit IDs beginning from `start`.
|
// Create `count` dummy deposits with sequential deposit IDs beginning from `start`.
|
||||||
fn dummy_deposits(
|
fn dummy_deposits<T: EthSpec>(
|
||||||
rng: &mut XorShiftRng,
|
rng: &mut XorShiftRng,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
start: u64,
|
start: u64,
|
||||||
count: u64,
|
count: u64,
|
||||||
@ -685,23 +706,28 @@ mod tests {
|
|||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState) {
|
fn test_state(rng: &mut XorShiftRng) -> (ChainSpec, BeaconState<FoundationEthSpec>) {
|
||||||
let spec = ChainSpec::foundation();
|
let spec = FoundationEthSpec::spec();
|
||||||
|
|
||||||
let mut state = BeaconState::random_for_test(rng);
|
let mut state = BeaconState::random_for_test(rng);
|
||||||
|
|
||||||
state.fork = Fork::genesis(&spec);
|
state.fork = Fork::genesis(&spec);
|
||||||
|
|
||||||
(spec, state)
|
(spec, state)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(not(debug_assertions))]
|
||||||
|
mod release_tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
/// Create a signed attestation for use in tests.
|
/// Create a signed attestation for use in tests.
|
||||||
/// Signed by all validators in `committee[signing_range]` and `committee[extra_signer]`.
|
/// Signed by all validators in `committee[signing_range]` and `committee[extra_signer]`.
|
||||||
#[cfg(not(debug_assertions))]
|
fn signed_attestation<R: std::slice::SliceIndex<[usize], Output = [usize]>, B: EthSpec>(
|
||||||
fn signed_attestation<R: std::slice::SliceIndex<[usize], Output = [usize]>>(
|
|
||||||
committee: &CrosslinkCommittee,
|
committee: &CrosslinkCommittee,
|
||||||
keypairs: &[Keypair],
|
keypairs: &[Keypair],
|
||||||
signing_range: R,
|
signing_range: R,
|
||||||
slot: Slot,
|
slot: Slot,
|
||||||
state: &BeaconState,
|
state: &BeaconState<B>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
extra_signer: Option<usize>,
|
extra_signer: Option<usize>,
|
||||||
) -> Attestation {
|
) -> Attestation {
|
||||||
@ -728,25 +754,32 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Test state for attestation-related tests.
|
/// Test state for attestation-related tests.
|
||||||
#[cfg(not(debug_assertions))]
|
fn attestation_test_state<B: EthSpec>(
|
||||||
fn attestation_test_state(
|
|
||||||
spec: &ChainSpec,
|
|
||||||
num_committees: usize,
|
num_committees: usize,
|
||||||
) -> (BeaconState, Vec<Keypair>) {
|
) -> (BeaconState<B>, Vec<Keypair>, ChainSpec) {
|
||||||
|
let spec = B::spec();
|
||||||
|
|
||||||
let num_validators =
|
let num_validators =
|
||||||
num_committees * (spec.slots_per_epoch * spec.target_committee_size) as usize;
|
num_committees * (spec.slots_per_epoch * spec.target_committee_size) as usize;
|
||||||
let mut state_builder =
|
let mut state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(
|
||||||
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, spec);
|
num_validators,
|
||||||
|
&spec,
|
||||||
|
);
|
||||||
let slot_offset = 1000 * spec.slots_per_epoch + spec.slots_per_epoch / 2;
|
let slot_offset = 1000 * spec.slots_per_epoch + spec.slots_per_epoch / 2;
|
||||||
let slot = spec.genesis_slot + slot_offset;
|
let slot = spec.genesis_slot + slot_offset;
|
||||||
state_builder.teleport_to_slot(slot, spec);
|
state_builder.teleport_to_slot(slot, &spec);
|
||||||
state_builder.build_caches(spec).unwrap();
|
state_builder.build_caches(&spec).unwrap();
|
||||||
state_builder.build()
|
let (state, keypairs) = state_builder.build();
|
||||||
|
|
||||||
|
(state, keypairs, FoundationEthSpec::spec())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Set the latest crosslink in the state to match the attestation.
|
/// Set the latest crosslink in the state to match the attestation.
|
||||||
#[cfg(not(debug_assertions))]
|
fn fake_latest_crosslink<B: EthSpec>(
|
||||||
fn fake_latest_crosslink(att: &Attestation, state: &mut BeaconState, spec: &ChainSpec) {
|
att: &Attestation,
|
||||||
|
state: &mut BeaconState<B>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) {
|
||||||
state.latest_crosslinks[att.data.shard as usize] = Crosslink {
|
state.latest_crosslinks[att.data.shard as usize] = Crosslink {
|
||||||
crosslink_data_root: att.data.crosslink_data_root,
|
crosslink_data_root: att.data.crosslink_data_root,
|
||||||
epoch: att.data.slot.epoch(spec.slots_per_epoch),
|
epoch: att.data.slot.epoch(spec.slots_per_epoch),
|
||||||
@ -754,10 +787,10 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg(not(debug_assertions))]
|
|
||||||
fn test_attestation_score() {
|
fn test_attestation_score() {
|
||||||
let spec = &ChainSpec::foundation();
|
let (ref mut state, ref keypairs, ref spec) =
|
||||||
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
|
attestation_test_state::<FoundationEthSpec>(1);
|
||||||
|
|
||||||
let slot = state.slot - 1;
|
let slot = state.slot - 1;
|
||||||
let committees = state
|
let committees = state
|
||||||
.get_crosslink_committees_at_slot(slot, spec)
|
.get_crosslink_committees_at_slot(slot, spec)
|
||||||
@ -786,10 +819,10 @@ mod tests {
|
|||||||
|
|
||||||
/// End-to-end test of basic attestation handling.
|
/// End-to-end test of basic attestation handling.
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg(not(debug_assertions))]
|
|
||||||
fn attestation_aggregation_insert_get_prune() {
|
fn attestation_aggregation_insert_get_prune() {
|
||||||
let spec = &ChainSpec::foundation();
|
let (ref mut state, ref keypairs, ref spec) =
|
||||||
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
|
attestation_test_state::<FoundationEthSpec>(1);
|
||||||
|
|
||||||
let op_pool = OperationPool::new();
|
let op_pool = OperationPool::new();
|
||||||
|
|
||||||
let slot = state.slot - 1;
|
let slot = state.slot - 1;
|
||||||
@ -852,10 +885,10 @@ mod tests {
|
|||||||
|
|
||||||
/// Adding an attestation already in the pool should not increase the size of the pool.
|
/// Adding an attestation already in the pool should not increase the size of the pool.
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg(not(debug_assertions))]
|
|
||||||
fn attestation_duplicate() {
|
fn attestation_duplicate() {
|
||||||
let spec = &ChainSpec::foundation();
|
let (ref mut state, ref keypairs, ref spec) =
|
||||||
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
|
attestation_test_state::<FoundationEthSpec>(1);
|
||||||
|
|
||||||
let op_pool = OperationPool::new();
|
let op_pool = OperationPool::new();
|
||||||
|
|
||||||
let slot = state.slot - 1;
|
let slot = state.slot - 1;
|
||||||
@ -879,10 +912,10 @@ mod tests {
|
|||||||
/// Adding lots of attestations that only intersect pairwise should lead to two aggregate
|
/// Adding lots of attestations that only intersect pairwise should lead to two aggregate
|
||||||
/// attestations.
|
/// attestations.
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg(not(debug_assertions))]
|
|
||||||
fn attestation_pairwise_overlapping() {
|
fn attestation_pairwise_overlapping() {
|
||||||
let spec = &ChainSpec::foundation();
|
let (ref mut state, ref keypairs, ref spec) =
|
||||||
let (ref mut state, ref keypairs) = attestation_test_state(spec, 1);
|
attestation_test_state::<FoundationEthSpec>(1);
|
||||||
|
|
||||||
let op_pool = OperationPool::new();
|
let op_pool = OperationPool::new();
|
||||||
|
|
||||||
let slot = state.slot - 1;
|
let slot = state.slot - 1;
|
||||||
@ -922,12 +955,13 @@ mod tests {
|
|||||||
/// high-quality attestations. To ensure that no aggregation occurs, ALL attestations
|
/// high-quality attestations. To ensure that no aggregation occurs, ALL attestations
|
||||||
/// are also signed by the 0th member of the committee.
|
/// are also signed by the 0th member of the committee.
|
||||||
#[test]
|
#[test]
|
||||||
#[cfg(not(debug_assertions))]
|
|
||||||
fn attestation_get_max() {
|
fn attestation_get_max() {
|
||||||
let spec = &ChainSpec::foundation();
|
|
||||||
let small_step_size = 2;
|
let small_step_size = 2;
|
||||||
let big_step_size = 4;
|
let big_step_size = 4;
|
||||||
let (ref mut state, ref keypairs) = attestation_test_state(spec, big_step_size);
|
|
||||||
|
let (ref mut state, ref keypairs, ref spec) =
|
||||||
|
attestation_test_state::<FoundationEthSpec>(big_step_size);
|
||||||
|
|
||||||
let op_pool = OperationPool::new();
|
let op_pool = OperationPool::new();
|
||||||
|
|
||||||
let slot = state.slot - 1;
|
let slot = state.slot - 1;
|
||||||
@ -982,6 +1016,7 @@ mod tests {
|
|||||||
assert!(att.aggregation_bitfield.num_set_bits() >= big_step_size);
|
assert!(att.aggregation_bitfield.num_set_bits() >= big_step_size);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// TODO: more tests
|
// TODO: more tests
|
||||||
}
|
}
|
||||||
|
@ -14,7 +14,6 @@ env_logger = "0.6.0"
|
|||||||
serde = "1.0"
|
serde = "1.0"
|
||||||
serde_derive = "1.0"
|
serde_derive = "1.0"
|
||||||
serde_yaml = "0.8"
|
serde_yaml = "0.8"
|
||||||
yaml-utils = { path = "yaml_utils" }
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
bls = { path = "../utils/bls" }
|
bls = { path = "../utils/bls" }
|
||||||
@ -30,6 +29,3 @@ tree_hash = { path = "../utils/tree_hash" }
|
|||||||
tree_hash_derive = { path = "../utils/tree_hash_derive" }
|
tree_hash_derive = { path = "../utils/tree_hash_derive" }
|
||||||
types = { path = "../types" }
|
types = { path = "../types" }
|
||||||
rayon = "1.0"
|
rayon = "1.0"
|
||||||
|
|
||||||
[features]
|
|
||||||
fake_crypto = ["bls/fake_crypto"]
|
|
||||||
|
@ -1 +0,0 @@
|
|||||||
../utils/bls/build.rs
|
|
@ -3,8 +3,8 @@ use types::{BeaconStateError as Error, *};
|
|||||||
/// Exit the validator of the given `index`.
|
/// Exit the validator of the given `index`.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn exit_validator(
|
pub fn exit_validator<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
validator_index: usize,
|
validator_index: usize,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
@ -4,8 +4,8 @@ use types::{BeaconStateError as Error, *};
|
|||||||
/// Slash the validator with index ``index``.
|
/// Slash the validator with index ``index``.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn slash_validator(
|
pub fn slash_validator<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
validator_index: usize,
|
validator_index: usize,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -36,8 +36,7 @@ pub fn slash_validator(
|
|||||||
|
|
||||||
state.set_slashed_balance(
|
state.set_slashed_balance(
|
||||||
current_epoch,
|
current_epoch,
|
||||||
state.get_slashed_balance(current_epoch, spec)? + effective_balance,
|
state.get_slashed_balance(current_epoch)? + effective_balance,
|
||||||
spec,
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let whistleblower_index =
|
let whistleblower_index =
|
||||||
@ -56,7 +55,7 @@ pub fn slash_validator(
|
|||||||
state.validator_registry[validator_index].slashed = true;
|
state.validator_registry[validator_index].slashed = true;
|
||||||
|
|
||||||
state.validator_registry[validator_index].withdrawable_epoch =
|
state.validator_registry[validator_index].withdrawable_epoch =
|
||||||
current_epoch + Epoch::from(spec.latest_slashed_exit_length);
|
current_epoch + Epoch::from(T::LatestSlashedExitLength::to_usize());
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -10,12 +10,12 @@ pub enum GenesisError {
|
|||||||
/// Returns the genesis `BeaconState`
|
/// Returns the genesis `BeaconState`
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn get_genesis_state(
|
pub fn get_genesis_state<T: EthSpec>(
|
||||||
genesis_validator_deposits: &[Deposit],
|
genesis_validator_deposits: &[Deposit],
|
||||||
genesis_time: u64,
|
genesis_time: u64,
|
||||||
genesis_eth1_data: Eth1Data,
|
genesis_eth1_data: Eth1Data,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<BeaconState, BlockProcessingError> {
|
) -> Result<BeaconState<T>, BlockProcessingError> {
|
||||||
// Get the genesis `BeaconState`
|
// Get the genesis `BeaconState`
|
||||||
let mut state = BeaconState::genesis(genesis_time, genesis_eth1_data, spec);
|
let mut state = BeaconState::genesis(genesis_time, genesis_eth1_data, spec);
|
||||||
|
|
||||||
@ -37,7 +37,7 @@ pub fn get_genesis_state(
|
|||||||
.get_cached_active_validator_indices(RelativeEpoch::Current, spec)?
|
.get_cached_active_validator_indices(RelativeEpoch::Current, spec)?
|
||||||
.to_vec();
|
.to_vec();
|
||||||
let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.tree_hash_root());
|
let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.tree_hash_root());
|
||||||
state.fill_active_index_roots_with(genesis_active_index_root, spec);
|
state.fill_active_index_roots_with(genesis_active_index_root);
|
||||||
|
|
||||||
// Generate the current shuffling seed.
|
// Generate the current shuffling seed.
|
||||||
state.current_shuffling_seed = state.generate_seed(spec.genesis_epoch, spec)?;
|
state.current_shuffling_seed = state.generate_seed(spec.genesis_epoch, spec)?;
|
||||||
|
@ -40,8 +40,8 @@ const VERIFY_DEPOSIT_MERKLE_PROOFS: bool = false;
|
|||||||
/// returns an error describing why the block was invalid or how the function failed to execute.
|
/// returns an error describing why the block was invalid or how the function failed to execute.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn per_block_processing(
|
pub fn per_block_processing<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -55,8 +55,8 @@ pub fn per_block_processing(
|
|||||||
/// returns an error describing why the block was invalid or how the function failed to execute.
|
/// returns an error describing why the block was invalid or how the function failed to execute.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn per_block_processing_without_verifying_block_signature(
|
pub fn per_block_processing_without_verifying_block_signature<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -70,8 +70,8 @@ pub fn per_block_processing_without_verifying_block_signature(
|
|||||||
/// returns an error describing why the block was invalid or how the function failed to execute.
|
/// returns an error describing why the block was invalid or how the function failed to execute.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn per_block_processing_signature_optional(
|
fn per_block_processing_signature_optional<T: EthSpec>(
|
||||||
mut state: &mut BeaconState,
|
mut state: &mut BeaconState<T>,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
should_verify_block_signature: bool,
|
should_verify_block_signature: bool,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -100,8 +100,8 @@ fn per_block_processing_signature_optional(
|
|||||||
/// Processes the block header.
|
/// Processes the block header.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_block_header(
|
pub fn process_block_header<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -125,8 +125,8 @@ pub fn process_block_header(
|
|||||||
/// Verifies the signature of a block.
|
/// Verifies the signature of a block.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_block_signature(
|
pub fn verify_block_signature<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -153,8 +153,8 @@ pub fn verify_block_signature(
|
|||||||
/// `state.latest_randao_mixes`.
|
/// `state.latest_randao_mixes`.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_randao(
|
pub fn process_randao<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
block: &BeaconBlock,
|
block: &BeaconBlock,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -184,7 +184,10 @@ pub fn process_randao(
|
|||||||
/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
|
/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> {
|
pub fn process_eth1_data<T: EthSpec>(
|
||||||
|
state: &mut BeaconState<T>,
|
||||||
|
eth1_data: &Eth1Data,
|
||||||
|
) -> Result<(), Error> {
|
||||||
// Attempt to find a `Eth1DataVote` with matching `Eth1Data`.
|
// Attempt to find a `Eth1DataVote` with matching `Eth1Data`.
|
||||||
let matching_eth1_vote_index = state
|
let matching_eth1_vote_index = state
|
||||||
.eth1_data_votes
|
.eth1_data_votes
|
||||||
@ -210,8 +213,8 @@ pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Resul
|
|||||||
/// an `Err` describing the invalid object or cause of failure.
|
/// an `Err` describing the invalid object or cause of failure.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_proposer_slashings(
|
pub fn process_proposer_slashings<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
proposer_slashings: &[ProposerSlashing],
|
proposer_slashings: &[ProposerSlashing],
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -243,8 +246,8 @@ pub fn process_proposer_slashings(
|
|||||||
/// an `Err` describing the invalid object or cause of failure.
|
/// an `Err` describing the invalid object or cause of failure.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_attester_slashings(
|
pub fn process_attester_slashings<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
attester_slashings: &[AttesterSlashing],
|
attester_slashings: &[AttesterSlashing],
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -301,8 +304,8 @@ pub fn process_attester_slashings(
|
|||||||
/// an `Err` describing the invalid object or cause of failure.
|
/// an `Err` describing the invalid object or cause of failure.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_attestations(
|
pub fn process_attestations<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
attestations: &[Attestation],
|
attestations: &[Attestation],
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -343,8 +346,8 @@ pub fn process_attestations(
|
|||||||
/// an `Err` describing the invalid object or cause of failure.
|
/// an `Err` describing the invalid object or cause of failure.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_deposits(
|
pub fn process_deposits<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
deposits: &[Deposit],
|
deposits: &[Deposit],
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -413,8 +416,8 @@ pub fn process_deposits(
|
|||||||
/// an `Err` describing the invalid object or cause of failure.
|
/// an `Err` describing the invalid object or cause of failure.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_exits(
|
pub fn process_exits<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
voluntary_exits: &[VoluntaryExit],
|
voluntary_exits: &[VoluntaryExit],
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -445,8 +448,8 @@ pub fn process_exits(
|
|||||||
/// an `Err` describing the invalid object or cause of failure.
|
/// an `Err` describing the invalid object or cause of failure.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_transfers(
|
pub fn process_transfers<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
transfers: &[Transfer],
|
transfers: &[Transfer],
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
@ -9,8 +9,8 @@ use types::*;
|
|||||||
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
|
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn validate_attestation(
|
pub fn validate_attestation<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestation: &Attestation,
|
attestation: &Attestation,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -18,8 +18,8 @@ pub fn validate_attestation(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Like `validate_attestation` but doesn't run checks which may become true in future states.
|
/// Like `validate_attestation` but doesn't run checks which may become true in future states.
|
||||||
pub fn validate_attestation_time_independent_only(
|
pub fn validate_attestation_time_independent_only<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestation: &Attestation,
|
attestation: &Attestation,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -32,8 +32,8 @@ pub fn validate_attestation_time_independent_only(
|
|||||||
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
|
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn validate_attestation_without_signature(
|
pub fn validate_attestation_without_signature<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestation: &Attestation,
|
attestation: &Attestation,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -45,8 +45,8 @@ pub fn validate_attestation_without_signature(
|
|||||||
///
|
///
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn validate_attestation_parametric(
|
fn validate_attestation_parametric<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestation: &Attestation,
|
attestation: &Attestation,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
verify_signature: bool,
|
verify_signature: bool,
|
||||||
@ -168,9 +168,9 @@ fn validate_attestation_parametric(
|
|||||||
/// match the current (or previous) justified epoch and root from the state.
|
/// match the current (or previous) justified epoch and root from the state.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn verify_justified_epoch_and_root(
|
fn verify_justified_epoch_and_root<T: EthSpec>(
|
||||||
attestation: &Attestation,
|
attestation: &Attestation,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let state_epoch = state.slot.epoch(spec.slots_per_epoch);
|
let state_epoch = state.slot.epoch(spec.slots_per_epoch);
|
||||||
@ -223,8 +223,8 @@ fn verify_justified_epoch_and_root(
|
|||||||
/// - A `validator_index` in `committee` is not in `state.validator_registry`.
|
/// - A `validator_index` in `committee` is not in `state.validator_registry`.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn verify_attestation_signature(
|
fn verify_attestation_signature<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
committee: &[usize],
|
committee: &[usize],
|
||||||
a: &Attestation,
|
a: &Attestation,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
@ -8,8 +8,8 @@ use types::*;
|
|||||||
/// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
|
/// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_attester_slashing(
|
pub fn verify_attester_slashing<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attester_slashing: &AttesterSlashing,
|
attester_slashing: &AttesterSlashing,
|
||||||
should_verify_slashable_attestations: bool,
|
should_verify_slashable_attestations: bool,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -42,8 +42,8 @@ pub fn verify_attester_slashing(
|
|||||||
/// Returns Ok(indices) if `indices.len() > 0`.
|
/// Returns Ok(indices) if `indices.len() > 0`.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn gather_attester_slashing_indices(
|
pub fn gather_attester_slashing_indices<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attester_slashing: &AttesterSlashing,
|
attester_slashing: &AttesterSlashing,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<Vec<u64>, Error> {
|
) -> Result<Vec<u64>, Error> {
|
||||||
@ -57,8 +57,8 @@ pub fn gather_attester_slashing_indices(
|
|||||||
|
|
||||||
/// Same as `gather_attester_slashing_indices` but allows the caller to specify the criteria
|
/// Same as `gather_attester_slashing_indices` but allows the caller to specify the criteria
|
||||||
/// for determining whether a given validator should be considered slashed.
|
/// for determining whether a given validator should be considered slashed.
|
||||||
pub fn gather_attester_slashing_indices_modular<F>(
|
pub fn gather_attester_slashing_indices_modular<F, T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attester_slashing: &AttesterSlashing,
|
attester_slashing: &AttesterSlashing,
|
||||||
is_slashed: F,
|
is_slashed: F,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
@ -16,8 +16,8 @@ use types::*;
|
|||||||
/// Note: this function is incomplete.
|
/// Note: this function is incomplete.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_deposit(
|
pub fn verify_deposit<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
deposit: &Deposit,
|
deposit: &Deposit,
|
||||||
verify_merkle_branch: bool,
|
verify_merkle_branch: bool,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -47,7 +47,10 @@ pub fn verify_deposit(
|
|||||||
/// Verify that the `Deposit` index is correct.
|
/// Verify that the `Deposit` index is correct.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_deposit_index(state: &BeaconState, deposit: &Deposit) -> Result<(), Error> {
|
pub fn verify_deposit_index<T: EthSpec>(
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
deposit: &Deposit,
|
||||||
|
) -> Result<(), Error> {
|
||||||
verify!(
|
verify!(
|
||||||
deposit.index == state.deposit_index,
|
deposit.index == state.deposit_index,
|
||||||
Invalid::BadIndex {
|
Invalid::BadIndex {
|
||||||
@ -65,8 +68,8 @@ pub fn verify_deposit_index(state: &BeaconState, deposit: &Deposit) -> Result<()
|
|||||||
/// ## Errors
|
/// ## Errors
|
||||||
///
|
///
|
||||||
/// Errors if the state's `pubkey_cache` is not current.
|
/// Errors if the state's `pubkey_cache` is not current.
|
||||||
pub fn get_existing_validator_index(
|
pub fn get_existing_validator_index<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
deposit: &Deposit,
|
deposit: &Deposit,
|
||||||
) -> Result<Option<u64>, Error> {
|
) -> Result<Option<u64>, Error> {
|
||||||
let deposit_input = &deposit.deposit_data.deposit_input;
|
let deposit_input = &deposit.deposit_data.deposit_input;
|
||||||
@ -89,11 +92,15 @@ pub fn get_existing_validator_index(
|
|||||||
/// Verify that a deposit is included in the state's eth1 deposit root.
|
/// Verify that a deposit is included in the state's eth1 deposit root.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool {
|
fn verify_deposit_merkle_proof<T: EthSpec>(
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
deposit: &Deposit,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> bool {
|
||||||
let leaf = hash(&get_serialized_deposit_data(deposit));
|
let leaf = hash(&get_serialized_deposit_data(deposit));
|
||||||
verify_merkle_proof(
|
verify_merkle_proof(
|
||||||
Hash256::from_slice(&leaf),
|
Hash256::from_slice(&leaf),
|
||||||
&deposit.proof,
|
&deposit.proof[..],
|
||||||
spec.deposit_contract_tree_depth as usize,
|
spec.deposit_contract_tree_depth as usize,
|
||||||
deposit.index as usize,
|
deposit.index as usize,
|
||||||
state.latest_eth1_data.deposit_root,
|
state.latest_eth1_data.deposit_root,
|
||||||
|
@ -8,8 +8,8 @@ use types::*;
|
|||||||
/// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity.
|
/// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_exit(
|
pub fn verify_exit<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
exit: &VoluntaryExit,
|
exit: &VoluntaryExit,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -17,8 +17,8 @@ pub fn verify_exit(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Like `verify_exit` but doesn't run checks which may become true in future states.
|
/// Like `verify_exit` but doesn't run checks which may become true in future states.
|
||||||
pub fn verify_exit_time_independent_only(
|
pub fn verify_exit_time_independent_only<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
exit: &VoluntaryExit,
|
exit: &VoluntaryExit,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -26,8 +26,8 @@ pub fn verify_exit_time_independent_only(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true.
|
/// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true.
|
||||||
fn verify_exit_parametric(
|
fn verify_exit_parametric<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
exit: &VoluntaryExit,
|
exit: &VoluntaryExit,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
time_independent_only: bool,
|
time_independent_only: bool,
|
||||||
|
@ -8,9 +8,9 @@ use types::*;
|
|||||||
/// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity.
|
/// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_proposer_slashing(
|
pub fn verify_proposer_slashing<T: EthSpec>(
|
||||||
proposer_slashing: &ProposerSlashing,
|
proposer_slashing: &ProposerSlashing,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let proposer = state
|
let proposer = state
|
||||||
|
@ -11,8 +11,8 @@ use types::*;
|
|||||||
/// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity.
|
/// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_slashable_attestation(
|
pub fn verify_slashable_attestation<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
slashable_attestation: &SlashableAttestation,
|
slashable_attestation: &SlashableAttestation,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
@ -11,8 +11,8 @@ use types::*;
|
|||||||
/// Note: this function is incomplete.
|
/// Note: this function is incomplete.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn verify_transfer(
|
pub fn verify_transfer<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
transfer: &Transfer,
|
transfer: &Transfer,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -20,8 +20,8 @@ pub fn verify_transfer(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Like `verify_transfer` but doesn't run checks which may become true in future states.
|
/// Like `verify_transfer` but doesn't run checks which may become true in future states.
|
||||||
pub fn verify_transfer_time_independent_only(
|
pub fn verify_transfer_time_independent_only<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
transfer: &Transfer,
|
transfer: &Transfer,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -29,8 +29,8 @@ pub fn verify_transfer_time_independent_only(
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Parametric version of `verify_transfer` that allows some checks to be skipped.
|
/// Parametric version of `verify_transfer` that allows some checks to be skipped.
|
||||||
fn verify_transfer_parametric(
|
fn verify_transfer_parametric<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
transfer: &Transfer,
|
transfer: &Transfer,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
time_independent_only: bool,
|
time_independent_only: bool,
|
||||||
@ -123,8 +123,8 @@ fn verify_transfer_parametric(
|
|||||||
/// Does not check that the transfer is valid, however checks for overflow in all actions.
|
/// Does not check that the transfer is valid, however checks for overflow in all actions.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn execute_transfer(
|
pub fn execute_transfer<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
transfer: &Transfer,
|
transfer: &Transfer,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
@ -33,7 +33,10 @@ pub type WinningRootHashSet = HashMap<u64, WinningRoot>;
|
|||||||
/// returned, a state might be "half-processed" and therefore in an invalid state.
|
/// returned, a state might be "half-processed" and therefore in an invalid state.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
pub fn per_epoch_processing<T: EthSpec>(
|
||||||
|
state: &mut BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
// Ensure the previous and next epoch caches are built.
|
// Ensure the previous and next epoch caches are built.
|
||||||
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
|
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
|
||||||
state.build_epoch_cache(RelativeEpoch::Current, spec)?;
|
state.build_epoch_cache(RelativeEpoch::Current, spec)?;
|
||||||
@ -87,7 +90,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result
|
|||||||
/// Maybe resets the eth1 period.
|
/// Maybe resets the eth1 period.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn maybe_reset_eth1_period(state: &mut BeaconState, spec: &ChainSpec) {
|
pub fn maybe_reset_eth1_period<T: EthSpec>(state: &mut BeaconState<T>, spec: &ChainSpec) {
|
||||||
let next_epoch = state.next_epoch(spec);
|
let next_epoch = state.next_epoch(spec);
|
||||||
let voting_period = spec.epochs_per_eth1_voting_period;
|
let voting_period = spec.epochs_per_eth1_voting_period;
|
||||||
|
|
||||||
@ -109,8 +112,8 @@ pub fn maybe_reset_eth1_period(state: &mut BeaconState, spec: &ChainSpec) {
|
|||||||
/// - `previous_justified_epoch`
|
/// - `previous_justified_epoch`
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn update_justification_and_finalization(
|
pub fn update_justification_and_finalization<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
total_balances: &TotalBalances,
|
total_balances: &TotalBalances,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -160,13 +163,13 @@ pub fn update_justification_and_finalization(
|
|||||||
if new_justified_epoch != state.current_justified_epoch {
|
if new_justified_epoch != state.current_justified_epoch {
|
||||||
state.current_justified_epoch = new_justified_epoch;
|
state.current_justified_epoch = new_justified_epoch;
|
||||||
state.current_justified_root =
|
state.current_justified_root =
|
||||||
*state.get_block_root(new_justified_epoch.start_slot(spec.slots_per_epoch), spec)?;
|
*state.get_block_root(new_justified_epoch.start_slot(spec.slots_per_epoch))?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if new_finalized_epoch != state.finalized_epoch {
|
if new_finalized_epoch != state.finalized_epoch {
|
||||||
state.finalized_epoch = new_finalized_epoch;
|
state.finalized_epoch = new_finalized_epoch;
|
||||||
state.finalized_root =
|
state.finalized_root =
|
||||||
*state.get_block_root(new_finalized_epoch.start_slot(spec.slots_per_epoch), spec)?;
|
*state.get_block_root(new_finalized_epoch.start_slot(spec.slots_per_epoch))?;
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -179,8 +182,8 @@ pub fn update_justification_and_finalization(
|
|||||||
/// Also returns a `WinningRootHashSet` for later use during epoch processing.
|
/// Also returns a `WinningRootHashSet` for later use during epoch processing.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_crosslinks(
|
pub fn process_crosslinks<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<WinningRootHashSet, Error> {
|
) -> Result<WinningRootHashSet, Error> {
|
||||||
let mut winning_root_for_shards: WinningRootHashSet = HashMap::new();
|
let mut winning_root_for_shards: WinningRootHashSet = HashMap::new();
|
||||||
@ -222,7 +225,10 @@ pub fn process_crosslinks(
|
|||||||
/// Finish up an epoch update.
|
/// Finish up an epoch update.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn finish_epoch_update(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
pub fn finish_epoch_update<T: EthSpec>(
|
||||||
|
state: &mut BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
let current_epoch = state.current_epoch(spec);
|
let current_epoch = state.current_epoch(spec);
|
||||||
let next_epoch = state.next_epoch(spec);
|
let next_epoch = state.next_epoch(spec);
|
||||||
|
|
||||||
@ -241,11 +247,7 @@ pub fn finish_epoch_update(state: &mut BeaconState, spec: &ChainSpec) -> Result<
|
|||||||
state.set_active_index_root(next_epoch, active_index_root, spec)?;
|
state.set_active_index_root(next_epoch, active_index_root, spec)?;
|
||||||
|
|
||||||
// Set total slashed balances
|
// Set total slashed balances
|
||||||
state.set_slashed_balance(
|
state.set_slashed_balance(next_epoch, state.get_slashed_balance(current_epoch)?)?;
|
||||||
next_epoch,
|
|
||||||
state.get_slashed_balance(current_epoch, spec)?,
|
|
||||||
spec,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Set randao mix
|
// Set randao mix
|
||||||
state.set_randao_mix(
|
state.set_randao_mix(
|
||||||
@ -257,8 +259,8 @@ pub fn finish_epoch_update(state: &mut BeaconState, spec: &ChainSpec) -> Result<
|
|||||||
state.slot -= 1;
|
state.slot -= 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
if next_epoch.as_u64() % (spec.slots_per_historical_root as u64 / spec.slots_per_epoch) == 0 {
|
if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / spec.slots_per_epoch) == 0 {
|
||||||
let historical_batch: HistoricalBatch = state.historical_batch();
|
let historical_batch = state.historical_batch();
|
||||||
state
|
state
|
||||||
.historical_roots
|
.historical_roots
|
||||||
.push(Hash256::from_slice(&historical_batch.tree_hash_root()[..]));
|
.push(Hash256::from_slice(&historical_batch.tree_hash_root()[..]));
|
||||||
|
@ -33,8 +33,8 @@ impl std::ops::AddAssign for Delta {
|
|||||||
/// Apply attester and proposer rewards.
|
/// Apply attester and proposer rewards.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn apply_rewards(
|
pub fn apply_rewards<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
validator_statuses: &mut ValidatorStatuses,
|
validator_statuses: &mut ValidatorStatuses,
|
||||||
winning_root_for_shards: &WinningRootHashSet,
|
winning_root_for_shards: &WinningRootHashSet,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -80,9 +80,9 @@ pub fn apply_rewards(
|
|||||||
/// attestation in the previous epoch.
|
/// attestation in the previous epoch.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn get_proposer_deltas(
|
fn get_proposer_deltas<T: EthSpec>(
|
||||||
deltas: &mut Vec<Delta>,
|
deltas: &mut Vec<Delta>,
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
validator_statuses: &mut ValidatorStatuses,
|
validator_statuses: &mut ValidatorStatuses,
|
||||||
winning_root_for_shards: &WinningRootHashSet,
|
winning_root_for_shards: &WinningRootHashSet,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -121,9 +121,9 @@ fn get_proposer_deltas(
|
|||||||
/// Apply rewards for participation in attestations during the previous epoch.
|
/// Apply rewards for participation in attestations during the previous epoch.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn get_justification_and_finalization_deltas(
|
fn get_justification_and_finalization_deltas<T: EthSpec>(
|
||||||
deltas: &mut Vec<Delta>,
|
deltas: &mut Vec<Delta>,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
validator_statuses: &ValidatorStatuses,
|
validator_statuses: &ValidatorStatuses,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -262,9 +262,9 @@ fn compute_inactivity_leak_delta(
|
|||||||
/// Calculate the deltas based upon the winning roots for attestations during the previous epoch.
|
/// Calculate the deltas based upon the winning roots for attestations during the previous epoch.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn get_crosslink_deltas(
|
fn get_crosslink_deltas<T: EthSpec>(
|
||||||
deltas: &mut Vec<Delta>,
|
deltas: &mut Vec<Delta>,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
validator_statuses: &ValidatorStatuses,
|
validator_statuses: &ValidatorStatuses,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -296,8 +296,8 @@ fn get_crosslink_deltas(
|
|||||||
/// Returns the base reward for some validator.
|
/// Returns the base reward for some validator.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn get_base_reward(
|
fn get_base_reward<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
index: usize,
|
index: usize,
|
||||||
previous_total_balance: u64,
|
previous_total_balance: u64,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -313,8 +313,8 @@ fn get_base_reward(
|
|||||||
/// Returns the inactivity penalty for some validator.
|
/// Returns the inactivity penalty for some validator.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn get_inactivity_penalty(
|
fn get_inactivity_penalty<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
index: usize,
|
index: usize,
|
||||||
epochs_since_finality: u64,
|
epochs_since_finality: u64,
|
||||||
previous_total_balance: u64,
|
previous_total_balance: u64,
|
||||||
@ -329,6 +329,6 @@ fn get_inactivity_penalty(
|
|||||||
/// Returns the epochs since the last finalized epoch.
|
/// Returns the epochs since the last finalized epoch.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn epochs_since_finality(state: &BeaconState, spec: &ChainSpec) -> Epoch {
|
fn epochs_since_finality<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> Epoch {
|
||||||
state.current_epoch(spec) + 1 - state.finalized_epoch
|
state.current_epoch(spec) + 1 - state.finalized_epoch
|
||||||
}
|
}
|
||||||
|
@ -4,8 +4,8 @@ use types::*;
|
|||||||
/// Returns validator indices which participated in the attestation.
|
/// Returns validator indices which participated in the attestation.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn get_attestation_participants(
|
pub fn get_attestation_participants<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestation_data: &AttestationData,
|
attestation_data: &AttestationData,
|
||||||
bitfield: &Bitfield,
|
bitfield: &Bitfield,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
@ -6,8 +6,8 @@ use types::*;
|
|||||||
/// slot.
|
/// slot.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn inclusion_distance(
|
pub fn inclusion_distance<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestations: &[&PendingAttestation],
|
attestations: &[&PendingAttestation],
|
||||||
validator_index: usize,
|
validator_index: usize,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -19,8 +19,8 @@ pub fn inclusion_distance(
|
|||||||
/// Returns the slot of the earliest included attestation for some validator.
|
/// Returns the slot of the earliest included attestation for some validator.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn inclusion_slot(
|
pub fn inclusion_slot<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestations: &[&PendingAttestation],
|
attestations: &[&PendingAttestation],
|
||||||
validator_index: usize,
|
validator_index: usize,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -32,8 +32,8 @@ pub fn inclusion_slot(
|
|||||||
/// Finds the earliest included attestation for some validator.
|
/// Finds the earliest included attestation for some validator.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn earliest_included_attestation(
|
fn earliest_included_attestation<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
attestations: &[&PendingAttestation],
|
attestations: &[&PendingAttestation],
|
||||||
validator_index: usize,
|
validator_index: usize,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
@ -5,7 +5,10 @@ use types::{BeaconStateError as Error, *};
|
|||||||
/// ``EJECTION_BALANCE``.
|
/// ``EJECTION_BALANCE``.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_ejections(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
pub fn process_ejections<T: EthSpec>(
|
||||||
|
state: &mut BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
// There is an awkward double (triple?) loop here because we can't loop across the borrowed
|
// There is an awkward double (triple?) loop here because we can't loop across the borrowed
|
||||||
// active validator indices and mutate state in the one loop.
|
// active validator indices and mutate state in the one loop.
|
||||||
let exitable: Vec<usize> = state
|
let exitable: Vec<usize> = state
|
||||||
|
@ -3,7 +3,7 @@ use types::*;
|
|||||||
/// Process the exit queue.
|
/// Process the exit queue.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_exit_queue(state: &mut BeaconState, spec: &ChainSpec) {
|
pub fn process_exit_queue<T: EthSpec>(state: &mut BeaconState<T>, spec: &ChainSpec) {
|
||||||
let current_epoch = state.current_epoch(spec);
|
let current_epoch = state.current_epoch(spec);
|
||||||
|
|
||||||
let eligible = |index: usize| {
|
let eligible = |index: usize| {
|
||||||
@ -32,8 +32,8 @@ pub fn process_exit_queue(state: &mut BeaconState, spec: &ChainSpec) {
|
|||||||
/// Initiate an exit for the validator of the given `index`.
|
/// Initiate an exit for the validator of the given `index`.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn prepare_validator_for_withdrawal(
|
fn prepare_validator_for_withdrawal<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
validator_index: usize,
|
validator_index: usize,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) {
|
) {
|
||||||
|
@ -3,20 +3,20 @@ use types::{BeaconStateError as Error, *};
|
|||||||
/// Process slashings.
|
/// Process slashings.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_slashings(
|
pub fn process_slashings<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
current_total_balance: u64,
|
current_total_balance: u64,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let current_epoch = state.current_epoch(spec);
|
let current_epoch = state.current_epoch(spec);
|
||||||
|
|
||||||
let total_at_start = state.get_slashed_balance(current_epoch + 1, spec)?;
|
let total_at_start = state.get_slashed_balance(current_epoch + 1)?;
|
||||||
let total_at_end = state.get_slashed_balance(current_epoch, spec)?;
|
let total_at_end = state.get_slashed_balance(current_epoch)?;
|
||||||
let total_penalities = total_at_end - total_at_start;
|
let total_penalities = total_at_end - total_at_start;
|
||||||
|
|
||||||
for (index, validator) in state.validator_registry.iter().enumerate() {
|
for (index, validator) in state.validator_registry.iter().enumerate() {
|
||||||
let should_penalize = current_epoch.as_usize()
|
let should_penalize = current_epoch.as_usize()
|
||||||
== validator.withdrawable_epoch.as_usize() - spec.latest_slashed_exit_length / 2;
|
== validator.withdrawable_epoch.as_usize() - T::LatestSlashedExitLength::to_usize() / 2;
|
||||||
|
|
||||||
if validator.slashed && should_penalize {
|
if validator.slashed && should_penalize {
|
||||||
let effective_balance = state.get_effective_balance(index, spec)?;
|
let effective_balance = state.get_effective_balance(index, spec)?;
|
||||||
|
@ -8,9 +8,10 @@ use types::*;
|
|||||||
fn runs_without_error() {
|
fn runs_without_error() {
|
||||||
Builder::from_env(Env::default().default_filter_or("error")).init();
|
Builder::from_env(Env::default().default_filter_or("error")).init();
|
||||||
|
|
||||||
let spec = ChainSpec::few_validators();
|
let spec = FewValidatorsEthSpec::spec();
|
||||||
|
|
||||||
let mut builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);
|
let mut builder: TestingBeaconStateBuilder<FewValidatorsEthSpec> =
|
||||||
|
TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);
|
||||||
|
|
||||||
let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch);
|
let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch);
|
||||||
builder.teleport_to_slot(target_slot, &spec);
|
builder.teleport_to_slot(target_slot, &spec);
|
||||||
|
@ -5,8 +5,8 @@ use types::*;
|
|||||||
/// Peforms a validator registry update, if required.
|
/// Peforms a validator registry update, if required.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn update_registry_and_shuffling_data(
|
pub fn update_registry_and_shuffling_data<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
current_total_balance: u64,
|
current_total_balance: u64,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -50,8 +50,8 @@ pub fn update_registry_and_shuffling_data(
|
|||||||
/// Returns `true` if the validator registry should be updated during an epoch processing.
|
/// Returns `true` if the validator registry should be updated during an epoch processing.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn should_update_validator_registry(
|
pub fn should_update_validator_registry<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<bool, BeaconStateError> {
|
) -> Result<bool, BeaconStateError> {
|
||||||
if state.finalized_epoch <= state.validator_registry_update_epoch {
|
if state.finalized_epoch <= state.validator_registry_update_epoch {
|
||||||
@ -79,8 +79,8 @@ pub fn should_update_validator_registry(
|
|||||||
/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized.
|
/// Note: Utilizes the cache and will fail if the appropriate cache is not initialized.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn update_validator_registry(
|
pub fn update_validator_registry<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
current_total_balance: u64,
|
current_total_balance: u64,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -134,8 +134,8 @@ pub fn update_validator_registry(
|
|||||||
/// Activate the validator of the given ``index``.
|
/// Activate the validator of the given ``index``.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn activate_validator(
|
pub fn activate_validator<T: EthSpec>(
|
||||||
state: &mut BeaconState,
|
state: &mut BeaconState<T>,
|
||||||
validator_index: usize,
|
validator_index: usize,
|
||||||
is_genesis: bool,
|
is_genesis: bool,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
@ -161,7 +161,10 @@ impl ValidatorStatuses {
|
|||||||
/// - Total balances for the current and previous epochs.
|
/// - Total balances for the current and previous epochs.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result<Self, BeaconStateError> {
|
pub fn new<T: EthSpec>(
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<Self, BeaconStateError> {
|
||||||
let mut statuses = Vec::with_capacity(state.validator_registry.len());
|
let mut statuses = Vec::with_capacity(state.validator_registry.len());
|
||||||
let mut total_balances = TotalBalances::default();
|
let mut total_balances = TotalBalances::default();
|
||||||
|
|
||||||
@ -196,9 +199,9 @@ impl ValidatorStatuses {
|
|||||||
/// `total_balances` fields.
|
/// `total_balances` fields.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_attestations(
|
pub fn process_attestations<T: EthSpec>(
|
||||||
&mut self,
|
&mut self,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), BeaconStateError> {
|
) -> Result<(), BeaconStateError> {
|
||||||
for a in state
|
for a in state
|
||||||
@ -243,7 +246,7 @@ impl ValidatorStatuses {
|
|||||||
status.is_previous_epoch_boundary_attester = true;
|
status.is_previous_epoch_boundary_attester = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if has_common_beacon_block_root(a, state, spec)? {
|
if has_common_beacon_block_root(a, state)? {
|
||||||
self.total_balances.previous_epoch_head_attesters += attesting_balance;
|
self.total_balances.previous_epoch_head_attesters += attesting_balance;
|
||||||
status.is_previous_epoch_head_attester = true;
|
status.is_previous_epoch_head_attester = true;
|
||||||
}
|
}
|
||||||
@ -262,9 +265,9 @@ impl ValidatorStatuses {
|
|||||||
/// "winning" shard block root for the previous epoch.
|
/// "winning" shard block root for the previous epoch.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn process_winning_roots(
|
pub fn process_winning_roots<T: EthSpec>(
|
||||||
&mut self,
|
&mut self,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
winning_roots: &WinningRootHashSet,
|
winning_roots: &WinningRootHashSet,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<(), BeaconStateError> {
|
) -> Result<(), BeaconStateError> {
|
||||||
@ -313,14 +316,14 @@ fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool
|
|||||||
/// the first slot of the given epoch.
|
/// the first slot of the given epoch.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn has_common_epoch_boundary_root(
|
fn has_common_epoch_boundary_root<T: EthSpec>(
|
||||||
a: &PendingAttestation,
|
a: &PendingAttestation,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
epoch: Epoch,
|
epoch: Epoch,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<bool, BeaconStateError> {
|
) -> Result<bool, BeaconStateError> {
|
||||||
let slot = epoch.start_slot(spec.slots_per_epoch);
|
let slot = epoch.start_slot(spec.slots_per_epoch);
|
||||||
let state_boundary_root = *state.get_block_root(slot, spec)?;
|
let state_boundary_root = *state.get_block_root(slot)?;
|
||||||
|
|
||||||
Ok(a.data.target_root == state_boundary_root)
|
Ok(a.data.target_root == state_boundary_root)
|
||||||
}
|
}
|
||||||
@ -329,12 +332,11 @@ fn has_common_epoch_boundary_root(
|
|||||||
/// the current slot of the `PendingAttestation`.
|
/// the current slot of the `PendingAttestation`.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn has_common_beacon_block_root(
|
fn has_common_beacon_block_root<T: EthSpec>(
|
||||||
a: &PendingAttestation,
|
a: &PendingAttestation,
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
spec: &ChainSpec,
|
|
||||||
) -> Result<bool, BeaconStateError> {
|
) -> Result<bool, BeaconStateError> {
|
||||||
let state_block_root = *state.get_block_root(a.data.slot, spec)?;
|
let state_block_root = *state.get_block_root(a.data.slot)?;
|
||||||
|
|
||||||
Ok(a.data.beacon_block_root == state_block_root)
|
Ok(a.data.beacon_block_root == state_block_root)
|
||||||
}
|
}
|
||||||
|
@ -35,8 +35,8 @@ impl WinningRoot {
|
|||||||
/// per-epoch processing.
|
/// per-epoch processing.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn winning_root(
|
pub fn winning_root<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
shard: u64,
|
shard: u64,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Result<Option<WinningRoot>, BeaconStateError> {
|
) -> Result<Option<WinningRoot>, BeaconStateError> {
|
||||||
@ -90,7 +90,11 @@ pub fn winning_root(
|
|||||||
/// Returns `true` if pending attestation `a` is eligible to become a winning root.
|
/// Returns `true` if pending attestation `a` is eligible to become a winning root.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn is_eligible_for_winning_root(state: &BeaconState, a: &PendingAttestation, shard: Shard) -> bool {
|
fn is_eligible_for_winning_root<T: EthSpec>(
|
||||||
|
state: &BeaconState<T>,
|
||||||
|
a: &PendingAttestation,
|
||||||
|
shard: Shard,
|
||||||
|
) -> bool {
|
||||||
if shard >= state.latest_crosslinks.len() as u64 {
|
if shard >= state.latest_crosslinks.len() as u64 {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -101,8 +105,8 @@ fn is_eligible_for_winning_root(state: &BeaconState, a: &PendingAttestation, sha
|
|||||||
/// Returns all indices which voted for a given crosslink. Does not contain duplicates.
|
/// Returns all indices which voted for a given crosslink. Does not contain duplicates.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
fn get_attesting_validator_indices(
|
fn get_attesting_validator_indices<T: EthSpec>(
|
||||||
state: &BeaconState,
|
state: &BeaconState<T>,
|
||||||
shard: u64,
|
shard: u64,
|
||||||
crosslink_data_root: &Hash256,
|
crosslink_data_root: &Hash256,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
@ -11,7 +11,10 @@ pub enum Error {
|
|||||||
/// Advances a state forward by one slot, performing per-epoch processing if required.
|
/// Advances a state forward by one slot, performing per-epoch processing if required.
|
||||||
///
|
///
|
||||||
/// Spec v0.5.1
|
/// Spec v0.5.1
|
||||||
pub fn per_slot_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
pub fn per_slot_processing<T: EthSpec>(
|
||||||
|
state: &mut BeaconState<T>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<(), Error> {
|
||||||
cache_state(state, spec)?;
|
cache_state(state, spec)?;
|
||||||
|
|
||||||
if (state.slot + 1) % spec.slots_per_epoch == 0 {
|
if (state.slot + 1) % spec.slots_per_epoch == 0 {
|
||||||
@ -23,7 +26,7 @@ pub fn per_slot_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
fn cache_state<T: EthSpec>(state: &mut BeaconState<T>, spec: &ChainSpec) -> Result<(), Error> {
|
||||||
let previous_slot_state_root = state.update_tree_hash_cache()?;
|
let previous_slot_state_root = state.update_tree_hash_cache()?;
|
||||||
|
|
||||||
// Note: increment the state slot here to allow use of our `state_root` and `block_root`
|
// Note: increment the state slot here to allow use of our `state_root` and `block_root`
|
||||||
@ -39,10 +42,10 @@ fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Store the previous slot's post state transition root.
|
// Store the previous slot's post state transition root.
|
||||||
state.set_state_root(previous_slot, previous_slot_state_root, spec)?;
|
state.set_state_root(previous_slot, previous_slot_state_root)?;
|
||||||
|
|
||||||
let latest_block_root = Hash256::from_slice(&state.latest_block_header.signed_root()[..]);
|
let latest_block_root = Hash256::from_slice(&state.latest_block_header.signed_root()[..]);
|
||||||
state.set_block_root(previous_slot, latest_block_root, spec)?;
|
state.set_block_root(previous_slot, latest_block_root)?;
|
||||||
|
|
||||||
// Set the state slot back to what it should be.
|
// Set the state slot back to what it should be.
|
||||||
state.slot -= 1;
|
state.slot -= 1;
|
||||||
|
@ -1,153 +0,0 @@
|
|||||||
#![cfg(not(debug_assertions))]
|
|
||||||
|
|
||||||
use serde_derive::Deserialize;
|
|
||||||
use serde_yaml;
|
|
||||||
use state_processing::{per_block_processing, per_slot_processing};
|
|
||||||
use std::{fs::File, io::prelude::*, path::PathBuf};
|
|
||||||
use types::*;
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize)]
|
|
||||||
pub struct ExpectedState {
|
|
||||||
pub slot: Option<Slot>,
|
|
||||||
pub genesis_time: Option<u64>,
|
|
||||||
pub fork: Option<Fork>,
|
|
||||||
pub validator_registry: Option<Vec<Validator>>,
|
|
||||||
pub validator_balances: Option<Vec<u64>>,
|
|
||||||
pub previous_epoch_attestations: Option<Vec<PendingAttestation>>,
|
|
||||||
pub current_epoch_attestations: Option<Vec<PendingAttestation>>,
|
|
||||||
pub historical_roots: Option<Vec<Hash256>>,
|
|
||||||
pub finalized_epoch: Option<Epoch>,
|
|
||||||
pub latest_block_roots: Option<TreeHashVector<Hash256>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ExpectedState {
|
|
||||||
// Return a list of fields that differ, and a string representation of the beacon state's field.
|
|
||||||
fn check(&self, state: &BeaconState) -> Vec<(&str, String)> {
|
|
||||||
// Check field equality
|
|
||||||
macro_rules! cfe {
|
|
||||||
($field_name:ident) => {
|
|
||||||
if self.$field_name.as_ref().map_or(true, |$field_name| {
|
|
||||||
println!(" > Checking {}", stringify!($field_name));
|
|
||||||
$field_name == &state.$field_name
|
|
||||||
}) {
|
|
||||||
vec![]
|
|
||||||
} else {
|
|
||||||
vec![(stringify!($field_name), format!("{:#?}", state.$field_name))]
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
vec![
|
|
||||||
cfe!(slot),
|
|
||||||
cfe!(genesis_time),
|
|
||||||
cfe!(fork),
|
|
||||||
cfe!(validator_registry),
|
|
||||||
cfe!(validator_balances),
|
|
||||||
cfe!(previous_epoch_attestations),
|
|
||||||
cfe!(current_epoch_attestations),
|
|
||||||
cfe!(historical_roots),
|
|
||||||
cfe!(finalized_epoch),
|
|
||||||
cfe!(latest_block_roots),
|
|
||||||
]
|
|
||||||
.into_iter()
|
|
||||||
.flat_map(|x| x)
|
|
||||||
.collect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize)]
|
|
||||||
pub struct TestCase {
|
|
||||||
pub name: String,
|
|
||||||
pub config: ChainSpec,
|
|
||||||
pub verify_signatures: bool,
|
|
||||||
pub initial_state: BeaconState,
|
|
||||||
pub blocks: Vec<BeaconBlock>,
|
|
||||||
pub expected_state: ExpectedState,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize)]
|
|
||||||
pub struct TestDoc {
|
|
||||||
pub title: String,
|
|
||||||
pub summary: String,
|
|
||||||
pub fork: String,
|
|
||||||
pub test_cases: Vec<TestCase>,
|
|
||||||
}
|
|
||||||
|
|
||||||
fn load_test_case(test_name: &str) -> TestDoc {
|
|
||||||
let mut file = {
|
|
||||||
let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
|
||||||
file_path_buf.push(format!("yaml_utils/specs/{}", test_name));
|
|
||||||
|
|
||||||
File::open(file_path_buf).unwrap()
|
|
||||||
};
|
|
||||||
let mut yaml_str = String::new();
|
|
||||||
file.read_to_string(&mut yaml_str).unwrap();
|
|
||||||
yaml_str = yaml_str.to_lowercase();
|
|
||||||
|
|
||||||
serde_yaml::from_str(&yaml_str.as_str()).unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn run_state_transition_test(test_name: &str) {
|
|
||||||
let doc = load_test_case(test_name);
|
|
||||||
|
|
||||||
// Run Tests
|
|
||||||
let mut ok = true;
|
|
||||||
for (i, test_case) in doc.test_cases.iter().enumerate() {
|
|
||||||
let fake_crypto = cfg!(feature = "fake_crypto");
|
|
||||||
if !test_case.verify_signatures == fake_crypto {
|
|
||||||
println!("Running {}", test_case.name);
|
|
||||||
} else {
|
|
||||||
println!(
|
|
||||||
"Skipping {} (fake_crypto: {}, need fake: {})",
|
|
||||||
test_case.name, fake_crypto, !test_case.verify_signatures
|
|
||||||
);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
let mut state = test_case.initial_state.clone();
|
|
||||||
for (j, block) in test_case.blocks.iter().enumerate() {
|
|
||||||
while block.slot > state.slot {
|
|
||||||
per_slot_processing(&mut state, &test_case.config).unwrap();
|
|
||||||
}
|
|
||||||
let res = per_block_processing(&mut state, &block, &test_case.config);
|
|
||||||
if res.is_err() {
|
|
||||||
println!("Error in {} (#{}), on block {}", test_case.name, i, j);
|
|
||||||
println!("{:?}", res);
|
|
||||||
ok = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mismatched_fields = test_case.expected_state.check(&state);
|
|
||||||
if !mismatched_fields.is_empty() {
|
|
||||||
println!(
|
|
||||||
"Error in expected state, these fields didn't match: {:?}",
|
|
||||||
mismatched_fields.iter().map(|(f, _)| f).collect::<Vec<_>>()
|
|
||||||
);
|
|
||||||
for (field_name, state_val) in mismatched_fields {
|
|
||||||
println!("state.{} was: {}", field_name, state_val);
|
|
||||||
}
|
|
||||||
ok = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assert!(ok, "one or more tests failed, see above");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[cfg(not(debug_assertions))]
|
|
||||||
fn test_read_yaml() {
|
|
||||||
load_test_case("sanity-check_small-config_32-vals.yaml");
|
|
||||||
load_test_case("sanity-check_default-config_100-vals.yaml");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[cfg(not(debug_assertions))]
|
|
||||||
fn run_state_transition_tests_small() {
|
|
||||||
run_state_transition_test("sanity-check_small-config_32-vals.yaml");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run with --ignored to run this test
|
|
||||||
#[test]
|
|
||||||
#[ignore]
|
|
||||||
fn run_state_transition_tests_large() {
|
|
||||||
run_state_transition_test("sanity-check_default-config_100-vals.yaml");
|
|
||||||
}
|
|
@ -1,14 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "yaml-utils"
|
|
||||||
version = "0.1.0"
|
|
||||||
authors = ["Kirk Baird <baird.k@outlook.com>"]
|
|
||||||
edition = "2018"
|
|
||||||
|
|
||||||
[build-dependencies]
|
|
||||||
reqwest = "0.9"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
name = "yaml_utils"
|
|
||||||
path = "src/lib.rs"
|
|
@ -1,27 +0,0 @@
|
|||||||
extern crate reqwest;
|
|
||||||
|
|
||||||
use std::fs::File;
|
|
||||||
use std::io::copy;
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
// These test files are not to be stored in the lighthouse repo as they are quite large (32MB).
|
|
||||||
// They will be downloaded at build time by yaml-utils crate (in build.rs)
|
|
||||||
let git_path = "https://raw.githubusercontent.com/ethereum/eth2.0-tests/master/state/";
|
|
||||||
let test_names = vec![
|
|
||||||
"sanity-check_default-config_100-vals.yaml",
|
|
||||||
"sanity-check_small-config_32-vals.yaml",
|
|
||||||
];
|
|
||||||
|
|
||||||
for test in test_names {
|
|
||||||
let mut target = String::from(git_path);
|
|
||||||
target.push_str(test);
|
|
||||||
let mut response = reqwest::get(target.as_str()).unwrap();
|
|
||||||
|
|
||||||
let mut dest = {
|
|
||||||
let mut file_name = String::from("specs/");
|
|
||||||
file_name.push_str(test);
|
|
||||||
File::create(file_name).unwrap()
|
|
||||||
};
|
|
||||||
copy(&mut response, &mut dest).unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,15 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
# Script to extract all the fields of the state mentioned in `expected_state` fields of tests
|
|
||||||
# in the `spec` directory. These fields can then be added to the `ExpectedState` struct.
|
|
||||||
# Might take a while to run.
|
|
||||||
|
|
||||||
import os, yaml
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
yaml_files = (filename for filename in os.listdir("specs") if filename.endswith(".yaml"))
|
|
||||||
parsed_yaml = (yaml.load(open("specs/" + filename, "r")) for filename in yaml_files)
|
|
||||||
all_fields = set()
|
|
||||||
for y in parsed_yaml:
|
|
||||||
all_fields.update(*({key for key in case["expected_state"]} for case in y["test_cases"]))
|
|
||||||
print(all_fields)
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user