From 9a964be58baeb9510183181736533f511eb239c5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 10:50:12 +1100 Subject: [PATCH 01/56] Update test_harness clap args structure Prepares it for adding a new subcommand --- .../beacon_chain/test_harness/src/bin.rs | 55 ++++++------------- .../beacon_chain/test_harness/src/run_test.rs | 36 ++++++++++++ 2 files changed, 52 insertions(+), 39 deletions(-) create mode 100644 beacon_node/beacon_chain/test_harness/src/run_test.rs diff --git a/beacon_node/beacon_chain/test_harness/src/bin.rs b/beacon_node/beacon_chain/test_harness/src/bin.rs index 283cb0dfa..3769788d8 100644 --- a/beacon_node/beacon_chain/test_harness/src/bin.rs +++ b/beacon_node/beacon_chain/test_harness/src/bin.rs @@ -1,10 +1,9 @@ -use clap::{App, Arg}; +use clap::{App, Arg, SubCommand}; use env_logger::{Builder, Env}; -use std::{fs::File, io::prelude::*}; -use test_case::TestCase; -use yaml_rust::YamlLoader; +use run_test::run_test; mod beacon_chain_harness; +mod run_test; mod test_case; mod validator_harness; @@ -15,13 +14,6 @@ fn main() { .version("0.0.1") .author("Sigma Prime ") .about("Runs `test_harness` using a YAML test_case.") - .arg( - Arg::with_name("yaml") - .long("yaml") - .value_name("FILE") - .help("YAML file test_case.") - .required(true), - ) .arg( Arg::with_name("log") .long("log-level") @@ -31,39 +23,24 @@ fn main() { .default_value("debug") .required(true), ) + .subcommand( + SubCommand::with_name("run_test") + .about("Executes a YAML test specification") + .arg( + Arg::with_name("yaml") + .long("yaml") + .value_name("FILE") + .help("YAML file test_case.") + .required(true), + ), + ) .get_matches(); if let Some(log_level) = matches.value_of("log") { Builder::from_env(Env::default().default_filter_or(log_level)).init(); } - if let Some(yaml_file) = matches.value_of("yaml") { - let docs = { - let mut file = File::open(yaml_file).unwrap(); - - let mut yaml_str = String::new(); - file.read_to_string(&mut yaml_str).unwrap(); - - YamlLoader::load_from_str(&yaml_str).unwrap() - }; - - for doc in &docs { - // For each `test_cases` YAML in the document, build a `TestCase`, execute it and - // assert that the execution result matches the test_case description. - // - // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis - // and a new `BeaconChain` is built as per the test_case. - // - // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks - // and states in the chain is obtained and checked against the `results` specified in - // the `test_case`. - // - // If any of the expectations in the results are not met, the process - // panics with a message. 
- for test_case in doc["test_cases"].as_vec().unwrap() { - let test_case = TestCase::from_yaml(test_case); - test_case.assert_result_valid(test_case.execute()) - } - } + if let Some(matches) = matches.subcommand_matches("run_test") { + run_test(matches); } } diff --git a/beacon_node/beacon_chain/test_harness/src/run_test.rs b/beacon_node/beacon_chain/test_harness/src/run_test.rs new file mode 100644 index 000000000..1a816afe0 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/run_test.rs @@ -0,0 +1,36 @@ +use crate::test_case::TestCase; +use clap::ArgMatches; +use std::{fs::File, io::prelude::*}; +use yaml_rust::YamlLoader; + +pub fn run_test(matches: &ArgMatches) { + if let Some(yaml_file) = matches.value_of("yaml") { + let docs = { + let mut file = File::open(yaml_file).unwrap(); + + let mut yaml_str = String::new(); + file.read_to_string(&mut yaml_str).unwrap(); + + YamlLoader::load_from_str(&yaml_str).unwrap() + }; + + for doc in &docs { + // For each `test_cases` YAML in the document, build a `TestCase`, execute it and + // assert that the execution result matches the test_case description. + // + // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis + // and a new `BeaconChain` is built as per the test_case. + // + // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks + // and states in the chain is obtained and checked against the `results` specified in + // the `test_case`. + // + // If any of the expectations in the results are not met, the process + // panics with a message. + for test_case in doc["test_cases"].as_vec().unwrap() { + let test_case = TestCase::from_yaml(test_case); + test_case.assert_result_valid(test_case.execute()) + } + } + } +} From b98f514d6812161ff2f794e20c3f4b19ea84462f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 10:50:43 +1100 Subject: [PATCH 02/56] Break BeaconChainHarness validator gen into fn Prepares for allowing for loading from file --- .../test_harness/src/beacon_chain_harness.rs | 40 ++------------ .../beacon_chain_harness/generate_deposits.rs | 54 +++++++++++++++++++ 2 files changed, 59 insertions(+), 35 deletions(-) create mode 100644 beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index f220619ce..95899e23b 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -1,12 +1,12 @@ use super::ValidatorHarness; use beacon_chain::{BeaconChain, BlockProcessingOutcome}; pub use beacon_chain::{BeaconChainError, CheckPoint}; -use bls::{create_proof_of_possession, get_withdrawal_credentials}; use db::{ stores::{BeaconBlockStore, BeaconStateStore}, MemoryDB, }; use fork_choice::BitwiseLMDGhost; +use generate_deposits::generate_deposits_with_random_keypairs; use log::debug; use rayon::prelude::*; use slot_clock::TestingSlotClock; @@ -15,6 +15,8 @@ use std::iter::FromIterator; use std::sync::Arc; use types::*; +mod generate_deposits; + /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected /// to it. Each validator is provided a borrow to the beacon chain, where it may read /// information and submit blocks/attestations for processing. 
@@ -47,40 +49,8 @@ impl BeaconChainHarness { block_hash: Hash256::zero(), }; - debug!("Generating validator keypairs..."); - - let keypairs: Vec = (0..validator_count) - .collect::>() - .par_iter() - .map(|_| Keypair::random()) - .collect(); - - debug!("Creating validator deposits..."); - - let initial_validator_deposits = keypairs - .par_iter() - .map(|keypair| Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - // Validator can withdraw using their main keypair. - withdrawal_credentials: Hash256::from_slice( - &get_withdrawal_credentials( - &keypair.pk, - spec.bls_withdrawal_prefix_byte, - )[..], - ), - proof_of_possession: create_proof_of_possession(&keypair), - }, - }, - }) - .collect(); - - debug!("Creating the BeaconChain..."); + let (keypairs, initial_validator_deposits) = + generate_deposits_with_random_keypairs(validator_count, genesis_time, &spec); // Create the Beacon Chain let beacon_chain = Arc::new( diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs new file mode 100644 index 000000000..39924fb67 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs @@ -0,0 +1,54 @@ +use bls::{create_proof_of_possession, get_withdrawal_credentials}; +use log::debug; +use rayon::prelude::*; +use types::*; + +/// Generates `validator_count` deposits using randomly generated keypairs and some default specs +/// for the deposits. +pub fn generate_deposits_with_random_keypairs( + validator_count: usize, + genesis_time: u64, + spec: &ChainSpec, +) -> (Vec, Vec) { + debug!( + "Generating {} random validator keypairs...", + validator_count + ); + + let keypairs: Vec = (0..validator_count) + .collect::>() + .par_iter() + .map(|_| Keypair::random()) + .collect(); + + debug!( + "Generating {} validator deposits from random keypairs...", + validator_count + ); + + let initial_validator_deposits = + keypairs + .par_iter() + .map(|keypair| Deposit { + branch: vec![], // branch verification is not specified. + index: 0, // index verification is not specified. + deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: genesis_time - 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + // Validator can withdraw using their main keypair. 
+ withdrawal_credentials: Hash256::from_slice( + &get_withdrawal_credentials( + &keypair.pk, + spec.bls_withdrawal_prefix_byte, + )[..], + ), + proof_of_possession: create_proof_of_possession(&keypair), + }, + }, + }) + .collect(); + + (keypairs, initial_validator_deposits) +} From 3b6431b4b457c00c1b92b50bd32b54834ad47996 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 13:15:41 +1100 Subject: [PATCH 03/56] Impl serde ser and deser for bls keypairs --- eth2/utils/bls/Cargo.toml | 1 + eth2/utils/bls/src/keypair.rs | 3 ++- eth2/utils/bls/src/lib.rs | 1 + eth2/utils/bls/src/public_key.rs | 16 +++++++++++++++- eth2/utils/bls/src/secret_key.rs | 27 ++++++++++++++++++++++++++- eth2/utils/bls/src/signature.rs | 17 ++++++++++++++++- 6 files changed, 61 insertions(+), 4 deletions(-) diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 7a436307b..5ac38595a 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -9,4 +9,5 @@ bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.5 hashing = { path = "../hashing" } hex = "0.3" serde = "1.0" +serde_derive = "1.0" ssz = { path = "../ssz" } diff --git a/eth2/utils/bls/src/keypair.rs b/eth2/utils/bls/src/keypair.rs index 1cce9c10e..d60a2fc25 100644 --- a/eth2/utils/bls/src/keypair.rs +++ b/eth2/utils/bls/src/keypair.rs @@ -1,6 +1,7 @@ use super::{PublicKey, SecretKey}; +use serde_derive::{Deserialize, Serialize}; -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct Keypair { pub sk: SecretKey, pub pk: PublicKey, diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index bb109b0a1..8b3f8b2ba 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -6,6 +6,7 @@ mod aggregate_signature; mod keypair; mod public_key; mod secret_key; +mod serde_vistors; mod signature; pub use crate::aggregate_public_key::AggregatePublicKey; diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index c7fd526a0..3ab2b60bb 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -1,6 +1,8 @@ +use super::serde_vistors::HexVisitor; use super::SecretKey; use bls_aggregates::PublicKey as RawPublicKey; use hex::encode as hex_encode; +use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz::{ decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, @@ -61,7 +63,19 @@ impl Serialize for PublicKey { where S: Serializer, { - serializer.serialize_bytes(&ssz_encode(self)) + serializer.serialize_str(&hex_encode(ssz_encode(self))) + } +} + +impl<'de> Deserialize<'de> for PublicKey { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = deserializer.deserialize_str(HexVisitor)?; + let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) + .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; + Ok(pubkey) } } diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index f2d54f4ac..06c968389 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -1,5 +1,9 @@ +use super::serde_vistors::HexVisitor; use bls_aggregates::{DecodeError as BlsDecodeError, SecretKey as RawSecretKey}; -use ssz::{decode_ssz_list, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use hex::encode as hex_encode; +use serde::de::{Deserialize, Deserializer}; +use serde::ser::{Serialize, Serializer}; +use 
ssz::{decode_ssz_list, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; /// A single BLS signature. /// @@ -40,6 +44,27 @@ impl Decodable for SecretKey { } } +impl Serialize for SecretKey { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&hex_encode(ssz_encode(self))) + } +} + +impl<'de> Deserialize<'de> for SecretKey { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = deserializer.deserialize_str(HexVisitor)?; + let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) + .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; + Ok(pubkey) + } +} + impl TreeHash for SecretKey { fn hash_tree_root_internal(&self) -> Vec { self.0.as_bytes().clone() diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index c0c31ef27..86c54cba7 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -1,5 +1,8 @@ +use super::serde_vistors::HexVisitor; use super::{PublicKey, SecretKey}; use bls_aggregates::Signature as RawSignature; +use hex::encode as hex_encode; +use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use ssz::{ decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, @@ -83,7 +86,19 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_bytes(&ssz_encode(self)) + serializer.serialize_str(&hex_encode(ssz_encode(self))) + } +} + +impl<'de> Deserialize<'de> for Signature { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let bytes = deserializer.deserialize_str(HexVisitor)?; + let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) + .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; + Ok(pubkey) } } From 7ddbdc15bb8f460c70bd148d12e538f5bbca4c1a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 13:16:18 +1100 Subject: [PATCH 04/56] Impl serde deser for types::Deposit --- eth2/types/src/deposit.rs | 4 ++-- eth2/types/src/deposit_data.rs | 4 ++-- eth2/types/src/deposit_input.rs | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index 2e69ea599..91c6ef2ac 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -1,14 +1,14 @@ use super::{DepositData, Hash256}; use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// A deposit to potentially become a beacon chain validator. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct Deposit { pub branch: Vec, pub index: u64, diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 1eb2722a9..61b82f4b3 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -1,14 +1,14 @@ use super::DepositInput; use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// Data generated by the deposit contract. 
/// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct DepositData { pub amount: u64, pub timestamp: u64, diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index c4c79c3d1..32f57ab6e 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -2,14 +2,14 @@ use super::Hash256; use crate::test_utils::TestRandom; use bls::{PublicKey, Signature}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// The data supplied by the user to the deposit contract. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct DepositInput { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, From 2f484db82c40880d31fc1d42cbc78ac94c5f0cdb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 13:16:33 +1100 Subject: [PATCH 05/56] Expose `Signature` in `types` crate --- eth2/types/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 9bf60f2c9..76fcb43ed 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -72,4 +72,4 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; /// Maps a slot to a block proposer. pub type ProposerMap = HashMap; -pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; +pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature}; From ec9e0bbddfc1896eaa4d5e8466d5a5fb465e3044 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 13:18:02 +1100 Subject: [PATCH 06/56] Allow test_harness to load validators from file Also adds a command to test_harness binary to generate validators --- .../beacon_chain/test_harness/.gitignore | 1 + .../beacon_chain/test_harness/Cargo.toml | 2 + .../test_harness/src/beacon_chain_harness.rs | 21 +++++-- .../beacon_chain_harness/generate_deposits.rs | 18 ++++-- .../load_deposits_from_file.rs | 38 ++++++++++++ .../beacon_chain/test_harness/src/bin.rs | 58 +++++++++++++++++++ .../beacon_chain/test_harness/src/prepare.rs | 35 +++++++++++ .../beacon_chain/test_harness/src/run_test.rs | 7 ++- .../test_harness/src/test_case.rs | 5 +- 9 files changed, 174 insertions(+), 11 deletions(-) create mode 100644 beacon_node/beacon_chain/test_harness/.gitignore create mode 100644 beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs create mode 100644 beacon_node/beacon_chain/test_harness/src/prepare.rs diff --git a/beacon_node/beacon_chain/test_harness/.gitignore b/beacon_node/beacon_chain/test_harness/.gitignore new file mode 100644 index 000000000..5f605cba0 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/.gitignore @@ -0,0 +1 @@ +validators/ diff --git a/beacon_node/beacon_chain/test_harness/Cargo.toml b/beacon_node/beacon_chain/test_harness/Cargo.toml index bd7a58270..448934eb3 100644 --- a/beacon_node/beacon_chain/test_harness/Cargo.toml +++ b/beacon_node/beacon_chain/test_harness/Cargo.toml @@ -33,12 +33,14 @@ failure = "0.1" failure_derive = "0.1" fork_choice = { path = "../../../eth2/fork_choice" } hashing = { path = "../../../eth2/utils/hashing" } 
+int_to_bytes = { path = "../../../eth2/utils/int_to_bytes" } log = "0.4" env_logger = "0.6.0" rayon = "1.0" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" +serde_yaml = "0.8" slot_clock = { path = "../../../eth2/utils/slot_clock" } ssz = { path = "../../../eth2/utils/ssz" } types = { path = "../../../eth2/types" } diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 95899e23b..3eabfcb1f 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -6,16 +6,20 @@ use db::{ MemoryDB, }; use fork_choice::BitwiseLMDGhost; -use generate_deposits::generate_deposits_with_random_keypairs; use log::debug; use rayon::prelude::*; use slot_clock::TestingSlotClock; use std::collections::HashSet; use std::iter::FromIterator; +use std::path::Path; use std::sync::Arc; use types::*; mod generate_deposits; +mod load_deposits_from_file; + +pub use generate_deposits::generate_deposits_with_deterministic_keypairs; +pub use load_deposits_from_file::load_deposits_from_file; /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected /// to it. Each validator is provided a borrow to the beacon chain, where it may read @@ -37,7 +41,7 @@ impl BeaconChainHarness { /// /// - A keypair, `BlockProducer` and `Attester` for each validator. /// - A new BeaconChain struct where the given validators are in the genesis. - pub fn new(spec: ChainSpec, validator_count: usize) -> Self { + pub fn new(spec: ChainSpec, validator_count: usize, validators_dir: Option<&Path>) -> Self { let db = Arc::new(MemoryDB::open()); let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone())); @@ -49,8 +53,17 @@ impl BeaconChainHarness { block_hash: Hash256::zero(), }; - let (keypairs, initial_validator_deposits) = - generate_deposits_with_random_keypairs(validator_count, genesis_time, &spec); + let (keypairs, initial_validator_deposits) = if let Some(path) = validators_dir { + let keypairs_path = path.join("keypairs.yaml"); + let deposits_path = path.join("deposits.yaml"); + load_deposits_from_file( + validator_count, + &keypairs_path.as_path(), + &deposits_path.as_path(), + ) + } else { + generate_deposits_with_deterministic_keypairs(validator_count, genesis_time, &spec) + }; // Create the Beacon Chain let beacon_chain = Arc::new( diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs index 39924fb67..b07168df9 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs @@ -1,11 +1,16 @@ use bls::{create_proof_of_possession, get_withdrawal_credentials}; +use int_to_bytes::int_to_bytes48; use log::debug; use rayon::prelude::*; use types::*; -/// Generates `validator_count` deposits using randomly generated keypairs and some default specs -/// for the deposits. -pub fn generate_deposits_with_random_keypairs( +/// Generates `validator_count` deposits using keypairs where the secret key is the index of the +/// validator. +/// +/// For example, the first validator has a secret key of `int_to_bytes48(1)`, the second has +/// `int_to_bytes48(2)` and so on. 
(We skip `0` as it generates a weird looking public key and is +/// probably invalid). +pub fn generate_deposits_with_deterministic_keypairs( validator_count: usize, genesis_time: u64, spec: &ChainSpec, @@ -18,7 +23,12 @@ pub fn generate_deposits_with_random_keypairs( let keypairs: Vec = (0..validator_count) .collect::>() .par_iter() - .map(|_| Keypair::random()) + .map(|&i| { + let secret = int_to_bytes48(i as u64 + 1); + let sk = SecretKey::from_bytes(&secret).unwrap(); + let pk = PublicKey::from_secret_key(&sk); + Keypair { sk, pk } + }) .collect(); debug!( diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs new file mode 100644 index 000000000..9cba3d3c4 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs @@ -0,0 +1,38 @@ +use log::debug; +use serde_yaml; +use std::fs::File; +use std::path::Path; +use types::*; + +pub fn load_deposits_from_file( + validator_count: usize, + keypairs_path: &Path, + deposits_path: &Path, +) -> (Vec, Vec) { + debug!("Loading keypairs from file..."); + let keypairs_file = File::open(keypairs_path).unwrap(); + let mut keypairs: Vec = serde_yaml::from_reader(&keypairs_file).unwrap(); + + debug!("Loading deposits from file..."); + let deposits_file = File::open(deposits_path).unwrap(); + let mut deposits: Vec = serde_yaml::from_reader(&deposits_file).unwrap(); + + assert!( + keypairs.len() >= validator_count, + "Unable to load {} keypairs from file ({} available)", + validator_count, + keypairs.len() + ); + + assert!( + deposits.len() >= validator_count, + "Unable to load {} deposits from file ({} available)", + validator_count, + deposits.len() + ); + + keypairs.truncate(validator_count); + deposits.truncate(validator_count); + + (keypairs, deposits) +} diff --git a/beacon_node/beacon_chain/test_harness/src/bin.rs b/beacon_node/beacon_chain/test_harness/src/bin.rs index 3769788d8..0a02264a3 100644 --- a/beacon_node/beacon_chain/test_harness/src/bin.rs +++ b/beacon_node/beacon_chain/test_harness/src/bin.rs @@ -1,8 +1,11 @@ use clap::{App, Arg, SubCommand}; use env_logger::{Builder, Env}; +use prepare::prepare; use run_test::run_test; +use types::ChainSpec; mod beacon_chain_harness; +mod prepare; mod run_test; mod test_case; mod validator_harness; @@ -17,12 +20,22 @@ fn main() { .arg( Arg::with_name("log") .long("log-level") + .short("l") .value_name("LOG_LEVEL") .help("Logging level.") .possible_values(&["error", "warn", "info", "debug", "trace"]) .default_value("debug") .required(true), ) + .arg( + Arg::with_name("spec") + .long("spec") + .short("s") + .value_name("SPECIFICATION") + .help("ChainSpec instantiation.") + .possible_values(&["foundation", "few_validators"]) + .default_value("foundation"), + ) .subcommand( SubCommand::with_name("run_test") .about("Executes a YAML test specification") @@ -32,6 +45,41 @@ fn main() { .value_name("FILE") .help("YAML file test_case.") .required(true), + ) + .arg( + Arg::with_name("validators_dir") + .long("validators-dir") + .short("v") + .value_name("VALIDATORS_DIR") + .help("A directory with validator deposits and keypair YAML."), + ), + ) + .subcommand( + SubCommand::with_name("prepare") + .about("Builds validator YAML files for faster tests.") + .arg( + Arg::with_name("validator_count") + .long("validator_count") + .short("n") + .value_name("VALIDATOR_COUNT") + .help("Number of validators to generate.") + 
.required(true), + ) + .arg( + Arg::with_name("genesis_time") + .long("genesis_time") + .short("t") + .value_name("GENESIS_TIME") + .help("Time for validator deposits.") + .required(true), + ) + .arg( + Arg::with_name("output_dir") + .long("output_dir") + .short("d") + .value_name("GENESIS_TIME") + .help("Output directory for generated YAML.") + .default_value("validators"), ), ) .get_matches(); @@ -40,7 +88,17 @@ fn main() { Builder::from_env(Env::default().default_filter_or(log_level)).init(); } + let spec = match matches.value_of("spec") { + Some("foundation") => ChainSpec::foundation(), + Some("few_validators") => ChainSpec::few_validators(), + _ => unreachable!(), // Has a default value, should always exist. + }; + if let Some(matches) = matches.subcommand_matches("run_test") { run_test(matches); } + + if let Some(matches) = matches.subcommand_matches("prepare") { + prepare(matches, &spec); + } } diff --git a/beacon_node/beacon_chain/test_harness/src/prepare.rs b/beacon_node/beacon_chain/test_harness/src/prepare.rs new file mode 100644 index 000000000..160b0f7ee --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/prepare.rs @@ -0,0 +1,35 @@ +use crate::beacon_chain_harness::generate_deposits_with_deterministic_keypairs; +use clap::{value_t, ArgMatches}; +use log::debug; +use serde_yaml; +use std::path::Path; +use std::{fs, fs::File}; +use types::*; + +const KEYPAIRS_FILE: &str = "keypairs.yaml"; +const DEPOSITS_FILE: &str = "deposits.yaml"; + +pub fn prepare(matches: &ArgMatches, spec: &ChainSpec) { + let validator_count = value_t!(matches.value_of("validator_count"), usize) + .expect("Validator count is required argument"); + let genesis_time = + value_t!(matches.value_of("genesis_time"), u64).expect("Genesis time is required argument"); + let output_dir = matches + .value_of("output_dir") + .expect("Output dir has a default value."); + + let (keypairs, deposits) = + generate_deposits_with_deterministic_keypairs(validator_count, genesis_time, &spec); + + debug!("Created keypairs and deposits, writing to file..."); + + fs::create_dir_all(Path::new(output_dir)).unwrap(); + + let keypairs_path = Path::new(output_dir).join(KEYPAIRS_FILE); + let keypairs_file = File::create(keypairs_path).unwrap(); + serde_yaml::to_writer(keypairs_file, &keypairs).unwrap(); + + let deposits_path = Path::new(output_dir).join(DEPOSITS_FILE); + let deposits_file = File::create(deposits_path).unwrap(); + serde_yaml::to_writer(deposits_file, &deposits).unwrap(); +} diff --git a/beacon_node/beacon_chain/test_harness/src/run_test.rs b/beacon_node/beacon_chain/test_harness/src/run_test.rs index 1a816afe0..51a993bd7 100644 --- a/beacon_node/beacon_chain/test_harness/src/run_test.rs +++ b/beacon_node/beacon_chain/test_harness/src/run_test.rs @@ -1,5 +1,6 @@ use crate::test_case::TestCase; use clap::ArgMatches; +use std::path::Path; use std::{fs::File, io::prelude::*}; use yaml_rust::YamlLoader; @@ -15,6 +16,10 @@ pub fn run_test(matches: &ArgMatches) { }; for doc in &docs { + let validators_dir = matches + .value_of("validators_dir") + .and_then(|dir_str| Some(Path::new(dir_str))); + // For each `test_cases` YAML in the document, build a `TestCase`, execute it and // assert that the execution result matches the test_case description. // @@ -29,7 +34,7 @@ pub fn run_test(matches: &ArgMatches) { // panics with a message. 
for test_case in doc["test_cases"].as_vec().unwrap() { let test_case = TestCase::from_yaml(test_case); - test_case.assert_result_valid(test_case.execute()) + test_case.assert_result_valid(test_case.execute(validators_dir)) } } } diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index b2709edfc..e7b2defe6 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -6,6 +6,7 @@ use beacon_chain::CheckPoint; use bls::{create_proof_of_possession, get_withdrawal_credentials}; use log::{info, warn}; use ssz::SignedRoot; +use std::path::Path; use types::*; use types::{ @@ -70,7 +71,7 @@ impl TestCase { /// Executes the test case, returning an `ExecutionResult`. #[allow(clippy::cyclomatic_complexity)] - pub fn execute(&self) -> ExecutionResult { + pub fn execute(&self, validators_dir: Option<&Path>) -> ExecutionResult { let spec = self.spec(); let validator_count = self.config.deposits_for_chain_start; let slots = self.config.num_slots; @@ -80,7 +81,7 @@ impl TestCase { validator_count ); - let mut harness = BeaconChainHarness::new(spec, validator_count); + let mut harness = BeaconChainHarness::new(spec, validator_count, validators_dir); info!("Starting simulation across {} slots...", slots); From 5c1458ba46832911a32fb2df1e6fbcbb79298a6f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 13:19:37 +1100 Subject: [PATCH 07/56] Add bls serde_vistors file --- eth2/utils/bls/src/serde_vistors.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 eth2/utils/bls/src/serde_vistors.rs diff --git a/eth2/utils/bls/src/serde_vistors.rs b/eth2/utils/bls/src/serde_vistors.rs new file mode 100644 index 000000000..55eadb883 --- /dev/null +++ b/eth2/utils/bls/src/serde_vistors.rs @@ -0,0 +1,20 @@ +use hex; +use serde::de::{self, Visitor}; +use std::fmt; + +pub struct HexVisitor; + +impl<'de> Visitor<'de> for HexVisitor { + type Value = Vec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a hex string (without 0x prefix)") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + Ok(hex::decode(value).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?) + } +} From e76b5e1c3a0bc32dc0843fcd12d0395528f604b3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 15:06:13 +1100 Subject: [PATCH 08/56] Re-work deposit generation for memory efficiency Helps ensure that variables are dropped after they're finished being used. 
--- .../test_harness/src/beacon_chain_harness.rs | 6 +++-- .../beacon_chain_harness/generate_deposits.rs | 23 +++++++++++-------- .../beacon_chain/test_harness/src/prepare.rs | 20 ++++++++++++---- 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 3eabfcb1f..8ec07a994 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -18,7 +18,7 @@ use types::*; mod generate_deposits; mod load_deposits_from_file; -pub use generate_deposits::generate_deposits_with_deterministic_keypairs; +pub use generate_deposits::{generate_deposits_from_keypairs, generate_deterministic_keypairs}; pub use load_deposits_from_file::load_deposits_from_file; /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected @@ -62,7 +62,9 @@ impl BeaconChainHarness { &deposits_path.as_path(), ) } else { - generate_deposits_with_deterministic_keypairs(validator_count, genesis_time, &spec) + let keypairs = generate_deterministic_keypairs(validator_count); + let deposits = generate_deposits_from_keypairs(&keypairs, genesis_time, &spec); + (keypairs, deposits) }; // Create the Beacon Chain diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs index b07168df9..f2d68d644 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs @@ -4,19 +4,15 @@ use log::debug; use rayon::prelude::*; use types::*; -/// Generates `validator_count` deposits using keypairs where the secret key is the index of the +/// Generates `validator_count` keypairs where the secret key is the index of the /// validator. /// /// For example, the first validator has a secret key of `int_to_bytes48(1)`, the second has /// `int_to_bytes48(2)` and so on. (We skip `0` as it generates a weird looking public key and is /// probably invalid). 
-pub fn generate_deposits_with_deterministic_keypairs( - validator_count: usize, - genesis_time: u64, - spec: &ChainSpec, -) -> (Vec, Vec) { +pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { debug!( - "Generating {} random validator keypairs...", + "Generating {} deterministic validator keypairs...", validator_count ); @@ -31,9 +27,18 @@ pub fn generate_deposits_with_deterministic_keypairs( }) .collect(); + keypairs +} + +/// Generates a `Deposit` for each keypairs +pub fn generate_deposits_from_keypairs( + keypairs: &[Keypair], + genesis_time: u64, + spec: &ChainSpec, +) -> Vec { debug!( "Generating {} validator deposits from random keypairs...", - validator_count + keypairs.len() ); let initial_validator_deposits = @@ -60,5 +65,5 @@ pub fn generate_deposits_with_deterministic_keypairs( }) .collect(); - (keypairs, initial_validator_deposits) + initial_validator_deposits } diff --git a/beacon_node/beacon_chain/test_harness/src/prepare.rs b/beacon_node/beacon_chain/test_harness/src/prepare.rs index 160b0f7ee..001522955 100644 --- a/beacon_node/beacon_chain/test_harness/src/prepare.rs +++ b/beacon_node/beacon_chain/test_harness/src/prepare.rs @@ -1,4 +1,6 @@ -use crate::beacon_chain_harness::generate_deposits_with_deterministic_keypairs; +use crate::beacon_chain_harness::{ + generate_deposits_from_keypairs, generate_deterministic_keypairs, +}; use clap::{value_t, ArgMatches}; use log::debug; use serde_yaml; @@ -18,17 +20,27 @@ pub fn prepare(matches: &ArgMatches, spec: &ChainSpec) { .value_of("output_dir") .expect("Output dir has a default value."); - let (keypairs, deposits) = - generate_deposits_with_deterministic_keypairs(validator_count, genesis_time, &spec); - debug!("Created keypairs and deposits, writing to file..."); fs::create_dir_all(Path::new(output_dir)).unwrap(); + // Ensure that keypairs is dropped before writing deposits, this provides a big memory saving + // for large validator_counts. + let deposits = { + let keypairs = generate_deterministic_keypairs(validator_count); + write_keypairs(output_dir, &keypairs); + generate_deposits_from_keypairs(&keypairs, genesis_time, &spec) + }; + write_deposits(output_dir, &deposits); +} + +fn write_keypairs(output_dir: &str, keypairs: &[Keypair]) { let keypairs_path = Path::new(output_dir).join(KEYPAIRS_FILE); let keypairs_file = File::create(keypairs_path).unwrap(); serde_yaml::to_writer(keypairs_file, &keypairs).unwrap(); +} +fn write_deposits(output_dir: &str, deposits: &[Deposit]) { let deposits_path = Path::new(output_dir).join(DEPOSITS_FILE); let deposits_file = File::create(deposits_path).unwrap(); serde_yaml::to_writer(deposits_file, &deposits).unwrap(); From 6efe2ad3e3c4cf78f0f6cf7218a6718451ba3a0a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 15:09:33 +1100 Subject: [PATCH 09/56] Add debug logs to test_harness prepare --- beacon_node/beacon_chain/test_harness/src/prepare.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/beacon_node/beacon_chain/test_harness/src/prepare.rs b/beacon_node/beacon_chain/test_harness/src/prepare.rs index 001522955..0a4f4c34f 100644 --- a/beacon_node/beacon_chain/test_harness/src/prepare.rs +++ b/beacon_node/beacon_chain/test_harness/src/prepare.rs @@ -27,10 +27,14 @@ pub fn prepare(matches: &ArgMatches, spec: &ChainSpec) { // Ensure that keypairs is dropped before writing deposits, this provides a big memory saving // for large validator_counts. 
let deposits = { + debug!("Creating {} keypairs...", validator_count); let keypairs = generate_deterministic_keypairs(validator_count); + debug!("Writing {} keypairs to file...", validator_count); write_keypairs(output_dir, &keypairs); + debug!("Creating {} deposits to file...", validator_count); generate_deposits_from_keypairs(&keypairs, genesis_time, &spec) }; + debug!("Writing {} deposits to file...", validator_count); write_deposits(output_dir, &deposits); } From 4b21252ce4777627fd50108bd2d9f75cec4bed29 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 15:33:45 +1100 Subject: [PATCH 10/56] Refactor BeaconChain and BeaconState genesis Now it more easily supports using pre-build validator registries. --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 +-- .../test_harness/src/beacon_chain_harness.rs | 16 +- eth2/types/src/beacon_state.rs | 42 ++--- eth2/types/src/beacon_state/builder.rs | 154 ++++++------------ 4 files changed, 83 insertions(+), 148 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3d2efa8ae..b0e84e1e1 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -73,31 +73,18 @@ where F: ForkChoice, { /// Instantiate a new Beacon Chain, from genesis. - #[allow(clippy::too_many_arguments)] // Will be re-factored in the coming weeks. - pub fn genesis( + pub fn from_genesis( state_store: Arc>, block_store: Arc>, slot_clock: U, - genesis_time: u64, - latest_eth1_data: Eth1Data, - initial_validator_deposits: Vec, + mut genesis_state: BeaconState, + genesis_block: BeaconBlock, spec: ChainSpec, fork_choice: F, ) -> Result { - if initial_validator_deposits.is_empty() { - return Err(Error::InsufficientValidators); - } - - let mut genesis_state = BeaconState::genesis( - genesis_time, - initial_validator_deposits, - latest_eth1_data, - &spec, - )?; let state_root = genesis_state.canonical_root(); state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; - let genesis_block = BeaconBlock::genesis(state_root, &spec); let block_root = genesis_block.canonical_root(); block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 8ec07a994..c41f6fa1e 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -9,11 +9,12 @@ use fork_choice::BitwiseLMDGhost; use log::debug; use rayon::prelude::*; use slot_clock::TestingSlotClock; +use ssz::TreeHash; use std::collections::HashSet; use std::iter::FromIterator; use std::path::Path; use std::sync::Arc; -use types::*; +use types::{beacon_state::BeaconStateBuilder, *}; mod generate_deposits; mod load_deposits_from_file; @@ -67,15 +68,20 @@ impl BeaconChainHarness { (keypairs, deposits) }; + let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); + state_builder.process_initial_deposits(&initial_validator_deposits, &spec); + let genesis_state = state_builder.build(&spec).unwrap(); + let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + let genesis_block = BeaconBlock::genesis(state_root, &spec); + // Create the Beacon Chain let beacon_chain = Arc::new( - BeaconChain::genesis( + BeaconChain::from_genesis( state_store.clone(), block_store.clone(), slot_clock, - genesis_time, - latest_eth1_data, - 
initial_validator_deposits, + genesis_state, + genesis_block, spec.clone(), fork_choice, ) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 809408b32..f3d533527 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -114,18 +114,13 @@ pub struct BeaconState { impl BeaconState { /// Produce the first state of the Beacon Chain. - pub fn genesis_without_validators( - genesis_time: u64, - latest_eth1_data: Eth1Data, - spec: &ChainSpec, - ) -> Result { - debug!("Creating genesis state (without validator processing)."); + pub fn genesis(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> BeaconState { let initial_crosslink = Crosslink { epoch: spec.genesis_epoch, crosslink_data_root: spec.zero_hash, }; - Ok(BeaconState { + BeaconState { /* * Misc */ @@ -188,19 +183,15 @@ impl BeaconState { */ cache_index_offset: 0, caches: vec![EpochCache::empty(); CACHED_EPOCHS], - }) + } } /// Produce the first state of the Beacon Chain. - pub fn genesis( - genesis_time: u64, + pub fn process_initial_deposits( + &mut self, initial_validator_deposits: Vec, - latest_eth1_data: Eth1Data, spec: &ChainSpec, - ) -> Result { - let mut genesis_state = - BeaconState::genesis_without_validators(genesis_time, latest_eth1_data, spec)?; - + ) -> Result<(), Error> { debug!("Processing genesis deposits..."); let deposit_data = initial_validator_deposits @@ -208,29 +199,28 @@ impl BeaconState { .map(|deposit| &deposit.deposit_data) .collect(); - genesis_state.process_deposits(deposit_data, spec); + self.process_deposits(deposit_data, spec); trace!("Processed genesis deposits."); - for validator_index in 0..genesis_state.validator_registry.len() { - if genesis_state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount - { - genesis_state.activate_validator(validator_index, true, spec); + for validator_index in 0..self.validator_registry.len() { + if self.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount { + self.activate_validator(validator_index, true, spec); } } - genesis_state.deposit_index = initial_validator_deposits.len() as u64; + self.deposit_index = initial_validator_deposits.len() as u64; let genesis_active_index_root = hash_tree_root(get_active_validator_indices( - &genesis_state.validator_registry, + &self.validator_registry, spec.genesis_epoch, )); - genesis_state.latest_active_index_roots = + self.latest_active_index_roots = vec![genesis_active_index_root; spec.latest_active_index_roots_length]; - genesis_state.current_shuffling_seed = - genesis_state.generate_seed(spec.genesis_epoch, spec)?; - Ok(genesis_state) + self.current_shuffling_seed = self.generate_seed(spec.genesis_epoch, spec)?; + + Ok(()) } /// Returns the `hash_tree_root` of the state. diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs index 0be297db7..4bb5e2cc6 100644 --- a/eth2/types/src/beacon_state/builder.rs +++ b/eth2/types/src/beacon_state/builder.rs @@ -1,5 +1,9 @@ +use super::BeaconStateError; use crate::*; +use crate::{validator_registry::get_active_validator_indices, *}; use bls::create_proof_of_possession; +use rayon::prelude::*; +use ssz::TreeHash; /// Builds a `BeaconState` for use in testing or benchmarking. /// @@ -16,128 +20,73 @@ use bls::create_proof_of_possession; /// Step (4) produces a clone of the BeaconState and doesn't consume the `BeaconStateBuilder` to /// allow access to `self.keypairs` and `self.spec`. 
pub struct BeaconStateBuilder { - pub validator_count: usize, - pub state: Option, - pub genesis_time: u64, - pub latest_eth1_data: Eth1Data, - pub spec: ChainSpec, - pub keypairs: Vec, + pub state: BeaconState, } impl BeaconStateBuilder { /// Create a new builder with the given number of validators. - pub fn new(validator_count: usize) -> Self { - let genesis_time = 10_000_000; - - let latest_eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }; - - let spec = ChainSpec::foundation(); - + /// + /// Spec v0.4.0 + pub fn new(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> Self { Self { - validator_count, - state: None, - genesis_time, - latest_eth1_data, - spec, - keypairs: vec![], + state: BeaconState::genesis(genesis_time, latest_eth1_data, spec), } } + /// Produce the first state of the Beacon Chain. + /// + /// Spec v0.4.0 + pub fn process_initial_deposits( + &mut self, + initial_validator_deposits: &[Deposit], + spec: &ChainSpec, + ) { + let deposit_data = initial_validator_deposits + .par_iter() + .map(|deposit| &deposit.deposit_data) + .collect(); + + self.state.process_deposits(deposit_data, spec); + + for validator_index in 0..self.state.validator_registry.len() { + if self.state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount { + self.state.activate_validator(validator_index, true, spec); + } + } + + self.state.deposit_index = initial_validator_deposits.len() as u64; + } + /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function. /// /// Each validator is assigned a unique, randomly-generated keypair and all /// proof-of-possessions are verified during genesis. - pub fn build(&mut self) -> Result<(), BeaconStateError> { - self.keypairs = (0..self.validator_count) - .collect::>() - .iter() - .map(|_| Keypair::random()) - .collect(); + /// + /// Spec v0.4.0 + pub fn build(mut self, spec: &ChainSpec) -> Result { + let genesis_active_index_root = + get_active_validator_indices(&self.state.validator_registry, spec.genesis_epoch) + .hash_tree_root(); - let initial_validator_deposits = self - .keypairs - .iter() - .map(|keypair| Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: self.genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. - proof_of_possession: create_proof_of_possession(&keypair), - }, - }, - }) - .collect(); + self.state.latest_active_index_roots = vec![ + Hash256::from_slice(&genesis_active_index_root); + spec.latest_active_index_roots_length + ]; - let state = BeaconState::genesis( - self.genesis_time, - initial_validator_deposits, - self.latest_eth1_data.clone(), - &self.spec, - )?; + self.state.current_shuffling_seed = self.state.generate_seed(spec.genesis_epoch, spec)?; - self.state = Some(state); - - Ok(()) - } - - /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function, without supplying any - /// validators. Instead validators are added to the state post-genesis. - /// - /// One keypair is randomly generated and all validators are assigned this same keypair. - /// Proof-of-possessions are not created (or validated). 
- /// - /// This function runs orders of magnitude faster than `Self::build()`, however it will be - /// erroneous for functions which use a validators public key as an identifier (e.g., - /// deposits). - pub fn build_fast(&mut self) -> Result<(), BeaconStateError> { - let common_keypair = Keypair::random(); - - let mut validator_registry = Vec::with_capacity(self.validator_count); - let mut validator_balances = Vec::with_capacity(self.validator_count); - self.keypairs = Vec::with_capacity(self.validator_count); - - for _ in 0..self.validator_count { - self.keypairs.push(common_keypair.clone()); - validator_balances.push(32_000_000_000); - validator_registry.push(Validator { - pubkey: common_keypair.pk.clone(), - withdrawal_credentials: Hash256::zero(), - activation_epoch: self.spec.genesis_epoch, - ..Validator::default() - }) - } - - let state = BeaconState { - validator_registry, - validator_balances, - ..BeaconState::genesis( - self.genesis_time, - vec![], - self.latest_eth1_data.clone(), - &self.spec, - )? - }; - - self.state = Some(state); - - Ok(()) + Ok(self.state) } + /* /// Sets the `BeaconState` to be in the last slot of the given epoch. /// /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., /// highest justified and finalized slots, full justification bitfield, etc). - pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch) { - let state = self.state.as_mut().expect("Genesis required"); + pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { + let state = &mut self.state; - let slot = epoch.end_slot(self.spec.slots_per_epoch); + let slot = epoch.end_slot(spec.slots_per_epoch); state.slot = slot; state.validator_registry_update_epoch = epoch - 1; @@ -159,7 +108,7 @@ impl BeaconStateBuilder { /// /// These attestations should be fully conducive to justification and finalization. pub fn insert_attestations(&mut self) { - let state = self.state.as_mut().expect("Genesis required"); + let state = &mut self.state; state .build_epoch_cache(RelativeEpoch::Previous, &self.spec) @@ -198,8 +147,10 @@ impl BeaconStateBuilder { pub fn cloned_state(&self) -> BeaconState { self.state.as_ref().expect("Genesis required").clone() } + */ } +/* /// Builds a valid PendingAttestation with full participation for some committee. fn committee_to_pending_attestation( state: &BeaconState, @@ -261,3 +212,4 @@ fn committee_to_pending_attestation( inclusion_slot: slot, } } +*/ From ddac7540bc5f687b5e9aeee6b918c72f711f7b07 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 8 Mar 2019 16:10:21 +1100 Subject: [PATCH 11/56] Allow test_harness to load validators from file. 
--- .../test_harness/src/beacon_chain_harness.rs | 45 ++++++++++++---- .../beacon_chain/test_harness/src/prepare.rs | 52 +++++++++++++------ eth2/types/Cargo.toml | 1 + eth2/types/src/beacon_state/builder.rs | 40 +++++++++++--- eth2/types/src/slot_epoch.rs | 6 +-- eth2/types/src/validator.rs | 4 +- 6 files changed, 109 insertions(+), 39 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index c41f6fa1e..1ebe4dc74 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -11,6 +11,7 @@ use rayon::prelude::*; use slot_clock::TestingSlotClock; use ssz::TreeHash; use std::collections::HashSet; +use std::fs::File; use std::iter::FromIterator; use std::path::Path; use std::sync::Arc; @@ -54,22 +55,44 @@ impl BeaconChainHarness { block_hash: Hash256::zero(), }; - let (keypairs, initial_validator_deposits) = if let Some(path) = validators_dir { - let keypairs_path = path.join("keypairs.yaml"); - let deposits_path = path.join("deposits.yaml"); - load_deposits_from_file( - validator_count, - &keypairs_path.as_path(), - &deposits_path.as_path(), - ) + let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); + + // If a `validators_dir` is specified, load the keypairs and validators from YAML files. + // + // Otherwise, build all the keypairs and initial validator deposits manually. + // + // It is _much_ faster to load from YAML, however it does skip all the initial processing + // and verification of `Deposits`, so it is a slightly less comprehensive test. + let keypairs = if let Some(path) = validators_dir { + debug!("Loading validator keypairs from file..."); + let keypairs_file = File::open(path.join("keypairs.yaml")).unwrap(); + let mut keypairs: Vec = serde_yaml::from_reader(&keypairs_file).unwrap(); + keypairs.truncate(validator_count); + + debug!("Loading validators from file..."); + let validators_file = File::open(path.join("validators.yaml")).unwrap(); + let mut validators: Vec = serde_yaml::from_reader(&validators_file).unwrap(); + validators.truncate(validator_count); + + let balances = vec![32_000_000_000; validator_count]; + + state_builder.import_existing_validators( + validators, + balances, + validator_count as u64, + &spec, + ); + + keypairs } else { + debug!("Generating validator keypairs..."); let keypairs = generate_deterministic_keypairs(validator_count); + debug!("Generating initial validator deposits..."); let deposits = generate_deposits_from_keypairs(&keypairs, genesis_time, &spec); - (keypairs, deposits) + state_builder.process_initial_deposits(&deposits, &spec); + keypairs }; - let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); - state_builder.process_initial_deposits(&initial_validator_deposits, &spec); let genesis_state = state_builder.build(&spec).unwrap(); let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); let genesis_block = BeaconBlock::genesis(state_root, &spec); diff --git a/beacon_node/beacon_chain/test_harness/src/prepare.rs b/beacon_node/beacon_chain/test_harness/src/prepare.rs index 0a4f4c34f..36a99317f 100644 --- a/beacon_node/beacon_chain/test_harness/src/prepare.rs +++ b/beacon_node/beacon_chain/test_harness/src/prepare.rs @@ -1,6 +1,5 @@ -use crate::beacon_chain_harness::{ - generate_deposits_from_keypairs, generate_deterministic_keypairs, -}; +use 
crate::beacon_chain_harness::generate_deterministic_keypairs; +use bls::get_withdrawal_credentials; use clap::{value_t, ArgMatches}; use log::debug; use serde_yaml; @@ -9,33 +8,52 @@ use std::{fs, fs::File}; use types::*; const KEYPAIRS_FILE: &str = "keypairs.yaml"; -const DEPOSITS_FILE: &str = "deposits.yaml"; +const VALIDATORS_FILE: &str = "validators.yaml"; pub fn prepare(matches: &ArgMatches, spec: &ChainSpec) { let validator_count = value_t!(matches.value_of("validator_count"), usize) .expect("Validator count is required argument"); - let genesis_time = - value_t!(matches.value_of("genesis_time"), u64).expect("Genesis time is required argument"); let output_dir = matches .value_of("output_dir") .expect("Output dir has a default value."); - debug!("Created keypairs and deposits, writing to file..."); + debug!("Created keypairs and validators, writing to file..."); fs::create_dir_all(Path::new(output_dir)).unwrap(); - // Ensure that keypairs is dropped before writing deposits, this provides a big memory saving + // Ensure that keypairs is dropped before writing validators, this provides a big memory saving // for large validator_counts. - let deposits = { + let validators: Vec = { debug!("Creating {} keypairs...", validator_count); let keypairs = generate_deterministic_keypairs(validator_count); debug!("Writing {} keypairs to file...", validator_count); write_keypairs(output_dir, &keypairs); - debug!("Creating {} deposits to file...", validator_count); - generate_deposits_from_keypairs(&keypairs, genesis_time, &spec) + debug!("Creating {} validators...", validator_count); + keypairs + .iter() + .map(|keypair| generate_validator(&keypair, spec)) + .collect() }; - debug!("Writing {} deposits to file...", validator_count); - write_deposits(output_dir, &deposits); + + debug!("Writing {} validators to file...", validator_count); + write_validators(output_dir, &validators); +} + +fn generate_validator(keypair: &Keypair, spec: &ChainSpec) -> Validator { + let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( + &keypair.pk, + spec.bls_withdrawal_prefix_byte, + )); + + Validator { + pubkey: keypair.pk.clone(), + withdrawal_credentials, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + initiated_exit: false, + slashed: false, + } } fn write_keypairs(output_dir: &str, keypairs: &[Keypair]) { @@ -44,8 +62,8 @@ fn write_keypairs(output_dir: &str, keypairs: &[Keypair]) { serde_yaml::to_writer(keypairs_file, &keypairs).unwrap(); } -fn write_deposits(output_dir: &str, deposits: &[Deposit]) { - let deposits_path = Path::new(output_dir).join(DEPOSITS_FILE); - let deposits_file = File::create(deposits_path).unwrap(); - serde_yaml::to_writer(deposits_file, &deposits).unwrap(); +fn write_validators(output_dir: &str, validators: &[Validator]) { + let validators_path = Path::new(output_dir).join(VALIDATORS_FILE); + let validators_file = File::create(validators_path).unwrap(); + serde_yaml::to_writer(validators_file, &validators).unwrap(); } diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index ea1343dba..e2930040d 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -17,6 +17,7 @@ rand = "0.5.5" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" +serde_yaml = "0.8" slog = "^2.2.3" ssz = { path = "../utils/ssz" } ssz_derive = { path = "../utils/ssz_derive" } diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs index 4bb5e2cc6..f6d7b3900 100644 
--- a/eth2/types/src/beacon_state/builder.rs +++ b/eth2/types/src/beacon_state/builder.rs @@ -33,7 +33,7 @@ impl BeaconStateBuilder { } } - /// Produce the first state of the Beacon Chain. + /// Process deposit objects. /// /// Spec v0.4.0 pub fn process_initial_deposits( @@ -48,19 +48,47 @@ impl BeaconStateBuilder { self.state.process_deposits(deposit_data, spec); + self.activate_genesis_validators(spec); + + self.state.deposit_index = initial_validator_deposits.len() as u64; + } + + fn activate_genesis_validators(&mut self, spec: &ChainSpec) { for validator_index in 0..self.state.validator_registry.len() { if self.state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount { self.state.activate_validator(validator_index, true, spec); } } - - self.state.deposit_index = initial_validator_deposits.len() as u64; } - /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function. + /// Instantiate the validator registry from a YAML file. /// - /// Each validator is assigned a unique, randomly-generated keypair and all - /// proof-of-possessions are verified during genesis. + /// This skips a lot of signing and verification, useful for fast test setups. + /// + /// Spec v0.4.0 + pub fn import_existing_validators( + &mut self, + validators: Vec, + initial_balances: Vec, + deposit_index: u64, + spec: &ChainSpec, + ) { + self.state.validator_registry = validators; + + assert_eq!( + self.state.validator_registry.len(), + initial_balances.len(), + "Not enough balances for validators" + ); + + self.state.validator_balances = initial_balances; + + self.activate_genesis_validators(spec); + + self.state.deposit_index = deposit_index; + } + + /// Updates the final state variables and returns a fully built genesis state. /// /// Spec v0.4.0 pub fn build(mut self, spec: &ChainSpec) -> Result { diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 7753027a6..2af7f5196 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -12,7 +12,7 @@ use crate::slot_height::SlotHeight; /// may lead to programming errors which are not detected by the compiler. use crate::test_utils::TestRandom; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use slog; use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use std::cmp::{Ord, Ordering}; @@ -21,10 +21,10 @@ use std::hash::{Hash, Hasher}; use std::iter::Iterator; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; -#[derive(Eq, Debug, Clone, Copy, Default, Serialize)] +#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] pub struct Slot(u64); -#[derive(Eq, Debug, Clone, Copy, Default, Serialize)] +#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] pub struct Epoch(u64); impl_common!(Slot); diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 43701ca05..59f6c5826 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -1,13 +1,13 @@ use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey}; use rand::RngCore; -use serde_derive::Serialize; +use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; /// Information about a `BeaconChain` validator. 
/// /// Spec v0.4.0 -#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TestRandom, TreeHash)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] pub struct Validator { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, From 63743a962c60ca5100054ad7aa38e8300f8ef9cd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 9 Mar 2019 10:37:41 +1100 Subject: [PATCH 12/56] Add per-epoch benchmarks, optimise function. --- Cargo.toml | 1 + eth2/state_processing/Cargo.toml | 2 + eth2/state_processing/benches/benches.rs | 289 +++++++++++++++-- .../benching_utils/Cargo.toml | 17 + .../benching_utils/src/lib.rs | 195 +++++++++++ .../src/per_epoch_processing.rs | 302 +++++++++++------- .../src/per_epoch_processing/attester_sets.rs | 4 +- .../src/per_epoch_processing/errors.rs | 2 + .../src/per_epoch_processing/tests.rs | 15 +- eth2/types/src/beacon_state/helpers.rs | 2 +- 10 files changed, 670 insertions(+), 159 deletions(-) create mode 100644 eth2/state_processing/benching_utils/Cargo.toml create mode 100644 eth2/state_processing/benching_utils/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index c5aae7f43..8f4dbb268 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "eth2/block_proposer", "eth2/fork_choice", "eth2/state_processing", + "eth2/state_processing/benching_utils", "eth2/types", "eth2/utils/bls", "eth2/utils/boolean-bitfield", diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index c51ce8372..962d23a77 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -11,9 +11,11 @@ harness = false [dev-dependencies] criterion = "0.2" env_logger = "0.6.0" +benching_utils = { path = "./benching_utils" } [dependencies] bls = { path = "../utils/bls" } +fnv = "1.0" hashing = { path = "../utils/hashing" } int_to_bytes = { path = "../utils/int_to_bytes" } integer-sqrt = "0.1" diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 682259eef..5c064a08f 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,60 +1,291 @@ use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use state_processing::{ + per_epoch_processing, + per_epoch_processing::{ + calculate_active_validator_indices, calculate_attester_sets, clean_attestations, + process_crosslinks, process_eth1_data, process_justification, + process_rewards_and_penalities, process_validator_registry, update_active_tree_index_roots, + update_latest_slashed_balances, + }, +}; // use env_logger::{Builder, Env}; -use state_processing::SlotProcessable; -use types::beacon_state::BeaconStateBuilder; -use types::*; +use benching_utils::BeaconStateBencher; +use types::{validator_registry::get_active_validator_indices, *}; fn epoch_processing(c: &mut Criterion) { // Builder::from_env(Env::default().default_filter_or("debug")).init(); + // + let spec = ChainSpec::foundation(); - let mut builder = BeaconStateBuilder::new(16_384); + let validator_count = 16_384; - builder.build_fast().unwrap(); - builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4); - - let mut state = builder.cloned_state(); + let mut builder = BeaconStateBencher::new(validator_count, &spec); + builder.teleport_to_end_of_epoch(spec.genesis_epoch + 4, &spec); + builder.insert_attestations(&spec); + let mut state = builder.build(); // Build all the caches so the following state does _not_ include the 
cache-building time. state - .build_epoch_cache(RelativeEpoch::Previous, &builder.spec) + .build_epoch_cache(RelativeEpoch::Previous, &spec) .unwrap(); state - .build_epoch_cache(RelativeEpoch::Current, &builder.spec) - .unwrap(); - state - .build_epoch_cache(RelativeEpoch::Next, &builder.spec) + .build_epoch_cache(RelativeEpoch::Current, &spec) .unwrap(); + state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap(); - let cached_state = state.clone(); + // Assert that the state has the maximum possible attestations. + let committees_per_epoch = spec.get_epoch_committee_count(validator_count); + let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; + let previous_epoch_attestations = committees_per_epoch; + let current_epoch_attestations = + committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay); + assert_eq!( + state.latest_attestations.len() as u64, + previous_epoch_attestations + current_epoch_attestations + ); - // Drop all the caches so the following state includes the cache-building time. - state.drop_cache(RelativeEpoch::Previous); - state.drop_cache(RelativeEpoch::Current); - state.drop_cache(RelativeEpoch::Next); + // Assert that each attestation in the state has full participation. + let committee_size = validator_count / committees_per_epoch as usize; + for a in &state.latest_attestations { + assert_eq!(a.aggregation_bitfield.num_set_bits(), committee_size); + } - let cacheless_state = state; + // Assert that we will run the first arm of process_rewards_and_penalities + let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; + assert!(epochs_since_finality <= 4); - let spec_a = builder.spec.clone(); - let spec_b = builder.spec.clone(); + bench_epoch_processing(c, &state, &spec, "16k_validators"); +} +fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { + let state_clone = state.clone(); + let spec_clone = spec.clone(); c.bench( - "epoch processing", - Benchmark::new("with pre-built caches", move |b| { + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("full run", move |b| { b.iter_with_setup( - || cached_state.clone(), - |mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_a).unwrap()), + || state_clone.clone(), + |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), ) }) .sample_size(10), ); + let state_clone = state.clone(); + let spec_clone = spec.clone(); c.bench( - "epoch processing", - Benchmark::new("without pre-built caches", move |b| { + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("calculate_active_validator_indices", move |b| { b.iter_with_setup( - || cacheless_state.clone(), - |mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_b).unwrap()), + || state_clone.clone(), + |mut state| black_box(calculate_active_validator_indices(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("calculate_current_total_balance", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |state| { + black_box(state.get_total_balance(&active_validator_indices[..], &spec_clone)) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", 
desc), + Benchmark::new("calculate_previous_total_balance", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |state| { + black_box(state.get_total_balance( + &get_active_validator_indices( + &state.validator_registry, + state.previous_epoch(&spec_clone), + )[..], + &spec_clone, + )) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_eth1_data", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(process_eth1_data(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("calculate_attester_sets", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(calculate_attester_sets(&mut state, &spec_clone).unwrap()), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + let previous_epoch = state.previous_epoch(&spec); + let attesters = calculate_attester_sets(&state, &spec).unwrap(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let current_total_balance = state.get_total_balance(&active_validator_indices[..], &spec); + let previous_total_balance = state.get_total_balance( + &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], + &spec, + ); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_justification", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| { + black_box(process_justification( + &mut state, + current_total_balance, + previous_total_balance, + attesters.previous_epoch_boundary.balance, + attesters.current_epoch_boundary.balance, + &spec_clone, + )) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_crosslinks", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()), + ) + }) + .sample_size(10), + ); + + let mut state_clone = state.clone(); + let spec_clone = spec.clone(); + let previous_epoch = state.previous_epoch(&spec); + let attesters = calculate_attester_sets(&state, &spec).unwrap(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let previous_total_balance = state.get_total_balance( + &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], + &spec, + ); + let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_rewards_and_penalties", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| { + black_box( + process_rewards_and_penalities( + &mut state, + &active_validator_indices, + &attesters, + previous_total_balance, + &winning_root_for_shards, + &spec_clone, + ) + .unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_ejections", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(state.process_ejections(&spec_clone)), + ) + }) + .sample_size(10), + ); + 
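Every stanza in this file follows the same criterion shape: the fully prepared state is cloned inside `iter_with_setup` so the per-iteration clone is excluded from the measurement, `black_box` prevents the optimiser from discarding the result, and `sample_size(10)` keeps total run time tolerable for a 16k-validator state. A minimal, self-contained sketch of that shape (using a plain `Vec<u64>` in place of the `BeaconState`, purely for illustration) is:

    use criterion::{black_box, Benchmark, Criterion};

    fn bench_sort_pattern(c: &mut Criterion) {
        // Expensive-to-build input, prepared once outside the measurement loop.
        let data: Vec<u64> = (0..16_384).rev().collect();
        c.bench(
            "pattern_demo",
            Benchmark::new("sort", move |b| {
                b.iter_with_setup(
                    || data.clone(),             // setup: cloned fresh each iteration, not timed
                    |mut d| black_box(d.sort()), // routine: the only code that is timed
                )
            })
            .sample_size(10),
        );
    }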
+ let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_validator_registry", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(process_validator_registry(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("update_active_tree_index_roots", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| { + black_box(update_active_tree_index_roots(&mut state, &spec_clone).unwrap()) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("update_latest_slashed_balances", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(update_latest_slashed_balances(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("clean_attestations", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(clean_attestations(&mut state, &spec_clone)), ) }) .sample_size(10), diff --git a/eth2/state_processing/benching_utils/Cargo.toml b/eth2/state_processing/benching_utils/Cargo.toml new file mode 100644 index 000000000..00815406a --- /dev/null +++ b/eth2/state_processing/benching_utils/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "benching_utils" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +bls = { path = "../../utils/bls" } +hashing = { path = "../../utils/hashing" } +int_to_bytes = { path = "../../utils/int_to_bytes" } +integer-sqrt = "0.1" +log = "0.4" +merkle_proof = { path = "../../utils/merkle_proof" } +ssz = { path = "../../utils/ssz" } +ssz_derive = { path = "../../utils/ssz_derive" } +types = { path = "../../types" } +rayon = "1.0" diff --git a/eth2/state_processing/benching_utils/src/lib.rs b/eth2/state_processing/benching_utils/src/lib.rs new file mode 100644 index 000000000..c70b7828a --- /dev/null +++ b/eth2/state_processing/benching_utils/src/lib.rs @@ -0,0 +1,195 @@ +use bls::get_withdrawal_credentials; +use int_to_bytes::int_to_bytes48; +use rayon::prelude::*; +use types::beacon_state::BeaconStateBuilder; +use types::*; + +pub struct BeaconStateBencher { + state: BeaconState, +} + +impl BeaconStateBencher { + pub fn new(validator_count: usize, spec: &ChainSpec) -> Self { + let keypairs: Vec = (0..validator_count) + .collect::>() + .par_iter() + .map(|&i| { + let secret = int_to_bytes48(i as u64 + 1); + let sk = SecretKey::from_bytes(&secret).unwrap(); + let pk = PublicKey::from_secret_key(&sk); + Keypair { sk, pk } + }) + .collect(); + + let validators = keypairs + .iter() + .map(|keypair| { + let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( + &keypair.pk, + spec.bls_withdrawal_prefix_byte, + )); + + Validator { + pubkey: keypair.pk.clone(), + withdrawal_credentials, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + initiated_exit: false, + slashed: false, + } + }) + .collect(); + + let mut state_builder = BeaconStateBuilder::new( + 0, + Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }, + spec, + ); + 
+ let balances = vec![32_000_000_000; validator_count]; + + state_builder.import_existing_validators( + validators, + balances, + validator_count as u64, + spec, + ); + + Self { + state: state_builder.build(spec).unwrap(), + } + } + + pub fn build(self) -> BeaconState { + self.state + } + + /// Sets the `BeaconState` to be in the last slot of the given epoch. + /// + /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., + /// highest justified and finalized slots, full justification bitfield, etc). + pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { + let state = &mut self.state; + + let slot = epoch.end_slot(spec.slots_per_epoch); + + state.slot = slot; + state.validator_registry_update_epoch = epoch - 1; + + state.previous_shuffling_epoch = epoch - 1; + state.current_shuffling_epoch = epoch; + + state.previous_shuffling_seed = Hash256::from_low_u64_le(0); + state.current_shuffling_seed = Hash256::from_low_u64_le(1); + + state.previous_justified_epoch = epoch - 2; + state.justified_epoch = epoch - 1; + state.justification_bitfield = u64::max_value(); + state.finalized_epoch = epoch - 1; + } + + /// Creates a full set of attestations for the `BeaconState`. Each attestation has full + /// participation from its committee and references the expected beacon_block hashes. + /// + /// These attestations should be fully conducive to justification and finalization. + pub fn insert_attestations(&mut self, spec: &ChainSpec) { + let state = &mut self.state; + + state + .build_epoch_cache(RelativeEpoch::Previous, spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Current, spec) + .unwrap(); + + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let first_slot = previous_epoch.start_slot(spec.slots_per_epoch).as_u64(); + let last_slot = current_epoch.end_slot(spec.slots_per_epoch).as_u64() + - spec.min_attestation_inclusion_delay; + let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); + + for slot in first_slot..last_slot + 1 { + let slot = Slot::from(slot); + + let committees = state + .get_crosslink_committees_at_slot(slot, spec) + .unwrap() + .clone(); + + for (committee, shard) in committees { + state + .latest_attestations + .push(committee_to_pending_attestation( + state, &committee, shard, slot, spec, + )) + } + } + } +} + +fn committee_to_pending_attestation( + state: &BeaconState, + committee: &[usize], + shard: u64, + slot: Slot, + spec: &ChainSpec, +) -> PendingAttestation { + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let mut aggregation_bitfield = Bitfield::new(); + let mut custody_bitfield = Bitfield::new(); + + for (i, _) in committee.iter().enumerate() { + aggregation_bitfield.set(i, true); + custody_bitfield.set(i, true); + } + + let is_previous_epoch = + state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); + + let justified_epoch = if is_previous_epoch { + state.previous_justified_epoch + } else { + state.justified_epoch + }; + + let epoch_boundary_root = if is_previous_epoch { + *state + .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + } else { + *state + .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + }; + + let justified_block_root = *state + .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap(); + + PendingAttestation { + aggregation_bitfield, + data: 
AttestationData { + slot, + shard, + beacon_block_root: *state.get_block_root(slot, spec).unwrap(), + epoch_boundary_root, + crosslink_data_root: Hash256::zero(), + latest_crosslink: Crosslink { + epoch: slot.epoch(spec.slots_per_epoch), + crosslink_data_root: Hash256::zero(), + }, + justified_epoch, + justified_block_root, + }, + custody_bitfield, + inclusion_slot: slot + spec.min_attestation_inclusion_delay, + } +} diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index bd8aca3c4..99275bd10 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,11 +1,12 @@ use attester_sets::AttesterSets; use errors::EpochProcessingError as Error; +use fnv::FnvHashSet; use inclusion_distance::{inclusion_distance, inclusion_slot}; use integer_sqrt::IntegerSquareRoot; use log::debug; use rayon::prelude::*; use ssz::TreeHash; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::iter::FromIterator; use types::{validator_registry::get_active_validator_indices, *}; use winning_root::{winning_root, WinningRoot}; @@ -17,9 +18,7 @@ pub mod tests; pub mod winning_root; pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - let current_epoch = state.current_epoch(spec); let previous_epoch = state.previous_epoch(spec); - let next_epoch = state.next_epoch(spec); debug!( "Starting per-epoch processing on epoch {}...", @@ -31,14 +30,12 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Next, spec)?; - let attesters = AttesterSets::new(&state, spec)?; + let attesters = calculate_attester_sets(&state, spec)?; - let active_validator_indices = get_active_validator_indices( - &state.validator_registry, - state.slot.epoch(spec.slots_per_epoch), - ); + let active_validator_indices = calculate_active_validator_indices(&state, spec); let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); + let previous_total_balance = state.get_total_balance( &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], spec, @@ -59,11 +56,9 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result let winning_root_for_shards = process_crosslinks(state, spec)?; // Rewards and Penalities - let active_validator_indices_hashset: HashSet = - HashSet::from_iter(active_validator_indices.iter().cloned()); process_rewards_and_penalities( state, - active_validator_indices_hashset, + &active_validator_indices, &attesters, previous_total_balance, &winning_root_for_shards, @@ -77,27 +72,9 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result process_validator_registry(state, spec)?; // Final updates - let active_tree_root = get_active_validator_indices( - &state.validator_registry, - next_epoch + Epoch::from(spec.activation_exit_delay), - ) - .hash_tree_root(); - state.latest_active_index_roots[(next_epoch.as_usize() - + spec.activation_exit_delay as usize) - % spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]); - - state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] = - state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length]; - state.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = state - 
.get_randao_mix(current_epoch, spec) - .and_then(|x| Some(*x)) - .ok_or_else(|| Error::NoRandaoSeed)?; - state.latest_attestations = state - .latest_attestations - .iter() - .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch) - .cloned() - .collect(); + update_active_tree_index_roots(state, spec)?; + update_latest_slashed_balances(state, spec); + clean_attestations(state, spec); // Rotate the epoch caches to suit the epoch transition. state.advance_caches(); @@ -107,8 +84,22 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result Ok(()) } +pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) -> Vec { + get_active_validator_indices( + &state.validator_registry, + state.slot.epoch(spec.slots_per_epoch), + ) +} + +pub fn calculate_attester_sets( + state: &BeaconState, + spec: &ChainSpec, +) -> Result { + AttesterSets::new(&state, spec) +} + /// Spec v0.4.0 -fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { +pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { let next_epoch = state.next_epoch(spec); let voting_period = spec.epochs_per_eth1_voting_period; @@ -123,7 +114,7 @@ fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { } /// Spec v0.4.0 -fn process_justification( +pub fn process_justification( state: &mut BeaconState, current_total_balance: u64, previous_total_balance: u64, @@ -201,7 +192,7 @@ fn process_justification( pub type WinningRootHashSet = HashMap; -fn process_crosslinks( +pub fn process_crosslinks( state: &mut BeaconState, spec: &ChainSpec, ) -> Result { @@ -260,9 +251,9 @@ fn process_crosslinks( } /// Spec v0.4.0 -fn process_rewards_and_penalities( +pub fn process_rewards_and_penalities( state: &mut BeaconState, - active_validator_indices: HashSet, + active_validator_indices: &[usize], attesters: &AttesterSets, previous_total_balance: u64, winning_root_for_shards: &WinningRootHashSet, @@ -270,6 +261,9 @@ fn process_rewards_and_penalities( ) -> Result<(), Error> { let next_epoch = state.next_epoch(spec); + let active_validator_indices: FnvHashSet = + FnvHashSet::from_iter(active_validator_indices.iter().cloned()); + let previous_epoch_attestations: Vec<&PendingAttestation> = state .latest_attestations .par_iter() @@ -281,95 +275,126 @@ fn process_rewards_and_penalities( if base_reward_quotient == 0 { return Err(Error::BaseRewardQuotientIsZero); } + if previous_total_balance == 0 { + return Err(Error::PreviousTotalBalanceIsZero); + } // Justification and finalization let epochs_since_finality = next_epoch - state.finalized_epoch; if epochs_since_finality <= 4 { - for index in 0..state.validator_balances.len() { - let base_reward = state.base_reward(index, base_reward_quotient, spec); + state.validator_balances = state + .validator_balances + .par_iter() + .enumerate() + .map(|(index, &balance)| { + let mut balance = balance; + let base_reward = state.base_reward(index, base_reward_quotient, spec); - // Expected FFG source - if attesters.previous_epoch.indices.contains(&index) { - safe_add_assign!( - state.validator_balances[index], - base_reward * attesters.previous_epoch.balance / previous_total_balance - ); - } else if active_validator_indices.contains(&index) { - safe_sub_assign!(state.validator_balances[index], base_reward); - } - - // Expected FFG target - if attesters.previous_epoch_boundary.indices.contains(&index) { - safe_add_assign!( - state.validator_balances[index], - base_reward * attesters.previous_epoch_boundary.balance - / 
previous_total_balance - ); - } else if active_validator_indices.contains(&index) { - safe_sub_assign!(state.validator_balances[index], base_reward); - } - - // Expected beacon chain head - if attesters.previous_epoch_head.indices.contains(&index) { - safe_add_assign!( - state.validator_balances[index], - base_reward * attesters.previous_epoch_head.balance / previous_total_balance - ); - } else if active_validator_indices.contains(&index) { - safe_sub_assign!(state.validator_balances[index], base_reward); - } - } - - // Inclusion distance - for &index in &attesters.previous_epoch.indices { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - let inclusion_distance = - inclusion_distance(state, &previous_epoch_attestations, index, spec)?; - - safe_add_assign!( - state.validator_balances[index], - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance - ) - } - } else { - for index in 0..state.validator_balances.len() { - let inactivity_penalty = - state.inactivity_penalty(index, epochs_since_finality, base_reward_quotient, spec); - - if active_validator_indices.contains(&index) { - if !attesters.previous_epoch.indices.contains(&index) { - safe_sub_assign!(state.validator_balances[index], inactivity_penalty); - } - if !attesters.previous_epoch_boundary.indices.contains(&index) { - safe_sub_assign!(state.validator_balances[index], inactivity_penalty); - } - if !attesters.previous_epoch_head.indices.contains(&index) { - safe_sub_assign!(state.validator_balances[index], inactivity_penalty); - } - - if state.validator_registry[index].slashed { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - safe_sub_assign!( - state.validator_balances[index], - 2 * inactivity_penalty + base_reward + // Expected FFG source + if attesters.previous_epoch.indices.contains(&index) { + safe_add_assign!( + balance, + base_reward * attesters.previous_epoch.balance / previous_total_balance ); + } else if active_validator_indices.contains(&index) { + safe_sub_assign!(balance, base_reward); } - } - } - for &index in &attesters.previous_epoch.indices { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - let inclusion_distance = - inclusion_distance(state, &previous_epoch_attestations, index, spec)?; + // Expected FFG target + if attesters.previous_epoch_boundary.indices.contains(&index) { + safe_add_assign!( + balance, + base_reward * attesters.previous_epoch_boundary.balance + / previous_total_balance + ); + } else if active_validator_indices.contains(&index) { + safe_sub_assign!(balance, base_reward); + } - safe_sub_assign!( - state.validator_balances[index], - base_reward - - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance - ); - } + // Expected beacon chain head + if attesters.previous_epoch_head.indices.contains(&index) { + safe_add_assign!( + balance, + base_reward * attesters.previous_epoch_head.balance + / previous_total_balance + ); + } else if active_validator_indices.contains(&index) { + safe_sub_assign!(balance, base_reward); + }; + + if attesters.previous_epoch.indices.contains(&index) { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + let inclusion_distance = + inclusion_distance(state, &previous_epoch_attestations, index, spec); + + if let Ok(inclusion_distance) = inclusion_distance { + if inclusion_distance > 0 { + safe_add_assign!( + balance, + base_reward * spec.min_attestation_inclusion_delay + / inclusion_distance + ) + } + } + } + + balance + }) + .collect(); + } 
else { + state.validator_balances = state + .validator_balances + .par_iter() + .enumerate() + .map(|(index, &balance)| { + let mut balance = balance; + + let inactivity_penalty = state.inactivity_penalty( + index, + epochs_since_finality, + base_reward_quotient, + spec, + ); + + if active_validator_indices.contains(&index) { + if !attesters.previous_epoch.indices.contains(&index) { + safe_sub_assign!(balance, inactivity_penalty); + } + if !attesters.previous_epoch_boundary.indices.contains(&index) { + safe_sub_assign!(balance, inactivity_penalty); + } + if !attesters.previous_epoch_head.indices.contains(&index) { + safe_sub_assign!(balance, inactivity_penalty); + } + + if state.validator_registry[index].slashed { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + safe_sub_assign!(balance, 2 * inactivity_penalty + base_reward); + } + } + + if attesters.previous_epoch.indices.contains(&index) { + let base_reward = state.base_reward(index, base_reward_quotient, spec); + let inclusion_distance = + inclusion_distance(state, &previous_epoch_attestations, index, spec); + + if let Ok(inclusion_distance) = inclusion_distance { + if inclusion_distance > 0 { + safe_sub_assign!( + balance, + base_reward + - base_reward * spec.min_attestation_inclusion_delay + / inclusion_distance + ); + } + } + } + + balance + }) + .collect(); } // Attestation inclusion @@ -413,8 +438,8 @@ fn process_rewards_and_penalities( if let Some(winning_root) = winning_root_for_shards.get(&shard) { // Hash set de-dedups and (hopefully) offers a speed improvement from faster // lookups. - let attesting_validator_indices: HashSet = - HashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned()); + let attesting_validator_indices: FnvHashSet = + FnvHashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned()); for &index in &crosslink_committee { let base_reward = state.base_reward(index, base_reward_quotient, spec); @@ -444,7 +469,7 @@ fn process_rewards_and_penalities( } // Spec v0.4.0 -fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { +pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); let next_epoch = state.next_epoch(spec); @@ -489,3 +514,44 @@ fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Resu Ok(()) } + +// Spec v0.4.0 +pub fn update_active_tree_index_roots( + state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let next_epoch = state.next_epoch(spec); + + let active_tree_root = get_active_validator_indices( + &state.validator_registry, + next_epoch + Epoch::from(spec.activation_exit_delay), + ) + .hash_tree_root(); + + state.latest_active_index_roots[(next_epoch.as_usize() + + spec.activation_exit_delay as usize) + % spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]); + + Ok(()) +} + +// Spec v0.4.0 +pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) { + let current_epoch = state.current_epoch(spec); + let next_epoch = state.next_epoch(spec); + + state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] = + state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length]; +} + +// Spec v0.4.0 +pub fn clean_attestations(state: &mut BeaconState, spec: &ChainSpec) { + let current_epoch = state.current_epoch(spec); + + state.latest_attestations = state + 
.latest_attestations + .iter() + .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch) + .cloned() + .collect(); +} diff --git a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs index 2b674e1bc..1252d8057 100644 --- a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs +++ b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs @@ -1,9 +1,9 @@ -use std::collections::HashSet; +use fnv::FnvHashSet; use types::*; #[derive(Default)] pub struct Attesters { - pub indices: HashSet, + pub indices: FnvHashSet, pub balance: u64, } diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index 51e9b253c..7d8a5800d 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -6,6 +6,8 @@ pub enum EpochProcessingError { NoBlockRoots, BaseRewardQuotientIsZero, NoRandaoSeed, + PreviousTotalBalanceIsZero, + InclusionDistanceZero, BeaconStateError(BeaconStateError), InclusionError(InclusionError), } diff --git a/eth2/state_processing/src/per_epoch_processing/tests.rs b/eth2/state_processing/src/per_epoch_processing/tests.rs index 627df858b..8ff687904 100644 --- a/eth2/state_processing/src/per_epoch_processing/tests.rs +++ b/eth2/state_processing/src/per_epoch_processing/tests.rs @@ -1,21 +1,18 @@ #![cfg(test)] use crate::per_epoch_processing; +use benching_utils::BeaconStateBencher; use env_logger::{Builder, Env}; -use types::beacon_state::BeaconStateBuilder; use types::*; #[test] fn runs_without_error() { Builder::from_env(Env::default().default_filter_or("error")).init(); - let mut builder = BeaconStateBuilder::new(8); - builder.spec = ChainSpec::few_validators(); + let spec = ChainSpec::few_validators(); - builder.build().unwrap(); - builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4); + let mut builder = BeaconStateBencher::new(8, &spec); + builder.teleport_to_end_of_epoch(spec.genesis_epoch + 4, &spec); + let mut state = builder.build(); - let mut state = builder.cloned_state(); - - let spec = &builder.spec; - per_epoch_processing(&mut state, spec).unwrap(); + per_epoch_processing(&mut state, &spec).unwrap(); } diff --git a/eth2/types/src/beacon_state/helpers.rs b/eth2/types/src/beacon_state/helpers.rs index c93b16f76..adae7bab4 100644 --- a/eth2/types/src/beacon_state/helpers.rs +++ b/eth2/types/src/beacon_state/helpers.rs @@ -11,7 +11,7 @@ pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> boo } for i in committee_size..(bitfield.num_bytes() * 8) { - if bitfield.get(i).expect("Impossible due to previous check.") { + if bitfield.get(i).unwrap_or(false) { return false; } } From a77d1885a1ab2ab72b63f725754f5594ed0d33ff Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 9 Mar 2019 10:39:05 +1100 Subject: [PATCH 13/56] Refactor BeaconStateBuilder Made it a production-only struct. 
All the testing stuff can be done with BeaconStateBencher
---
 .../test_harness/src/beacon_chain_harness.rs | 56 ++++++++++++++-----
 .../test_harness/src/test_case.rs            |  2 +-
 2 files changed, 42 insertions(+), 16 deletions(-)

diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs
index 1ebe4dc74..d2274ac69 100644
--- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs
+++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs
@@ -1,6 +1,7 @@
 use super::ValidatorHarness;
 use beacon_chain::{BeaconChain, BlockProcessingOutcome};
 pub use beacon_chain::{BeaconChainError, CheckPoint};
+use bls::get_withdrawal_credentials;
 use db::{
     stores::{BeaconBlockStore, BeaconStateStore},
     MemoryDB,
 };
@@ -43,7 +44,12 @@ impl BeaconChainHarness {
     ///
     /// - A keypair, `BlockProducer` and `Attester` for each validator.
     /// - A new BeaconChain struct where the given validators are in the genesis.
-    pub fn new(spec: ChainSpec, validator_count: usize, validators_dir: Option<&Path>) -> Self {
+    pub fn new(
+        spec: ChainSpec,
+        validator_count: usize,
+        validators_dir: Option<&Path>,
+        skip_deposit_verification: bool,
+    ) -> Self {
         let db = Arc::new(MemoryDB::open());
         let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
         let state_store = Arc::new(BeaconStateStore::new(db.clone()));
@@ -57,22 +63,47 @@ impl BeaconChainHarness {
         let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec);
-        // If a `validators_dir` is specified, load the keypairs and validators from YAML files.
+        // If a `validators_dir` is specified, load the keypairs from a YAML file.
         //
-        // Otherwise, build all the keypairs and initial validator deposits manually.
-        //
-        // It is _much_ faster to load from YAML, however it does skip all the initial processing
-        // and verification of `Deposits`, so it is a slightly less comprehensive test.
+        // Otherwise, generate them deterministically where the first validator has a secret key of
+        // `1`, etc.
         let keypairs = if let Some(path) = validators_dir {
             debug!("Loading validator keypairs from file...");
             let keypairs_file = File::open(path.join("keypairs.yaml")).unwrap();
             let mut keypairs: Vec<Keypair> = serde_yaml::from_reader(&keypairs_file).unwrap();
             keypairs.truncate(validator_count);
+            keypairs
+        } else {
+            debug!("Generating validator keypairs...");
+            generate_deterministic_keypairs(validator_count)
+        };
-            debug!("Loading validators from file...");
-            let validators_file = File::open(path.join("validators.yaml")).unwrap();
-            let mut validators: Vec<Validator> = serde_yaml::from_reader(&validators_file).unwrap();
-            validators.truncate(validator_count);
+        // Skipping deposit verification means directly generating `Validator` records, instead
+        // of generating `Deposit` objects, verifying them and converting them into `Validator`
+        // records.
+        //
+        // It is much faster to skip deposit verification, however it does not test the initial
+        // validator induction part of beacon chain genesis.
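+        //
+        // For example (illustrative only; the actual call site in `test_case.rs`
+        // further below passes `true` for this new flag):
+        //
+        //     let harness = BeaconChainHarness::new(spec, validator_count, None, true);
+        //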
+ if skip_deposit_verification { + let validators = keypairs + .iter() + .map(|keypair| { + let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( + &keypair.pk, + spec.bls_withdrawal_prefix_byte, + )); + + Validator { + pubkey: keypair.pk.clone(), + withdrawal_credentials, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + initiated_exit: false, + slashed: false, + } + }) + .collect(); let balances = vec![32_000_000_000; validator_count]; @@ -82,15 +113,10 @@ impl BeaconChainHarness { validator_count as u64, &spec, ); - - keypairs } else { - debug!("Generating validator keypairs..."); - let keypairs = generate_deterministic_keypairs(validator_count); debug!("Generating initial validator deposits..."); let deposits = generate_deposits_from_keypairs(&keypairs, genesis_time, &spec); state_builder.process_initial_deposits(&deposits, &spec); - keypairs }; let genesis_state = state_builder.build(&spec).unwrap(); diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index e7b2defe6..7bc7161a8 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -81,7 +81,7 @@ impl TestCase { validator_count ); - let mut harness = BeaconChainHarness::new(spec, validator_count, validators_dir); + let mut harness = BeaconChainHarness::new(spec, validator_count, validators_dir, true); info!("Starting simulation across {} slots...", slots); From ca5d9658ce66ab9e6bd32c4eec8e87eb412cf900 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 9 Mar 2019 10:45:28 +1100 Subject: [PATCH 14/56] Move epoch processing benches into separate file --- eth2/state_processing/benches/benches.rs | 296 +----------------- .../benches/epoch_processing_benches.rs | 294 +++++++++++++++++ 2 files changed, 298 insertions(+), 292 deletions(-) create mode 100644 eth2/state_processing/benches/epoch_processing_benches.rs diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 5c064a08f..e42e91fb4 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,296 +1,8 @@ -use criterion::Criterion; -use criterion::{black_box, criterion_group, criterion_main, Benchmark}; -use state_processing::{ - per_epoch_processing, - per_epoch_processing::{ - calculate_active_validator_indices, calculate_attester_sets, clean_attestations, - process_crosslinks, process_eth1_data, process_justification, - process_rewards_and_penalities, process_validator_registry, update_active_tree_index_roots, - update_latest_slashed_balances, - }, -}; -// use env_logger::{Builder, Env}; -use benching_utils::BeaconStateBencher; -use types::{validator_registry::get_active_validator_indices, *}; +use criterion::{criterion_group, criterion_main}; -fn epoch_processing(c: &mut Criterion) { - // Builder::from_env(Env::default().default_filter_or("debug")).init(); - // - let spec = ChainSpec::foundation(); +mod epoch_processing_benches; - let validator_count = 16_384; +use epoch_processing_benches::epoch_processing_16k_validators; - let mut builder = BeaconStateBencher::new(validator_count, &spec); - builder.teleport_to_end_of_epoch(spec.genesis_epoch + 4, &spec); - builder.insert_attestations(&spec); - let mut state = builder.build(); - - // Build all the caches so the following state does _not_ include the cache-building time. 
- state - .build_epoch_cache(RelativeEpoch::Previous, &spec) - .unwrap(); - state - .build_epoch_cache(RelativeEpoch::Current, &spec) - .unwrap(); - state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap(); - - // Assert that the state has the maximum possible attestations. - let committees_per_epoch = spec.get_epoch_committee_count(validator_count); - let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; - let previous_epoch_attestations = committees_per_epoch; - let current_epoch_attestations = - committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay); - assert_eq!( - state.latest_attestations.len() as u64, - previous_epoch_attestations + current_epoch_attestations - ); - - // Assert that each attestation in the state has full participation. - let committee_size = validator_count / committees_per_epoch as usize; - for a in &state.latest_attestations { - assert_eq!(a.aggregation_bitfield.num_set_bits(), committee_size); - } - - // Assert that we will run the first arm of process_rewards_and_penalities - let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; - assert!(epochs_since_finality <= 4); - - bench_epoch_processing(c, &state, &spec, "16k_validators"); -} - -fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("full run", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("calculate_active_validator_indices", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(calculate_active_validator_indices(&mut state, &spec_clone)), - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("calculate_current_total_balance", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |state| { - black_box(state.get_total_balance(&active_validator_indices[..], &spec_clone)) - }, - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("calculate_previous_total_balance", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |state| { - black_box(state.get_total_balance( - &get_active_validator_indices( - &state.validator_registry, - state.previous_epoch(&spec_clone), - )[..], - &spec_clone, - )) - }, - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("process_eth1_data", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(process_eth1_data(&mut state, &spec_clone)), - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("calculate_attester_sets", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| 
black_box(calculate_attester_sets(&mut state, &spec_clone).unwrap()), - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - let current_total_balance = state.get_total_balance(&active_validator_indices[..], &spec); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("process_justification", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| { - black_box(process_justification( - &mut state, - current_total_balance, - previous_total_balance, - attesters.previous_epoch_boundary.balance, - attesters.current_epoch_boundary.balance, - &spec_clone, - )) - }, - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("process_crosslinks", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()), - ) - }) - .sample_size(10), - ); - - let mut state_clone = state.clone(); - let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); - let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("process_rewards_and_penalties", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| { - black_box( - process_rewards_and_penalities( - &mut state, - &active_validator_indices, - &attesters, - previous_total_balance, - &winning_root_for_shards, - &spec_clone, - ) - .unwrap(), - ) - }, - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("process_ejections", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(state.process_ejections(&spec_clone)), - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("process_validator_registry", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(process_validator_registry(&mut state, &spec_clone)), - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("update_active_tree_index_roots", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| { - black_box(update_active_tree_index_roots(&mut state, &spec_clone).unwrap()) - }, - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("update_latest_slashed_balances", move |b| { 
- b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(update_latest_slashed_balances(&mut state, &spec_clone)), - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("clean_attestations", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(clean_attestations(&mut state, &spec_clone)), - ) - }) - .sample_size(10), - ); -} - -criterion_group!(benches, epoch_processing,); +criterion_group!(benches, epoch_processing_16k_validators); criterion_main!(benches); diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs new file mode 100644 index 000000000..941180455 --- /dev/null +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -0,0 +1,294 @@ +use benching_utils::BeaconStateBencher; +use criterion::Criterion; +use criterion::{black_box, Benchmark}; +use state_processing::{ + per_epoch_processing, + per_epoch_processing::{ + calculate_active_validator_indices, calculate_attester_sets, clean_attestations, + process_crosslinks, process_eth1_data, process_justification, + process_rewards_and_penalities, process_validator_registry, update_active_tree_index_roots, + update_latest_slashed_balances, + }, +}; +use types::{validator_registry::get_active_validator_indices, *}; + +/// Run the benchmarking suite on a foundation spec with 16,384 validators. +pub fn epoch_processing_16k_validators(c: &mut Criterion) { + let spec = ChainSpec::foundation(); + + let validator_count = 16_384; + + let mut builder = BeaconStateBencher::new(validator_count, &spec); + builder.teleport_to_end_of_epoch(spec.genesis_epoch + 4, &spec); + builder.insert_attestations(&spec); + let mut state = builder.build(); + + // Build all the caches so the following state does _not_ include the cache-building time. + state + .build_epoch_cache(RelativeEpoch::Previous, &spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Current, &spec) + .unwrap(); + state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap(); + + // Assert that the state has the maximum possible attestations. + let committees_per_epoch = spec.get_epoch_committee_count(validator_count); + let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; + let previous_epoch_attestations = committees_per_epoch; + let current_epoch_attestations = + committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay); + assert_eq!( + state.latest_attestations.len() as u64, + previous_epoch_attestations + current_epoch_attestations + ); + + // Assert that each attestation in the state has full participation. + let committee_size = validator_count / committees_per_epoch as usize; + for a in &state.latest_attestations { + assert_eq!(a.aggregation_bitfield.num_set_bits(), committee_size); + } + + // Assert that we will run the first arm of process_rewards_and_penalities + let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; + assert!(epochs_since_finality <= 4); + + bench_epoch_processing(c, &state, &spec, "16k_validators"); +} + +/// Run the detailed benchmarking suite on the given `BeaconState`. +/// +/// `desc` will be added to the title of each bench. 
+fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("full run", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("calculate_active_validator_indices", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(calculate_active_validator_indices(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("calculate_current_total_balance", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |state| { + black_box(state.get_total_balance(&active_validator_indices[..], &spec_clone)) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("calculate_previous_total_balance", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |state| { + black_box(state.get_total_balance( + &get_active_validator_indices( + &state.validator_registry, + state.previous_epoch(&spec_clone), + )[..], + &spec_clone, + )) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_eth1_data", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(process_eth1_data(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("calculate_attester_sets", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(calculate_attester_sets(&mut state, &spec_clone).unwrap()), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + let previous_epoch = state.previous_epoch(&spec); + let attesters = calculate_attester_sets(&state, &spec).unwrap(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let current_total_balance = state.get_total_balance(&active_validator_indices[..], &spec); + let previous_total_balance = state.get_total_balance( + &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], + &spec, + ); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_justification", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| { + black_box(process_justification( + &mut state, + current_total_balance, + previous_total_balance, + attesters.previous_epoch_boundary.balance, + attesters.current_epoch_boundary.balance, + &spec_clone, + )) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_crosslinks", move |b| { + b.iter_with_setup( + || 
state_clone.clone(), + |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()), + ) + }) + .sample_size(10), + ); + + let mut state_clone = state.clone(); + let spec_clone = spec.clone(); + let previous_epoch = state.previous_epoch(&spec); + let attesters = calculate_attester_sets(&state, &spec).unwrap(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let previous_total_balance = state.get_total_balance( + &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], + &spec, + ); + let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_rewards_and_penalties", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| { + black_box( + process_rewards_and_penalities( + &mut state, + &active_validator_indices, + &attesters, + previous_total_balance, + &winning_root_for_shards, + &spec_clone, + ) + .unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_ejections", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(state.process_ejections(&spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("process_validator_registry", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(process_validator_registry(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("update_active_tree_index_roots", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| { + black_box(update_active_tree_index_roots(&mut state, &spec_clone).unwrap()) + }, + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("update_latest_slashed_balances", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(update_latest_slashed_balances(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); + + let state_clone = state.clone(); + let spec_clone = spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("clean_attestations", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(clean_attestations(&mut state, &spec_clone)), + ) + }) + .sample_size(10), + ); +} From 73ebb4bc2eb4e4326fc9bc8adf8ce3749d094728 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 9 Mar 2019 14:11:49 +1100 Subject: [PATCH 15/56] Add incomplete per-block processing benchmarks Still needs to fill block with operations --- eth2/state_processing/benches/benches.rs | 9 +- .../benches/block_processing_benches.rs | 276 ++++++++++++++++++ .../benches/epoch_processing_benches.rs | 23 +- .../src/beacon_block_bencher.rs | 46 +++ .../src/beacon_state_bencher.rs | 213 ++++++++++++++ .../benching_utils/src/lib.rs | 198 +------------ .../src/per_block_processing.rs | 18 +- .../src/per_epoch_processing/tests.rs | 7 +- eth2/types/src/beacon_block.rs | 16 +- 9 files changed, 583 insertions(+), 223 deletions(-) create mode 100644 
eth2/state_processing/benches/block_processing_benches.rs create mode 100644 eth2/state_processing/benching_utils/src/beacon_block_bencher.rs create mode 100644 eth2/state_processing/benching_utils/src/beacon_state_bencher.rs diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index e42e91fb4..52b939a69 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,8 +1,11 @@ use criterion::{criterion_group, criterion_main}; +mod block_processing_benches; mod epoch_processing_benches; -use epoch_processing_benches::epoch_processing_16k_validators; - -criterion_group!(benches, epoch_processing_16k_validators); +criterion_group!( + benches, + // epoch_processing_benches::epoch_processing_16k_validators, + block_processing_benches::block_processing_16k_validators, +); criterion_main!(benches); diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs new file mode 100644 index 000000000..b5fdaa5bd --- /dev/null +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -0,0 +1,276 @@ +use benching_utils::{BeaconBlockBencher, BeaconStateBencher}; +use criterion::Criterion; +use criterion::{black_box, Benchmark}; +use state_processing::{ + per_block_processing, + per_block_processing::{ + process_attestations, process_attester_slashings, process_deposits, process_eth1_data, + process_exits, process_proposer_slashings, process_randao, process_transfers, + verify_block_signature, + }, +}; +use types::*; + +/// Run the benchmarking suite on a foundation spec with 16,384 validators. +pub fn block_processing_16k_validators(c: &mut Criterion) { + let spec = ChainSpec::foundation(); + + let validator_count = 16_384; + + let (state, keypairs) = build_state(validator_count, &spec); + let block = build_block(&state, &keypairs, &spec); + + bench_block_processing( + c, + &block, + &state, + &spec, + &format!("{}_validators", validator_count), + ); +} + +fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec) { + let mut builder = BeaconStateBencher::new(validator_count, &spec); + + // Set the state to be just before an epoch transition. + let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + builder.teleport_to_slot(target_slot, &spec); + + // Builds all caches; benches will not contain shuffling/committee building times. + builder.build_caches(&spec).unwrap(); + + builder.build() +} + +fn build_block(state: &BeaconState, keypairs: &[Keypair], spec: &ChainSpec) -> BeaconBlock { + let mut builder = BeaconBlockBencher::new(spec); + + builder.set_slot(state.slot); + + let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); + let keypair = &keypairs[proposer_index]; + + builder.set_randao_reveal(&keypair.sk, &state.fork, spec); + + builder.build(&keypair.sk, &state.fork, spec) +} + +/// Run the detailed benchmarking suite on the given `BeaconState`. +/// +/// `desc` will be added to the title of each bench. 
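+///
+/// Each per-block operation (block signature verification, RANDAO, eth1 data, proposer and
+/// attester slashings, attestations, deposits, exits and transfers) is benched in isolation
+/// against a clone of the prepared state, followed by a full `per_block_processing` run and
+/// the epoch-cache builds.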
+fn bench_block_processing( + c: &mut Criterion, + initial_block: &BeaconBlock, + initial_state: &BeaconState, + initial_spec: &ChainSpec, + desc: &str, +) { + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("verify_block_signature", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| black_box(verify_block_signature(&mut state, &block, &spec).unwrap()), + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_randao", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| black_box(process_randao(&mut state, &block, &spec).unwrap()), + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_eth1_data", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| black_box(process_eth1_data(&mut state, &block.eth1_data).unwrap()), + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_proposer_slashings", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| { + black_box( + process_proposer_slashings( + &mut state, + &block.body.proposer_slashings, + &spec, + ) + .unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_attester_slashings", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| { + black_box( + process_attester_slashings( + &mut state, + &block.body.attester_slashings, + &spec, + ) + .unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_attestations", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| { + black_box( + process_attestations(&mut state, &block.body.attestations, &spec).unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_deposits", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| { + black_box(process_deposits(&mut state, &block.body.deposits, &spec).unwrap()) + }, + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_exits", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| { + black_box( + process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("process_transfers", move |b| { + b.iter_with_setup( + || state.clone(), + |mut 
state| { + black_box(process_transfers(&mut state, &block.body.transfers, &spec).unwrap()) + }, + ) + }) + .sample_size(10), + ); + + let state = initial_state.clone(); + let block = initial_block.clone(); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("per_block_processing", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| black_box(per_block_processing(&mut state, &block, &spec).unwrap()), + ) + }) + .sample_size(10), + ); + + let mut state = initial_state.clone(); + state.drop_cache(RelativeEpoch::Previous); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("build_previous_state_epoch_cache", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| { + black_box( + state + .build_epoch_cache(RelativeEpoch::Previous, &spec) + .unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); + + let mut state = initial_state.clone(); + state.drop_cache(RelativeEpoch::Current); + let spec = initial_spec.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("build_current_state_epoch_cache", move |b| { + b.iter_with_setup( + || state.clone(), + |mut state| { + black_box( + state + .build_epoch_cache(RelativeEpoch::Current, &spec) + .unwrap(), + ) + }, + ) + }) + .sample_size(10), + ); +} diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index 941180455..149d8f28e 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -19,18 +19,19 @@ pub fn epoch_processing_16k_validators(c: &mut Criterion) { let validator_count = 16_384; let mut builder = BeaconStateBencher::new(validator_count, &spec); - builder.teleport_to_end_of_epoch(spec.genesis_epoch + 4, &spec); - builder.insert_attestations(&spec); - let mut state = builder.build(); - // Build all the caches so the following state does _not_ include the cache-building time. - state - .build_epoch_cache(RelativeEpoch::Previous, &spec) - .unwrap(); - state - .build_epoch_cache(RelativeEpoch::Current, &spec) - .unwrap(); - state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap(); + // Set the state to be just before an epoch transition. + let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + builder.teleport_to_slot(target_slot, &spec); + + // Builds all caches; benches will not contain shuffling/committee building times. + builder.build_caches(&spec).unwrap(); + + // Inserts one attestation with full participation for each committee able to include an + // attestation in this state. + builder.insert_attestations(&spec); + + let (state, _keypairs) = builder.build(); // Assert that the state has the maximum possible attestations. 
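+    // (One attestation is expected per previous-epoch committee, while the current epoch is
+    // short by `min_attestation_inclusion_delay` slots' worth of committees, as computed below.)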
let committees_per_epoch = spec.get_epoch_committee_count(validator_count); diff --git a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs new file mode 100644 index 000000000..67b7ccc9d --- /dev/null +++ b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs @@ -0,0 +1,46 @@ +use ssz::{SignedRoot, TreeHash}; +use types::*; + +pub struct BeaconBlockBencher { + block: BeaconBlock, +} + +impl BeaconBlockBencher { + pub fn new(spec: &ChainSpec) -> Self { + Self { + block: BeaconBlock::genesis(spec.zero_hash, spec), + } + } + + pub fn set_slot(&mut self, slot: Slot) { + self.block.slot = slot; + } + + /// Signs the block. + pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { + let proposal = self.block.proposal(spec); + let message = proposal.signed_root(); + let epoch = self.block.slot.epoch(spec.slots_per_epoch); + let domain = spec.get_domain(epoch, Domain::Proposal, fork); + self.block.signature = Signature::new(&message, domain, sk); + } + + /// Sets the randao to be a signature across the blocks epoch. + pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { + let epoch = self.block.slot.epoch(spec.slots_per_epoch); + let message = epoch.hash_tree_root(); + let domain = spec.get_domain(epoch, Domain::Randao, fork); + self.block.randao_reveal = Signature::new(&message, domain, sk); + } + + /// Signs and returns the block, consuming the builder. + pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock { + self.sign(sk, fork, spec); + self.block + } + + /// Returns the block, consuming the builder. + pub fn build_without_signing(self) -> BeaconBlock { + self.block + } +} diff --git a/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs b/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs new file mode 100644 index 000000000..0ee4a75e9 --- /dev/null +++ b/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs @@ -0,0 +1,213 @@ +use bls::get_withdrawal_credentials; +use int_to_bytes::int_to_bytes48; +use rayon::prelude::*; +use types::beacon_state::BeaconStateBuilder; +use types::*; + +pub struct BeaconStateBencher { + state: BeaconState, + keypairs: Vec, +} + +impl BeaconStateBencher { + pub fn new(validator_count: usize, spec: &ChainSpec) -> Self { + let keypairs: Vec = (0..validator_count) + .collect::>() + .par_iter() + .map(|&i| { + let secret = int_to_bytes48(i as u64 + 1); + let sk = SecretKey::from_bytes(&secret).unwrap(); + let pk = PublicKey::from_secret_key(&sk); + Keypair { sk, pk } + }) + .collect(); + + let validators = keypairs + .iter() + .map(|keypair| { + let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( + &keypair.pk, + spec.bls_withdrawal_prefix_byte, + )); + + Validator { + pubkey: keypair.pk.clone(), + withdrawal_credentials, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + initiated_exit: false, + slashed: false, + } + }) + .collect(); + + let mut state_builder = BeaconStateBuilder::new( + 0, + Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + }, + spec, + ); + + let balances = vec![32_000_000_000; validator_count]; + + state_builder.import_existing_validators( + validators, + balances, + validator_count as u64, + spec, + ); + + Self { + state: state_builder.build(spec).unwrap(), + keypairs, + } + } + + pub fn build(self) -> 
(BeaconState, Vec) { + (self.state, self.keypairs) + } + + pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { + let state = &mut self.state; + + state.build_epoch_cache(RelativeEpoch::Previous, &spec)?; + state.build_epoch_cache(RelativeEpoch::Current, &spec)?; + state.build_epoch_cache(RelativeEpoch::Next, &spec)?; + + Ok(()) + } + + /// Sets the `BeaconState` to be in a slot, calling `teleport_to_epoch` to update the epoch. + pub fn teleport_to_slot(&mut self, slot: Slot, spec: &ChainSpec) { + self.teleport_to_epoch(slot.epoch(spec.slots_per_epoch), spec); + self.state.slot = slot; + } + + /// Sets the `BeaconState` to be in the first slot of the given epoch. + /// + /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., + /// highest justified and finalized slots, full justification bitfield, etc). + fn teleport_to_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { + let state = &mut self.state; + + let slot = epoch.start_slot(spec.slots_per_epoch); + + state.slot = slot; + state.validator_registry_update_epoch = epoch - 1; + + state.previous_shuffling_epoch = epoch - 1; + state.current_shuffling_epoch = epoch; + + state.previous_shuffling_seed = Hash256::from_low_u64_le(0); + state.current_shuffling_seed = Hash256::from_low_u64_le(1); + + state.previous_justified_epoch = epoch - 2; + state.justified_epoch = epoch - 1; + state.justification_bitfield = u64::max_value(); + state.finalized_epoch = epoch - 1; + } + + /// Creates a full set of attestations for the `BeaconState`. Each attestation has full + /// participation from its committee and references the expected beacon_block hashes. + /// + /// These attestations should be fully conducive to justification and finalization. + pub fn insert_attestations(&mut self, spec: &ChainSpec) { + let state = &mut self.state; + + state + .build_epoch_cache(RelativeEpoch::Previous, spec) + .unwrap(); + state + .build_epoch_cache(RelativeEpoch::Current, spec) + .unwrap(); + + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let first_slot = previous_epoch.start_slot(spec.slots_per_epoch).as_u64(); + let last_slot = current_epoch.end_slot(spec.slots_per_epoch).as_u64() + - spec.min_attestation_inclusion_delay; + let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); + + for slot in first_slot..last_slot + 1 { + let slot = Slot::from(slot); + + let committees = state + .get_crosslink_committees_at_slot(slot, spec) + .unwrap() + .clone(); + + for (committee, shard) in committees { + state + .latest_attestations + .push(committee_to_pending_attestation( + state, &committee, shard, slot, spec, + )) + } + } + } +} + +fn committee_to_pending_attestation( + state: &BeaconState, + committee: &[usize], + shard: u64, + slot: Slot, + spec: &ChainSpec, +) -> PendingAttestation { + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let mut aggregation_bitfield = Bitfield::new(); + let mut custody_bitfield = Bitfield::new(); + + for (i, _) in committee.iter().enumerate() { + aggregation_bitfield.set(i, true); + custody_bitfield.set(i, true); + } + + let is_previous_epoch = + state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); + + let justified_epoch = if is_previous_epoch { + state.previous_justified_epoch + } else { + state.justified_epoch + }; + + let epoch_boundary_root = if is_previous_epoch { + *state + 
.get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + } else { + *state + .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + }; + + let justified_block_root = *state + .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap(); + + PendingAttestation { + aggregation_bitfield, + data: AttestationData { + slot, + shard, + beacon_block_root: *state.get_block_root(slot, spec).unwrap(), + epoch_boundary_root, + crosslink_data_root: Hash256::zero(), + latest_crosslink: Crosslink { + epoch: slot.epoch(spec.slots_per_epoch), + crosslink_data_root: Hash256::zero(), + }, + justified_epoch, + justified_block_root, + }, + custody_bitfield, + inclusion_slot: slot + spec.min_attestation_inclusion_delay, + } +} diff --git a/eth2/state_processing/benching_utils/src/lib.rs b/eth2/state_processing/benching_utils/src/lib.rs index c70b7828a..ba9548814 100644 --- a/eth2/state_processing/benching_utils/src/lib.rs +++ b/eth2/state_processing/benching_utils/src/lib.rs @@ -1,195 +1,5 @@ -use bls::get_withdrawal_credentials; -use int_to_bytes::int_to_bytes48; -use rayon::prelude::*; -use types::beacon_state::BeaconStateBuilder; -use types::*; +mod beacon_block_bencher; +mod beacon_state_bencher; -pub struct BeaconStateBencher { - state: BeaconState, -} - -impl BeaconStateBencher { - pub fn new(validator_count: usize, spec: &ChainSpec) -> Self { - let keypairs: Vec = (0..validator_count) - .collect::>() - .par_iter() - .map(|&i| { - let secret = int_to_bytes48(i as u64 + 1); - let sk = SecretKey::from_bytes(&secret).unwrap(); - let pk = PublicKey::from_secret_key(&sk); - Keypair { sk, pk } - }) - .collect(); - - let validators = keypairs - .iter() - .map(|keypair| { - let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( - &keypair.pk, - spec.bls_withdrawal_prefix_byte, - )); - - Validator { - pubkey: keypair.pk.clone(), - withdrawal_credentials, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - initiated_exit: false, - slashed: false, - } - }) - .collect(); - - let mut state_builder = BeaconStateBuilder::new( - 0, - Eth1Data { - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }, - spec, - ); - - let balances = vec![32_000_000_000; validator_count]; - - state_builder.import_existing_validators( - validators, - balances, - validator_count as u64, - spec, - ); - - Self { - state: state_builder.build(spec).unwrap(), - } - } - - pub fn build(self) -> BeaconState { - self.state - } - - /// Sets the `BeaconState` to be in the last slot of the given epoch. - /// - /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., - /// highest justified and finalized slots, full justification bitfield, etc). - pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { - let state = &mut self.state; - - let slot = epoch.end_slot(spec.slots_per_epoch); - - state.slot = slot; - state.validator_registry_update_epoch = epoch - 1; - - state.previous_shuffling_epoch = epoch - 1; - state.current_shuffling_epoch = epoch; - - state.previous_shuffling_seed = Hash256::from_low_u64_le(0); - state.current_shuffling_seed = Hash256::from_low_u64_le(1); - - state.previous_justified_epoch = epoch - 2; - state.justified_epoch = epoch - 1; - state.justification_bitfield = u64::max_value(); - state.finalized_epoch = epoch - 1; - } - - /// Creates a full set of attestations for the `BeaconState`. 
Each attestation has full - /// participation from its committee and references the expected beacon_block hashes. - /// - /// These attestations should be fully conducive to justification and finalization. - pub fn insert_attestations(&mut self, spec: &ChainSpec) { - let state = &mut self.state; - - state - .build_epoch_cache(RelativeEpoch::Previous, spec) - .unwrap(); - state - .build_epoch_cache(RelativeEpoch::Current, spec) - .unwrap(); - - let current_epoch = state.current_epoch(spec); - let previous_epoch = state.previous_epoch(spec); - - let first_slot = previous_epoch.start_slot(spec.slots_per_epoch).as_u64(); - let last_slot = current_epoch.end_slot(spec.slots_per_epoch).as_u64() - - spec.min_attestation_inclusion_delay; - let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); - - for slot in first_slot..last_slot + 1 { - let slot = Slot::from(slot); - - let committees = state - .get_crosslink_committees_at_slot(slot, spec) - .unwrap() - .clone(); - - for (committee, shard) in committees { - state - .latest_attestations - .push(committee_to_pending_attestation( - state, &committee, shard, slot, spec, - )) - } - } - } -} - -fn committee_to_pending_attestation( - state: &BeaconState, - committee: &[usize], - shard: u64, - slot: Slot, - spec: &ChainSpec, -) -> PendingAttestation { - let current_epoch = state.current_epoch(spec); - let previous_epoch = state.previous_epoch(spec); - - let mut aggregation_bitfield = Bitfield::new(); - let mut custody_bitfield = Bitfield::new(); - - for (i, _) in committee.iter().enumerate() { - aggregation_bitfield.set(i, true); - custody_bitfield.set(i, true); - } - - let is_previous_epoch = - state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); - - let justified_epoch = if is_previous_epoch { - state.previous_justified_epoch - } else { - state.justified_epoch - }; - - let epoch_boundary_root = if is_previous_epoch { - *state - .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - } else { - *state - .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - }; - - let justified_block_root = *state - .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap(); - - PendingAttestation { - aggregation_bitfield, - data: AttestationData { - slot, - shard, - beacon_block_root: *state.get_block_root(slot, spec).unwrap(), - epoch_boundary_root, - crosslink_data_root: Hash256::zero(), - latest_crosslink: Crosslink { - epoch: slot.epoch(spec.slots_per_epoch), - crosslink_data_root: Hash256::zero(), - }, - justified_epoch, - justified_block_root, - }, - custody_bitfield, - inclusion_slot: slot + spec.min_attestation_inclusion_delay, - } -} +pub use beacon_block_bencher::BeaconBlockBencher; +pub use beacon_state_bencher::BeaconStateBencher; diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 1ab1eed71..149e0bf79 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,7 +1,6 @@ use self::verify_proposer_slashing::verify_proposer_slashing; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use hashing::hash; -use log::debug; use ssz::{ssz_encode, SignedRoot, TreeHash}; use types::*; @@ -70,22 +69,21 @@ fn per_block_processing_signature_optional( // Verify that `block.slot == state.slot`. 
verify!(block.slot == state.slot, Invalid::StateSlotMismatch); - // Ensure the current epoch cache is built. + // Ensure the current and previous epoch cache is built. state.build_epoch_cache(RelativeEpoch::Current, spec)?; + state.build_epoch_cache(RelativeEpoch::Previous, spec)?; if should_verify_block_signature { verify_block_signature(&state, &block, &spec)?; } process_randao(&mut state, &block, &spec)?; process_eth1_data(&mut state, &block.eth1_data)?; - process_proposer_slashings(&mut state, &block.body.proposer_slashings[..], spec)?; - process_attester_slashings(&mut state, &block.body.attester_slashings[..], spec)?; - process_attestations(&mut state, &block.body.attestations[..], spec)?; - process_deposits(&mut state, &block.body.deposits[..], spec)?; - process_exits(&mut state, &block.body.voluntary_exits[..], spec)?; - process_transfers(&mut state, &block.body.transfers[..], spec)?; - - debug!("per_block_processing complete."); + process_proposer_slashings(&mut state, &block.body.proposer_slashings, spec)?; + process_attester_slashings(&mut state, &block.body.attester_slashings, spec)?; + process_attestations(&mut state, &block.body.attestations, spec)?; + process_deposits(&mut state, &block.body.deposits, spec)?; + process_exits(&mut state, &block.body.voluntary_exits, spec)?; + process_transfers(&mut state, &block.body.transfers, spec)?; Ok(()) } diff --git a/eth2/state_processing/src/per_epoch_processing/tests.rs b/eth2/state_processing/src/per_epoch_processing/tests.rs index 8ff687904..f3c68a173 100644 --- a/eth2/state_processing/src/per_epoch_processing/tests.rs +++ b/eth2/state_processing/src/per_epoch_processing/tests.rs @@ -11,8 +11,11 @@ fn runs_without_error() { let spec = ChainSpec::few_validators(); let mut builder = BeaconStateBencher::new(8, &spec); - builder.teleport_to_end_of_epoch(spec.genesis_epoch + 4, &spec); - let mut state = builder.build(); + + let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); + builder.teleport_to_slot(target_slot, &spec); + + let (mut state, _keypairs) = builder.build(); per_epoch_processing(&mut state, &spec).unwrap(); } diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 2e1e24ef7..9e1b3f7ae 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -1,9 +1,9 @@ use crate::test_utils::TestRandom; -use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; +use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Proposal, Slot}; use bls::Signature; use rand::RngCore; use serde_derive::Serialize; -use ssz::TreeHash; +use ssz::{SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; @@ -33,7 +33,6 @@ impl BeaconBlock { deposit_root: spec.zero_hash, block_hash: spec.zero_hash, }, - signature: spec.empty_signature.clone(), body: BeaconBlockBody { proposer_slashings: vec![], attester_slashings: vec![], @@ -42,6 +41,7 @@ impl BeaconBlock { voluntary_exits: vec![], transfers: vec![], }, + signature: spec.empty_signature.clone(), } } @@ -49,6 +49,16 @@ impl BeaconBlock { pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.hash_tree_root()[..]) } + + /// Returns an unsigned proposal for block. 
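+    ///
+    /// The returned proposal's `block_root` is this block's signed root and its `signature` is
+    /// the spec's empty signature; callers sign the proposal's signed root to produce the block
+    /// signature.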
+    pub fn proposal(&self, spec: &ChainSpec) -> Proposal {
+        Proposal {
+            slot: self.slot,
+            shard: spec.beacon_chain_shard_number,
+            block_root: Hash256::from_slice(&self.signed_root()),
+            signature: spec.empty_signature.clone(),
+        }
+    }
 }
 
 #[cfg(test)]

From f8ec1e0cfa381610c8f19adda6a0a47bca3da427 Mon Sep 17 00:00:00 2001
From: Paul Hauner 
Date: Sat, 9 Mar 2019 18:56:01 +1100
Subject: [PATCH 16/56] Add slashings and attestations to per block benching

---
 .../benches/block_processing_benches.rs       |  84 +++++-
 .../src/beacon_block_bencher.rs               | 241 +++++++++++++++++-
 eth2/types/src/attester_slashing/builder.rs   |   1 +
 3 files changed, 322 insertions(+), 4 deletions(-)

diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs
index b5fdaa5bd..75943b1ad 100644
--- a/eth2/state_processing/benches/block_processing_benches.rs
+++ b/eth2/state_processing/benches/block_processing_benches.rs
@@ -20,6 +20,39 @@ pub fn block_processing_16k_validators(c: &mut Criterion) {
     let (state, keypairs) = build_state(validator_count, &spec);
     let block = build_block(&state, &keypairs, &spec);
 
+    assert_eq!(
+        block.body.proposer_slashings.len(),
+        spec.max_proposer_slashings as usize,
+        "The block should have the maximum possible proposer slashings"
+    );
+
+    assert_eq!(
+        block.body.attester_slashings.len(),
+        spec.max_attester_slashings as usize,
+        "The block should have the maximum possible attester slashings"
+    );
+
+    for attester_slashing in &block.body.attester_slashings {
+        let len_1 = attester_slashing
+            .slashable_attestation_1
+            .validator_indices
+            .len();
+        let len_2 = attester_slashing
+            .slashable_attestation_2
+            .validator_indices
+            .len();
+        assert!(
+            (len_1 == len_2) && (len_2 == spec.max_indices_per_slashable_vote as usize),
+            "Each attester slashing should have the maximum possible validator indices"
+        );
+    }
+
+    assert_eq!(
+        block.body.attestations.len(),
+        spec.max_attestations as usize,
+        "The block should have the maximum possible attestations."
+    );
+
     bench_block_processing(
         c,
         &block,
@@ -52,6 +85,45 @@ fn build_block(state: &BeaconState, keypairs: &[Keypair], spec: &ChainSpec) -> B
 
     builder.set_randao_reveal(&keypair.sk, &state.fork, spec);
 
+    // Insert the maximum possible number of `ProposerSlashing` objects.
+    for validator_index in 0..spec.max_proposer_slashings {
+        builder.insert_proposer_slashing(
+            validator_index,
+            &keypairs[validator_index as usize].sk,
+            &state.fork,
+            spec,
+        );
+    }
+
+    // Insert the maximum possible number of `AttesterSlashing` objects.
+    let number_of_slashable_attesters =
+        spec.max_indices_per_slashable_vote * spec.max_attester_slashings;
+    let all_attester_slashing_indices: Vec<u64> = (spec.max_proposer_slashings
+        ..(spec.max_proposer_slashings + number_of_slashable_attesters))
+        .collect();
+    let attester_slashing_groups: Vec<&[u64]> = all_attester_slashing_indices
+        .chunks(spec.max_indices_per_slashable_vote as usize)
+        .collect();
+    for attester_slashing_group in attester_slashing_groups {
+        let attester_slashing_keypairs: Vec<&SecretKey> = attester_slashing_group
+            .iter()
+            .map(|&validator_index| &keypairs[validator_index as usize].sk)
+            .collect();
+
+        builder.insert_attester_slashing(
+            &attester_slashing_group,
+            &attester_slashing_keypairs,
+            &state.fork,
+            spec,
+        );
+    }
+
+    // Insert the maximum possible number of `Attestation` objects.
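+    // All validator secret keys are collected so the builder can sign an attestation for every
+    // committee member included below.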
+ let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); + builder + .fill_with_attestations(state, &all_secret_keys, spec) + .unwrap(); + builder.build(&keypair.sk, &state.fork, spec) } @@ -153,7 +225,10 @@ fn bench_block_processing( ); let state = initial_state.clone(); - let block = initial_block.clone(); + let mut block = initial_block.clone(); + // Slashings will invalidate the attestations. + block.body.proposer_slashings = vec![]; + block.body.attester_slashings = vec![]; let spec = initial_spec.clone(); c.bench( &format!("block_processing_{}", desc), @@ -221,11 +296,14 @@ fn bench_block_processing( ); let state = initial_state.clone(); - let block = initial_block.clone(); + let mut block = initial_block.clone(); + // Slashings will invalidate the attestations. + block.body.proposer_slashings = vec![]; + block.body.attester_slashings = vec![]; let spec = initial_spec.clone(); c.bench( &format!("block_processing_{}", desc), - Benchmark::new("per_block_processing", move |b| { + Benchmark::new("per_block_processing_no_slashings", move |b| { b.iter_with_setup( || state.clone(), |mut state| black_box(per_block_processing(&mut state, &block, &spec).unwrap()), diff --git a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs index 67b7ccc9d..989dbd929 100644 --- a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs +++ b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs @@ -1,5 +1,8 @@ +use rayon::prelude::*; use ssz::{SignedRoot, TreeHash}; -use types::*; +use types::{ + attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, *, +}; pub struct BeaconBlockBencher { block: BeaconBlock, @@ -33,6 +36,114 @@ impl BeaconBlockBencher { self.block.randao_reveal = Signature::new(&message, domain, sk); } + /// Inserts a signed, valid `ProposerSlashing` for the validator. + pub fn insert_proposer_slashing( + &mut self, + validator_index: u64, + secret_key: &SecretKey, + fork: &Fork, + spec: &ChainSpec, + ) { + let proposer_slashing = build_proposer_slashing(validator_index, secret_key, fork, spec); + self.block.body.proposer_slashings.push(proposer_slashing); + } + + /// Inserts a signed, valid `AttesterSlashing` for each validator index in `validator_indices`. + pub fn insert_attester_slashing( + &mut self, + validator_indices: &[u64], + secret_keys: &[&SecretKey], + fork: &Fork, + spec: &ChainSpec, + ) { + let attester_slashing = + build_double_vote_attester_slashing(validator_indices, secret_keys, fork, spec); + self.block.body.attester_slashings.push(attester_slashing); + } + + /// Fills the block with as many attestations as possible. + /// + /// Note: this will not perform well when `jepoch_committees_count % slots_per_epoch != 0` + pub fn fill_with_attestations( + &mut self, + state: &BeaconState, + secret_keys: &[&SecretKey], + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { + let mut slot = self.block.slot - spec.min_attestation_inclusion_delay; + let mut attestations_added = 0; + + // Stores the following (in order): + // + // - The slot of the committee. + // - A list of all validators in the committee. + // - A list of all validators in the committee that should sign the attestation. + // - The shard of the committee. 
+        let mut committees: Vec<(Slot, Vec<usize>, Vec<usize>, u64)> = vec![];
+
+        // Loop backwards through slots gathering each committee, until:
+        //
+        // - The slot is too old to be included in a block at this slot.
+        // - The `MAX_ATTESTATIONS` limit is reached.
+        loop {
+            if attestations_added == spec.max_attestations {
+                break;
+            }
+            if state.slot >= slot + spec.slots_per_epoch {
+                break;
+            }
+
+            for (committee, shard) in state.get_crosslink_committees_at_slot(slot, spec)? {
+                committees.push((slot, committee.clone(), committee.clone(), *shard))
+            }
+
+            attestations_added += 1;
+            slot -= 1;
+        }
+
+        // Loop through all the committees, splitting each one in half until we have
+        // `MAX_ATTESTATIONS` committees.
+        loop {
+            if committees.len() >= spec.max_attestations as usize {
+                break;
+            }
+
+            for index in 0..committees.len() {
+                if committees.len() >= spec.max_attestations as usize {
+                    break;
+                }
+
+                let (slot, committee, mut signing_validators, shard) = committees[index].clone();
+
+                let new_signing_validators =
+                    signing_validators.split_off(signing_validators.len() / 2);
+
+                committees[index] = (slot, committee.clone(), signing_validators, shard);
+                committees.push((slot, committee, new_signing_validators, shard));
+            }
+        }
+
+        let mut attestations: Vec<Attestation> = committees
+            .par_iter()
+            .map(|(slot, committee, signing_validators, shard)| {
+                committee_to_attestation(
+                    state,
+                    &committee,
+                    signing_validators,
+                    secret_keys,
+                    *shard,
+                    *slot,
+                    &state.fork,
+                    spec,
+                )
+            })
+            .collect();
+
+        self.block.body.attestations.append(&mut attestations);
+
+        Ok(())
+    }
+
     /// Signs and returns the block, consuming the builder.
     pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock {
         self.sign(sk, fork, spec);
@@ -44,3 +155,131 @@ impl BeaconBlockBencher {
         self.block
     }
 }
+
+/// Builds a `ProposerSlashing` for some `validator_index`.
+///
+/// Signs the message using the given secret key.
+fn build_proposer_slashing(
+    validator_index: u64,
+    secret_key: &SecretKey,
+    fork: &Fork,
+    spec: &ChainSpec,
+) -> ProposerSlashing {
+    let signer = |_validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
+        let domain = spec.get_domain(epoch, domain, fork);
+        Signature::new(message, domain, secret_key)
+    };
+
+    ProposerSlashingBuilder::double_vote(validator_index, signer, spec)
+}
+
+/// Builds an `AttesterSlashing` for some `validator_indices`.
+///
+/// Signs the message using the given secret keys.
+fn build_double_vote_attester_slashing(
+    validator_indices: &[u64],
+    secret_keys: &[&SecretKey],
+    fork: &Fork,
+    spec: &ChainSpec,
+) -> AttesterSlashing {
+    let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
+        let key_index = validator_indices
+            .iter()
+            .position(|&i| i == validator_index)
+            .expect("Unable to find attester slashing key");
+        let domain = spec.get_domain(epoch, domain, fork);
+        Signature::new(message, domain, secret_keys[key_index])
+    };
+
+    AttesterSlashingBuilder::double_vote(validator_indices, signer)
+}
+
+/// Convert some committee into a valid `Attestation`.
+///
+/// Note: `committee` must be the full committee for the attestation. `signing_validators` is a
+/// list of validator indices that should sign the attestation.
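+/// Validators absent from `signing_validators` keep a `false` bit in the aggregation bitfield
+/// and contribute nothing to the aggregate signature.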
+fn committee_to_attestation( + state: &BeaconState, + committee: &[usize], + signing_validators: &[usize], + secret_keys: &[&SecretKey], + shard: u64, + slot: Slot, + fork: &Fork, + spec: &ChainSpec, +) -> Attestation { + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let is_previous_epoch = + state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); + + let justified_epoch = if is_previous_epoch { + state.previous_justified_epoch + } else { + state.justified_epoch + }; + + let epoch_boundary_root = if is_previous_epoch { + *state + .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + } else { + *state + .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap() + }; + + let justified_block_root = *state + .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) + .unwrap(); + + let data = AttestationData { + slot, + shard, + beacon_block_root: *state.get_block_root(slot, spec).unwrap(), + epoch_boundary_root, + crosslink_data_root: Hash256::zero(), + latest_crosslink: state.latest_crosslinks[shard as usize].clone(), + justified_epoch, + justified_block_root, + }; + + let mut aggregate_signature = AggregateSignature::new(); + let mut aggregation_bitfield = Bitfield::new(); + let mut custody_bitfield = Bitfield::new(); + + let message = AttestationDataAndCustodyBit { + data: data.clone(), + custody_bit: false, + } + .hash_tree_root(); + + let domain = spec.get_domain( + data.slot.epoch(spec.slots_per_epoch), + Domain::Attestation, + fork, + ); + + for (i, validator_index) in committee.iter().enumerate() { + custody_bitfield.set(i, false); + + if signing_validators + .iter() + .any(|&signer| *validator_index == signer) + { + aggregation_bitfield.set(i, true); + let signature = Signature::new(&message, domain, secret_keys[*validator_index]); + aggregate_signature.add(&signature); + } else { + aggregation_bitfield.set(i, false); + } + } + + Attestation { + aggregation_bitfield, + data, + custody_bitfield, + aggregate_signature, + } +} diff --git a/eth2/types/src/attester_slashing/builder.rs b/eth2/types/src/attester_slashing/builder.rs index 05301f30b..8edf4ed65 100644 --- a/eth2/types/src/attester_slashing/builder.rs +++ b/eth2/types/src/attester_slashing/builder.rs @@ -66,6 +66,7 @@ impl AttesterSlashingBuilder { let add_signatures = |attestation: &mut SlashableAttestation| { for (i, validator_index) in validator_indices.iter().enumerate() { + attestation.custody_bitfield.set(i, false); let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { data: attestation.data.clone(), custody_bit: attestation.custody_bitfield.get(i).unwrap(), From 4bf2490163a224fb0e1dcdedc50af14557f77b3d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 9 Mar 2019 18:59:32 +1100 Subject: [PATCH 17/56] Re-organise types::test_utils dir --- eth2/types/src/test_utils/mod.rs | 56 +------------------ eth2/types/src/test_utils/test_random.rs | 55 ++++++++++++++++++ .../test_utils/{ => test_random}/address.rs | 0 .../{ => test_random}/aggregate_signature.rs | 0 .../test_utils/{ => test_random}/bitfield.rs | 2 +- .../test_utils/{ => test_random}/hash256.rs | 0 .../{ => test_random}/public_key.rs | 0 .../{ => test_random}/secret_key.rs | 0 .../test_utils/{ => test_random}/signature.rs | 0 9 files changed, 58 insertions(+), 55 deletions(-) create mode 100644 eth2/types/src/test_utils/test_random.rs rename eth2/types/src/test_utils/{ => test_random}/address.rs (100%) 
rename eth2/types/src/test_utils/{ => test_random}/aggregate_signature.rs (100%) rename eth2/types/src/test_utils/{ => test_random}/bitfield.rs (90%) rename eth2/types/src/test_utils/{ => test_random}/hash256.rs (100%) rename eth2/types/src/test_utils/{ => test_random}/public_key.rs (100%) rename eth2/types/src/test_utils/{ => test_random}/secret_key.rs (100%) rename eth2/types/src/test_utils/{ => test_random}/signature.rs (100%) diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 82e060fca..1e88ab34f 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,55 +1,3 @@ -use rand::RngCore; +mod test_random; -pub use rand::{prng::XorShiftRng, SeedableRng}; - -pub mod address; -pub mod aggregate_signature; -pub mod bitfield; -pub mod hash256; -pub mod public_key; -pub mod secret_key; -pub mod signature; - -pub trait TestRandom -where - T: RngCore, -{ - fn random_for_test(rng: &mut T) -> Self; -} - -impl TestRandom for bool { - fn random_for_test(rng: &mut T) -> Self { - (rng.next_u32() % 2) == 1 - } -} - -impl TestRandom for u64 { - fn random_for_test(rng: &mut T) -> Self { - rng.next_u64() - } -} - -impl TestRandom for u32 { - fn random_for_test(rng: &mut T) -> Self { - rng.next_u32() - } -} - -impl TestRandom for usize { - fn random_for_test(rng: &mut T) -> Self { - rng.next_u32() as usize - } -} - -impl TestRandom for Vec -where - U: TestRandom, -{ - fn random_for_test(rng: &mut T) -> Self { - vec![ - ::random_for_test(rng), - ::random_for_test(rng), - ::random_for_test(rng), - ] - } -} +pub use test_random::TestRandom; diff --git a/eth2/types/src/test_utils/test_random.rs b/eth2/types/src/test_utils/test_random.rs new file mode 100644 index 000000000..841d129a0 --- /dev/null +++ b/eth2/types/src/test_utils/test_random.rs @@ -0,0 +1,55 @@ +use rand::RngCore; + +pub use rand::{prng::XorShiftRng, SeedableRng}; + +mod address; +mod aggregate_signature; +mod bitfield; +mod hash256; +mod public_key; +mod secret_key; +mod signature; + +pub trait TestRandom +where + T: RngCore, +{ + fn random_for_test(rng: &mut T) -> Self; +} + +impl TestRandom for bool { + fn random_for_test(rng: &mut T) -> Self { + (rng.next_u32() % 2) == 1 + } +} + +impl TestRandom for u64 { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u64() + } +} + +impl TestRandom for u32 { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u32() + } +} + +impl TestRandom for usize { + fn random_for_test(rng: &mut T) -> Self { + rng.next_u32() as usize + } +} + +impl TestRandom for Vec +where + U: TestRandom, +{ + fn random_for_test(rng: &mut T) -> Self { + vec![ + ::random_for_test(rng), + ::random_for_test(rng), + ::random_for_test(rng), + ] + } +} diff --git a/eth2/types/src/test_utils/address.rs b/eth2/types/src/test_utils/test_random/address.rs similarity index 100% rename from eth2/types/src/test_utils/address.rs rename to eth2/types/src/test_utils/test_random/address.rs diff --git a/eth2/types/src/test_utils/aggregate_signature.rs b/eth2/types/src/test_utils/test_random/aggregate_signature.rs similarity index 100% rename from eth2/types/src/test_utils/aggregate_signature.rs rename to eth2/types/src/test_utils/test_random/aggregate_signature.rs diff --git a/eth2/types/src/test_utils/bitfield.rs b/eth2/types/src/test_utils/test_random/bitfield.rs similarity index 90% rename from eth2/types/src/test_utils/bitfield.rs rename to eth2/types/src/test_utils/test_random/bitfield.rs index 15011edd9..9748458f1 100644 --- a/eth2/types/src/test_utils/bitfield.rs +++ 
b/eth2/types/src/test_utils/test_random/bitfield.rs @@ -1,5 +1,5 @@ -use super::super::Bitfield; use super::TestRandom; +use crate::Bitfield; use rand::RngCore; impl TestRandom for Bitfield { diff --git a/eth2/types/src/test_utils/hash256.rs b/eth2/types/src/test_utils/test_random/hash256.rs similarity index 100% rename from eth2/types/src/test_utils/hash256.rs rename to eth2/types/src/test_utils/test_random/hash256.rs diff --git a/eth2/types/src/test_utils/public_key.rs b/eth2/types/src/test_utils/test_random/public_key.rs similarity index 100% rename from eth2/types/src/test_utils/public_key.rs rename to eth2/types/src/test_utils/test_random/public_key.rs diff --git a/eth2/types/src/test_utils/secret_key.rs b/eth2/types/src/test_utils/test_random/secret_key.rs similarity index 100% rename from eth2/types/src/test_utils/secret_key.rs rename to eth2/types/src/test_utils/test_random/secret_key.rs diff --git a/eth2/types/src/test_utils/signature.rs b/eth2/types/src/test_utils/test_random/signature.rs similarity index 100% rename from eth2/types/src/test_utils/signature.rs rename to eth2/types/src/test_utils/test_random/signature.rs From 62ab782ee2da6d9189edbf406936364e55eb458e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 9 Mar 2019 20:09:02 +1100 Subject: [PATCH 18/56] Add TestingAttestationBuilder --- .../src/beacon_block_bencher.rs | 113 ++--------------- eth2/types/src/test_utils/mod.rs | 3 + eth2/types/src/test_utils/test_random.rs | 2 - .../test_utils/testing_attestation_builder.rs | 117 ++++++++++++++++++ 4 files changed, 132 insertions(+), 103 deletions(-) create mode 100644 eth2/types/src/test_utils/testing_attestation_builder.rs diff --git a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs index 989dbd929..5e7fddb55 100644 --- a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs +++ b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs @@ -1,7 +1,8 @@ use rayon::prelude::*; use ssz::{SignedRoot, TreeHash}; use types::{ - attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, *, + attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, + test_utils::TestingAttestationBuilder, *, }; pub struct BeaconBlockBencher { @@ -126,16 +127,16 @@ impl BeaconBlockBencher { let mut attestations: Vec = committees .par_iter() .map(|(slot, committee, signing_validators, shard)| { - committee_to_attestation( - state, - &committee, - signing_validators, - secret_keys, - *shard, - *slot, - &state.fork, - spec, - ) + let mut builder = + TestingAttestationBuilder::new(state, committee, *slot, *shard, spec); + + let signing_secret_keys: Vec<&SecretKey> = signing_validators + .iter() + .map(|validator_index| secret_keys[*validator_index]) + .collect(); + builder.sign(signing_validators, &signing_secret_keys, &state.fork, spec); + + builder.build() }) .collect(); @@ -193,93 +194,3 @@ fn build_double_vote_attester_slashing( AttesterSlashingBuilder::double_vote(validator_indices, signer) } - -/// Convert some committee into a valid `Attestation`. -/// -/// Note: `committee` must be the full committee for the attestation. `signing_validators` is a -/// list of validator indices that should sign the attestation. 
-fn committee_to_attestation( - state: &BeaconState, - committee: &[usize], - signing_validators: &[usize], - secret_keys: &[&SecretKey], - shard: u64, - slot: Slot, - fork: &Fork, - spec: &ChainSpec, -) -> Attestation { - let current_epoch = state.current_epoch(spec); - let previous_epoch = state.previous_epoch(spec); - - let is_previous_epoch = - state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); - - let justified_epoch = if is_previous_epoch { - state.previous_justified_epoch - } else { - state.justified_epoch - }; - - let epoch_boundary_root = if is_previous_epoch { - *state - .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - } else { - *state - .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - }; - - let justified_block_root = *state - .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap(); - - let data = AttestationData { - slot, - shard, - beacon_block_root: *state.get_block_root(slot, spec).unwrap(), - epoch_boundary_root, - crosslink_data_root: Hash256::zero(), - latest_crosslink: state.latest_crosslinks[shard as usize].clone(), - justified_epoch, - justified_block_root, - }; - - let mut aggregate_signature = AggregateSignature::new(); - let mut aggregation_bitfield = Bitfield::new(); - let mut custody_bitfield = Bitfield::new(); - - let message = AttestationDataAndCustodyBit { - data: data.clone(), - custody_bit: false, - } - .hash_tree_root(); - - let domain = spec.get_domain( - data.slot.epoch(spec.slots_per_epoch), - Domain::Attestation, - fork, - ); - - for (i, validator_index) in committee.iter().enumerate() { - custody_bitfield.set(i, false); - - if signing_validators - .iter() - .any(|&signer| *validator_index == signer) - { - aggregation_bitfield.set(i, true); - let signature = Signature::new(&message, domain, secret_keys[*validator_index]); - aggregate_signature.add(&signature); - } else { - aggregation_bitfield.set(i, false); - } - } - - Attestation { - aggregation_bitfield, - data, - custody_bitfield, - aggregate_signature, - } -} diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 1e88ab34f..6138940a2 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,3 +1,6 @@ mod test_random; +mod testing_attestation_builder; +pub use rand::{prng::XorShiftRng, SeedableRng}; pub use test_random::TestRandom; +pub use testing_attestation_builder::TestingAttestationBuilder; diff --git a/eth2/types/src/test_utils/test_random.rs b/eth2/types/src/test_utils/test_random.rs index 841d129a0..3b172463e 100644 --- a/eth2/types/src/test_utils/test_random.rs +++ b/eth2/types/src/test_utils/test_random.rs @@ -1,7 +1,5 @@ use rand::RngCore; -pub use rand::{prng::XorShiftRng, SeedableRng}; - mod address; mod aggregate_signature; mod bitfield; diff --git a/eth2/types/src/test_utils/testing_attestation_builder.rs b/eth2/types/src/test_utils/testing_attestation_builder.rs new file mode 100644 index 000000000..f52edadfe --- /dev/null +++ b/eth2/types/src/test_utils/testing_attestation_builder.rs @@ -0,0 +1,117 @@ +use crate::*; +use ssz::TreeHash; + +pub struct TestingAttestationBuilder { + committee: Vec, + attestation: Attestation, +} + +impl TestingAttestationBuilder { + pub fn new( + state: &BeaconState, + committee: &[usize], + slot: Slot, + shard: u64, + spec: &ChainSpec, + ) -> Self { + let current_epoch = state.current_epoch(spec); + let previous_epoch = state.previous_epoch(spec); + + let 
is_previous_epoch =
+            state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch);
+
+        let justified_epoch = if is_previous_epoch {
+            state.previous_justified_epoch
+        } else {
+            state.justified_epoch
+        };
+
+        let epoch_boundary_root = if is_previous_epoch {
+            *state
+                .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec)
+                .unwrap()
+        } else {
+            *state
+                .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec)
+                .unwrap()
+        };
+
+        let justified_block_root = *state
+            .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec)
+            .unwrap();
+
+        let mut aggregation_bitfield = Bitfield::new();
+        let mut custody_bitfield = Bitfield::new();
+
+        for (i, _) in committee.iter().enumerate() {
+            custody_bitfield.set(i, false);
+            aggregation_bitfield.set(i, false);
+        }
+
+        let attestation = Attestation {
+            aggregation_bitfield,
+            data: AttestationData {
+                slot,
+                shard,
+                beacon_block_root: *state.get_block_root(slot, spec).unwrap(),
+                epoch_boundary_root,
+                crosslink_data_root: Hash256::zero(),
+                latest_crosslink: state.latest_crosslinks[shard as usize].clone(),
+                justified_epoch,
+                justified_block_root,
+            },
+            custody_bitfield,
+            aggregate_signature: AggregateSignature::new(),
+        };
+
+        Self {
+            attestation,
+            committee: committee.to_vec(),
+        }
+    }
+
+    pub fn sign(
+        &mut self,
+        signing_validators: &[usize],
+        secret_keys: &[&SecretKey],
+        fork: &Fork,
+        spec: &ChainSpec,
+    ) {
+        assert_eq!(
+            signing_validators.len(),
+            secret_keys.len(),
+            "Must be a key for each validator"
+        );
+
+        for (key_index, validator_index) in signing_validators.iter().enumerate() {
+            let committee_index = self
+                .committee
+                .iter()
+                .position(|v| *v == *validator_index)
+                .expect("Signing validator not in attestation committee");
+
+            self.attestation
+                .aggregation_bitfield
+                .set(committee_index, true);
+
+            let message = AttestationDataAndCustodyBit {
+                data: self.attestation.data.clone(),
+                custody_bit: false,
+            }
+            .hash_tree_root();
+
+            let domain = spec.get_domain(
+                self.attestation.data.slot.epoch(spec.slots_per_epoch),
+                Domain::Attestation,
+                fork,
+            );
+
+            let signature = Signature::new(&message, domain, secret_keys[key_index]);
+            self.attestation.aggregate_signature.add(&signature)
+        }
+    }
+
+    pub fn build(self) -> Attestation {
+        self.attestation
+    }
+}

From 6250c81bb9d2699fa17e1f111365ad731dcd1ed5 Mon Sep 17 00:00:00 2001
From: Paul Hauner 
Date: Sat, 9 Mar 2019 20:09:17 +1100
Subject: [PATCH 19/56] Fix bug in attestation verification

We were not ensuring that a validator was present on the aggregation
bitfield before adding their signature to the aggregate public key
---
 .../src/per_block_processing/errors.rs        |  2 +
 .../validate_attestation.rs                   | 72 ++++++++++++-------
 2 files changed, 47 insertions(+), 27 deletions(-)

diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs
index b97d8bacc..7e71a9b75 100644
--- a/eth2/state_processing/src/per_block_processing/errors.rs
+++ b/eth2/state_processing/src/per_block_processing/errors.rs
@@ -147,6 +147,8 @@ pub enum AttestationInvalid {
     ///
     /// (attestation_data_shard, attestation_data_slot)
     NoCommitteeForShard(u64, Slot),
+    /// The validator index was unknown.
+    UnknownValidator(u64),
     /// The attestation signature verification failed.
     BadSignature,
     /// The shard block root was not set to zero. This is a phase 0 requirement.
diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index 54bd2d332..b15360850 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -159,18 +159,16 @@ fn validate_attestation_signature_optional( if verify_signature { let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); - verify!( - verify_attestation_signature( - state, - committee, - attestation_epoch, - &attestation.custody_bitfield, - &attestation.data, - &attestation.aggregate_signature, - spec - ), - Invalid::BadSignature - ); + verify_attestation_signature( + state, + committee, + attestation_epoch, + &attestation.aggregation_bitfield, + &attestation.custody_bitfield, + &attestation.data, + &attestation.aggregate_signature, + spec, + )?; } // [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`. @@ -195,30 +193,45 @@ fn verify_attestation_signature( state: &BeaconState, committee: &[usize], attestation_epoch: Epoch, + aggregation_bitfield: &Bitfield, custody_bitfield: &Bitfield, attestation_data: &AttestationData, aggregate_signature: &AggregateSignature, spec: &ChainSpec, -) -> bool { +) -> Result<(), Error> { let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2]; let mut message_exists = vec![false; 2]; for (i, v) in committee.iter().enumerate() { - let custody_bit = match custody_bitfield.get(i) { - Ok(bit) => bit, - // Invalidate signature if custody_bitfield.len() < committee - Err(_) => return false, - }; + let validator_signed = aggregation_bitfield.get(i).map_err(|_| { + Error::Invalid(Invalid::BadAggregationBitfieldLength( + committee.len(), + aggregation_bitfield.len(), + )) + })?; - message_exists[custody_bit as usize] = true; + if validator_signed { + let custody_bit: bool = match custody_bitfield.get(i) { + Ok(bit) => bit, + // Invalidate signature if custody_bitfield.len() < committee + Err(_) => { + return Err(Error::Invalid(Invalid::BadCustodyBitfieldLength( + committee.len(), + custody_bitfield.len(), + ))); + } + }; - match state.validator_registry.get(*v as usize) { - Some(validator) => { - aggregate_pubs[custody_bit as usize].add(&validator.pubkey); - } - // Invalidate signature if validator index is unknown. - None => return false, - }; + message_exists[custody_bit as usize] = true; + + match state.validator_registry.get(*v as usize) { + Some(validator) => { + aggregate_pubs[custody_bit as usize].add(&validator.pubkey); + } + // Return error if validator index is unknown. 
+ None => return Err(Error::BeaconStateError(BeaconStateError::UnknownValidator)), + }; + } } // Message when custody bitfield is `false` @@ -251,5 +264,10 @@ fn verify_attestation_signature( let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork); - aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]) + verify!( + aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]), + Invalid::BadSignature + ); + + Ok(()) } From c1e386a0b16f9fd2c921524cec0f6362acd8ac75 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 9 Mar 2019 22:10:47 +1100 Subject: [PATCH 20/56] Add deposits, transfers and exits to benches --- .../benches/block_processing_benches.rs | 96 ++++++++++++++----- .../src/beacon_block_bencher.rs | 57 ++++++++++- eth2/types/src/test_utils/mod.rs | 6 ++ .../src/test_utils/testing_deposit_builder.rs | 48 ++++++++++ .../test_utils/testing_transfer_builder.rs | 37 +++++++ .../testing_voluntary_exit_builder.rs | 29 ++++++ 6 files changed, 248 insertions(+), 25 deletions(-) create mode 100644 eth2/types/src/test_utils/testing_deposit_builder.rs create mode 100644 eth2/types/src/test_utils/testing_transfer_builder.rs create mode 100644 eth2/types/src/test_utils/testing_voluntary_exit_builder.rs diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 75943b1ad..2ff2e7413 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -17,8 +17,8 @@ pub fn block_processing_16k_validators(c: &mut Criterion) { let validator_count = 16_384; - let (state, keypairs) = build_state(validator_count, &spec); - let block = build_block(&state, &keypairs, &spec); + let (mut state, keypairs) = build_state(validator_count, &spec); + let block = build_block(&mut state, &keypairs, &spec); assert_eq!( block.body.proposer_slashings.len(), @@ -53,6 +53,24 @@ pub fn block_processing_16k_validators(c: &mut Criterion) { "The block should have the maximum possible attestations." ); + assert_eq!( + block.body.deposits.len(), + spec.max_deposits as usize, + "The block should have the maximum possible deposits." + ); + + assert_eq!( + block.body.voluntary_exits.len(), + spec.max_voluntary_exits as usize, + "The block should have the maximum possible voluntary exits." + ); + + assert_eq!( + block.body.transfers.len(), + spec.max_transfers as usize, + "The block should have the maximum possible transfers." + ); + bench_block_processing( c, &block, @@ -75,7 +93,7 @@ fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec BeaconBlock { +fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) -> BeaconBlock { let mut builder = BeaconBlockBencher::new(spec); builder.set_slot(state.slot); @@ -85,8 +103,13 @@ fn build_block(state: &BeaconState, keypairs: &[Keypair], spec: &ChainSpec) -> B builder.set_randao_reveal(&keypair.sk, &state.fork, spec); + // Used as a stream of validator indices for use in slashings, exits, etc. + let mut validators_iter = (0..keypairs.len() as u64).into_iter(); + // Insert the maximum possible number of `ProposerSlashing` objects. 
- for validator_index in 0..spec.max_proposer_slashings { + for _ in 0..spec.max_proposer_slashings { + let validator_index = validators_iter.next().expect("Insufficient validators."); + builder.insert_proposer_slashing( validator_index, &keypairs[validator_index as usize].sk, @@ -96,26 +119,18 @@ fn build_block(state: &BeaconState, keypairs: &[Keypair], spec: &ChainSpec) -> B } // Insert the maximum possible number of `AttesterSlashing` objects - let number_of_slashable_attesters = - spec.max_indices_per_slashable_vote * spec.max_attester_slashings; - let all_attester_slashing_indices: Vec = (spec.max_proposer_slashings - ..(spec.max_proposer_slashings + number_of_slashable_attesters)) - .collect(); - let attester_slashing_groups: Vec<&[u64]> = all_attester_slashing_indices - .chunks(spec.max_indices_per_slashable_vote as usize) - .collect(); - for attester_slashing_group in attester_slashing_groups { - let attester_slashing_keypairs: Vec<&SecretKey> = attester_slashing_group - .iter() - .map(|&validator_index| &keypairs[validator_index as usize].sk) - .collect(); + for _ in 0..spec.max_attester_slashings { + let mut attesters: Vec = vec![]; + let mut secret_keys: Vec<&SecretKey> = vec![]; - builder.insert_attester_slashing( - &attester_slashing_group, - &attester_slashing_keypairs, - &state.fork, - spec, - ); + for _ in 0..spec.max_indices_per_slashable_vote { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + attesters.push(validator_index); + secret_keys.push(&keypairs[validator_index as usize].sk); + } + + builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); } // Insert the maximum possible number of `Attestation` objects. @@ -124,6 +139,41 @@ fn build_block(state: &BeaconState, keypairs: &[Keypair], spec: &ChainSpec) -> B .fill_with_attestations(state, &all_secret_keys, spec) .unwrap(); + // Insert the maximum possible number of `Deposit` objects. + for i in 0..spec.max_deposits { + builder.insert_deposit(32_000_000_000, state.deposit_index + i, spec); + } + + // Insert the maximum possible number of `Exit` objects. + for _ in 0..spec.max_voluntary_exits { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + builder.insert_exit( + state, + validator_index, + &keypairs[validator_index as usize].sk, + spec, + ); + } + + // Insert the maximum possible number of `Transfer` objects. + for _ in 0..spec.max_transfers { + let validator_index = validators_iter.next().expect("Insufficient validators."); + + // Manually set the validator to be withdrawn. 
+ state.validator_registry[validator_index as usize].withdrawable_epoch = + state.previous_epoch(spec); + + builder.insert_transfer( + state, + validator_index, + validator_index, + 1, + keypairs[validator_index as usize].clone(), + spec, + ); + } + builder.build(&keypair.sk, &state.fork, spec) } diff --git a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs index 5e7fddb55..3eafdc0c9 100644 --- a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs +++ b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs @@ -1,8 +1,13 @@ use rayon::prelude::*; use ssz::{SignedRoot, TreeHash}; use types::{ - attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, - test_utils::TestingAttestationBuilder, *, + attester_slashing::AttesterSlashingBuilder, + proposer_slashing::ProposerSlashingBuilder, + test_utils::{ + TestingAttestationBuilder, TestingDepositBuilder, TestingTransferBuilder, + TestingVoluntaryExitBuilder, + }, + *, }; pub struct BeaconBlockBencher { @@ -145,6 +150,54 @@ impl BeaconBlockBencher { Ok(()) } + /// Insert a `Valid` deposit into the state. + pub fn insert_deposit(&mut self, amount: u64, index: u64, spec: &ChainSpec) { + let keypair = Keypair::random(); + + let mut builder = TestingDepositBuilder::new(amount); + builder.set_index(index); + builder.sign(&keypair, spec); + + self.block.body.deposits.push(builder.build()) + } + + /// Insert a `Valid` exit into the state. + pub fn insert_exit( + &mut self, + state: &BeaconState, + validator_index: u64, + secret_key: &SecretKey, + spec: &ChainSpec, + ) { + let mut builder = TestingVoluntaryExitBuilder::new( + state.slot.epoch(spec.slots_per_epoch), + validator_index, + ); + + builder.sign(secret_key, &state.fork, spec); + + self.block.body.voluntary_exits.push(builder.build()) + } + + /// Insert a `Valid` transfer into the state. + /// + /// Note: this will set the validator to be withdrawable by directly modifying the state + /// validator registry. This _may_ cause problems historic hashes, etc. + pub fn insert_transfer( + &mut self, + state: &BeaconState, + from: u64, + to: u64, + amount: u64, + keypair: Keypair, + spec: &ChainSpec, + ) { + let mut builder = TestingTransferBuilder::new(from, to, amount, state.slot); + builder.sign(keypair, &state.fork, spec); + + self.block.body.transfers.push(builder.build()) + } + /// Signs and returns the block, consuming the builder. 
pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock { self.sign(sk, fork, spec); diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 6138940a2..2145f684a 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,6 +1,12 @@ mod test_random; mod testing_attestation_builder; +mod testing_deposit_builder; +mod testing_transfer_builder; +mod testing_voluntary_exit_builder; pub use rand::{prng::XorShiftRng, SeedableRng}; pub use test_random::TestRandom; pub use testing_attestation_builder::TestingAttestationBuilder; +pub use testing_deposit_builder::TestingDepositBuilder; +pub use testing_transfer_builder::TestingTransferBuilder; +pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs new file mode 100644 index 000000000..c7eadcfd1 --- /dev/null +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -0,0 +1,48 @@ +use crate::*; +use bls::{create_proof_of_possession, get_withdrawal_credentials}; + +pub struct TestingDepositBuilder { + deposit: Deposit, +} + +impl TestingDepositBuilder { + pub fn new(amount: u64) -> Self { + let keypair = Keypair::random(); + + let deposit = Deposit { + branch: vec![], + index: 0, + deposit_data: DepositData { + amount, + timestamp: 1, + deposit_input: DepositInput { + pubkey: keypair.pk, + withdrawal_credentials: Hash256::zero(), + proof_of_possession: Signature::empty_signature(), + }, + }, + }; + + Self { deposit } + } + + pub fn set_index(&mut self, index: u64) { + self.deposit.index = index; + } + + pub fn sign(&mut self, keypair: &Keypair, spec: &ChainSpec) { + self.deposit.deposit_data.deposit_input.pubkey = keypair.pk.clone(); + self.deposit.deposit_data.deposit_input.proof_of_possession = + create_proof_of_possession(&keypair); + self.deposit + .deposit_data + .deposit_input + .withdrawal_credentials = Hash256::from_slice( + &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], + ); + } + + pub fn build(self) -> Deposit { + self.deposit + } +} diff --git a/eth2/types/src/test_utils/testing_transfer_builder.rs b/eth2/types/src/test_utils/testing_transfer_builder.rs new file mode 100644 index 000000000..a8a6d7a17 --- /dev/null +++ b/eth2/types/src/test_utils/testing_transfer_builder.rs @@ -0,0 +1,37 @@ +use crate::*; +use ssz::SignedRoot; + +pub struct TestingTransferBuilder { + transfer: Transfer, +} + +impl TestingTransferBuilder { + pub fn new(from: u64, to: u64, amount: u64, slot: Slot) -> Self { + let keypair = Keypair::random(); + + let mut transfer = Transfer { + from, + to, + amount, + fee: 0, + slot, + pubkey: keypair.pk, + signature: Signature::empty_signature(), + }; + + Self { transfer } + } + + pub fn sign(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) { + self.transfer.pubkey = keypair.pk; + let message = self.transfer.signed_root(); + let epoch = self.transfer.slot.epoch(spec.slots_per_epoch); + let domain = spec.get_domain(epoch, Domain::Transfer, fork); + + self.transfer.signature = Signature::new(&message, domain, &keypair.sk); + } + + pub fn build(self) -> Transfer { + self.transfer + } +} diff --git a/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs b/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs new file mode 100644 index 000000000..92ef4484e --- /dev/null +++ b/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs @@ -0,0 +1,29 
@@ +use crate::*; +use ssz::SignedRoot; + +pub struct TestingVoluntaryExitBuilder { + exit: VoluntaryExit, +} + +impl TestingVoluntaryExitBuilder { + pub fn new(epoch: Epoch, validator_index: u64) -> Self { + let exit = VoluntaryExit { + epoch, + validator_index, + signature: Signature::empty_signature(), + }; + + Self { exit } + } + + pub fn sign(&mut self, secret_key: &SecretKey, fork: &Fork, spec: &ChainSpec) { + let message = self.exit.signed_root(); + let domain = spec.get_domain(self.exit.epoch, Domain::Exit, fork); + + self.exit.signature = Signature::new(&message, domain, secret_key); + } + + pub fn build(self) -> VoluntaryExit { + self.exit + } +} From 90d00773cb0af9bf6cfd6656a317df17d8f6165e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 08:30:36 +1100 Subject: [PATCH 21/56] Add slashings back into per-block processing. I thought they would invalidate the attestations but I was wrong. --- .../benches/block_processing_benches.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 2ff2e7413..26c62d0d3 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -275,10 +275,7 @@ fn bench_block_processing( ); let state = initial_state.clone(); - let mut block = initial_block.clone(); - // Slashings will invalidate the attestations. - block.body.proposer_slashings = vec![]; - block.body.attester_slashings = vec![]; + let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( &format!("block_processing_{}", desc), @@ -346,14 +343,11 @@ fn bench_block_processing( ); let state = initial_state.clone(); - let mut block = initial_block.clone(); - // Slashings will invalidate the attestations. - block.body.proposer_slashings = vec![]; - block.body.attester_slashings = vec![]; + let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( &format!("block_processing_{}", desc), - Benchmark::new("per_block_processing_no_slashings", move |b| { + Benchmark::new("per_block_processing", move |b| { b.iter_with_setup( || state.clone(), |mut state| black_box(per_block_processing(&mut state, &block, &spec).unwrap()), From 5f3da0732f076d62278f88f15b468c5012a21836 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 08:31:40 +1100 Subject: [PATCH 22/56] Fix attestations bug in block builder. It was previously producing too many attestations in some scenarios. --- .../benching_utils/src/beacon_block_bencher.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs index 3eafdc0c9..46e822baa 100644 --- a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs +++ b/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs @@ -92,18 +92,20 @@ impl BeaconBlockBencher { // - The slot is too old to be included in a block at this slot. // - The `MAX_ATTESTATIONS`. loop { - if attestations_added == spec.max_attestations { - break; - } if state.slot >= slot + spec.slots_per_epoch { break; } for (committee, shard) in state.get_crosslink_committees_at_slot(slot, spec)? 
{ - committees.push((slot, committee.clone(), committee.clone(), *shard)) + if attestations_added >= spec.max_attestations { + break; + } + + committees.push((slot, committee.clone(), committee.clone(), *shard)); + + attestations_added += 1; } - attestations_added += 1; slot -= 1; } From 1ca99b8c4c7d9afc5602e32e9c689583bdeb3b56 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 08:33:17 +1100 Subject: [PATCH 23/56] Optimise deposits processing. --- .../src/per_block_processing.rs | 129 +++++++++++++----- .../src/per_block_processing/errors.rs | 5 + .../verify_attester_slashing.rs | 19 ++- .../per_block_processing/verify_deposit.rs | 61 ++++++++- eth2/types/src/beacon_state.rs | 6 +- 5 files changed, 182 insertions(+), 38 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 149e0bf79..c446dcd85 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,12 +1,17 @@ use self::verify_proposer_slashing::verify_proposer_slashing; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use hashing::hash; +use rayon::prelude::*; use ssz::{ssz_encode, SignedRoot, TreeHash}; use types::*; -pub use self::verify_attester_slashing::verify_attester_slashing; +pub use self::verify_attester_slashing::{ + gather_attester_slashing_indices, verify_attester_slashing, +}; pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; -pub use verify_deposit::verify_deposit; +pub use verify_deposit::{ + build_public_key_hashmap, get_existing_validator_index, verify_deposit, verify_deposit_index, +}; pub use verify_exit::verify_exit; pub use verify_transfer::{execute_transfer, verify_transfer}; @@ -226,9 +231,17 @@ pub fn process_proposer_slashings( proposer_slashings.len() as u64 <= spec.max_proposer_slashings, Invalid::MaxProposerSlashingsExceeded ); - for (i, proposer_slashing) in proposer_slashings.iter().enumerate() { - verify_proposer_slashing(proposer_slashing, &state, spec) - .map_err(|e| e.into_with_index(i))?; + + // Verify proposer slashings in parallel. + proposer_slashings + .par_iter() + .enumerate() + .try_for_each(|(i, proposer_slashing)| { + verify_proposer_slashing(proposer_slashing, &state, spec) + .map_err(|e| e.into_with_index(i)) + })?; + + for proposer_slashing in proposer_slashings { state.slash_validator(proposer_slashing.proposer_index as usize, spec)?; } @@ -250,8 +263,19 @@ pub fn process_attester_slashings( attester_slashings.len() as u64 <= spec.max_attester_slashings, Invalid::MaxAttesterSlashingsExceed ); + + // Verify attester slashings in parallel. + attester_slashings + .par_iter() + .enumerate() + .try_for_each(|(i, attester_slashing)| { + verify_attester_slashing(&state, &attester_slashing, spec) + .map_err(|e| e.into_with_index(i)) + })?; + + // Gather the slashable indices and update the state in series. 
for (i, attester_slashing) in attester_slashings.iter().enumerate() { - let slashable_indices = verify_attester_slashing(&state, &attester_slashing, spec) + let slashable_indices = gather_attester_slashing_indices(&state, &attester_slashing) .map_err(|e| e.into_with_index(i))?; for i in slashable_indices { state.slash_validator(i as usize, spec)?; @@ -276,14 +300,20 @@ pub fn process_attestations( attestations.len() as u64 <= spec.max_attestations, Invalid::MaxAttestationsExceeded ); - for (i, attestation) in attestations.iter().enumerate() { - // Build the previous epoch cache only if required by an attestation. - if attestation.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec) { - state.build_epoch_cache(RelativeEpoch::Previous, spec)?; - } - validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))?; + // Ensure the previous epoch cache exists. + state.build_epoch_cache(RelativeEpoch::Previous, spec)?; + // Verify attestations in parallel. + attestations + .par_iter() + .enumerate() + .try_for_each(|(i, attestation)| { + validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i)) + })?; + + // Update the state in series. + for attestation in attestations { let pending_attestation = PendingAttestation { data: attestation.data.clone(), aggregation_bitfield: attestation.aggregation_bitfield.clone(), @@ -311,24 +341,53 @@ pub fn process_deposits( deposits.len() as u64 <= spec.max_deposits, Invalid::MaxDepositsExceeded ); - for (i, deposit) in deposits.iter().enumerate() { - verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec) - .map_err(|e| e.into_with_index(i))?; - state - .process_deposit( - deposit.deposit_data.deposit_input.pubkey.clone(), - deposit.deposit_data.amount, - deposit - .deposit_data - .deposit_input - .proof_of_possession - .clone(), - deposit.deposit_data.deposit_input.withdrawal_credentials, - None, - spec, - ) - .map_err(|_| Error::Invalid(Invalid::DepositProcessingFailed(i)))?; + // Verify deposits in parallel. + deposits + .par_iter() + .enumerate() + .try_for_each(|(i, deposit)| { + verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec) + .map_err(|e| e.into_with_index(i)) + })?; + + let public_key_to_index_hashmap = build_public_key_hashmap(&state); + + // Check `state.deposit_index` and update the state in series. + for (i, deposit) in deposits.iter().enumerate() { + verify_deposit_index(state, deposit).map_err(|e| e.into_with_index(i))?; + + // Get an `Option` where `u64` is the validator index if this deposit public key + // already exists in the beacon_state. + // + // This function also verifies the withdrawal credentials. + let validator_index = + get_existing_validator_index(state, deposit, &public_key_to_index_hashmap) + .map_err(|e| e.into_with_index(i))?; + + let deposit_data = &deposit.deposit_data; + let deposit_input = &deposit.deposit_data.deposit_input; + + if let Some(index) = validator_index { + // Update the existing validator balance. + safe_add_assign!( + state.validator_balances[index as usize], + deposit_data.amount + ); + } else { + // Create a new validator. 
+ let validator = Validator { + pubkey: deposit_input.pubkey.clone(), + withdrawal_credentials: deposit_input.withdrawal_credentials.clone(), + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + initiated_exit: false, + slashed: false, + }; + state.validator_registry.push(validator); + state.validator_balances.push(deposit_data.amount); + } state.deposit_index += 1; } @@ -351,9 +410,17 @@ pub fn process_exits( voluntary_exits.len() as u64 <= spec.max_voluntary_exits, Invalid::MaxExitsExceeded ); - for (i, exit) in voluntary_exits.iter().enumerate() { - verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))?; + // Verify exits in parallel. + voluntary_exits + .par_iter() + .enumerate() + .try_for_each(|(i, exit)| { + verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i)) + })?; + + // Update the state in series. + for exit in voluntary_exits { state.initiate_validator_exit(exit.validator_index as usize); } diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 7e71a9b75..f64a3f8aa 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -294,6 +294,11 @@ pub enum DepositInvalid { /// /// (state_index, deposit_index) BadIndex(u64, u64), + /// The proof-of-possession does not match the given pubkey. + BadProofOfPossession, + /// The withdrawal credentials for the depositing validator did not match the withdrawal + /// credentials of an existing validator with the same public key. + BadWithdrawalCredentials, /// The specified `branch` and `index` did not form a valid proof that the deposit is included /// in the eth1 deposit root. BadMerkleProof, diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs index 71ac97469..2970712c5 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -12,7 +12,7 @@ pub fn verify_attester_slashing( state: &BeaconState, attester_slashing: &AttesterSlashing, spec: &ChainSpec, -) -> Result, Error> { +) -> Result<(), Error> { let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; @@ -31,6 +31,21 @@ pub fn verify_attester_slashing( verify_slashable_attestation(state, &slashable_attestation_2, spec) .map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?; + Ok(()) +} + +/// For a given attester slashing, return the indices able to be slashed. +/// +/// Returns Ok(indices) if `indices.len() > 0`. 
+/// +/// Spec v0.4.0 +pub fn gather_attester_slashing_indices( + state: &BeaconState, + attester_slashing: &AttesterSlashing, +) -> Result, Error> { + let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; + let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; + let mut slashable_indices = vec![]; for i in &slashable_attestation_1.validator_indices { let validator = state @@ -38,7 +53,7 @@ pub fn verify_attester_slashing( .get(*i as usize) .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?; - if slashable_attestation_1.validator_indices.contains(&i) & !validator.slashed { + if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed { slashable_indices.push(*i); } } diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 69dae1533..0cf2a078f 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -1,15 +1,22 @@ use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error}; +use bls::verify_proof_of_possession; use hashing::hash; use merkle_proof::verify_merkle_proof; use ssz::ssz_encode; use ssz_derive::Encode; +use std::collections::HashMap; use types::*; +pub type PublicKeyValidatorIndexHashmap = HashMap; + /// Indicates if a `Deposit` is valid to be included in a block in the current epoch of the given /// state. /// /// Returns `Ok(())` if the `Deposit` is valid, otherwise indicates the reason for invalidity. /// +/// This function _does not_ check `state.deposit_index` so this function may be run in parallel. +/// See the `verify_deposit_index` function for this. +/// /// Note: this function is incomplete. /// /// Spec v0.4.0 @@ -20,8 +27,14 @@ pub fn verify_deposit( spec: &ChainSpec, ) -> Result<(), Error> { verify!( - deposit.index == state.deposit_index, - Invalid::BadIndex(state.deposit_index, deposit.index) + // TODO: update proof of possession. + // + // https://github.com/sigp/lighthouse/issues/239 + verify_proof_of_possession( + &deposit.deposit_data.deposit_input.proof_of_possession, + &deposit.deposit_data.deposit_input.pubkey + ), + Invalid::BadProofOfPossession ); if verify_merkle_branch { @@ -34,6 +47,50 @@ pub fn verify_deposit( Ok(()) } +/// Verify that the `Deposit` index is correct. 
+/// +/// Spec v0.4.0 +pub fn verify_deposit_index(state: &BeaconState, deposit: &Deposit) -> Result<(), Error> { + verify!( + deposit.index == state.deposit_index, + Invalid::BadIndex(state.deposit_index, deposit.index) + ); + + Ok(()) +} + +pub fn build_public_key_hashmap(state: &BeaconState) -> PublicKeyValidatorIndexHashmap { + let mut hashmap = HashMap::with_capacity(state.validator_registry.len()); + + for (i, validator) in state.validator_registry.iter().enumerate() { + hashmap.insert(validator.pubkey.clone(), i as u64); + } + + hashmap +} + +pub fn get_existing_validator_index( + state: &BeaconState, + deposit: &Deposit, + pubkey_map: &HashMap, +) -> Result, Error> { + let deposit_input = &deposit.deposit_data.deposit_input; + + let validator_index = pubkey_map.get(&deposit_input.pubkey).and_then(|i| Some(*i)); + + match validator_index { + None => Ok(None), + Some(index) => { + verify!( + deposit_input.withdrawal_credentials + == state.validator_registry[index as usize].withdrawal_credentials, + Invalid::BadWithdrawalCredentials + ); + Ok(Some(index)) + } + } +} + /// Verify that a deposit is included in the state's eth1 deposit root. /// /// Spec v0.4.0 diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index f3d533527..39970b9a7 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -608,6 +608,9 @@ impl BeaconState { /// this hashmap, each call to `process_deposits` requires an iteration though /// `self.validator_registry`. This becomes highly inefficient at scale. /// + /// TODO: this function also exists in a more optimal form in the `state_processing` crate as + /// `process_deposits`; unify these two functions. + /// /// Spec v0.4.0 pub fn process_deposit( &mut self, @@ -618,10 +621,7 @@ impl BeaconState { pubkey_map: Option<&HashMap>, spec: &ChainSpec, ) -> Result { - // TODO: update proof of possession to function written above ( - // requires bls::create_proof_of_possession to be updated // - // https://github.com/sigp/lighthouse/issues/239 if !verify_proof_of_possession(&proof_of_possession, &pubkey) { return Err(()); } From e7fba3a4735842b73e4c8a54e8f0119f5127ef3e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 08:36:49 +1100 Subject: [PATCH 24/56] Process transfers in parallel. 
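Transfer verification now fans out across threads with rayon's `par_iter().enumerate().try_for_each`,
while `execute_transfer` keeps mutating the state in series so the `&mut BeaconState` borrow is never
shared between threads. A minimal, self-contained sketch of that verify-in-parallel / apply-in-series
pattern (it assumes the rayon dependency this crate already pulls in; the `Transfer` struct, `verify`
function and running balance are stand-ins rather than the real types):

    use rayon::prelude::*;

    struct Transfer {
        amount: u64,
    }

    /// Stand-in for `verify_transfer`: read-only, so it is safe to call from many threads.
    fn verify(transfer: &Transfer) -> Result<(), String> {
        if transfer.amount == 0 {
            Err("zero-amount transfer".to_string())
        } else {
            Ok(())
        }
    }

    fn main() -> Result<(), String> {
        let transfers = vec![Transfer { amount: 1 }, Transfer { amount: 2 }];
        let mut balance: u64 = 0; // stand-in for the mutable beacon state

        // Verify in parallel; the first failure aborts the whole batch, tagged with its index.
        transfers
            .par_iter()
            .enumerate()
            .try_for_each(|(i, transfer)| {
                verify(transfer).map_err(|e| format!("transfer {}: {}", i, e))
            })?;

        // Apply in series, mutating the state only on this thread.
        for transfer in &transfers {
            balance += transfer.amount;
        }

        assert_eq!(balance, 3);
        Ok(())
    }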
--- eth2/state_processing/src/per_block_processing.rs | 9 ++++++++- eth2/types/src/test_utils/testing_transfer_builder.rs | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index c446dcd85..d871914d9 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -442,8 +442,15 @@ pub fn process_transfers( transfers.len() as u64 <= spec.max_transfers, Invalid::MaxTransfersExceed ); + + transfers + .par_iter() + .enumerate() + .try_for_each(|(i, transfer)| { + verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i)) + })?; + for (i, transfer) in transfers.iter().enumerate() { - verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))?; execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?; } diff --git a/eth2/types/src/test_utils/testing_transfer_builder.rs b/eth2/types/src/test_utils/testing_transfer_builder.rs index a8a6d7a17..c343e8fd2 100644 --- a/eth2/types/src/test_utils/testing_transfer_builder.rs +++ b/eth2/types/src/test_utils/testing_transfer_builder.rs @@ -9,7 +9,7 @@ impl TestingTransferBuilder { pub fn new(from: u64, to: u64, amount: u64, slot: Slot) -> Self { let keypair = Keypair::random(); - let mut transfer = Transfer { + let transfer = Transfer { from, to, amount, From 3f988493622ab0221649b91682e4ad8296f86542 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 08:55:45 +1100 Subject: [PATCH 25/56] Optimise attester slashing processing. --- .../src/per_block_processing.rs | 32 ++++++++++++++++--- .../src/per_block_processing/errors.rs | 9 ++++++ .../verify_attester_slashing.rs | 11 ++++--- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index d871914d9..7b5aafa7f 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -13,6 +13,7 @@ pub use verify_deposit::{ build_public_key_hashmap, get_existing_validator_index, verify_deposit, verify_deposit_index, }; pub use verify_exit::verify_exit; +pub use verify_slashable_attestation::verify_slashable_attestation; pub use verify_transfer::{execute_transfer, verify_transfer}; pub mod errors; @@ -264,19 +265,40 @@ pub fn process_attester_slashings( Invalid::MaxAttesterSlashingsExceed ); - // Verify attester slashings in parallel. - attester_slashings + // Verify the `SlashableAttestation`s in parallel (these are the resource-consuming objects, not + // the `AttesterSlashing`s themselves). + let mut slashable_attestations: Vec<&SlashableAttestation> = + Vec::with_capacity(attester_slashings.len() * 2); + for attester_slashing in attester_slashings { + slashable_attestations.push(&attester_slashing.slashable_attestation_1); + slashable_attestations.push(&attester_slashing.slashable_attestation_2); + } + + // Verify slashable attestations in parallel. + slashable_attestations .par_iter() .enumerate() - .try_for_each(|(i, attester_slashing)| { - verify_attester_slashing(&state, &attester_slashing, spec) + .try_for_each(|(i, slashable_attestation)| { + verify_slashable_attestation(&state, slashable_attestation, spec) .map_err(|e| e.into_with_index(i)) })?; + let all_slashable_attestations_have_been_checked = true; - // Gather the slashable indices and update the state in series. 
+ // Gather the slashable indices and preform the final verification and update the state in series. for (i, attester_slashing) in attester_slashings.iter().enumerate() { + let should_verify_slashable_attestations = !all_slashable_attestations_have_been_checked; + + verify_attester_slashing( + &state, + &attester_slashing, + should_verify_slashable_attestations, + spec, + ) + .map_err(|e| e.into_with_index(i))?; + let slashable_indices = gather_attester_slashing_indices(&state, &attester_slashing) .map_err(|e| e.into_with_index(i))?; + for i in slashable_indices { state.slash_validator(i as usize, spec)?; } diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index f64a3f8aa..a3e3ebad1 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -76,6 +76,10 @@ pub enum BlockInvalid { MaxExitsExceeded, MaxTransfersExceed, AttestationInvalid(usize, AttestationInvalid), + /// A `SlashableAttestation` inside an `AttesterSlashing` was invalid. + /// + /// To determine the offending `AttesterSlashing` index, divide the error message `usize` by two. + SlashableAttestationInvalid(usize, SlashableAttestationInvalid), AttesterSlashingInvalid(usize, AttesterSlashingInvalid), ProposerSlashingInvalid(usize, ProposerSlashingInvalid), DepositInvalid(usize, DepositInvalid), @@ -235,6 +239,11 @@ impl Into for SlashableAttestationValidationError { } } +impl_into_with_index_without_beacon_error!( + SlashableAttestationValidationError, + SlashableAttestationInvalid +); + /* * `ProposerSlashing` Validation */ diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs index 2970712c5..d126849b6 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -11,6 +11,7 @@ use types::*; pub fn verify_attester_slashing( state: &BeaconState, attester_slashing: &AttesterSlashing, + should_verify_slashable_attestations: bool, spec: &ChainSpec, ) -> Result<(), Error> { let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; @@ -26,10 +27,12 @@ pub fn verify_attester_slashing( Invalid::NotSlashable ); - verify_slashable_attestation(state, &slashable_attestation_1, spec) - .map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?; - verify_slashable_attestation(state, &slashable_attestation_2, spec) - .map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?; + if should_verify_slashable_attestations { + verify_slashable_attestation(state, &slashable_attestation_1, spec) + .map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?; + verify_slashable_attestation(state, &slashable_attestation_2, spec) + .map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?; + } Ok(()) } From c33e2991760af6aedf58770140d72e31e5c8e010 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 12:56:37 +1100 Subject: [PATCH 26/56] Ensure epoch processing benches get new eth1 data --- eth2/state_processing/benches/block_processing_benches.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 26c62d0d3..755207f96 100644 --- 
a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -174,7 +174,12 @@ fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) ); } - builder.build(&keypair.sk, &state.fork, spec) + let mut block = builder.build(&keypair.sk, &state.fork, spec); + + // Set the eth1 data to be different from the state. + block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]); + + block } /// Run the detailed benchmarking suite on the given `BeaconState`. From e99da31da8be15b4e8a33d76a3334dcb9ca7b3fc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 12:56:57 +1100 Subject: [PATCH 27/56] Tidy BeaconStateBuilder struct --- eth2/types/src/beacon_state/builder.rs | 153 +------------------------ 1 file changed, 4 insertions(+), 149 deletions(-) diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs index f6d7b3900..372f0d43d 100644 --- a/eth2/types/src/beacon_state/builder.rs +++ b/eth2/types/src/beacon_state/builder.rs @@ -1,24 +1,14 @@ use super::BeaconStateError; +use crate::validator_registry::get_active_validator_indices; use crate::*; -use crate::{validator_registry::get_active_validator_indices, *}; -use bls::create_proof_of_possession; use rayon::prelude::*; use ssz::TreeHash; -/// Builds a `BeaconState` for use in testing or benchmarking. +/// Builds a `BeaconState` for use in production. /// -/// Building the `BeaconState` is a three step processes: +/// This struct should not be modified for use in testing scenarios. Use `TestingBeaconStateBuilder` for that purpose. /// -/// 1. Create a new `BeaconStateBuilder`. -/// 2. Call `Self::build()` or `Self::build_fast()` generate a `BeaconState`. -/// 3. (Optional) Use builder functions to modify the `BeaconState`. -/// 4. Call `Self::cloned_state()` to obtain a `BeaconState` cloned from this struct. -/// -/// Step (2) happens prior to step (3) because some functionality requires an existing -/// `BeaconState`. -/// -/// Step (4) produces a clone of the BeaconState and doesn't consume the `BeaconStateBuilder` to -/// allow access to `self.keypairs` and `self.spec`. +/// This struct should remain safe and sensible for production usage. pub struct BeaconStateBuilder { pub state: BeaconState, } @@ -105,139 +95,4 @@ impl BeaconStateBuilder { Ok(self.state) } - - /* - /// Sets the `BeaconState` to be in the last slot of the given epoch. - /// - /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e., - /// highest justified and finalized slots, full justification bitfield, etc). - pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { - let state = &mut self.state; - - let slot = epoch.end_slot(spec.slots_per_epoch); - - state.slot = slot; - state.validator_registry_update_epoch = epoch - 1; - - state.previous_shuffling_epoch = epoch - 1; - state.current_shuffling_epoch = epoch; - - state.previous_shuffling_seed = Hash256::from_low_u64_le(0); - state.current_shuffling_seed = Hash256::from_low_u64_le(1); - - state.previous_justified_epoch = epoch - 2; - state.justified_epoch = epoch - 1; - state.justification_bitfield = u64::max_value(); - state.finalized_epoch = epoch - 1; - } - - /// Creates a full set of attestations for the `BeaconState`. Each attestation has full - /// participation from its committee and references the expected beacon_block hashes. - /// - /// These attestations should be fully conducive to justification and finalization. 
- pub fn insert_attestations(&mut self) { - let state = &mut self.state; - - state - .build_epoch_cache(RelativeEpoch::Previous, &self.spec) - .unwrap(); - state - .build_epoch_cache(RelativeEpoch::Current, &self.spec) - .unwrap(); - - let current_epoch = state.current_epoch(&self.spec); - let previous_epoch = state.previous_epoch(&self.spec); - let current_epoch_depth = - (state.slot - current_epoch.end_slot(self.spec.slots_per_epoch)).as_usize(); - - let previous_epoch_slots = previous_epoch.slot_iter(self.spec.slots_per_epoch); - let current_epoch_slots = current_epoch - .slot_iter(self.spec.slots_per_epoch) - .take(current_epoch_depth); - - for slot in previous_epoch_slots.chain(current_epoch_slots) { - let committees = state - .get_crosslink_committees_at_slot(slot, &self.spec) - .unwrap() - .clone(); - - for (committee, shard) in committees { - state - .latest_attestations - .push(committee_to_pending_attestation( - state, &committee, shard, slot, &self.spec, - )) - } - } - } - - /// Returns a cloned `BeaconState`. - pub fn cloned_state(&self) -> BeaconState { - self.state.as_ref().expect("Genesis required").clone() - } - */ } - -/* -/// Builds a valid PendingAttestation with full participation for some committee. -fn committee_to_pending_attestation( - state: &BeaconState, - committee: &[usize], - shard: u64, - slot: Slot, - spec: &ChainSpec, -) -> PendingAttestation { - let current_epoch = state.current_epoch(spec); - let previous_epoch = state.previous_epoch(spec); - - let mut aggregation_bitfield = Bitfield::new(); - let mut custody_bitfield = Bitfield::new(); - - for (i, _) in committee.iter().enumerate() { - aggregation_bitfield.set(i, true); - custody_bitfield.set(i, true); - } - - let is_previous_epoch = - state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); - - let justified_epoch = if is_previous_epoch { - state.previous_justified_epoch - } else { - state.justified_epoch - }; - - let epoch_boundary_root = if is_previous_epoch { - *state - .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - } else { - *state - .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) - .unwrap() - }; - - let justified_block_root = *state - .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), &spec) - .unwrap(); - - PendingAttestation { - aggregation_bitfield, - data: AttestationData { - slot, - shard, - beacon_block_root: *state.get_block_root(slot, spec).unwrap(), - epoch_boundary_root, - crosslink_data_root: Hash256::zero(), - latest_crosslink: Crosslink { - epoch: slot.epoch(spec.slots_per_epoch), - crosslink_data_root: Hash256::zero(), - }, - justified_epoch, - justified_block_root, - }, - custody_bitfield, - inclusion_slot: slot, - } -} -*/ From 89fc386264e9fff71d6769d1adbc68c3c02afa92 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 13:38:57 +1100 Subject: [PATCH 28/56] Add extra checks for epoch benches finalization --- .../benches/epoch_processing_benches.rs | 43 ++++++++++++++++--- .../src/beacon_state_bencher.rs | 9 ++-- .../src/per_epoch_processing/attester_sets.rs | 2 +- 3 files changed, 44 insertions(+), 10 deletions(-) diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index 149d8f28e..6f9219658 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -33,7 +33,8 @@ pub fn epoch_processing_16k_validators(c: &mut Criterion) 
{ let (state, _keypairs) = builder.build(); - // Assert that the state has the maximum possible attestations. + // Assert that the state has an attestations for each committee that is able to include an + // attestation in the state. let committees_per_epoch = spec.get_epoch_committee_count(validator_count); let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; let previous_epoch_attestations = committees_per_epoch; @@ -41,18 +42,26 @@ pub fn epoch_processing_16k_validators(c: &mut Criterion) { committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay); assert_eq!( state.latest_attestations.len() as u64, - previous_epoch_attestations + current_epoch_attestations + previous_epoch_attestations + current_epoch_attestations, + "The state should have an attestation for each committee." ); // Assert that each attestation in the state has full participation. let committee_size = validator_count / committees_per_epoch as usize; for a in &state.latest_attestations { - assert_eq!(a.aggregation_bitfield.num_set_bits(), committee_size); + assert_eq!( + a.aggregation_bitfield.num_set_bits(), + committee_size, + "Each attestation in the state should have full participation" + ); } // Assert that we will run the first arm of process_rewards_and_penalities let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; - assert!(epochs_since_finality <= 4); + assert_eq!( + epochs_since_finality, 4, + "Epochs since finality should be 4" + ); bench_epoch_processing(c, &state, &spec, "16k_validators"); } @@ -239,8 +248,32 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp .sample_size(10), ); - let state_clone = state.clone(); + let mut state_clone = state.clone(); let spec_clone = spec.clone(); + let previous_epoch = state.previous_epoch(&spec); + let attesters = calculate_attester_sets(&state, &spec).unwrap(); + let active_validator_indices = calculate_active_validator_indices(&state, &spec); + let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); + let previous_total_balance = state.get_total_balance( + &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], + &spec, + ); + assert_eq!( + state_clone.finalized_epoch, state_clone.validator_registry_update_epoch, + "The last registry update should be at the last finalized epoch." + ); + process_justification( + &mut state_clone, + current_total_balance, + previous_total_balance, + attesters.previous_epoch_boundary.balance, + attesters.current_epoch_boundary.balance, + spec, + ); + assert!( + state_clone.finalized_epoch > state_clone.validator_registry_update_epoch, + "The state should have been finalized." 
+ ); c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("process_validator_registry", move |b| { diff --git a/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs b/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs index 0ee4a75e9..8ad4810e7 100644 --- a/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs +++ b/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs @@ -96,7 +96,6 @@ impl BeaconStateBencher { let slot = epoch.start_slot(spec.slots_per_epoch); state.slot = slot; - state.validator_registry_update_epoch = epoch - 1; state.previous_shuffling_epoch = epoch - 1; state.current_shuffling_epoch = epoch; @@ -104,10 +103,12 @@ impl BeaconStateBencher { state.previous_shuffling_seed = Hash256::from_low_u64_le(0); state.current_shuffling_seed = Hash256::from_low_u64_le(1); - state.previous_justified_epoch = epoch - 2; - state.justified_epoch = epoch - 1; + state.previous_justified_epoch = epoch - 3; + state.justified_epoch = epoch - 2; state.justification_bitfield = u64::max_value(); - state.finalized_epoch = epoch - 1; + + state.finalized_epoch = epoch - 3; + state.validator_registry_update_epoch = epoch - 3; } /// Creates a full set of attestations for the `BeaconState`. Each attestation has full diff --git a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs index 1252d8057..d82774ac2 100644 --- a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs +++ b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs @@ -13,7 +13,7 @@ impl Attesters { for i in additional_indices { self.indices.insert(*i); } - self.balance.saturating_add(additional_balance); + self.balance = self.balance.saturating_add(additional_balance); } } From 9cc8e2598fb1ea14e55c72d1e533ecd774217d7d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 17:48:26 +1100 Subject: [PATCH 29/56] Organise epoch benching file --- .../benches/epoch_processing_benches.rs | 53 ++++++++++--------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index 6f9219658..19c76fe36 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -12,6 +12,9 @@ use state_processing::{ }; use types::{validator_registry::get_active_validator_indices, *}; +pub const BENCHING_SAMPLE_SIZE: usize = 100; +pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; + /// Run the benchmarking suite on a foundation spec with 16,384 validators. pub fn epoch_processing_16k_validators(c: &mut Criterion) { let spec = ChainSpec::foundation(); @@ -70,19 +73,6 @@ pub fn epoch_processing_16k_validators(c: &mut Criterion) { /// /// `desc` will be added to the title of each bench. 
fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("epoch_process_with_caches_{}", desc), - Benchmark::new("full run", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), - ) - }) - .sample_size(10), - ); - let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( @@ -93,7 +83,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(calculate_active_validator_indices(&mut state, &spec_clone)), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -109,7 +99,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp }, ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -130,7 +120,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp }, ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -143,7 +133,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(process_eth1_data(&mut state, &spec_clone)), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -156,7 +146,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(calculate_attester_sets(&mut state, &spec_clone).unwrap()), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -199,7 +189,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let mut state_clone = state.clone(); @@ -232,7 +222,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp }, ) }) - .sample_size(10), + .sample_size(SMALL_BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -245,7 +235,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(state.process_ejections(&spec_clone)), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let mut state_clone = state.clone(); @@ -282,7 +272,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(process_validator_registry(&mut state, &spec_clone)), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -297,7 +287,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp }, ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -310,7 +300,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(update_latest_slashed_balances(&mut state, &spec_clone)), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), ); let state_clone = state.clone(); @@ -323,6 +313,19 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp |mut state| black_box(clean_attestations(&mut state, &spec_clone)), ) }) - .sample_size(10), + .sample_size(BENCHING_SAMPLE_SIZE), + ); + + let state_clone = state.clone(); + let spec_clone = 
spec.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("per_epoch_processing", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), + ) + }) + .sample_size(SMALL_BENCHING_SAMPLE_SIZE), ); } From f27b62d410397b5863e763772a4cf47e5498c4df Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 17:49:06 +1100 Subject: [PATCH 30/56] Add optimisation for epoch processing --- .../src/per_epoch_processing.rs | 21 +++++++++++++++++-- .../src/per_epoch_processing/errors.rs | 5 +++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 99275bd10..b8504ca7f 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,5 +1,6 @@ use attester_sets::AttesterSets; use errors::EpochProcessingError as Error; +use fnv::FnvHashMap; use fnv::FnvHashSet; use inclusion_distance::{inclusion_distance, inclusion_slot}; use integer_sqrt::IntegerSquareRoot; @@ -398,12 +399,28 @@ pub fn process_rewards_and_penalities( } // Attestation inclusion + let mut inclusion_slots: FnvHashMap = FnvHashMap::default(); + for a in previous_epoch_attestations { + let participants = + state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; + let inclusion_distance = (a.inclusion_slot - a.data.slot).as_u64(); + for participant in participants { + if let Some((existing_distance, _)) = inclusion_slots.get(&participant) { + if *existing_distance <= inclusion_distance { + continue; + } + } + inclusion_slots.insert(participant, (Slot::from(inclusion_distance), a.data.slot)); + } + } for &index in &attesters.previous_epoch.indices { - let inclusion_slot = inclusion_slot(state, &previous_epoch_attestations[..], index, spec)?; + let (_, inclusion_slot) = inclusion_slots + .get(&index) + .ok_or_else(|| Error::InclusionSlotsInconsistent(index))?; let proposer_index = state - .get_beacon_proposer_index(inclusion_slot, spec) + .get_beacon_proposer_index(*inclusion_slot, spec) .map_err(|_| Error::UnableToDetermineProducer)?; let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index 7d8a5800d..c60e00cae 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -8,6 +8,11 @@ pub enum EpochProcessingError { NoRandaoSeed, PreviousTotalBalanceIsZero, InclusionDistanceZero, + /// Unable to get the inclusion distance for a validator that should have an inclusion + /// distance. This indicates an internal inconsistency. 
+ /// + /// (validator_index) + InclusionSlotsInconsistent(usize), BeaconStateError(BeaconStateError), InclusionError(InclusionError), } From 21d75ef0bd78b808a6d530a4dbb9e82ff01eefdd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 18:31:14 +1100 Subject: [PATCH 31/56] Add tree hash benches --- eth2/state_processing/benches/benches.rs | 2 +- .../benches/block_processing_benches.rs | 10 ++++++++++ .../benches/epoch_processing_benches.rs | 13 +++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 52b939a69..239b782a3 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -5,7 +5,7 @@ mod epoch_processing_benches; criterion_group!( benches, - // epoch_processing_benches::epoch_processing_16k_validators, + epoch_processing_benches::epoch_processing_16k_validators, block_processing_benches::block_processing_16k_validators, ); criterion_main!(benches); diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 755207f96..0b2968082 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -1,6 +1,7 @@ use benching_utils::{BeaconBlockBencher, BeaconStateBencher}; use criterion::Criterion; use criterion::{black_box, Benchmark}; +use ssz::TreeHash; use state_processing::{ per_block_processing, per_block_processing::{ @@ -400,4 +401,13 @@ fn bench_block_processing( }) .sample_size(10), ); + + let block = initial_block.clone(); + c.bench( + &format!("block_processing_{}", desc), + Benchmark::new("tree_hash_block", move |b| { + b.iter(|| black_box(block.hash_tree_root())) + }) + .sample_size(10), + ); } diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index 19c76fe36..8172ba99a 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -1,6 +1,7 @@ use benching_utils::BeaconStateBencher; use criterion::Criterion; use criterion::{black_box, Benchmark}; +use ssz::TreeHash; use state_processing::{ per_epoch_processing, per_epoch_processing::{ @@ -328,4 +329,16 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp }) .sample_size(SMALL_BENCHING_SAMPLE_SIZE), ); + + let state_clone = state.clone(); + c.bench( + &format!("epoch_process_with_caches_{}", desc), + Benchmark::new("tree_hash_state", move |b| { + b.iter_with_setup( + || state_clone.clone(), + |state| black_box(state.hash_tree_root()), + ) + }) + .sample_size(SMALL_BENCHING_SAMPLE_SIZE), + ); } From 53456a6c79b55799117de324ed7963560244900e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 21:06:40 +1100 Subject: [PATCH 32/56] Remove last inclusion_slot(..) 
call --- .../src/per_epoch_processing.rs | 89 ++++++++++--------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index b8504ca7f..bb064ac34 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -2,7 +2,6 @@ use attester_sets::AttesterSets; use errors::EpochProcessingError as Error; use fnv::FnvHashMap; use fnv::FnvHashSet; -use inclusion_distance::{inclusion_distance, inclusion_slot}; use integer_sqrt::IntegerSquareRoot; use log::debug; use rayon::prelude::*; @@ -280,6 +279,28 @@ pub fn process_rewards_and_penalities( return Err(Error::PreviousTotalBalanceIsZero); } + // Map is ValidatorIndex -> ProposerIndex + let mut inclusion_slots: FnvHashMap = FnvHashMap::default(); + for a in &previous_epoch_attestations { + let participants = + state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; + let inclusion_distance = (a.inclusion_slot - a.data.slot).as_u64(); + for participant in participants { + if let Some((existing_distance, _)) = inclusion_slots.get(&participant) { + if *existing_distance <= inclusion_distance { + continue; + } + } + let proposer_index = state + .get_beacon_proposer_index(a.data.slot, spec) + .map_err(|_| Error::UnableToDetermineProducer)?; + inclusion_slots.insert( + participant, + (Slot::from(inclusion_distance), proposer_index), + ); + } + } + // Justification and finalization let epochs_since_finality = next_epoch - state.finalized_epoch; @@ -327,17 +348,17 @@ pub fn process_rewards_and_penalities( if attesters.previous_epoch.indices.contains(&index) { let base_reward = state.base_reward(index, base_reward_quotient, spec); - let inclusion_distance = - inclusion_distance(state, &previous_epoch_attestations, index, spec); - if let Ok(inclusion_distance) = inclusion_distance { - if inclusion_distance > 0 { - safe_add_assign!( - balance, - base_reward * spec.min_attestation_inclusion_delay - / inclusion_distance - ) - } + let (inclusion_distance, _) = inclusion_slots + .get(&index) + .expect("Inconsistent inclusion_slots."); + + if *inclusion_distance > 0 { + safe_add_assign!( + balance, + base_reward * spec.min_attestation_inclusion_delay + / inclusion_distance.as_u64() + ) } } @@ -378,18 +399,17 @@ pub fn process_rewards_and_penalities( if attesters.previous_epoch.indices.contains(&index) { let base_reward = state.base_reward(index, base_reward_quotient, spec); - let inclusion_distance = - inclusion_distance(state, &previous_epoch_attestations, index, spec); - if let Ok(inclusion_distance) = inclusion_distance { - if inclusion_distance > 0 { - safe_sub_assign!( - balance, - base_reward - - base_reward * spec.min_attestation_inclusion_delay - / inclusion_distance - ); - } + let (inclusion_distance, _) = inclusion_slots + .get(&index) + .expect("Inconsistent inclusion_slots."); + + if *inclusion_distance > 0 { + safe_add_assign!( + balance, + base_reward * spec.min_attestation_inclusion_delay + / inclusion_distance.as_u64() + ) } } @@ -399,34 +419,17 @@ pub fn process_rewards_and_penalities( } // Attestation inclusion - let mut inclusion_slots: FnvHashMap = FnvHashMap::default(); - for a in previous_epoch_attestations { - let participants = - state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; - let inclusion_distance = (a.inclusion_slot - a.data.slot).as_u64(); - for participant in participants { - if let Some((existing_distance, _)) = 
inclusion_slots.get(&participant) { - if *existing_distance <= inclusion_distance { - continue; - } - } - inclusion_slots.insert(participant, (Slot::from(inclusion_distance), a.data.slot)); - } - } + // for &index in &attesters.previous_epoch.indices { - let (_, inclusion_slot) = inclusion_slots + let (_, proposer_index) = inclusion_slots .get(&index) .ok_or_else(|| Error::InclusionSlotsInconsistent(index))?; - let proposer_index = state - .get_beacon_proposer_index(*inclusion_slot, spec) - .map_err(|_| Error::UnableToDetermineProducer)?; - - let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); + let base_reward = state.base_reward(*proposer_index, base_reward_quotient, spec); safe_add_assign!( - state.validator_balances[proposer_index], + state.validator_balances[*proposer_index], base_reward / spec.attestation_inclusion_reward_quotient ); } From a44d80006a65ae291bded0ee8a0a15db324a56b0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 10 Mar 2019 21:07:09 +1100 Subject: [PATCH 33/56] Improve allocation in get_attestation_participants --- eth2/types/src/beacon_state.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 39970b9a7..603ae2f9d 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -531,12 +531,14 @@ impl BeaconState { return Err(Error::InvalidBitfield); } - let mut participants = vec![]; + let mut participants = Vec::with_capacity(committee.len()); for (i, validator_index) in committee.iter().enumerate() { - if bitfield.get(i).unwrap() { - participants.push(*validator_index); + match bitfield.get(i) { + Ok(bit) if bit == true => participants.push(*validator_index), + _ => {} } } + participants.shrink_to_fit(); Ok(participants) } From 6ae99a146293a76c754e77391b8920ec30e2860a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 10:56:31 +1100 Subject: [PATCH 34/56] Ensure drop times aren't included in benchmarks Also moves to the new `iter_batched` method on criterion (instead of `iter_with_setup`. 
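The motivation for the switch: with `iter_with_setup`, the routine's return value (and anything it owns) can be dropped inside the timed closure, so dropping a large cloned `BeaconState` is counted in the measurement. With `iter_batched`, setup runs outside the timed region and the routine's output is only dropped after timing stops, which is why the benches below now return the mutated state instead of letting it drop in place. A minimal illustrative sketch of the pattern, not part of the patch itself; it assumes criterion 0.2's `iter_batched`/`BatchSize` API and reuses the crate's existing `clean_attestations`, `BeaconState` and `ChainSpec` items, and the function name and group id are placeholders:

    use criterion::{BatchSize, Benchmark, Criterion};
    // Import path follows the existing benches in this crate.
    use state_processing::per_epoch_processing::clean_attestations;
    use types::{BeaconState, ChainSpec};

    // Illustrative only: shows the setup/routine/BatchSize split adopted throughout this patch.
    fn bench_clean_attestations(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec) {
        let state_clone = state.clone();
        let spec_clone = spec.clone();
        c.bench(
            "example_group",
            Benchmark::new("clean_attestations", move |b| {
                b.iter_batched(
                    // Setup runs outside the timed region: give each iteration a fresh state.
                    || state_clone.clone(),
                    |mut state| {
                        clean_attestations(&mut state, &spec_clone);
                        // Return the state so criterion drops it after timing stops,
                        // rather than inside the measured routine.
                        state
                    },
                    BatchSize::SmallInput,
                )
            })
            .sample_size(10),
        );
    }

Returning `state` from the routine is the detail that moves the drop of the cloned `BeaconState` out of the measured section.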
--- .../benches/block_processing_benches.rs | 114 ++++++++-------- .../benches/epoch_processing_benches.rs | 123 +++++++++++------- 2 files changed, 142 insertions(+), 95 deletions(-) diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 0b2968082..840fcaeba 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -16,7 +16,7 @@ use types::*; pub fn block_processing_16k_validators(c: &mut Criterion) { let spec = ChainSpec::foundation(); - let validator_count = 16_384; + let validator_count = 300_032; let (mut state, keypairs) = build_state(validator_count, &spec); let block = build_block(&mut state, &keypairs, &spec); @@ -199,9 +199,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("verify_block_signature", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), - |mut state| black_box(verify_block_signature(&mut state, &block, &spec).unwrap()), + |mut state| { + verify_block_signature(&mut state, &block, &spec).unwrap(); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -213,9 +217,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_randao", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), - |mut state| black_box(process_randao(&mut state, &block, &spec).unwrap()), + |mut state| { + process_randao(&mut state, &block, &spec).unwrap(); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -226,9 +234,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_eth1_data", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), - |mut state| black_box(process_eth1_data(&mut state, &block.eth1_data).unwrap()), + |mut state| { + process_eth1_data(&mut state, &block.eth1_data).unwrap(); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -240,18 +252,14 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_proposer_slashings", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box( - process_proposer_slashings( - &mut state, - &block.body.proposer_slashings, - &spec, - ) - .unwrap(), - ) + process_proposer_slashings(&mut state, &block.body.proposer_slashings, &spec) + .unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -263,18 +271,14 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_attester_slashings", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box( - process_attester_slashings( - &mut state, - &block.body.attester_slashings, - &spec, - ) - .unwrap(), - ) + process_attester_slashings(&mut state, &block.body.attester_slashings, &spec) + .unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -286,13 +290,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_attestations", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box( - process_attestations(&mut state, &block.body.attestations, &spec).unwrap(), - ) + process_attestations(&mut state, &block.body.attestations, &spec).unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), 
@@ -304,11 +308,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_deposits", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box(process_deposits(&mut state, &block.body.deposits, &spec).unwrap()) + process_deposits(&mut state, &block.body.deposits, &spec).unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -320,13 +326,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_exits", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box( - process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap(), - ) + process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -338,11 +344,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("process_transfers", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box(process_transfers(&mut state, &block.body.transfers, &spec).unwrap()) + process_transfers(&mut state, &block.body.transfers, &spec).unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -354,9 +362,13 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("per_block_processing", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), - |mut state| black_box(per_block_processing(&mut state, &block, &spec).unwrap()), + |mut state| { + per_block_processing(&mut state, &block, &spec).unwrap(); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -368,15 +380,15 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("build_previous_state_epoch_cache", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box( - state - .build_epoch_cache(RelativeEpoch::Previous, &spec) - .unwrap(), - ) + state + .build_epoch_cache(RelativeEpoch::Previous, &spec) + .unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -388,15 +400,15 @@ fn bench_block_processing( c.bench( &format!("block_processing_{}", desc), Benchmark::new("build_current_state_epoch_cache", move |b| { - b.iter_with_setup( + b.iter_batched( || state.clone(), |mut state| { - black_box( - state - .build_epoch_cache(RelativeEpoch::Current, &spec) - .unwrap(), - ) + state + .build_epoch_cache(RelativeEpoch::Current, &spec) + .unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index 8172ba99a..e97dfde58 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -13,14 +13,14 @@ use state_processing::{ }; use types::{validator_registry::get_active_validator_indices, *}; -pub const BENCHING_SAMPLE_SIZE: usize = 100; +pub const BENCHING_SAMPLE_SIZE: usize = 10; pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; /// Run the benchmarking suite on a foundation spec with 16,384 validators. 
pub fn epoch_processing_16k_validators(c: &mut Criterion) { let spec = ChainSpec::foundation(); - let validator_count = 16_384; + let validator_count = 300_032; let mut builder = BeaconStateBencher::new(validator_count, &spec); @@ -67,7 +67,7 @@ pub fn epoch_processing_16k_validators(c: &mut Criterion) { "Epochs since finality should be 4" ); - bench_epoch_processing(c, &state, &spec, "16k_validators"); + bench_epoch_processing(c, &state, &spec, &format!("{}_validators", validator_count)); } /// Run the detailed benchmarking suite on the given `BeaconState`. @@ -79,9 +79,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("calculate_active_validator_indices", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), - |mut state| black_box(calculate_active_validator_indices(&mut state, &spec_clone)), + |mut state| { + calculate_active_validator_indices(&mut state, &spec_clone); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -93,11 +97,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("calculate_current_total_balance", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), |state| { - black_box(state.get_total_balance(&active_validator_indices[..], &spec_clone)) + state.get_total_balance(&active_validator_indices[..], &spec_clone); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -108,17 +114,19 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("calculate_previous_total_balance", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), |state| { - black_box(state.get_total_balance( + state.get_total_balance( &get_active_validator_indices( &state.validator_registry, state.previous_epoch(&spec_clone), )[..], &spec_clone, - )) + ); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -129,9 +137,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("process_eth1_data", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), - |mut state| black_box(process_eth1_data(&mut state, &spec_clone)), + |mut state| { + process_eth1_data(&mut state, &spec_clone); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -142,9 +154,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("calculate_attester_sets", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), - |mut state| black_box(calculate_attester_sets(&mut state, &spec_clone).unwrap()), + |mut state| { + calculate_attester_sets(&mut state, &spec_clone).unwrap(); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -163,18 +179,20 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("process_justification", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), |mut state| { - black_box(process_justification( + 
process_justification( &mut state, current_total_balance, previous_total_balance, attesters.previous_epoch_boundary.balance, attesters.current_epoch_boundary.balance, &spec_clone, - )) + ); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(10), @@ -185,9 +203,10 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("process_crosslinks", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()), + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -206,21 +225,21 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("process_rewards_and_penalties", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), |mut state| { - black_box( - process_rewards_and_penalities( - &mut state, - &active_validator_indices, - &attesters, - previous_total_balance, - &winning_root_for_shards, - &spec_clone, - ) - .unwrap(), + process_rewards_and_penalities( + &mut state, + &active_validator_indices, + &attesters, + previous_total_balance, + &winning_root_for_shards, + &spec_clone, ) + .unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(SMALL_BENCHING_SAMPLE_SIZE), @@ -231,9 +250,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("process_ejections", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), - |mut state| black_box(state.process_ejections(&spec_clone)), + |mut state| { + state.process_ejections(&spec_clone); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -268,9 +291,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("process_validator_registry", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), - |mut state| black_box(process_validator_registry(&mut state, &spec_clone)), + |mut state| { + process_validator_registry(&mut state, &spec_clone).unwrap(); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -281,11 +308,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("update_active_tree_index_roots", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), |mut state| { - black_box(update_active_tree_index_roots(&mut state, &spec_clone).unwrap()) + update_active_tree_index_roots(&mut state, &spec_clone).unwrap(); + state }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -296,9 +325,13 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("update_latest_slashed_balances", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), - |mut state| black_box(update_latest_slashed_balances(&mut state, &spec_clone)), + |mut state| { + update_latest_slashed_balances(&mut state, &spec_clone); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -309,9 +342,13 @@ fn bench_epoch_processing(c: 
&mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("clean_attestations", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), - |mut state| black_box(clean_attestations(&mut state, &spec_clone)), + |mut state| { + clean_attestations(&mut state, &spec_clone); + state + }, + criterion::BatchSize::SmallInput, ) }) .sample_size(BENCHING_SAMPLE_SIZE), @@ -322,9 +359,10 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("per_epoch_processing", move |b| { - b.iter_with_setup( + b.iter_batched( || state_clone.clone(), |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), + criterion::BatchSize::SmallInput, ) }) .sample_size(SMALL_BENCHING_SAMPLE_SIZE), @@ -334,10 +372,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp c.bench( &format!("epoch_process_with_caches_{}", desc), Benchmark::new("tree_hash_state", move |b| { - b.iter_with_setup( - || state_clone.clone(), - |state| black_box(state.hash_tree_root()), - ) + b.iter(|| black_box(state_clone.hash_tree_root())) }) .sample_size(SMALL_BENCHING_SAMPLE_SIZE), ); From 0b7082e2b91aa7bc8fa940b50df13ccd0353d977 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 11:17:27 +1100 Subject: [PATCH 35/56] Move `benching_utils` structs into `types` --- Cargo.toml | 1 - eth2/state_processing/Cargo.toml | 1 - .../benches/block_processing_benches.rs | 6 +++--- .../benches/epoch_processing_benches.rs | 4 ++-- eth2/state_processing/benching_utils/Cargo.toml | 17 ----------------- eth2/state_processing/benching_utils/src/lib.rs | 5 ----- .../src/per_epoch_processing/tests.rs | 4 ++-- eth2/types/src/beacon_state/builder.rs | 2 +- eth2/types/src/beacon_state/tests.rs | 17 ++++------------- eth2/types/src/test_utils/mod.rs | 4 ++++ .../test_utils/testing_beacon_block_builder.rs} | 10 +++++----- .../test_utils/testing_beacon_state_builder.rs} | 8 ++++---- 12 files changed, 25 insertions(+), 54 deletions(-) delete mode 100644 eth2/state_processing/benching_utils/Cargo.toml delete mode 100644 eth2/state_processing/benching_utils/src/lib.rs rename eth2/{state_processing/benching_utils/src/beacon_block_bencher.rs => types/src/test_utils/testing_beacon_block_builder.rs} (99%) rename eth2/{state_processing/benching_utils/src/beacon_state_bencher.rs => types/src/test_utils/testing_beacon_state_builder.rs} (98%) diff --git a/Cargo.toml b/Cargo.toml index 8f4dbb268..c5aae7f43 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,6 @@ members = [ "eth2/block_proposer", "eth2/fork_choice", "eth2/state_processing", - "eth2/state_processing/benching_utils", "eth2/types", "eth2/utils/bls", "eth2/utils/boolean-bitfield", diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index 962d23a77..f6692b259 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -11,7 +11,6 @@ harness = false [dev-dependencies] criterion = "0.2" env_logger = "0.6.0" -benching_utils = { path = "./benching_utils" } [dependencies] bls = { path = "../utils/bls" } diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 840fcaeba..3c59e51e5 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -1,4 +1,3 @@ -use 
benching_utils::{BeaconBlockBencher, BeaconStateBencher}; use criterion::Criterion; use criterion::{black_box, Benchmark}; use ssz::TreeHash; @@ -10,6 +9,7 @@ use state_processing::{ verify_block_signature, }, }; +use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; use types::*; /// Run the benchmarking suite on a foundation spec with 16,384 validators. @@ -82,7 +82,7 @@ pub fn block_processing_16k_validators(c: &mut Criterion) { } fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec) { - let mut builder = BeaconStateBencher::new(validator_count, &spec); + let mut builder = TestingBeaconStateBuilder::new(validator_count, &spec); // Set the state to be just before an epoch transition. let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); @@ -95,7 +95,7 @@ fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec BeaconBlock { - let mut builder = BeaconBlockBencher::new(spec); + let mut builder = TestingBeaconBlockBuilder::new(spec); builder.set_slot(state.slot); diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index e97dfde58..85922fa07 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -1,4 +1,3 @@ -use benching_utils::BeaconStateBencher; use criterion::Criterion; use criterion::{black_box, Benchmark}; use ssz::TreeHash; @@ -11,6 +10,7 @@ use state_processing::{ update_latest_slashed_balances, }, }; +use types::test_utils::TestingBeaconStateBuilder; use types::{validator_registry::get_active_validator_indices, *}; pub const BENCHING_SAMPLE_SIZE: usize = 10; @@ -22,7 +22,7 @@ pub fn epoch_processing_16k_validators(c: &mut Criterion) { let validator_count = 300_032; - let mut builder = BeaconStateBencher::new(validator_count, &spec); + let mut builder = TestingBeaconStateBuilder::new(validator_count, &spec); // Set the state to be just before an epoch transition. 
let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); diff --git a/eth2/state_processing/benching_utils/Cargo.toml b/eth2/state_processing/benching_utils/Cargo.toml deleted file mode 100644 index 00815406a..000000000 --- a/eth2/state_processing/benching_utils/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "benching_utils" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -bls = { path = "../../utils/bls" } -hashing = { path = "../../utils/hashing" } -int_to_bytes = { path = "../../utils/int_to_bytes" } -integer-sqrt = "0.1" -log = "0.4" -merkle_proof = { path = "../../utils/merkle_proof" } -ssz = { path = "../../utils/ssz" } -ssz_derive = { path = "../../utils/ssz_derive" } -types = { path = "../../types" } -rayon = "1.0" diff --git a/eth2/state_processing/benching_utils/src/lib.rs b/eth2/state_processing/benching_utils/src/lib.rs deleted file mode 100644 index ba9548814..000000000 --- a/eth2/state_processing/benching_utils/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod beacon_block_bencher; -mod beacon_state_bencher; - -pub use beacon_block_bencher::BeaconBlockBencher; -pub use beacon_state_bencher::BeaconStateBencher; diff --git a/eth2/state_processing/src/per_epoch_processing/tests.rs b/eth2/state_processing/src/per_epoch_processing/tests.rs index f3c68a173..18c888e78 100644 --- a/eth2/state_processing/src/per_epoch_processing/tests.rs +++ b/eth2/state_processing/src/per_epoch_processing/tests.rs @@ -1,7 +1,7 @@ #![cfg(test)] use crate::per_epoch_processing; -use benching_utils::BeaconStateBencher; use env_logger::{Builder, Env}; +use types::test_utils::TestingBeaconStateBuilder; use types::*; #[test] @@ -10,7 +10,7 @@ fn runs_without_error() { let spec = ChainSpec::few_validators(); - let mut builder = BeaconStateBencher::new(8, &spec); + let mut builder = TestingBeaconStateBuilder::new(8, &spec); let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); builder.teleport_to_slot(target_slot, &spec); diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs index 372f0d43d..c36cd11f4 100644 --- a/eth2/types/src/beacon_state/builder.rs +++ b/eth2/types/src/beacon_state/builder.rs @@ -6,7 +6,7 @@ use ssz::TreeHash; /// Builds a `BeaconState` for use in production. /// -/// This struct should not be modified for use in testing scenarios. Use `TestingBeaconStateBuilder` for that purpose. +/// This struct should _not_ be modified for use in testing scenarios. Use `TestingBeaconStateBuilder` for that purpose. /// /// This struct should remain safe and sensible for production usage. pub struct BeaconStateBuilder { diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 40bfd146c..fc55520bb 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -1,29 +1,20 @@ #![cfg(test)] use super::*; +use crate::test_utils::TestingBeaconStateBuilder; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::{BeaconState, ChainSpec}; use ssz::{ssz_encode, Decodable}; -#[test] -pub fn can_produce_genesis_block() { - let mut builder = BeaconStateBuilder::new(2); - builder.build().unwrap(); -} - /// Tests that `get_attestation_participants` is consistent with the result of /// get_crosslink_committees_at_slot` with a full bitfield. 
#[test] pub fn get_attestation_participants_consistency() { let mut rng = XorShiftRng::from_seed([42; 16]); - let mut builder = BeaconStateBuilder::new(8); - builder.spec = ChainSpec::few_validators(); - - builder.build().unwrap(); - - let mut state = builder.cloned_state(); - let spec = builder.spec.clone(); + let spec = ChainSpec::few_validators(); + let builder = TestingBeaconStateBuilder::new(8, &spec); + let (mut state, _keypairs) = builder.build(); state .build_epoch_cache(RelativeEpoch::Previous, &spec) diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 2145f684a..01d966841 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,5 +1,7 @@ mod test_random; mod testing_attestation_builder; +mod testing_beacon_block_builder; +mod testing_beacon_state_builder; mod testing_deposit_builder; mod testing_transfer_builder; mod testing_voluntary_exit_builder; @@ -7,6 +9,8 @@ mod testing_voluntary_exit_builder; pub use rand::{prng::XorShiftRng, SeedableRng}; pub use test_random::TestRandom; pub use testing_attestation_builder::TestingAttestationBuilder; +pub use testing_beacon_block_builder::TestingBeaconBlockBuilder; +pub use testing_beacon_state_builder::TestingBeaconStateBuilder; pub use testing_deposit_builder::TestingDepositBuilder; pub use testing_transfer_builder::TestingTransferBuilder; pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; diff --git a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs similarity index 99% rename from eth2/state_processing/benching_utils/src/beacon_block_bencher.rs rename to eth2/types/src/test_utils/testing_beacon_block_builder.rs index 46e822baa..db4d887d4 100644 --- a/eth2/state_processing/benching_utils/src/beacon_block_bencher.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -1,6 +1,4 @@ -use rayon::prelude::*; -use ssz::{SignedRoot, TreeHash}; -use types::{ +use crate::{ attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, test_utils::{ @@ -9,12 +7,14 @@ use types::{ }, *, }; +use rayon::prelude::*; +use ssz::{SignedRoot, TreeHash}; -pub struct BeaconBlockBencher { +pub struct TestingBeaconBlockBuilder { block: BeaconBlock, } -impl BeaconBlockBencher { +impl TestingBeaconBlockBuilder { pub fn new(spec: &ChainSpec) -> Self { Self { block: BeaconBlock::genesis(spec.zero_hash, spec), diff --git a/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs similarity index 98% rename from eth2/state_processing/benching_utils/src/beacon_state_bencher.rs rename to eth2/types/src/test_utils/testing_beacon_state_builder.rs index 8ad4810e7..b3cfea6c0 100644 --- a/eth2/state_processing/benching_utils/src/beacon_state_bencher.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -1,15 +1,15 @@ +use crate::beacon_state::BeaconStateBuilder; +use crate::*; use bls::get_withdrawal_credentials; use int_to_bytes::int_to_bytes48; use rayon::prelude::*; -use types::beacon_state::BeaconStateBuilder; -use types::*; -pub struct BeaconStateBencher { +pub struct TestingBeaconStateBuilder { state: BeaconState, keypairs: Vec, } -impl BeaconStateBencher { +impl TestingBeaconStateBuilder { pub fn new(validator_count: usize, spec: &ChainSpec) -> Self { let keypairs: Vec = (0..validator_count) .collect::>() From 827365cfb0645c3d9eea27baf32d0a197b2d03a0 Mon Sep 17 00:00:00 2001 From: 
Paul Hauner Date: Mon, 11 Mar 2019 11:33:35 +1100 Subject: [PATCH 36/56] Update fork_choice tests to use new state builder. --- eth2/fork_choice/tests/tests.rs | 43 ++++----------------------------- 1 file changed, 5 insertions(+), 38 deletions(-) diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index a3cab6a7c..5bf3b7e57 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -12,7 +12,7 @@ extern crate types; extern crate yaml_rust; pub use beacon_chain::BeaconChain; -use bls::{PublicKey, Signature}; +use bls::Signature; use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::MemoryDB; //use env_logger::{Builder, Env}; @@ -21,9 +21,8 @@ use ssz::ssz_encode; use std::collections::HashMap; use std::sync::Arc; use std::{fs::File, io::prelude::*, path::PathBuf}; -use types::{ - BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Epoch, Eth1Data, Hash256, Slot, Validator, -}; +use types::test_utils::TestingBeaconStateBuilder; +use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; use yaml_rust::yaml; // Note: We Assume the block Id's are hex-encoded. @@ -207,8 +206,6 @@ fn setup_inital_state( fork_choice_algo: &ForkChoiceAlgorithm, no_validators: usize, ) -> (Box, Arc>, Hash256) { - let zero_hash = Hash256::zero(); - let db = Arc::new(MemoryDB::open()); let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone())); @@ -225,40 +222,10 @@ fn setup_inital_state( ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())), }; - // misc vars for setting up the state - let genesis_time = 1_550_381_159; - - let latest_eth1_data = Eth1Data { - deposit_root: zero_hash.clone(), - block_hash: zero_hash.clone(), - }; - - let initial_validator_deposits = vec![]; let spec = ChainSpec::foundation(); - // create the state - let mut state = BeaconState::genesis( - genesis_time, - initial_validator_deposits, - latest_eth1_data, - &spec, - ) - .unwrap(); - - let default_validator = Validator { - pubkey: PublicKey::default(), - withdrawal_credentials: zero_hash, - activation_epoch: Epoch::from(0u64), - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - initiated_exit: false, - slashed: false, - }; - // activate the validators - for _ in 0..no_validators { - state.validator_registry.push(default_validator.clone()); - state.validator_balances.push(32_000_000_000); - } + let state_builder = TestingBeaconStateBuilder::new(no_validators, &spec); + let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); state_store From 41844841c6e2ff68bab7a95e5664bd361ca37d34 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 11:52:16 +1100 Subject: [PATCH 37/56] Update project tests to use new genesis structure --- .../beacon_chain/test_harness/src/lib.rs | 2 +- .../beacon_chain/test_harness/tests/chain.rs | 4 ++-- beacon_node/src/main.rs | 22 ++++++++++++++----- eth2/types/src/beacon_state/builder.rs | 3 ++- 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/lib.rs b/beacon_node/beacon_chain/test_harness/src/lib.rs index 0703fd4a5..f58c1b598 100644 --- a/beacon_node/beacon_chain/test_harness/src/lib.rs +++ b/beacon_node/beacon_chain/test_harness/src/lib.rs @@ -15,7 +15,7 @@ //! let validator_count = 8; //! let spec = ChainSpec::few_validators(); //! -//! 
let mut harness = BeaconChainHarness::new(spec, validator_count); +//! let mut harness = BeaconChainHarness::new(spec, validator_count, None, true); //! //! harness.advance_chain_with_block(); //! diff --git a/beacon_node/beacon_chain/test_harness/tests/chain.rs b/beacon_node/beacon_chain/test_harness/tests/chain.rs index e72c3a5aa..e5a52a314 100644 --- a/beacon_node/beacon_chain/test_harness/tests/chain.rs +++ b/beacon_node/beacon_chain/test_harness/tests/chain.rs @@ -10,7 +10,7 @@ fn it_can_build_on_genesis_block() { let spec = ChainSpec::few_validators(); let validator_count = 8; - let mut harness = BeaconChainHarness::new(spec, validator_count as usize); + let mut harness = BeaconChainHarness::new(spec, validator_count as usize, None, true); harness.advance_chain_with_block(); } @@ -25,7 +25,7 @@ fn it_can_produce_past_first_epoch_boundary() { debug!("Starting harness build..."); - let mut harness = BeaconChainHarness::new(spec, validator_count); + let mut harness = BeaconChainHarness::new(spec, validator_count, None, true); debug!("Harness built, tests starting.."); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 072315b6b..c05438cfb 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -17,8 +17,12 @@ use db::{ use fork_choice::BitwiseLMDGhost; use slog::{error, info, o, Drain}; use slot_clock::SystemTimeSlotClock; +use ssz::TreeHash; use std::sync::Arc; -use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair}; +use types::{ + beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, + Eth1Data, Hash256, Keypair, +}; fn main() { let decorator = slog_term::TermDecorator::new().build(); @@ -97,7 +101,8 @@ fn main() { .iter() .map(|_| Keypair::random()) .collect(); - let initial_validator_deposits = keypairs + + let initial_validator_deposits: Vec = keypairs .iter() .map(|keypair| Deposit { branch: vec![], // branch verification is not specified. @@ -114,14 +119,19 @@ fn main() { }) .collect(); + let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); + state_builder.process_initial_deposits(&initial_validator_deposits, &spec); + let genesis_state = state_builder.build(&spec).unwrap(); + let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + let genesis_block = BeaconBlock::genesis(state_root, &spec); + // Genesis chain - let _chain_result = BeaconChain::genesis( + let _chain_result = BeaconChain::from_genesis( state_store.clone(), block_store.clone(), slot_clock, - genesis_time, - latest_eth1_data, - initial_validator_deposits, + genesis_state, + genesis_block, spec, fork_choice, ); diff --git a/eth2/types/src/beacon_state/builder.rs b/eth2/types/src/beacon_state/builder.rs index c36cd11f4..22ca3e622 100644 --- a/eth2/types/src/beacon_state/builder.rs +++ b/eth2/types/src/beacon_state/builder.rs @@ -53,7 +53,8 @@ impl BeaconStateBuilder { /// Instantiate the validator registry from a YAML file. /// - /// This skips a lot of signing and verification, useful for fast test setups. + /// This skips a lot of signing and verification, useful if signing and verification has been + /// completed previously. 
/// /// Spec v0.4.0 pub fn import_existing_validators( From df5266988894f248694de1d186b7ecbcb2a88606 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Mon, 11 Mar 2019 11:55:09 +1100 Subject: [PATCH 38/56] Add changes of create_proof_of_possession to pull --- beacon_node/src/main.rs | 2 +- eth2/utils/bls/src/lib.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 072315b6b..eb0b38d5f 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -108,7 +108,7 @@ fn main() { deposit_input: DepositInput { pubkey: keypair.pk.clone(), withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. - proof_of_possession: create_proof_of_possession(&keypair), + proof_of_possession: create_proof_of_possession(&keypair, Hash256::zero()), }, }, }) diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index 8b3f8b2ba..95f993ecb 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -20,6 +20,7 @@ pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; use hashing::hash; use ssz::ssz_encode; +use types::{DepositInput, Hash256}; /// For some signature and public key, ensure that the signature message was the public key and it /// was signed by the secret key that corresponds to that public key. @@ -31,7 +32,7 @@ pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { // TODO: Update this method // https://github.com/sigp/lighthouse/issues/239 -pub fn create_proof_of_possession(keypair: &Keypair) -> Signature { +pub fn create_proof_of_possession(keypair: &Keypair, withdrawal_credentials: &Hash256) -> Signature { Signature::new(&ssz_encode(&keypair.pk), 0, &keypair.sk) } From 36085f63e9a6ed69e85266856f1944bd82162dfd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 14:52:21 +1100 Subject: [PATCH 39/56] Rename state trans benches --- .../benches/block_processing_benches.rs | 26 ++++++++-------- .../benches/epoch_processing_benches.rs | 30 +++++++++---------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 3c59e51e5..ea1ada193 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -197,7 +197,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("verify_block_signature", move |b| { b.iter_batched( || state.clone(), @@ -215,7 +215,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_randao", move |b| { b.iter_batched( || state.clone(), @@ -232,7 +232,7 @@ fn bench_block_processing( let state = initial_state.clone(); let block = initial_block.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_eth1_data", move |b| { b.iter_batched( || state.clone(), @@ -250,7 +250,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_proposer_slashings", move |b| { b.iter_batched( || state.clone(), @@ -269,7 +269,7 @@ fn 
bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_attester_slashings", move |b| { b.iter_batched( || state.clone(), @@ -288,7 +288,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_attestations", move |b| { b.iter_batched( || state.clone(), @@ -306,7 +306,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_deposits", move |b| { b.iter_batched( || state.clone(), @@ -324,7 +324,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_exits", move |b| { b.iter_batched( || state.clone(), @@ -342,7 +342,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("process_transfers", move |b| { b.iter_batched( || state.clone(), @@ -360,7 +360,7 @@ fn bench_block_processing( let block = initial_block.clone(); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("per_block_processing", move |b| { b.iter_batched( || state.clone(), @@ -378,7 +378,7 @@ fn bench_block_processing( state.drop_cache(RelativeEpoch::Previous); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("build_previous_state_epoch_cache", move |b| { b.iter_batched( || state.clone(), @@ -398,7 +398,7 @@ fn bench_block_processing( state.drop_cache(RelativeEpoch::Current); let spec = initial_spec.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("build_current_state_epoch_cache", move |b| { b.iter_batched( || state.clone(), @@ -416,7 +416,7 @@ fn bench_block_processing( let block = initial_block.clone(); c.bench( - &format!("block_processing_{}", desc), + &format!("{}/block_processing", desc), Benchmark::new("tree_hash_block", move |b| { b.iter(|| black_box(block.hash_tree_root())) }) diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index 85922fa07..342889b3f 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -77,7 +77,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("calculate_active_validator_indices", move |b| { b.iter_batched( || state_clone.clone(), @@ -95,7 +95,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let spec_clone = spec.clone(); let active_validator_indices = calculate_active_validator_indices(&state, &spec); c.bench( - &format!("epoch_process_with_caches_{}", desc), + 
&format!("{}/epoch_processing", desc), Benchmark::new("calculate_current_total_balance", move |b| { b.iter_batched( || state_clone.clone(), @@ -112,7 +112,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("calculate_previous_total_balance", move |b| { b.iter_batched( || state_clone.clone(), @@ -135,7 +135,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("process_eth1_data", move |b| { b.iter_batched( || state_clone.clone(), @@ -152,7 +152,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("calculate_attester_sets", move |b| { b.iter_batched( || state_clone.clone(), @@ -177,7 +177,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp &spec, ); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("process_justification", move |b| { b.iter_batched( || state_clone.clone(), @@ -201,7 +201,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("process_crosslinks", move |b| { b.iter_batched( || state_clone.clone(), @@ -223,7 +223,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp ); let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("process_rewards_and_penalties", move |b| { b.iter_batched( || state_clone.clone(), @@ -248,7 +248,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("process_ejections", move |b| { b.iter_batched( || state_clone.clone(), @@ -289,7 +289,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp "The state should have been finalized." 
); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("process_validator_registry", move |b| { b.iter_batched( || state_clone.clone(), @@ -306,7 +306,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("update_active_tree_index_roots", move |b| { b.iter_batched( || state_clone.clone(), @@ -323,7 +323,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("update_latest_slashed_balances", move |b| { b.iter_batched( || state_clone.clone(), @@ -340,7 +340,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("clean_attestations", move |b| { b.iter_batched( || state_clone.clone(), @@ -357,7 +357,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("per_epoch_processing", move |b| { b.iter_batched( || state_clone.clone(), @@ -370,7 +370,7 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); c.bench( - &format!("epoch_process_with_caches_{}", desc), + &format!("{}/epoch_processing", desc), Benchmark::new("tree_hash_state", move |b| { b.iter(|| black_box(state_clone.hash_tree_root())) }) From 191759dad0f662dc29eed2945a942e3fd38c4f75 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Mon, 11 Mar 2019 15:18:45 +1100 Subject: [PATCH 40/56] Modify create_proof_of_possession, and verifying proof_of_possession in process_deposits --- .../test_harness/src/beacon_chain_harness.rs | 11 +++- .../beacon_chain_harness/generate_deposits.rs | 36 ++++++------ .../test_harness/src/test_case.rs | 14 ++++- beacon_node/src/main.rs | 20 +++++-- eth2/types/src/beacon_state.rs | 57 +++++++------------ eth2/types/src/deposit_input.rs | 22 ++++++- .../testing_beacon_block_builder.rs | 4 +- .../src/test_utils/testing_deposit_builder.rs | 18 +++--- eth2/utils/bls/src/lib.rs | 6 -- 9 files changed, 104 insertions(+), 84 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index d2274ac69..ea32e177d 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -115,7 +115,16 @@ impl BeaconChainHarness { ); } else { debug!("Generating initial validator deposits..."); - let deposits = generate_deposits_from_keypairs(&keypairs, genesis_time, &spec); + let deposits = generate_deposits_from_keypairs( + &keypairs, + genesis_time, + spec.get_domain(spec.genesis_epoch, Domain::Deposit, &Fork{ + previous_version: spec.genesis_fork_version, + current_version: spec.genesis_fork_version, + epoch: spec.genesis_epoch, + }), + &spec + ); state_builder.process_initial_deposits(&deposits, 
&spec); }; diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs index f2d68d644..2baf8984f 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs @@ -1,4 +1,4 @@ -use bls::{create_proof_of_possession, get_withdrawal_credentials}; +use bls::get_withdrawal_credentials; use int_to_bytes::int_to_bytes48; use log::debug; use rayon::prelude::*; @@ -34,6 +34,7 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { pub fn generate_deposits_from_keypairs( keypairs: &[Keypair], genesis_time: u64, + domain: u64, spec: &ChainSpec, ) -> Vec { debug!( @@ -44,24 +45,23 @@ pub fn generate_deposits_from_keypairs( let initial_validator_deposits = keypairs .par_iter() - .map(|keypair| Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - // Validator can withdraw using their main keypair. - withdrawal_credentials: Hash256::from_slice( - &get_withdrawal_credentials( - &keypair.pk, - spec.bls_withdrawal_prefix_byte, - )[..], - ), - proof_of_possession: create_proof_of_possession(&keypair), + .map(|keypair| { + let withdrawal_credentials = Hash256::from_slice( + &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..]); + Deposit { + branch: vec![], // branch verification is not specified. + index: 0, // index verification is not specified. + deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: genesis_time - 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + // Validator can withdraw using their main keypair. 
+ withdrawal_credentials: withdrawal_credentials.clone(), + proof_of_possession: DepositInput::create_proof_of_possession(&keypair, &withdrawal_credentials, domain), + }, }, - }, + } }) .collect(); diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index 7bc7161a8..32a16ff80 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -3,7 +3,7 @@ use crate::beacon_chain_harness::BeaconChainHarness; use beacon_chain::CheckPoint; -use bls::{create_proof_of_possession, get_withdrawal_credentials}; +use bls::get_withdrawal_credentials; use log::{info, warn}; use ssz::SignedRoot; use std::path::Path; @@ -258,11 +258,19 @@ fn build_deposit( index_offset: u64, ) -> (Deposit, Keypair) { let keypair = Keypair::random(); - let proof_of_possession = create_proof_of_possession(&keypair); - let index = harness.beacon_chain.state.read().deposit_index + index_offset; let withdrawal_credentials = Hash256::from_slice( &get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..], ); + let proof_of_possession = DepositInput::create_proof_of_possession( + &keypair, + &withdrawal_credentials, + harness.spec.get_domain( + harness.beacon_chain.state.read().current_epoch(&harness.spec), + Domain::Deposit, + &harness.beacon_chain.state.read().fork, + ) + ); + let index = harness.beacon_chain.state.read().deposit_index + index_offset; let deposit = Deposit { // Note: `branch` and `index` will need to be updated once the spec defines their diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 7606da10d..8fdfa3446 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -8,7 +8,6 @@ use std::path::PathBuf; use crate::config::LighthouseConfig; use crate::rpc::start_server; use beacon_chain::BeaconChain; -use bls::create_proof_of_possession; use clap::{App, Arg}; use db::{ stores::{BeaconBlockStore, BeaconStateStore}, @@ -20,8 +19,8 @@ use slot_clock::SystemTimeSlotClock; use ssz::TreeHash; use std::sync::Arc; use types::{ - beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, - Eth1Data, Hash256, Keypair, + beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Domain, Deposit, DepositData, DepositInput, + Eth1Data, Fork, Hash256, Keypair, }; fn main() { @@ -113,7 +112,20 @@ fn main() { deposit_input: DepositInput { pubkey: keypair.pk.clone(), withdrawal_credentials: Hash256::zero(), // Withdrawal not possible. 
- proof_of_possession: create_proof_of_possession(&keypair, Hash256::zero()), + proof_of_possession: DepositInput::create_proof_of_possession( + &keypair, + &Hash256::zero(), + spec.get_domain( + // Get domain from genesis fork_version + spec.genesis_epoch, + Domain::Deposit, + &Fork { + previous_version: spec.genesis_fork_version, + current_version: spec.genesis_fork_version, + epoch: spec.genesis_epoch, + } + ), + ), }, }, }) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 603ae2f9d..f98b3e47e 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1,7 +1,6 @@ use self::epoch_cache::EpochCache; use crate::test_utils::TestRandom; use crate::{validator_registry::get_active_validator_indices, *}; -use bls::verify_proof_of_possession; use helpers::*; use honey_badger_split::SplitExt; use int_to_bytes::int_to_bytes32; @@ -9,7 +8,7 @@ use log::{debug, error, trace}; use rand::RngCore; use rayon::prelude::*; use serde_derive::Serialize; -use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{hash, Decodable, DecodeError, Encodable, SignedRoot, SszStream, TreeHash}; use std::collections::HashMap; use swap_or_not_shuffle::shuffle_list; @@ -590,10 +589,8 @@ impl BeaconState { for deposit_data in deposits { let result = self.process_deposit( - deposit_data.deposit_input.pubkey.clone(), + deposit_data.deposit_input.clone(), deposit_data.amount, - deposit_data.deposit_input.proof_of_possession.clone(), - deposit_data.deposit_input.withdrawal_credentials, Some(&pubkey_map), spec, ); @@ -616,18 +613,29 @@ impl BeaconState { /// Spec v0.4.0 pub fn process_deposit( &mut self, - pubkey: PublicKey, + deposit_input: DepositInput, amount: u64, - proof_of_possession: Signature, - withdrawal_credentials: Hash256, pubkey_map: Option<&HashMap>, spec: &ChainSpec, ) -> Result { - // - if !verify_proof_of_possession(&proof_of_possession, &pubkey) { - return Err(()); + + let proof_is_valid = deposit_input.proof_of_possession.verify( + &deposit_input.signed_root(), + spec.get_domain( + self.current_epoch(&spec), + Domain::Deposit, + &self.fork, + ), + &deposit_input.pubkey, + ); + + if !proof_is_valid { + return Err(()) } + let pubkey = deposit_input.pubkey.clone(); + let withdrawal_credentials = deposit_input.withdrawal_credentials.clone(); + let validator_index = if let Some(pubkey_map) = pubkey_map { pubkey_map.get(&pubkey).and_then(|i| Some(*i)) } else { @@ -1055,33 +1063,6 @@ impl BeaconState { self.validator_registry_update_epoch = current_epoch; } - /// Confirm validator owns PublicKey - /// - /// Spec v0.4.0 - pub fn validate_proof_of_possession( - &self, - pubkey: PublicKey, - proof_of_possession: Signature, - withdrawal_credentials: Hash256, - spec: &ChainSpec, - ) -> bool { - let proof_of_possession_data = DepositInput { - pubkey: pubkey.clone(), - withdrawal_credentials, - proof_of_possession: Signature::empty_signature(), - }; - - proof_of_possession.verify( - &proof_of_possession_data.hash_tree_root(), - spec.get_domain( - self.slot.epoch(spec.slots_per_epoch), - Domain::Deposit, - &self.fork, - ), - &pubkey, - ) - } - /// Iterate through the validator registry and eject active validators with balance below /// ``EJECTION_BALANCE``. 
/// diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 32f57ab6e..bb2334672 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -1,21 +1,37 @@ use super::Hash256; use crate::test_utils::TestRandom; -use bls::{PublicKey, Signature}; +use bls::{Keypair, PublicKey, Signature}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz::{SignedRoot, TreeHash}; use test_random_derive::TestRandom; /// The data supplied by the user to the deposit contract. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, SignedRoot, TreeHash, TestRandom)] pub struct DepositInput { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, pub proof_of_possession: Signature, } +impl DepositInput { + /// Generate the 'proof_of_posession' signature for a given DepositInput details. + /// + /// Spec v0.4.0 + pub fn create_proof_of_possession(keypair: &Keypair, withdrawal_credentials: &Hash256, domain: u64) -> Signature { + let signable_deposite_input = DepositInput { + pubkey: keypair.pk.clone(), + withdrawal_credentials: withdrawal_credentials.clone(), + proof_of_possession: Signature::empty_signature(), + }; + let msg = signable_deposite_input.signed_root(); + Signature::new(msg.as_slice(), domain, &keypair.sk) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index db4d887d4..ad3667e41 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -153,12 +153,12 @@ impl TestingBeaconBlockBuilder { } /// Insert a `Valid` deposit into the state. 
- pub fn insert_deposit(&mut self, amount: u64, index: u64, spec: &ChainSpec) { + pub fn insert_deposit(&mut self, amount: u64, index: u64, domain: u64, spec: &ChainSpec) { let keypair = Keypair::random(); let mut builder = TestingDepositBuilder::new(amount); builder.set_index(index); - builder.sign(&keypair, spec); + builder.sign(&keypair, domain, spec); self.block.body.deposits.push(builder.build()) } diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index c7eadcfd1..4e754eab0 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -1,5 +1,5 @@ use crate::*; -use bls::{create_proof_of_possession, get_withdrawal_credentials}; +use bls::{get_withdrawal_credentials}; pub struct TestingDepositBuilder { deposit: Deposit, @@ -30,16 +30,16 @@ impl TestingDepositBuilder { self.deposit.index = index; } - pub fn sign(&mut self, keypair: &Keypair, spec: &ChainSpec) { + pub fn sign(&mut self, keypair: &Keypair, domain: u64, spec: &ChainSpec) { + let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..]); self.deposit.deposit_data.deposit_input.pubkey = keypair.pk.clone(); + self.deposit.deposit_data.deposit_input.withdrawal_credentials = withdrawal_credentials.clone(); self.deposit.deposit_data.deposit_input.proof_of_possession = - create_proof_of_possession(&keypair); - self.deposit - .deposit_data - .deposit_input - .withdrawal_credentials = Hash256::from_slice( - &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], - ); + DepositInput::create_proof_of_possession( + &keypair, + &withdrawal_credentials, + domain, + ); } pub fn build(self) -> Deposit { diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index 95f993ecb..4888ff567 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -20,7 +20,6 @@ pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; use hashing::hash; use ssz::ssz_encode; -use types::{DepositInput, Hash256}; /// For some signature and public key, ensure that the signature message was the public key and it /// was signed by the secret key that corresponds to that public key. @@ -30,11 +29,6 @@ pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { sig.verify(&ssz_encode(pubkey), 0, &pubkey) } -// TODO: Update this method -// https://github.com/sigp/lighthouse/issues/239 -pub fn create_proof_of_possession(keypair: &Keypair, withdrawal_credentials: &Hash256) -> Signature { - Signature::new(&ssz_encode(&keypair.pk), 0, &keypair.sk) -} /// Returns the withdrawal credentials for a given public key. 
pub fn get_withdrawal_credentials(pubkey: &PublicKey, prefix_byte: u8) -> Vec { From 25b08f009640b7e223d8947a925f836dadaea50f Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Mon, 11 Mar 2019 15:22:15 +1100 Subject: [PATCH 41/56] Run cargo fmt --- .../test_harness/src/beacon_chain_harness.rs | 16 ++++--- .../beacon_chain_harness/generate_deposits.rs | 46 ++++++++++--------- .../test_harness/src/test_case.rs | 8 +++- beacon_node/src/main.rs | 6 +-- eth2/types/src/beacon_state.rs | 9 +--- eth2/types/src/deposit_input.rs | 21 +++++++-- .../src/test_utils/testing_deposit_builder.rs | 17 +++---- eth2/utils/bls/src/lib.rs | 1 - 8 files changed, 73 insertions(+), 51 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index ea32e177d..7a84456b8 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -118,12 +118,16 @@ impl BeaconChainHarness { let deposits = generate_deposits_from_keypairs( &keypairs, genesis_time, - spec.get_domain(spec.genesis_epoch, Domain::Deposit, &Fork{ - previous_version: spec.genesis_fork_version, - current_version: spec.genesis_fork_version, - epoch: spec.genesis_epoch, - }), - &spec + spec.get_domain( + spec.genesis_epoch, + Domain::Deposit, + &Fork { + previous_version: spec.genesis_fork_version, + current_version: spec.genesis_fork_version, + epoch: spec.genesis_epoch, + }, + ), + &spec, ); state_builder.process_initial_deposits(&deposits, &spec); }; diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs index 2baf8984f..f568f03e5 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs @@ -42,28 +42,32 @@ pub fn generate_deposits_from_keypairs( keypairs.len() ); - let initial_validator_deposits = - keypairs - .par_iter() - .map(|keypair| { - let withdrawal_credentials = Hash256::from_slice( - &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..]); - Deposit { - branch: vec![], // branch verification is not specified. - index: 0, // index verification is not specified. - deposit_data: DepositData { - amount: 32_000_000_000, // 32 ETH (in Gwei) - timestamp: genesis_time - 1, - deposit_input: DepositInput { - pubkey: keypair.pk.clone(), - // Validator can withdraw using their main keypair. - withdrawal_credentials: withdrawal_credentials.clone(), - proof_of_possession: DepositInput::create_proof_of_possession(&keypair, &withdrawal_credentials, domain), - }, + let initial_validator_deposits = keypairs + .par_iter() + .map(|keypair| { + let withdrawal_credentials = Hash256::from_slice( + &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], + ); + Deposit { + branch: vec![], // branch verification is not specified. + index: 0, // index verification is not specified. + deposit_data: DepositData { + amount: 32_000_000_000, // 32 ETH (in Gwei) + timestamp: genesis_time - 1, + deposit_input: DepositInput { + pubkey: keypair.pk.clone(), + // Validator can withdraw using their main keypair. 
+ withdrawal_credentials: withdrawal_credentials.clone(), + proof_of_possession: DepositInput::create_proof_of_possession( + &keypair, + &withdrawal_credentials, + domain, + ), }, - } - }) - .collect(); + }, + } + }) + .collect(); initial_validator_deposits } diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index 32a16ff80..a18b4688b 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -265,10 +265,14 @@ fn build_deposit( &keypair, &withdrawal_credentials, harness.spec.get_domain( - harness.beacon_chain.state.read().current_epoch(&harness.spec), + harness + .beacon_chain + .state + .read() + .current_epoch(&harness.spec), Domain::Deposit, &harness.beacon_chain.state.read().fork, - ) + ), ); let index = harness.beacon_chain.state.read().deposit_index + index_offset; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 8fdfa3446..c3182c789 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -19,8 +19,8 @@ use slot_clock::SystemTimeSlotClock; use ssz::TreeHash; use std::sync::Arc; use types::{ - beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Domain, Deposit, DepositData, DepositInput, - Eth1Data, Fork, Hash256, Keypair, + beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, + Domain, Eth1Data, Fork, Hash256, Keypair, }; fn main() { @@ -123,7 +123,7 @@ fn main() { previous_version: spec.genesis_fork_version, current_version: spec.genesis_fork_version, epoch: spec.genesis_epoch, - } + }, ), ), }, diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index f98b3e47e..f69746dae 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -618,19 +618,14 @@ impl BeaconState { pubkey_map: Option<&HashMap>, spec: &ChainSpec, ) -> Result { - let proof_is_valid = deposit_input.proof_of_possession.verify( &deposit_input.signed_root(), - spec.get_domain( - self.current_epoch(&spec), - Domain::Deposit, - &self.fork, - ), + spec.get_domain(self.current_epoch(&spec), Domain::Deposit, &self.fork), &deposit_input.pubkey, ); if !proof_is_valid { - return Err(()) + return Err(()); } let pubkey = deposit_input.pubkey.clone(); diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index bb2334672..2a61efd9c 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -3,14 +3,25 @@ use crate::test_utils::TestRandom; use bls::{Keypair, PublicKey, Signature}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use ssz::{SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use test_random_derive::TestRandom; /// The data supplied by the user to the deposit contract. /// /// Spec v0.4.0 -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, SignedRoot, TreeHash, TestRandom)] +#[derive( + Debug, + PartialEq, + Clone, + Serialize, + Deserialize, + Encode, + Decode, + SignedRoot, + TreeHash, + TestRandom, +)] pub struct DepositInput { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, @@ -21,7 +32,11 @@ impl DepositInput { /// Generate the 'proof_of_posession' signature for a given DepositInput details. 
/// /// Spec v0.4.0 - pub fn create_proof_of_possession(keypair: &Keypair, withdrawal_credentials: &Hash256, domain: u64) -> Signature { + pub fn create_proof_of_possession( + keypair: &Keypair, + withdrawal_credentials: &Hash256, + domain: u64, + ) -> Signature { let signable_deposite_input = DepositInput { pubkey: keypair.pk.clone(), withdrawal_credentials: withdrawal_credentials.clone(), diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index 4e754eab0..80e039a89 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -1,5 +1,5 @@ use crate::*; -use bls::{get_withdrawal_credentials}; +use bls::get_withdrawal_credentials; pub struct TestingDepositBuilder { deposit: Deposit, @@ -31,15 +31,16 @@ impl TestingDepositBuilder { } pub fn sign(&mut self, keypair: &Keypair, domain: u64, spec: &ChainSpec) { - let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..]); + let withdrawal_credentials = Hash256::from_slice( + &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], + ); self.deposit.deposit_data.deposit_input.pubkey = keypair.pk.clone(); - self.deposit.deposit_data.deposit_input.withdrawal_credentials = withdrawal_credentials.clone(); + self.deposit + .deposit_data + .deposit_input + .withdrawal_credentials = withdrawal_credentials.clone(); self.deposit.deposit_data.deposit_input.proof_of_possession = - DepositInput::create_proof_of_possession( - &keypair, - &withdrawal_credentials, - domain, - ); + DepositInput::create_proof_of_possession(&keypair, &withdrawal_credentials, domain); } pub fn build(self) -> Deposit { diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index 4888ff567..b995b78c9 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -29,7 +29,6 @@ pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { sig.verify(&ssz_encode(pubkey), 0, &pubkey) } - /// Returns the withdrawal credentials for a given public key. pub fn get_withdrawal_credentials(pubkey: &PublicKey, prefix_byte: u8) -> Vec { let hashed = hash(&ssz_encode(pubkey)); From 5e5cfb782ef85c7bab95dff70366db849caa10be Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 18:58:49 +1100 Subject: [PATCH 42/56] Add concurrency to TestBeaconStateBuilder Specifically to generating the initial validator objects. --- eth2/types/src/test_utils/testing_beacon_state_builder.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index b3cfea6c0..e3f3313de 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -23,7 +23,7 @@ impl TestingBeaconStateBuilder { .collect(); let validators = keypairs - .iter() + .par_iter() .map(|keypair| { let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( &keypair.pk, From 7e79a2b3d31032e2acf62e6b02a1435b7b1dd646 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 19:01:44 +1100 Subject: [PATCH 43/56] Improve PublicKey Hash impl efficiency Instead of SSZ-encoding, we just use the AMCL tobytes method. 
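As a minimal sketch of the idea (a hypothetical `DemoPublicKey` stands in for the real AMCL-backed type, which exposes `as_bytes()`): hashing the raw key bytes lets `HashMap` lookups, such as the pubkey-to-index map used during deposit processing, skip an SSZ encode on every hash.

    use std::collections::HashMap;
    use std::hash::{Hash, Hasher};

    // Hypothetical stand-in for the AMCL-backed key; the real `PublicKey`
    // wraps `bls_aggregates::PublicKey` and exposes `as_bytes()`.
    #[derive(PartialEq, Eq)]
    struct DemoPublicKey(Vec<u8>);

    impl Hash for DemoPublicKey {
        fn hash<H: Hasher>(&self, state: &mut H) {
            // Hash the raw byte representation directly; no SSZ encode step.
            self.0.hash(state);
        }
    }

    fn main() {
        let mut indices: HashMap<DemoPublicKey, usize> = HashMap::new();
        indices.insert(DemoPublicKey(vec![1, 2, 3]), 0);
        assert_eq!(indices.get(&DemoPublicKey(vec![1, 2, 3])), Some(&0));
    }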
--- eth2/utils/bls/src/public_key.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 3ab2b60bb..ffe710d2d 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -93,7 +93,11 @@ impl PartialEq for PublicKey { impl Hash for PublicKey { fn hash(&self, state: &mut H) { - ssz_encode(self).hash(state) + // Note: this is not necessarily the consensus-ready hash. Instead, it is designed to be + // optimally fast for internal usage. + // + // To hash for consensus purposes, use the SSZ-encoded bytes. + self.0.as_bytes().hash(state) } } From 292991810d7f9d732eb1ba111624fa671f7ee2de Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 19:46:22 +1100 Subject: [PATCH 44/56] Move state processing benches around --- eth2/state_processing/benches/benches.rs | 14 +++++++++----- .../benches/block_processing_benches.rs | 4 +--- .../benches/epoch_processing_benches.rs | 4 +--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 239b782a3..516f15215 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,11 +1,15 @@ +use criterion::Criterion; use criterion::{criterion_group, criterion_main}; mod block_processing_benches; mod epoch_processing_benches; -criterion_group!( - benches, - epoch_processing_benches::epoch_processing_16k_validators, - block_processing_benches::block_processing_16k_validators, -); +pub const VALIDATOR_COUNT: usize = 300_032; + +pub fn state_processing(c: &mut Criterion) { + block_processing_benches::bench_block_processing_n_validators(c, VALIDATOR_COUNT); + epoch_processing_benches::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); +} + +criterion_group!(benches, state_processing,); criterion_main!(benches); diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index ea1ada193..06563b05b 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -13,11 +13,9 @@ use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; use types::*; /// Run the benchmarking suite on a foundation spec with 16,384 validators. -pub fn block_processing_16k_validators(c: &mut Criterion) { +pub fn bench_block_processing_n_validators(c: &mut Criterion, validator_count: usize) { let spec = ChainSpec::foundation(); - let validator_count = 300_032; - let (mut state, keypairs) = build_state(validator_count, &spec); let block = build_block(&mut state, &keypairs, &spec); diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/epoch_processing_benches.rs index 342889b3f..f7ae13676 100644 --- a/eth2/state_processing/benches/epoch_processing_benches.rs +++ b/eth2/state_processing/benches/epoch_processing_benches.rs @@ -17,11 +17,9 @@ pub const BENCHING_SAMPLE_SIZE: usize = 10; pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; /// Run the benchmarking suite on a foundation spec with 16,384 validators. 
-pub fn epoch_processing_16k_validators(c: &mut Criterion) { +pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) { let spec = ChainSpec::foundation(); - let validator_count = 300_032; - let mut builder = TestingBeaconStateBuilder::new(validator_count, &spec); // Set the state to be just before an epoch transition. From e81f1c31c9890ae34be093fdbd11523853983902 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 11 Mar 2019 19:47:33 +1100 Subject: [PATCH 45/56] Fix proof-of-possession issues. These were introduced in an earlier commit --- .../benches/block_processing_benches.rs | 2 +- .../per_block_processing/verify_deposit.rs | 16 +++++++------- eth2/types/src/deposit_input.rs | 22 ++++++++++++++++--- .../testing_beacon_block_builder.rs | 10 +++++++-- .../src/test_utils/testing_deposit_builder.rs | 6 ++++- eth2/utils/bls/src/lib.rs | 8 ------- 6 files changed, 41 insertions(+), 23 deletions(-) diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/block_processing_benches.rs index 06563b05b..9d9a5647b 100644 --- a/eth2/state_processing/benches/block_processing_benches.rs +++ b/eth2/state_processing/benches/block_processing_benches.rs @@ -140,7 +140,7 @@ fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) // Insert the maximum possible number of `Deposit` objects. for i in 0..spec.max_deposits { - builder.insert_deposit(32_000_000_000, state.deposit_index + i, spec); + builder.insert_deposit(32_000_000_000, state.deposit_index + i, state, spec); } // Insert the maximum possible number of `Exit` objects. diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 0cf2a078f..1aabbb973 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -1,5 +1,4 @@ use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error}; -use bls::verify_proof_of_possession; use hashing::hash; use merkle_proof::verify_merkle_proof; use ssz::ssz_encode; @@ -27,13 +26,14 @@ pub fn verify_deposit( spec: &ChainSpec, ) -> Result<(), Error> { verify!( - // TODO: update proof of possession. 
- // - // https://github.com/sigp/lighthouse/issues/239 - verify_proof_of_possession( - &deposit.deposit_data.deposit_input.proof_of_possession, - &deposit.deposit_data.deposit_input.pubkey - ), + deposit + .deposit_data + .deposit_input + .validate_proof_of_possession( + state.slot.epoch(spec.slots_per_epoch), + &state.fork, + spec + ), Invalid::BadProofOfPossession ); diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 2a61efd9c..1b506894d 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -1,5 +1,5 @@ -use super::Hash256; use crate::test_utils::TestRandom; +use crate::*; use bls::{Keypair, PublicKey, Signature}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; @@ -37,14 +37,30 @@ impl DepositInput { withdrawal_credentials: &Hash256, domain: u64, ) -> Signature { - let signable_deposite_input = DepositInput { + let signable_deposit_input = DepositInput { pubkey: keypair.pk.clone(), withdrawal_credentials: withdrawal_credentials.clone(), proof_of_possession: Signature::empty_signature(), }; - let msg = signable_deposite_input.signed_root(); + let msg = signable_deposit_input.signed_root(); + Signature::new(msg.as_slice(), domain, &keypair.sk) } + + /// Verify that proof-of-possession is valid. + /// + /// Spec v0.4.0 + pub fn validate_proof_of_possession( + &self, + epoch: Epoch, + fork: &Fork, + spec: &ChainSpec, + ) -> bool { + let msg = self.signed_root(); + let domain = spec.get_domain(epoch, Domain::Deposit, fork); + + self.proof_of_possession.verify(&msg, domain, &self.pubkey) + } } #[cfg(test)] diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index ad3667e41..bbdc5046b 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -153,12 +153,18 @@ impl TestingBeaconBlockBuilder { } /// Insert a `Valid` deposit into the state. 
- pub fn insert_deposit(&mut self, amount: u64, index: u64, domain: u64, spec: &ChainSpec) { + pub fn insert_deposit( + &mut self, + amount: u64, + index: u64, + state: &BeaconState, + spec: &ChainSpec, + ) { let keypair = Keypair::random(); let mut builder = TestingDepositBuilder::new(amount); builder.set_index(index); - builder.sign(&keypair, domain, spec); + builder.sign(&keypair, state, spec); self.block.body.deposits.push(builder.build()) } diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index 80e039a89..56e81cad0 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -30,10 +30,14 @@ impl TestingDepositBuilder { self.deposit.index = index; } - pub fn sign(&mut self, keypair: &Keypair, domain: u64, spec: &ChainSpec) { + pub fn sign(&mut self, keypair: &Keypair, state: &BeaconState, spec: &ChainSpec) { let withdrawal_credentials = Hash256::from_slice( &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], ); + + let epoch = state.current_epoch(spec); + let domain = spec.get_domain(epoch, Domain::Deposit, &state.fork); + self.deposit.deposit_data.deposit_input.pubkey = keypair.pk.clone(); self.deposit .deposit_data diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index b995b78c9..38a129908 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -21,14 +21,6 @@ pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; use hashing::hash; use ssz::ssz_encode; -/// For some signature and public key, ensure that the signature message was the public key and it -/// was signed by the secret key that corresponds to that public key. -pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { - // TODO: replace this function with state.validate_proof_of_possession - // https://github.com/sigp/lighthouse/issues/239 - sig.verify(&ssz_encode(pubkey), 0, &pubkey) -} - /// Returns the withdrawal credentials for a given public key. 
pub fn get_withdrawal_credentials(pubkey: &PublicKey, prefix_byte: u8) -> Vec { let hashed = hash(&ssz_encode(pubkey)); From 9de6a0c733bd94ed2f0ef3b5216e46c3dd186f05 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 09:57:00 +1100 Subject: [PATCH 46/56] Rename benching files --- ...ck_processing_benches.rs => bench_block_processing.rs} | 0 ...ch_processing_benches.rs => bench_epoch_processing.rs} | 0 eth2/state_processing/benches/benches.rs | 8 ++++---- 3 files changed, 4 insertions(+), 4 deletions(-) rename eth2/state_processing/benches/{block_processing_benches.rs => bench_block_processing.rs} (100%) rename eth2/state_processing/benches/{epoch_processing_benches.rs => bench_epoch_processing.rs} (100%) diff --git a/eth2/state_processing/benches/block_processing_benches.rs b/eth2/state_processing/benches/bench_block_processing.rs similarity index 100% rename from eth2/state_processing/benches/block_processing_benches.rs rename to eth2/state_processing/benches/bench_block_processing.rs diff --git a/eth2/state_processing/benches/epoch_processing_benches.rs b/eth2/state_processing/benches/bench_epoch_processing.rs similarity index 100% rename from eth2/state_processing/benches/epoch_processing_benches.rs rename to eth2/state_processing/benches/bench_epoch_processing.rs diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 516f15215..6e54a25f5 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,14 +1,14 @@ use criterion::Criterion; use criterion::{criterion_group, criterion_main}; -mod block_processing_benches; -mod epoch_processing_benches; +mod bench_block_processing; +mod bench_epoch_processing; pub const VALIDATOR_COUNT: usize = 300_032; pub fn state_processing(c: &mut Criterion) { - block_processing_benches::bench_block_processing_n_validators(c, VALIDATOR_COUNT); - epoch_processing_benches::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); + bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); + bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); } criterion_group!(benches, state_processing,); From f34ae86cde83811806d0322241295f1100cb0d69 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 12:46:44 +1100 Subject: [PATCH 47/56] Add support for loading keypairs from file --- .gitignore | 1 + .../beacon_chain/test_harness/.gitignore | 1 - .../test_harness/src/beacon_chain_harness.rs | 6 +- .../beacon_chain_harness/generate_deposits.rs | 27 ----- .../load_deposits_from_file.rs | 38 ------ .../beacon_chain/test_harness/src/bin.rs | 28 ++--- .../beacon_chain/test_harness/src/gen_keys.rs | 20 ++++ .../beacon_chain/test_harness/src/prepare.rs | 69 ----------- eth2/fork_choice/tests/tests.rs | 2 +- .../benches/bench_block_processing.rs | 17 ++- .../benches/bench_epoch_processing.rs | 9 +- eth2/state_processing/benches/benches.rs | 48 +++++++- .../src/per_epoch_processing/tests.rs | 2 +- eth2/types/src/beacon_state/tests.rs | 2 +- .../generate_deterministic_keypairs.rs | 30 +++++ eth2/types/src/test_utils/keypairs_file.rs | 113 ++++++++++++++++++ eth2/types/src/test_utils/mod.rs | 4 + .../testing_beacon_state_builder.rs | 19 ++- eth2/utils/bls/src/public_key.rs | 11 +- 19 files changed, 264 insertions(+), 183 deletions(-) delete mode 100644 beacon_node/beacon_chain/test_harness/.gitignore delete mode 100644 beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs create 
mode 100644 beacon_node/beacon_chain/test_harness/src/gen_keys.rs delete mode 100644 beacon_node/beacon_chain/test_harness/src/prepare.rs create mode 100644 eth2/types/src/test_utils/generate_deterministic_keypairs.rs create mode 100644 eth2/types/src/test_utils/keypairs_file.rs diff --git a/.gitignore b/.gitignore index 9050bdab9..346ef9afa 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ target/ Cargo.lock *.pk *.sk +*.raw_keypairs diff --git a/beacon_node/beacon_chain/test_harness/.gitignore b/beacon_node/beacon_chain/test_harness/.gitignore deleted file mode 100644 index 5f605cba0..000000000 --- a/beacon_node/beacon_chain/test_harness/.gitignore +++ /dev/null @@ -1 +0,0 @@ -validators/ diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index 7a84456b8..c442c05db 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -16,13 +16,11 @@ use std::fs::File; use std::iter::FromIterator; use std::path::Path; use std::sync::Arc; -use types::{beacon_state::BeaconStateBuilder, *}; +use types::{beacon_state::BeaconStateBuilder, test_utils::generate_deterministic_keypairs, *}; mod generate_deposits; -mod load_deposits_from_file; -pub use generate_deposits::{generate_deposits_from_keypairs, generate_deterministic_keypairs}; -pub use load_deposits_from_file::load_deposits_from_file; +pub use generate_deposits::generate_deposits_from_keypairs; /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected /// to it. Each validator is provided a borrow to the beacon chain, where it may read diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs index f568f03e5..bba3aec1c 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/generate_deposits.rs @@ -1,35 +1,8 @@ use bls::get_withdrawal_credentials; -use int_to_bytes::int_to_bytes48; use log::debug; use rayon::prelude::*; use types::*; -/// Generates `validator_count` keypairs where the secret key is the index of the -/// validator. -/// -/// For example, the first validator has a secret key of `int_to_bytes48(1)`, the second has -/// `int_to_bytes48(2)` and so on. (We skip `0` as it generates a weird looking public key and is -/// probably invalid). 
-pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { - debug!( - "Generating {} deterministic validator keypairs...", - validator_count - ); - - let keypairs: Vec = (0..validator_count) - .collect::>() - .par_iter() - .map(|&i| { - let secret = int_to_bytes48(i as u64 + 1); - let sk = SecretKey::from_bytes(&secret).unwrap(); - let pk = PublicKey::from_secret_key(&sk); - Keypair { sk, pk } - }) - .collect(); - - keypairs -} - /// Generates a `Deposit` for each keypairs pub fn generate_deposits_from_keypairs( keypairs: &[Keypair], diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs deleted file mode 100644 index 9cba3d3c4..000000000 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness/load_deposits_from_file.rs +++ /dev/null @@ -1,38 +0,0 @@ -use log::debug; -use serde_yaml; -use std::fs::File; -use std::path::Path; -use types::*; - -pub fn load_deposits_from_file( - validator_count: usize, - keypairs_path: &Path, - deposits_path: &Path, -) -> (Vec, Vec) { - debug!("Loading keypairs from file..."); - let keypairs_file = File::open(keypairs_path).unwrap(); - let mut keypairs: Vec = serde_yaml::from_reader(&keypairs_file).unwrap(); - - debug!("Loading deposits from file..."); - let deposits_file = File::open(deposits_path).unwrap(); - let mut deposits: Vec = serde_yaml::from_reader(&deposits_file).unwrap(); - - assert!( - keypairs.len() >= validator_count, - "Unable to load {} keypairs from file ({} available)", - validator_count, - keypairs.len() - ); - - assert!( - deposits.len() >= validator_count, - "Unable to load {} deposits from file ({} available)", - validator_count, - deposits.len() - ); - - keypairs.truncate(validator_count); - deposits.truncate(validator_count); - - (keypairs, deposits) -} diff --git a/beacon_node/beacon_chain/test_harness/src/bin.rs b/beacon_node/beacon_chain/test_harness/src/bin.rs index 0a02264a3..d5e43f67a 100644 --- a/beacon_node/beacon_chain/test_harness/src/bin.rs +++ b/beacon_node/beacon_chain/test_harness/src/bin.rs @@ -1,11 +1,11 @@ use clap::{App, Arg, SubCommand}; use env_logger::{Builder, Env}; -use prepare::prepare; +use gen_keys::gen_keys; use run_test::run_test; use types::ChainSpec; mod beacon_chain_harness; -mod prepare; +mod gen_keys; mod run_test; mod test_case; mod validator_harness; @@ -55,8 +55,8 @@ fn main() { ), ) .subcommand( - SubCommand::with_name("prepare") - .about("Builds validator YAML files for faster tests.") + SubCommand::with_name("gen_keys") + .about("Builds a file of BLS keypairs for faster tests.") .arg( Arg::with_name("validator_count") .long("validator_count") @@ -66,20 +66,12 @@ fn main() { .required(true), ) .arg( - Arg::with_name("genesis_time") - .long("genesis_time") - .short("t") - .value_name("GENESIS_TIME") - .help("Time for validator deposits.") - .required(true), - ) - .arg( - Arg::with_name("output_dir") - .long("output_dir") + Arg::with_name("output_file") + .long("output_file") .short("d") .value_name("GENESIS_TIME") .help("Output directory for generated YAML.") - .default_value("validators"), + .default_value("keypairs.raw_keypairs"), ), ) .get_matches(); @@ -88,7 +80,7 @@ fn main() { Builder::from_env(Env::default().default_filter_or(log_level)).init(); } - let spec = match matches.value_of("spec") { + let _spec = match matches.value_of("spec") { Some("foundation") => ChainSpec::foundation(), Some("few_validators") => 
ChainSpec::few_validators(), _ => unreachable!(), // Has a default value, should always exist. @@ -98,7 +90,7 @@ fn main() { run_test(matches); } - if let Some(matches) = matches.subcommand_matches("prepare") { - prepare(matches, &spec); + if let Some(matches) = matches.subcommand_matches("gen_keys") { + gen_keys(matches); } } diff --git a/beacon_node/beacon_chain/test_harness/src/gen_keys.rs b/beacon_node/beacon_chain/test_harness/src/gen_keys.rs new file mode 100644 index 000000000..f2f81b393 --- /dev/null +++ b/beacon_node/beacon_chain/test_harness/src/gen_keys.rs @@ -0,0 +1,20 @@ +use clap::{value_t, ArgMatches}; +use log::debug; +use std::path::Path; +use types::test_utils::{generate_deterministic_keypairs, KeypairsFile}; + +pub fn gen_keys(matches: &ArgMatches) { + let validator_count = value_t!(matches.value_of("validator_count"), usize) + .expect("Validator count is required argument"); + let output_file = matches + .value_of("output_file") + .expect("Output file has a default value."); + + let keypairs = generate_deterministic_keypairs(validator_count); + + debug!("Writing keypairs to file..."); + + let keypairs_path = Path::new(output_file); + + keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap(); +} diff --git a/beacon_node/beacon_chain/test_harness/src/prepare.rs b/beacon_node/beacon_chain/test_harness/src/prepare.rs deleted file mode 100644 index 36a99317f..000000000 --- a/beacon_node/beacon_chain/test_harness/src/prepare.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::beacon_chain_harness::generate_deterministic_keypairs; -use bls::get_withdrawal_credentials; -use clap::{value_t, ArgMatches}; -use log::debug; -use serde_yaml; -use std::path::Path; -use std::{fs, fs::File}; -use types::*; - -const KEYPAIRS_FILE: &str = "keypairs.yaml"; -const VALIDATORS_FILE: &str = "validators.yaml"; - -pub fn prepare(matches: &ArgMatches, spec: &ChainSpec) { - let validator_count = value_t!(matches.value_of("validator_count"), usize) - .expect("Validator count is required argument"); - let output_dir = matches - .value_of("output_dir") - .expect("Output dir has a default value."); - - debug!("Created keypairs and validators, writing to file..."); - - fs::create_dir_all(Path::new(output_dir)).unwrap(); - - // Ensure that keypairs is dropped before writing validators, this provides a big memory saving - // for large validator_counts. 
- let validators: Vec = { - debug!("Creating {} keypairs...", validator_count); - let keypairs = generate_deterministic_keypairs(validator_count); - debug!("Writing {} keypairs to file...", validator_count); - write_keypairs(output_dir, &keypairs); - debug!("Creating {} validators...", validator_count); - keypairs - .iter() - .map(|keypair| generate_validator(&keypair, spec)) - .collect() - }; - - debug!("Writing {} validators to file...", validator_count); - write_validators(output_dir, &validators); -} - -fn generate_validator(keypair: &Keypair, spec: &ChainSpec) -> Validator { - let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( - &keypair.pk, - spec.bls_withdrawal_prefix_byte, - )); - - Validator { - pubkey: keypair.pk.clone(), - withdrawal_credentials, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - initiated_exit: false, - slashed: false, - } -} - -fn write_keypairs(output_dir: &str, keypairs: &[Keypair]) { - let keypairs_path = Path::new(output_dir).join(KEYPAIRS_FILE); - let keypairs_file = File::create(keypairs_path).unwrap(); - serde_yaml::to_writer(keypairs_file, &keypairs).unwrap(); -} - -fn write_validators(output_dir: &str, validators: &[Validator]) { - let validators_path = Path::new(output_dir).join(VALIDATORS_FILE); - let validators_file = File::create(validators_path).unwrap(); - serde_yaml::to_writer(validators_file, &validators).unwrap(); -} diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 5bf3b7e57..364e8796c 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -224,7 +224,7 @@ fn setup_inital_state( let spec = ChainSpec::foundation(); - let state_builder = TestingBeaconStateBuilder::new(no_validators, &spec); + let state_builder = TestingBeaconStateBuilder::new(no_validators, None, &spec); let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); diff --git a/eth2/state_processing/benches/bench_block_processing.rs b/eth2/state_processing/benches/bench_block_processing.rs index 9d9a5647b..aa595b7ac 100644 --- a/eth2/state_processing/benches/bench_block_processing.rs +++ b/eth2/state_processing/benches/bench_block_processing.rs @@ -9,14 +9,19 @@ use state_processing::{ verify_block_signature, }, }; +use std::path::Path; use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; use types::*; /// Run the benchmarking suite on a foundation spec with 16,384 validators. -pub fn bench_block_processing_n_validators(c: &mut Criterion, validator_count: usize) { +pub fn bench_block_processing_n_validators( + c: &mut Criterion, + validator_count: usize, + keypair_file: Option<&Path>, +) { let spec = ChainSpec::foundation(); - let (mut state, keypairs) = build_state(validator_count, &spec); + let (mut state, keypairs) = build_state(validator_count, keypair_file, &spec); let block = build_block(&mut state, &keypairs, &spec); assert_eq!( @@ -79,8 +84,12 @@ pub fn bench_block_processing_n_validators(c: &mut Criterion, validator_count: u ); } -fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec) { - let mut builder = TestingBeaconStateBuilder::new(validator_count, &spec); +fn build_state( + validator_count: usize, + keypair_file: Option<&Path>, + spec: &ChainSpec, +) -> (BeaconState, Vec) { + let mut builder = TestingBeaconStateBuilder::new(validator_count, keypair_file, &spec); // Set the state to be just before an epoch transition. 
let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index f7ae13676..5f07d1100 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -10,6 +10,7 @@ use state_processing::{ update_latest_slashed_balances, }, }; +use std::path::Path; use types::test_utils::TestingBeaconStateBuilder; use types::{validator_registry::get_active_validator_indices, *}; @@ -17,10 +18,14 @@ pub const BENCHING_SAMPLE_SIZE: usize = 10; pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; /// Run the benchmarking suite on a foundation spec with 16,384 validators. -pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) { +pub fn bench_epoch_processing_n_validators( + c: &mut Criterion, + validator_count: usize, + keypair_file: Option<&Path>, +) { let spec = ChainSpec::foundation(); - let mut builder = TestingBeaconStateBuilder::new(validator_count, &spec); + let mut builder = TestingBeaconStateBuilder::new(validator_count, keypair_file, &spec); // Set the state to be just before an epoch transition. let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 6e54a25f5..721049eeb 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,5 +1,9 @@ +use criterion::Benchmark; use criterion::Criterion; use criterion::{criterion_group, criterion_main}; +use std::path::Path; +use types::test_utils::TestingBeaconStateBuilder; +use types::*; mod bench_block_processing; mod bench_epoch_processing; @@ -7,9 +11,47 @@ mod bench_epoch_processing; pub const VALIDATOR_COUNT: usize = 300_032; pub fn state_processing(c: &mut Criterion) { - bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); - bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); + bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT, None); + bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT, None); } -criterion_group!(benches, state_processing,); +pub fn key_loading(c: &mut Criterion) { + let validator_count = 1000; + + c.bench( + &format!("{}_validators", validator_count), + Benchmark::new("generated", move |b| { + b.iter_batched( + || (), + |_| TestingBeaconStateBuilder::new(validator_count, None, &ChainSpec::foundation()), + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); + + // Note: path needs to be relative to where cargo is executed from. 
+ let keypair_file = + Path::new("../../beacon_node/beacon_chain/test_harness/keypairs.raw_keypairs"); + c.bench( + &format!("{}_validators", validator_count), + Benchmark::new("from_file", move |b| { + b.iter_batched( + || (), + |_| { + TestingBeaconStateBuilder::new( + validator_count, + Some(&keypair_file), + &ChainSpec::foundation(), + ) + }, + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); +} + +// criterion_group!(benches, state_processing, key_loading); +criterion_group!(benches, key_loading); criterion_main!(benches); diff --git a/eth2/state_processing/src/per_epoch_processing/tests.rs b/eth2/state_processing/src/per_epoch_processing/tests.rs index 18c888e78..df014e1d6 100644 --- a/eth2/state_processing/src/per_epoch_processing/tests.rs +++ b/eth2/state_processing/src/per_epoch_processing/tests.rs @@ -10,7 +10,7 @@ fn runs_without_error() { let spec = ChainSpec::few_validators(); - let mut builder = TestingBeaconStateBuilder::new(8, &spec); + let mut builder = TestingBeaconStateBuilder::new(8, None, &spec); let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); builder.teleport_to_slot(target_slot, &spec); diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index fc55520bb..da5cdf0fb 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -13,7 +13,7 @@ pub fn get_attestation_participants_consistency() { let mut rng = XorShiftRng::from_seed([42; 16]); let spec = ChainSpec::few_validators(); - let builder = TestingBeaconStateBuilder::new(8, &spec); + let builder = TestingBeaconStateBuilder::new(8, None, &spec); let (mut state, _keypairs) = builder.build(); state diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs new file mode 100644 index 000000000..f2ce8709e --- /dev/null +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -0,0 +1,30 @@ +use crate::*; +use int_to_bytes::int_to_bytes48; +use log::debug; +use rayon::prelude::*; + +/// Generates `validator_count` keypairs where the secret key is the index of the +/// validator. +/// +/// For example, the first validator has a secret key of `int_to_bytes48(1)`, the second has +/// `int_to_bytes48(2)` and so on. (We skip `0` as it generates a weird looking public key and is +/// probably invalid). 
+pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { + debug!( + "Generating {} deterministic validator keypairs...", + validator_count + ); + + let keypairs: Vec = (0..validator_count) + .collect::>() + .par_iter() + .map(|&i| { + let secret = int_to_bytes48(i as u64 + 1); + let sk = SecretKey::from_bytes(&secret).unwrap(); + let pk = PublicKey::from_secret_key(&sk); + Keypair { sk, pk } + }) + .collect(); + + keypairs +} diff --git a/eth2/types/src/test_utils/keypairs_file.rs b/eth2/types/src/test_utils/keypairs_file.rs new file mode 100644 index 000000000..5828af9a9 --- /dev/null +++ b/eth2/types/src/test_utils/keypairs_file.rs @@ -0,0 +1,113 @@ +use crate::*; +use std::fs::File; +use std::io::{Error, ErrorKind, Read, Write}; +use std::path::Path; + +pub const PUBLIC_KEY_BYTES_LEN: usize = 48; +pub const SECRET_KEY_BYTES_LEN: usize = 48; + +pub const BATCH_SIZE: usize = 1_000; // ~15MB + +pub const KEYPAIR_BYTES_LEN: usize = PUBLIC_KEY_BYTES_LEN + SECRET_KEY_BYTES_LEN; +pub const BATCH_BYTE_LEN: usize = KEYPAIR_BYTES_LEN * BATCH_SIZE; + +pub trait KeypairsFile { + fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error>; + fn from_raw_file(path: &Path, count: usize) -> Result, Error>; +} + +impl KeypairsFile for Vec { + fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error> { + let mut keypairs_file = File::create(path)?; + + for keypair_batch in keypairs.chunks(BATCH_SIZE) { + let mut buf = Vec::with_capacity(BATCH_BYTE_LEN); + + for keypair in keypair_batch { + buf.append(&mut keypair.sk.as_raw().as_bytes()); + buf.append(&mut keypair.pk.as_raw().as_bytes()); + } + + keypairs_file.write_all(&buf)?; + } + + Ok(()) + } + + fn from_raw_file(path: &Path, count: usize) -> Result, Error> { + let mut keypairs_file = File::open(path)?; + + let mut keypairs = Vec::with_capacity(count); + + let indices: Vec = (0..count).collect(); + + for batch in indices.chunks(BATCH_SIZE) { + let mut buf = vec![0; batch.len() * KEYPAIR_BYTES_LEN]; + keypairs_file.read_exact(&mut buf)?; + + for (i, _) in batch.iter().enumerate() { + let sk_start = i * KEYPAIR_BYTES_LEN; + let sk_end = sk_start + SECRET_KEY_BYTES_LEN; + let sk = SecretKey::from_bytes(&buf[sk_start..sk_end]) + .map_err(|_| Error::new(ErrorKind::Other, "Invalid SecretKey bytes"))?; + + let pk_start = sk_end; + let pk_end = pk_start + PUBLIC_KEY_BYTES_LEN; + let pk = PublicKey::from_bytes(&buf[pk_start..pk_end]) + .map_err(|_| Error::new(ErrorKind::Other, "Invalid PublicKey bytes"))?; + + keypairs.push(Keypair { sk, pk }); + } + } + + Ok(keypairs) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::{distributions::Alphanumeric, thread_rng, Rng}; + use rayon::prelude::*; + use std::fs::remove_file; + + fn random_keypairs(n: usize) -> Vec { + (0..n).into_par_iter().map(|_| Keypair::random()).collect() + } + + fn random_tmp_file() -> String { + let mut rng = thread_rng(); + + rng.sample_iter(&Alphanumeric).take(7).collect() + } + + #[test] + #[ignore] + fn read_write_consistency_small_batch() { + let num_keypairs = 10; + let keypairs = random_keypairs(num_keypairs); + + let keypairs_path = Path::new("/tmp").join(random_tmp_file()); + keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap(); + + let decoded = Vec::from_raw_file(&keypairs_path, num_keypairs).unwrap(); + remove_file(keypairs_path).unwrap(); + + assert_eq!(keypairs, decoded); + } + + #[test] + #[ignore] + fn read_write_consistency_big_batch() { + let num_keypairs = BATCH_SIZE + 1; + let keypairs = 
random_keypairs(num_keypairs); + + let keypairs_path = Path::new("/tmp").join(random_tmp_file()); + keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap(); + + let decoded = Vec::from_raw_file(&keypairs_path, num_keypairs).unwrap(); + remove_file(keypairs_path).unwrap(); + + assert_eq!(keypairs, decoded); + } +} diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 01d966841..d34dbb89c 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,3 +1,5 @@ +mod generate_deterministic_keypairs; +mod keypairs_file; mod test_random; mod testing_attestation_builder; mod testing_beacon_block_builder; @@ -6,6 +8,8 @@ mod testing_deposit_builder; mod testing_transfer_builder; mod testing_voluntary_exit_builder; +pub use generate_deterministic_keypairs::generate_deterministic_keypairs; +pub use keypairs_file::KeypairsFile; pub use rand::{prng::XorShiftRng, SeedableRng}; pub use test_random::TestRandom; pub use testing_attestation_builder::TestingAttestationBuilder; diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index e3f3313de..3f9e7fd10 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -1,8 +1,9 @@ +use super::{generate_deterministic_keypairs, KeypairsFile}; use crate::beacon_state::BeaconStateBuilder; use crate::*; use bls::get_withdrawal_credentials; -use int_to_bytes::int_to_bytes48; use rayon::prelude::*; +use std::path::Path; pub struct TestingBeaconStateBuilder { state: BeaconState, @@ -10,17 +11,11 @@ pub struct TestingBeaconStateBuilder { } impl TestingBeaconStateBuilder { - pub fn new(validator_count: usize, spec: &ChainSpec) -> Self { - let keypairs: Vec = (0..validator_count) - .collect::>() - .par_iter() - .map(|&i| { - let secret = int_to_bytes48(i as u64 + 1); - let sk = SecretKey::from_bytes(&secret).unwrap(); - let pk = PublicKey::from_secret_key(&sk); - Keypair { sk, pk } - }) - .collect(); + pub fn new(validator_count: usize, keypairs_path: Option<&Path>, spec: &ChainSpec) -> Self { + let keypairs = match keypairs_path { + None => generate_deterministic_keypairs(validator_count), + Some(path) => Vec::from_raw_file(path, validator_count).unwrap(), + }; let validators = keypairs .par_iter() diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index ffe710d2d..ecdfce3eb 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -1,6 +1,6 @@ use super::serde_vistors::HexVisitor; use super::SecretKey; -use bls_aggregates::PublicKey as RawPublicKey; +use bls_aggregates::{DecodeError as BlsDecodeError, PublicKey as RawPublicKey}; use hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -22,7 +22,14 @@ impl PublicKey { PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) } - /// Returns the underlying signature. + /// Instantiate a PublicKey from existing bytes. + /// + /// Note: this is _not_ SSZ decoding. + pub fn from_bytes(bytes: &[u8]) -> Result { + Ok(Self(RawPublicKey::from_bytes(bytes)?)) + } + + /// Returns the underlying public key. 
pub fn as_raw(&self) -> &RawPublicKey { &self.0 } From cce88c9923136e51433eb894458581abf64e4303 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 14:39:16 +1100 Subject: [PATCH 48/56] Improve DX for loading validator keys from file --- .../beacon_chain/test_harness/src/bin.rs | 8 +- eth2/fork_choice/tests/tests.rs | 3 +- .../benches/bench_block_processing.rs | 18 ++--- .../benches/bench_epoch_processing.rs | 10 +-- eth2/state_processing/benches/benches.rs | 17 +++-- .../src/per_epoch_processing/tests.rs | 2 +- eth2/types/Cargo.toml | 1 + eth2/types/src/beacon_state/tests.rs | 2 +- eth2/types/src/test_utils/keypairs_file.rs | 32 +++++--- eth2/types/src/test_utils/mod.rs | 2 +- .../testing_beacon_state_builder.rs | 75 +++++++++++++++++-- 11 files changed, 119 insertions(+), 51 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/bin.rs b/beacon_node/beacon_chain/test_harness/src/bin.rs index d5e43f67a..df9ccd222 100644 --- a/beacon_node/beacon_chain/test_harness/src/bin.rs +++ b/beacon_node/beacon_chain/test_harness/src/bin.rs @@ -2,6 +2,8 @@ use clap::{App, Arg, SubCommand}; use env_logger::{Builder, Env}; use gen_keys::gen_keys; use run_test::run_test; +use std::fs; +use types::test_utils::keypairs_path; use types::ChainSpec; mod beacon_chain_harness; @@ -13,6 +15,10 @@ mod validator_harness; use validator_harness::ValidatorHarness; fn main() { + let validator_file_path = keypairs_path(); + + fs::create_dir(validator_file_path.parent().unwrap()).unwrap(); + let matches = App::new("Lighthouse Test Harness Runner") .version("0.0.1") .author("Sigma Prime ") @@ -71,7 +77,7 @@ fn main() { .short("d") .value_name("GENESIS_TIME") .help("Output directory for generated YAML.") - .default_value("keypairs.raw_keypairs"), + .default_value(validator_file_path.to_str().unwrap()), ), ) .get_matches(); diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 364e8796c..7228bca10 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -224,7 +224,8 @@ fn setup_inital_state( let spec = ChainSpec::foundation(); - let state_builder = TestingBeaconStateBuilder::new(no_validators, None, &spec); + let state_builder = + TestingBeaconStateBuilder::from_deterministic_keypairs(no_validators, &spec); let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); diff --git a/eth2/state_processing/benches/bench_block_processing.rs b/eth2/state_processing/benches/bench_block_processing.rs index aa595b7ac..1028d4a20 100644 --- a/eth2/state_processing/benches/bench_block_processing.rs +++ b/eth2/state_processing/benches/bench_block_processing.rs @@ -9,19 +9,14 @@ use state_processing::{ verify_block_signature, }, }; -use std::path::Path; use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; use types::*; /// Run the benchmarking suite on a foundation spec with 16,384 validators. 
-pub fn bench_block_processing_n_validators( - c: &mut Criterion, - validator_count: usize, - keypair_file: Option<&Path>, -) { +pub fn bench_block_processing_n_validators(c: &mut Criterion, validator_count: usize) { let spec = ChainSpec::foundation(); - let (mut state, keypairs) = build_state(validator_count, keypair_file, &spec); + let (mut state, keypairs) = build_state(validator_count, &spec); let block = build_block(&mut state, &keypairs, &spec); assert_eq!( @@ -84,12 +79,9 @@ pub fn bench_block_processing_n_validators( ); } -fn build_state( - validator_count: usize, - keypair_file: Option<&Path>, - spec: &ChainSpec, -) -> (BeaconState, Vec) { - let mut builder = TestingBeaconStateBuilder::new(validator_count, keypair_file, &spec); +fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec) { + let mut builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); // Set the state to be just before an epoch transition. let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index 5f07d1100..e4981b200 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -10,7 +10,6 @@ use state_processing::{ update_latest_slashed_balances, }, }; -use std::path::Path; use types::test_utils::TestingBeaconStateBuilder; use types::{validator_registry::get_active_validator_indices, *}; @@ -18,14 +17,11 @@ pub const BENCHING_SAMPLE_SIZE: usize = 10; pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; /// Run the benchmarking suite on a foundation spec with 16,384 validators. -pub fn bench_epoch_processing_n_validators( - c: &mut Criterion, - validator_count: usize, - keypair_file: Option<&Path>, -) { +pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) { let spec = ChainSpec::foundation(); - let mut builder = TestingBeaconStateBuilder::new(validator_count, keypair_file, &spec); + let mut builder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); // Set the state to be just before an epoch transition. 
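// (Added note, not part of the patch series.) "Just before an epoch
// transition" means the last slot of an epoch: in this codebase
// `Epoch::end_slot(slots_per_epoch)` resolves to
// `(epoch + 1) * slots_per_epoch - 1`. For the foundation spec (64 slots per
// epoch) the target below is the final slot of `genesis_epoch + 4`, so
// advancing the state by one more slot crosses an epoch boundary, which is
// exactly the work these benches need to measure.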
let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 721049eeb..9b16f732a 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,7 +1,6 @@ use criterion::Benchmark; use criterion::Criterion; use criterion::{criterion_group, criterion_main}; -use std::path::Path; use types::test_utils::TestingBeaconStateBuilder; use types::*; @@ -11,8 +10,8 @@ mod bench_epoch_processing; pub const VALIDATOR_COUNT: usize = 300_032; pub fn state_processing(c: &mut Criterion) { - bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT, None); - bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT, None); + bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); + bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); } pub fn key_loading(c: &mut Criterion) { @@ -23,7 +22,12 @@ pub fn key_loading(c: &mut Criterion) { Benchmark::new("generated", move |b| { b.iter_batched( || (), - |_| TestingBeaconStateBuilder::new(validator_count, None, &ChainSpec::foundation()), + |_| { + TestingBeaconStateBuilder::from_deterministic_keypairs( + validator_count, + &ChainSpec::foundation(), + ) + }, criterion::BatchSize::SmallInput, ) }) @@ -31,17 +35,14 @@ pub fn key_loading(c: &mut Criterion) { ); // Note: path needs to be relative to where cargo is executed from. - let keypair_file = - Path::new("../../beacon_node/beacon_chain/test_harness/keypairs.raw_keypairs"); c.bench( &format!("{}_validators", validator_count), Benchmark::new("from_file", move |b| { b.iter_batched( || (), |_| { - TestingBeaconStateBuilder::new( + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( validator_count, - Some(&keypair_file), &ChainSpec::foundation(), ) }, diff --git a/eth2/state_processing/src/per_epoch_processing/tests.rs b/eth2/state_processing/src/per_epoch_processing/tests.rs index df014e1d6..69450edcd 100644 --- a/eth2/state_processing/src/per_epoch_processing/tests.rs +++ b/eth2/state_processing/src/per_epoch_processing/tests.rs @@ -10,7 +10,7 @@ fn runs_without_error() { let spec = ChainSpec::few_validators(); - let mut builder = TestingBeaconStateBuilder::new(8, None, &spec); + let mut builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); builder.teleport_to_slot(target_slot, &spec); diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index e2930040d..27aef19d6 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] bls = { path = "../utils/bls" } boolean-bitfield = { path = "../utils/boolean-bitfield" } +dirs = "1.0" ethereum-types = "0.5" hashing = { path = "../utils/hashing" } honey-badger-split = { path = "../utils/honey-badger-split" } diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index da5cdf0fb..61f3c03b0 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -13,7 +13,7 @@ pub fn get_attestation_participants_consistency() { let mut rng = XorShiftRng::from_seed([42; 16]); let spec = ChainSpec::few_validators(); - let builder = TestingBeaconStateBuilder::new(8, None, &spec); + let builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); let (mut state, _keypairs) = builder.build(); 
state diff --git a/eth2/types/src/test_utils/keypairs_file.rs b/eth2/types/src/test_utils/keypairs_file.rs index 5828af9a9..b0ac8424f 100644 --- a/eth2/types/src/test_utils/keypairs_file.rs +++ b/eth2/types/src/test_utils/keypairs_file.rs @@ -1,4 +1,5 @@ use crate::*; +use rayon::prelude::*; use std::fs::File; use std::io::{Error, ErrorKind, Read, Write}; use std::path::Path; @@ -45,19 +46,27 @@ impl KeypairsFile for Vec { let mut buf = vec![0; batch.len() * KEYPAIR_BYTES_LEN]; keypairs_file.read_exact(&mut buf)?; - for (i, _) in batch.iter().enumerate() { - let sk_start = i * KEYPAIR_BYTES_LEN; - let sk_end = sk_start + SECRET_KEY_BYTES_LEN; - let sk = SecretKey::from_bytes(&buf[sk_start..sk_end]) - .map_err(|_| Error::new(ErrorKind::Other, "Invalid SecretKey bytes"))?; + let mut keypair_batch = batch + .par_iter() + .enumerate() + .map(|(i, _)| { + let sk_start = i * KEYPAIR_BYTES_LEN; + let sk_end = sk_start + SECRET_KEY_BYTES_LEN; + let sk = SecretKey::from_bytes(&buf[sk_start..sk_end]) + .map_err(|_| Error::new(ErrorKind::Other, "Invalid SecretKey bytes")) + .unwrap(); - let pk_start = sk_end; - let pk_end = pk_start + PUBLIC_KEY_BYTES_LEN; - let pk = PublicKey::from_bytes(&buf[pk_start..pk_end]) - .map_err(|_| Error::new(ErrorKind::Other, "Invalid PublicKey bytes"))?; + let pk_start = sk_end; + let pk_end = pk_start + PUBLIC_KEY_BYTES_LEN; + let pk = PublicKey::from_bytes(&buf[pk_start..pk_end]) + .map_err(|_| Error::new(ErrorKind::Other, "Invalid PublicKey bytes")) + .unwrap(); - keypairs.push(Keypair { sk, pk }); - } + Keypair { sk, pk } + }) + .collect(); + + keypairs.append(&mut keypair_batch); } Ok(keypairs) @@ -68,7 +77,6 @@ impl KeypairsFile for Vec { mod tests { use super::*; use rand::{distributions::Alphanumeric, thread_rng, Rng}; - use rayon::prelude::*; use std::fs::remove_file; fn random_keypairs(n: usize) -> Vec { diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index d34dbb89c..26d340e7d 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -14,7 +14,7 @@ pub use rand::{prng::XorShiftRng, SeedableRng}; pub use test_random::TestRandom; pub use testing_attestation_builder::TestingAttestationBuilder; pub use testing_beacon_block_builder::TestingBeaconBlockBuilder; -pub use testing_beacon_state_builder::TestingBeaconStateBuilder; +pub use testing_beacon_state_builder::{keypairs_path, TestingBeaconStateBuilder}; pub use testing_deposit_builder::TestingDepositBuilder; pub use testing_transfer_builder::TestingTransferBuilder; pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 3f9e7fd10..53481f062 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -2,8 +2,22 @@ use super::{generate_deterministic_keypairs, KeypairsFile}; use crate::beacon_state::BeaconStateBuilder; use crate::*; use bls::get_withdrawal_credentials; +use dirs; use rayon::prelude::*; -use std::path::Path; +use std::path::{Path, PathBuf}; + +pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; + +/// Returns the directory where the generated keypairs should be stored. +/// +/// It is either `$HOME/.lighthouse/keypairs.raw_keypairs` or, if `$HOME` is not available, +/// `./keypairs.raw_keypairs`. 
+pub fn keypairs_path() -> PathBuf { + let dir = dirs::home_dir() + .and_then(|home| Some(home.join(".lighthouse"))) + .unwrap_or_else(|| PathBuf::from("")); + dir.join(KEYPAIRS_FILE) +} pub struct TestingBeaconStateBuilder { state: BeaconState, @@ -11,11 +25,52 @@ pub struct TestingBeaconStateBuilder { } impl TestingBeaconStateBuilder { - pub fn new(validator_count: usize, keypairs_path: Option<&Path>, spec: &ChainSpec) -> Self { - let keypairs = match keypairs_path { - None => generate_deterministic_keypairs(validator_count), - Some(path) => Vec::from_raw_file(path, validator_count).unwrap(), - }; + /// Attempts to load validators from a file in the `CARGO_MANIFEST_DIR`. If the file is + /// unavailable, it generates the keys at runtime. + /// + /// If the `CARGO_MANIFEST_DIR` environment variable is not set, the local directory is used. + /// + /// See the `Self::from_keypairs_file` method for more info. + /// + /// # Panics + /// + /// If the file does not contain enough keypairs or is invalid. + pub fn from_default_keypairs_file_if_exists(validator_count: usize, spec: &ChainSpec) -> Self { + let dir = dirs::home_dir() + .and_then(|home| Some(home.join(".lighthouse"))) + .unwrap_or_else(|| PathBuf::from("")); + let file = dir.join(KEYPAIRS_FILE); + + if file.exists() { + TestingBeaconStateBuilder::from_keypairs_file(validator_count, &file, spec) + } else { + TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, spec) + } + } + + /// Loads the initial validator keypairs from a file on disk. + /// + /// Loading keypairs from file is ~10x faster than generating them. Use the `gen_keys` command + /// on the `test_harness` binary to generate the keys. In the `test_harness` dir, run `cargo + /// run -- gen_keys -h` for help. + /// + /// # Panics + /// + /// If the file does not exist, is invalid or does not contain enough keypairs. + pub fn from_keypairs_file(validator_count: usize, path: &Path, spec: &ChainSpec) -> Self { + let keypairs = Vec::from_raw_file(path, validator_count).unwrap(); + TestingBeaconStateBuilder::from_keypairs(keypairs, spec) + } + + /// Generates the validator keypairs deterministically. + pub fn from_deterministic_keypairs(validator_count: usize, spec: &ChainSpec) -> Self { + let keypairs = generate_deterministic_keypairs(validator_count); + TestingBeaconStateBuilder::from_keypairs(keypairs, spec) + } + + /// Creates the builder from an existing set of keypairs. + pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { + let validator_count = keypairs.len(); let validators = keypairs .par_iter() @@ -61,10 +116,15 @@ impl TestingBeaconStateBuilder { } } + /// Consume the builder and return the `BeaconState` and the keypairs for each validator. pub fn build(self) -> (BeaconState, Vec) { (self.state, self.keypairs) } + /// Ensures that the state returned from `Self::build(..)` has all caches pre-built. + /// + /// Note: this performs the build when called. Ensure that no changes are made that would + /// invalidate this cache. pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { let state = &mut self.state; @@ -147,6 +207,9 @@ impl TestingBeaconStateBuilder { } } +/// Maps a committee to a `PendingAttestation`. +/// +/// The committee will be signed by all validators in the committee. 
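// ---------------------------------------------------------------------------
// Example (not part of the patch series): choosing between the constructors
// introduced above. `from_default_keypairs_file_if_exists` prefers the file at
// `$HOME/.lighthouse/keypairs.raw_keypairs` (see `keypairs_path()`) and falls
// back to deterministic generation when it is missing. The function name and
// validator count are illustrative only.
use types::test_utils::TestingBeaconStateBuilder;
use types::ChainSpec;

fn example_build_state() {
    let spec = ChainSpec::foundation();

    // Fast path: reuse keys previously written by the `gen_keys` subcommand.
    let builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(128, &spec);

    // Slower, file-free alternative:
    // let builder = TestingBeaconStateBuilder::from_deterministic_keypairs(128, &spec);

    let (_state, _keypairs) = builder.build();
}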
fn committee_to_pending_attestation( state: &BeaconState, committee: &[usize], From efd56ebe375c73dd658e7c86c50a5431e22ad77f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 14:42:31 +1100 Subject: [PATCH 49/56] Ignore file-exists error. --- beacon_node/beacon_chain/test_harness/src/bin.rs | 2 +- eth2/types/src/test_utils/testing_beacon_state_builder.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/bin.rs b/beacon_node/beacon_chain/test_harness/src/bin.rs index df9ccd222..3afc921de 100644 --- a/beacon_node/beacon_chain/test_harness/src/bin.rs +++ b/beacon_node/beacon_chain/test_harness/src/bin.rs @@ -17,7 +17,7 @@ use validator_harness::ValidatorHarness; fn main() { let validator_file_path = keypairs_path(); - fs::create_dir(validator_file_path.parent().unwrap()).unwrap(); + let _ = fs::create_dir(validator_file_path.parent().unwrap()); let matches = App::new("Lighthouse Test Harness Runner") .version("0.0.1") diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 53481f062..63d6f5877 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -25,10 +25,10 @@ pub struct TestingBeaconStateBuilder { } impl TestingBeaconStateBuilder { - /// Attempts to load validators from a file in the `CARGO_MANIFEST_DIR`. If the file is - /// unavailable, it generates the keys at runtime. + /// Attempts to load validators from a file in `$HOME/.lighthouse/keypairs.raw_keypairs`. If + /// the file is unavailable, it generates the keys at runtime. /// - /// If the `CARGO_MANIFEST_DIR` environment variable is not set, the local directory is used. + /// If the `$HOME` environment variable is not set, the local directory is used. /// /// See the `Self::from_keypairs_file` method for more info. 
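// ---------------------------------------------------------------------------
// Note (not part of the patch series): `let _ = fs::create_dir(..)` in bin.rs
// above ignores *every* error so that an existing `~/.lighthouse` directory
// does not abort the harness. A stricter sketch that ignores only the
// "already exists" case (`ensure_dir` is a hypothetical helper, not in the
// patch):
use std::fs;
use std::io::ErrorKind;
use std::path::Path;

fn ensure_dir(path: &Path) -> std::io::Result<()> {
    match fs::create_dir(path) {
        Err(ref e) if e.kind() == ErrorKind::AlreadyExists => Ok(()),
        other => other,
    }
}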
/// From c92f867cd851e5d6686ada6950b6c4bcd0853f29 Mon Sep 17 00:00:00 2001 From: Kirk Baird Date: Tue, 12 Mar 2019 16:01:09 +1100 Subject: [PATCH 50/56] Upgrade to signature scheme 0.6.0 --- eth2/types/src/test_utils/keypairs_file.rs | 2 +- eth2/utils/bls/Cargo.toml | 2 +- eth2/utils/bls/src/aggregate_signature.rs | 8 +------ eth2/utils/bls/src/public_key.rs | 28 +++++++++++----------- eth2/utils/bls/src/signature.rs | 9 ++++--- 5 files changed, 23 insertions(+), 26 deletions(-) diff --git a/eth2/types/src/test_utils/keypairs_file.rs b/eth2/types/src/test_utils/keypairs_file.rs index b0ac8424f..5804b9696 100644 --- a/eth2/types/src/test_utils/keypairs_file.rs +++ b/eth2/types/src/test_utils/keypairs_file.rs @@ -58,7 +58,7 @@ impl KeypairsFile for Vec { let pk_start = sk_end; let pk_end = pk_start + PUBLIC_KEY_BYTES_LEN; - let pk = PublicKey::from_bytes(&buf[pk_start..pk_end]) + let pk = PublicKey::from_uncompressed_bytes(&buf[pk_start..pk_end]) .map_err(|_| Error::new(ErrorKind::Other, "Invalid PublicKey bytes")) .unwrap(); diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 5ac38595a..468ed8050 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.5.2" } +bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.6.0" } hashing = { path = "../hashing" } hex = "0.3" serde = "1.0" diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 2d8776353..fa3628a89 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -48,15 +48,9 @@ impl AggregateSignature { domain: u64, aggregate_public_keys: &[&AggregatePublicKey], ) -> bool { - // TODO: the API for `RawAggregatePublicKey` shoudn't need to take an owned - // `AggregatePublicKey`. There is an issue to fix this, but in the meantime we need to - // clone. - // - // https://github.com/sigp/signature-schemes/issues/10 - let aggregate_public_keys: Vec = aggregate_public_keys + let aggregate_public_keys: Vec<&RawAggregatePublicKey> = aggregate_public_keys .iter() .map(|pk| pk.as_raw()) - .cloned() .collect(); // Messages are concatenated into one long message. diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index ecdfce3eb..eaf2c9d3f 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -1,6 +1,6 @@ use super::serde_vistors::HexVisitor; use super::SecretKey; -use bls_aggregates::{DecodeError as BlsDecodeError, PublicKey as RawPublicKey}; +use bls_aggregates::PublicKey as RawPublicKey; use hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -22,18 +22,22 @@ impl PublicKey { PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) } - /// Instantiate a PublicKey from existing bytes. - /// - /// Note: this is _not_ SSZ decoding. - pub fn from_bytes(bytes: &[u8]) -> Result { - Ok(Self(RawPublicKey::from_bytes(bytes)?)) - } - - /// Returns the underlying public key. + /// Returns the underlying signature. 
pub fn as_raw(&self) -> &RawPublicKey { &self.0 } + /// Returns the PublicKey as (x, y) bytes + pub fn as_uncompressed_bytes(&mut self) -> Vec { + RawPublicKey::as_uncompressed_bytes(&mut self.0) + } + + /// Converts (x, y) bytes to PublicKey + pub fn from_uncompressed_bytes(bytes: &[u8]) -> Result { + let pubkey = RawPublicKey::from_uncompressed_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; + Ok(PublicKey(pubkey)) + } + /// Returns the last 6 bytes of the SSZ encoding of the public key, as a hex string. /// /// Useful for providing a short identifier to the user. @@ -100,11 +104,7 @@ impl PartialEq for PublicKey { impl Hash for PublicKey { fn hash(&self, state: &mut H) { - // Note: this is not necessarily the consensus-ready hash. Instead, it is designed to be - // optimally fast for internal usage. - // - // To hash for consensus purposes, use the SSZ-encoded bytes. - self.0.as_bytes().hash(state) + ssz_encode(self).hash(state) } } diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 86c54cba7..760b0018a 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -54,9 +54,12 @@ impl Signature { /// Returns a new empty signature. pub fn empty_signature() -> Self { + // Empty Signature is currently being represented as BLS::Signature.point_at_infinity() + // However it should be represented as vec![0; 96] but this + // would require all signatures to be represented in byte form as opposed to Signature let mut empty: Vec = vec![0; 96]; - // TODO: Modify the way flags are used (b_flag should not be used for empty_signature in the future) - empty[0] += u8::pow(2, 6); + // Sets C_flag and B_flag to 1 and all else to 0 + empty[0] += u8::pow(2, 6) + u8::pow(2, 7); Signature(RawSignature::from_bytes(&empty).unwrap()) } } @@ -129,7 +132,7 @@ mod tests { assert_eq!(sig_as_bytes.len(), 96); for (i, one_byte) in sig_as_bytes.iter().enumerate() { if i == 0 { - assert_eq!(*one_byte, u8::pow(2, 6)); + assert_eq!(*one_byte, u8::pow(2, 6) + u8::pow(2, 7)); } else { assert_eq!(*one_byte, 0); } From 1b252c3f82db7b595710081a580367290cb6628b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 17:15:45 +1100 Subject: [PATCH 51/56] Implement new uncompressed bytes for PublicKey --- eth2/types/src/test_utils/keypairs_file.rs | 4 ++-- eth2/utils/bls/src/public_key.rs | 21 +++++++++++++++++---- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/eth2/types/src/test_utils/keypairs_file.rs b/eth2/types/src/test_utils/keypairs_file.rs index 5804b9696..ebc50f528 100644 --- a/eth2/types/src/test_utils/keypairs_file.rs +++ b/eth2/types/src/test_utils/keypairs_file.rs @@ -4,7 +4,7 @@ use std::fs::File; use std::io::{Error, ErrorKind, Read, Write}; use std::path::Path; -pub const PUBLIC_KEY_BYTES_LEN: usize = 48; +pub const PUBLIC_KEY_BYTES_LEN: usize = 96; pub const SECRET_KEY_BYTES_LEN: usize = 48; pub const BATCH_SIZE: usize = 1_000; // ~15MB @@ -26,7 +26,7 @@ impl KeypairsFile for Vec { for keypair in keypair_batch { buf.append(&mut keypair.sk.as_raw().as_bytes()); - buf.append(&mut keypair.pk.as_raw().as_bytes()); + buf.append(&mut keypair.pk.clone().as_uncompressed_bytes()); } keypairs_file.write_all(&buf)?; diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index eaf2c9d3f..777ccceaa 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -27,14 +27,21 @@ impl PublicKey { &self.0 } + /// Converts compressed bytes to PublicKey + pub fn from_bytes(bytes: &[u8]) -> 
Result { + let pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; + Ok(PublicKey(pubkey)) + } + /// Returns the PublicKey as (x, y) bytes - pub fn as_uncompressed_bytes(&mut self) -> Vec { - RawPublicKey::as_uncompressed_bytes(&mut self.0) + pub fn as_uncompressed_bytes(&self) -> Vec { + RawPublicKey::as_uncompressed_bytes(&mut self.0.clone()) } /// Converts (x, y) bytes to PublicKey pub fn from_uncompressed_bytes(bytes: &[u8]) -> Result { - let pubkey = RawPublicKey::from_uncompressed_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; + let pubkey = + RawPublicKey::from_uncompressed_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; Ok(PublicKey(pubkey)) } @@ -103,8 +110,14 @@ impl PartialEq for PublicKey { } impl Hash for PublicKey { + /// Note: this is distinct from consensus serialization, it will produce a different hash. + /// + /// This method uses the uncompressed bytes, which are much faster to obtain than the + /// compressed bytes required for consensus serialization. + /// + /// Use `ssz::Encode` to obtain the bytes required for consensus hashing. fn hash(&self, state: &mut H) { - ssz_encode(self).hash(state) + self.as_uncompressed_bytes().hash(state) } } From fbfa233d36c45af1dbd85192931427095aed2d64 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 17:16:12 +1100 Subject: [PATCH 52/56] Add debug messages to TestingBeaconStateBuilder --- .../benches/bench_block_processing.rs | 13 +++++ eth2/state_processing/benches/benches.rs | 50 ++++--------------- .../testing_beacon_state_builder.rs | 17 +++++-- 3 files changed, 35 insertions(+), 45 deletions(-) diff --git a/eth2/state_processing/benches/bench_block_processing.rs b/eth2/state_processing/benches/bench_block_processing.rs index 1028d4a20..031942473 100644 --- a/eth2/state_processing/benches/bench_block_processing.rs +++ b/eth2/state_processing/benches/bench_block_processing.rs @@ -1,5 +1,6 @@ use criterion::Criterion; use criterion::{black_box, Benchmark}; +use log::debug; use ssz::TreeHash; use state_processing::{ per_block_processing, @@ -107,6 +108,10 @@ fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) let mut validators_iter = (0..keypairs.len() as u64).into_iter(); // Insert the maximum possible number of `ProposerSlashing` objects. + debug!( + "Inserting {} proposer slashings...", + spec.max_proposer_slashings + ); for _ in 0..spec.max_proposer_slashings { let validator_index = validators_iter.next().expect("Insufficient validators."); @@ -119,6 +124,10 @@ fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) } // Insert the maximum possible number of `AttesterSlashing` objects + debug!( + "Inserting {} attester slashings...", + spec.max_attester_slashings + ); for _ in 0..spec.max_attester_slashings { let mut attesters: Vec = vec![]; let mut secret_keys: Vec<&SecretKey> = vec![]; @@ -134,17 +143,20 @@ fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) } // Insert the maximum possible number of `Attestation` objects. + debug!("Inserting {} attestations...", spec.max_attestations); let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); builder .fill_with_attestations(state, &all_secret_keys, spec) .unwrap(); // Insert the maximum possible number of `Deposit` objects. 
+ debug!("Inserting {} deposits...", spec.max_deposits); for i in 0..spec.max_deposits { builder.insert_deposit(32_000_000_000, state.deposit_index + i, state, spec); } // Insert the maximum possible number of `Exit` objects. + debug!("Inserting {} exits...", spec.max_voluntary_exits); for _ in 0..spec.max_voluntary_exits { let validator_index = validators_iter.next().expect("Insufficient validators."); @@ -157,6 +169,7 @@ fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) } // Insert the maximum possible number of `Transfer` objects. + debug!("Inserting {} transfers...", spec.max_transfers); for _ in 0..spec.max_transfers { let validator_index = validators_iter.next().expect("Insufficient validators."); diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 9b16f732a..ad8c4f714 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -1,6 +1,7 @@ use criterion::Benchmark; use criterion::Criterion; use criterion::{criterion_group, criterion_main}; +use env_logger::{Builder, Env}; use types::test_utils::TestingBeaconStateBuilder; use types::*; @@ -9,50 +10,17 @@ mod bench_epoch_processing; pub const VALIDATOR_COUNT: usize = 300_032; +// `LOG_LEVEL == "debug"` gives logs, but they're very noisy and slow down benching. +pub const LOG_LEVEL: &str = ""; + pub fn state_processing(c: &mut Criterion) { + if LOG_LEVEL != "" { + Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init(); + } + bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); } -pub fn key_loading(c: &mut Criterion) { - let validator_count = 1000; - - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("generated", move |b| { - b.iter_batched( - || (), - |_| { - TestingBeaconStateBuilder::from_deterministic_keypairs( - validator_count, - &ChainSpec::foundation(), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - // Note: path needs to be relative to where cargo is executed from. - c.bench( - &format!("{}_validators", validator_count), - Benchmark::new("from_file", move |b| { - b.iter_batched( - || (), - |_| { - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( - validator_count, - &ChainSpec::foundation(), - ) - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); -} - -// criterion_group!(benches, state_processing, key_loading); -criterion_group!(benches, key_loading); +criterion_group!(benches, state_processing); criterion_main!(benches); diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index 63d6f5877..afefa4063 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -3,6 +3,7 @@ use crate::beacon_state::BeaconStateBuilder; use crate::*; use bls::get_withdrawal_credentials; use dirs; +use log::debug; use rayon::prelude::*; use std::path::{Path, PathBuf}; @@ -58,12 +59,14 @@ impl TestingBeaconStateBuilder { /// /// If the file does not exist, is invalid or does not contain enough keypairs. 
pub fn from_keypairs_file(validator_count: usize, path: &Path, spec: &ChainSpec) -> Self { + debug!("Loading {} keypairs from file...", validator_count); let keypairs = Vec::from_raw_file(path, validator_count).unwrap(); TestingBeaconStateBuilder::from_keypairs(keypairs, spec) } /// Generates the validator keypairs deterministically. pub fn from_deterministic_keypairs(validator_count: usize, spec: &ChainSpec) -> Self { + debug!("Generating {} deterministic keypairs...", validator_count); let keypairs = generate_deterministic_keypairs(validator_count); TestingBeaconStateBuilder::from_keypairs(keypairs, spec) } @@ -72,6 +75,10 @@ impl TestingBeaconStateBuilder { pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { let validator_count = keypairs.len(); + debug!( + "Building {} Validator objects from keypairs...", + validator_count + ); let validators = keypairs .par_iter() .map(|keypair| { @@ -103,6 +110,7 @@ impl TestingBeaconStateBuilder { let balances = vec![32_000_000_000; validator_count]; + debug!("Importing {} existing validators...", validator_count); state_builder.import_existing_validators( validators, balances, @@ -110,10 +118,11 @@ impl TestingBeaconStateBuilder { spec, ); - Self { - state: state_builder.build(spec).unwrap(), - keypairs, - } + let state = state_builder.build(spec).unwrap(); + + debug!("BeaconState built."); + + Self { state, keypairs } } /// Consume the builder and return the `BeaconState` and the keypairs for each validator. From 9057b436f339ab55d11858bba23998cbde919c97 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 17:19:35 +1100 Subject: [PATCH 53/56] Run rustfmt --- eth2/utils/bls/src/aggregate_signature.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index fa3628a89..af0879ec7 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -48,10 +48,8 @@ impl AggregateSignature { domain: u64, aggregate_public_keys: &[&AggregatePublicKey], ) -> bool { - let aggregate_public_keys: Vec<&RawAggregatePublicKey> = aggregate_public_keys - .iter() - .map(|pk| pk.as_raw()) - .collect(); + let aggregate_public_keys: Vec<&RawAggregatePublicKey> = + aggregate_public_keys.iter().map(|pk| pk.as_raw()).collect(); // Messages are concatenated into one long message. let mut msg: Vec = vec![]; From dc221f322063ea7739b0c82b8c399beb14e667b2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 17:30:00 +1100 Subject: [PATCH 54/56] Make attester/proposer slashing builders test-only They didn't do anything useful or safe for production. 
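// ---------------------------------------------------------------------------
// Example (not part of this patch): the renamed builders are driven by a
// caller-supplied signing closure rather than by any real key management,
// which is why they are test-only. A condensed sketch following the closure
// shape used in `testing_beacon_block_builder.rs` below (the function name is
// illustrative; `secret_key`, `fork` and `spec` are assumed to be in scope):
use types::test_utils::TestingProposerSlashingBuilder;
use types::*;

fn example_proposer_slashing(
    validator_index: u64,
    secret_key: &SecretKey,
    fork: &Fork,
    spec: &ChainSpec,
) -> ProposerSlashing {
    let signer = |_validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| {
        let domain = spec.get_domain(epoch, domain, fork);
        Signature::new(message, domain, secret_key)
    };

    TestingProposerSlashingBuilder::double_vote(validator_index, signer, spec)
}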
--- beacon_node/beacon_chain/test_harness/src/test_case.rs | 8 +++----- eth2/types/src/attester_slashing.rs | 4 ---- eth2/types/src/proposer_slashing.rs | 4 ---- eth2/types/src/test_utils/mod.rs | 4 ++++ .../testing_attester_slashing_builder.rs} | 4 ++-- .../src/test_utils/testing_beacon_block_builder.rs | 10 ++++------ .../testing_proposer_slashing_builder.rs} | 4 ++-- 7 files changed, 15 insertions(+), 23 deletions(-) rename eth2/types/src/{attester_slashing/builder.rs => test_utils/testing_attester_slashing_builder.rs} (97%) rename eth2/types/src/{proposer_slashing/builder.rs => test_utils/testing_proposer_slashing_builder.rs} (95%) diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index a18b4688b..b6b1ea5cc 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -9,9 +9,7 @@ use ssz::SignedRoot; use std::path::Path; use types::*; -use types::{ - attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, -}; +use types::test_utils::{TestingAttesterSlashingBuilder, TestingProposerSlashingBuilder}; use yaml_rust::Yaml; mod config; @@ -331,7 +329,7 @@ fn build_double_vote_attester_slashing( .expect("Unable to sign AttesterSlashing") }; - AttesterSlashingBuilder::double_vote(validator_indices, signer) + TestingAttesterSlashingBuilder::double_vote(validator_indices, signer) } /// Builds an `ProposerSlashing` for some `validator_index`. @@ -344,5 +342,5 @@ fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) - .expect("Unable to sign AttesterSlashing") }; - ProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec) + TestingProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec) } diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 1cb671960..7a0752b6a 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -4,10 +4,6 @@ use serde_derive::Serialize; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; -mod builder; - -pub use builder::AttesterSlashingBuilder; - /// Two conflicting attestations. /// /// Spec v0.4.0 diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index f86e7f3a8..394c55a01 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -5,10 +5,6 @@ use serde_derive::Serialize; use ssz_derive::{Decode, Encode, TreeHash}; use test_random_derive::TestRandom; -mod builder; - -pub use builder::ProposerSlashingBuilder; - /// Two conflicting proposals from the same proposer (validator). 
/// /// Spec v0.4.0 diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 26d340e7d..6fdbe53ad 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -2,9 +2,11 @@ mod generate_deterministic_keypairs; mod keypairs_file; mod test_random; mod testing_attestation_builder; +mod testing_attester_slashing_builder; mod testing_beacon_block_builder; mod testing_beacon_state_builder; mod testing_deposit_builder; +mod testing_proposer_slashing_builder; mod testing_transfer_builder; mod testing_voluntary_exit_builder; @@ -13,8 +15,10 @@ pub use keypairs_file::KeypairsFile; pub use rand::{prng::XorShiftRng, SeedableRng}; pub use test_random::TestRandom; pub use testing_attestation_builder::TestingAttestationBuilder; +pub use testing_attester_slashing_builder::TestingAttesterSlashingBuilder; pub use testing_beacon_block_builder::TestingBeaconBlockBuilder; pub use testing_beacon_state_builder::{keypairs_path, TestingBeaconStateBuilder}; pub use testing_deposit_builder::TestingDepositBuilder; +pub use testing_proposer_slashing_builder::TestingProposerSlashingBuilder; pub use testing_transfer_builder::TestingTransferBuilder; pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; diff --git a/eth2/types/src/attester_slashing/builder.rs b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs similarity index 97% rename from eth2/types/src/attester_slashing/builder.rs rename to eth2/types/src/test_utils/testing_attester_slashing_builder.rs index 8edf4ed65..d9da3db2d 100644 --- a/eth2/types/src/attester_slashing/builder.rs +++ b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs @@ -2,9 +2,9 @@ use crate::*; use ssz::TreeHash; /// Builds an `AttesterSlashing`. -pub struct AttesterSlashingBuilder(); +pub struct TestingAttesterSlashingBuilder(); -impl AttesterSlashingBuilder { +impl TestingAttesterSlashingBuilder { /// Builds an `AttesterSlashing` that is a double vote. /// /// The `signer` function is used to sign the double-vote and accepts: diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index bbdc5046b..3ebd24b0a 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -1,9 +1,7 @@ use crate::{ - attester_slashing::AttesterSlashingBuilder, - proposer_slashing::ProposerSlashingBuilder, test_utils::{ - TestingAttestationBuilder, TestingDepositBuilder, TestingTransferBuilder, - TestingVoluntaryExitBuilder, + TestingAttestationBuilder, TestingAttesterSlashingBuilder, TestingDepositBuilder, + TestingProposerSlashingBuilder, TestingTransferBuilder, TestingVoluntaryExitBuilder, }, *, }; @@ -232,7 +230,7 @@ fn build_proposer_slashing( Signature::new(message, domain, secret_key) }; - ProposerSlashingBuilder::double_vote(validator_index, signer, spec) + TestingProposerSlashingBuilder::double_vote(validator_index, signer, spec) } /// Builds an `AttesterSlashing` for some `validator_indices`. 
@@ -253,5 +251,5 @@ fn build_double_vote_attester_slashing( Signature::new(message, domain, secret_keys[key_index]) }; - AttesterSlashingBuilder::double_vote(validator_indices, signer) + TestingAttesterSlashingBuilder::double_vote(validator_indices, signer) } diff --git a/eth2/types/src/proposer_slashing/builder.rs b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs similarity index 95% rename from eth2/types/src/proposer_slashing/builder.rs rename to eth2/types/src/test_utils/testing_proposer_slashing_builder.rs index 472a76ec1..43ff3d0b7 100644 --- a/eth2/types/src/proposer_slashing/builder.rs +++ b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs @@ -2,9 +2,9 @@ use crate::*; use ssz::SignedRoot; /// Builds a `ProposerSlashing`. -pub struct ProposerSlashingBuilder(); +pub struct TestingProposerSlashingBuilder(); -impl ProposerSlashingBuilder { +impl TestingProposerSlashingBuilder { /// Builds a `ProposerSlashing` that is a double vote. /// /// The `signer` function is used to sign the double-vote and accepts: From f949919b9bf7aedf79d095399fd2559c7954453e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 18:02:53 +1100 Subject: [PATCH 55/56] Add comments to epoch_processing --- .../beacon_chain/test_harness/src/gen_keys.rs | 1 + .../beacon_chain/test_harness/src/run_test.rs | 1 + .../src/per_epoch_processing.rs | 67 +++++++++++++++---- .../src/per_epoch_processing/attester_sets.rs | 35 ++++++++++ 4 files changed, 90 insertions(+), 14 deletions(-) diff --git a/beacon_node/beacon_chain/test_harness/src/gen_keys.rs b/beacon_node/beacon_chain/test_harness/src/gen_keys.rs index f2f81b393..abd512423 100644 --- a/beacon_node/beacon_chain/test_harness/src/gen_keys.rs +++ b/beacon_node/beacon_chain/test_harness/src/gen_keys.rs @@ -3,6 +3,7 @@ use log::debug; use std::path::Path; use types::test_utils::{generate_deterministic_keypairs, KeypairsFile}; +/// Creates a file containing BLS keypairs. pub fn gen_keys(matches: &ArgMatches) { let validator_count = value_t!(matches.value_of("validator_count"), usize) .expect("Validator count is required argument"); diff --git a/beacon_node/beacon_chain/test_harness/src/run_test.rs b/beacon_node/beacon_chain/test_harness/src/run_test.rs index 51a993bd7..d4e2e1cf2 100644 --- a/beacon_node/beacon_chain/test_harness/src/run_test.rs +++ b/beacon_node/beacon_chain/test_harness/src/run_test.rs @@ -4,6 +4,7 @@ use std::path::Path; use std::{fs::File, io::prelude::*}; use yaml_rust::YamlLoader; +/// Runs a YAML-specified test case. pub fn run_test(matches: &ArgMatches) { if let Some(yaml_file) = matches.value_of("yaml") { let docs = { diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index bb064ac34..4abbe012c 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -3,7 +3,6 @@ use errors::EpochProcessingError as Error; use fnv::FnvHashMap; use fnv::FnvHashSet; use integer_sqrt::IntegerSquareRoot; -use log::debug; use rayon::prelude::*; use ssz::TreeHash; use std::collections::HashMap; @@ -17,14 +16,20 @@ pub mod inclusion_distance; pub mod tests; pub mod winning_root; +/// Maps a shard to a winning root. +/// +/// It is generated during crosslink processing and later used to reward/penalize validators. +pub type WinningRootHashSet = HashMap; + +/// Performs per-epoch processing on some BeaconState. +/// +/// Mutates the given `BeaconState`, returning early if an error is encountered. 
If an error is +/// returned, a state might be "half-processed" and therefore in an invalid state. +/// +/// Spec v0.4.0 pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let previous_epoch = state.previous_epoch(spec); - debug!( - "Starting per-epoch processing on epoch {}...", - state.current_epoch(spec) - ); - // Ensure all of the caches are built. state.build_epoch_cache(RelativeEpoch::Previous, spec)?; state.build_epoch_cache(RelativeEpoch::Current, spec)?; @@ -79,11 +84,12 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result // Rotate the epoch caches to suit the epoch transition. state.advance_caches(); - debug!("Epoch transition complete."); - Ok(()) } +/// Returns a list of active validator indices for the state's current epoch. +/// +/// Spec v0.4.0 pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) -> Vec { get_active_validator_indices( &state.validator_registry, @@ -91,6 +97,14 @@ pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) ) } +/// Calculates various sets of attesters, including: +/// +/// - current epoch attesters +/// - current epoch boundary attesters +/// - previous epoch attesters +/// - etc. +/// +/// Spec v0.4.0 pub fn calculate_attester_sets( state: &BeaconState, spec: &ChainSpec, @@ -113,6 +127,13 @@ pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { } } +/// Update the following fields on the `BeaconState`: +/// +/// - `justification_bitfield`. +/// - `finalized_epoch` +/// - `justified_epoch` +/// - `previous_justified_epoch` +/// /// Spec v0.4.0 pub fn process_justification( state: &mut BeaconState, @@ -190,8 +211,13 @@ pub fn process_justification( state.justified_epoch = new_justified_epoch; } -pub type WinningRootHashSet = HashMap; - +/// Updates the following fields on the `BeaconState`: +/// +/// - `latest_crosslinks` +/// +/// Also returns a `WinningRootHashSet` for later use during epoch processing. +/// +/// Spec v0.4.0 pub fn process_crosslinks( state: &mut BeaconState, spec: &ChainSpec, @@ -250,6 +276,10 @@ pub fn process_crosslinks( Ok(winning_root_for_shards) } +/// Updates the following fields on the BeaconState: +/// +/// - `validator_balances` +/// /// Spec v0.4.0 pub fn process_rewards_and_penalities( state: &mut BeaconState, @@ -488,7 +518,9 @@ pub fn process_rewards_and_penalities( Ok(()) } -// Spec v0.4.0 +/// Peforms a validator registry update, if required. +/// +/// Spec v0.4.0 pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); let next_epoch = state.next_epoch(spec); @@ -535,7 +567,10 @@ pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Ok(()) } -// Spec v0.4.0 +/// Updates the state's `latest_active_index_roots` field with a tree hash the active validator +/// indices for the next epoch. +/// +/// Spec v0.4.0 pub fn update_active_tree_index_roots( state: &mut BeaconState, spec: &ChainSpec, @@ -555,7 +590,9 @@ pub fn update_active_tree_index_roots( Ok(()) } -// Spec v0.4.0 +/// Advances the state's `latest_slashed_balances` field. 
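// ---------------------------------------------------------------------------
// Example (not part of this patch): driving the entry point documented above
// from a test -- build a state, move it to the end of an epoch, then run the
// transition. This mirrors `per_epoch_processing/tests.rs` earlier in the
// series; the import path and function name are assumptions.
use state_processing::per_epoch_processing;
use types::test_utils::TestingBeaconStateBuilder;
use types::ChainSpec;

fn example_epoch_transition() {
    let spec = ChainSpec::few_validators();
    let mut builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);

    // Jump to the last slot of an epoch so the transition has real work to do.
    let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch);
    builder.teleport_to_slot(target_slot, &spec);

    let (mut state, _keypairs) = builder.build();
    per_epoch_processing(&mut state, &spec).unwrap();
}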
+/// +/// Spec v0.4.0 pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) { let current_epoch = state.current_epoch(spec); let next_epoch = state.next_epoch(spec); @@ -564,7 +601,9 @@ pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length]; } -// Spec v0.4.0 +/// Removes all pending attestations from the previous epoch. +/// +/// Spec v0.4.0 pub fn clean_attestations(state: &mut BeaconState, spec: &ChainSpec) { let current_epoch = state.current_epoch(spec); diff --git a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs index d82774ac2..03f49c1d3 100644 --- a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs +++ b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs @@ -1,13 +1,17 @@ use fnv::FnvHashSet; use types::*; +/// A set of validator indices, along with the total balance of all those attesters. #[derive(Default)] pub struct Attesters { + /// A set of validator indices. pub indices: FnvHashSet, + /// The total balance of all validators in `self.indices`. pub balance: u64, } impl Attesters { + /// Add the given indices to the set, incrementing the sets balance by the provided balance. fn add(&mut self, additional_indices: &[usize], additional_balance: u64) { self.indices.reserve(additional_indices.len()); for i in additional_indices { @@ -17,15 +21,35 @@ impl Attesters { } } +/// A collection of `Attester` objects, representing set of attesters that are rewarded/penalized +/// during an epoch transition. pub struct AttesterSets { + /// All validators who attested during the state's current epoch. pub current_epoch: Attesters, + /// All validators who attested that the beacon block root of the first slot of the state's + /// current epoch is the same as the one stored in this state. + /// + /// In short validators who agreed with the state about the first slot of the current epoch. pub current_epoch_boundary: Attesters, + /// All validators who attested during the state's previous epoch. pub previous_epoch: Attesters, + /// All validators who attested that the beacon block root of the first slot of the state's + /// previous epoch is the same as the one stored in this state. + /// + /// In short, validators who agreed with the state about the first slot of the previous epoch. pub previous_epoch_boundary: Attesters, + /// All validators who attested that the beacon block root at the pending attestation's slot is + /// the same as the one stored in this state. + /// + /// In short, validators who agreed with the state about the current beacon block root when + /// they attested. pub previous_epoch_head: Attesters, } impl AttesterSets { + /// Loop through all attestations in the state and instantiate a complete `AttesterSets` struct. + /// + /// Spec v0.4.0 pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { let mut current_epoch = Attesters::default(); let mut current_epoch_boundary = Attesters::default(); @@ -67,10 +91,17 @@ impl AttesterSets { } } +/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. +/// +/// Spec v0.4.0 fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { a.data.slot.epoch(spec.slots_per_epoch) == epoch } +/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for +/// the first slot of the given epoch. 
+/// +/// Spec v0.4.0 fn has_common_epoch_boundary_root( a: &PendingAttestation, state: &BeaconState, @@ -85,6 +116,10 @@ fn has_common_epoch_boundary_root( Ok(a.data.epoch_boundary_root == state_boundary_root) } +/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for +/// the current slot of the `PendingAttestation`. +/// +/// Spec v0.4.0 fn has_common_beacon_block_root( a: &PendingAttestation, state: &BeaconState, From 2be0373f01814a5ce17c4e5aeebd2a9a45cc15d9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 12 Mar 2019 18:26:41 +1100 Subject: [PATCH 56/56] Add comments to new functions/structs. --- eth2/types/src/beacon_block.rs | 6 +++ eth2/types/src/beacon_state.rs | 49 +++---------------- eth2/types/src/test_utils/keypairs_file.rs | 7 +++ .../test_utils/testing_attestation_builder.rs | 9 ++++ .../testing_attester_slashing_builder.rs | 2 + .../testing_beacon_block_builder.rs | 19 ++++++- .../testing_beacon_state_builder.rs | 3 ++ .../src/test_utils/testing_deposit_builder.rs | 11 +++++ .../testing_proposer_slashing_builder.rs | 2 + .../test_utils/testing_transfer_builder.rs | 8 +++ .../testing_voluntary_exit_builder.rs | 8 +++ 11 files changed, 80 insertions(+), 44 deletions(-) diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 9e1b3f7ae..b67c866a4 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -23,6 +23,8 @@ pub struct BeaconBlock { impl BeaconBlock { /// Produce the first block of the Beacon Chain. + /// + /// Spec v0.4.0 pub fn genesis(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock { BeaconBlock { slot: spec.genesis_slot, @@ -46,11 +48,15 @@ impl BeaconBlock { } /// Returns the `hash_tree_root` of the block. + /// + /// Spec v0.4.0 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.hash_tree_root()[..]) } /// Returns an unsigned proposal for block. + /// + /// Spec v0.4.0 pub fn proposal(&self, spec: &ChainSpec) -> Proposal { Proposal { slot: self.slot, diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index f69746dae..b4faa6a49 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -6,7 +6,6 @@ use honey_badger_split::SplitExt; use int_to_bytes::int_to_bytes32; use log::{debug, error, trace}; use rand::RngCore; -use rayon::prelude::*; use serde_derive::Serialize; use ssz::{hash, Decodable, DecodeError, Encodable, SignedRoot, SszStream, TreeHash}; use std::collections::HashMap; @@ -113,6 +112,11 @@ pub struct BeaconState { impl BeaconState { /// Produce the first state of the Beacon Chain. + /// + /// This does not fully build a genesis beacon state, it omits processing of initial validator + /// deposits. To obtain a full genesis beacon state, use the `BeaconStateBuilder`. + /// + /// Spec v0.4.0 pub fn genesis(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> BeaconState { let initial_crosslink = Crosslink { epoch: spec.genesis_epoch, @@ -185,44 +189,9 @@ impl BeaconState { } } - /// Produce the first state of the Beacon Chain. 
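// ---------------------------------------------------------------------------
// Example (not part of this patch): the three `BeaconBlock` helpers documented
// above, exercised together. `spec.zero_hash` is used as a placeholder state
// root, exactly as the testing block builder in this patch does; the function
// name is illustrative.
use types::*;

fn example_genesis_block() {
    let spec = ChainSpec::foundation();
    let block = BeaconBlock::genesis(spec.zero_hash, &spec);

    let _root: Hash256 = block.canonical_root(); // hash_tree_root of the block
    let _proposal: Proposal = block.proposal(&spec); // unsigned, ready to be signed
}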
- pub fn process_initial_deposits( - &mut self, - initial_validator_deposits: Vec, - spec: &ChainSpec, - ) -> Result<(), Error> { - debug!("Processing genesis deposits..."); - - let deposit_data = initial_validator_deposits - .par_iter() - .map(|deposit| &deposit.deposit_data) - .collect(); - - self.process_deposits(deposit_data, spec); - - trace!("Processed genesis deposits."); - - for validator_index in 0..self.validator_registry.len() { - if self.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount { - self.activate_validator(validator_index, true, spec); - } - } - - self.deposit_index = initial_validator_deposits.len() as u64; - - let genesis_active_index_root = hash_tree_root(get_active_validator_indices( - &self.validator_registry, - spec.genesis_epoch, - )); - self.latest_active_index_roots = - vec![genesis_active_index_root; spec.latest_active_index_roots_length]; - - self.current_shuffling_seed = self.generate_seed(spec.genesis_epoch, spec)?; - - Ok(()) - } - /// Returns the `hash_tree_root` of the state. + /// + /// Spec v0.4.0 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.hash_tree_root()[..]) } @@ -1129,10 +1098,6 @@ impl BeaconState { } } -fn hash_tree_root(input: Vec) -> Hash256 { - Hash256::from_slice(&input.hash_tree_root()[..]) -} - impl Encodable for BeaconState { fn ssz_append(&self, s: &mut SszStream) { s.append(&self.slot); diff --git a/eth2/types/src/test_utils/keypairs_file.rs b/eth2/types/src/test_utils/keypairs_file.rs index ebc50f528..a1ea4d928 100644 --- a/eth2/types/src/test_utils/keypairs_file.rs +++ b/eth2/types/src/test_utils/keypairs_file.rs @@ -12,12 +12,17 @@ pub const BATCH_SIZE: usize = 1_000; // ~15MB pub const KEYPAIR_BYTES_LEN: usize = PUBLIC_KEY_BYTES_LEN + SECRET_KEY_BYTES_LEN; pub const BATCH_BYTE_LEN: usize = KEYPAIR_BYTES_LEN * BATCH_SIZE; +/// Defines a trait that allows reading/writing a vec of `Keypair` from/to a file. pub trait KeypairsFile { + /// Write to file, without guaranteeing interoperability with other clients. fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error>; + /// Read from file, without guaranteeing interoperability with other clients. fn from_raw_file(path: &Path, count: usize) -> Result, Error>; } impl KeypairsFile for Vec { + /// Write the keypairs to file, using the fastest possible method without guaranteeing + /// interoperability with other clients. fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error> { let mut keypairs_file = File::create(path)?; @@ -35,6 +40,8 @@ impl KeypairsFile for Vec { Ok(()) } + /// Read the keypairs from file, using the fastest possible method without guaranteeing + /// interoperability with other clients. fn from_raw_file(path: &Path, count: usize) -> Result, Error> { let mut keypairs_file = File::open(path)?; diff --git a/eth2/types/src/test_utils/testing_attestation_builder.rs b/eth2/types/src/test_utils/testing_attestation_builder.rs index f52edadfe..8c86d756d 100644 --- a/eth2/types/src/test_utils/testing_attestation_builder.rs +++ b/eth2/types/src/test_utils/testing_attestation_builder.rs @@ -1,12 +1,16 @@ use crate::*; use ssz::TreeHash; +/// Builds an attestation to be used for testing purposes. +/// +/// This struct should **never be used for production purposes.** pub struct TestingAttestationBuilder { committee: Vec, attestation: Attestation, } impl TestingAttestationBuilder { + /// Create a new attestation builder. 
pub fn new( state: &BeaconState, committee: &[usize], @@ -70,6 +74,10 @@ impl TestingAttestationBuilder { } } + /// Signs the attestation with a subset (or all) committee members. + /// + /// `secret_keys` must be supplied in the same order as `signing_validators`. I.e., the first + /// keypair must be that of the first signing validator. pub fn sign( &mut self, signing_validators: &[usize], @@ -111,6 +119,7 @@ impl TestingAttestationBuilder { } } + /// Consume the builder and return the attestation. pub fn build(self) -> Attestation { self.attestation } diff --git a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs index d9da3db2d..232de87ec 100644 --- a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs @@ -2,6 +2,8 @@ use crate::*; use ssz::TreeHash; /// Builds an `AttesterSlashing`. +/// +/// This struct should **never be used for production purposes.** pub struct TestingAttesterSlashingBuilder(); impl TestingAttesterSlashingBuilder { diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index 3ebd24b0a..97e395e1f 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -8,22 +8,29 @@ use crate::{ use rayon::prelude::*; use ssz::{SignedRoot, TreeHash}; +/// Builds a beacon block to be used for testing purposes. +/// +/// This struct should **never be used for production purposes.** pub struct TestingBeaconBlockBuilder { block: BeaconBlock, } impl TestingBeaconBlockBuilder { + /// Create a new builder from genesis. pub fn new(spec: &ChainSpec) -> Self { Self { block: BeaconBlock::genesis(spec.zero_hash, spec), } } + /// Set the slot of the block. pub fn set_slot(&mut self, slot: Slot) { self.block.slot = slot; } /// Signs the block. + /// + /// Modifying the block after signing may invalidate the signature. pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { let proposal = self.block.proposal(spec); let message = proposal.signed_root(); @@ -33,6 +40,8 @@ impl TestingBeaconBlockBuilder { } /// Sets the randao to be a signature across the blocks epoch. + /// + /// Modifying the block's slot after signing may invalidate the signature. pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { let epoch = self.block.slot.epoch(spec.slots_per_epoch); let message = epoch.hash_tree_root(); @@ -65,9 +74,15 @@ impl TestingBeaconBlockBuilder { self.block.body.attester_slashings.push(attester_slashing); } - /// Fills the block with as many attestations as possible. + /// Fills the block with `MAX_ATTESTATIONS` attestations. /// - /// Note: this will not perform well when `jepoch_committees_count % slots_per_epoch != 0` + /// It will first go and get each committee that is able to include an attestation in this + /// block. If there are enough committees, it will produce an attestation for each. If there + /// are _not_ enough committees, it will start splitting the committees in half until it + /// achieves the target. It will then produce separate attestations for each split committee. + /// + /// Note: the signed messages of the split committees will be identical -- it would be possible + /// to aggregate these split attestations. 
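// ---------------------------------------------------------------------------
// Example (not part of this patch): a condensed sketch of driving the block
// builder documented above. The function name is illustrative and the
// `state`, `keypairs`, `fork` and `spec` values are assumed to come from a
// `TestingBeaconStateBuilder`, as in the benches earlier in the series.
use types::test_utils::TestingBeaconBlockBuilder;
use types::*;

fn example_fill_block(state: &BeaconState, keypairs: &[Keypair], fork: &Fork, spec: &ChainSpec) {
    let proposer_sk = &keypairs[0].sk;

    let mut builder = TestingBeaconBlockBuilder::new(spec);
    builder.set_slot(state.slot);
    builder.set_randao_reveal(proposer_sk, fork, spec);

    // Fill the body with `MAX_ATTESTATIONS` attestations, splitting committees
    // in half when there are not enough of them (see the note above).
    let secret_keys: Vec<&SecretKey> = keypairs.iter().map(|k| &k.sk).collect();
    builder
        .fill_with_attestations(state, &secret_keys, spec)
        .unwrap();

    // Sign last; modifying the block afterwards would invalidate the signature.
    builder.sign(proposer_sk, fork, spec);
}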
     pub fn fill_with_attestations(
         &mut self,
         state: &BeaconState,
diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs
index afefa4063..b2cf28c8a 100644
--- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs
+++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs
@@ -20,6 +20,9 @@ pub fn keypairs_path() -> PathBuf {
     dir.join(KEYPAIRS_FILE)
 }

+/// Builds a beacon state to be used for testing purposes.
+///
+/// This struct should **never be used for production purposes.**
 pub struct TestingBeaconStateBuilder {
     state: BeaconState,
     keypairs: Vec<Keypair>,
diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs
index 56e81cad0..729311468 100644
--- a/eth2/types/src/test_utils/testing_deposit_builder.rs
+++ b/eth2/types/src/test_utils/testing_deposit_builder.rs
@@ -1,11 +1,15 @@
 use crate::*;
 use bls::get_withdrawal_credentials;

+/// Builds a deposit to be used for testing purposes.
+///
+/// This struct should **never be used for production purposes.**
 pub struct TestingDepositBuilder {
     deposit: Deposit,
 }

 impl TestingDepositBuilder {
+    /// Instantiates a new builder.
     pub fn new(amount: u64) -> Self {
         let keypair = Keypair::random();

@@ -26,10 +30,16 @@ impl TestingDepositBuilder {
         Self { deposit }
     }

+    /// Set the `deposit.index` value.
     pub fn set_index(&mut self, index: u64) {
         self.deposit.index = index;
     }

+    /// Signs the deposit, also setting the following values:
+    ///
+    /// - `pubkey` to the signing pubkey.
+    /// - `withdrawal_credentials` to credentials derived from the signing pubkey.
+    /// - `proof_of_possession` to a valid proof-of-possession signature.
     pub fn sign(&mut self, keypair: &Keypair, state: &BeaconState, spec: &ChainSpec) {
         let withdrawal_credentials = Hash256::from_slice(
             &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..],
@@ -47,6 +57,7 @@ impl TestingDepositBuilder {
             DepositInput::create_proof_of_possession(&keypair, &withdrawal_credentials, domain);
     }

+    /// Builds the deposit, consuming the builder.
     pub fn build(self) -> Deposit {
         self.deposit
     }
diff --git a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs
index 43ff3d0b7..7f16b679f 100644
--- a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs
+++ b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs
@@ -2,6 +2,8 @@ use crate::*;
 use ssz::SignedRoot;

 /// Builds a `ProposerSlashing`.
+///
+/// This struct should **never be used for production purposes.**
 pub struct TestingProposerSlashingBuilder();

 impl TestingProposerSlashingBuilder {
diff --git a/eth2/types/src/test_utils/testing_transfer_builder.rs b/eth2/types/src/test_utils/testing_transfer_builder.rs
index c343e8fd2..c4256ebea 100644
--- a/eth2/types/src/test_utils/testing_transfer_builder.rs
+++ b/eth2/types/src/test_utils/testing_transfer_builder.rs
@@ -1,11 +1,15 @@
 use crate::*;
 use ssz::SignedRoot;

+/// Builds a transfer to be used for testing purposes.
+///
+/// This struct should **never be used for production purposes.**
 pub struct TestingTransferBuilder {
     transfer: Transfer,
 }

 impl TestingTransferBuilder {
+    /// Instantiates a new builder.
     pub fn new(from: u64, to: u64, amount: u64, slot: Slot) -> Self {
         let keypair = Keypair::random();

@@ -22,6 +26,9 @@ impl TestingTransferBuilder {
         Self { transfer }
     }

+    /// Signs the transfer.
+    ///
+    /// The keypair must match that of the `from` validator index.
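The deposit and transfer builders in the hunks above share the same `new` / `sign` / `build` flow. A typical use of the transfer builder is sketched below; it is illustrative only and not part of the patch, and assumes a `state: BeaconState` and `spec: ChainSpec` are in scope and that the `from` validator (index 42 here) owns `keypair`.

    // Illustrative sketch only -- not part of this patch. The indices, amount, `state`
    // and `spec` are assumptions; `sign` must be given the keypair of the `from` validator.
    let keypair = Keypair::random();
    let mut builder = TestingTransferBuilder::new(42, 99, 1_000_000, state.slot);
    builder.sign(keypair, &state.fork, &spec);
    let transfer: Transfer = builder.build();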
     pub fn sign(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) {
         self.transfer.pubkey = keypair.pk;
         let message = self.transfer.signed_root();
@@ -31,6 +38,7 @@ impl TestingTransferBuilder {
         self.transfer.signature = Signature::new(&message, domain, &keypair.sk);
     }

+    /// Builds the transfer, consuming the builder.
     pub fn build(self) -> Transfer {
         self.transfer
     }
diff --git a/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs b/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs
index 92ef4484e..fe5c8325a 100644
--- a/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs
+++ b/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs
@@ -1,11 +1,15 @@
 use crate::*;
 use ssz::SignedRoot;

+/// Builds an exit to be used for testing purposes.
+///
+/// This struct should **never be used for production purposes.**
 pub struct TestingVoluntaryExitBuilder {
     exit: VoluntaryExit,
 }

 impl TestingVoluntaryExitBuilder {
+    /// Instantiates a new builder.
     pub fn new(epoch: Epoch, validator_index: u64) -> Self {
         let exit = VoluntaryExit {
             epoch,
@@ -16,6 +20,9 @@ impl TestingVoluntaryExitBuilder {
         Self { exit }
     }

+    /// Signs the exit.
+    ///
+    /// The signing secret key must match that of the exiting validator.
     pub fn sign(&mut self, secret_key: &SecretKey, fork: &Fork, spec: &ChainSpec) {
         let message = self.exit.signed_root();
         let domain = spec.get_domain(self.exit.epoch, Domain::Exit, fork);
@@ -23,6 +30,7 @@ impl TestingVoluntaryExitBuilder {
         self.exit.signature = Signature::new(&message, domain, secret_key);
     }

+    /// Builds the exit, consuming the builder.
     pub fn build(self) -> VoluntaryExit {
         self.exit
     }
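The voluntary exit builder follows the same pattern. An equally illustrative sketch, not part of the patch, assuming `state`, `spec` and the exiting validator's `keypair` are available and that validator 7 is the one exiting:

    // Illustrative sketch only -- not part of this patch. The epoch and validator index
    // are assumptions; the secret key must belong to the exiting validator.
    let exit_epoch = Epoch::new(3);
    let mut builder = TestingVoluntaryExitBuilder::new(exit_epoch, 7);
    builder.sign(&keypair.sk, &state.fork, &spec);
    let exit: VoluntaryExit = builder.build();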