Merge branch 'master' into lighthouse-255
This commit is contained in:
		
						commit
						e02bc82b6a
					
				
							
								
								
									
										1
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										1
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @ -3,3 +3,4 @@ target/ | ||||
| Cargo.lock | ||||
| *.pk | ||||
| *.sk | ||||
| *.raw_keypairs | ||||
|  | ||||
| @ -73,31 +73,18 @@ where | ||||
|     F: ForkChoice, | ||||
| { | ||||
|     /// Instantiate a new Beacon Chain, from genesis.
 | ||||
|     #[allow(clippy::too_many_arguments)] // Will be re-factored in the coming weeks.
 | ||||
|     pub fn genesis( | ||||
|     pub fn from_genesis( | ||||
|         state_store: Arc<BeaconStateStore<T>>, | ||||
|         block_store: Arc<BeaconBlockStore<T>>, | ||||
|         slot_clock: U, | ||||
|         genesis_time: u64, | ||||
|         latest_eth1_data: Eth1Data, | ||||
|         initial_validator_deposits: Vec<Deposit>, | ||||
|         mut genesis_state: BeaconState, | ||||
|         genesis_block: BeaconBlock, | ||||
|         spec: ChainSpec, | ||||
|         fork_choice: F, | ||||
|     ) -> Result<Self, Error> { | ||||
|         if initial_validator_deposits.is_empty() { | ||||
|             return Err(Error::InsufficientValidators); | ||||
|         } | ||||
| 
 | ||||
|         let mut genesis_state = BeaconState::genesis( | ||||
|             genesis_time, | ||||
|             initial_validator_deposits, | ||||
|             latest_eth1_data, | ||||
|             &spec, | ||||
|         )?; | ||||
|         let state_root = genesis_state.canonical_root(); | ||||
|         state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; | ||||
| 
 | ||||
|         let genesis_block = BeaconBlock::genesis(state_root, &spec); | ||||
|         let block_root = genesis_block.canonical_root(); | ||||
|         block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; | ||||
| 
 | ||||
|  | ||||
| @ -33,12 +33,14 @@ failure = "0.1" | ||||
| failure_derive = "0.1" | ||||
| fork_choice = { path = "../../../eth2/fork_choice" } | ||||
| hashing = { path = "../../../eth2/utils/hashing" } | ||||
| int_to_bytes = { path = "../../../eth2/utils/int_to_bytes" } | ||||
| log = "0.4" | ||||
| env_logger = "0.6.0" | ||||
| rayon = "1.0" | ||||
| serde = "1.0" | ||||
| serde_derive = "1.0" | ||||
| serde_json = "1.0" | ||||
| serde_yaml = "0.8" | ||||
| slot_clock = { path = "../../../eth2/utils/slot_clock" } | ||||
| ssz = { path = "../../../eth2/utils/ssz" } | ||||
| types = { path = "../../../eth2/types" } | ||||
|  | ||||
| @ -1,7 +1,7 @@ | ||||
| use super::ValidatorHarness; | ||||
| use beacon_chain::{BeaconChain, BlockProcessingOutcome}; | ||||
| pub use beacon_chain::{BeaconChainError, CheckPoint}; | ||||
| use bls::{create_proof_of_possession, get_withdrawal_credentials}; | ||||
| use bls::get_withdrawal_credentials; | ||||
| use db::{ | ||||
|     stores::{BeaconBlockStore, BeaconStateStore}, | ||||
|     MemoryDB, | ||||
| @ -10,10 +10,17 @@ use fork_choice::BitwiseLMDGhost; | ||||
| use log::debug; | ||||
| use rayon::prelude::*; | ||||
| use slot_clock::TestingSlotClock; | ||||
| use ssz::TreeHash; | ||||
| use std::collections::HashSet; | ||||
| use std::fs::File; | ||||
| use std::iter::FromIterator; | ||||
| use std::path::Path; | ||||
| use std::sync::Arc; | ||||
| use types::*; | ||||
| use types::{beacon_state::BeaconStateBuilder, test_utils::generate_deterministic_keypairs, *}; | ||||
| 
 | ||||
| mod generate_deposits; | ||||
| 
 | ||||
| pub use generate_deposits::generate_deposits_from_keypairs; | ||||
| 
 | ||||
| /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
 | ||||
| /// to it. Each validator is provided a borrow to the beacon chain, where it may read
 | ||||
| @ -35,7 +42,12 @@ impl BeaconChainHarness { | ||||
|     ///
 | ||||
|     /// - A keypair, `BlockProducer` and `Attester` for each validator.
 | ||||
|     /// - A new BeaconChain struct where the given validators are in the genesis.
 | ||||
|     pub fn new(spec: ChainSpec, validator_count: usize) -> Self { | ||||
|     pub fn new( | ||||
|         spec: ChainSpec, | ||||
|         validator_count: usize, | ||||
|         validators_dir: Option<&Path>, | ||||
|         skip_deposit_verification: bool, | ||||
|     ) -> Self { | ||||
|         let db = Arc::new(MemoryDB::open()); | ||||
|         let block_store = Arc::new(BeaconBlockStore::new(db.clone())); | ||||
|         let state_store = Arc::new(BeaconStateStore::new(db.clone())); | ||||
| @ -47,50 +59,89 @@ impl BeaconChainHarness { | ||||
|             block_hash: Hash256::zero(), | ||||
|         }; | ||||
| 
 | ||||
|         debug!("Generating validator keypairs..."); | ||||
|         let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); | ||||
| 
 | ||||
|         let keypairs: Vec<Keypair> = (0..validator_count) | ||||
|             .collect::<Vec<usize>>() | ||||
|             .par_iter() | ||||
|             .map(|_| Keypair::random()) | ||||
|             .collect(); | ||||
|         // If a `validators_dir` is specified, load the keypairs a YAML file.
 | ||||
|         //
 | ||||
|         // Otherwise, generate them deterministically where the first validator has a secret key of
 | ||||
|         // `1`, etc.
 | ||||
|         let keypairs = if let Some(path) = validators_dir { | ||||
|             debug!("Loading validator keypairs from file..."); | ||||
|             let keypairs_file = File::open(path.join("keypairs.yaml")).unwrap(); | ||||
|             let mut keypairs: Vec<Keypair> = serde_yaml::from_reader(&keypairs_file).unwrap(); | ||||
|             keypairs.truncate(validator_count); | ||||
|             keypairs | ||||
|         } else { | ||||
|             debug!("Generating validator keypairs..."); | ||||
|             generate_deterministic_keypairs(validator_count) | ||||
|         }; | ||||
| 
 | ||||
|         debug!("Creating validator deposits..."); | ||||
|         // Skipping deposit verification means directly generating `Validator` records, instead
 | ||||
|         // of generating `Deposit` objects, verifying them and converting them into `Validator`
 | ||||
|         // records.
 | ||||
|         //
 | ||||
|         // It is much faster to skip deposit verification, however it does not test the initial
 | ||||
|         // validator induction part of beacon chain genesis.
 | ||||
|         if skip_deposit_verification { | ||||
|             let validators = keypairs | ||||
|                 .iter() | ||||
|                 .map(|keypair| { | ||||
|                     let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( | ||||
|                         &keypair.pk, | ||||
|                         spec.bls_withdrawal_prefix_byte, | ||||
|                     )); | ||||
| 
 | ||||
|         let initial_validator_deposits = keypairs | ||||
|             .par_iter() | ||||
|             .map(|keypair| Deposit { | ||||
|                 branch: vec![], // branch verification is not specified.
 | ||||
|                 index: 0,       // index verification is not specified.
 | ||||
|                 deposit_data: DepositData { | ||||
|                     amount: 32_000_000_000, // 32 ETH (in Gwei)
 | ||||
|                     timestamp: genesis_time - 1, | ||||
|                     deposit_input: DepositInput { | ||||
|                     Validator { | ||||
|                         pubkey: keypair.pk.clone(), | ||||
|                         // Validator can withdraw using their main keypair.
 | ||||
|                         withdrawal_credentials: Hash256::from_slice( | ||||
|                             &get_withdrawal_credentials( | ||||
|                                 &keypair.pk, | ||||
|                                 spec.bls_withdrawal_prefix_byte, | ||||
|                             )[..], | ||||
|                         ), | ||||
|                         proof_of_possession: create_proof_of_possession(&keypair), | ||||
|                     }, | ||||
|                 }, | ||||
|             }) | ||||
|             .collect(); | ||||
|                         withdrawal_credentials, | ||||
|                         activation_epoch: spec.far_future_epoch, | ||||
|                         exit_epoch: spec.far_future_epoch, | ||||
|                         withdrawable_epoch: spec.far_future_epoch, | ||||
|                         initiated_exit: false, | ||||
|                         slashed: false, | ||||
|                     } | ||||
|                 }) | ||||
|                 .collect(); | ||||
| 
 | ||||
|         debug!("Creating the BeaconChain..."); | ||||
|             let balances = vec![32_000_000_000; validator_count]; | ||||
| 
 | ||||
|             state_builder.import_existing_validators( | ||||
|                 validators, | ||||
|                 balances, | ||||
|                 validator_count as u64, | ||||
|                 &spec, | ||||
|             ); | ||||
|         } else { | ||||
|             debug!("Generating initial validator deposits..."); | ||||
|             let deposits = generate_deposits_from_keypairs( | ||||
|                 &keypairs, | ||||
|                 genesis_time, | ||||
|                 spec.get_domain( | ||||
|                     spec.genesis_epoch, | ||||
|                     Domain::Deposit, | ||||
|                     &Fork { | ||||
|                         previous_version: spec.genesis_fork_version, | ||||
|                         current_version: spec.genesis_fork_version, | ||||
|                         epoch: spec.genesis_epoch, | ||||
|                     }, | ||||
|                 ), | ||||
|                 &spec, | ||||
|             ); | ||||
|             state_builder.process_initial_deposits(&deposits, &spec); | ||||
|         }; | ||||
| 
 | ||||
|         let genesis_state = state_builder.build(&spec).unwrap(); | ||||
|         let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); | ||||
|         let genesis_block = BeaconBlock::genesis(state_root, &spec); | ||||
| 
 | ||||
|         // Create the Beacon Chain
 | ||||
|         let beacon_chain = Arc::new( | ||||
|             BeaconChain::genesis( | ||||
|             BeaconChain::from_genesis( | ||||
|                 state_store.clone(), | ||||
|                 block_store.clone(), | ||||
|                 slot_clock, | ||||
|                 genesis_time, | ||||
|                 latest_eth1_data, | ||||
|                 initial_validator_deposits, | ||||
|                 genesis_state, | ||||
|                 genesis_block, | ||||
|                 spec.clone(), | ||||
|                 fork_choice, | ||||
|             ) | ||||
|  | ||||
| @ -0,0 +1,46 @@ | ||||
| use bls::get_withdrawal_credentials; | ||||
| use log::debug; | ||||
| use rayon::prelude::*; | ||||
| use types::*; | ||||
| 
 | ||||
| /// Generates a `Deposit` for each keypairs
 | ||||
| pub fn generate_deposits_from_keypairs( | ||||
|     keypairs: &[Keypair], | ||||
|     genesis_time: u64, | ||||
|     domain: u64, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Vec<Deposit> { | ||||
|     debug!( | ||||
|         "Generating {} validator deposits from random keypairs...", | ||||
|         keypairs.len() | ||||
|     ); | ||||
| 
 | ||||
|     let initial_validator_deposits = keypairs | ||||
|         .par_iter() | ||||
|         .map(|keypair| { | ||||
|             let withdrawal_credentials = Hash256::from_slice( | ||||
|                 &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], | ||||
|             ); | ||||
|             Deposit { | ||||
|                 branch: vec![], // branch verification is not specified.
 | ||||
|                 index: 0,       // index verification is not specified.
 | ||||
|                 deposit_data: DepositData { | ||||
|                     amount: 32_000_000_000, // 32 ETH (in Gwei)
 | ||||
|                     timestamp: genesis_time - 1, | ||||
|                     deposit_input: DepositInput { | ||||
|                         pubkey: keypair.pk.clone(), | ||||
|                         // Validator can withdraw using their main keypair.
 | ||||
|                         withdrawal_credentials: withdrawal_credentials.clone(), | ||||
|                         proof_of_possession: DepositInput::create_proof_of_possession( | ||||
|                             &keypair, | ||||
|                             &withdrawal_credentials, | ||||
|                             domain, | ||||
|                         ), | ||||
|                     }, | ||||
|                 }, | ||||
|             } | ||||
|         }) | ||||
|         .collect(); | ||||
| 
 | ||||
|     initial_validator_deposits | ||||
| } | ||||
| @ -1,69 +1,102 @@ | ||||
| use clap::{App, Arg}; | ||||
| use clap::{App, Arg, SubCommand}; | ||||
| use env_logger::{Builder, Env}; | ||||
| use std::{fs::File, io::prelude::*}; | ||||
| use test_case::TestCase; | ||||
| use yaml_rust::YamlLoader; | ||||
| use gen_keys::gen_keys; | ||||
| use run_test::run_test; | ||||
| use std::fs; | ||||
| use types::test_utils::keypairs_path; | ||||
| use types::ChainSpec; | ||||
| 
 | ||||
| mod beacon_chain_harness; | ||||
| mod gen_keys; | ||||
| mod run_test; | ||||
| mod test_case; | ||||
| mod validator_harness; | ||||
| 
 | ||||
| use validator_harness::ValidatorHarness; | ||||
| 
 | ||||
| fn main() { | ||||
|     let validator_file_path = keypairs_path(); | ||||
| 
 | ||||
|     let _ = fs::create_dir(validator_file_path.parent().unwrap()); | ||||
| 
 | ||||
|     let matches = App::new("Lighthouse Test Harness Runner") | ||||
|         .version("0.0.1") | ||||
|         .author("Sigma Prime <contact@sigmaprime.io>") | ||||
|         .about("Runs `test_harness` using a YAML test_case.") | ||||
|         .arg( | ||||
|             Arg::with_name("yaml") | ||||
|                 .long("yaml") | ||||
|                 .value_name("FILE") | ||||
|                 .help("YAML file test_case.") | ||||
|                 .required(true), | ||||
|         ) | ||||
|         .arg( | ||||
|             Arg::with_name("log") | ||||
|                 .long("log-level") | ||||
|                 .short("l") | ||||
|                 .value_name("LOG_LEVEL") | ||||
|                 .help("Logging level.") | ||||
|                 .possible_values(&["error", "warn", "info", "debug", "trace"]) | ||||
|                 .default_value("debug") | ||||
|                 .required(true), | ||||
|         ) | ||||
|         .arg( | ||||
|             Arg::with_name("spec") | ||||
|                 .long("spec") | ||||
|                 .short("s") | ||||
|                 .value_name("SPECIFICATION") | ||||
|                 .help("ChainSpec instantiation.") | ||||
|                 .possible_values(&["foundation", "few_validators"]) | ||||
|                 .default_value("foundation"), | ||||
|         ) | ||||
|         .subcommand( | ||||
|             SubCommand::with_name("run_test") | ||||
|                 .about("Executes a YAML test specification") | ||||
|                 .arg( | ||||
|                     Arg::with_name("yaml") | ||||
|                         .long("yaml") | ||||
|                         .value_name("FILE") | ||||
|                         .help("YAML file test_case.") | ||||
|                         .required(true), | ||||
|                 ) | ||||
|                 .arg( | ||||
|                     Arg::with_name("validators_dir") | ||||
|                         .long("validators-dir") | ||||
|                         .short("v") | ||||
|                         .value_name("VALIDATORS_DIR") | ||||
|                         .help("A directory with validator deposits and keypair YAML."), | ||||
|                 ), | ||||
|         ) | ||||
|         .subcommand( | ||||
|             SubCommand::with_name("gen_keys") | ||||
|                 .about("Builds a file of BLS keypairs for faster tests.") | ||||
|                 .arg( | ||||
|                     Arg::with_name("validator_count") | ||||
|                         .long("validator_count") | ||||
|                         .short("n") | ||||
|                         .value_name("VALIDATOR_COUNT") | ||||
|                         .help("Number of validators to generate.") | ||||
|                         .required(true), | ||||
|                 ) | ||||
|                 .arg( | ||||
|                     Arg::with_name("output_file") | ||||
|                         .long("output_file") | ||||
|                         .short("d") | ||||
|                         .value_name("GENESIS_TIME") | ||||
|                         .help("Output directory for generated YAML.") | ||||
|                         .default_value(validator_file_path.to_str().unwrap()), | ||||
|                 ), | ||||
|         ) | ||||
|         .get_matches(); | ||||
| 
 | ||||
|     if let Some(log_level) = matches.value_of("log") { | ||||
|         Builder::from_env(Env::default().default_filter_or(log_level)).init(); | ||||
|     } | ||||
| 
 | ||||
|     if let Some(yaml_file) = matches.value_of("yaml") { | ||||
|         let docs = { | ||||
|             let mut file = File::open(yaml_file).unwrap(); | ||||
|     let _spec = match matches.value_of("spec") { | ||||
|         Some("foundation") => ChainSpec::foundation(), | ||||
|         Some("few_validators") => ChainSpec::few_validators(), | ||||
|         _ => unreachable!(), // Has a default value, should always exist.
 | ||||
|     }; | ||||
| 
 | ||||
|             let mut yaml_str = String::new(); | ||||
|             file.read_to_string(&mut yaml_str).unwrap(); | ||||
|     if let Some(matches) = matches.subcommand_matches("run_test") { | ||||
|         run_test(matches); | ||||
|     } | ||||
| 
 | ||||
|             YamlLoader::load_from_str(&yaml_str).unwrap() | ||||
|         }; | ||||
| 
 | ||||
|         for doc in &docs { | ||||
|             // For each `test_cases` YAML in the document, build a `TestCase`, execute it and
 | ||||
|             // assert that the execution result matches the test_case description.
 | ||||
|             //
 | ||||
|             // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
 | ||||
|             // and a new `BeaconChain` is built as per the test_case.
 | ||||
|             //
 | ||||
|             // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
 | ||||
|             // and states in the chain is obtained and checked against the `results` specified in
 | ||||
|             // the `test_case`.
 | ||||
|             //
 | ||||
|             // If any of the expectations in the results are not met, the process
 | ||||
|             // panics with a message.
 | ||||
|             for test_case in doc["test_cases"].as_vec().unwrap() { | ||||
|                 let test_case = TestCase::from_yaml(test_case); | ||||
|                 test_case.assert_result_valid(test_case.execute()) | ||||
|             } | ||||
|         } | ||||
|     if let Some(matches) = matches.subcommand_matches("gen_keys") { | ||||
|         gen_keys(matches); | ||||
|     } | ||||
| } | ||||
|  | ||||
							
								
								
									
										21
									
								
								beacon_node/beacon_chain/test_harness/src/gen_keys.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								beacon_node/beacon_chain/test_harness/src/gen_keys.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,21 @@ | ||||
| use clap::{value_t, ArgMatches}; | ||||
| use log::debug; | ||||
| use std::path::Path; | ||||
| use types::test_utils::{generate_deterministic_keypairs, KeypairsFile}; | ||||
| 
 | ||||
| /// Creates a file containing BLS keypairs.
 | ||||
| pub fn gen_keys(matches: &ArgMatches) { | ||||
|     let validator_count = value_t!(matches.value_of("validator_count"), usize) | ||||
|         .expect("Validator count is required argument"); | ||||
|     let output_file = matches | ||||
|         .value_of("output_file") | ||||
|         .expect("Output file has a default value."); | ||||
| 
 | ||||
|     let keypairs = generate_deterministic_keypairs(validator_count); | ||||
| 
 | ||||
|     debug!("Writing keypairs to file..."); | ||||
| 
 | ||||
|     let keypairs_path = Path::new(output_file); | ||||
| 
 | ||||
|     keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap(); | ||||
| } | ||||
| @ -15,7 +15,7 @@ | ||||
| //! let validator_count = 8;
 | ||||
| //! let spec = ChainSpec::few_validators();
 | ||||
| //!
 | ||||
| //! let mut harness = BeaconChainHarness::new(spec, validator_count);
 | ||||
| //! let mut harness = BeaconChainHarness::new(spec, validator_count, None, true);
 | ||||
| //!
 | ||||
| //! harness.advance_chain_with_block();
 | ||||
| //!
 | ||||
|  | ||||
							
								
								
									
										42
									
								
								beacon_node/beacon_chain/test_harness/src/run_test.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								beacon_node/beacon_chain/test_harness/src/run_test.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,42 @@ | ||||
| use crate::test_case::TestCase; | ||||
| use clap::ArgMatches; | ||||
| use std::path::Path; | ||||
| use std::{fs::File, io::prelude::*}; | ||||
| use yaml_rust::YamlLoader; | ||||
| 
 | ||||
| /// Runs a YAML-specified test case.
 | ||||
| pub fn run_test(matches: &ArgMatches) { | ||||
|     if let Some(yaml_file) = matches.value_of("yaml") { | ||||
|         let docs = { | ||||
|             let mut file = File::open(yaml_file).unwrap(); | ||||
| 
 | ||||
|             let mut yaml_str = String::new(); | ||||
|             file.read_to_string(&mut yaml_str).unwrap(); | ||||
| 
 | ||||
|             YamlLoader::load_from_str(&yaml_str).unwrap() | ||||
|         }; | ||||
| 
 | ||||
|         for doc in &docs { | ||||
|             let validators_dir = matches | ||||
|                 .value_of("validators_dir") | ||||
|                 .and_then(|dir_str| Some(Path::new(dir_str))); | ||||
| 
 | ||||
|             // For each `test_cases` YAML in the document, build a `TestCase`, execute it and
 | ||||
|             // assert that the execution result matches the test_case description.
 | ||||
|             //
 | ||||
|             // In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
 | ||||
|             // and a new `BeaconChain` is built as per the test_case.
 | ||||
|             //
 | ||||
|             // After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
 | ||||
|             // and states in the chain is obtained and checked against the `results` specified in
 | ||||
|             // the `test_case`.
 | ||||
|             //
 | ||||
|             // If any of the expectations in the results are not met, the process
 | ||||
|             // panics with a message.
 | ||||
|             for test_case in doc["test_cases"].as_vec().unwrap() { | ||||
|                 let test_case = TestCase::from_yaml(test_case); | ||||
|                 test_case.assert_result_valid(test_case.execute(validators_dir)) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @ -3,14 +3,13 @@ | ||||
| 
 | ||||
| use crate::beacon_chain_harness::BeaconChainHarness; | ||||
| use beacon_chain::CheckPoint; | ||||
| use bls::{create_proof_of_possession, get_withdrawal_credentials}; | ||||
| use bls::get_withdrawal_credentials; | ||||
| use log::{info, warn}; | ||||
| use ssz::SignedRoot; | ||||
| use std::path::Path; | ||||
| use types::*; | ||||
| 
 | ||||
| use types::{ | ||||
|     attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder, | ||||
| }; | ||||
| use types::test_utils::{TestingAttesterSlashingBuilder, TestingProposerSlashingBuilder}; | ||||
| use yaml_rust::Yaml; | ||||
| 
 | ||||
| mod config; | ||||
| @ -70,7 +69,7 @@ impl TestCase { | ||||
| 
 | ||||
|     /// Executes the test case, returning an `ExecutionResult`.
 | ||||
|     #[allow(clippy::cyclomatic_complexity)] | ||||
|     pub fn execute(&self) -> ExecutionResult { | ||||
|     pub fn execute(&self, validators_dir: Option<&Path>) -> ExecutionResult { | ||||
|         let spec = self.spec(); | ||||
|         let validator_count = self.config.deposits_for_chain_start; | ||||
|         let slots = self.config.num_slots; | ||||
| @ -80,7 +79,7 @@ impl TestCase { | ||||
|             validator_count | ||||
|         ); | ||||
| 
 | ||||
|         let mut harness = BeaconChainHarness::new(spec, validator_count); | ||||
|         let mut harness = BeaconChainHarness::new(spec, validator_count, validators_dir, true); | ||||
| 
 | ||||
|         info!("Starting simulation across {} slots...", slots); | ||||
| 
 | ||||
| @ -257,11 +256,23 @@ fn build_deposit( | ||||
|     index_offset: u64, | ||||
| ) -> (Deposit, Keypair) { | ||||
|     let keypair = Keypair::random(); | ||||
|     let proof_of_possession = create_proof_of_possession(&keypair); | ||||
|     let index = harness.beacon_chain.state.read().deposit_index + index_offset; | ||||
|     let withdrawal_credentials = Hash256::from_slice( | ||||
|         &get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..], | ||||
|     ); | ||||
|     let proof_of_possession = DepositInput::create_proof_of_possession( | ||||
|         &keypair, | ||||
|         &withdrawal_credentials, | ||||
|         harness.spec.get_domain( | ||||
|             harness | ||||
|                 .beacon_chain | ||||
|                 .state | ||||
|                 .read() | ||||
|                 .current_epoch(&harness.spec), | ||||
|             Domain::Deposit, | ||||
|             &harness.beacon_chain.state.read().fork, | ||||
|         ), | ||||
|     ); | ||||
|     let index = harness.beacon_chain.state.read().deposit_index + index_offset; | ||||
| 
 | ||||
|     let deposit = Deposit { | ||||
|         // Note: `branch` and `index` will need to be updated once the spec defines their
 | ||||
| @ -318,7 +329,7 @@ fn build_double_vote_attester_slashing( | ||||
|             .expect("Unable to sign AttesterSlashing") | ||||
|     }; | ||||
| 
 | ||||
|     AttesterSlashingBuilder::double_vote(validator_indices, signer) | ||||
|     TestingAttesterSlashingBuilder::double_vote(validator_indices, signer) | ||||
| } | ||||
| 
 | ||||
| /// Builds an `ProposerSlashing` for some `validator_index`.
 | ||||
| @ -331,5 +342,5 @@ fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) - | ||||
|             .expect("Unable to sign AttesterSlashing") | ||||
|     }; | ||||
| 
 | ||||
|     ProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec) | ||||
|     TestingProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec) | ||||
| } | ||||
|  | ||||
| @ -10,7 +10,7 @@ fn it_can_build_on_genesis_block() { | ||||
|     let spec = ChainSpec::few_validators(); | ||||
|     let validator_count = 8; | ||||
| 
 | ||||
|     let mut harness = BeaconChainHarness::new(spec, validator_count as usize); | ||||
|     let mut harness = BeaconChainHarness::new(spec, validator_count as usize, None, true); | ||||
| 
 | ||||
|     harness.advance_chain_with_block(); | ||||
| } | ||||
| @ -25,7 +25,7 @@ fn it_can_produce_past_first_epoch_boundary() { | ||||
| 
 | ||||
|     debug!("Starting harness build..."); | ||||
| 
 | ||||
|     let mut harness = BeaconChainHarness::new(spec, validator_count); | ||||
|     let mut harness = BeaconChainHarness::new(spec, validator_count, None, true); | ||||
| 
 | ||||
|     debug!("Harness built, tests starting.."); | ||||
| 
 | ||||
|  | ||||
| @ -198,6 +198,7 @@ mod tests { | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     #[ignore] | ||||
|     fn test_block_at_slot() { | ||||
|         let db = Arc::new(MemoryDB::open()); | ||||
|         let bs = Arc::new(BeaconBlockStore::new(db.clone())); | ||||
|  | ||||
| @ -8,7 +8,6 @@ use std::path::PathBuf; | ||||
| use crate::config::LighthouseConfig; | ||||
| use crate::rpc::start_server; | ||||
| use beacon_chain::BeaconChain; | ||||
| use bls::create_proof_of_possession; | ||||
| use clap::{App, Arg}; | ||||
| use db::{ | ||||
|     stores::{BeaconBlockStore, BeaconStateStore}, | ||||
| @ -17,8 +16,12 @@ use db::{ | ||||
| use fork_choice::BitwiseLMDGhost; | ||||
| use slog::{error, info, o, Drain}; | ||||
| use slot_clock::SystemTimeSlotClock; | ||||
| use ssz::TreeHash; | ||||
| use std::sync::Arc; | ||||
| use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair}; | ||||
| use types::{ | ||||
|     beacon_state::BeaconStateBuilder, BeaconBlock, ChainSpec, Deposit, DepositData, DepositInput, | ||||
|     Domain, Eth1Data, Fork, Hash256, Keypair, | ||||
| }; | ||||
| 
 | ||||
| fn main() { | ||||
|     let decorator = slog_term::TermDecorator::new().build(); | ||||
| @ -97,7 +100,8 @@ fn main() { | ||||
|         .iter() | ||||
|         .map(|_| Keypair::random()) | ||||
|         .collect(); | ||||
|     let initial_validator_deposits = keypairs | ||||
| 
 | ||||
|     let initial_validator_deposits: Vec<Deposit> = keypairs | ||||
|         .iter() | ||||
|         .map(|keypair| Deposit { | ||||
|             branch: vec![], // branch verification is not specified.
 | ||||
| @ -108,20 +112,38 @@ fn main() { | ||||
|                 deposit_input: DepositInput { | ||||
|                     pubkey: keypair.pk.clone(), | ||||
|                     withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
 | ||||
|                     proof_of_possession: create_proof_of_possession(&keypair), | ||||
|                     proof_of_possession: DepositInput::create_proof_of_possession( | ||||
|                         &keypair, | ||||
|                         &Hash256::zero(), | ||||
|                         spec.get_domain( | ||||
|                             // Get domain from genesis fork_version
 | ||||
|                             spec.genesis_epoch, | ||||
|                             Domain::Deposit, | ||||
|                             &Fork { | ||||
|                                 previous_version: spec.genesis_fork_version, | ||||
|                                 current_version: spec.genesis_fork_version, | ||||
|                                 epoch: spec.genesis_epoch, | ||||
|                             }, | ||||
|                         ), | ||||
|                     ), | ||||
|                 }, | ||||
|             }, | ||||
|         }) | ||||
|         .collect(); | ||||
| 
 | ||||
|     let mut state_builder = BeaconStateBuilder::new(genesis_time, latest_eth1_data, &spec); | ||||
|     state_builder.process_initial_deposits(&initial_validator_deposits, &spec); | ||||
|     let genesis_state = state_builder.build(&spec).unwrap(); | ||||
|     let state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); | ||||
|     let genesis_block = BeaconBlock::genesis(state_root, &spec); | ||||
| 
 | ||||
|     // Genesis chain
 | ||||
|     let _chain_result = BeaconChain::genesis( | ||||
|     let _chain_result = BeaconChain::from_genesis( | ||||
|         state_store.clone(), | ||||
|         block_store.clone(), | ||||
|         slot_clock, | ||||
|         genesis_time, | ||||
|         latest_eth1_data, | ||||
|         initial_validator_deposits, | ||||
|         genesis_state, | ||||
|         genesis_block, | ||||
|         spec, | ||||
|         fork_choice, | ||||
|     ); | ||||
|  | ||||
| @ -409,11 +409,23 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> { | ||||
|                         *child_votes.entry(child).or_insert_with(|| 0) += vote; | ||||
|                     } | ||||
|                 } | ||||
|                 // given the votes on the children, find the best child
 | ||||
|                 current_head = self | ||||
|                     .choose_best_child(&child_votes) | ||||
|                     .ok_or(ForkChoiceError::CannotFindBestChild)?; | ||||
|                 trace!("Best child found: {}", current_head); | ||||
|                 // check if we have votes of children, if not select the smallest hash child
 | ||||
|                 if child_votes.is_empty() { | ||||
|                     current_head = *children | ||||
|                         .iter() | ||||
|                         .min_by(|child1, child2| child1.cmp(child2)) | ||||
|                         .expect("Must be children here"); | ||||
|                     trace!( | ||||
|                         "Children have no votes - smallest hash chosen: {}", | ||||
|                         current_head | ||||
|                     ); | ||||
|                 } else { | ||||
|                     // given the votes on the children, find the best child
 | ||||
|                     current_head = self | ||||
|                         .choose_best_child(&child_votes) | ||||
|                         .ok_or(ForkChoiceError::CannotFindBestChild)?; | ||||
|                     trace!("Best child found: {}", current_head); | ||||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             // didn't find head yet, proceed to next iteration
 | ||||
|  | ||||
| @ -22,6 +22,7 @@ extern crate types; | ||||
| 
 | ||||
| pub mod bitwise_lmd_ghost; | ||||
| pub mod longest_chain; | ||||
| pub mod optimized_lmd_ghost; | ||||
| pub mod slow_lmd_ghost; | ||||
| 
 | ||||
| use db::stores::BeaconBlockAtSlotError; | ||||
| @ -30,6 +31,7 @@ use types::{BeaconBlock, ChainSpec, Hash256}; | ||||
| 
 | ||||
| pub use bitwise_lmd_ghost::BitwiseLMDGhost; | ||||
| pub use longest_chain::LongestChain; | ||||
| pub use optimized_lmd_ghost::OptimizedLMDGhost; | ||||
| pub use slow_lmd_ghost::SlowLMDGhost; | ||||
| 
 | ||||
| /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures
 | ||||
| @ -101,4 +103,6 @@ pub enum ForkChoiceAlgorithm { | ||||
|     SlowLMDGhost, | ||||
|     /// An optimised version of bitwise LMD-GHOST by Vitalik.
 | ||||
|     BitwiseLMDGhost, | ||||
|     /// An optimised implementation of LMD ghost.
 | ||||
|     OptimizedLMDGhost, | ||||
| } | ||||
|  | ||||
							
								
								
									
										465
									
								
								eth2/fork_choice/src/optimized_lmd_ghost.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										465
									
								
								eth2/fork_choice/src/optimized_lmd_ghost.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,465 @@ | ||||
| //! The optimised bitwise LMD-GHOST fork choice rule.
 | ||||
| extern crate bit_vec; | ||||
| 
 | ||||
| use crate::{ForkChoice, ForkChoiceError}; | ||||
| use db::{ | ||||
|     stores::{BeaconBlockStore, BeaconStateStore}, | ||||
|     ClientDB, | ||||
| }; | ||||
| use log::{debug, trace}; | ||||
| use std::cmp::Ordering; | ||||
| use std::collections::HashMap; | ||||
| use std::sync::Arc; | ||||
| use types::{ | ||||
|     readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, | ||||
|     ChainSpec, Hash256, Slot, SlotHeight, | ||||
| }; | ||||
| 
 | ||||
| //TODO: Pruning - Children
 | ||||
| //TODO: Handle Syncing
 | ||||
| 
 | ||||
| // NOTE: This uses u32 to represent difference between block heights. Thus this is only
 | ||||
| // applicable for block height differences in the range of a u32.
 | ||||
| // This can potentially be parallelized in some parts.
 | ||||
| 
 | ||||
| /// Compute the base-2 logarithm of an integer, floored (rounded down)
 | ||||
| #[inline] | ||||
| fn log2_int(x: u64) -> u32 { | ||||
|     if x == 0 { | ||||
|         return 0; | ||||
|     } | ||||
|     63 - x.leading_zeros() | ||||
| } | ||||
| 
 | ||||
| fn power_of_2_below(x: u64) -> u64 { | ||||
|     2u64.pow(log2_int(x)) | ||||
| } | ||||
| 
 | ||||
| /// Stores the necessary data structures to run the optimised lmd ghost algorithm.
 | ||||
| pub struct OptimizedLMDGhost<T: ClientDB + Sized> { | ||||
|     /// A cache of known ancestors at given heights for a specific block.
 | ||||
|     //TODO: Consider FnvHashMap
 | ||||
|     cache: HashMap<CacheKey<u64>, Hash256>, | ||||
|     /// Log lookup table for blocks to their ancestors.
 | ||||
|     //TODO: Verify we only want/need a size 16 log lookup
 | ||||
|     ancestors: Vec<HashMap<Hash256, Hash256>>, | ||||
|     /// Stores the children for any given parent.
 | ||||
|     children: HashMap<Hash256, Vec<Hash256>>, | ||||
|     /// The latest attestation targets as a map of validator index to block hash.
 | ||||
|     //TODO: Could this be a fixed size vec
 | ||||
|     latest_attestation_targets: HashMap<u64, Hash256>, | ||||
|     /// Block storage access.
 | ||||
|     block_store: Arc<BeaconBlockStore<T>>, | ||||
|     /// State storage access.
 | ||||
|     state_store: Arc<BeaconStateStore<T>>, | ||||
|     max_known_height: SlotHeight, | ||||
| } | ||||
| 
 | ||||
| impl<T> OptimizedLMDGhost<T> | ||||
| where | ||||
|     T: ClientDB + Sized, | ||||
| { | ||||
|     pub fn new( | ||||
|         block_store: Arc<BeaconBlockStore<T>>, | ||||
|         state_store: Arc<BeaconStateStore<T>>, | ||||
|     ) -> Self { | ||||
|         OptimizedLMDGhost { | ||||
|             cache: HashMap::new(), | ||||
|             ancestors: vec![HashMap::new(); 16], | ||||
|             latest_attestation_targets: HashMap::new(), | ||||
|             children: HashMap::new(), | ||||
|             max_known_height: SlotHeight::new(0), | ||||
|             block_store, | ||||
|             state_store, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to
 | ||||
|     /// weighted votes.
 | ||||
|     pub fn get_latest_votes( | ||||
|         &self, | ||||
|         state_root: &Hash256, | ||||
|         block_slot: Slot, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<HashMap<Hash256, u64>, ForkChoiceError> { | ||||
|         // get latest votes
 | ||||
|         // Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) //
 | ||||
|         // FORK_CHOICE_BALANCE_INCREMENT
 | ||||
|         // build a hashmap of block_hash to weighted votes
 | ||||
|         let mut latest_votes: HashMap<Hash256, u64> = HashMap::new(); | ||||
|         // gets the current weighted votes
 | ||||
|         let current_state = self | ||||
|             .state_store | ||||
|             .get_deserialized(&state_root)? | ||||
|             .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; | ||||
| 
 | ||||
|         let active_validator_indices = get_active_validator_indices( | ||||
|     &current_state.validator_registry[..], | ||||
|             block_slot.epoch(spec.slots_per_epoch), | ||||
|         ); | ||||
| 
 | ||||
|         for index in active_validator_indices { | ||||
|             let balance = std::cmp::min( | ||||
|                 current_state.validator_balances[index], | ||||
|                 spec.max_deposit_amount, | ||||
|             ) / spec.fork_choice_balance_increment; | ||||
|             if balance > 0 { | ||||
|                 if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) { | ||||
|                     *latest_votes.entry(*target).or_insert_with(|| 0) += balance; | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         trace!("Latest votes: {:?}", latest_votes); | ||||
|         Ok(latest_votes) | ||||
|     } | ||||
| 
 | ||||
|     /// Gets the ancestor at a given height `at_height` of a block specified by `block_hash`.
 | ||||
|     fn get_ancestor( | ||||
|         &mut self, | ||||
|         block_hash: Hash256, | ||||
|         target_height: SlotHeight, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Option<Hash256> { | ||||
|         // return None if we can't get the block from the db.
 | ||||
|         let block_height = { | ||||
|             let block_slot = self | ||||
|                 .block_store | ||||
|                 .get_deserialized(&block_hash) | ||||
|                 .ok()? | ||||
|                 .expect("Should have returned already if None") | ||||
|                 .slot; | ||||
| 
 | ||||
|             block_slot.height(spec.genesis_slot) | ||||
|         }; | ||||
| 
 | ||||
|         // verify we haven't exceeded the block height
 | ||||
|         if target_height >= block_height { | ||||
|             if target_height > block_height { | ||||
|                 return None; | ||||
|             } else { | ||||
|                 return Some(block_hash); | ||||
|             } | ||||
|         } | ||||
|         // check if the result is stored in our cache
 | ||||
|         let cache_key = CacheKey::new(&block_hash, target_height.as_u64()); | ||||
|         if let Some(ancestor) = self.cache.get(&cache_key) { | ||||
|             return Some(*ancestor); | ||||
|         } | ||||
| 
 | ||||
|         // not in the cache recursively search for ancestors using a log-lookup
 | ||||
|         if let Some(ancestor) = { | ||||
|             let ancestor_lookup = self.ancestors | ||||
|                 [log2_int((block_height - target_height - 1u64).as_u64()) as usize] | ||||
|                 .get(&block_hash) | ||||
|                 //TODO: Panic if we can't lookup and fork choice fails
 | ||||
|                 .expect("All blocks should be added to the ancestor log lookup table"); | ||||
|             self.get_ancestor(*ancestor_lookup, target_height, &spec) | ||||
|         } { | ||||
|             // add the result to the cache
 | ||||
|             self.cache.insert(cache_key, ancestor); | ||||
|             return Some(ancestor); | ||||
|         } | ||||
| 
 | ||||
|         None | ||||
|     } | ||||
| 
 | ||||
|     // looks for an obvious block winner given the latest votes for a specific height
 | ||||
|     fn get_clear_winner( | ||||
|         &mut self, | ||||
|         latest_votes: &HashMap<Hash256, u64>, | ||||
|         block_height: SlotHeight, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Option<Hash256> { | ||||
|         // map of vote counts for every hash at this height
 | ||||
|         let mut current_votes: HashMap<Hash256, u64> = HashMap::new(); | ||||
|         let mut total_vote_count = 0; | ||||
| 
 | ||||
|         trace!("Clear winner at block height: {}", block_height); | ||||
|         // loop through the latest votes and count all votes
 | ||||
|         // these have already been weighted by balance
 | ||||
|         for (hash, votes) in latest_votes.iter() { | ||||
|             if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) { | ||||
|                 let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); | ||||
|                 current_votes.insert(ancestor, current_vote_value + *votes); | ||||
|                 total_vote_count += votes; | ||||
|             } | ||||
|         } | ||||
|         // Check if there is a clear block winner at this height. If so return it.
 | ||||
|         for (hash, votes) in current_votes.iter() { | ||||
|             if *votes > total_vote_count / 2 { | ||||
|                 // we have a clear winner, return it
 | ||||
|                 return Some(*hash); | ||||
|             } | ||||
|         } | ||||
|         // didn't find a clear winner
 | ||||
|         None | ||||
|     } | ||||
| 
 | ||||
|     // Finds the best child (one with highest votes)
 | ||||
|     fn choose_best_child(&self, votes: &HashMap<Hash256, u64>) -> Option<Hash256> { | ||||
|         if votes.is_empty() { | ||||
|             return None; | ||||
|         } | ||||
| 
 | ||||
|         // Iterate through hashmap to get child with maximum votes
 | ||||
|         let best_child = votes.iter().max_by(|(child1, v1), (child2, v2)| { | ||||
|             let mut result = v1.cmp(v2); | ||||
|             // If votes are equal, choose smaller hash to break ties deterministically
 | ||||
|             if result == Ordering::Equal { | ||||
|                 // Reverse so that max_by chooses smaller hash
 | ||||
|                 result = child1.cmp(child2).reverse(); | ||||
|             } | ||||
|             result | ||||
|         }); | ||||
| 
 | ||||
|         Some(*best_child.unwrap().0) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: ClientDB + Sized> ForkChoice for OptimizedLMDGhost<T> { | ||||
|     fn add_block( | ||||
|         &mut self, | ||||
|         block: &BeaconBlock, | ||||
|         block_hash: &Hash256, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<(), ForkChoiceError> { | ||||
|         // get the height of the parent
 | ||||
|         let parent_height = self | ||||
|             .block_store | ||||
|             .get_deserialized(&block.parent_root)? | ||||
|             .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? | ||||
|             .slot() | ||||
|             .height(spec.genesis_slot); | ||||
| 
 | ||||
|         let parent_hash = &block.parent_root; | ||||
| 
 | ||||
|         // add the new block to the children of parent
 | ||||
|         (*self | ||||
|             .children | ||||
|             .entry(block.parent_root) | ||||
|             .or_insert_with(|| vec![])) | ||||
|         .push(block_hash.clone()); | ||||
| 
 | ||||
|         // build the ancestor data structure
 | ||||
|         for index in 0..16 { | ||||
|             if parent_height % (1 << index) == 0 { | ||||
|                 self.ancestors[index].insert(*block_hash, *parent_hash); | ||||
|             } else { | ||||
|                 // TODO: This is unsafe. Will panic if parent_hash doesn't exist. Using it for debugging
 | ||||
|                 let parent_ancestor = self.ancestors[index][parent_hash]; | ||||
|                 self.ancestors[index].insert(*block_hash, parent_ancestor); | ||||
|             } | ||||
|         } | ||||
|         // update the max height
 | ||||
|         self.max_known_height = std::cmp::max(self.max_known_height, parent_height + 1); | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     fn add_attestation( | ||||
|         &mut self, | ||||
|         validator_index: u64, | ||||
|         target_block_root: &Hash256, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<(), ForkChoiceError> { | ||||
|         // simply add the attestation to the latest_attestation_target if the block_height is
 | ||||
|         // larger
 | ||||
|         trace!( | ||||
|             "Adding attestation of validator: {:?} for block: {}", | ||||
|             validator_index, | ||||
|             target_block_root | ||||
|         ); | ||||
|         let attestation_target = self | ||||
|             .latest_attestation_targets | ||||
|             .entry(validator_index) | ||||
|             .or_insert_with(|| *target_block_root); | ||||
|         // if we already have a value
 | ||||
|         if attestation_target != target_block_root { | ||||
|             trace!("Old attestation found: {:?}", attestation_target); | ||||
|             // get the height of the target block
 | ||||
|             let block_height = self | ||||
|                 .block_store | ||||
|                 .get_deserialized(&target_block_root)? | ||||
|                 .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? | ||||
|                 .slot() | ||||
|                 .height(spec.genesis_slot); | ||||
| 
 | ||||
|             // get the height of the past target block
 | ||||
|             let past_block_height = self | ||||
|                 .block_store | ||||
|                 .get_deserialized(&attestation_target)? | ||||
|                 .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? | ||||
|                 .slot() | ||||
|                 .height(spec.genesis_slot); | ||||
|             // update the attestation only if the new target is higher
 | ||||
|             if past_block_height < block_height { | ||||
|                 trace!("Updating old attestation"); | ||||
|                 *attestation_target = *target_block_root; | ||||
|             } | ||||
|         } | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Perform lmd_ghost on the current chain to find the head.
 | ||||
|     fn find_head( | ||||
|         &mut self, | ||||
|         justified_block_start: &Hash256, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<Hash256, ForkChoiceError> { | ||||
|         debug!( | ||||
|             "Starting optimised fork choice at block: {}", | ||||
|             justified_block_start | ||||
|         ); | ||||
|         let block = self | ||||
|             .block_store | ||||
|             .get_deserialized(&justified_block_start)? | ||||
|             .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; | ||||
| 
 | ||||
|         let block_slot = block.slot(); | ||||
|         let state_root = block.state_root(); | ||||
|         let mut block_height = block_slot.height(spec.genesis_slot); | ||||
| 
 | ||||
|         let mut current_head = *justified_block_start; | ||||
| 
 | ||||
|         let mut latest_votes = self.get_latest_votes(&state_root, block_slot, spec)?; | ||||
| 
 | ||||
|         // remove any votes that don't relate to our current head.
 | ||||
|         latest_votes | ||||
|             .retain(|hash, _| self.get_ancestor(*hash, block_height, spec) == Some(current_head)); | ||||
| 
 | ||||
|         // begin searching for the head
 | ||||
|         loop { | ||||
|             debug!( | ||||
|                 "Iteration for block: {} with vote length: {}", | ||||
|                 current_head, | ||||
|                 latest_votes.len() | ||||
|             ); | ||||
|             // if there are no children, we are done, return the current_head
 | ||||
|             let children = match self.children.get(&current_head) { | ||||
|                 Some(children) => children.clone(), | ||||
|                 None => { | ||||
|                     debug!("Head found: {}", current_head); | ||||
|                     return Ok(current_head); | ||||
|                 } | ||||
|             }; | ||||
| 
 | ||||
|             // logarithmic lookup blocks to see if there are obvious winners, if so,
 | ||||
|             // progress to the next iteration.
 | ||||
|             let mut step = | ||||
|                 power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u64()) / 2; | ||||
|             while step > 0 { | ||||
|                 trace!("Current Step: {}", step); | ||||
|                 if let Some(clear_winner) = self.get_clear_winner( | ||||
|                     &latest_votes, | ||||
|                     block_height - (block_height % step) + step, | ||||
|                     spec, | ||||
|                 ) { | ||||
|                     current_head = clear_winner; | ||||
|                     break; | ||||
|                 } | ||||
|                 step /= 2; | ||||
|             } | ||||
|             if step > 0 { | ||||
|                 trace!("Found clear winner: {}", current_head); | ||||
|             } | ||||
|             // if our skip lookup failed and we only have one child, progress to that child
 | ||||
|             else if children.len() == 1 { | ||||
|                 current_head = children[0]; | ||||
|                 trace!( | ||||
|                     "Lookup failed, only one child, proceeding to child: {}", | ||||
|                     current_head | ||||
|                 ); | ||||
|             } | ||||
|             // we need to find the best child path to progress down.
 | ||||
|             else { | ||||
|                 trace!("Searching for best child"); | ||||
|                 let mut child_votes = HashMap::new(); | ||||
|                 for (voted_hash, vote) in latest_votes.iter() { | ||||
|                     // if the latest votes correspond to a child
 | ||||
|                     if let Some(child) = self.get_ancestor(*voted_hash, block_height + 1, spec) { | ||||
|                         // add up the votes for each child
 | ||||
|                         *child_votes.entry(child).or_insert_with(|| 0) += vote; | ||||
|                     } | ||||
|                 } | ||||
|                 // check if we have votes of children, if not select the smallest hash child
 | ||||
|                 if child_votes.is_empty() { | ||||
|                     current_head = *children | ||||
|                         .iter() | ||||
|                         .min_by(|child1, child2| child1.cmp(child2)) | ||||
|                         .expect("Must be children here"); | ||||
|                     trace!( | ||||
|                         "Children have no votes - smallest hash chosen: {}", | ||||
|                         current_head | ||||
|                     ); | ||||
|                 } else { | ||||
|                     // given the votes on the children, find the best child
 | ||||
|                     current_head = self | ||||
|                         .choose_best_child(&child_votes) | ||||
|                         .ok_or(ForkChoiceError::CannotFindBestChild)?; | ||||
|                     trace!("Best child found: {}", current_head); | ||||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             // didn't find head yet, proceed to next iteration
 | ||||
|             // update block height
 | ||||
|             block_height = self | ||||
|                 .block_store | ||||
|                 .get_deserialized(&current_head)? | ||||
|                 .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? | ||||
|                 .slot() | ||||
|                 .height(spec.genesis_slot); | ||||
|             // prune the latest votes for votes that are not part of current chosen chain
 | ||||
|             // more specifically, only keep votes that have head as an ancestor
 | ||||
|             for hash in latest_votes.keys() { | ||||
|                 trace!( | ||||
|                     "Ancestor for vote: {} at height: {} is: {:?}", | ||||
|                     hash, | ||||
|                     block_height, | ||||
|                     self.get_ancestor(*hash, block_height, spec) | ||||
|                 ); | ||||
|             } | ||||
|             latest_votes.retain(|hash, _| { | ||||
|                 self.get_ancestor(*hash, block_height, spec) == Some(current_head) | ||||
|             }); | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Type for storing blocks in a memory cache. Key is comprised of block-hash plus the height.
 | ||||
| #[derive(PartialEq, Eq, Hash)] | ||||
| pub struct CacheKey<T> { | ||||
|     block_hash: Hash256, | ||||
|     block_height: T, | ||||
| } | ||||
| 
 | ||||
| impl<T> CacheKey<T> { | ||||
|     pub fn new(block_hash: &Hash256, block_height: T) -> Self { | ||||
|         CacheKey { | ||||
|             block_hash: *block_hash, | ||||
|             block_height, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_power_of_2_below() { | ||||
|         assert_eq!(power_of_2_below(4), 4); | ||||
|         assert_eq!(power_of_2_below(5), 4); | ||||
|         assert_eq!(power_of_2_below(7), 4); | ||||
|         assert_eq!(power_of_2_below(24), 16); | ||||
|         assert_eq!(power_of_2_below(32), 32); | ||||
|         assert_eq!(power_of_2_below(33), 32); | ||||
|         assert_eq!(power_of_2_below(63), 32); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_power_of_2_below_large() { | ||||
|         let pow: u64 = 1 << 24; | ||||
|         for x in (pow - 20)..(pow + 20) { | ||||
|             assert!(power_of_2_below(x) <= x, "{}", x); | ||||
|         } | ||||
|     } | ||||
| } | ||||
| @ -1 +0,0 @@ | ||||
| 
 | ||||
| @ -210,6 +210,7 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> { | ||||
|             trace!("Children found: {:?}", children); | ||||
| 
 | ||||
|             let mut head_vote_count = 0; | ||||
|             head_hash = children[0]; | ||||
|             for child_hash in children { | ||||
|                 let vote_count = self.get_vote_count(&latest_votes, &child_hash)?; | ||||
|                 trace!("Vote count for child: {} is: {}", child_hash, vote_count); | ||||
| @ -218,6 +219,12 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> { | ||||
|                     head_hash = *child_hash; | ||||
|                     head_vote_count = vote_count; | ||||
|                 } | ||||
|                 // resolve ties - choose smaller hash
 | ||||
|                 else if vote_count == head_vote_count { | ||||
|                     if *child_hash < head_hash { | ||||
|                         head_hash = *child_hash; | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|         Ok(head_hash) | ||||
|  | ||||
| @ -63,3 +63,82 @@ test_cases: | ||||
|     - b7: 2 | ||||
|   heads: | ||||
|     - id: 'b4' | ||||
| - blocks: | ||||
|     - id: 'b0' | ||||
|       parent: 'b0' | ||||
|     - id: 'b1' | ||||
|       parent: 'b0' | ||||
|     - id: 'b2' | ||||
|       parent: 'b0' | ||||
|     - id: 'b3' | ||||
|       parent: 'b0' | ||||
|     - id: 'b4' | ||||
|       parent: 'b1' | ||||
|     - id: 'b5' | ||||
|       parent: 'b1' | ||||
|     - id: 'b6' | ||||
|       parent: 'b2' | ||||
|     - id: 'b7' | ||||
|       parent: 'b2' | ||||
|     - id: 'b8' | ||||
|       parent: 'b3' | ||||
|     - id: 'b9' | ||||
|       parent: 'b3' | ||||
|   weights: | ||||
|     - b1: 2 | ||||
|     - b2: 1 | ||||
|     - b3: 1 | ||||
|     - b4: 7 | ||||
|     - b5: 5 | ||||
|     - b6: 2 | ||||
|     - b7: 4 | ||||
|     - b8: 4 | ||||
|     - b9: 2 | ||||
|   heads: | ||||
|     - id: 'b4' | ||||
| - blocks: | ||||
|     - id: 'b0' | ||||
|       parent: 'b0' | ||||
|     - id: 'b1' | ||||
|       parent: 'b0' | ||||
|     - id: 'b2' | ||||
|       parent: 'b0' | ||||
|     - id: 'b3' | ||||
|       parent: 'b0' | ||||
|     - id: 'b4' | ||||
|       parent: 'b1' | ||||
|     - id: 'b5' | ||||
|       parent: 'b1' | ||||
|     - id: 'b6' | ||||
|       parent: 'b2' | ||||
|     - id: 'b7' | ||||
|       parent: 'b2' | ||||
|     - id: 'b8' | ||||
|       parent: 'b3' | ||||
|     - id: 'b9' | ||||
|       parent: 'b3' | ||||
|   weights: | ||||
|     - b1: 1 | ||||
|     - b2: 1 | ||||
|     - b3: 1 | ||||
|     - b4: 7 | ||||
|     - b5: 5 | ||||
|     - b6: 2 | ||||
|     - b7: 4 | ||||
|     - b8: 4 | ||||
|     - b9: 2 | ||||
|   heads: | ||||
|     - id: 'b7' | ||||
| - blocks: | ||||
|     - id: 'b0' | ||||
|       parent: 'b0' | ||||
|     - id: 'b1' | ||||
|       parent: 'b0' | ||||
|     - id: 'b2' | ||||
|       parent: 'b0' | ||||
|   weights: | ||||
|     - b1: 0 | ||||
|     - b2: 0 | ||||
|   heads: | ||||
|     - id: 'b1' | ||||
| 
 | ||||
|  | ||||
| @ -35,3 +35,31 @@ test_cases: | ||||
|     - b3: 3 | ||||
|   heads: | ||||
|     - id: 'b1' | ||||
| # children with equal weights: should choose the lower hash, b2 | ||||
| - blocks: | ||||
|     - id: 'b0' | ||||
|       parent: 'b0' | ||||
|     - id: 'b1' | ||||
|       parent: 'b0' | ||||
|     - id: 'b2' | ||||
|       parent: 'b0' | ||||
|     - id: 'b3' | ||||
|       parent: 'b0' | ||||
|   weights: | ||||
|     - b1: 5 | ||||
|     - b2: 6 | ||||
|     - b3: 6 | ||||
|   heads: | ||||
|     - id: 'b2' | ||||
| - blocks: | ||||
|     - id: 'b0' | ||||
|       parent: 'b0' | ||||
|     - id: 'b1' | ||||
|       parent: 'b0' | ||||
|     - id: 'b2' | ||||
|       parent: 'b0' | ||||
|   weights: | ||||
|     - b1: 0 | ||||
|     - b2: 0 | ||||
|   heads: | ||||
|     - id: 'b1' | ||||
|  | ||||
| @ -3,7 +3,7 @@ | ||||
| extern crate beacon_chain; | ||||
| extern crate bls; | ||||
| extern crate db; | ||||
| //extern crate env_logger; // for debugging
 | ||||
| // extern crate env_logger; // for debugging
 | ||||
| extern crate fork_choice; | ||||
| extern crate hex; | ||||
| extern crate log; | ||||
| @ -12,22 +12,35 @@ extern crate types; | ||||
| extern crate yaml_rust; | ||||
| 
 | ||||
| pub use beacon_chain::BeaconChain; | ||||
| use bls::{PublicKey, Signature}; | ||||
| use bls::Signature; | ||||
| use db::stores::{BeaconBlockStore, BeaconStateStore}; | ||||
| use db::MemoryDB; | ||||
| //use env_logger::{Builder, Env};
 | ||||
| use fork_choice::{BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, SlowLMDGhost}; | ||||
| // use env_logger::{Builder, Env};
 | ||||
| use fork_choice::{ | ||||
|     BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost, | ||||
| }; | ||||
| use ssz::ssz_encode; | ||||
| use std::collections::HashMap; | ||||
| use std::sync::Arc; | ||||
| use std::{fs::File, io::prelude::*, path::PathBuf}; | ||||
| use types::{ | ||||
|     BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Epoch, Eth1Data, Hash256, Slot, Validator, | ||||
| }; | ||||
| use types::test_utils::TestingBeaconStateBuilder; | ||||
| use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Keypair, Slot}; | ||||
| use yaml_rust::yaml; | ||||
| 
 | ||||
| // Note: We Assume the block Id's are hex-encoded.
 | ||||
| 
 | ||||
| #[test] | ||||
| fn test_optimized_lmd_ghost() { | ||||
|     // set up logging
 | ||||
|     // Builder::from_env(Env::default().default_filter_or("trace")).init();
 | ||||
| 
 | ||||
|     test_yaml_vectors( | ||||
|         ForkChoiceAlgorithm::OptimizedLMDGhost, | ||||
|         "tests/lmd_ghost_test_vectors.yaml", | ||||
|         100, | ||||
|     ); | ||||
| } | ||||
| 
 | ||||
| #[test] | ||||
| fn test_bitwise_lmd_ghost() { | ||||
|     // set up logging
 | ||||
| @ -205,16 +218,18 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec<yaml_rust::Yaml> { | ||||
| // initialise a single validator and state. All blocks will reference this state root.
 | ||||
| fn setup_inital_state( | ||||
|     fork_choice_algo: &ForkChoiceAlgorithm, | ||||
|     no_validators: usize, | ||||
|     num_validators: usize, | ||||
| ) -> (Box<ForkChoice>, Arc<BeaconBlockStore<MemoryDB>>, Hash256) { | ||||
|     let zero_hash = Hash256::zero(); | ||||
| 
 | ||||
|     let db = Arc::new(MemoryDB::open()); | ||||
|     let block_store = Arc::new(BeaconBlockStore::new(db.clone())); | ||||
|     let state_store = Arc::new(BeaconStateStore::new(db.clone())); | ||||
| 
 | ||||
|     // the fork choice instantiation
 | ||||
|     let fork_choice: Box<ForkChoice> = match fork_choice_algo { | ||||
|         ForkChoiceAlgorithm::OptimizedLMDGhost => Box::new(OptimizedLMDGhost::new( | ||||
|             block_store.clone(), | ||||
|             state_store.clone(), | ||||
|         )), | ||||
|         ForkChoiceAlgorithm::BitwiseLMDGhost => Box::new(BitwiseLMDGhost::new( | ||||
|             block_store.clone(), | ||||
|             state_store.clone(), | ||||
| @ -225,40 +240,11 @@ fn setup_inital_state( | ||||
|         ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())), | ||||
|     }; | ||||
| 
 | ||||
|     // misc vars for setting up the state
 | ||||
|     let genesis_time = 1_550_381_159; | ||||
| 
 | ||||
|     let latest_eth1_data = Eth1Data { | ||||
|         deposit_root: zero_hash.clone(), | ||||
|         block_hash: zero_hash.clone(), | ||||
|     }; | ||||
| 
 | ||||
|     let initial_validator_deposits = vec![]; | ||||
|     let spec = ChainSpec::foundation(); | ||||
| 
 | ||||
|     // create the state
 | ||||
|     let mut state = BeaconState::genesis( | ||||
|         genesis_time, | ||||
|         initial_validator_deposits, | ||||
|         latest_eth1_data, | ||||
|         &spec, | ||||
|     ) | ||||
|     .unwrap(); | ||||
| 
 | ||||
|     let default_validator = Validator { | ||||
|         pubkey: PublicKey::default(), | ||||
|         withdrawal_credentials: zero_hash, | ||||
|         activation_epoch: Epoch::from(0u64), | ||||
|         exit_epoch: spec.far_future_epoch, | ||||
|         withdrawable_epoch: spec.far_future_epoch, | ||||
|         initiated_exit: false, | ||||
|         slashed: false, | ||||
|     }; | ||||
|     // activate the validators
 | ||||
|     for _ in 0..no_validators { | ||||
|         state.validator_registry.push(default_validator.clone()); | ||||
|         state.validator_balances.push(32_000_000_000); | ||||
|     } | ||||
|     let state_builder = | ||||
|         TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec); | ||||
|     let (state, _keypairs) = state_builder.build(); | ||||
| 
 | ||||
|     let state_root = state.canonical_root(); | ||||
|     state_store | ||||
|  | ||||
| @ -14,6 +14,7 @@ env_logger = "0.6.0" | ||||
| 
 | ||||
| [dependencies] | ||||
| bls = { path = "../utils/bls" } | ||||
| fnv = "1.0" | ||||
| hashing = { path = "../utils/hashing" } | ||||
| int_to_bytes = { path = "../utils/int_to_bytes" } | ||||
| integer-sqrt = "0.1" | ||||
|  | ||||
							
								
								
									
										437
									
								
								eth2/state_processing/benches/bench_block_processing.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										437
									
								
								eth2/state_processing/benches/bench_block_processing.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,437 @@ | ||||
| use criterion::Criterion; | ||||
| use criterion::{black_box, Benchmark}; | ||||
| use log::debug; | ||||
| use ssz::TreeHash; | ||||
| use state_processing::{ | ||||
|     per_block_processing, | ||||
|     per_block_processing::{ | ||||
|         process_attestations, process_attester_slashings, process_deposits, process_eth1_data, | ||||
|         process_exits, process_proposer_slashings, process_randao, process_transfers, | ||||
|         verify_block_signature, | ||||
|     }, | ||||
| }; | ||||
| use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; | ||||
| use types::*; | ||||
| 
 | ||||
| /// Run the benchmarking suite on a foundation spec with 16,384 validators.
 | ||||
| pub fn bench_block_processing_n_validators(c: &mut Criterion, validator_count: usize) { | ||||
|     let spec = ChainSpec::foundation(); | ||||
| 
 | ||||
|     let (mut state, keypairs) = build_state(validator_count, &spec); | ||||
|     let block = build_block(&mut state, &keypairs, &spec); | ||||
| 
 | ||||
|     assert_eq!( | ||||
|         block.body.proposer_slashings.len(), | ||||
|         spec.max_proposer_slashings as usize, | ||||
|         "The block should have the maximum possible proposer slashings" | ||||
|     ); | ||||
| 
 | ||||
|     assert_eq!( | ||||
|         block.body.attester_slashings.len(), | ||||
|         spec.max_attester_slashings as usize, | ||||
|         "The block should have the maximum possible attester slashings" | ||||
|     ); | ||||
| 
 | ||||
|     for attester_slashing in &block.body.attester_slashings { | ||||
|         let len_1 = attester_slashing | ||||
|             .slashable_attestation_1 | ||||
|             .validator_indices | ||||
|             .len(); | ||||
|         let len_2 = attester_slashing | ||||
|             .slashable_attestation_1 | ||||
|             .validator_indices | ||||
|             .len(); | ||||
|         assert!( | ||||
|             (len_1 == len_2) && (len_2 == spec.max_indices_per_slashable_vote as usize), | ||||
|             "Each attester slashing should have the maximum possible validator indices" | ||||
|         ); | ||||
|     } | ||||
| 
 | ||||
|     assert_eq!( | ||||
|         block.body.attestations.len(), | ||||
|         spec.max_attestations as usize, | ||||
|         "The block should have the maximum possible attestations." | ||||
|     ); | ||||
| 
 | ||||
|     assert_eq!( | ||||
|         block.body.deposits.len(), | ||||
|         spec.max_deposits as usize, | ||||
|         "The block should have the maximum possible deposits." | ||||
|     ); | ||||
| 
 | ||||
|     assert_eq!( | ||||
|         block.body.voluntary_exits.len(), | ||||
|         spec.max_voluntary_exits as usize, | ||||
|         "The block should have the maximum possible voluntary exits." | ||||
|     ); | ||||
| 
 | ||||
|     assert_eq!( | ||||
|         block.body.transfers.len(), | ||||
|         spec.max_transfers as usize, | ||||
|         "The block should have the maximum possible transfers." | ||||
|     ); | ||||
| 
 | ||||
|     bench_block_processing( | ||||
|         c, | ||||
|         &block, | ||||
|         &state, | ||||
|         &spec, | ||||
|         &format!("{}_validators", validator_count), | ||||
|     ); | ||||
| } | ||||
| 
 | ||||
| fn build_state(validator_count: usize, spec: &ChainSpec) -> (BeaconState, Vec<Keypair>) { | ||||
|     let mut builder = | ||||
|         TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); | ||||
| 
 | ||||
|     // Set the state to be just before an epoch transition.
 | ||||
|     let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); | ||||
|     builder.teleport_to_slot(target_slot, &spec); | ||||
| 
 | ||||
|     // Builds all caches; benches will not contain shuffling/committee building times.
 | ||||
|     builder.build_caches(&spec).unwrap(); | ||||
| 
 | ||||
|     builder.build() | ||||
| } | ||||
| 
 | ||||
| fn build_block(state: &mut BeaconState, keypairs: &[Keypair], spec: &ChainSpec) -> BeaconBlock { | ||||
|     let mut builder = TestingBeaconBlockBuilder::new(spec); | ||||
| 
 | ||||
|     builder.set_slot(state.slot); | ||||
| 
 | ||||
|     let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); | ||||
|     let keypair = &keypairs[proposer_index]; | ||||
| 
 | ||||
|     builder.set_randao_reveal(&keypair.sk, &state.fork, spec); | ||||
| 
 | ||||
|     // Used as a stream of validator indices for use in slashings, exits, etc.
 | ||||
|     let mut validators_iter = (0..keypairs.len() as u64).into_iter(); | ||||
| 
 | ||||
|     // Insert the maximum possible number of `ProposerSlashing` objects.
 | ||||
|     debug!( | ||||
|         "Inserting {} proposer slashings...", | ||||
|         spec.max_proposer_slashings | ||||
|     ); | ||||
|     for _ in 0..spec.max_proposer_slashings { | ||||
|         let validator_index = validators_iter.next().expect("Insufficient validators."); | ||||
| 
 | ||||
|         builder.insert_proposer_slashing( | ||||
|             validator_index, | ||||
|             &keypairs[validator_index as usize].sk, | ||||
|             &state.fork, | ||||
|             spec, | ||||
|         ); | ||||
|     } | ||||
| 
 | ||||
|     // Insert the maximum possible number of `AttesterSlashing` objects
 | ||||
|     debug!( | ||||
|         "Inserting {} attester slashings...", | ||||
|         spec.max_attester_slashings | ||||
|     ); | ||||
|     for _ in 0..spec.max_attester_slashings { | ||||
|         let mut attesters: Vec<u64> = vec![]; | ||||
|         let mut secret_keys: Vec<&SecretKey> = vec![]; | ||||
| 
 | ||||
|         for _ in 0..spec.max_indices_per_slashable_vote { | ||||
|             let validator_index = validators_iter.next().expect("Insufficient validators."); | ||||
| 
 | ||||
|             attesters.push(validator_index); | ||||
|             secret_keys.push(&keypairs[validator_index as usize].sk); | ||||
|         } | ||||
| 
 | ||||
|         builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); | ||||
|     } | ||||
| 
 | ||||
|     // Insert the maximum possible number of `Attestation` objects.
 | ||||
|     debug!("Inserting {} attestations...", spec.max_attestations); | ||||
|     let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); | ||||
|     builder | ||||
|         .fill_with_attestations(state, &all_secret_keys, spec) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     // Insert the maximum possible number of `Deposit` objects.
 | ||||
|     debug!("Inserting {} deposits...", spec.max_deposits); | ||||
|     for i in 0..spec.max_deposits { | ||||
|         builder.insert_deposit(32_000_000_000, state.deposit_index + i, state, spec); | ||||
|     } | ||||
| 
 | ||||
|     // Insert the maximum possible number of `Exit` objects.
 | ||||
|     debug!("Inserting {} exits...", spec.max_voluntary_exits); | ||||
|     for _ in 0..spec.max_voluntary_exits { | ||||
|         let validator_index = validators_iter.next().expect("Insufficient validators."); | ||||
| 
 | ||||
|         builder.insert_exit( | ||||
|             state, | ||||
|             validator_index, | ||||
|             &keypairs[validator_index as usize].sk, | ||||
|             spec, | ||||
|         ); | ||||
|     } | ||||
| 
 | ||||
|     // Insert the maximum possible number of `Transfer` objects.
 | ||||
|     debug!("Inserting {} transfers...", spec.max_transfers); | ||||
|     for _ in 0..spec.max_transfers { | ||||
|         let validator_index = validators_iter.next().expect("Insufficient validators."); | ||||
| 
 | ||||
|         // Manually set the validator to be withdrawn.
 | ||||
|         state.validator_registry[validator_index as usize].withdrawable_epoch = | ||||
|             state.previous_epoch(spec); | ||||
| 
 | ||||
|         builder.insert_transfer( | ||||
|             state, | ||||
|             validator_index, | ||||
|             validator_index, | ||||
|             1, | ||||
|             keypairs[validator_index as usize].clone(), | ||||
|             spec, | ||||
|         ); | ||||
|     } | ||||
| 
 | ||||
|     let mut block = builder.build(&keypair.sk, &state.fork, spec); | ||||
| 
 | ||||
|     // Set the eth1 data to be different from the state.
 | ||||
|     block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]); | ||||
| 
 | ||||
|     block | ||||
| } | ||||
| 
 | ||||
| /// Run the detailed benchmarking suite on the given `BeaconState`.
 | ||||
| ///
 | ||||
| /// `desc` will be added to the title of each bench.
 | ||||
| fn bench_block_processing( | ||||
|     c: &mut Criterion, | ||||
|     initial_block: &BeaconBlock, | ||||
|     initial_state: &BeaconState, | ||||
|     initial_spec: &ChainSpec, | ||||
|     desc: &str, | ||||
| ) { | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("verify_block_signature", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     verify_block_signature(&mut state, &block, &spec).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_randao", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_randao(&mut state, &block, &spec).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_eth1_data", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_eth1_data(&mut state, &block.eth1_data).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_proposer_slashings", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_proposer_slashings(&mut state, &block.body.proposer_slashings, &spec) | ||||
|                         .unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_attester_slashings", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_attester_slashings(&mut state, &block.body.attester_slashings, &spec) | ||||
|                         .unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_attestations", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_attestations(&mut state, &block.body.attestations, &spec).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_deposits", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_deposits(&mut state, &block.body.deposits, &spec).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_exits", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("process_transfers", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     process_transfers(&mut state, &block.body.transfers, &spec).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state = initial_state.clone(); | ||||
|     let block = initial_block.clone(); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("per_block_processing", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     per_block_processing(&mut state, &block, &spec).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let mut state = initial_state.clone(); | ||||
|     state.drop_cache(RelativeEpoch::Previous); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("build_previous_state_epoch_cache", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     state | ||||
|                         .build_epoch_cache(RelativeEpoch::Previous, &spec) | ||||
|                         .unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let mut state = initial_state.clone(); | ||||
|     state.drop_cache(RelativeEpoch::Current); | ||||
|     let spec = initial_spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("build_current_state_epoch_cache", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state.clone(), | ||||
|                 |mut state| { | ||||
|                     state | ||||
|                         .build_epoch_cache(RelativeEpoch::Current, &spec) | ||||
|                         .unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let block = initial_block.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/block_processing", desc), | ||||
|         Benchmark::new("tree_hash_block", move |b| { | ||||
|             b.iter(|| black_box(block.hash_tree_root())) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| } | ||||
							
								
								
									
										273
									
								
								eth2/state_processing/benches/bench_epoch_processing.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										273
									
								
								eth2/state_processing/benches/bench_epoch_processing.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,273 @@ | ||||
| use criterion::Criterion; | ||||
| use criterion::{black_box, Benchmark}; | ||||
| use ssz::TreeHash; | ||||
| use state_processing::{ | ||||
|     per_epoch_processing, | ||||
|     per_epoch_processing::{ | ||||
|         clean_attestations, initialize_validator_statuses, process_crosslinks, process_eth1_data, | ||||
|         process_justification, process_rewards_and_penalities, process_validator_registry, | ||||
|         update_active_tree_index_roots, update_latest_slashed_balances, | ||||
|     }, | ||||
| }; | ||||
| use types::test_utils::TestingBeaconStateBuilder; | ||||
| use types::*; | ||||
| 
 | ||||
| pub const BENCHING_SAMPLE_SIZE: usize = 10; | ||||
| pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; | ||||
| 
 | ||||
| /// Run the benchmarking suite on a foundation spec with 16,384 validators.
 | ||||
| pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) { | ||||
|     let spec = ChainSpec::foundation(); | ||||
| 
 | ||||
|     let mut builder = | ||||
|         TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); | ||||
| 
 | ||||
|     // Set the state to be just before an epoch transition.
 | ||||
|     let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); | ||||
|     builder.teleport_to_slot(target_slot, &spec); | ||||
| 
 | ||||
|     // Builds all caches; benches will not contain shuffling/committee building times.
 | ||||
|     builder.build_caches(&spec).unwrap(); | ||||
| 
 | ||||
|     // Inserts one attestation with full participation for each committee able to include an
 | ||||
|     // attestation in this state.
 | ||||
|     builder.insert_attestations(&spec); | ||||
| 
 | ||||
|     let (state, _keypairs) = builder.build(); | ||||
| 
 | ||||
|     // Assert that the state has an attestations for each committee that is able to include an
 | ||||
|     // attestation in the state.
 | ||||
|     let committees_per_epoch = spec.get_epoch_committee_count(validator_count); | ||||
|     let committees_per_slot = committees_per_epoch / spec.slots_per_epoch; | ||||
|     let previous_epoch_attestations = committees_per_epoch; | ||||
|     let current_epoch_attestations = | ||||
|         committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay); | ||||
|     assert_eq!( | ||||
|         state.latest_attestations.len() as u64, | ||||
|         previous_epoch_attestations + current_epoch_attestations, | ||||
|         "The state should have an attestation for each committee." | ||||
|     ); | ||||
| 
 | ||||
|     // Assert that each attestation in the state has full participation.
 | ||||
|     let committee_size = validator_count / committees_per_epoch as usize; | ||||
|     for a in &state.latest_attestations { | ||||
|         assert_eq!( | ||||
|             a.aggregation_bitfield.num_set_bits(), | ||||
|             committee_size, | ||||
|             "Each attestation in the state should have full participation" | ||||
|         ); | ||||
|     } | ||||
| 
 | ||||
|     // Assert that we will run the first arm of process_rewards_and_penalities
 | ||||
|     let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; | ||||
|     assert_eq!( | ||||
|         epochs_since_finality, 4, | ||||
|         "Epochs since finality should be 4" | ||||
|     ); | ||||
| 
 | ||||
|     bench_epoch_processing(c, &state, &spec, &format!("{}_validators", validator_count)); | ||||
| } | ||||
| 
 | ||||
| /// Run the detailed benchmarking suite on the given `BeaconState`.
 | ||||
| ///
 | ||||
| /// `desc` will be added to the title of each bench.
 | ||||
| fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("process_eth1_data", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     process_eth1_data(&mut state, &spec_clone); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("initialize_validator_statuses", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     initialize_validator_statuses(&mut state, &spec_clone).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     let attesters = initialize_validator_statuses(&state, &spec).unwrap(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("process_justification", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     process_justification(&mut state, &attesters.total_balances, &spec_clone); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("process_crosslinks", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()), | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let mut state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     let attesters = initialize_validator_statuses(&state, &spec).unwrap(); | ||||
|     let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("process_rewards_and_penalties", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || (state_clone.clone(), attesters.clone()), | ||||
|                 |(mut state, mut attesters)| { | ||||
|                     process_rewards_and_penalities( | ||||
|                         &mut state, | ||||
|                         &mut attesters, | ||||
|                         &winning_root_for_shards, | ||||
|                         &spec_clone, | ||||
|                     ) | ||||
|                     .unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(SMALL_BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("process_ejections", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     state.process_ejections(&spec_clone); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("process_validator_registry", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     process_validator_registry(&mut state, &spec_clone).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("update_active_tree_index_roots", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     update_active_tree_index_roots(&mut state, &spec_clone).unwrap(); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("update_latest_slashed_balances", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     update_latest_slashed_balances(&mut state, &spec_clone); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("clean_attestations", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| { | ||||
|                     clean_attestations(&mut state, &spec_clone); | ||||
|                     state | ||||
|                 }, | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     let spec_clone = spec.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("per_epoch_processing", move |b| { | ||||
|             b.iter_batched( | ||||
|                 || state_clone.clone(), | ||||
|                 |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), | ||||
|                 criterion::BatchSize::SmallInput, | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(SMALL_BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| 
 | ||||
|     let state_clone = state.clone(); | ||||
|     c.bench( | ||||
|         &format!("{}/epoch_processing", desc), | ||||
|         Benchmark::new("tree_hash_state", move |b| { | ||||
|             b.iter(|| black_box(state_clone.hash_tree_root())) | ||||
|         }) | ||||
|         .sample_size(SMALL_BENCHING_SAMPLE_SIZE), | ||||
|     ); | ||||
| } | ||||
| @ -1,65 +1,26 @@ | ||||
| use criterion::Benchmark; | ||||
| use criterion::Criterion; | ||||
| use criterion::{black_box, criterion_group, criterion_main, Benchmark}; | ||||
| // use env_logger::{Builder, Env};
 | ||||
| use state_processing::SlotProcessable; | ||||
| use types::beacon_state::BeaconStateBuilder; | ||||
| use criterion::{criterion_group, criterion_main}; | ||||
| use env_logger::{Builder, Env}; | ||||
| use types::test_utils::TestingBeaconStateBuilder; | ||||
| use types::*; | ||||
| 
 | ||||
| fn epoch_processing(c: &mut Criterion) { | ||||
|     // Builder::from_env(Env::default().default_filter_or("debug")).init();
 | ||||
| mod bench_block_processing; | ||||
| mod bench_epoch_processing; | ||||
| 
 | ||||
|     let mut builder = BeaconStateBuilder::new(16_384); | ||||
| pub const VALIDATOR_COUNT: usize = 300_032; | ||||
| 
 | ||||
|     builder.build_fast().unwrap(); | ||||
|     builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4); | ||||
| // `LOG_LEVEL == "debug"` gives logs, but they're very noisy and slow down benching.
 | ||||
| pub const LOG_LEVEL: &str = ""; | ||||
| 
 | ||||
|     let mut state = builder.cloned_state(); | ||||
| pub fn state_processing(c: &mut Criterion) { | ||||
|     if LOG_LEVEL != "" { | ||||
|         Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init(); | ||||
|     } | ||||
| 
 | ||||
|     // Build all the caches so the following state does _not_ include the cache-building time.
 | ||||
|     state | ||||
|         .build_epoch_cache(RelativeEpoch::Previous, &builder.spec) | ||||
|         .unwrap(); | ||||
|     state | ||||
|         .build_epoch_cache(RelativeEpoch::Current, &builder.spec) | ||||
|         .unwrap(); | ||||
|     state | ||||
|         .build_epoch_cache(RelativeEpoch::Next, &builder.spec) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     let cached_state = state.clone(); | ||||
| 
 | ||||
|     // Drop all the caches so the following state includes the cache-building time.
 | ||||
|     state.drop_cache(RelativeEpoch::Previous); | ||||
|     state.drop_cache(RelativeEpoch::Current); | ||||
|     state.drop_cache(RelativeEpoch::Next); | ||||
| 
 | ||||
|     let cacheless_state = state; | ||||
| 
 | ||||
|     let spec_a = builder.spec.clone(); | ||||
|     let spec_b = builder.spec.clone(); | ||||
| 
 | ||||
|     c.bench( | ||||
|         "epoch processing", | ||||
|         Benchmark::new("with pre-built caches", move |b| { | ||||
|             b.iter_with_setup( | ||||
|                 || cached_state.clone(), | ||||
|                 |mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_a).unwrap()), | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
| 
 | ||||
|     c.bench( | ||||
|         "epoch processing", | ||||
|         Benchmark::new("without pre-built caches", move |b| { | ||||
|             b.iter_with_setup( | ||||
|                 || cacheless_state.clone(), | ||||
|                 |mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_b).unwrap()), | ||||
|             ) | ||||
|         }) | ||||
|         .sample_size(10), | ||||
|     ); | ||||
|     bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); | ||||
|     bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); | ||||
| } | ||||
| 
 | ||||
| criterion_group!(benches, epoch_processing,); | ||||
| criterion_group!(benches, state_processing); | ||||
| criterion_main!(benches); | ||||
|  | ||||
| @ -1,14 +1,19 @@ | ||||
| use self::verify_proposer_slashing::verify_proposer_slashing; | ||||
| use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; | ||||
| use hashing::hash; | ||||
| use log::debug; | ||||
| use rayon::prelude::*; | ||||
| use ssz::{ssz_encode, SignedRoot, TreeHash}; | ||||
| use types::*; | ||||
| 
 | ||||
| pub use self::verify_attester_slashing::verify_attester_slashing; | ||||
| pub use self::verify_attester_slashing::{ | ||||
|     gather_attester_slashing_indices, verify_attester_slashing, | ||||
| }; | ||||
| pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; | ||||
| pub use verify_deposit::verify_deposit; | ||||
| pub use verify_deposit::{ | ||||
|     build_public_key_hashmap, get_existing_validator_index, verify_deposit, verify_deposit_index, | ||||
| }; | ||||
| pub use verify_exit::verify_exit; | ||||
| pub use verify_slashable_attestation::verify_slashable_attestation; | ||||
| pub use verify_transfer::{execute_transfer, verify_transfer}; | ||||
| 
 | ||||
| pub mod errors; | ||||
| @ -70,22 +75,21 @@ fn per_block_processing_signature_optional( | ||||
|     // Verify that `block.slot == state.slot`.
 | ||||
|     verify!(block.slot == state.slot, Invalid::StateSlotMismatch); | ||||
| 
 | ||||
|     // Ensure the current epoch cache is built.
 | ||||
|     // Ensure the current and previous epoch cache is built.
 | ||||
|     state.build_epoch_cache(RelativeEpoch::Current, spec)?; | ||||
|     state.build_epoch_cache(RelativeEpoch::Previous, spec)?; | ||||
| 
 | ||||
|     if should_verify_block_signature { | ||||
|         verify_block_signature(&state, &block, &spec)?; | ||||
|     } | ||||
|     process_randao(&mut state, &block, &spec)?; | ||||
|     process_eth1_data(&mut state, &block.eth1_data)?; | ||||
|     process_proposer_slashings(&mut state, &block.body.proposer_slashings[..], spec)?; | ||||
|     process_attester_slashings(&mut state, &block.body.attester_slashings[..], spec)?; | ||||
|     process_attestations(&mut state, &block.body.attestations[..], spec)?; | ||||
|     process_deposits(&mut state, &block.body.deposits[..], spec)?; | ||||
|     process_exits(&mut state, &block.body.voluntary_exits[..], spec)?; | ||||
|     process_transfers(&mut state, &block.body.transfers[..], spec)?; | ||||
| 
 | ||||
|     debug!("per_block_processing complete."); | ||||
|     process_proposer_slashings(&mut state, &block.body.proposer_slashings, spec)?; | ||||
|     process_attester_slashings(&mut state, &block.body.attester_slashings, spec)?; | ||||
|     process_attestations(&mut state, &block.body.attestations, spec)?; | ||||
|     process_deposits(&mut state, &block.body.deposits, spec)?; | ||||
|     process_exits(&mut state, &block.body.voluntary_exits, spec)?; | ||||
|     process_transfers(&mut state, &block.body.transfers, spec)?; | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
| @ -228,9 +232,17 @@ pub fn process_proposer_slashings( | ||||
|         proposer_slashings.len() as u64 <= spec.max_proposer_slashings, | ||||
|         Invalid::MaxProposerSlashingsExceeded | ||||
|     ); | ||||
|     for (i, proposer_slashing) in proposer_slashings.iter().enumerate() { | ||||
|         verify_proposer_slashing(proposer_slashing, &state, spec) | ||||
|             .map_err(|e| e.into_with_index(i))?; | ||||
| 
 | ||||
|     // Verify proposer slashings in parallel.
 | ||||
|     proposer_slashings | ||||
|         .par_iter() | ||||
|         .enumerate() | ||||
|         .try_for_each(|(i, proposer_slashing)| { | ||||
|             verify_proposer_slashing(proposer_slashing, &state, spec) | ||||
|                 .map_err(|e| e.into_with_index(i)) | ||||
|         })?; | ||||
| 
 | ||||
|     for proposer_slashing in proposer_slashings { | ||||
|         state.slash_validator(proposer_slashing.proposer_index as usize, spec)?; | ||||
|     } | ||||
| 
 | ||||
| @ -252,9 +264,41 @@ pub fn process_attester_slashings( | ||||
|         attester_slashings.len() as u64 <= spec.max_attester_slashings, | ||||
|         Invalid::MaxAttesterSlashingsExceed | ||||
|     ); | ||||
| 
 | ||||
|     // Verify the `SlashableAttestation`s in parallel (these are the resource-consuming objects, not
 | ||||
|     // the `AttesterSlashing`s themselves).
 | ||||
|     let mut slashable_attestations: Vec<&SlashableAttestation> = | ||||
|         Vec::with_capacity(attester_slashings.len() * 2); | ||||
|     for attester_slashing in attester_slashings { | ||||
|         slashable_attestations.push(&attester_slashing.slashable_attestation_1); | ||||
|         slashable_attestations.push(&attester_slashing.slashable_attestation_2); | ||||
|     } | ||||
| 
 | ||||
|     // Verify slashable attestations in parallel.
 | ||||
|     slashable_attestations | ||||
|         .par_iter() | ||||
|         .enumerate() | ||||
|         .try_for_each(|(i, slashable_attestation)| { | ||||
|             verify_slashable_attestation(&state, slashable_attestation, spec) | ||||
|                 .map_err(|e| e.into_with_index(i)) | ||||
|         })?; | ||||
|     let all_slashable_attestations_have_been_checked = true; | ||||
| 
 | ||||
|     // Gather the slashable indices and preform the final verification and update the state in series.
 | ||||
|     for (i, attester_slashing) in attester_slashings.iter().enumerate() { | ||||
|         let slashable_indices = verify_attester_slashing(&state, &attester_slashing, spec) | ||||
|         let should_verify_slashable_attestations = !all_slashable_attestations_have_been_checked; | ||||
| 
 | ||||
|         verify_attester_slashing( | ||||
|             &state, | ||||
|             &attester_slashing, | ||||
|             should_verify_slashable_attestations, | ||||
|             spec, | ||||
|         ) | ||||
|         .map_err(|e| e.into_with_index(i))?; | ||||
| 
 | ||||
|         let slashable_indices = gather_attester_slashing_indices(&state, &attester_slashing) | ||||
|             .map_err(|e| e.into_with_index(i))?; | ||||
| 
 | ||||
|         for i in slashable_indices { | ||||
|             state.slash_validator(i as usize, spec)?; | ||||
|         } | ||||
| @ -278,14 +322,20 @@ pub fn process_attestations( | ||||
|         attestations.len() as u64 <= spec.max_attestations, | ||||
|         Invalid::MaxAttestationsExceeded | ||||
|     ); | ||||
|     for (i, attestation) in attestations.iter().enumerate() { | ||||
|         // Build the previous epoch cache only if required by an attestation.
 | ||||
|         if attestation.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec) { | ||||
|             state.build_epoch_cache(RelativeEpoch::Previous, spec)?; | ||||
|         } | ||||
| 
 | ||||
|         validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))?; | ||||
|     // Ensure the previous epoch cache exists.
 | ||||
|     state.build_epoch_cache(RelativeEpoch::Previous, spec)?; | ||||
| 
 | ||||
|     // Verify attestations in parallel.
 | ||||
|     attestations | ||||
|         .par_iter() | ||||
|         .enumerate() | ||||
|         .try_for_each(|(i, attestation)| { | ||||
|             validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i)) | ||||
|         })?; | ||||
| 
 | ||||
|     // Update the state in series.
 | ||||
|     for attestation in attestations { | ||||
|         let pending_attestation = PendingAttestation { | ||||
|             data: attestation.data.clone(), | ||||
|             aggregation_bitfield: attestation.aggregation_bitfield.clone(), | ||||
| @ -313,24 +363,53 @@ pub fn process_deposits( | ||||
|         deposits.len() as u64 <= spec.max_deposits, | ||||
|         Invalid::MaxDepositsExceeded | ||||
|     ); | ||||
|     for (i, deposit) in deposits.iter().enumerate() { | ||||
|         verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec) | ||||
|             .map_err(|e| e.into_with_index(i))?; | ||||
| 
 | ||||
|         state | ||||
|             .process_deposit( | ||||
|                 deposit.deposit_data.deposit_input.pubkey.clone(), | ||||
|                 deposit.deposit_data.amount, | ||||
|                 deposit | ||||
|                     .deposit_data | ||||
|                     .deposit_input | ||||
|                     .proof_of_possession | ||||
|                     .clone(), | ||||
|                 deposit.deposit_data.deposit_input.withdrawal_credentials, | ||||
|                 None, | ||||
|                 spec, | ||||
|             ) | ||||
|             .map_err(|_| Error::Invalid(Invalid::DepositProcessingFailed(i)))?; | ||||
|     // Verify deposits in parallel.
 | ||||
|     deposits | ||||
|         .par_iter() | ||||
|         .enumerate() | ||||
|         .try_for_each(|(i, deposit)| { | ||||
|             verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec) | ||||
|                 .map_err(|e| e.into_with_index(i)) | ||||
|         })?; | ||||
| 
 | ||||
|     let public_key_to_index_hashmap = build_public_key_hashmap(&state); | ||||
| 
 | ||||
|     // Check `state.deposit_index` and update the state in series.
 | ||||
|     for (i, deposit) in deposits.iter().enumerate() { | ||||
|         verify_deposit_index(state, deposit).map_err(|e| e.into_with_index(i))?; | ||||
| 
 | ||||
|         // Get an `Option<u64>` where `u64` is the validator index if this deposit public key
 | ||||
|         // already exists in the beacon_state.
 | ||||
|         //
 | ||||
|         // This function also verifies the withdrawal credentials.
 | ||||
|         let validator_index = | ||||
|             get_existing_validator_index(state, deposit, &public_key_to_index_hashmap) | ||||
|                 .map_err(|e| e.into_with_index(i))?; | ||||
| 
 | ||||
|         let deposit_data = &deposit.deposit_data; | ||||
|         let deposit_input = &deposit.deposit_data.deposit_input; | ||||
| 
 | ||||
|         if let Some(index) = validator_index { | ||||
|             // Update the existing validator balance.
 | ||||
|             safe_add_assign!( | ||||
|                 state.validator_balances[index as usize], | ||||
|                 deposit_data.amount | ||||
|             ); | ||||
|         } else { | ||||
|             // Create a new validator.
 | ||||
|             let validator = Validator { | ||||
|                 pubkey: deposit_input.pubkey.clone(), | ||||
|                 withdrawal_credentials: deposit_input.withdrawal_credentials.clone(), | ||||
|                 activation_epoch: spec.far_future_epoch, | ||||
|                 exit_epoch: spec.far_future_epoch, | ||||
|                 withdrawable_epoch: spec.far_future_epoch, | ||||
|                 initiated_exit: false, | ||||
|                 slashed: false, | ||||
|             }; | ||||
|             state.validator_registry.push(validator); | ||||
|             state.validator_balances.push(deposit_data.amount); | ||||
|         } | ||||
| 
 | ||||
|         state.deposit_index += 1; | ||||
|     } | ||||
| @ -353,9 +432,17 @@ pub fn process_exits( | ||||
|         voluntary_exits.len() as u64 <= spec.max_voluntary_exits, | ||||
|         Invalid::MaxExitsExceeded | ||||
|     ); | ||||
|     for (i, exit) in voluntary_exits.iter().enumerate() { | ||||
|         verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))?; | ||||
| 
 | ||||
|     // Verify exits in parallel.
 | ||||
|     voluntary_exits | ||||
|         .par_iter() | ||||
|         .enumerate() | ||||
|         .try_for_each(|(i, exit)| { | ||||
|             verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i)) | ||||
|         })?; | ||||
| 
 | ||||
|     // Update the state in series.
 | ||||
|     for exit in voluntary_exits { | ||||
|         state.initiate_validator_exit(exit.validator_index as usize); | ||||
|     } | ||||
| 
 | ||||
| @ -377,8 +464,15 @@ pub fn process_transfers( | ||||
|         transfers.len() as u64 <= spec.max_transfers, | ||||
|         Invalid::MaxTransfersExceed | ||||
|     ); | ||||
| 
 | ||||
|     transfers | ||||
|         .par_iter() | ||||
|         .enumerate() | ||||
|         .try_for_each(|(i, transfer)| { | ||||
|             verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i)) | ||||
|         })?; | ||||
| 
 | ||||
|     for (i, transfer) in transfers.iter().enumerate() { | ||||
|         verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))?; | ||||
|         execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?; | ||||
|     } | ||||
| 
 | ||||
|  | ||||
| @ -76,6 +76,10 @@ pub enum BlockInvalid { | ||||
|     MaxExitsExceeded, | ||||
|     MaxTransfersExceed, | ||||
|     AttestationInvalid(usize, AttestationInvalid), | ||||
|     /// A `SlashableAttestation` inside an `AttesterSlashing` was invalid.
 | ||||
|     ///
 | ||||
|     /// To determine the offending `AttesterSlashing` index, divide the error message `usize` by two.
 | ||||
|     SlashableAttestationInvalid(usize, SlashableAttestationInvalid), | ||||
|     AttesterSlashingInvalid(usize, AttesterSlashingInvalid), | ||||
|     ProposerSlashingInvalid(usize, ProposerSlashingInvalid), | ||||
|     DepositInvalid(usize, DepositInvalid), | ||||
| @ -147,6 +151,8 @@ pub enum AttestationInvalid { | ||||
|     ///
 | ||||
|     /// (attestation_data_shard, attestation_data_slot)
 | ||||
|     NoCommitteeForShard(u64, Slot), | ||||
|     /// The validator index was unknown.
 | ||||
|     UnknownValidator(u64), | ||||
|     /// The attestation signature verification failed.
 | ||||
|     BadSignature, | ||||
|     /// The shard block root was not set to zero. This is a phase 0 requirement.
 | ||||
| @ -233,6 +239,11 @@ impl Into<SlashableAttestationInvalid> for SlashableAttestationValidationError { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl_into_with_index_without_beacon_error!( | ||||
|     SlashableAttestationValidationError, | ||||
|     SlashableAttestationInvalid | ||||
| ); | ||||
| 
 | ||||
| /* | ||||
|  * `ProposerSlashing` Validation | ||||
|  */ | ||||
| @ -292,6 +303,11 @@ pub enum DepositInvalid { | ||||
|     ///
 | ||||
|     /// (state_index, deposit_index)
 | ||||
|     BadIndex(u64, u64), | ||||
|     /// The proof-of-possession does not match the given pubkey.
 | ||||
|     BadProofOfPossession, | ||||
|     /// The withdrawal credentials for the depositing validator did not match the withdrawal
 | ||||
|     /// credentials of an existing validator with the same public key.
 | ||||
|     BadWithdrawalCredentials, | ||||
|     /// The specified `branch` and `index` did not form a valid proof that the deposit is included
 | ||||
|     /// in the eth1 deposit root.
 | ||||
|     BadMerkleProof, | ||||
|  | ||||
| @ -159,18 +159,16 @@ fn validate_attestation_signature_optional( | ||||
| 
 | ||||
|     if verify_signature { | ||||
|         let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); | ||||
|         verify!( | ||||
|             verify_attestation_signature( | ||||
|                 state, | ||||
|                 committee, | ||||
|                 attestation_epoch, | ||||
|                 &attestation.custody_bitfield, | ||||
|                 &attestation.data, | ||||
|                 &attestation.aggregate_signature, | ||||
|                 spec | ||||
|             ), | ||||
|             Invalid::BadSignature | ||||
|         ); | ||||
|         verify_attestation_signature( | ||||
|             state, | ||||
|             committee, | ||||
|             attestation_epoch, | ||||
|             &attestation.aggregation_bitfield, | ||||
|             &attestation.custody_bitfield, | ||||
|             &attestation.data, | ||||
|             &attestation.aggregate_signature, | ||||
|             spec, | ||||
|         )?; | ||||
|     } | ||||
| 
 | ||||
|     // [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`.
 | ||||
| @ -195,30 +193,45 @@ fn verify_attestation_signature( | ||||
|     state: &BeaconState, | ||||
|     committee: &[usize], | ||||
|     attestation_epoch: Epoch, | ||||
|     aggregation_bitfield: &Bitfield, | ||||
|     custody_bitfield: &Bitfield, | ||||
|     attestation_data: &AttestationData, | ||||
|     aggregate_signature: &AggregateSignature, | ||||
|     spec: &ChainSpec, | ||||
| ) -> bool { | ||||
| ) -> Result<(), Error> { | ||||
|     let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2]; | ||||
|     let mut message_exists = vec![false; 2]; | ||||
| 
 | ||||
|     for (i, v) in committee.iter().enumerate() { | ||||
|         let custody_bit = match custody_bitfield.get(i) { | ||||
|             Ok(bit) => bit, | ||||
|             // Invalidate signature if custody_bitfield.len() < committee
 | ||||
|             Err(_) => return false, | ||||
|         }; | ||||
|         let validator_signed = aggregation_bitfield.get(i).map_err(|_| { | ||||
|             Error::Invalid(Invalid::BadAggregationBitfieldLength( | ||||
|                 committee.len(), | ||||
|                 aggregation_bitfield.len(), | ||||
|             )) | ||||
|         })?; | ||||
| 
 | ||||
|         message_exists[custody_bit as usize] = true; | ||||
|         if validator_signed { | ||||
|             let custody_bit: bool = match custody_bitfield.get(i) { | ||||
|                 Ok(bit) => bit, | ||||
|                 // Invalidate signature if custody_bitfield.len() < committee
 | ||||
|                 Err(_) => { | ||||
|                     return Err(Error::Invalid(Invalid::BadCustodyBitfieldLength( | ||||
|                         committee.len(), | ||||
|                         custody_bitfield.len(), | ||||
|                     ))); | ||||
|                 } | ||||
|             }; | ||||
| 
 | ||||
|         match state.validator_registry.get(*v as usize) { | ||||
|             Some(validator) => { | ||||
|                 aggregate_pubs[custody_bit as usize].add(&validator.pubkey); | ||||
|             } | ||||
|             // Invalidate signature if validator index is unknown.
 | ||||
|             None => return false, | ||||
|         }; | ||||
|             message_exists[custody_bit as usize] = true; | ||||
| 
 | ||||
|             match state.validator_registry.get(*v as usize) { | ||||
|                 Some(validator) => { | ||||
|                     aggregate_pubs[custody_bit as usize].add(&validator.pubkey); | ||||
|                 } | ||||
|                 // Return error if validator index is unknown.
 | ||||
|                 None => return Err(Error::BeaconStateError(BeaconStateError::UnknownValidator)), | ||||
|             }; | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     // Message when custody bitfield is `false`
 | ||||
| @ -251,5 +264,10 @@ fn verify_attestation_signature( | ||||
| 
 | ||||
|     let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork); | ||||
| 
 | ||||
|     aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]) | ||||
|     verify!( | ||||
|         aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]), | ||||
|         Invalid::BadSignature | ||||
|     ); | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
|  | ||||
| @ -11,8 +11,9 @@ use types::*; | ||||
| pub fn verify_attester_slashing( | ||||
|     state: &BeaconState, | ||||
|     attester_slashing: &AttesterSlashing, | ||||
|     should_verify_slashable_attestations: bool, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<Vec<u64>, Error> { | ||||
| ) -> Result<(), Error> { | ||||
|     let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; | ||||
|     let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; | ||||
| 
 | ||||
| @ -26,10 +27,27 @@ pub fn verify_attester_slashing( | ||||
|         Invalid::NotSlashable | ||||
|     ); | ||||
| 
 | ||||
|     verify_slashable_attestation(state, &slashable_attestation_1, spec) | ||||
|         .map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?; | ||||
|     verify_slashable_attestation(state, &slashable_attestation_2, spec) | ||||
|         .map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?; | ||||
|     if should_verify_slashable_attestations { | ||||
|         verify_slashable_attestation(state, &slashable_attestation_1, spec) | ||||
|             .map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?; | ||||
|         verify_slashable_attestation(state, &slashable_attestation_2, spec) | ||||
|             .map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?; | ||||
|     } | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| /// For a given attester slashing, return the indices able to be slashed.
 | ||||
| ///
 | ||||
| /// Returns Ok(indices) if `indices.len() > 0`.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn gather_attester_slashing_indices( | ||||
|     state: &BeaconState, | ||||
|     attester_slashing: &AttesterSlashing, | ||||
| ) -> Result<Vec<u64>, Error> { | ||||
|     let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; | ||||
|     let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; | ||||
| 
 | ||||
|     let mut slashable_indices = vec![]; | ||||
|     for i in &slashable_attestation_1.validator_indices { | ||||
| @ -38,7 +56,7 @@ pub fn verify_attester_slashing( | ||||
|             .get(*i as usize) | ||||
|             .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?; | ||||
| 
 | ||||
|         if slashable_attestation_1.validator_indices.contains(&i) & !validator.slashed { | ||||
|         if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed { | ||||
|             slashable_indices.push(*i); | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @ -3,13 +3,19 @@ use hashing::hash; | ||||
| use merkle_proof::verify_merkle_proof; | ||||
| use ssz::ssz_encode; | ||||
| use ssz_derive::Encode; | ||||
| use std::collections::HashMap; | ||||
| use types::*; | ||||
| 
 | ||||
| pub type PublicKeyValidatorIndexHashmap = HashMap<PublicKey, u64>; | ||||
| 
 | ||||
| /// Indicates if a `Deposit` is valid to be included in a block in the current epoch of the given
 | ||||
| /// state.
 | ||||
| ///
 | ||||
| /// Returns `Ok(())` if the `Deposit` is valid, otherwise indicates the reason for invalidity.
 | ||||
| ///
 | ||||
| /// This function _does not_ check `state.deposit_index` so this function may be run in parallel.
 | ||||
| /// See the `verify_deposit_index` function for this.
 | ||||
| ///
 | ||||
| /// Note: this function is incomplete.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| @ -20,8 +26,15 @@ pub fn verify_deposit( | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<(), Error> { | ||||
|     verify!( | ||||
|         deposit.index == state.deposit_index, | ||||
|         Invalid::BadIndex(state.deposit_index, deposit.index) | ||||
|         deposit | ||||
|             .deposit_data | ||||
|             .deposit_input | ||||
|             .validate_proof_of_possession( | ||||
|                 state.slot.epoch(spec.slots_per_epoch), | ||||
|                 &state.fork, | ||||
|                 spec | ||||
|             ), | ||||
|         Invalid::BadProofOfPossession | ||||
|     ); | ||||
| 
 | ||||
|     if verify_merkle_branch { | ||||
| @ -34,6 +47,50 @@ pub fn verify_deposit( | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| /// Verify that the `Deposit` index is correct.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn verify_deposit_index(state: &BeaconState, deposit: &Deposit) -> Result<(), Error> { | ||||
|     verify!( | ||||
|         deposit.index == state.deposit_index, | ||||
|         Invalid::BadIndex(state.deposit_index, deposit.index) | ||||
|     ); | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| pub fn build_public_key_hashmap(state: &BeaconState) -> PublicKeyValidatorIndexHashmap { | ||||
|     let mut hashmap = HashMap::with_capacity(state.validator_registry.len()); | ||||
| 
 | ||||
|     for (i, validator) in state.validator_registry.iter().enumerate() { | ||||
|         hashmap.insert(validator.pubkey.clone(), i as u64); | ||||
|     } | ||||
| 
 | ||||
|     hashmap | ||||
| } | ||||
| 
 | ||||
| pub fn get_existing_validator_index( | ||||
|     state: &BeaconState, | ||||
|     deposit: &Deposit, | ||||
|     pubkey_map: &HashMap<PublicKey, u64>, | ||||
| ) -> Result<Option<u64>, Error> { | ||||
|     let deposit_input = &deposit.deposit_data.deposit_input; | ||||
| 
 | ||||
|     let validator_index = pubkey_map.get(&deposit_input.pubkey).and_then(|i| Some(*i)); | ||||
| 
 | ||||
|     match validator_index { | ||||
|         None => Ok(None), | ||||
|         Some(index) => { | ||||
|             verify!( | ||||
|                 deposit_input.withdrawal_credentials | ||||
|                     == state.validator_registry[index as usize].withdrawal_credentials, | ||||
|                 Invalid::BadWithdrawalCredentials | ||||
|             ); | ||||
|             Ok(Some(index)) | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Verify that a deposit is included in the state's eth1 deposit root.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
|  | ||||
| @ -1,74 +1,46 @@ | ||||
| use attester_sets::AttesterSets; | ||||
| use errors::EpochProcessingError as Error; | ||||
| use inclusion_distance::{inclusion_distance, inclusion_slot}; | ||||
| use integer_sqrt::IntegerSquareRoot; | ||||
| use log::debug; | ||||
| use rayon::prelude::*; | ||||
| use ssz::TreeHash; | ||||
| use std::collections::{HashMap, HashSet}; | ||||
| use std::iter::FromIterator; | ||||
| use std::collections::HashMap; | ||||
| use types::{validator_registry::get_active_validator_indices, *}; | ||||
| use validator_statuses::{TotalBalances, ValidatorStatuses}; | ||||
| use winning_root::{winning_root, WinningRoot}; | ||||
| 
 | ||||
| pub mod attester_sets; | ||||
| pub mod errors; | ||||
| pub mod inclusion_distance; | ||||
| pub mod tests; | ||||
| pub mod validator_statuses; | ||||
| pub mod winning_root; | ||||
| 
 | ||||
| /// Maps a shard to a winning root.
 | ||||
| ///
 | ||||
| /// It is generated during crosslink processing and later used to reward/penalize validators.
 | ||||
| pub type WinningRootHashSet = HashMap<u64, WinningRoot>; | ||||
| 
 | ||||
| /// Performs per-epoch processing on some BeaconState.
 | ||||
| ///
 | ||||
| /// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is
 | ||||
| /// returned, a state might be "half-processed" and therefore in an invalid state.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { | ||||
|     let current_epoch = state.current_epoch(spec); | ||||
|     let previous_epoch = state.previous_epoch(spec); | ||||
|     let next_epoch = state.next_epoch(spec); | ||||
| 
 | ||||
|     debug!( | ||||
|         "Starting per-epoch processing on epoch {}...", | ||||
|         state.current_epoch(spec) | ||||
|     ); | ||||
| 
 | ||||
|     // Ensure all of the caches are built.
 | ||||
|     state.build_epoch_cache(RelativeEpoch::Previous, spec)?; | ||||
|     state.build_epoch_cache(RelativeEpoch::Current, spec)?; | ||||
|     state.build_epoch_cache(RelativeEpoch::Next, spec)?; | ||||
| 
 | ||||
|     let attesters = AttesterSets::new(&state, spec)?; | ||||
| 
 | ||||
|     let active_validator_indices = get_active_validator_indices( | ||||
|         &state.validator_registry, | ||||
|         state.slot.epoch(spec.slots_per_epoch), | ||||
|     ); | ||||
| 
 | ||||
|     let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); | ||||
|     let previous_total_balance = state.get_total_balance( | ||||
|         &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], | ||||
|         spec, | ||||
|     ); | ||||
|     let mut statuses = initialize_validator_statuses(&state, spec)?; | ||||
| 
 | ||||
|     process_eth1_data(state, spec); | ||||
| 
 | ||||
|     process_justification( | ||||
|         state, | ||||
|         current_total_balance, | ||||
|         previous_total_balance, | ||||
|         attesters.previous_epoch_boundary.balance, | ||||
|         attesters.current_epoch_boundary.balance, | ||||
|         spec, | ||||
|     ); | ||||
|     process_justification(state, &statuses.total_balances, spec); | ||||
| 
 | ||||
|     // Crosslinks
 | ||||
|     let winning_root_for_shards = process_crosslinks(state, spec)?; | ||||
| 
 | ||||
|     // Rewards and Penalities
 | ||||
|     let active_validator_indices_hashset: HashSet<usize> = | ||||
|         HashSet::from_iter(active_validator_indices.iter().cloned()); | ||||
|     process_rewards_and_penalities( | ||||
|         state, | ||||
|         active_validator_indices_hashset, | ||||
|         &attesters, | ||||
|         previous_total_balance, | ||||
|         &winning_root_for_shards, | ||||
|         spec, | ||||
|     )?; | ||||
|     process_rewards_and_penalities(state, &mut statuses, &winning_root_for_shards, spec)?; | ||||
| 
 | ||||
|     // Ejections
 | ||||
|     state.process_ejections(spec); | ||||
| @ -77,38 +49,47 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result | ||||
|     process_validator_registry(state, spec)?; | ||||
| 
 | ||||
|     // Final updates
 | ||||
|     let active_tree_root = get_active_validator_indices( | ||||
|         &state.validator_registry, | ||||
|         next_epoch + Epoch::from(spec.activation_exit_delay), | ||||
|     ) | ||||
|     .hash_tree_root(); | ||||
|     state.latest_active_index_roots[(next_epoch.as_usize() | ||||
|         + spec.activation_exit_delay as usize) | ||||
|         % spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]); | ||||
| 
 | ||||
|     state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] = | ||||
|         state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length]; | ||||
|     state.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = state | ||||
|         .get_randao_mix(current_epoch, spec) | ||||
|         .and_then(|x| Some(*x)) | ||||
|         .ok_or_else(|| Error::NoRandaoSeed)?; | ||||
|     state.latest_attestations = state | ||||
|         .latest_attestations | ||||
|         .iter() | ||||
|         .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch) | ||||
|         .cloned() | ||||
|         .collect(); | ||||
|     update_active_tree_index_roots(state, spec)?; | ||||
|     update_latest_slashed_balances(state, spec); | ||||
|     clean_attestations(state, spec); | ||||
| 
 | ||||
|     // Rotate the epoch caches to suit the epoch transition.
 | ||||
|     state.advance_caches(); | ||||
| 
 | ||||
|     debug!("Epoch transition complete."); | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| /// Returns a list of active validator indices for the state's current epoch.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { | ||||
| pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) -> Vec<usize> { | ||||
|     get_active_validator_indices( | ||||
|         &state.validator_registry, | ||||
|         state.slot.epoch(spec.slots_per_epoch), | ||||
|     ) | ||||
| } | ||||
| 
 | ||||
| /// Calculates various sets of attesters, including:
 | ||||
| ///
 | ||||
| /// - current epoch attesters
 | ||||
| /// - current epoch boundary attesters
 | ||||
| /// - previous epoch attesters
 | ||||
| /// - etc.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn initialize_validator_statuses( | ||||
|     state: &BeaconState, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<ValidatorStatuses, BeaconStateError> { | ||||
|     let mut statuses = ValidatorStatuses::new(state, spec); | ||||
| 
 | ||||
|     statuses.process_attestations(&state, &state.latest_attestations, spec)?; | ||||
| 
 | ||||
|     Ok(statuses) | ||||
| } | ||||
| 
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { | ||||
|     let next_epoch = state.next_epoch(spec); | ||||
|     let voting_period = spec.epochs_per_eth1_voting_period; | ||||
| 
 | ||||
| @ -122,13 +103,17 @@ fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Update the following fields on the `BeaconState`:
 | ||||
| ///
 | ||||
| /// - `justification_bitfield`.
 | ||||
| /// - `finalized_epoch`
 | ||||
| /// - `justified_epoch`
 | ||||
| /// - `previous_justified_epoch`
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| fn process_justification( | ||||
| pub fn process_justification( | ||||
|     state: &mut BeaconState, | ||||
|     current_total_balance: u64, | ||||
|     previous_total_balance: u64, | ||||
|     previous_epoch_boundary_attesting_balance: u64, | ||||
|     current_epoch_boundary_attesting_balance: u64, | ||||
|     total_balances: &TotalBalances, | ||||
|     spec: &ChainSpec, | ||||
| ) { | ||||
|     let previous_epoch = state.previous_epoch(spec); | ||||
| @ -141,7 +126,8 @@ fn process_justification( | ||||
|     //
 | ||||
|     // - Set the 2nd bit of the bitfield.
 | ||||
|     // - Set the previous epoch to be justified.
 | ||||
|     if (3 * previous_epoch_boundary_attesting_balance) >= (2 * previous_total_balance) { | ||||
|     if (3 * total_balances.previous_epoch_boundary_attesters) >= (2 * total_balances.previous_epoch) | ||||
|     { | ||||
|         state.justification_bitfield |= 2; | ||||
|         new_justified_epoch = previous_epoch; | ||||
|     } | ||||
| @ -149,7 +135,7 @@ fn process_justification( | ||||
|     //
 | ||||
|     // - Set the 1st bit of the bitfield.
 | ||||
|     // - Set the current epoch to be justified.
 | ||||
|     if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) { | ||||
|     if (3 * total_balances.current_epoch_boundary_attesters) >= (2 * total_balances.current_epoch) { | ||||
|         state.justification_bitfield |= 1; | ||||
|         new_justified_epoch = current_epoch; | ||||
|     } | ||||
| @ -199,9 +185,14 @@ fn process_justification( | ||||
|     state.justified_epoch = new_justified_epoch; | ||||
| } | ||||
| 
 | ||||
| pub type WinningRootHashSet = HashMap<u64, WinningRoot>; | ||||
| 
 | ||||
| fn process_crosslinks( | ||||
| /// Updates the following fields on the `BeaconState`:
 | ||||
| ///
 | ||||
| /// - `latest_crosslinks`
 | ||||
| ///
 | ||||
| /// Also returns a `WinningRootHashSet` for later use during epoch processing.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn process_crosslinks( | ||||
|     state: &mut BeaconState, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<WinningRootHashSet, Error> { | ||||
| @ -259,183 +250,148 @@ fn process_crosslinks( | ||||
|     Ok(winning_root_for_shards) | ||||
| } | ||||
| 
 | ||||
| /// Updates the following fields on the BeaconState:
 | ||||
| ///
 | ||||
| /// - `validator_balances`
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| fn process_rewards_and_penalities( | ||||
| pub fn process_rewards_and_penalities( | ||||
|     state: &mut BeaconState, | ||||
|     active_validator_indices: HashSet<usize>, | ||||
|     attesters: &AttesterSets, | ||||
|     previous_total_balance: u64, | ||||
|     statuses: &mut ValidatorStatuses, | ||||
|     winning_root_for_shards: &WinningRootHashSet, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<(), Error> { | ||||
|     let next_epoch = state.next_epoch(spec); | ||||
| 
 | ||||
|     let previous_epoch_attestations: Vec<&PendingAttestation> = state | ||||
|         .latest_attestations | ||||
|         .par_iter() | ||||
|         .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec)) | ||||
|         .collect(); | ||||
|     statuses.process_winning_roots(state, winning_root_for_shards, spec)?; | ||||
| 
 | ||||
|     let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; | ||||
|     let total_balances = &statuses.total_balances; | ||||
| 
 | ||||
|     let base_reward_quotient = | ||||
|         total_balances.previous_epoch.integer_sqrt() / spec.base_reward_quotient; | ||||
| 
 | ||||
|     // Guard against a divide-by-zero during the validator balance update.
 | ||||
|     if base_reward_quotient == 0 { | ||||
|         return Err(Error::BaseRewardQuotientIsZero); | ||||
|     } | ||||
|     // Guard against a divide-by-zero during the validator balance update.
 | ||||
|     if total_balances.previous_epoch == 0 { | ||||
|         return Err(Error::PreviousTotalBalanceIsZero); | ||||
|     } | ||||
|     // Guard against an out-of-bounds during the validator balance update.
 | ||||
|     if statuses.statuses.len() != state.validator_balances.len() { | ||||
|         return Err(Error::ValidatorStatusesInconsistent); | ||||
|     } | ||||
| 
 | ||||
|     // Justification and finalization
 | ||||
| 
 | ||||
|     let epochs_since_finality = next_epoch - state.finalized_epoch; | ||||
| 
 | ||||
|     if epochs_since_finality <= 4 { | ||||
|         for index in 0..state.validator_balances.len() { | ||||
|     state.validator_balances = state | ||||
|         .validator_balances | ||||
|         .par_iter() | ||||
|         .enumerate() | ||||
|         .map(|(index, &balance)| { | ||||
|             let mut balance = balance; | ||||
|             let status = &statuses.statuses[index]; | ||||
|             let base_reward = state.base_reward(index, base_reward_quotient, spec); | ||||
| 
 | ||||
|             // Expected FFG source
 | ||||
|             if attesters.previous_epoch.indices.contains(&index) { | ||||
|                 safe_add_assign!( | ||||
|                     state.validator_balances[index], | ||||
|                     base_reward * attesters.previous_epoch.balance / previous_total_balance | ||||
|                 ); | ||||
|             } else if active_validator_indices.contains(&index) { | ||||
|                 safe_sub_assign!(state.validator_balances[index], base_reward); | ||||
|             } | ||||
| 
 | ||||
|             // Expected FFG target
 | ||||
|             if attesters.previous_epoch_boundary.indices.contains(&index) { | ||||
|                 safe_add_assign!( | ||||
|                     state.validator_balances[index], | ||||
|                     base_reward * attesters.previous_epoch_boundary.balance | ||||
|                         / previous_total_balance | ||||
|                 ); | ||||
|             } else if active_validator_indices.contains(&index) { | ||||
|                 safe_sub_assign!(state.validator_balances[index], base_reward); | ||||
|             } | ||||
| 
 | ||||
|             // Expected beacon chain head
 | ||||
|             if attesters.previous_epoch_head.indices.contains(&index) { | ||||
|                 safe_add_assign!( | ||||
|                     state.validator_balances[index], | ||||
|                     base_reward * attesters.previous_epoch_head.balance / previous_total_balance | ||||
|                 ); | ||||
|             } else if active_validator_indices.contains(&index) { | ||||
|                 safe_sub_assign!(state.validator_balances[index], base_reward); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         // Inclusion distance
 | ||||
|         for &index in &attesters.previous_epoch.indices { | ||||
|             let base_reward = state.base_reward(index, base_reward_quotient, spec); | ||||
|             let inclusion_distance = | ||||
|                 inclusion_distance(state, &previous_epoch_attestations, index, spec)?; | ||||
| 
 | ||||
|             safe_add_assign!( | ||||
|                 state.validator_balances[index], | ||||
|                 base_reward * spec.min_attestation_inclusion_delay / inclusion_distance | ||||
|             ) | ||||
|         } | ||||
|     } else { | ||||
|         for index in 0..state.validator_balances.len() { | ||||
|             let inactivity_penalty = | ||||
|                 state.inactivity_penalty(index, epochs_since_finality, base_reward_quotient, spec); | ||||
| 
 | ||||
|             if active_validator_indices.contains(&index) { | ||||
|                 if !attesters.previous_epoch.indices.contains(&index) { | ||||
|                     safe_sub_assign!(state.validator_balances[index], inactivity_penalty); | ||||
|                 } | ||||
|                 if !attesters.previous_epoch_boundary.indices.contains(&index) { | ||||
|                     safe_sub_assign!(state.validator_balances[index], inactivity_penalty); | ||||
|                 } | ||||
|                 if !attesters.previous_epoch_head.indices.contains(&index) { | ||||
|                     safe_sub_assign!(state.validator_balances[index], inactivity_penalty); | ||||
|                 } | ||||
| 
 | ||||
|                 if state.validator_registry[index].slashed { | ||||
|                     let base_reward = state.base_reward(index, base_reward_quotient, spec); | ||||
|                     safe_sub_assign!( | ||||
|                         state.validator_balances[index], | ||||
|                         2 * inactivity_penalty + base_reward | ||||
|             if epochs_since_finality <= 4 { | ||||
|                 // Expected FFG source
 | ||||
|                 if status.is_previous_epoch_attester { | ||||
|                     safe_add_assign!( | ||||
|                         balance, | ||||
|                         base_reward * total_balances.previous_epoch_attesters | ||||
|                             / total_balances.previous_epoch | ||||
|                     ); | ||||
|                 } else if status.is_active_in_previous_epoch { | ||||
|                     safe_sub_assign!(balance, base_reward); | ||||
|                 } | ||||
| 
 | ||||
|                 // Expected FFG target
 | ||||
|                 if status.is_previous_epoch_boundary_attester { | ||||
|                     safe_add_assign!( | ||||
|                         balance, | ||||
|                         base_reward * total_balances.previous_epoch_boundary_attesters | ||||
|                             / total_balances.previous_epoch | ||||
|                     ); | ||||
|                 } else if status.is_active_in_previous_epoch { | ||||
|                     safe_sub_assign!(balance, base_reward); | ||||
|                 } | ||||
| 
 | ||||
|                 // Expected beacon chain head
 | ||||
|                 if status.is_previous_epoch_head_attester { | ||||
|                     safe_add_assign!( | ||||
|                         balance, | ||||
|                         base_reward * total_balances.previous_epoch_head_attesters | ||||
|                             / total_balances.previous_epoch | ||||
|                     ); | ||||
|                 } else if status.is_active_in_previous_epoch { | ||||
|                     safe_sub_assign!(balance, base_reward); | ||||
|                 }; | ||||
|             } else { | ||||
|                 let inactivity_penalty = state.inactivity_penalty( | ||||
|                     index, | ||||
|                     epochs_since_finality, | ||||
|                     base_reward_quotient, | ||||
|                     spec, | ||||
|                 ); | ||||
| 
 | ||||
|                 if status.is_active_in_previous_epoch { | ||||
|                     if !status.is_previous_epoch_attester { | ||||
|                         safe_sub_assign!(balance, inactivity_penalty); | ||||
|                     } | ||||
|                     if !status.is_previous_epoch_boundary_attester { | ||||
|                         safe_sub_assign!(balance, inactivity_penalty); | ||||
|                     } | ||||
|                     if !status.is_previous_epoch_head_attester { | ||||
|                         safe_sub_assign!(balance, inactivity_penalty); | ||||
|                     } | ||||
| 
 | ||||
|                     if state.validator_registry[index].slashed { | ||||
|                         let base_reward = state.base_reward(index, base_reward_quotient, spec); | ||||
|                         safe_sub_assign!(balance, 2 * inactivity_penalty + base_reward); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         for &index in &attesters.previous_epoch.indices { | ||||
|             let base_reward = state.base_reward(index, base_reward_quotient, spec); | ||||
|             let inclusion_distance = | ||||
|                 inclusion_distance(state, &previous_epoch_attestations, index, spec)?; | ||||
|             // Crosslinks
 | ||||
| 
 | ||||
|             safe_sub_assign!( | ||||
|                 state.validator_balances[index], | ||||
|                 base_reward | ||||
|                     - base_reward * spec.min_attestation_inclusion_delay / inclusion_distance | ||||
|             ); | ||||
|         } | ||||
|     } | ||||
|             if let Some(ref info) = status.winning_root_info { | ||||
|                 safe_add_assign!( | ||||
|                     balance, | ||||
|                     base_reward * info.total_attesting_balance / info.total_committee_balance | ||||
|                 ); | ||||
|             } else { | ||||
|                 safe_sub_assign!(balance, base_reward); | ||||
|             } | ||||
| 
 | ||||
|             balance | ||||
|         }) | ||||
|         .collect(); | ||||
| 
 | ||||
|     // Attestation inclusion
 | ||||
| 
 | ||||
|     for &index in &attesters.previous_epoch.indices { | ||||
|         let inclusion_slot = inclusion_slot(state, &previous_epoch_attestations[..], index, spec)?; | ||||
| 
 | ||||
|         let proposer_index = state | ||||
|             .get_beacon_proposer_index(inclusion_slot, spec) | ||||
|             .map_err(|_| Error::UnableToDetermineProducer)?; | ||||
| 
 | ||||
|         let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); | ||||
| 
 | ||||
|         safe_add_assign!( | ||||
|             state.validator_balances[proposer_index], | ||||
|             base_reward / spec.attestation_inclusion_reward_quotient | ||||
|         ); | ||||
|     // Guard against an out-of-bounds during the attester inclusion balance update.
 | ||||
|     if statuses.statuses.len() != state.validator_registry.len() { | ||||
|         return Err(Error::ValidatorStatusesInconsistent); | ||||
|     } | ||||
| 
 | ||||
|     //Crosslinks
 | ||||
|     for (index, _validator) in state.validator_registry.iter().enumerate() { | ||||
|         let status = &statuses.statuses[index]; | ||||
| 
 | ||||
|     for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) { | ||||
|         // Clone removes the borrow which becomes an issue when mutating `state.balances`.
 | ||||
|         let crosslink_committees_at_slot = | ||||
|             state.get_crosslink_committees_at_slot(slot, spec)?.clone(); | ||||
|         if status.is_previous_epoch_attester { | ||||
|             let proposer_index = status.inclusion_info.proposer_index; | ||||
|             let inclusion_distance = status.inclusion_info.distance; | ||||
| 
 | ||||
|         for (crosslink_committee, shard) in crosslink_committees_at_slot { | ||||
|             let shard = shard as u64; | ||||
|             let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); | ||||
| 
 | ||||
|             // Note: I'm a little uncertain of the logic here -- I am waiting for spec v0.5.0 to
 | ||||
|             // clear it up.
 | ||||
|             //
 | ||||
|             // What happens here is:
 | ||||
|             //
 | ||||
|             // - If there was some crosslink root elected by the super-majority of this committee,
 | ||||
|             // then we reward all who voted for that root and penalize all that did not.
 | ||||
|             // - However, if there _was not_ some super-majority-voted crosslink root, then penalize
 | ||||
|             // all the validators.
 | ||||
|             //
 | ||||
|             // I'm not quite sure that the second case (no super-majority crosslink) is correct.
 | ||||
|             if let Some(winning_root) = winning_root_for_shards.get(&shard) { | ||||
|                 // Hash set de-dedups and (hopefully) offers a speed improvement from faster
 | ||||
|                 // lookups.
 | ||||
|                 let attesting_validator_indices: HashSet<usize> = | ||||
|                     HashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned()); | ||||
| 
 | ||||
|                 for &index in &crosslink_committee { | ||||
|                     let base_reward = state.base_reward(index, base_reward_quotient, spec); | ||||
| 
 | ||||
|                     let total_balance = state.get_total_balance(&crosslink_committee, spec); | ||||
| 
 | ||||
|                     if attesting_validator_indices.contains(&index) { | ||||
|                         safe_add_assign!( | ||||
|                             state.validator_balances[index], | ||||
|                             base_reward * winning_root.total_attesting_balance / total_balance | ||||
|                         ); | ||||
|                     } else { | ||||
|                         safe_sub_assign!(state.validator_balances[index], base_reward); | ||||
|                     } | ||||
|                 } | ||||
|             } else { | ||||
|                 for &index in &crosslink_committee { | ||||
|                     let base_reward = state.base_reward(index, base_reward_quotient, spec); | ||||
| 
 | ||||
|                     safe_sub_assign!(state.validator_balances[index], base_reward); | ||||
|                 } | ||||
|             if inclusion_distance > 0 && inclusion_distance < Slot::max_value() { | ||||
|                 safe_add_assign!( | ||||
|                     state.validator_balances[proposer_index], | ||||
|                     base_reward * spec.min_attestation_inclusion_delay | ||||
|                         / inclusion_distance.as_u64() | ||||
|                 ) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| @ -443,8 +399,10 @@ fn process_rewards_and_penalities( | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| // Spec v0.4.0
 | ||||
| fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { | ||||
| /// Peforms a validator registry update, if required.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { | ||||
|     let current_epoch = state.current_epoch(spec); | ||||
|     let next_epoch = state.next_epoch(spec); | ||||
| 
 | ||||
| @ -489,3 +447,51 @@ fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Resu | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| /// Updates the state's `latest_active_index_roots` field with a tree hash the active validator
 | ||||
| /// indices for the next epoch.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn update_active_tree_index_roots( | ||||
|     state: &mut BeaconState, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<(), Error> { | ||||
|     let next_epoch = state.next_epoch(spec); | ||||
| 
 | ||||
|     let active_tree_root = get_active_validator_indices( | ||||
|         &state.validator_registry, | ||||
|         next_epoch + Epoch::from(spec.activation_exit_delay), | ||||
|     ) | ||||
|     .hash_tree_root(); | ||||
| 
 | ||||
|     state.latest_active_index_roots[(next_epoch.as_usize() | ||||
|         + spec.activation_exit_delay as usize) | ||||
|         % spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]); | ||||
| 
 | ||||
|     Ok(()) | ||||
| } | ||||
| 
 | ||||
| /// Advances the state's `latest_slashed_balances` field.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) { | ||||
|     let current_epoch = state.current_epoch(spec); | ||||
|     let next_epoch = state.next_epoch(spec); | ||||
| 
 | ||||
|     state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] = | ||||
|         state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length]; | ||||
| } | ||||
| 
 | ||||
| /// Removes all pending attestations from the previous epoch.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| pub fn clean_attestations(state: &mut BeaconState, spec: &ChainSpec) { | ||||
|     let current_epoch = state.current_epoch(spec); | ||||
| 
 | ||||
|     state.latest_attestations = state | ||||
|         .latest_attestations | ||||
|         .iter() | ||||
|         .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch) | ||||
|         .cloned() | ||||
|         .collect(); | ||||
| } | ||||
|  | ||||
| @ -1,98 +0,0 @@ | ||||
| use std::collections::HashSet; | ||||
| use types::*; | ||||
| 
 | ||||
| #[derive(Default)] | ||||
| pub struct Attesters { | ||||
|     pub indices: HashSet<usize>, | ||||
|     pub balance: u64, | ||||
| } | ||||
| 
 | ||||
| impl Attesters { | ||||
|     fn add(&mut self, additional_indices: &[usize], additional_balance: u64) { | ||||
|         self.indices.reserve(additional_indices.len()); | ||||
|         for i in additional_indices { | ||||
|             self.indices.insert(*i); | ||||
|         } | ||||
|         self.balance.saturating_add(additional_balance); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| pub struct AttesterSets { | ||||
|     pub current_epoch: Attesters, | ||||
|     pub current_epoch_boundary: Attesters, | ||||
|     pub previous_epoch: Attesters, | ||||
|     pub previous_epoch_boundary: Attesters, | ||||
|     pub previous_epoch_head: Attesters, | ||||
| } | ||||
| 
 | ||||
| impl AttesterSets { | ||||
|     pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result<Self, BeaconStateError> { | ||||
|         let mut current_epoch = Attesters::default(); | ||||
|         let mut current_epoch_boundary = Attesters::default(); | ||||
|         let mut previous_epoch = Attesters::default(); | ||||
|         let mut previous_epoch_boundary = Attesters::default(); | ||||
|         let mut previous_epoch_head = Attesters::default(); | ||||
| 
 | ||||
|         for a in &state.latest_attestations { | ||||
|             let attesting_indices = | ||||
|                 state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; | ||||
|             let attesting_balance = state.get_total_balance(&attesting_indices, spec); | ||||
| 
 | ||||
|             if is_from_epoch(a, state.current_epoch(spec), spec) { | ||||
|                 current_epoch.add(&attesting_indices, attesting_balance); | ||||
| 
 | ||||
|                 if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? { | ||||
|                     current_epoch_boundary.add(&attesting_indices, attesting_balance); | ||||
|                 } | ||||
|             } else if is_from_epoch(a, state.previous_epoch(spec), spec) { | ||||
|                 previous_epoch.add(&attesting_indices, attesting_balance); | ||||
| 
 | ||||
|                 if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { | ||||
|                     previous_epoch_boundary.add(&attesting_indices, attesting_balance); | ||||
|                 } | ||||
| 
 | ||||
|                 if has_common_beacon_block_root(a, state, spec)? { | ||||
|                     previous_epoch_head.add(&attesting_indices, attesting_balance); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         Ok(Self { | ||||
|             current_epoch, | ||||
|             current_epoch_boundary, | ||||
|             previous_epoch, | ||||
|             previous_epoch_boundary, | ||||
|             previous_epoch_head, | ||||
|         }) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { | ||||
|     a.data.slot.epoch(spec.slots_per_epoch) == epoch | ||||
| } | ||||
| 
 | ||||
| fn has_common_epoch_boundary_root( | ||||
|     a: &PendingAttestation, | ||||
|     state: &BeaconState, | ||||
|     epoch: Epoch, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<bool, BeaconStateError> { | ||||
|     let slot = epoch.start_slot(spec.slots_per_epoch); | ||||
|     let state_boundary_root = *state | ||||
|         .get_block_root(slot, spec) | ||||
|         .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; | ||||
| 
 | ||||
|     Ok(a.data.epoch_boundary_root == state_boundary_root) | ||||
| } | ||||
| 
 | ||||
| fn has_common_beacon_block_root( | ||||
|     a: &PendingAttestation, | ||||
|     state: &BeaconState, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<bool, BeaconStateError> { | ||||
|     let state_block_root = *state | ||||
|         .get_block_root(a.data.slot, spec) | ||||
|         .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; | ||||
| 
 | ||||
|     Ok(a.data.beacon_block_root == state_block_root) | ||||
| } | ||||
| @ -6,6 +6,14 @@ pub enum EpochProcessingError { | ||||
|     NoBlockRoots, | ||||
|     BaseRewardQuotientIsZero, | ||||
|     NoRandaoSeed, | ||||
|     PreviousTotalBalanceIsZero, | ||||
|     InclusionDistanceZero, | ||||
|     ValidatorStatusesInconsistent, | ||||
|     /// Unable to get the inclusion distance for a validator that should have an inclusion
 | ||||
|     /// distance. This indicates an internal inconsistency.
 | ||||
|     ///
 | ||||
|     /// (validator_index)
 | ||||
|     InclusionSlotsInconsistent(usize), | ||||
|     BeaconStateError(BeaconStateError), | ||||
|     InclusionError(InclusionError), | ||||
| } | ||||
|  | ||||
| @ -1,21 +1,21 @@ | ||||
| #![cfg(test)] | ||||
| use crate::per_epoch_processing; | ||||
| use env_logger::{Builder, Env}; | ||||
| use types::beacon_state::BeaconStateBuilder; | ||||
| use types::test_utils::TestingBeaconStateBuilder; | ||||
| use types::*; | ||||
| 
 | ||||
| #[test] | ||||
| fn runs_without_error() { | ||||
|     Builder::from_env(Env::default().default_filter_or("error")).init(); | ||||
| 
 | ||||
|     let mut builder = BeaconStateBuilder::new(8); | ||||
|     builder.spec = ChainSpec::few_validators(); | ||||
|     let spec = ChainSpec::few_validators(); | ||||
| 
 | ||||
|     builder.build().unwrap(); | ||||
|     builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4); | ||||
|     let mut builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); | ||||
| 
 | ||||
|     let mut state = builder.cloned_state(); | ||||
|     let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch); | ||||
|     builder.teleport_to_slot(target_slot, &spec); | ||||
| 
 | ||||
|     let spec = &builder.spec; | ||||
|     per_epoch_processing(&mut state, spec).unwrap(); | ||||
|     let (mut state, _keypairs) = builder.build(); | ||||
| 
 | ||||
|     per_epoch_processing(&mut state, &spec).unwrap(); | ||||
| } | ||||
|  | ||||
| @ -0,0 +1,319 @@ | ||||
| use super::WinningRootHashSet; | ||||
| use types::*; | ||||
| 
 | ||||
/// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self`
/// as is.
///
/// NOTE: the receiver is captured as the ident `$self_` (rather than `self`) because
/// `macro_rules!` cannot capture the `self` keyword with an `ident` fragment.
macro_rules! set_self_if_other_is_true {
    ($self_: ident, $other: ident, $var: ident) => {
        // Never demotes a `true` on `$self_` back to `false`.
        if $other.$var {
            $self_.$var = true;
        }
    };
}
| 
 | ||||
/// The information required to reward some validator for their participation in a "winning"
/// crosslink root.
#[derive(Default, Clone)]
pub struct WinningRootInfo {
    /// The total balance of the crosslink committee.
    pub total_committee_balance: u64,
    /// The total balance of the crosslink committee that attested for the "winning" root.
    pub total_attesting_balance: u64,
}
| 
 | ||||
/// The information required to reward a block producer for including an attestation in a block.
#[derive(Clone)]
pub struct InclusionInfo {
    /// The earliest slot a validator had an attestation included in the previous epoch.
    pub slot: Slot,
    /// The distance between the attestation slot and the slot that attestation was included in a
    /// block.
    pub distance: Slot,
    /// The index of the proposer at the slot where the attestation was included.
    pub proposer_index: usize,
}
| 
 | ||||
| impl Default for InclusionInfo { | ||||
|     /// Defaults to `slot` and `distance` at their maximum values and `proposer_index` at zero.
 | ||||
|     fn default() -> Self { | ||||
|         Self { | ||||
|             slot: Slot::max_value(), | ||||
|             distance: Slot::max_value(), | ||||
|             proposer_index: 0, | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl InclusionInfo { | ||||
|     /// Tests if some `other` `InclusionInfo` has a lower inclusion slot than `self`. If so,
 | ||||
|     /// replaces `self` with `other`.
 | ||||
|     pub fn update(&mut self, other: &Self) { | ||||
|         if other.slot < self.slot { | ||||
|             self.slot = other.slot; | ||||
|             self.distance = other.distance; | ||||
|             self.proposer_index = other.proposer_index; | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// Information required to reward some validator during the current and previous epoch.
#[derive(Default, Clone)]
pub struct AttesterStatus {
    /// True if the validator was active in the state's _current_ epoch.
    pub is_active_in_current_epoch: bool,
    /// True if the validator was active in the state's _previous_ epoch.
    pub is_active_in_previous_epoch: bool,

    /// True if the validator had an attestation included in the _current_ epoch.
    pub is_current_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _current_
    /// epoch matches the block root known to the state.
    pub is_current_epoch_boundary_attester: bool,
    /// True if the validator had an attestation included in the _previous_ epoch.
    pub is_previous_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _previous_
    /// epoch matches the block root known to the state.
    pub is_previous_epoch_boundary_attester: bool,
    /// True if the validator's beacon block root attestation in the _previous_ epoch at the
    /// attestation's slot (`attestation_data.slot`) matches the block root known to the state.
    pub is_previous_epoch_head_attester: bool,

    /// Information used to reward the block producer of this validator's earliest-included
    /// attestation.
    pub inclusion_info: InclusionInfo,
    /// Information used to reward/penalize the validator if they voted in the super-majority for
    /// some shard block. `None` until `ValidatorStatuses::process_winning_roots` records one.
    pub winning_root_info: Option<WinningRootInfo>,
}
| 
 | ||||
| impl AttesterStatus { | ||||
|     /// Accepts some `other` `AttesterStatus` and updates `self` if required.
 | ||||
|     ///
 | ||||
|     /// Will never set one of the `bool` fields to `false`, it will only set it to `true` if other
 | ||||
|     /// contains a `true` field.
 | ||||
|     ///
 | ||||
|     /// Note: does not update the winning root info, this is done manually.
 | ||||
|     pub fn update(&mut self, other: &Self) { | ||||
|         // Update all the bool fields, only updating `self` if `other` is true (never setting
 | ||||
|         // `self` to false).
 | ||||
|         set_self_if_other_is_true!(self, other, is_active_in_current_epoch); | ||||
|         set_self_if_other_is_true!(self, other, is_active_in_previous_epoch); | ||||
|         set_self_if_other_is_true!(self, other, is_current_epoch_attester); | ||||
|         set_self_if_other_is_true!(self, other, is_current_epoch_boundary_attester); | ||||
|         set_self_if_other_is_true!(self, other, is_previous_epoch_attester); | ||||
|         set_self_if_other_is_true!(self, other, is_previous_epoch_boundary_attester); | ||||
|         set_self_if_other_is_true!(self, other, is_previous_epoch_head_attester); | ||||
| 
 | ||||
|         self.inclusion_info.update(&other.inclusion_info); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
/// The total effective balances for different sets of validators during the previous and current
/// epochs.
#[derive(Default, Clone)]
pub struct TotalBalances {
    /// The total effective balance of all active validators during the _current_ epoch.
    pub current_epoch: u64,
    /// The total effective balance of all active validators during the _previous_ epoch.
    pub previous_epoch: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch.
    pub current_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _current_ epoch.
    pub current_epoch_boundary_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch.
    pub previous_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
    pub previous_epoch_boundary_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the time of attestation.
    pub previous_epoch_head_attesters: u64,
}
| 
 | ||||
/// Summarised information about validator participation in the _previous_ and _current_ epochs of
/// some `BeaconState`.
#[derive(Clone)]
pub struct ValidatorStatuses {
    /// Information about each individual validator from the state's validator registry; indexed
    /// identically to `state.validator_registry`.
    pub statuses: Vec<AttesterStatus>,
    /// Summed balances for various sets of validators.
    pub total_balances: TotalBalances,
}
| 
 | ||||
impl ValidatorStatuses {
    /// Initializes a new instance, determining:
    ///
    /// - Active validators
    /// - Total balances for the current and previous epochs.
    ///
    /// One `AttesterStatus` is produced per entry in `state.validator_registry`, preserving
    /// registry order so later lookups can use the validator index directly.
    ///
    /// Spec v0.4.0
    pub fn new(state: &BeaconState, spec: &ChainSpec) -> Self {
        let mut statuses = Vec::with_capacity(state.validator_registry.len());
        let mut total_balances = TotalBalances::default();

        for (i, validator) in state.validator_registry.iter().enumerate() {
            let mut status = AttesterStatus::default();

            if validator.is_active_at(state.current_epoch(spec)) {
                status.is_active_in_current_epoch = true;
                total_balances.current_epoch += state.get_effective_balance(i, spec);
            }

            // Note: a validator may be counted in both epochs' totals if active in both.
            if validator.is_active_at(state.previous_epoch(spec)) {
                status.is_active_in_previous_epoch = true;
                total_balances.previous_epoch += state.get_effective_balance(i, spec);
            }

            statuses.push(status);
        }

        Self {
            statuses,
            total_balances,
        }
    }

    /// Process some attestations from the given `state` updating the `statuses` and
    /// `total_balances` fields.
    ///
    /// Attestations that are from neither the current nor the previous epoch contribute nothing.
    ///
    /// # Errors
    ///
    /// Returns a `BeaconStateError` if attestation participants or the proposer index cannot be
    /// determined from the state.
    ///
    /// Spec v0.4.0
    pub fn process_attestations(
        &mut self,
        state: &BeaconState,
        attestations: &[PendingAttestation],
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        for a in attestations {
            let attesting_indices =
                state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
            let attesting_balance = state.get_total_balance(&attesting_indices, spec);

            let mut status = AttesterStatus::default();

            // Profile this attestation, updating the total balances and generating an
            // `AttesterStatus` object that applies to all participants in the attestation.
            if is_from_epoch(a, state.current_epoch(spec), spec) {
                self.total_balances.current_epoch_attesters += attesting_balance;
                status.is_current_epoch_attester = true;

                if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? {
                    self.total_balances.current_epoch_boundary_attesters += attesting_balance;
                    status.is_current_epoch_boundary_attester = true;
                }
            } else if is_from_epoch(a, state.previous_epoch(spec), spec) {
                self.total_balances.previous_epoch_attesters += attesting_balance;
                status.is_previous_epoch_attester = true;

                // The inclusion slot and distance are only required for previous epoch attesters.
                status.inclusion_info = InclusionInfo {
                    slot: a.inclusion_slot,
                    distance: inclusion_distance(a),
                    proposer_index: state.get_beacon_proposer_index(a.inclusion_slot, spec)?,
                };

                if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? {
                    self.total_balances.previous_epoch_boundary_attesters += attesting_balance;
                    status.is_previous_epoch_boundary_attester = true;
                }

                if has_common_beacon_block_root(a, state, spec)? {
                    self.total_balances.previous_epoch_head_attesters += attesting_balance;
                    status.is_previous_epoch_head_attester = true;
                }
            }

            // Loop through the participating validator indices and update the status vec.
            // `AttesterStatus::update` merges flags and keeps the earliest inclusion, so a
            // validator appearing in multiple attestations accumulates correctly.
            for validator_index in attesting_indices {
                self.statuses[validator_index].update(&status);
            }
        }

        Ok(())
    }

    /// Update the `statuses` for each validator based upon whether or not they attested to the
    /// "winning" shard block root for the previous epoch.
    ///
    /// # Errors
    ///
    /// Returns a `BeaconStateError` if crosslink committees cannot be read for a slot.
    ///
    /// Spec v0.4.0
    pub fn process_winning_roots(
        &mut self,
        state: &BeaconState,
        winning_roots: &WinningRootHashSet,
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        // Loop through each slot in the previous epoch.
        for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) {
            let crosslink_committees_at_slot =
                state.get_crosslink_committees_at_slot(slot, spec)?;

            // Loop through each committee in the slot.
            for (crosslink_committee, shard) in crosslink_committees_at_slot {
                // If there was some winning crosslink root for the committee's shard.
                if let Some(winning_root) = winning_roots.get(&shard) {
                    let total_committee_balance =
                        state.get_total_balance(&crosslink_committee, spec);
                    for &validator_index in &winning_root.attesting_validator_indices {
                        // Take note of the balance information for the winning root, it will be
                        // used later to calculate rewards for that validator.
                        self.statuses[validator_index].winning_root_info = Some(WinningRootInfo {
                            total_committee_balance,
                            total_attesting_balance: winning_root.total_attesting_balance,
                        })
                    }
                }
            }
        }

        Ok(())
    }
}
| 
 | ||||
| /// Returns the distance between when the attestation was created and when it was included in a
 | ||||
| /// block.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| fn inclusion_distance(a: &PendingAttestation) -> Slot { | ||||
|     a.inclusion_slot - a.data.slot | ||||
| } | ||||
| 
 | ||||
| /// Returns `true` if some `PendingAttestation` is from the supplied `epoch`.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { | ||||
|     a.data.slot.epoch(spec.slots_per_epoch) == epoch | ||||
| } | ||||
| 
 | ||||
| /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for
 | ||||
| /// the first slot of the given epoch.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| fn has_common_epoch_boundary_root( | ||||
|     a: &PendingAttestation, | ||||
|     state: &BeaconState, | ||||
|     epoch: Epoch, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<bool, BeaconStateError> { | ||||
|     let slot = epoch.start_slot(spec.slots_per_epoch); | ||||
|     let state_boundary_root = *state | ||||
|         .get_block_root(slot, spec) | ||||
|         .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; | ||||
| 
 | ||||
|     Ok(a.data.epoch_boundary_root == state_boundary_root) | ||||
| } | ||||
| 
 | ||||
| /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for
 | ||||
| /// the current slot of the `PendingAttestation`.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| fn has_common_beacon_block_root( | ||||
|     a: &PendingAttestation, | ||||
|     state: &BeaconState, | ||||
|     spec: &ChainSpec, | ||||
| ) -> Result<bool, BeaconStateError> { | ||||
|     let state_block_root = *state | ||||
|         .get_block_root(a.data.slot, spec) | ||||
|         .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; | ||||
| 
 | ||||
|     Ok(a.data.beacon_block_root == state_block_root) | ||||
| } | ||||
| @ -7,6 +7,7 @@ edition = "2018" | ||||
| [dependencies] | ||||
| bls = { path = "../utils/bls" } | ||||
| boolean-bitfield = { path = "../utils/boolean-bitfield" } | ||||
| dirs = "1.0" | ||||
| ethereum-types = "0.5" | ||||
| hashing = { path = "../utils/hashing" } | ||||
| honey-badger-split =  { path = "../utils/honey-badger-split" } | ||||
| @ -17,6 +18,7 @@ rand = "0.5.5" | ||||
| serde = "1.0" | ||||
| serde_derive = "1.0" | ||||
| serde_json = "1.0" | ||||
| serde_yaml = "0.8" | ||||
| slog = "^2.2.3" | ||||
| ssz = { path = "../utils/ssz" } | ||||
| ssz_derive = { path = "../utils/ssz_derive" } | ||||
|  | ||||
| @ -20,29 +20,6 @@ pub struct Attestation { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Attestation::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Attestation::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Attestation); | ||||
| } | ||||
|  | ||||
| @ -38,29 +38,6 @@ impl Eq for AttestationData {} | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = AttestationData::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = AttestationData::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(AttestationData); | ||||
| } | ||||
|  | ||||
| @ -25,31 +25,6 @@ impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit { | ||||
| #[cfg(test)] | ||||
| mod test { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
| 
 | ||||
|         let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
| 
 | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(AttestationDataAndCustodyBit); | ||||
| } | ||||
|  | ||||
| @ -4,10 +4,6 @@ use serde_derive::Serialize; | ||||
| use ssz_derive::{Decode, Encode, TreeHash}; | ||||
| use test_random_derive::TestRandom; | ||||
| 
 | ||||
| mod builder; | ||||
| 
 | ||||
| pub use builder::AttesterSlashingBuilder; | ||||
| 
 | ||||
| /// Two conflicting attestations.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| @ -20,29 +16,6 @@ pub struct AttesterSlashing { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = AttesterSlashing::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = AttesterSlashing::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(AttesterSlashing); | ||||
| } | ||||
|  | ||||
| @ -1,9 +1,9 @@ | ||||
| use crate::test_utils::TestRandom; | ||||
| use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; | ||||
| use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Proposal, Slot}; | ||||
| use bls::Signature; | ||||
| use rand::RngCore; | ||||
| use serde_derive::Serialize; | ||||
| use ssz::TreeHash; | ||||
| use ssz::{SignedRoot, TreeHash}; | ||||
| use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; | ||||
| use test_random_derive::TestRandom; | ||||
| 
 | ||||
| @ -23,6 +23,8 @@ pub struct BeaconBlock { | ||||
| 
 | ||||
| impl BeaconBlock { | ||||
|     /// Produce the first block of the Beacon Chain.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn genesis(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock { | ||||
|         BeaconBlock { | ||||
|             slot: spec.genesis_slot, | ||||
| @ -33,7 +35,6 @@ impl BeaconBlock { | ||||
|                 deposit_root: spec.zero_hash, | ||||
|                 block_hash: spec.zero_hash, | ||||
|             }, | ||||
|             signature: spec.empty_signature.clone(), | ||||
|             body: BeaconBlockBody { | ||||
|                 proposer_slashings: vec![], | ||||
|                 attester_slashings: vec![], | ||||
| @ -42,41 +43,33 @@ impl BeaconBlock { | ||||
|                 voluntary_exits: vec![], | ||||
|                 transfers: vec![], | ||||
|             }, | ||||
|             signature: spec.empty_signature.clone(), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the `hash_tree_root` of the block.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn canonical_root(&self) -> Hash256 { | ||||
|         Hash256::from_slice(&self.hash_tree_root()[..]) | ||||
|     } | ||||
| 
 | ||||
|     /// Returns an unsigned proposal for block.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn proposal(&self, spec: &ChainSpec) -> Proposal { | ||||
|         Proposal { | ||||
|             slot: self.slot, | ||||
|             shard: spec.beacon_chain_shard_number, | ||||
|             block_root: Hash256::from_slice(&self.signed_root()), | ||||
|             signature: spec.empty_signature.clone(), | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = BeaconBlock::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = BeaconBlock::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(BeaconBlock); | ||||
| } | ||||
|  | ||||
| @ -21,29 +21,6 @@ pub struct BeaconBlockBody { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = BeaconBlockBody::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = BeaconBlockBody::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(BeaconBlockBody); | ||||
| } | ||||
|  | ||||
| @ -1,15 +1,13 @@ | ||||
| use self::epoch_cache::EpochCache; | ||||
| use crate::test_utils::TestRandom; | ||||
| use crate::{validator_registry::get_active_validator_indices, *}; | ||||
| use bls::verify_proof_of_possession; | ||||
| use helpers::*; | ||||
| use honey_badger_split::SplitExt; | ||||
| use int_to_bytes::int_to_bytes32; | ||||
| use log::{debug, error, trace}; | ||||
| use rand::RngCore; | ||||
| use rayon::prelude::*; | ||||
| use serde_derive::Serialize; | ||||
| use ssz::{hash, Decodable, DecodeError, Encodable, SszStream, TreeHash}; | ||||
| use ssz::{hash, Decodable, DecodeError, Encodable, SignedRoot, SszStream, TreeHash}; | ||||
| use std::collections::HashMap; | ||||
| use swap_or_not_shuffle::shuffle_list; | ||||
| 
 | ||||
| @ -114,18 +112,18 @@ pub struct BeaconState { | ||||
| 
 | ||||
| impl BeaconState { | ||||
|     /// Produce the first state of the Beacon Chain.
 | ||||
|     pub fn genesis_without_validators( | ||||
|         genesis_time: u64, | ||||
|         latest_eth1_data: Eth1Data, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<BeaconState, Error> { | ||||
|         debug!("Creating genesis state (without validator processing)."); | ||||
|     ///
 | ||||
|     /// This does not fully build a genesis beacon state, it omits processing of initial validator
 | ||||
|     /// deposits. To obtain a full genesis beacon state, use the `BeaconStateBuilder`.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn genesis(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> BeaconState { | ||||
|         let initial_crosslink = Crosslink { | ||||
|             epoch: spec.genesis_epoch, | ||||
|             crosslink_data_root: spec.zero_hash, | ||||
|         }; | ||||
| 
 | ||||
|         Ok(BeaconState { | ||||
|         BeaconState { | ||||
|             /* | ||||
|              * Misc | ||||
|              */ | ||||
| @ -188,52 +186,12 @@ impl BeaconState { | ||||
|              */ | ||||
|             cache_index_offset: 0, | ||||
|             caches: vec![EpochCache::empty(); CACHED_EPOCHS], | ||||
|         }) | ||||
|     } | ||||
| 
 | ||||
|     /// Produce the first state of the Beacon Chain.
 | ||||
|     pub fn genesis( | ||||
|         genesis_time: u64, | ||||
|         initial_validator_deposits: Vec<Deposit>, | ||||
|         latest_eth1_data: Eth1Data, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<BeaconState, Error> { | ||||
|         let mut genesis_state = | ||||
|             BeaconState::genesis_without_validators(genesis_time, latest_eth1_data, spec)?; | ||||
| 
 | ||||
|         debug!("Processing genesis deposits..."); | ||||
| 
 | ||||
|         let deposit_data = initial_validator_deposits | ||||
|             .par_iter() | ||||
|             .map(|deposit| &deposit.deposit_data) | ||||
|             .collect(); | ||||
| 
 | ||||
|         genesis_state.process_deposits(deposit_data, spec); | ||||
| 
 | ||||
|         trace!("Processed genesis deposits."); | ||||
| 
 | ||||
|         for validator_index in 0..genesis_state.validator_registry.len() { | ||||
|             if genesis_state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount | ||||
|             { | ||||
|                 genesis_state.activate_validator(validator_index, true, spec); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         genesis_state.deposit_index = initial_validator_deposits.len() as u64; | ||||
| 
 | ||||
|         let genesis_active_index_root = hash_tree_root(get_active_validator_indices( | ||||
|             &genesis_state.validator_registry, | ||||
|             spec.genesis_epoch, | ||||
|         )); | ||||
|         genesis_state.latest_active_index_roots = | ||||
|             vec![genesis_active_index_root; spec.latest_active_index_roots_length]; | ||||
|         genesis_state.current_shuffling_seed = | ||||
|             genesis_state.generate_seed(spec.genesis_epoch, spec)?; | ||||
| 
 | ||||
|         Ok(genesis_state) | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the `hash_tree_root` of the state.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn canonical_root(&self) -> Hash256 { | ||||
|         Hash256::from_slice(&self.hash_tree_root()[..]) | ||||
|     } | ||||
| @ -541,12 +499,14 @@ impl BeaconState { | ||||
|             return Err(Error::InvalidBitfield); | ||||
|         } | ||||
| 
 | ||||
|         let mut participants = vec![]; | ||||
|         let mut participants = Vec::with_capacity(committee.len()); | ||||
|         for (i, validator_index) in committee.iter().enumerate() { | ||||
|             if bitfield.get(i).unwrap() { | ||||
|                 participants.push(*validator_index); | ||||
|             match bitfield.get(i) { | ||||
|                 Ok(bit) if bit == true => participants.push(*validator_index), | ||||
|                 _ => {} | ||||
|             } | ||||
|         } | ||||
|         participants.shrink_to_fit(); | ||||
| 
 | ||||
|         Ok(participants) | ||||
|     } | ||||
| @ -598,10 +558,8 @@ impl BeaconState { | ||||
| 
 | ||||
|         for deposit_data in deposits { | ||||
|             let result = self.process_deposit( | ||||
|                 deposit_data.deposit_input.pubkey.clone(), | ||||
|                 deposit_data.deposit_input.clone(), | ||||
|                 deposit_data.amount, | ||||
|                 deposit_data.deposit_input.proof_of_possession.clone(), | ||||
|                 deposit_data.deposit_input.withdrawal_credentials, | ||||
|                 Some(&pubkey_map), | ||||
|                 spec, | ||||
|             ); | ||||
| @ -618,24 +576,30 @@ impl BeaconState { | ||||
|     /// this hashmap, each call to `process_deposits` requires an iteration though
 | ||||
|     /// `self.validator_registry`. This becomes highly inefficient at scale.
 | ||||
|     ///
 | ||||
|     /// TODO: this function also exists in a more optimal form in the `state_processing` crate as
 | ||||
|     /// `process_deposits`; unify these two functions.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn process_deposit( | ||||
|         &mut self, | ||||
|         pubkey: PublicKey, | ||||
|         deposit_input: DepositInput, | ||||
|         amount: u64, | ||||
|         proof_of_possession: Signature, | ||||
|         withdrawal_credentials: Hash256, | ||||
|         pubkey_map: Option<&HashMap<PublicKey, usize>>, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<usize, ()> { | ||||
|         // TODO: update proof of possession to function written above (
 | ||||
|         // requires bls::create_proof_of_possession to be updated
 | ||||
|         //
 | ||||
|         // https://github.com/sigp/lighthouse/issues/239
 | ||||
|         if !verify_proof_of_possession(&proof_of_possession, &pubkey) { | ||||
|         let proof_is_valid = deposit_input.proof_of_possession.verify( | ||||
|             &deposit_input.signed_root(), | ||||
|             spec.get_domain(self.current_epoch(&spec), Domain::Deposit, &self.fork), | ||||
|             &deposit_input.pubkey, | ||||
|         ); | ||||
| 
 | ||||
|         if !proof_is_valid { | ||||
|             return Err(()); | ||||
|         } | ||||
| 
 | ||||
|         let pubkey = deposit_input.pubkey.clone(); | ||||
|         let withdrawal_credentials = deposit_input.withdrawal_credentials.clone(); | ||||
| 
 | ||||
|         let validator_index = if let Some(pubkey_map) = pubkey_map { | ||||
|             pubkey_map.get(&pubkey).and_then(|i| Some(*i)) | ||||
|         } else { | ||||
| @ -1063,33 +1027,6 @@ impl BeaconState { | ||||
|         self.validator_registry_update_epoch = current_epoch; | ||||
|     } | ||||
| 
 | ||||
|     /// Confirm validator owns PublicKey
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn validate_proof_of_possession( | ||||
|         &self, | ||||
|         pubkey: PublicKey, | ||||
|         proof_of_possession: Signature, | ||||
|         withdrawal_credentials: Hash256, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> bool { | ||||
|         let proof_of_possession_data = DepositInput { | ||||
|             pubkey: pubkey.clone(), | ||||
|             withdrawal_credentials, | ||||
|             proof_of_possession: Signature::empty_signature(), | ||||
|         }; | ||||
| 
 | ||||
|         proof_of_possession.verify( | ||||
|             &proof_of_possession_data.hash_tree_root(), | ||||
|             spec.get_domain( | ||||
|                 self.slot.epoch(spec.slots_per_epoch), | ||||
|                 Domain::Deposit, | ||||
|                 &self.fork, | ||||
|             ), | ||||
|             &pubkey, | ||||
|         ) | ||||
|     } | ||||
| 
 | ||||
|     /// Iterate through the validator registry and eject active validators with balance below
 | ||||
|     /// ``EJECTION_BALANCE``.
 | ||||
|     ///
 | ||||
| @ -1161,10 +1098,6 @@ impl BeaconState { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| fn hash_tree_root<T: TreeHash>(input: Vec<T>) -> Hash256 { | ||||
|     Hash256::from_slice(&input.hash_tree_root()[..]) | ||||
| } | ||||
| 
 | ||||
| impl Encodable for BeaconState { | ||||
|     fn ssz_append(&self, s: &mut SszStream) { | ||||
|         s.append(&self.slot); | ||||
| @ -1262,42 +1195,34 @@ impl Decodable for BeaconState { | ||||
| } | ||||
| 
 | ||||
| impl TreeHash for BeaconState { | ||||
|     fn hash_tree_root_internal(&self) -> Vec<u8> { | ||||
|     fn hash_tree_root(&self) -> Vec<u8> { | ||||
|         let mut result: Vec<u8> = vec![]; | ||||
|         result.append(&mut self.slot.hash_tree_root_internal()); | ||||
|         result.append(&mut self.genesis_time.hash_tree_root_internal()); | ||||
|         result.append(&mut self.fork.hash_tree_root_internal()); | ||||
|         result.append(&mut self.validator_registry.hash_tree_root_internal()); | ||||
|         result.append(&mut self.validator_balances.hash_tree_root_internal()); | ||||
|         result.append( | ||||
|             &mut self | ||||
|                 .validator_registry_update_epoch | ||||
|                 .hash_tree_root_internal(), | ||||
|         ); | ||||
|         result.append(&mut self.latest_randao_mixes.hash_tree_root_internal()); | ||||
|         result.append( | ||||
|             &mut self | ||||
|                 .previous_shuffling_start_shard | ||||
|                 .hash_tree_root_internal(), | ||||
|         ); | ||||
|         result.append(&mut self.current_shuffling_start_shard.hash_tree_root_internal()); | ||||
|         result.append(&mut self.previous_shuffling_epoch.hash_tree_root_internal()); | ||||
|         result.append(&mut self.current_shuffling_epoch.hash_tree_root_internal()); | ||||
|         result.append(&mut self.previous_shuffling_seed.hash_tree_root_internal()); | ||||
|         result.append(&mut self.current_shuffling_seed.hash_tree_root_internal()); | ||||
|         result.append(&mut self.previous_justified_epoch.hash_tree_root_internal()); | ||||
|         result.append(&mut self.justified_epoch.hash_tree_root_internal()); | ||||
|         result.append(&mut self.justification_bitfield.hash_tree_root_internal()); | ||||
|         result.append(&mut self.finalized_epoch.hash_tree_root_internal()); | ||||
|         result.append(&mut self.latest_crosslinks.hash_tree_root_internal()); | ||||
|         result.append(&mut self.latest_block_roots.hash_tree_root_internal()); | ||||
|         result.append(&mut self.latest_active_index_roots.hash_tree_root_internal()); | ||||
|         result.append(&mut self.latest_slashed_balances.hash_tree_root_internal()); | ||||
|         result.append(&mut self.latest_attestations.hash_tree_root_internal()); | ||||
|         result.append(&mut self.batched_block_roots.hash_tree_root_internal()); | ||||
|         result.append(&mut self.latest_eth1_data.hash_tree_root_internal()); | ||||
|         result.append(&mut self.eth1_data_votes.hash_tree_root_internal()); | ||||
|         result.append(&mut self.deposit_index.hash_tree_root_internal()); | ||||
|         result.append(&mut self.slot.hash_tree_root()); | ||||
|         result.append(&mut self.genesis_time.hash_tree_root()); | ||||
|         result.append(&mut self.fork.hash_tree_root()); | ||||
|         result.append(&mut self.validator_registry.hash_tree_root()); | ||||
|         result.append(&mut self.validator_balances.hash_tree_root()); | ||||
|         result.append(&mut self.validator_registry_update_epoch.hash_tree_root()); | ||||
|         result.append(&mut self.latest_randao_mixes.hash_tree_root()); | ||||
|         result.append(&mut self.previous_shuffling_start_shard.hash_tree_root()); | ||||
|         result.append(&mut self.current_shuffling_start_shard.hash_tree_root()); | ||||
|         result.append(&mut self.previous_shuffling_epoch.hash_tree_root()); | ||||
|         result.append(&mut self.current_shuffling_epoch.hash_tree_root()); | ||||
|         result.append(&mut self.previous_shuffling_seed.hash_tree_root()); | ||||
|         result.append(&mut self.current_shuffling_seed.hash_tree_root()); | ||||
|         result.append(&mut self.previous_justified_epoch.hash_tree_root()); | ||||
|         result.append(&mut self.justified_epoch.hash_tree_root()); | ||||
|         result.append(&mut self.justification_bitfield.hash_tree_root()); | ||||
|         result.append(&mut self.finalized_epoch.hash_tree_root()); | ||||
|         result.append(&mut self.latest_crosslinks.hash_tree_root()); | ||||
|         result.append(&mut self.latest_block_roots.hash_tree_root()); | ||||
|         result.append(&mut self.latest_active_index_roots.hash_tree_root()); | ||||
|         result.append(&mut self.latest_slashed_balances.hash_tree_root()); | ||||
|         result.append(&mut self.latest_attestations.hash_tree_root()); | ||||
|         result.append(&mut self.batched_block_roots.hash_tree_root()); | ||||
|         result.append(&mut self.latest_eth1_data.hash_tree_root()); | ||||
|         result.append(&mut self.eth1_data_votes.hash_tree_root()); | ||||
|         result.append(&mut self.deposit_index.hash_tree_root()); | ||||
|         hash(&result) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -1,263 +1,99 @@ | ||||
| use super::BeaconStateError; | ||||
| use crate::validator_registry::get_active_validator_indices; | ||||
| use crate::*; | ||||
| use bls::create_proof_of_possession; | ||||
| use rayon::prelude::*; | ||||
| use ssz::TreeHash; | ||||
| 
 | ||||
| /// Builds a `BeaconState` for use in testing or benchmarking.
 | ||||
| /// Builds a `BeaconState` for use in production.
 | ||||
| ///
 | ||||
| /// Building the `BeaconState` is a three step processes:
 | ||||
| /// This struct should _not_ be modified for use in testing scenarios. Use `TestingBeaconStateBuilder` for that purpose.
 | ||||
| ///
 | ||||
| /// 1. Create a new `BeaconStateBuilder`.
 | ||||
| /// 2. Call `Self::build()` or `Self::build_fast()` generate a  `BeaconState`.
 | ||||
| /// 3. (Optional) Use builder functions to modify the `BeaconState`.
 | ||||
| /// 4. Call `Self::cloned_state()` to obtain a `BeaconState` cloned from this struct.
 | ||||
| ///
 | ||||
| /// Step (2) happens prior to step (3) because some functionality requires an existing
 | ||||
| /// `BeaconState`.
 | ||||
| ///
 | ||||
| /// Step (4) produces a clone of the BeaconState and doesn't consume the `BeaconStateBuilder` to
 | ||||
| /// allow access to `self.keypairs` and `self.spec`.
 | ||||
| /// This struct should remain safe and sensible for production usage.
 | ||||
| pub struct BeaconStateBuilder { | ||||
|     pub validator_count: usize, | ||||
|     pub state: Option<BeaconState>, | ||||
|     pub genesis_time: u64, | ||||
|     pub latest_eth1_data: Eth1Data, | ||||
|     pub spec: ChainSpec, | ||||
|     pub keypairs: Vec<Keypair>, | ||||
|     pub state: BeaconState, | ||||
| } | ||||
| 
 | ||||
| impl BeaconStateBuilder { | ||||
|     /// Create a new builder with the given number of validators.
 | ||||
|     pub fn new(validator_count: usize) -> Self { | ||||
|         let genesis_time = 10_000_000; | ||||
| 
 | ||||
|         let latest_eth1_data = Eth1Data { | ||||
|             deposit_root: Hash256::zero(), | ||||
|             block_hash: Hash256::zero(), | ||||
|         }; | ||||
| 
 | ||||
|         let spec = ChainSpec::foundation(); | ||||
| 
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn new(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> Self { | ||||
|         Self { | ||||
|             validator_count, | ||||
|             state: None, | ||||
|             genesis_time, | ||||
|             latest_eth1_data, | ||||
|             spec, | ||||
|             keypairs: vec![], | ||||
|             state: BeaconState::genesis(genesis_time, latest_eth1_data, spec), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function.
 | ||||
|     /// Process deposit objects.
 | ||||
|     ///
 | ||||
|     /// Each validator is assigned a unique, randomly-generated keypair and all
 | ||||
|     /// proof-of-possessions are verified during genesis.
 | ||||
|     pub fn build(&mut self) -> Result<(), BeaconStateError> { | ||||
|         self.keypairs = (0..self.validator_count) | ||||
|             .collect::<Vec<usize>>() | ||||
|             .iter() | ||||
|             .map(|_| Keypair::random()) | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn process_initial_deposits( | ||||
|         &mut self, | ||||
|         initial_validator_deposits: &[Deposit], | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         let deposit_data = initial_validator_deposits | ||||
|             .par_iter() | ||||
|             .map(|deposit| &deposit.deposit_data) | ||||
|             .collect(); | ||||
| 
 | ||||
|         let initial_validator_deposits = self | ||||
|             .keypairs | ||||
|             .iter() | ||||
|             .map(|keypair| Deposit { | ||||
|                 branch: vec![], // branch verification is not specified.
 | ||||
|                 index: 0,       // index verification is not specified.
 | ||||
|                 deposit_data: DepositData { | ||||
|                     amount: 32_000_000_000, // 32 ETH (in Gwei)
 | ||||
|                     timestamp: self.genesis_time - 1, | ||||
|                     deposit_input: DepositInput { | ||||
|                         pubkey: keypair.pk.clone(), | ||||
|                         withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
 | ||||
|                         proof_of_possession: create_proof_of_possession(&keypair), | ||||
|                     }, | ||||
|                 }, | ||||
|             }) | ||||
|             .collect(); | ||||
|         self.state.process_deposits(deposit_data, spec); | ||||
| 
 | ||||
|         let state = BeaconState::genesis( | ||||
|             self.genesis_time, | ||||
|             initial_validator_deposits, | ||||
|             self.latest_eth1_data.clone(), | ||||
|             &self.spec, | ||||
|         )?; | ||||
|         self.activate_genesis_validators(spec); | ||||
| 
 | ||||
|         self.state = Some(state); | ||||
| 
 | ||||
|         Ok(()) | ||||
|         self.state.deposit_index = initial_validator_deposits.len() as u64; | ||||
|     } | ||||
| 
 | ||||
|     /// Builds a `BeaconState` using the `BeaconState::genesis(..)` function, without supplying any
 | ||||
|     /// validators. Instead validators are added to the state post-genesis.
 | ||||
|     ///
 | ||||
|     /// One keypair is randomly generated and all validators are assigned this same keypair.
 | ||||
|     /// Proof-of-possessions are not created (or validated).
 | ||||
|     ///
 | ||||
|     /// This function runs orders of magnitude faster than `Self::build()`, however it will be
 | ||||
|     /// erroneous for functions which use a validators public key as an identifier (e.g.,
 | ||||
|     /// deposits).
 | ||||
|     pub fn build_fast(&mut self) -> Result<(), BeaconStateError> { | ||||
|         let common_keypair = Keypair::random(); | ||||
| 
 | ||||
|         let mut validator_registry = Vec::with_capacity(self.validator_count); | ||||
|         let mut validator_balances = Vec::with_capacity(self.validator_count); | ||||
|         self.keypairs = Vec::with_capacity(self.validator_count); | ||||
| 
 | ||||
|         for _ in 0..self.validator_count { | ||||
|             self.keypairs.push(common_keypair.clone()); | ||||
|             validator_balances.push(32_000_000_000); | ||||
|             validator_registry.push(Validator { | ||||
|                 pubkey: common_keypair.pk.clone(), | ||||
|                 withdrawal_credentials: Hash256::zero(), | ||||
|                 activation_epoch: self.spec.genesis_epoch, | ||||
|                 ..Validator::default() | ||||
|             }) | ||||
|         } | ||||
| 
 | ||||
|         let state = BeaconState { | ||||
|             validator_registry, | ||||
|             validator_balances, | ||||
|             ..BeaconState::genesis( | ||||
|                 self.genesis_time, | ||||
|                 vec![], | ||||
|                 self.latest_eth1_data.clone(), | ||||
|                 &self.spec, | ||||
|             )? | ||||
|         }; | ||||
| 
 | ||||
|         self.state = Some(state); | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Sets the `BeaconState` to be in the last slot of the given epoch.
 | ||||
|     ///
 | ||||
|     /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e.,
 | ||||
|     /// highest justified and finalized slots, full justification bitfield, etc).
 | ||||
|     pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch) { | ||||
|         let state = self.state.as_mut().expect("Genesis required"); | ||||
| 
 | ||||
|         let slot = epoch.end_slot(self.spec.slots_per_epoch); | ||||
| 
 | ||||
|         state.slot = slot; | ||||
|         state.validator_registry_update_epoch = epoch - 1; | ||||
| 
 | ||||
|         state.previous_shuffling_epoch = epoch - 1; | ||||
|         state.current_shuffling_epoch = epoch; | ||||
| 
 | ||||
|         state.previous_shuffling_seed = Hash256::from_low_u64_le(0); | ||||
|         state.current_shuffling_seed = Hash256::from_low_u64_le(1); | ||||
| 
 | ||||
|         state.previous_justified_epoch = epoch - 2; | ||||
|         state.justified_epoch = epoch - 1; | ||||
|         state.justification_bitfield = u64::max_value(); | ||||
|         state.finalized_epoch = epoch - 1; | ||||
|     } | ||||
| 
 | ||||
|     /// Creates a full set of attestations for the `BeaconState`. Each attestation has full
 | ||||
|     /// participation from its committee and references the expected beacon_block hashes.
 | ||||
|     ///
 | ||||
|     /// These attestations should be fully conducive to justification and finalization.
 | ||||
|     pub fn insert_attestations(&mut self) { | ||||
|         let state = self.state.as_mut().expect("Genesis required"); | ||||
| 
 | ||||
|         state | ||||
|             .build_epoch_cache(RelativeEpoch::Previous, &self.spec) | ||||
|             .unwrap(); | ||||
|         state | ||||
|             .build_epoch_cache(RelativeEpoch::Current, &self.spec) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let current_epoch = state.current_epoch(&self.spec); | ||||
|         let previous_epoch = state.previous_epoch(&self.spec); | ||||
|         let current_epoch_depth = | ||||
|             (state.slot - current_epoch.end_slot(self.spec.slots_per_epoch)).as_usize(); | ||||
| 
 | ||||
|         let previous_epoch_slots = previous_epoch.slot_iter(self.spec.slots_per_epoch); | ||||
|         let current_epoch_slots = current_epoch | ||||
|             .slot_iter(self.spec.slots_per_epoch) | ||||
|             .take(current_epoch_depth); | ||||
| 
 | ||||
|         for slot in previous_epoch_slots.chain(current_epoch_slots) { | ||||
|             let committees = state | ||||
|                 .get_crosslink_committees_at_slot(slot, &self.spec) | ||||
|                 .unwrap() | ||||
|                 .clone(); | ||||
| 
 | ||||
|             for (committee, shard) in committees { | ||||
|                 state | ||||
|                     .latest_attestations | ||||
|                     .push(committee_to_pending_attestation( | ||||
|                         state, &committee, shard, slot, &self.spec, | ||||
|                     )) | ||||
|     fn activate_genesis_validators(&mut self, spec: &ChainSpec) { | ||||
|         for validator_index in 0..self.state.validator_registry.len() { | ||||
|             if self.state.get_effective_balance(validator_index, spec) >= spec.max_deposit_amount { | ||||
|                 self.state.activate_validator(validator_index, true, spec); | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Returns a cloned `BeaconState`.
 | ||||
|     pub fn cloned_state(&self) -> BeaconState { | ||||
|         self.state.as_ref().expect("Genesis required").clone() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Builds a valid PendingAttestation with full participation for some committee.
 | ||||
| fn committee_to_pending_attestation( | ||||
|     state: &BeaconState, | ||||
|     committee: &[usize], | ||||
|     shard: u64, | ||||
|     slot: Slot, | ||||
|     spec: &ChainSpec, | ||||
| ) -> PendingAttestation { | ||||
|     let current_epoch = state.current_epoch(spec); | ||||
|     let previous_epoch = state.previous_epoch(spec); | ||||
| 
 | ||||
|     let mut aggregation_bitfield = Bitfield::new(); | ||||
|     let mut custody_bitfield = Bitfield::new(); | ||||
| 
 | ||||
|     for (i, _) in committee.iter().enumerate() { | ||||
|         aggregation_bitfield.set(i, true); | ||||
|         custody_bitfield.set(i, true); | ||||
|     } | ||||
| 
 | ||||
|     let is_previous_epoch = | ||||
|         state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); | ||||
| 
 | ||||
|     let justified_epoch = if is_previous_epoch { | ||||
|         state.previous_justified_epoch | ||||
|     } else { | ||||
|         state.justified_epoch | ||||
|     }; | ||||
| 
 | ||||
|     let epoch_boundary_root = if is_previous_epoch { | ||||
|         *state | ||||
|             .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|             .unwrap() | ||||
|     } else { | ||||
|         *state | ||||
|             .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|             .unwrap() | ||||
|     }; | ||||
| 
 | ||||
|     let justified_block_root = *state | ||||
|         .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), &spec) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     PendingAttestation { | ||||
|         aggregation_bitfield, | ||||
|         data: AttestationData { | ||||
|             slot, | ||||
|             shard, | ||||
|             beacon_block_root: *state.get_block_root(slot, spec).unwrap(), | ||||
|             epoch_boundary_root, | ||||
|             crosslink_data_root: Hash256::zero(), | ||||
|             latest_crosslink: Crosslink { | ||||
|                 epoch: slot.epoch(spec.slots_per_epoch), | ||||
|                 crosslink_data_root: Hash256::zero(), | ||||
|             }, | ||||
|             justified_epoch, | ||||
|             justified_block_root, | ||||
|         }, | ||||
|         custody_bitfield, | ||||
|         inclusion_slot: slot, | ||||
|     /// Instantiate the validator registry from a YAML file.
 | ||||
|     ///
 | ||||
|     /// This skips a lot of signing and verification, useful if signing and verification has been
 | ||||
|     /// completed previously.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn import_existing_validators( | ||||
|         &mut self, | ||||
|         validators: Vec<Validator>, | ||||
|         initial_balances: Vec<u64>, | ||||
|         deposit_index: u64, | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         self.state.validator_registry = validators; | ||||
| 
 | ||||
|         assert_eq!( | ||||
|             self.state.validator_registry.len(), | ||||
|             initial_balances.len(), | ||||
|             "Not enough balances for validators" | ||||
|         ); | ||||
| 
 | ||||
|         self.state.validator_balances = initial_balances; | ||||
| 
 | ||||
|         self.activate_genesis_validators(spec); | ||||
| 
 | ||||
|         self.state.deposit_index = deposit_index; | ||||
|     } | ||||
| 
 | ||||
|     /// Updates the final state variables and returns a fully built genesis state.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn build(mut self, spec: &ChainSpec) -> Result<BeaconState, BeaconStateError> { | ||||
|         let genesis_active_index_root = | ||||
|             get_active_validator_indices(&self.state.validator_registry, spec.genesis_epoch) | ||||
|                 .hash_tree_root(); | ||||
| 
 | ||||
|         self.state.latest_active_index_roots = vec![ | ||||
|             Hash256::from_slice(&genesis_active_index_root); | ||||
|             spec.latest_active_index_roots_length | ||||
|         ]; | ||||
| 
 | ||||
|         self.state.current_shuffling_seed = self.state.generate_seed(spec.genesis_epoch, spec)?; | ||||
| 
 | ||||
|         Ok(self.state) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -11,7 +11,7 @@ pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> boo | ||||
|     } | ||||
| 
 | ||||
|     for i in committee_size..(bitfield.num_bytes() * 8) { | ||||
|         if bitfield.get(i).expect("Impossible due to previous check.") { | ||||
|         if bitfield.get(i).unwrap_or(false) { | ||||
|             return false; | ||||
|         } | ||||
|     } | ||||
|  | ||||
| @ -1,15 +1,9 @@ | ||||
| #![cfg(test)] | ||||
| 
 | ||||
| use super::*; | ||||
| use crate::test_utils::TestingBeaconStateBuilder; | ||||
| use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
| use crate::{BeaconState, ChainSpec}; | ||||
| use ssz::{ssz_encode, Decodable}; | ||||
| 
 | ||||
| #[test] | ||||
| pub fn can_produce_genesis_block() { | ||||
|     let mut builder = BeaconStateBuilder::new(2); | ||||
|     builder.build().unwrap(); | ||||
| } | ||||
| 
 | ||||
| /// Tests that `get_attestation_participants` is consistent with the result of
 | ||||
| /// get_crosslink_committees_at_slot` with a full bitfield.
 | ||||
| @ -17,13 +11,9 @@ pub fn can_produce_genesis_block() { | ||||
| pub fn get_attestation_participants_consistency() { | ||||
|     let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
| 
 | ||||
|     let mut builder = BeaconStateBuilder::new(8); | ||||
|     builder.spec = ChainSpec::few_validators(); | ||||
| 
 | ||||
|     builder.build().unwrap(); | ||||
| 
 | ||||
|     let mut state = builder.cloned_state(); | ||||
|     let spec = builder.spec.clone(); | ||||
|     let spec = ChainSpec::few_validators(); | ||||
|     let builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec); | ||||
|     let (mut state, _keypairs) = builder.build(); | ||||
| 
 | ||||
|     state | ||||
|         .build_epoch_cache(RelativeEpoch::Previous, &spec) | ||||
| @ -60,25 +50,4 @@ pub fn get_attestation_participants_consistency() { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[test] | ||||
| pub fn test_ssz_round_trip() { | ||||
|     let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|     let original = BeaconState::random_for_test(&mut rng); | ||||
| 
 | ||||
|     let bytes = ssz_encode(&original); | ||||
|     let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|     assert_eq!(original, decoded); | ||||
| } | ||||
| 
 | ||||
| #[test] | ||||
| pub fn test_hash_tree_root_internal() { | ||||
|     let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|     let original = BeaconState::random_for_test(&mut rng); | ||||
| 
 | ||||
|     let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|     assert_eq!(result.len(), 32); | ||||
|     // TODO: Add further tests
 | ||||
|     // https://github.com/sigp/lighthouse/issues/170
 | ||||
| } | ||||
| ssz_tests!(BeaconState); | ||||
|  | ||||
| @ -19,29 +19,6 @@ pub struct Crosslink { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Crosslink::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Crosslink::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Crosslink); | ||||
| } | ||||
|  | ||||
| @ -1,14 +1,14 @@ | ||||
| use super::{DepositData, Hash256}; | ||||
| use crate::test_utils::TestRandom; | ||||
| use rand::RngCore; | ||||
| use serde_derive::Serialize; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| use ssz_derive::{Decode, Encode, TreeHash}; | ||||
| use test_random_derive::TestRandom; | ||||
| 
 | ||||
| /// A deposit to potentially become a beacon chain validator.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] | ||||
| #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] | ||||
| pub struct Deposit { | ||||
|     pub branch: Vec<Hash256>, | ||||
|     pub index: u64, | ||||
| @ -18,29 +18,6 @@ pub struct Deposit { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Deposit::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Deposit::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Deposit); | ||||
| } | ||||
|  | ||||
| @ -1,14 +1,14 @@ | ||||
| use super::DepositInput; | ||||
| use crate::test_utils::TestRandom; | ||||
| use rand::RngCore; | ||||
| use serde_derive::Serialize; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| use ssz_derive::{Decode, Encode, TreeHash}; | ||||
| use test_random_derive::TestRandom; | ||||
| 
 | ||||
| /// Data generated by the deposit contract.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] | ||||
| #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] | ||||
| pub struct DepositData { | ||||
|     pub amount: u64, | ||||
|     pub timestamp: u64, | ||||
| @ -18,29 +18,6 @@ pub struct DepositData { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = DepositData::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = DepositData::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(DepositData); | ||||
| } | ||||
|  | ||||
| @ -1,47 +1,71 @@ | ||||
| use super::Hash256; | ||||
| use crate::test_utils::TestRandom; | ||||
| use bls::{PublicKey, Signature}; | ||||
| use crate::*; | ||||
| use bls::{Keypair, PublicKey, Signature}; | ||||
| use rand::RngCore; | ||||
| use serde_derive::Serialize; | ||||
| use ssz_derive::{Decode, Encode, TreeHash}; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| use ssz::{SignedRoot, TreeHash}; | ||||
| use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; | ||||
| use test_random_derive::TestRandom; | ||||
| 
 | ||||
| /// The data supplied by the user to the deposit contract.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| #[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] | ||||
| #[derive(
 | ||||
|     Debug, | ||||
|     PartialEq, | ||||
|     Clone, | ||||
|     Serialize, | ||||
|     Deserialize, | ||||
|     Encode, | ||||
|     Decode, | ||||
|     SignedRoot, | ||||
|     TreeHash, | ||||
|     TestRandom, | ||||
| )] | ||||
| pub struct DepositInput { | ||||
|     pub pubkey: PublicKey, | ||||
|     pub withdrawal_credentials: Hash256, | ||||
|     pub proof_of_possession: Signature, | ||||
| } | ||||
| 
 | ||||
| impl DepositInput { | ||||
|     /// Generate the 'proof_of_posession' signature for a given DepositInput details.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn create_proof_of_possession( | ||||
|         keypair: &Keypair, | ||||
|         withdrawal_credentials: &Hash256, | ||||
|         domain: u64, | ||||
|     ) -> Signature { | ||||
|         let signable_deposit_input = DepositInput { | ||||
|             pubkey: keypair.pk.clone(), | ||||
|             withdrawal_credentials: withdrawal_credentials.clone(), | ||||
|             proof_of_possession: Signature::empty_signature(), | ||||
|         }; | ||||
|         let msg = signable_deposit_input.signed_root(); | ||||
| 
 | ||||
|         Signature::new(msg.as_slice(), domain, &keypair.sk) | ||||
|     } | ||||
| 
 | ||||
|     /// Verify that proof-of-possession is valid.
 | ||||
|     ///
 | ||||
|     /// Spec v0.4.0
 | ||||
|     pub fn validate_proof_of_possession( | ||||
|         &self, | ||||
|         epoch: Epoch, | ||||
|         fork: &Fork, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> bool { | ||||
|         let msg = self.signed_root(); | ||||
|         let domain = spec.get_domain(epoch, Domain::Deposit, fork); | ||||
| 
 | ||||
|         self.proof_of_possession.verify(&msg, domain, &self.pubkey) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = DepositInput::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = DepositInput::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(DepositInput); | ||||
| } | ||||
|  | ||||
| @ -17,29 +17,6 @@ pub struct Eth1Data { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Eth1Data::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Eth1Data::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Eth1Data); | ||||
| } | ||||
|  | ||||
| @ -17,29 +17,6 @@ pub struct Eth1DataVote { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Eth1DataVote::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Eth1DataVote::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Eth1DataVote); | ||||
| } | ||||
|  | ||||
| @ -29,29 +29,6 @@ impl Fork { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Fork::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Fork::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Fork); | ||||
| } | ||||
|  | ||||
| @ -1,3 +1,4 @@ | ||||
| #[macro_use] | ||||
| pub mod test_utils; | ||||
| 
 | ||||
| pub mod attestation; | ||||
| @ -72,4 +73,4 @@ pub type AttesterMap = HashMap<(u64, u64), Vec<usize>>; | ||||
| /// Maps a slot to a block proposer.
 | ||||
| pub type ProposerMap = HashMap<u64, usize>; | ||||
| 
 | ||||
| pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; | ||||
| pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature}; | ||||
|  | ||||
| @ -19,29 +19,6 @@ pub struct PendingAttestation { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = PendingAttestation::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = PendingAttestation::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(PendingAttestation); | ||||
| } | ||||
|  | ||||
| @ -23,30 +23,7 @@ pub struct Proposal { | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, SignedRoot, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Proposal::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Proposal::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     use ssz::{SignedRoot, TreeHash}; | ||||
| 
 | ||||
|     #[derive(TreeHash)] | ||||
|     struct SignedProposal { | ||||
| @ -75,4 +52,5 @@ mod tests { | ||||
|         assert_eq!(original.signed_root(), other.hash_tree_root()); | ||||
|     } | ||||
| 
 | ||||
|     ssz_tests!(Proposal); | ||||
| } | ||||
|  | ||||
| @ -5,10 +5,6 @@ use serde_derive::Serialize; | ||||
| use ssz_derive::{Decode, Encode, TreeHash}; | ||||
| use test_random_derive::TestRandom; | ||||
| 
 | ||||
| mod builder; | ||||
| 
 | ||||
| pub use builder::ProposerSlashingBuilder; | ||||
| 
 | ||||
| /// Two conflicting proposals from the same proposer (validator).
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| @ -22,29 +18,6 @@ pub struct ProposerSlashing { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = ProposerSlashing::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = ProposerSlashing::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(ProposerSlashing); | ||||
| } | ||||
|  | ||||
| @ -14,29 +14,6 @@ pub struct ShardReassignmentRecord { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = ShardReassignmentRecord::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = ShardReassignmentRecord::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(ShardReassignmentRecord); | ||||
| } | ||||
|  | ||||
| @ -46,7 +46,6 @@ mod tests { | ||||
|     use crate::chain_spec::ChainSpec; | ||||
|     use crate::slot_epoch::{Epoch, Slot}; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_is_double_vote_true() { | ||||
| @ -120,28 +119,7 @@ mod tests { | ||||
|         ); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = SlashableAttestation::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = SlashableAttestation::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(SlashableAttestation); | ||||
| 
 | ||||
|     fn create_slashable_attestation( | ||||
|         slot_factor: u64, | ||||
|  | ||||
| @ -12,7 +12,7 @@ use crate::slot_height::SlotHeight; | ||||
| /// may lead to programming errors which are not detected by the compiler.
 | ||||
| use crate::test_utils::TestRandom; | ||||
| use rand::RngCore; | ||||
| use serde_derive::Serialize; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| use slog; | ||||
| use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; | ||||
| use std::cmp::{Ord, Ordering}; | ||||
| @ -21,10 +21,10 @@ use std::hash::{Hash, Hasher}; | ||||
| use std::iter::Iterator; | ||||
| use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; | ||||
| 
 | ||||
| #[derive(Eq, Debug, Clone, Copy, Default, Serialize)] | ||||
| #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] | ||||
| pub struct Slot(u64); | ||||
| 
 | ||||
| #[derive(Eq, Debug, Clone, Copy, Default, Serialize)] | ||||
| #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)] | ||||
| pub struct Epoch(u64); | ||||
| 
 | ||||
| impl_common!(Slot); | ||||
| @ -103,8 +103,6 @@ impl<'a> Iterator for SlotIter<'a> { | ||||
| #[cfg(test)] | ||||
| mod slot_tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::ssz_encode; | ||||
| 
 | ||||
|     all_tests!(Slot); | ||||
| } | ||||
| @ -112,8 +110,6 @@ mod slot_tests { | ||||
| #[cfg(test)] | ||||
| mod epoch_tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::ssz_encode; | ||||
| 
 | ||||
|     all_tests!(Epoch); | ||||
| 
 | ||||
|  | ||||
| @ -207,9 +207,9 @@ macro_rules! impl_ssz { | ||||
|         } | ||||
| 
 | ||||
|         impl TreeHash for $type { | ||||
|             fn hash_tree_root_internal(&self) -> Vec<u8> { | ||||
|             fn hash_tree_root(&self) -> Vec<u8> { | ||||
|                 let mut result: Vec<u8> = vec![]; | ||||
|                 result.append(&mut self.0.hash_tree_root_internal()); | ||||
|                 result.append(&mut self.0.hash_tree_root()); | ||||
|                 hash(&result) | ||||
|             } | ||||
|         } | ||||
| @ -248,7 +248,7 @@ macro_rules! impl_common { | ||||
| } | ||||
| 
 | ||||
| // test macros
 | ||||
| #[allow(unused_macros)] | ||||
| #[cfg(test)] | ||||
| macro_rules! new_tests { | ||||
|     ($type: ident) => { | ||||
|         #[test] | ||||
| @ -260,7 +260,7 @@ macro_rules! new_tests { | ||||
|     }; | ||||
| } | ||||
| 
 | ||||
| #[allow(unused_macros)] | ||||
| #[cfg(test)] | ||||
| macro_rules! from_into_tests { | ||||
|     ($type: ident, $other: ident) => { | ||||
|         #[test] | ||||
| @ -286,7 +286,7 @@ macro_rules! from_into_tests { | ||||
|     }; | ||||
| } | ||||
| 
 | ||||
| #[allow(unused_macros)] | ||||
| #[cfg(test)] | ||||
| macro_rules! math_between_tests { | ||||
|     ($type: ident, $other: ident) => { | ||||
|         #[test] | ||||
| @ -434,7 +434,7 @@ macro_rules! math_between_tests { | ||||
|     }; | ||||
| } | ||||
| 
 | ||||
| #[allow(unused_macros)] | ||||
| #[cfg(test)] | ||||
| macro_rules! math_tests { | ||||
|     ($type: ident) => { | ||||
|         #[test] | ||||
| @ -528,35 +528,7 @@ macro_rules! math_tests { | ||||
|     }; | ||||
| } | ||||
| 
 | ||||
| #[allow(unused_macros)] | ||||
| macro_rules! ssz_tests { | ||||
|     ($type: ident) => { | ||||
|         #[test] | ||||
|         pub fn test_ssz_round_trip() { | ||||
|             let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|             let original = $type::random_for_test(&mut rng); | ||||
| 
 | ||||
|             let bytes = ssz_encode(&original); | ||||
|             let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|             assert_eq!(original, decoded); | ||||
|         } | ||||
| 
 | ||||
|         #[test] | ||||
|         pub fn test_hash_tree_root_internal() { | ||||
|             let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|             let original = $type::random_for_test(&mut rng); | ||||
| 
 | ||||
|             let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|             assert_eq!(result.len(), 32); | ||||
|             // TODO: Add further tests
 | ||||
|             // https://github.com/sigp/lighthouse/issues/170
 | ||||
|         } | ||||
|     }; | ||||
| } | ||||
| 
 | ||||
| #[allow(unused_macros)] | ||||
| #[cfg(test)] | ||||
| macro_rules! all_tests { | ||||
|     ($type: ident) => { | ||||
|         new_tests!($type); | ||||
|  | ||||
| @ -33,11 +33,8 @@ impl SlotHeight { | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| 
 | ||||
| mod slot_height_tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::ssz_encode; | ||||
| 
 | ||||
|     all_tests!(SlotHeight); | ||||
| } | ||||
|  | ||||
							
								
								
									
										30
									
								
								eth2/types/src/test_utils/generate_deterministic_keypairs.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										30
									
								
								eth2/types/src/test_utils/generate_deterministic_keypairs.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,30 @@ | ||||
| use crate::*; | ||||
| use int_to_bytes::int_to_bytes48; | ||||
| use log::debug; | ||||
| use rayon::prelude::*; | ||||
| 
 | ||||
| /// Generates `validator_count` keypairs where the secret key is the index of the
 | ||||
| /// validator.
 | ||||
| ///
 | ||||
| /// For example, the first validator has a secret key of `int_to_bytes48(1)`, the second has
 | ||||
| /// `int_to_bytes48(2)` and so on. (We skip `0` as it generates a weird looking public key and is
 | ||||
| /// probably invalid).
 | ||||
| pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec<Keypair> { | ||||
|     debug!( | ||||
|         "Generating {} deterministic validator keypairs...", | ||||
|         validator_count | ||||
|     ); | ||||
| 
 | ||||
|     let keypairs: Vec<Keypair> = (0..validator_count) | ||||
|         .collect::<Vec<usize>>() | ||||
|         .par_iter() | ||||
|         .map(|&i| { | ||||
|             let secret = int_to_bytes48(i as u64 + 1); | ||||
|             let sk = SecretKey::from_bytes(&secret).unwrap(); | ||||
|             let pk = PublicKey::from_secret_key(&sk); | ||||
|             Keypair { sk, pk } | ||||
|         }) | ||||
|         .collect(); | ||||
| 
 | ||||
|     keypairs | ||||
| } | ||||
							
								
								
									
										128
									
								
								eth2/types/src/test_utils/keypairs_file.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										128
									
								
								eth2/types/src/test_utils/keypairs_file.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,128 @@ | ||||
| use crate::*; | ||||
| use rayon::prelude::*; | ||||
| use std::fs::File; | ||||
| use std::io::{Error, ErrorKind, Read, Write}; | ||||
| use std::path::Path; | ||||
| 
 | ||||
| pub const PUBLIC_KEY_BYTES_LEN: usize = 96; | ||||
| pub const SECRET_KEY_BYTES_LEN: usize = 48; | ||||
| 
 | ||||
| pub const BATCH_SIZE: usize = 1_000; // ~15MB
 | ||||
| 
 | ||||
| pub const KEYPAIR_BYTES_LEN: usize = PUBLIC_KEY_BYTES_LEN + SECRET_KEY_BYTES_LEN; | ||||
| pub const BATCH_BYTE_LEN: usize = KEYPAIR_BYTES_LEN * BATCH_SIZE; | ||||
| 
 | ||||
| /// Defines a trait that allows reading/writing a vec of `Keypair` from/to a file.
 | ||||
| pub trait KeypairsFile { | ||||
|     /// Write to file, without guaranteeing interoperability with other clients.
 | ||||
|     fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error>; | ||||
|     /// Read from file, without guaranteeing interoperability with other clients.
 | ||||
|     fn from_raw_file(path: &Path, count: usize) -> Result<Vec<Keypair>, Error>; | ||||
| } | ||||
| 
 | ||||
| impl KeypairsFile for Vec<Keypair> { | ||||
|     /// Write the keypairs to file, using the fastest possible method without guaranteeing
 | ||||
|     /// interoperability with other clients.
 | ||||
|     fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error> { | ||||
|         let mut keypairs_file = File::create(path)?; | ||||
| 
 | ||||
|         for keypair_batch in keypairs.chunks(BATCH_SIZE) { | ||||
|             let mut buf = Vec::with_capacity(BATCH_BYTE_LEN); | ||||
| 
 | ||||
|             for keypair in keypair_batch { | ||||
|                 buf.append(&mut keypair.sk.as_raw().as_bytes()); | ||||
|                 buf.append(&mut keypair.pk.clone().as_uncompressed_bytes()); | ||||
|             } | ||||
| 
 | ||||
|             keypairs_file.write_all(&buf)?; | ||||
|         } | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Read the keypairs from file, using the fastest possible method without guaranteeing
 | ||||
|     /// interoperability with other clients.
 | ||||
|     fn from_raw_file(path: &Path, count: usize) -> Result<Vec<Keypair>, Error> { | ||||
|         let mut keypairs_file = File::open(path)?; | ||||
| 
 | ||||
|         let mut keypairs = Vec::with_capacity(count); | ||||
| 
 | ||||
|         let indices: Vec<usize> = (0..count).collect(); | ||||
| 
 | ||||
|         for batch in indices.chunks(BATCH_SIZE) { | ||||
|             let mut buf = vec![0; batch.len() * KEYPAIR_BYTES_LEN]; | ||||
|             keypairs_file.read_exact(&mut buf)?; | ||||
| 
 | ||||
|             let mut keypair_batch = batch | ||||
|                 .par_iter() | ||||
|                 .enumerate() | ||||
|                 .map(|(i, _)| { | ||||
|                     let sk_start = i * KEYPAIR_BYTES_LEN; | ||||
|                     let sk_end = sk_start + SECRET_KEY_BYTES_LEN; | ||||
|                     let sk = SecretKey::from_bytes(&buf[sk_start..sk_end]) | ||||
|                         .map_err(|_| Error::new(ErrorKind::Other, "Invalid SecretKey bytes")) | ||||
|                         .unwrap(); | ||||
| 
 | ||||
|                     let pk_start = sk_end; | ||||
|                     let pk_end = pk_start + PUBLIC_KEY_BYTES_LEN; | ||||
|                     let pk = PublicKey::from_uncompressed_bytes(&buf[pk_start..pk_end]) | ||||
|                         .map_err(|_| Error::new(ErrorKind::Other, "Invalid PublicKey bytes")) | ||||
|                         .unwrap(); | ||||
| 
 | ||||
|                     Keypair { sk, pk } | ||||
|                 }) | ||||
|                 .collect(); | ||||
| 
 | ||||
|             keypairs.append(&mut keypair_batch); | ||||
|         } | ||||
| 
 | ||||
|         Ok(keypairs) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use rand::{distributions::Alphanumeric, thread_rng, Rng}; | ||||
|     use std::fs::remove_file; | ||||
| 
 | ||||
|     fn random_keypairs(n: usize) -> Vec<Keypair> { | ||||
|         (0..n).into_par_iter().map(|_| Keypair::random()).collect() | ||||
|     } | ||||
| 
 | ||||
|     fn random_tmp_file() -> String { | ||||
|         let mut rng = thread_rng(); | ||||
| 
 | ||||
|         rng.sample_iter(&Alphanumeric).take(7).collect() | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     #[ignore] | ||||
|     fn read_write_consistency_small_batch() { | ||||
|         let num_keypairs = 10; | ||||
|         let keypairs = random_keypairs(num_keypairs); | ||||
| 
 | ||||
|         let keypairs_path = Path::new("/tmp").join(random_tmp_file()); | ||||
|         keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap(); | ||||
| 
 | ||||
|         let decoded = Vec::from_raw_file(&keypairs_path, num_keypairs).unwrap(); | ||||
|         remove_file(keypairs_path).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(keypairs, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     #[ignore] | ||||
|     fn read_write_consistency_big_batch() { | ||||
|         let num_keypairs = BATCH_SIZE + 1; | ||||
|         let keypairs = random_keypairs(num_keypairs); | ||||
| 
 | ||||
|         let keypairs_path = Path::new("/tmp").join(random_tmp_file()); | ||||
|         keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap(); | ||||
| 
 | ||||
|         let decoded = Vec::from_raw_file(&keypairs_path, num_keypairs).unwrap(); | ||||
|         remove_file(keypairs_path).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(keypairs, decoded); | ||||
|     } | ||||
| } | ||||
| @ -17,14 +17,14 @@ macro_rules! ssz_tests { | ||||
|         } | ||||
| 
 | ||||
|         #[test] | ||||
|         pub fn test_hash_tree_root_internal() { | ||||
|         pub fn test_hash_tree_root() { | ||||
|             use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|             use ssz::TreeHash; | ||||
| 
 | ||||
|             let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|             let original = $type::random_for_test(&mut rng); | ||||
| 
 | ||||
|             let result = original.hash_tree_root_internal(); | ||||
|             let result = original.hash_tree_root(); | ||||
| 
 | ||||
|             assert_eq!(result.len(), 32); | ||||
|             // TODO: Add further tests
 | ||||
|  | ||||
| @ -1,55 +1,26 @@ | ||||
| use rand::RngCore; | ||||
| #[macro_use] | ||||
| mod macros; | ||||
| mod generate_deterministic_keypairs; | ||||
| mod keypairs_file; | ||||
| mod test_random; | ||||
| mod testing_attestation_builder; | ||||
| mod testing_attester_slashing_builder; | ||||
| mod testing_beacon_block_builder; | ||||
| mod testing_beacon_state_builder; | ||||
| mod testing_deposit_builder; | ||||
| mod testing_proposer_slashing_builder; | ||||
| mod testing_transfer_builder; | ||||
| mod testing_voluntary_exit_builder; | ||||
| 
 | ||||
| pub use generate_deterministic_keypairs::generate_deterministic_keypairs; | ||||
| pub use keypairs_file::KeypairsFile; | ||||
| pub use rand::{prng::XorShiftRng, SeedableRng}; | ||||
| 
 | ||||
| pub mod address; | ||||
| pub mod aggregate_signature; | ||||
| pub mod bitfield; | ||||
| pub mod hash256; | ||||
| pub mod public_key; | ||||
| pub mod secret_key; | ||||
| pub mod signature; | ||||
| 
 | ||||
| pub trait TestRandom<T> | ||||
| where | ||||
|     T: RngCore, | ||||
| { | ||||
|     fn random_for_test(rng: &mut T) -> Self; | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for bool { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         (rng.next_u32() % 2) == 1 | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for u64 { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         rng.next_u64() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for u32 { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         rng.next_u32() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for usize { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         rng.next_u32() as usize | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore, U> TestRandom<T> for Vec<U> | ||||
| where | ||||
|     U: TestRandom<T>, | ||||
| { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         vec![ | ||||
|             <U>::random_for_test(rng), | ||||
|             <U>::random_for_test(rng), | ||||
|             <U>::random_for_test(rng), | ||||
|         ] | ||||
|     } | ||||
| } | ||||
| pub use test_random::TestRandom; | ||||
| pub use testing_attestation_builder::TestingAttestationBuilder; | ||||
| pub use testing_attester_slashing_builder::TestingAttesterSlashingBuilder; | ||||
| pub use testing_beacon_block_builder::TestingBeaconBlockBuilder; | ||||
| pub use testing_beacon_state_builder::{keypairs_path, TestingBeaconStateBuilder}; | ||||
| pub use testing_deposit_builder::TestingDepositBuilder; | ||||
| pub use testing_proposer_slashing_builder::TestingProposerSlashingBuilder; | ||||
| pub use testing_transfer_builder::TestingTransferBuilder; | ||||
| pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder; | ||||
|  | ||||
							
								
								
									
										53
									
								
								eth2/types/src/test_utils/test_random.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										53
									
								
								eth2/types/src/test_utils/test_random.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,53 @@ | ||||
| use rand::RngCore; | ||||
| 
 | ||||
| mod address; | ||||
| mod aggregate_signature; | ||||
| mod bitfield; | ||||
| mod hash256; | ||||
| mod public_key; | ||||
| mod secret_key; | ||||
| mod signature; | ||||
| 
 | ||||
| pub trait TestRandom<T> | ||||
| where | ||||
|     T: RngCore, | ||||
| { | ||||
|     fn random_for_test(rng: &mut T) -> Self; | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for bool { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         (rng.next_u32() % 2) == 1 | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for u64 { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         rng.next_u64() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for u32 { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         rng.next_u32() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for usize { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         rng.next_u32() as usize | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<T: RngCore, U> TestRandom<T> for Vec<U> | ||||
| where | ||||
|     U: TestRandom<T>, | ||||
| { | ||||
|     fn random_for_test(rng: &mut T) -> Self { | ||||
|         vec![ | ||||
|             <U>::random_for_test(rng), | ||||
|             <U>::random_for_test(rng), | ||||
|             <U>::random_for_test(rng), | ||||
|         ] | ||||
|     } | ||||
| } | ||||
| @ -1,5 +1,5 @@ | ||||
| use super::super::Bitfield; | ||||
| use super::TestRandom; | ||||
| use crate::Bitfield; | ||||
| use rand::RngCore; | ||||
| 
 | ||||
| impl<T: RngCore> TestRandom<T> for Bitfield { | ||||
							
								
								
									
										126
									
								
								eth2/types/src/test_utils/testing_attestation_builder.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										126
									
								
								eth2/types/src/test_utils/testing_attestation_builder.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,126 @@ | ||||
| use crate::*; | ||||
| use ssz::TreeHash; | ||||
| 
 | ||||
| /// Builds an attestation to be used for testing purposes.
 | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingAttestationBuilder { | ||||
|     committee: Vec<usize>, | ||||
|     attestation: Attestation, | ||||
| } | ||||
| 
 | ||||
| impl TestingAttestationBuilder { | ||||
|     /// Create a new attestation builder.
 | ||||
|     pub fn new( | ||||
|         state: &BeaconState, | ||||
|         committee: &[usize], | ||||
|         slot: Slot, | ||||
|         shard: u64, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Self { | ||||
|         let current_epoch = state.current_epoch(spec); | ||||
|         let previous_epoch = state.previous_epoch(spec); | ||||
| 
 | ||||
|         let is_previous_epoch = | ||||
|             state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); | ||||
| 
 | ||||
|         let justified_epoch = if is_previous_epoch { | ||||
|             state.previous_justified_epoch | ||||
|         } else { | ||||
|             state.justified_epoch | ||||
|         }; | ||||
| 
 | ||||
|         let epoch_boundary_root = if is_previous_epoch { | ||||
|             *state | ||||
|                 .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|                 .unwrap() | ||||
|         } else { | ||||
|             *state | ||||
|                 .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|                 .unwrap() | ||||
|         }; | ||||
| 
 | ||||
|         let justified_block_root = *state | ||||
|             .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let mut aggregation_bitfield = Bitfield::new(); | ||||
|         let mut custody_bitfield = Bitfield::new(); | ||||
| 
 | ||||
|         for (i, _) in committee.iter().enumerate() { | ||||
|             custody_bitfield.set(i, false); | ||||
|             aggregation_bitfield.set(i, false); | ||||
|         } | ||||
| 
 | ||||
|         let attestation = Attestation { | ||||
|             aggregation_bitfield, | ||||
|             data: AttestationData { | ||||
|                 slot, | ||||
|                 shard, | ||||
|                 beacon_block_root: *state.get_block_root(slot, spec).unwrap(), | ||||
|                 epoch_boundary_root, | ||||
|                 crosslink_data_root: Hash256::zero(), | ||||
|                 latest_crosslink: state.latest_crosslinks[shard as usize].clone(), | ||||
|                 justified_epoch, | ||||
|                 justified_block_root, | ||||
|             }, | ||||
|             custody_bitfield, | ||||
|             aggregate_signature: AggregateSignature::new(), | ||||
|         }; | ||||
| 
 | ||||
|         Self { | ||||
|             attestation, | ||||
|             committee: committee.to_vec(), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Signs the attestation with a subset (or all) committee members.
 | ||||
|     ///
 | ||||
|     /// `secret_keys` must be supplied in the same order as `signing_validators`. I.e., the first
 | ||||
|     /// keypair must be that of the first signing validator.
 | ||||
|     pub fn sign( | ||||
|         &mut self, | ||||
|         signing_validators: &[usize], | ||||
|         secret_keys: &[&SecretKey], | ||||
|         fork: &Fork, | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         assert_eq!( | ||||
|             signing_validators.len(), | ||||
|             secret_keys.len(), | ||||
|             "Must be a key for each validator" | ||||
|         ); | ||||
| 
 | ||||
|         for (key_index, validator_index) in signing_validators.iter().enumerate() { | ||||
|             let committee_index = self | ||||
|                 .committee | ||||
|                 .iter() | ||||
|                 .position(|v| *v == *validator_index) | ||||
|                 .expect("Signing validator not in attestation committee"); | ||||
| 
 | ||||
|             self.attestation | ||||
|                 .aggregation_bitfield | ||||
|                 .set(committee_index, true); | ||||
| 
 | ||||
|             let message = AttestationDataAndCustodyBit { | ||||
|                 data: self.attestation.data.clone(), | ||||
|                 custody_bit: false, | ||||
|             } | ||||
|             .hash_tree_root(); | ||||
| 
 | ||||
|             let domain = spec.get_domain( | ||||
|                 self.attestation.data.slot.epoch(spec.slots_per_epoch), | ||||
|                 Domain::Attestation, | ||||
|                 fork, | ||||
|             ); | ||||
| 
 | ||||
|             let signature = Signature::new(&message, domain, secret_keys[key_index]); | ||||
|             self.attestation.aggregate_signature.add(&signature) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Consume the builder and return the attestation.
 | ||||
|     pub fn build(self) -> Attestation { | ||||
|         self.attestation | ||||
|     } | ||||
| } | ||||
| @ -2,9 +2,11 @@ use crate::*; | ||||
| use ssz::TreeHash; | ||||
| 
 | ||||
| /// Builds an `AttesterSlashing`.
 | ||||
| pub struct AttesterSlashingBuilder(); | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingAttesterSlashingBuilder(); | ||||
| 
 | ||||
| impl AttesterSlashingBuilder { | ||||
| impl TestingAttesterSlashingBuilder { | ||||
|     /// Builds an `AttesterSlashing` that is a double vote.
 | ||||
|     ///
 | ||||
|     /// The `signer` function is used to sign the double-vote and accepts:
 | ||||
| @ -65,12 +67,15 @@ impl AttesterSlashingBuilder { | ||||
|         }; | ||||
| 
 | ||||
|         let add_signatures = |attestation: &mut SlashableAttestation| { | ||||
|             // All validators sign with a `false` custody bit.
 | ||||
|             let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { | ||||
|                 data: attestation.data.clone(), | ||||
|                 custody_bit: false, | ||||
|             }; | ||||
|             let message = attestation_data_and_custody_bit.hash_tree_root(); | ||||
| 
 | ||||
|             for (i, validator_index) in validator_indices.iter().enumerate() { | ||||
|                 let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { | ||||
|                     data: attestation.data.clone(), | ||||
|                     custody_bit: attestation.custody_bitfield.get(i).unwrap(), | ||||
|                 }; | ||||
|                 let message = attestation_data_and_custody_bit.hash_tree_root(); | ||||
|                 attestation.custody_bitfield.set(i, false); | ||||
|                 let signature = signer(*validator_index, &message[..], epoch, Domain::Attestation); | ||||
|                 attestation.aggregate_signature.add(&signature); | ||||
|             } | ||||
							
								
								
									
										270
									
								
								eth2/types/src/test_utils/testing_beacon_block_builder.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										270
									
								
								eth2/types/src/test_utils/testing_beacon_block_builder.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,270 @@ | ||||
| use crate::{ | ||||
|     test_utils::{ | ||||
|         TestingAttestationBuilder, TestingAttesterSlashingBuilder, TestingDepositBuilder, | ||||
|         TestingProposerSlashingBuilder, TestingTransferBuilder, TestingVoluntaryExitBuilder, | ||||
|     }, | ||||
|     *, | ||||
| }; | ||||
| use rayon::prelude::*; | ||||
| use ssz::{SignedRoot, TreeHash}; | ||||
| 
 | ||||
| /// Builds a beacon block to be used for testing purposes.
 | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingBeaconBlockBuilder { | ||||
|     block: BeaconBlock, | ||||
| } | ||||
| 
 | ||||
| impl TestingBeaconBlockBuilder { | ||||
|     /// Create a new builder from genesis.
 | ||||
|     pub fn new(spec: &ChainSpec) -> Self { | ||||
|         Self { | ||||
|             block: BeaconBlock::genesis(spec.zero_hash, spec), | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Set the slot of the block.
 | ||||
|     pub fn set_slot(&mut self, slot: Slot) { | ||||
|         self.block.slot = slot; | ||||
|     } | ||||
| 
 | ||||
|     /// Signs the block.
 | ||||
|     ///
 | ||||
|     /// Modifying the block after signing may invalidate the signature.
 | ||||
|     pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { | ||||
|         let proposal = self.block.proposal(spec); | ||||
|         let message = proposal.signed_root(); | ||||
|         let epoch = self.block.slot.epoch(spec.slots_per_epoch); | ||||
|         let domain = spec.get_domain(epoch, Domain::Proposal, fork); | ||||
|         self.block.signature = Signature::new(&message, domain, sk); | ||||
|     } | ||||
| 
 | ||||
|     /// Sets the randao to be a signature across the blocks epoch.
 | ||||
|     ///
 | ||||
|     /// Modifying the block's slot after signing may invalidate the signature.
 | ||||
|     pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { | ||||
|         let epoch = self.block.slot.epoch(spec.slots_per_epoch); | ||||
|         let message = epoch.hash_tree_root(); | ||||
|         let domain = spec.get_domain(epoch, Domain::Randao, fork); | ||||
|         self.block.randao_reveal = Signature::new(&message, domain, sk); | ||||
|     } | ||||
| 
 | ||||
|     /// Inserts a signed, valid `ProposerSlashing` for the validator.
 | ||||
|     pub fn insert_proposer_slashing( | ||||
|         &mut self, | ||||
|         validator_index: u64, | ||||
|         secret_key: &SecretKey, | ||||
|         fork: &Fork, | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         let proposer_slashing = build_proposer_slashing(validator_index, secret_key, fork, spec); | ||||
|         self.block.body.proposer_slashings.push(proposer_slashing); | ||||
|     } | ||||
| 
 | ||||
|     /// Inserts a signed, valid `AttesterSlashing` for each validator index in `validator_indices`.
 | ||||
|     pub fn insert_attester_slashing( | ||||
|         &mut self, | ||||
|         validator_indices: &[u64], | ||||
|         secret_keys: &[&SecretKey], | ||||
|         fork: &Fork, | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         let attester_slashing = | ||||
|             build_double_vote_attester_slashing(validator_indices, secret_keys, fork, spec); | ||||
|         self.block.body.attester_slashings.push(attester_slashing); | ||||
|     } | ||||
| 
 | ||||
|     /// Fills the block with `MAX_ATTESTATIONS` attestations.
 | ||||
|     ///
 | ||||
|     /// It will first go and get each committee that is able to include an attestation in this
 | ||||
|     /// block. If there are enough committees, it will produce an attestation for each. If there
 | ||||
|     /// are _not_ enough committees, it will start splitting the committees in half until it
 | ||||
|     /// achieves the target. It will then produce separate attestations for each split committee.
 | ||||
|     ///
 | ||||
|     /// Note: the signed messages of the split committees will be identical -- it would be possible
 | ||||
|     /// to aggregate these split attestations.
 | ||||
|     pub fn fill_with_attestations( | ||||
|         &mut self, | ||||
|         state: &BeaconState, | ||||
|         secret_keys: &[&SecretKey], | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Result<(), BeaconStateError> { | ||||
|         let mut slot = self.block.slot - spec.min_attestation_inclusion_delay; | ||||
|         let mut attestations_added = 0; | ||||
| 
 | ||||
|         // Stores the following (in order):
 | ||||
|         //
 | ||||
|         // - The slot of the committee.
 | ||||
|         // - A list of all validators in the committee.
 | ||||
|         // - A list of all validators in the committee that should sign the attestation.
 | ||||
|         // - The shard of the committee.
 | ||||
|         let mut committees: Vec<(Slot, Vec<usize>, Vec<usize>, u64)> = vec![]; | ||||
| 
 | ||||
|         // Loop backwards through slots gathering each committee, until:
 | ||||
|         //
 | ||||
|         // - The slot is too old to be included in a block at this slot.
 | ||||
|         // - The `MAX_ATTESTATIONS`.
 | ||||
|         loop { | ||||
|             if state.slot >= slot + spec.slots_per_epoch { | ||||
|                 break; | ||||
|             } | ||||
| 
 | ||||
|             for (committee, shard) in state.get_crosslink_committees_at_slot(slot, spec)? { | ||||
|                 if attestations_added >= spec.max_attestations { | ||||
|                     break; | ||||
|                 } | ||||
| 
 | ||||
|                 committees.push((slot, committee.clone(), committee.clone(), *shard)); | ||||
| 
 | ||||
|                 attestations_added += 1; | ||||
|             } | ||||
| 
 | ||||
|             slot -= 1; | ||||
|         } | ||||
| 
 | ||||
|         // Loop through all the committees, splitting each one in half until we have
 | ||||
|         // `MAX_ATTESTATIONS` committees.
 | ||||
|         loop { | ||||
|             if committees.len() >= spec.max_attestations as usize { | ||||
|                 break; | ||||
|             } | ||||
| 
 | ||||
|             for index in 0..committees.len() { | ||||
|                 if committees.len() >= spec.max_attestations as usize { | ||||
|                     break; | ||||
|                 } | ||||
| 
 | ||||
|                 let (slot, committee, mut signing_validators, shard) = committees[index].clone(); | ||||
| 
 | ||||
|                 let new_signing_validators = | ||||
|                     signing_validators.split_off(signing_validators.len() / 2); | ||||
| 
 | ||||
|                 committees[index] = (slot, committee.clone(), signing_validators, shard); | ||||
|                 committees.push((slot, committee, new_signing_validators, shard)); | ||||
|             } | ||||
|         } | ||||
| 
 | ||||
|         let mut attestations: Vec<Attestation> = committees | ||||
|             .par_iter() | ||||
|             .map(|(slot, committee, signing_validators, shard)| { | ||||
|                 let mut builder = | ||||
|                     TestingAttestationBuilder::new(state, committee, *slot, *shard, spec); | ||||
| 
 | ||||
|                 let signing_secret_keys: Vec<&SecretKey> = signing_validators | ||||
|                     .iter() | ||||
|                     .map(|validator_index| secret_keys[*validator_index]) | ||||
|                     .collect(); | ||||
|                 builder.sign(signing_validators, &signing_secret_keys, &state.fork, spec); | ||||
| 
 | ||||
|                 builder.build() | ||||
|             }) | ||||
|             .collect(); | ||||
| 
 | ||||
|         self.block.body.attestations.append(&mut attestations); | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Insert a `Valid` deposit into the state.
 | ||||
|     pub fn insert_deposit( | ||||
|         &mut self, | ||||
|         amount: u64, | ||||
|         index: u64, | ||||
|         state: &BeaconState, | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         let keypair = Keypair::random(); | ||||
| 
 | ||||
|         let mut builder = TestingDepositBuilder::new(amount); | ||||
|         builder.set_index(index); | ||||
|         builder.sign(&keypair, state, spec); | ||||
| 
 | ||||
|         self.block.body.deposits.push(builder.build()) | ||||
|     } | ||||
| 
 | ||||
|     /// Insert a `Valid` exit into the state.
 | ||||
|     pub fn insert_exit( | ||||
|         &mut self, | ||||
|         state: &BeaconState, | ||||
|         validator_index: u64, | ||||
|         secret_key: &SecretKey, | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         let mut builder = TestingVoluntaryExitBuilder::new( | ||||
|             state.slot.epoch(spec.slots_per_epoch), | ||||
|             validator_index, | ||||
|         ); | ||||
| 
 | ||||
|         builder.sign(secret_key, &state.fork, spec); | ||||
| 
 | ||||
|         self.block.body.voluntary_exits.push(builder.build()) | ||||
|     } | ||||
| 
 | ||||
|     /// Insert a `Valid` transfer into the state.
 | ||||
|     ///
 | ||||
|     /// Note: this will set the validator to be withdrawable by directly modifying the state
 | ||||
|     /// validator registry. This _may_ cause problems historic hashes, etc.
 | ||||
|     pub fn insert_transfer( | ||||
|         &mut self, | ||||
|         state: &BeaconState, | ||||
|         from: u64, | ||||
|         to: u64, | ||||
|         amount: u64, | ||||
|         keypair: Keypair, | ||||
|         spec: &ChainSpec, | ||||
|     ) { | ||||
|         let mut builder = TestingTransferBuilder::new(from, to, amount, state.slot); | ||||
|         builder.sign(keypair, &state.fork, spec); | ||||
| 
 | ||||
|         self.block.body.transfers.push(builder.build()) | ||||
|     } | ||||
| 
 | ||||
|     /// Signs and returns the block, consuming the builder.
 | ||||
|     pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock { | ||||
|         self.sign(sk, fork, spec); | ||||
|         self.block | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the block, consuming the builder.
 | ||||
|     pub fn build_without_signing(self) -> BeaconBlock { | ||||
|         self.block | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Builds an `ProposerSlashing` for some `validator_index`.
 | ||||
| ///
 | ||||
| /// Signs the message using a `BeaconChainHarness`.
 | ||||
| fn build_proposer_slashing( | ||||
|     validator_index: u64, | ||||
|     secret_key: &SecretKey, | ||||
|     fork: &Fork, | ||||
|     spec: &ChainSpec, | ||||
| ) -> ProposerSlashing { | ||||
|     let signer = |_validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| { | ||||
|         let domain = spec.get_domain(epoch, domain, fork); | ||||
|         Signature::new(message, domain, secret_key) | ||||
|     }; | ||||
| 
 | ||||
|     TestingProposerSlashingBuilder::double_vote(validator_index, signer, spec) | ||||
| } | ||||
| 
 | ||||
| /// Builds an `AttesterSlashing` for some `validator_indices`.
 | ||||
| ///
 | ||||
| /// Signs the message using a `BeaconChainHarness`.
 | ||||
| fn build_double_vote_attester_slashing( | ||||
|     validator_indices: &[u64], | ||||
|     secret_keys: &[&SecretKey], | ||||
|     fork: &Fork, | ||||
|     spec: &ChainSpec, | ||||
| ) -> AttesterSlashing { | ||||
|     let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| { | ||||
|         let key_index = validator_indices | ||||
|             .iter() | ||||
|             .position(|&i| i == validator_index) | ||||
|             .expect("Unable to find attester slashing key"); | ||||
|         let domain = spec.get_domain(epoch, domain, fork); | ||||
|         Signature::new(message, domain, secret_keys[key_index]) | ||||
|     }; | ||||
| 
 | ||||
|     TestingAttesterSlashingBuilder::double_vote(validator_indices, signer) | ||||
| } | ||||
							
								
								
									
										300
									
								
								eth2/types/src/test_utils/testing_beacon_state_builder.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										300
									
								
								eth2/types/src/test_utils/testing_beacon_state_builder.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,300 @@ | ||||
| use super::{generate_deterministic_keypairs, KeypairsFile}; | ||||
| use crate::beacon_state::BeaconStateBuilder; | ||||
| use crate::*; | ||||
| use bls::get_withdrawal_credentials; | ||||
| use dirs; | ||||
| use log::debug; | ||||
| use rayon::prelude::*; | ||||
| use std::path::{Path, PathBuf}; | ||||
| 
 | ||||
| pub const KEYPAIRS_FILE: &str = "keypairs.raw_keypairs"; | ||||
| 
 | ||||
| /// Returns the directory where the generated keypairs should be stored.
 | ||||
| ///
 | ||||
| /// It is either `$HOME/.lighthouse/keypairs.raw_keypairs` or, if `$HOME` is not available,
 | ||||
| /// `./keypairs.raw_keypairs`.
 | ||||
| pub fn keypairs_path() -> PathBuf { | ||||
|     let dir = dirs::home_dir() | ||||
|         .and_then(|home| Some(home.join(".lighthouse"))) | ||||
|         .unwrap_or_else(|| PathBuf::from("")); | ||||
|     dir.join(KEYPAIRS_FILE) | ||||
| } | ||||
| 
 | ||||
| /// Builds a beacon state to be used for testing purposes.
 | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingBeaconStateBuilder { | ||||
|     state: BeaconState, | ||||
|     keypairs: Vec<Keypair>, | ||||
| } | ||||
| 
 | ||||
| impl TestingBeaconStateBuilder { | ||||
|     /// Attempts to load validators from a file in `$HOME/.lighthouse/keypairs.raw_keypairs`. If
 | ||||
|     /// the file is unavailable, it generates the keys at runtime.
 | ||||
|     ///
 | ||||
|     /// If the `$HOME` environment variable is not set, the local directory is used.
 | ||||
|     ///
 | ||||
|     /// See the `Self::from_keypairs_file` method for more info.
 | ||||
|     ///
 | ||||
|     /// # Panics
 | ||||
|     ///
 | ||||
|     /// If the file does not contain enough keypairs or is invalid.
 | ||||
|     pub fn from_default_keypairs_file_if_exists(validator_count: usize, spec: &ChainSpec) -> Self { | ||||
|         let dir = dirs::home_dir() | ||||
|             .and_then(|home| Some(home.join(".lighthouse"))) | ||||
|             .unwrap_or_else(|| PathBuf::from("")); | ||||
|         let file = dir.join(KEYPAIRS_FILE); | ||||
| 
 | ||||
|         if file.exists() { | ||||
|             TestingBeaconStateBuilder::from_keypairs_file(validator_count, &file, spec) | ||||
|         } else { | ||||
|             TestingBeaconStateBuilder::from_deterministic_keypairs(validator_count, spec) | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     /// Loads the initial validator keypairs from a file on disk.
 | ||||
|     ///
 | ||||
|     /// Loading keypairs from file is ~10x faster than generating them. Use the `gen_keys` command
 | ||||
|     /// on the  `test_harness` binary to generate the keys. In the `test_harness` dir, run `cargo
 | ||||
|     /// run -- gen_keys -h` for help.
 | ||||
|     ///
 | ||||
|     /// # Panics
 | ||||
|     ///
 | ||||
|     /// If the file does not exist, is invalid or does not contain enough keypairs.
 | ||||
|     pub fn from_keypairs_file(validator_count: usize, path: &Path, spec: &ChainSpec) -> Self { | ||||
|         debug!("Loading {} keypairs from file...", validator_count); | ||||
|         let keypairs = Vec::from_raw_file(path, validator_count).unwrap(); | ||||
|         TestingBeaconStateBuilder::from_keypairs(keypairs, spec) | ||||
|     } | ||||
| 
 | ||||
|     /// Generates the validator keypairs deterministically.
 | ||||
|     pub fn from_deterministic_keypairs(validator_count: usize, spec: &ChainSpec) -> Self { | ||||
|         debug!("Generating {} deterministic keypairs...", validator_count); | ||||
|         let keypairs = generate_deterministic_keypairs(validator_count); | ||||
|         TestingBeaconStateBuilder::from_keypairs(keypairs, spec) | ||||
|     } | ||||
| 
 | ||||
|     /// Uses the given keypair for all validators.
 | ||||
|     pub fn from_single_keypair( | ||||
|         validator_count: usize, | ||||
|         keypair: &Keypair, | ||||
|         spec: &ChainSpec, | ||||
|     ) -> Self { | ||||
|         debug!("Generating {} cloned keypairs...", validator_count); | ||||
| 
 | ||||
|         let mut keypairs = Vec::with_capacity(validator_count); | ||||
|         for _ in 0..validator_count { | ||||
|             keypairs.push(keypair.clone()) | ||||
|         } | ||||
| 
 | ||||
|         TestingBeaconStateBuilder::from_keypairs(keypairs, spec) | ||||
|     } | ||||
| 
 | ||||
|     /// Creates the builder from an existing set of keypairs.
 | ||||
|     pub fn from_keypairs(keypairs: Vec<Keypair>, spec: &ChainSpec) -> Self { | ||||
|         let validator_count = keypairs.len(); | ||||
| 
 | ||||
|         debug!( | ||||
|             "Building {} Validator objects from keypairs...", | ||||
|             validator_count | ||||
|         ); | ||||
|         let validators = keypairs | ||||
|             .par_iter() | ||||
|             .map(|keypair| { | ||||
|                 let withdrawal_credentials = Hash256::from_slice(&get_withdrawal_credentials( | ||||
|                     &keypair.pk, | ||||
|                     spec.bls_withdrawal_prefix_byte, | ||||
|                 )); | ||||
| 
 | ||||
|                 Validator { | ||||
|                     pubkey: keypair.pk.clone(), | ||||
|                     withdrawal_credentials, | ||||
|                     activation_epoch: spec.far_future_epoch, | ||||
|                     exit_epoch: spec.far_future_epoch, | ||||
|                     withdrawable_epoch: spec.far_future_epoch, | ||||
|                     initiated_exit: false, | ||||
|                     slashed: false, | ||||
|                 } | ||||
|             }) | ||||
|             .collect(); | ||||
| 
 | ||||
|         let mut state_builder = BeaconStateBuilder::new( | ||||
|             0, | ||||
|             Eth1Data { | ||||
|                 deposit_root: Hash256::zero(), | ||||
|                 block_hash: Hash256::zero(), | ||||
|             }, | ||||
|             spec, | ||||
|         ); | ||||
| 
 | ||||
|         let balances = vec![32_000_000_000; validator_count]; | ||||
| 
 | ||||
|         debug!("Importing {} existing validators...", validator_count); | ||||
|         state_builder.import_existing_validators( | ||||
|             validators, | ||||
|             balances, | ||||
|             validator_count as u64, | ||||
|             spec, | ||||
|         ); | ||||
| 
 | ||||
|         let state = state_builder.build(spec).unwrap(); | ||||
| 
 | ||||
|         debug!("BeaconState built."); | ||||
| 
 | ||||
|         Self { state, keypairs } | ||||
|     } | ||||
| 
 | ||||
|     /// Consume the builder and return the `BeaconState` and the keypairs for each validator.
 | ||||
|     pub fn build(self) -> (BeaconState, Vec<Keypair>) { | ||||
|         (self.state, self.keypairs) | ||||
|     } | ||||
| 
 | ||||
|     /// Ensures that the state returned from `Self::build(..)` has all caches pre-built.
 | ||||
|     ///
 | ||||
|     /// Note: this performs the build when called. Ensure that no changes are made that would
 | ||||
|     /// invalidate this cache.
 | ||||
|     pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { | ||||
|         let state = &mut self.state; | ||||
| 
 | ||||
|         state.build_epoch_cache(RelativeEpoch::Previous, &spec)?; | ||||
|         state.build_epoch_cache(RelativeEpoch::Current, &spec)?; | ||||
|         state.build_epoch_cache(RelativeEpoch::Next, &spec)?; | ||||
| 
 | ||||
|         Ok(()) | ||||
|     } | ||||
| 
 | ||||
|     /// Sets the `BeaconState` to be in a slot, calling `teleport_to_epoch` to update the epoch.
 | ||||
|     pub fn teleport_to_slot(&mut self, slot: Slot, spec: &ChainSpec) { | ||||
|         self.teleport_to_epoch(slot.epoch(spec.slots_per_epoch), spec); | ||||
|         self.state.slot = slot; | ||||
|     } | ||||
| 
 | ||||
|     /// Sets the `BeaconState` to be in the first slot of the given epoch.
 | ||||
|     ///
 | ||||
|     /// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e.,
 | ||||
|     /// highest justified and finalized slots, full justification bitfield, etc).
 | ||||
|     fn teleport_to_epoch(&mut self, epoch: Epoch, spec: &ChainSpec) { | ||||
|         let state = &mut self.state; | ||||
| 
 | ||||
|         let slot = epoch.start_slot(spec.slots_per_epoch); | ||||
| 
 | ||||
|         state.slot = slot; | ||||
| 
 | ||||
|         state.previous_shuffling_epoch = epoch - 1; | ||||
|         state.current_shuffling_epoch = epoch; | ||||
| 
 | ||||
|         state.previous_shuffling_seed = Hash256::from_low_u64_le(0); | ||||
|         state.current_shuffling_seed = Hash256::from_low_u64_le(1); | ||||
| 
 | ||||
|         state.previous_justified_epoch = epoch - 3; | ||||
|         state.justified_epoch = epoch - 2; | ||||
|         state.justification_bitfield = u64::max_value(); | ||||
| 
 | ||||
|         state.finalized_epoch = epoch - 3; | ||||
|         state.validator_registry_update_epoch = epoch - 3; | ||||
|     } | ||||
| 
 | ||||
|     /// Creates a full set of attestations for the `BeaconState`. Each attestation has full
 | ||||
|     /// participation from its committee and references the expected beacon_block hashes.
 | ||||
|     ///
 | ||||
|     /// These attestations should be fully conducive to justification and finalization.
 | ||||
|     pub fn insert_attestations(&mut self, spec: &ChainSpec) { | ||||
|         let state = &mut self.state; | ||||
| 
 | ||||
|         state | ||||
|             .build_epoch_cache(RelativeEpoch::Previous, spec) | ||||
|             .unwrap(); | ||||
|         state | ||||
|             .build_epoch_cache(RelativeEpoch::Current, spec) | ||||
|             .unwrap(); | ||||
| 
 | ||||
|         let current_epoch = state.current_epoch(spec); | ||||
|         let previous_epoch = state.previous_epoch(spec); | ||||
| 
 | ||||
|         let first_slot = previous_epoch.start_slot(spec.slots_per_epoch).as_u64(); | ||||
|         let last_slot = current_epoch.end_slot(spec.slots_per_epoch).as_u64() | ||||
|             - spec.min_attestation_inclusion_delay; | ||||
|         let last_slot = std::cmp::min(state.slot.as_u64(), last_slot); | ||||
| 
 | ||||
|         for slot in first_slot..last_slot + 1 { | ||||
|             let slot = Slot::from(slot); | ||||
| 
 | ||||
|             let committees = state | ||||
|                 .get_crosslink_committees_at_slot(slot, spec) | ||||
|                 .unwrap() | ||||
|                 .clone(); | ||||
| 
 | ||||
|             for (committee, shard) in committees { | ||||
|                 state | ||||
|                     .latest_attestations | ||||
|                     .push(committee_to_pending_attestation( | ||||
|                         state, &committee, shard, slot, spec, | ||||
|                     )) | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /// Maps a committee to a `PendingAttestation`.
 | ||||
| ///
 | ||||
| /// The committee will be signed by all validators in the committee.
 | ||||
| fn committee_to_pending_attestation( | ||||
|     state: &BeaconState, | ||||
|     committee: &[usize], | ||||
|     shard: u64, | ||||
|     slot: Slot, | ||||
|     spec: &ChainSpec, | ||||
| ) -> PendingAttestation { | ||||
|     let current_epoch = state.current_epoch(spec); | ||||
|     let previous_epoch = state.previous_epoch(spec); | ||||
| 
 | ||||
|     let mut aggregation_bitfield = Bitfield::new(); | ||||
|     let mut custody_bitfield = Bitfield::new(); | ||||
| 
 | ||||
|     for (i, _) in committee.iter().enumerate() { | ||||
|         aggregation_bitfield.set(i, true); | ||||
|         custody_bitfield.set(i, true); | ||||
|     } | ||||
| 
 | ||||
|     let is_previous_epoch = | ||||
|         state.slot.epoch(spec.slots_per_epoch) != slot.epoch(spec.slots_per_epoch); | ||||
| 
 | ||||
|     let justified_epoch = if is_previous_epoch { | ||||
|         state.previous_justified_epoch | ||||
|     } else { | ||||
|         state.justified_epoch | ||||
|     }; | ||||
| 
 | ||||
|     let epoch_boundary_root = if is_previous_epoch { | ||||
|         *state | ||||
|             .get_block_root(previous_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|             .unwrap() | ||||
|     } else { | ||||
|         *state | ||||
|             .get_block_root(current_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|             .unwrap() | ||||
|     }; | ||||
| 
 | ||||
|     let justified_block_root = *state | ||||
|         .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec) | ||||
|         .unwrap(); | ||||
| 
 | ||||
|     PendingAttestation { | ||||
|         aggregation_bitfield, | ||||
|         data: AttestationData { | ||||
|             slot, | ||||
|             shard, | ||||
|             beacon_block_root: *state.get_block_root(slot, spec).unwrap(), | ||||
|             epoch_boundary_root, | ||||
|             crosslink_data_root: Hash256::zero(), | ||||
|             latest_crosslink: Crosslink { | ||||
|                 epoch: slot.epoch(spec.slots_per_epoch), | ||||
|                 crosslink_data_root: Hash256::zero(), | ||||
|             }, | ||||
|             justified_epoch, | ||||
|             justified_block_root, | ||||
|         }, | ||||
|         custody_bitfield, | ||||
|         inclusion_slot: slot + spec.min_attestation_inclusion_delay, | ||||
|     } | ||||
| } | ||||
							
								
								
									
										64
									
								
								eth2/types/src/test_utils/testing_deposit_builder.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										64
									
								
								eth2/types/src/test_utils/testing_deposit_builder.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,64 @@ | ||||
| use crate::*; | ||||
| use bls::get_withdrawal_credentials; | ||||
| 
 | ||||
| /// Builds an deposit to be used for testing purposes.
 | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingDepositBuilder { | ||||
|     deposit: Deposit, | ||||
| } | ||||
| 
 | ||||
| impl TestingDepositBuilder { | ||||
|     /// Instantiates a new builder.
 | ||||
|     pub fn new(amount: u64) -> Self { | ||||
|         let keypair = Keypair::random(); | ||||
| 
 | ||||
|         let deposit = Deposit { | ||||
|             branch: vec![], | ||||
|             index: 0, | ||||
|             deposit_data: DepositData { | ||||
|                 amount, | ||||
|                 timestamp: 1, | ||||
|                 deposit_input: DepositInput { | ||||
|                     pubkey: keypair.pk, | ||||
|                     withdrawal_credentials: Hash256::zero(), | ||||
|                     proof_of_possession: Signature::empty_signature(), | ||||
|                 }, | ||||
|             }, | ||||
|         }; | ||||
| 
 | ||||
|         Self { deposit } | ||||
|     } | ||||
| 
 | ||||
|     /// Set the `deposit.index` value.
 | ||||
|     pub fn set_index(&mut self, index: u64) { | ||||
|         self.deposit.index = index; | ||||
|     } | ||||
| 
 | ||||
|     /// Signs the deposit, also setting the following values:
 | ||||
|     ///
 | ||||
|     /// - `pubkey` to the signing pubkey.
 | ||||
|     /// - `withdrawal_credentials` to the signing pubkey.
 | ||||
|     /// - `proof_of_possesssion`
 | ||||
|     pub fn sign(&mut self, keypair: &Keypair, state: &BeaconState, spec: &ChainSpec) { | ||||
|         let withdrawal_credentials = Hash256::from_slice( | ||||
|             &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], | ||||
|         ); | ||||
| 
 | ||||
|         let epoch = state.current_epoch(spec); | ||||
|         let domain = spec.get_domain(epoch, Domain::Deposit, &state.fork); | ||||
| 
 | ||||
|         self.deposit.deposit_data.deposit_input.pubkey = keypair.pk.clone(); | ||||
|         self.deposit | ||||
|             .deposit_data | ||||
|             .deposit_input | ||||
|             .withdrawal_credentials = withdrawal_credentials.clone(); | ||||
|         self.deposit.deposit_data.deposit_input.proof_of_possession = | ||||
|             DepositInput::create_proof_of_possession(&keypair, &withdrawal_credentials, domain); | ||||
|     } | ||||
| 
 | ||||
|     /// Builds the deposit, consuming the builder.
 | ||||
|     pub fn build(self) -> Deposit { | ||||
|         self.deposit | ||||
|     } | ||||
| } | ||||
| @ -2,9 +2,11 @@ use crate::*; | ||||
| use ssz::SignedRoot; | ||||
| 
 | ||||
| /// Builds a `ProposerSlashing`.
 | ||||
| pub struct ProposerSlashingBuilder(); | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingProposerSlashingBuilder(); | ||||
| 
 | ||||
| impl ProposerSlashingBuilder { | ||||
| impl TestingProposerSlashingBuilder { | ||||
|     /// Builds a `ProposerSlashing` that is a double vote.
 | ||||
|     ///
 | ||||
|     /// The `signer` function is used to sign the double-vote and accepts:
 | ||||
							
								
								
									
										45
									
								
								eth2/types/src/test_utils/testing_transfer_builder.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										45
									
								
								eth2/types/src/test_utils/testing_transfer_builder.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,45 @@ | ||||
| use crate::*; | ||||
| use ssz::SignedRoot; | ||||
| 
 | ||||
| /// Builds a transfer to be used for testing purposes.
 | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingTransferBuilder { | ||||
|     transfer: Transfer, | ||||
| } | ||||
| 
 | ||||
| impl TestingTransferBuilder { | ||||
|     /// Instantiates a new builder.
 | ||||
|     pub fn new(from: u64, to: u64, amount: u64, slot: Slot) -> Self { | ||||
|         let keypair = Keypair::random(); | ||||
| 
 | ||||
|         let transfer = Transfer { | ||||
|             from, | ||||
|             to, | ||||
|             amount, | ||||
|             fee: 0, | ||||
|             slot, | ||||
|             pubkey: keypair.pk, | ||||
|             signature: Signature::empty_signature(), | ||||
|         }; | ||||
| 
 | ||||
|         Self { transfer } | ||||
|     } | ||||
| 
 | ||||
|     /// Signs the transfer.
 | ||||
|     ///
 | ||||
|     /// The keypair must match that of the `from` validator index.
 | ||||
|     pub fn sign(&mut self, keypair: Keypair, fork: &Fork, spec: &ChainSpec) { | ||||
|         self.transfer.pubkey = keypair.pk; | ||||
|         let message = self.transfer.signed_root(); | ||||
|         let epoch = self.transfer.slot.epoch(spec.slots_per_epoch); | ||||
|         let domain = spec.get_domain(epoch, Domain::Transfer, fork); | ||||
| 
 | ||||
|         self.transfer.signature = Signature::new(&message, domain, &keypair.sk); | ||||
|     } | ||||
| 
 | ||||
|     /// Builds the transfer, consuming the builder.
 | ||||
|     pub fn build(self) -> Transfer { | ||||
|         self.transfer | ||||
|     } | ||||
| } | ||||
							
								
								
									
										37
									
								
								eth2/types/src/test_utils/testing_voluntary_exit_builder.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								eth2/types/src/test_utils/testing_voluntary_exit_builder.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,37 @@ | ||||
| use crate::*; | ||||
| use ssz::SignedRoot; | ||||
| 
 | ||||
| /// Builds an exit to be used for testing purposes.
 | ||||
| ///
 | ||||
| /// This struct should **never be used for production purposes.**
 | ||||
| pub struct TestingVoluntaryExitBuilder { | ||||
|     exit: VoluntaryExit, | ||||
| } | ||||
| 
 | ||||
| impl TestingVoluntaryExitBuilder { | ||||
|     /// Instantiates a new builder.
 | ||||
|     pub fn new(epoch: Epoch, validator_index: u64) -> Self { | ||||
|         let exit = VoluntaryExit { | ||||
|             epoch, | ||||
|             validator_index, | ||||
|             signature: Signature::empty_signature(), | ||||
|         }; | ||||
| 
 | ||||
|         Self { exit } | ||||
|     } | ||||
| 
 | ||||
|     /// Signs the exit.
 | ||||
|     ///
 | ||||
|     /// The signing secret key must match that of the exiting validator.
 | ||||
|     pub fn sign(&mut self, secret_key: &SecretKey, fork: &Fork, spec: &ChainSpec) { | ||||
|         let message = self.exit.signed_root(); | ||||
|         let domain = spec.get_domain(self.exit.epoch, Domain::Exit, fork); | ||||
| 
 | ||||
|         self.exit.signature = Signature::new(&message, domain, secret_key); | ||||
|     } | ||||
| 
 | ||||
|     /// Builds the exit, consuming the builder.
 | ||||
|     pub fn build(self) -> VoluntaryExit { | ||||
|         self.exit | ||||
|     } | ||||
| } | ||||
| @ -24,29 +24,6 @@ pub struct Transfer { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Transfer::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Transfer::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Transfer); | ||||
| } | ||||
|  | ||||
| @ -1,13 +1,13 @@ | ||||
| use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey}; | ||||
| use rand::RngCore; | ||||
| use serde_derive::Serialize; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| use ssz_derive::{Decode, Encode, TreeHash}; | ||||
| use test_random_derive::TestRandom; | ||||
| 
 | ||||
| /// Information about a `BeaconChain` validator.
 | ||||
| ///
 | ||||
| /// Spec v0.4.0
 | ||||
| #[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TestRandom, TreeHash)] | ||||
| #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] | ||||
| pub struct Validator { | ||||
|     pub pubkey: PublicKey, | ||||
|     pub withdrawal_credentials: Hash256, | ||||
| @ -54,18 +54,6 @@ impl Default for Validator { | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Validator::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     fn test_validator_can_be_active() { | ||||
| @ -90,15 +78,5 @@ mod tests { | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = Validator::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(Validator); | ||||
| } | ||||
|  | ||||
| @ -19,29 +19,6 @@ pub struct VoluntaryExit { | ||||
| #[cfg(test)] | ||||
| mod tests { | ||||
|     use super::*; | ||||
|     use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; | ||||
|     use ssz::{ssz_encode, Decodable, TreeHash}; | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_ssz_round_trip() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = VoluntaryExit::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let bytes = ssz_encode(&original); | ||||
|         let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); | ||||
| 
 | ||||
|         assert_eq!(original, decoded); | ||||
|     } | ||||
| 
 | ||||
|     #[test] | ||||
|     pub fn test_hash_tree_root_internal() { | ||||
|         let mut rng = XorShiftRng::from_seed([42; 16]); | ||||
|         let original = VoluntaryExit::random_for_test(&mut rng); | ||||
| 
 | ||||
|         let result = original.hash_tree_root_internal(); | ||||
| 
 | ||||
|         assert_eq!(result.len(), 32); | ||||
|         // TODO: Add further tests
 | ||||
|         // https://github.com/sigp/lighthouse/issues/170
 | ||||
|     } | ||||
|     ssz_tests!(VoluntaryExit); | ||||
| } | ||||
|  | ||||
| @ -5,8 +5,9 @@ authors = ["Paul Hauner <paul@paulhauner.com>"] | ||||
| edition = "2018" | ||||
| 
 | ||||
| [dependencies] | ||||
| bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.5.2" } | ||||
| bls-aggregates = { git = "https://github.com/sigp/signature-schemes", tag = "0.6.0" } | ||||
| hashing = { path = "../hashing" } | ||||
| hex = "0.3" | ||||
| serde = "1.0" | ||||
| serde_derive = "1.0" | ||||
| ssz = { path = "../ssz" } | ||||
|  | ||||
| @ -48,16 +48,8 @@ impl AggregateSignature { | ||||
|         domain: u64, | ||||
|         aggregate_public_keys: &[&AggregatePublicKey], | ||||
|     ) -> bool { | ||||
|         // TODO: the API for `RawAggregatePublicKey` shoudn't need to take an owned
 | ||||
|         // `AggregatePublicKey`. There is an issue to fix this, but in the meantime we need to
 | ||||
|         // clone.
 | ||||
|         //
 | ||||
|         // https://github.com/sigp/signature-schemes/issues/10
 | ||||
|         let aggregate_public_keys: Vec<RawAggregatePublicKey> = aggregate_public_keys | ||||
|             .iter() | ||||
|             .map(|pk| pk.as_raw()) | ||||
|             .cloned() | ||||
|             .collect(); | ||||
|         let aggregate_public_keys: Vec<&RawAggregatePublicKey> = | ||||
|             aggregate_public_keys.iter().map(|pk| pk.as_raw()).collect(); | ||||
| 
 | ||||
|         // Messages are concatenated into one long message.
 | ||||
|         let mut msg: Vec<u8> = vec![]; | ||||
| @ -95,7 +87,7 @@ impl Serialize for AggregateSignature { | ||||
| } | ||||
| 
 | ||||
| impl TreeHash for AggregateSignature { | ||||
|     fn hash_tree_root_internal(&self) -> Vec<u8> { | ||||
|     fn hash_tree_root(&self) -> Vec<u8> { | ||||
|         hash(&self.0.as_bytes()) | ||||
|     } | ||||
| } | ||||
|  | ||||
| @ -1,6 +1,7 @@ | ||||
| use super::{PublicKey, SecretKey}; | ||||
| use serde_derive::{Deserialize, Serialize}; | ||||
| 
 | ||||
| #[derive(Debug, Clone, PartialEq, Eq)] | ||||
| #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] | ||||
| pub struct Keypair { | ||||
|     pub sk: SecretKey, | ||||
|     pub pk: PublicKey, | ||||
|  | ||||
| @ -6,6 +6,7 @@ mod aggregate_signature; | ||||
| mod keypair; | ||||
| mod public_key; | ||||
| mod secret_key; | ||||
| mod serde_vistors; | ||||
| mod signature; | ||||
| 
 | ||||
| pub use crate::aggregate_public_key::AggregatePublicKey; | ||||
| @ -20,20 +21,6 @@ pub const BLS_AGG_SIG_BYTE_SIZE: usize = 96; | ||||
| use hashing::hash; | ||||
| use ssz::ssz_encode; | ||||
| 
 | ||||
| /// For some signature and public key, ensure that the signature message was the public key and it
 | ||||
| /// was signed by the secret key that corresponds to that public key.
 | ||||
| pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool { | ||||
|     // TODO: replace this function with state.validate_proof_of_possession
 | ||||
|     // https://github.com/sigp/lighthouse/issues/239
 | ||||
|     sig.verify(&ssz_encode(pubkey), 0, &pubkey) | ||||
| } | ||||
| 
 | ||||
| // TODO: Update this method
 | ||||
| // https://github.com/sigp/lighthouse/issues/239
 | ||||
| pub fn create_proof_of_possession(keypair: &Keypair) -> Signature { | ||||
|     Signature::new(&ssz_encode(&keypair.pk), 0, &keypair.sk) | ||||
| } | ||||
| 
 | ||||
| /// Returns the withdrawal credentials for a given public key.
 | ||||
| pub fn get_withdrawal_credentials(pubkey: &PublicKey, prefix_byte: u8) -> Vec<u8> { | ||||
|     let hashed = hash(&ssz_encode(pubkey)); | ||||
|  | ||||
| @ -1,6 +1,8 @@ | ||||
| use super::serde_vistors::HexVisitor; | ||||
| use super::SecretKey; | ||||
| use bls_aggregates::PublicKey as RawPublicKey; | ||||
| use hex::encode as hex_encode; | ||||
| use serde::de::{Deserialize, Deserializer}; | ||||
| use serde::ser::{Serialize, Serializer}; | ||||
| use ssz::{ | ||||
|     decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, | ||||
| @ -25,6 +27,24 @@ impl PublicKey { | ||||
|         &self.0 | ||||
|     } | ||||
| 
 | ||||
|     /// Converts compressed bytes to PublicKey
 | ||||
|     pub fn from_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { | ||||
|         let pubkey = RawPublicKey::from_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; | ||||
|         Ok(PublicKey(pubkey)) | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the PublicKey as (x, y) bytes
 | ||||
|     pub fn as_uncompressed_bytes(&self) -> Vec<u8> { | ||||
|         RawPublicKey::as_uncompressed_bytes(&mut self.0.clone()) | ||||
|     } | ||||
| 
 | ||||
|     /// Converts (x, y) bytes to PublicKey
 | ||||
|     pub fn from_uncompressed_bytes(bytes: &[u8]) -> Result<Self, DecodeError> { | ||||
|         let pubkey = | ||||
|             RawPublicKey::from_uncompressed_bytes(&bytes).map_err(|_| DecodeError::Invalid)?; | ||||
|         Ok(PublicKey(pubkey)) | ||||
|     } | ||||
| 
 | ||||
|     /// Returns the last 6 bytes of the SSZ encoding of the public key, as a hex string.
 | ||||
|     ///
 | ||||
|     /// Useful for providing a short identifier to the user.
 | ||||
| @ -61,12 +81,24 @@ impl Serialize for PublicKey { | ||||
|     where | ||||
|         S: Serializer, | ||||
|     { | ||||
|         serializer.serialize_bytes(&ssz_encode(self)) | ||||
|         serializer.serialize_str(&hex_encode(ssz_encode(self))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<'de> Deserialize<'de> for PublicKey { | ||||
|     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> | ||||
|     where | ||||
|         D: Deserializer<'de>, | ||||
|     { | ||||
|         let bytes = deserializer.deserialize_str(HexVisitor)?; | ||||
|         let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) | ||||
|             .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; | ||||
|         Ok(pubkey) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl TreeHash for PublicKey { | ||||
|     fn hash_tree_root_internal(&self) -> Vec<u8> { | ||||
|     fn hash_tree_root(&self) -> Vec<u8> { | ||||
|         hash(&self.0.as_bytes()) | ||||
|     } | ||||
| } | ||||
| @ -78,8 +110,14 @@ impl PartialEq for PublicKey { | ||||
| } | ||||
| 
 | ||||
| impl Hash for PublicKey { | ||||
|     /// Note: this is distinct from consensus serialization, it will produce a different hash.
 | ||||
|     ///
 | ||||
|     /// This method uses the uncompressed bytes, which are much faster to obtain than the
 | ||||
|     /// compressed bytes required for consensus serialization.
 | ||||
|     ///
 | ||||
|     /// Use `ssz::Encode` to obtain the bytes required for consensus hashing.
 | ||||
|     fn hash<H: Hasher>(&self, state: &mut H) { | ||||
|         ssz_encode(self).hash(state) | ||||
|         self.as_uncompressed_bytes().hash(state) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @ -1,5 +1,9 @@ | ||||
| use super::serde_vistors::HexVisitor; | ||||
| use bls_aggregates::{DecodeError as BlsDecodeError, SecretKey as RawSecretKey}; | ||||
| use ssz::{decode_ssz_list, Decodable, DecodeError, Encodable, SszStream, TreeHash}; | ||||
| use hex::encode as hex_encode; | ||||
| use serde::de::{Deserialize, Deserializer}; | ||||
| use serde::ser::{Serialize, Serializer}; | ||||
| use ssz::{decode_ssz_list, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; | ||||
| 
 | ||||
| /// A single BLS signature.
 | ||||
| ///
 | ||||
| @ -40,8 +44,29 @@ impl Decodable for SecretKey { | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl Serialize for SecretKey { | ||||
|     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> | ||||
|     where | ||||
|         S: Serializer, | ||||
|     { | ||||
|         serializer.serialize_str(&hex_encode(ssz_encode(self))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<'de> Deserialize<'de> for SecretKey { | ||||
|     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> | ||||
|     where | ||||
|         D: Deserializer<'de>, | ||||
|     { | ||||
|         let bytes = deserializer.deserialize_str(HexVisitor)?; | ||||
|         let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) | ||||
|             .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; | ||||
|         Ok(pubkey) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl TreeHash for SecretKey { | ||||
|     fn hash_tree_root_internal(&self) -> Vec<u8> { | ||||
|     fn hash_tree_root(&self) -> Vec<u8> { | ||||
|         self.0.as_bytes().clone() | ||||
|     } | ||||
| } | ||||
|  | ||||
							
								
								
									
										20
									
								
								eth2/utils/bls/src/serde_vistors.rs
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								eth2/utils/bls/src/serde_vistors.rs
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,20 @@ | ||||
| use hex; | ||||
| use serde::de::{self, Visitor}; | ||||
| use std::fmt; | ||||
| 
 | ||||
| pub struct HexVisitor; | ||||
| 
 | ||||
| impl<'de> Visitor<'de> for HexVisitor { | ||||
|     type Value = Vec<u8>; | ||||
| 
 | ||||
|     fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { | ||||
|         formatter.write_str("a hex string (without 0x prefix)") | ||||
|     } | ||||
| 
 | ||||
|     fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> | ||||
|     where | ||||
|         E: de::Error, | ||||
|     { | ||||
|         Ok(hex::decode(value).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))?) | ||||
|     } | ||||
| } | ||||
| @ -1,5 +1,8 @@ | ||||
| use super::serde_vistors::HexVisitor; | ||||
| use super::{PublicKey, SecretKey}; | ||||
| use bls_aggregates::Signature as RawSignature; | ||||
| use hex::encode as hex_encode; | ||||
| use serde::de::{Deserialize, Deserializer}; | ||||
| use serde::ser::{Serialize, Serializer}; | ||||
| use ssz::{ | ||||
|     decode_ssz_list, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash, | ||||
| @ -51,9 +54,12 @@ impl Signature { | ||||
| 
 | ||||
|     /// Returns a new empty signature.
 | ||||
|     pub fn empty_signature() -> Self { | ||||
|         // Empty Signature is currently being represented as BLS::Signature.point_at_infinity()
 | ||||
|         // However it should be represented as vec![0; 96] but this
 | ||||
|         // would require all signatures to be represented in byte form as opposed to Signature
 | ||||
|         let mut empty: Vec<u8> = vec![0; 96]; | ||||
|         // TODO: Modify the way flags are used (b_flag should not be used for empty_signature in the future)
 | ||||
|         empty[0] += u8::pow(2, 6); | ||||
|         // Sets C_flag and B_flag to 1 and all else to 0
 | ||||
|         empty[0] += u8::pow(2, 6) + u8::pow(2, 7); | ||||
|         Signature(RawSignature::from_bytes(&empty).unwrap()) | ||||
|     } | ||||
| } | ||||
| @ -73,7 +79,7 @@ impl Decodable for Signature { | ||||
| } | ||||
| 
 | ||||
| impl TreeHash for Signature { | ||||
|     fn hash_tree_root_internal(&self) -> Vec<u8> { | ||||
|     fn hash_tree_root(&self) -> Vec<u8> { | ||||
|         hash(&self.0.as_bytes()) | ||||
|     } | ||||
| } | ||||
| @ -83,7 +89,19 @@ impl Serialize for Signature { | ||||
|     where | ||||
|         S: Serializer, | ||||
|     { | ||||
|         serializer.serialize_bytes(&ssz_encode(self)) | ||||
|         serializer.serialize_str(&hex_encode(ssz_encode(self))) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| impl<'de> Deserialize<'de> for Signature { | ||||
|     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> | ||||
|     where | ||||
|         D: Deserializer<'de>, | ||||
|     { | ||||
|         let bytes = deserializer.deserialize_str(HexVisitor)?; | ||||
|         let (pubkey, _) = <_>::ssz_decode(&bytes[..], 0) | ||||
|             .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; | ||||
|         Ok(pubkey) | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| @ -114,7 +132,7 @@ mod tests { | ||||
|         assert_eq!(sig_as_bytes.len(), 96); | ||||
|         for (i, one_byte) in sig_as_bytes.iter().enumerate() { | ||||
|             if i == 0 { | ||||
|                 assert_eq!(*one_byte, u8::pow(2, 6)); | ||||
|                 assert_eq!(*one_byte, u8::pow(2, 6) + u8::pow(2, 7)); | ||||
|             } else { | ||||
|                 assert_eq!(*one_byte, 0); | ||||
|             } | ||||
|  | ||||
							
								
								
									
										4
									
								
								eth2/utils/boolean-bitfield/fuzz/.gitignore
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								eth2/utils/boolean-bitfield/fuzz/.gitignore
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,4 @@ | ||||
| 
 | ||||
| target | ||||
| corpus | ||||
| artifacts | ||||
							
								
								
									
										33
									
								
								eth2/utils/boolean-bitfield/fuzz/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								eth2/utils/boolean-bitfield/fuzz/Cargo.toml
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,33 @@ | ||||
| 
 | ||||
| [package] | ||||
| name = "boolean-bitfield-fuzz" | ||||
| version = "0.0.1" | ||||
| authors = ["Automatically generated"] | ||||
| publish = false | ||||
| 
 | ||||
| [package.metadata] | ||||
| cargo-fuzz = true | ||||
| 
 | ||||
| [dependencies] | ||||
| ssz = { path = "../../ssz" } | ||||
| 
 | ||||
| [dependencies.boolean-bitfield] | ||||
| path = ".." | ||||
| [dependencies.libfuzzer-sys] | ||||
| git = "https://github.com/rust-fuzz/libfuzzer-sys.git" | ||||
| 
 | ||||
| # Prevent this from interfering with workspaces | ||||
| [workspace] | ||||
| members = ["."] | ||||
| 
 | ||||
| [[bin]] | ||||
| name = "fuzz_target_from_bytes" | ||||
| path = "fuzz_targets/fuzz_target_from_bytes.rs" | ||||
| 
 | ||||
| [[bin]] | ||||
| name = "fuzz_target_ssz_decode" | ||||
| path = "fuzz_targets/fuzz_target_ssz_decode.rs" | ||||
| 
 | ||||
| [[bin]] | ||||
| name = "fuzz_target_ssz_encode" | ||||
| path = "fuzz_targets/fuzz_target_ssz_encode.rs" | ||||
| @ -0,0 +1,9 @@ | ||||
| #![no_main] | ||||
| #[macro_use] extern crate libfuzzer_sys; | ||||
| extern crate boolean_bitfield; | ||||
| 
 | ||||
| use boolean_bitfield::BooleanBitfield; | ||||
| 
 | ||||
| fuzz_target!(|data: &[u8]| { | ||||
|     let _result = BooleanBitfield::from_bytes(data); | ||||
| }); | ||||
| @ -0,0 +1,11 @@ | ||||
| #![no_main] | ||||
| #[macro_use] extern crate libfuzzer_sys; | ||||
| extern crate boolean_bitfield; | ||||
| extern crate ssz; | ||||
| 
 | ||||
| use boolean_bitfield::BooleanBitfield; | ||||
| use ssz::{Decodable, DecodeError}; | ||||
| 
 | ||||
| fuzz_target!(|data: &[u8]| { | ||||
|     let result: Result<(BooleanBitfield, usize), DecodeError> = <_>::ssz_decode(data, 0); | ||||
| }); | ||||
| @ -0,0 +1,13 @@ | ||||
| #![no_main] | ||||
| #[macro_use] extern crate libfuzzer_sys; | ||||
| extern crate boolean_bitfield; | ||||
| extern crate ssz; | ||||
| 
 | ||||
| use boolean_bitfield::BooleanBitfield; | ||||
| use ssz::SszStream; | ||||
| 
 | ||||
| fuzz_target!(|data: &[u8]| { | ||||
|     let bitfield = BooleanBitfield::from_bytes(data); | ||||
|     let mut ssz = SszStream::new(); | ||||
|     ssz.append(&bitfield); | ||||
| }); | ||||
| @ -187,8 +187,8 @@ impl Serialize for BooleanBitfield { | ||||
| } | ||||
| 
 | ||||
| impl ssz::TreeHash for BooleanBitfield { | ||||
|     fn hash_tree_root_internal(&self) -> Vec<u8> { | ||||
|         self.to_bytes().hash_tree_root_internal() | ||||
|     fn hash_tree_root(&self) -> Vec<u8> { | ||||
|         self.to_bytes().hash_tree_root() | ||||
|     } | ||||
| } | ||||
| 
 | ||||
|  | ||||
							
								
								
									
										4
									
								
								eth2/utils/hashing/fuzz/.gitignore
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								eth2/utils/hashing/fuzz/.gitignore
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @ -0,0 +1,4 @@ | ||||
| 
 | ||||
| target | ||||
| corpus | ||||
| artifacts | ||||
Some files were not shown because too many files have changed in this diff Show More
		Loading…
	
		Reference in New Issue
	
	Block a user