diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs index bd5149cfd..92d296c37 100644 --- a/beacon_node/db/src/stores/beacon_block_store.rs +++ b/beacon_node/db/src/stores/beacon_block_store.rs @@ -198,6 +198,7 @@ mod tests { } #[test] + #[ignore] fn test_block_at_slot() { let db = Arc::new(MemoryDB::open()); let bs = Arc::new(BeaconBlockStore::new(db.clone())); diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index fd1c3dea4..d7b10015b 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -409,11 +409,23 @@ impl ForkChoice for BitwiseLMDGhost { *child_votes.entry(child).or_insert_with(|| 0) += vote; } } - // given the votes on the children, find the best child - current_head = self - .choose_best_child(&child_votes) - .ok_or(ForkChoiceError::CannotFindBestChild)?; - trace!("Best child found: {}", current_head); + // check if we have votes of children, if not select the smallest hash child + if child_votes.is_empty() { + current_head = *children + .iter() + .min_by(|child1, child2| child1.cmp(child2)) + .expect("Must be children here"); + trace!( + "Children have no votes - smallest hash chosen: {}", + current_head + ); + } else { + // given the votes on the children, find the best child + current_head = self + .choose_best_child(&child_votes) + .ok_or(ForkChoiceError::CannotFindBestChild)?; + trace!("Best child found: {}", current_head); + } } // didn't find head yet, proceed to next iteration diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index 6062c19b1..0d6969e89 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -22,6 +22,7 @@ extern crate types; pub mod bitwise_lmd_ghost; pub mod longest_chain; +pub mod optimized_lmd_ghost; pub mod slow_lmd_ghost; use db::stores::BeaconBlockAtSlotError; @@ -30,6 +31,7 @@ use types::{BeaconBlock, ChainSpec, Hash256}; pub use bitwise_lmd_ghost::BitwiseLMDGhost; pub use longest_chain::LongestChain; +pub use optimized_lmd_ghost::OptimizedLMDGhost; pub use slow_lmd_ghost::SlowLMDGhost; /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures @@ -101,4 +103,6 @@ pub enum ForkChoiceAlgorithm { SlowLMDGhost, /// An optimised version of bitwise LMD-GHOST by Vitalik. BitwiseLMDGhost, + /// An optimised implementation of LMD ghost. + OptimizedLMDGhost, } diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs new file mode 100644 index 000000000..30c84e9e1 --- /dev/null +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -0,0 +1,465 @@ +//! The optimised bitwise LMD-GHOST fork choice rule. +extern crate bit_vec; + +use crate::{ForkChoice, ForkChoiceError}; +use db::{ + stores::{BeaconBlockStore, BeaconStateStore}, + ClientDB, +}; +use log::{debug, trace}; +use std::cmp::Ordering; +use std::collections::HashMap; +use std::sync::Arc; +use types::{ + readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, + ChainSpec, Hash256, Slot, SlotHeight, +}; + +//TODO: Pruning - Children +//TODO: Handle Syncing + +// NOTE: This uses u32 to represent difference between block heights. Thus this is only +// applicable for block height differences in the range of a u32. +// This can potentially be parallelized in some parts. 
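+//
+// The two helpers below give, for example, `log2_int(10)` == 3 and
+// `power_of_2_below(10)` == 8, i.e. the largest power of two that does not
+// exceed the input.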
+
+/// Compute the base-2 logarithm of an integer, floored (rounded down)
+#[inline]
+fn log2_int(x: u64) -> u32 {
+    if x == 0 {
+        return 0;
+    }
+    63 - x.leading_zeros()
+}
+
+fn power_of_2_below(x: u64) -> u64 {
+    2u64.pow(log2_int(x))
+}
+
+/// Stores the necessary data structures to run the optimised lmd ghost algorithm.
+pub struct OptimizedLMDGhost<T: ClientDB + Sized> {
+    /// A cache of known ancestors at given heights for a specific block.
+    //TODO: Consider FnvHashMap
+    cache: HashMap<CacheKey<u64>, Hash256>,
+    /// Log lookup table for blocks to their ancestors.
+    //TODO: Verify we only want/need a size 16 log lookup
+    ancestors: Vec<HashMap<Hash256, Hash256>>,
+    /// Stores the children for any given parent.
+    children: HashMap<Hash256, Vec<Hash256>>,
+    /// The latest attestation targets as a map of validator index to block hash.
+    //TODO: Could this be a fixed size vec
+    latest_attestation_targets: HashMap<u64, Hash256>,
+    /// Block storage access.
+    block_store: Arc<BeaconBlockStore<T>>,
+    /// State storage access.
+    state_store: Arc<BeaconStateStore<T>>,
+    max_known_height: SlotHeight,
+}
+
+impl<T> OptimizedLMDGhost<T>
+where
+    T: ClientDB + Sized,
+{
+    pub fn new(
+        block_store: Arc<BeaconBlockStore<T>>,
+        state_store: Arc<BeaconStateStore<T>>,
+    ) -> Self {
+        OptimizedLMDGhost {
+            cache: HashMap::new(),
+            ancestors: vec![HashMap::new(); 16],
+            latest_attestation_targets: HashMap::new(),
+            children: HashMap::new(),
+            max_known_height: SlotHeight::new(0),
+            block_store,
+            state_store,
+        }
+    }
+
+    /// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to
+    /// weighted votes.
+    pub fn get_latest_votes(
+        &self,
+        state_root: &Hash256,
+        block_slot: Slot,
+        spec: &ChainSpec,
+    ) -> Result<HashMap<Hash256, u64>, ForkChoiceError> {
+        // get latest votes
+        // Note: Votes are weighted by min(balance, MAX_DEPOSIT_AMOUNT) //
+        // FORK_CHOICE_BALANCE_INCREMENT
+        // build a hashmap of block_hash to weighted votes
+        let mut latest_votes: HashMap<Hash256, u64> = HashMap::new();
+        // gets the current weighted votes
+        let current_state = self
+            .state_store
+            .get_deserialized(&state_root)?
+            .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;
+
+        let active_validator_indices = get_active_validator_indices(
+            &current_state.validator_registry[..],
+            block_slot.epoch(spec.slots_per_epoch),
+        );
+
+        for index in active_validator_indices {
+            let balance = std::cmp::min(
+                current_state.validator_balances[index],
+                spec.max_deposit_amount,
+            ) / spec.fork_choice_balance_increment;
+            if balance > 0 {
+                if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) {
+                    *latest_votes.entry(*target).or_insert_with(|| 0) += balance;
+                }
+            }
+        }
+        trace!("Latest votes: {:?}", latest_votes);
+        Ok(latest_votes)
+    }
+
+    /// Gets the ancestor at a given height `target_height` of a block specified by `block_hash`.
+    fn get_ancestor(
+        &mut self,
+        block_hash: Hash256,
+        target_height: SlotHeight,
+        spec: &ChainSpec,
+    ) -> Option<Hash256> {
+        // return None if we can't get the block from the db.
+        let block_height = {
+            let block_slot = self
+                .block_store
+                .get_deserialized(&block_hash)
+                .ok()?
+ .expect("Should have returned already if None") + .slot; + + block_slot.height(spec.genesis_slot) + }; + + // verify we haven't exceeded the block height + if target_height >= block_height { + if target_height > block_height { + return None; + } else { + return Some(block_hash); + } + } + // check if the result is stored in our cache + let cache_key = CacheKey::new(&block_hash, target_height.as_u64()); + if let Some(ancestor) = self.cache.get(&cache_key) { + return Some(*ancestor); + } + + // not in the cache recursively search for ancestors using a log-lookup + if let Some(ancestor) = { + let ancestor_lookup = self.ancestors + [log2_int((block_height - target_height - 1u64).as_u64()) as usize] + .get(&block_hash) + //TODO: Panic if we can't lookup and fork choice fails + .expect("All blocks should be added to the ancestor log lookup table"); + self.get_ancestor(*ancestor_lookup, target_height, &spec) + } { + // add the result to the cache + self.cache.insert(cache_key, ancestor); + return Some(ancestor); + } + + None + } + + // looks for an obvious block winner given the latest votes for a specific height + fn get_clear_winner( + &mut self, + latest_votes: &HashMap, + block_height: SlotHeight, + spec: &ChainSpec, + ) -> Option { + // map of vote counts for every hash at this height + let mut current_votes: HashMap = HashMap::new(); + let mut total_vote_count = 0; + + trace!("Clear winner at block height: {}", block_height); + // loop through the latest votes and count all votes + // these have already been weighted by balance + for (hash, votes) in latest_votes.iter() { + if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) { + let current_vote_value = current_votes.get(&ancestor).unwrap_or_else(|| &0); + current_votes.insert(ancestor, current_vote_value + *votes); + total_vote_count += votes; + } + } + // Check if there is a clear block winner at this height. If so return it. + for (hash, votes) in current_votes.iter() { + if *votes > total_vote_count / 2 { + // we have a clear winner, return it + return Some(*hash); + } + } + // didn't find a clear winner + None + } + + // Finds the best child (one with highest votes) + fn choose_best_child(&self, votes: &HashMap) -> Option { + if votes.is_empty() { + return None; + } + + // Iterate through hashmap to get child with maximum votes + let best_child = votes.iter().max_by(|(child1, v1), (child2, v2)| { + let mut result = v1.cmp(v2); + // If votes are equal, choose smaller hash to break ties deterministically + if result == Ordering::Equal { + // Reverse so that max_by chooses smaller hash + result = child1.cmp(child2).reverse(); + } + result + }); + + Some(*best_child.unwrap().0) + } +} + +impl ForkChoice for OptimizedLMDGhost { + fn add_block( + &mut self, + block: &BeaconBlock, + block_hash: &Hash256, + spec: &ChainSpec, + ) -> Result<(), ForkChoiceError> { + // get the height of the parent + let parent_height = self + .block_store + .get_deserialized(&block.parent_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? + .slot() + .height(spec.genesis_slot); + + let parent_hash = &block.parent_root; + + // add the new block to the children of parent + (*self + .children + .entry(block.parent_root) + .or_insert_with(|| vec![])) + .push(block_hash.clone()); + + // build the ancestor data structure + for index in 0..16 { + if parent_height % (1 << index) == 0 { + self.ancestors[index].insert(*block_hash, *parent_hash); + } else { + // TODO: This is unsafe. Will panic if parent_hash doesn't exist. 
Using it for debugging + let parent_ancestor = self.ancestors[index][parent_hash]; + self.ancestors[index].insert(*block_hash, parent_ancestor); + } + } + // update the max height + self.max_known_height = std::cmp::max(self.max_known_height, parent_height + 1); + Ok(()) + } + + fn add_attestation( + &mut self, + validator_index: u64, + target_block_root: &Hash256, + spec: &ChainSpec, + ) -> Result<(), ForkChoiceError> { + // simply add the attestation to the latest_attestation_target if the block_height is + // larger + trace!( + "Adding attestation of validator: {:?} for block: {}", + validator_index, + target_block_root + ); + let attestation_target = self + .latest_attestation_targets + .entry(validator_index) + .or_insert_with(|| *target_block_root); + // if we already have a value + if attestation_target != target_block_root { + trace!("Old attestation found: {:?}", attestation_target); + // get the height of the target block + let block_height = self + .block_store + .get_deserialized(&target_block_root)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? + .slot() + .height(spec.genesis_slot); + + // get the height of the past target block + let past_block_height = self + .block_store + .get_deserialized(&attestation_target)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? + .slot() + .height(spec.genesis_slot); + // update the attestation only if the new target is higher + if past_block_height < block_height { + trace!("Updating old attestation"); + *attestation_target = *target_block_root; + } + } + Ok(()) + } + + /// Perform lmd_ghost on the current chain to find the head. + fn find_head( + &mut self, + justified_block_start: &Hash256, + spec: &ChainSpec, + ) -> Result { + debug!( + "Starting optimised fork choice at block: {}", + justified_block_start + ); + let block = self + .block_store + .get_deserialized(&justified_block_start)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; + + let block_slot = block.slot(); + let state_root = block.state_root(); + let mut block_height = block_slot.height(spec.genesis_slot); + + let mut current_head = *justified_block_start; + + let mut latest_votes = self.get_latest_votes(&state_root, block_slot, spec)?; + + // remove any votes that don't relate to our current head. + latest_votes + .retain(|hash, _| self.get_ancestor(*hash, block_height, spec) == Some(current_head)); + + // begin searching for the head + loop { + debug!( + "Iteration for block: {} with vote length: {}", + current_head, + latest_votes.len() + ); + // if there are no children, we are done, return the current_head + let children = match self.children.get(¤t_head) { + Some(children) => children.clone(), + None => { + debug!("Head found: {}", current_head); + return Ok(current_head); + } + }; + + // logarithmic lookup blocks to see if there are obvious winners, if so, + // progress to the next iteration. 
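+            // `step` starts at half the largest power of two that fits within the
+            // remaining known height range; it is halved each iteration until a
+            // height with a clear majority winner is found, or it reaches zero.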
+ let mut step = + power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u64()) / 2; + while step > 0 { + trace!("Current Step: {}", step); + if let Some(clear_winner) = self.get_clear_winner( + &latest_votes, + block_height - (block_height % step) + step, + spec, + ) { + current_head = clear_winner; + break; + } + step /= 2; + } + if step > 0 { + trace!("Found clear winner: {}", current_head); + } + // if our skip lookup failed and we only have one child, progress to that child + else if children.len() == 1 { + current_head = children[0]; + trace!( + "Lookup failed, only one child, proceeding to child: {}", + current_head + ); + } + // we need to find the best child path to progress down. + else { + trace!("Searching for best child"); + let mut child_votes = HashMap::new(); + for (voted_hash, vote) in latest_votes.iter() { + // if the latest votes correspond to a child + if let Some(child) = self.get_ancestor(*voted_hash, block_height + 1, spec) { + // add up the votes for each child + *child_votes.entry(child).or_insert_with(|| 0) += vote; + } + } + // check if we have votes of children, if not select the smallest hash child + if child_votes.is_empty() { + current_head = *children + .iter() + .min_by(|child1, child2| child1.cmp(child2)) + .expect("Must be children here"); + trace!( + "Children have no votes - smallest hash chosen: {}", + current_head + ); + } else { + // given the votes on the children, find the best child + current_head = self + .choose_best_child(&child_votes) + .ok_or(ForkChoiceError::CannotFindBestChild)?; + trace!("Best child found: {}", current_head); + } + } + + // didn't find head yet, proceed to next iteration + // update block height + block_height = self + .block_store + .get_deserialized(¤t_head)? + .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? + .slot() + .height(spec.genesis_slot); + // prune the latest votes for votes that are not part of current chosen chain + // more specifically, only keep votes that have head as an ancestor + for hash in latest_votes.keys() { + trace!( + "Ancestor for vote: {} at height: {} is: {:?}", + hash, + block_height, + self.get_ancestor(*hash, block_height, spec) + ); + } + latest_votes.retain(|hash, _| { + self.get_ancestor(*hash, block_height, spec) == Some(current_head) + }); + } + } +} + +/// Type for storing blocks in a memory cache. Key is comprised of block-hash plus the height. 
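+///
+/// Used by `get_ancestor` to memoise ancestors that have already been resolved.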
+#[derive(PartialEq, Eq, Hash)] +pub struct CacheKey { + block_hash: Hash256, + block_height: T, +} + +impl CacheKey { + pub fn new(block_hash: &Hash256, block_height: T) -> Self { + CacheKey { + block_hash: *block_hash, + block_height, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + pub fn test_power_of_2_below() { + assert_eq!(power_of_2_below(4), 4); + assert_eq!(power_of_2_below(5), 4); + assert_eq!(power_of_2_below(7), 4); + assert_eq!(power_of_2_below(24), 16); + assert_eq!(power_of_2_below(32), 32); + assert_eq!(power_of_2_below(33), 32); + assert_eq!(power_of_2_below(63), 32); + } + + #[test] + pub fn test_power_of_2_below_large() { + let pow: u64 = 1 << 24; + for x in (pow - 20)..(pow + 20) { + assert!(power_of_2_below(x) <= x, "{}", x); + } + } +} diff --git a/eth2/fork_choice/src/protolambda_lmd_ghost.rs b/eth2/fork_choice/src/protolambda_lmd_ghost.rs deleted file mode 100644 index 8b1378917..000000000 --- a/eth2/fork_choice/src/protolambda_lmd_ghost.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index ab4cd2ada..abf13f21b 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -210,6 +210,7 @@ impl ForkChoice for SlowLMDGhost { trace!("Children found: {:?}", children); let mut head_vote_count = 0; + head_hash = children[0]; for child_hash in children { let vote_count = self.get_vote_count(&latest_votes, &child_hash)?; trace!("Vote count for child: {} is: {}", child_hash, vote_count); @@ -218,6 +219,12 @@ impl ForkChoice for SlowLMDGhost { head_hash = *child_hash; head_vote_count = vote_count; } + // resolve ties - choose smaller hash + else if vote_count == head_vote_count { + if *child_hash < head_hash { + head_hash = *child_hash; + } + } } } Ok(head_hash) diff --git a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml index 3233137ab..61b0b05c4 100644 --- a/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml +++ b/eth2/fork_choice/tests/bitwise_lmd_ghost_test_vectors.yaml @@ -63,3 +63,82 @@ test_cases: - b7: 2 heads: - id: 'b4' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + - id: 'b3' + parent: 'b0' + - id: 'b4' + parent: 'b1' + - id: 'b5' + parent: 'b1' + - id: 'b6' + parent: 'b2' + - id: 'b7' + parent: 'b2' + - id: 'b8' + parent: 'b3' + - id: 'b9' + parent: 'b3' + weights: + - b1: 2 + - b2: 1 + - b3: 1 + - b4: 7 + - b5: 5 + - b6: 2 + - b7: 4 + - b8: 4 + - b9: 2 + heads: + - id: 'b4' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + - id: 'b3' + parent: 'b0' + - id: 'b4' + parent: 'b1' + - id: 'b5' + parent: 'b1' + - id: 'b6' + parent: 'b2' + - id: 'b7' + parent: 'b2' + - id: 'b8' + parent: 'b3' + - id: 'b9' + parent: 'b3' + weights: + - b1: 1 + - b2: 1 + - b3: 1 + - b4: 7 + - b5: 5 + - b6: 2 + - b7: 4 + - b8: 4 + - b9: 2 + heads: + - id: 'b7' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + weights: + - b1: 0 + - b2: 0 + heads: + - id: 'b1' + diff --git a/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml b/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml index 4676d8201..e7847de11 100644 --- a/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml +++ b/eth2/fork_choice/tests/lmd_ghost_test_vectors.yaml @@ -35,3 +35,31 @@ test_cases: - b3: 3 heads: - id: 'b1' +# equal weights children. 
Should choose lower hash b2 +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + - id: 'b3' + parent: 'b0' + weights: + - b1: 5 + - b2: 6 + - b3: 6 + heads: + - id: 'b2' +- blocks: + - id: 'b0' + parent: 'b0' + - id: 'b1' + parent: 'b0' + - id: 'b2' + parent: 'b0' + weights: + - b1: 0 + - b2: 0 + heads: + - id: 'b1' diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 7228bca10..cd5ff360f 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -3,7 +3,7 @@ extern crate beacon_chain; extern crate bls; extern crate db; -//extern crate env_logger; // for debugging +// extern crate env_logger; // for debugging extern crate fork_choice; extern crate hex; extern crate log; @@ -15,18 +15,32 @@ pub use beacon_chain::BeaconChain; use bls::Signature; use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::MemoryDB; -//use env_logger::{Builder, Env}; -use fork_choice::{BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, SlowLMDGhost}; +// use env_logger::{Builder, Env}; +use fork_choice::{ + BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost, +}; use ssz::ssz_encode; use std::collections::HashMap; use std::sync::Arc; use std::{fs::File, io::prelude::*, path::PathBuf}; use types::test_utils::TestingBeaconStateBuilder; -use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; +use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Keypair, Slot}; use yaml_rust::yaml; // Note: We Assume the block Id's are hex-encoded. +#[test] +fn test_optimized_lmd_ghost() { + // set up logging + // Builder::from_env(Env::default().default_filter_or("trace")).init(); + + test_yaml_vectors( + ForkChoiceAlgorithm::OptimizedLMDGhost, + "tests/lmd_ghost_test_vectors.yaml", + 100, + ); +} + #[test] fn test_bitwise_lmd_ghost() { // set up logging @@ -204,7 +218,7 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec { // initialise a single validator and state. All blocks will reference this state root. 
fn setup_inital_state( fork_choice_algo: &ForkChoiceAlgorithm, - no_validators: usize, + num_validators: usize, ) -> (Box, Arc>, Hash256) { let db = Arc::new(MemoryDB::open()); let block_store = Arc::new(BeaconBlockStore::new(db.clone())); @@ -212,6 +226,10 @@ fn setup_inital_state( // the fork choice instantiation let fork_choice: Box = match fork_choice_algo { + ForkChoiceAlgorithm::OptimizedLMDGhost => Box::new(OptimizedLMDGhost::new( + block_store.clone(), + state_store.clone(), + )), ForkChoiceAlgorithm::BitwiseLMDGhost => Box::new(BitwiseLMDGhost::new( block_store.clone(), state_store.clone(), @@ -225,7 +243,7 @@ fn setup_inital_state( let spec = ChainSpec::foundation(); let state_builder = - TestingBeaconStateBuilder::from_deterministic_keypairs(no_validators, &spec); + TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec); let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index e4981b200..49b4f4371 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -4,14 +4,13 @@ use ssz::TreeHash; use state_processing::{ per_epoch_processing, per_epoch_processing::{ - calculate_active_validator_indices, calculate_attester_sets, clean_attestations, - process_crosslinks, process_eth1_data, process_justification, - process_rewards_and_penalities, process_validator_registry, update_active_tree_index_roots, - update_latest_slashed_balances, + clean_attestations, initialize_validator_statuses, process_crosslinks, process_eth1_data, + process_justification, process_rewards_and_penalities, process_validator_registry, + update_active_tree_index_roots, update_latest_slashed_balances, }, }; use types::test_utils::TestingBeaconStateBuilder; -use types::{validator_registry::get_active_validator_indices, *}; +use types::*; pub const BENCHING_SAMPLE_SIZE: usize = 10; pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; @@ -73,64 +72,6 @@ pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: u /// /// `desc` will be added to the title of each bench. 
fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_active_validator_indices", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - calculate_active_validator_indices(&mut state, &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_current_total_balance", move |b| { - b.iter_batched( - || state_clone.clone(), - |state| { - state.get_total_balance(&active_validator_indices[..], &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_previous_total_balance", move |b| { - b.iter_batched( - || state_clone.clone(), - |state| { - state.get_total_balance( - &get_active_validator_indices( - &state.validator_registry, - state.previous_epoch(&spec_clone), - )[..], - &spec_clone, - ); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - let state_clone = state.clone(); let spec_clone = spec.clone(); c.bench( @@ -152,11 +93,11 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let spec_clone = spec.clone(); c.bench( &format!("{}/epoch_processing", desc), - Benchmark::new("calculate_attester_sets", move |b| { + Benchmark::new("initialize_validator_statuses", move |b| { b.iter_batched( || state_clone.clone(), |mut state| { - calculate_attester_sets(&mut state, &spec_clone).unwrap(); + initialize_validator_statuses(&mut state, &spec_clone).unwrap(); state }, criterion::BatchSize::SmallInput, @@ -167,28 +108,14 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let state_clone = state.clone(); let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - let current_total_balance = state.get_total_balance(&active_validator_indices[..], &spec); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); + let attesters = initialize_validator_statuses(&state, &spec).unwrap(); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("process_justification", move |b| { b.iter_batched( || state_clone.clone(), |mut state| { - process_justification( - &mut state, - current_total_balance, - previous_total_balance, - attesters.previous_epoch_boundary.balance, - attesters.current_epoch_boundary.balance, - &spec_clone, - ); + process_justification(&mut state, &attesters.total_balances, &spec_clone); state }, criterion::BatchSize::SmallInput, @@ -213,25 +140,17 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp let mut state_clone = state.clone(); let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); - let 
active_validator_indices = calculate_active_validator_indices(&state, &spec); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); + let attesters = initialize_validator_statuses(&state, &spec).unwrap(); let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("process_rewards_and_penalties", move |b| { b.iter_batched( - || state_clone.clone(), - |mut state| { + || (state_clone.clone(), attesters.clone()), + |(mut state, mut attesters)| { process_rewards_and_penalities( &mut state, - &active_validator_indices, - &attesters, - previous_total_balance, + &mut attesters, &winning_root_for_shards, &spec_clone, ) @@ -261,32 +180,8 @@ fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSp .sample_size(BENCHING_SAMPLE_SIZE), ); - let mut state_clone = state.clone(); + let state_clone = state.clone(); let spec_clone = spec.clone(); - let previous_epoch = state.previous_epoch(&spec); - let attesters = calculate_attester_sets(&state, &spec).unwrap(); - let active_validator_indices = calculate_active_validator_indices(&state, &spec); - let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - &spec, - ); - assert_eq!( - state_clone.finalized_epoch, state_clone.validator_registry_update_epoch, - "The last registry update should be at the last finalized epoch." - ); - process_justification( - &mut state_clone, - current_total_balance, - previous_total_balance, - attesters.previous_epoch_boundary.balance, - attesters.current_epoch_boundary.balance, - spec, - ); - assert!( - state_clone.finalized_epoch > state_clone.validator_registry_update_epoch, - "The state should have been finalized." 
- ); c.bench( &format!("{}/epoch_processing", desc), Benchmark::new("process_validator_registry", move |b| { diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index ad8c4f714..c619e1ef7 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -18,8 +18,8 @@ pub fn state_processing(c: &mut Criterion) { Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init(); } - bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); + bench_block_processing::bench_block_processing_n_validators(c, VALIDATOR_COUNT); } criterion_group!(benches, state_processing); diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 4abbe012c..8c4b8e88b 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,19 +1,16 @@ -use attester_sets::AttesterSets; use errors::EpochProcessingError as Error; -use fnv::FnvHashMap; -use fnv::FnvHashSet; use integer_sqrt::IntegerSquareRoot; use rayon::prelude::*; use ssz::TreeHash; use std::collections::HashMap; -use std::iter::FromIterator; use types::{validator_registry::get_active_validator_indices, *}; +use validator_statuses::{TotalBalances, ValidatorStatuses}; use winning_root::{winning_root, WinningRoot}; -pub mod attester_sets; pub mod errors; pub mod inclusion_distance; pub mod tests; +pub mod validator_statuses; pub mod winning_root; /// Maps a shard to a winning root. @@ -28,47 +25,22 @@ pub type WinningRootHashSet = HashMap; /// /// Spec v0.4.0 pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - let previous_epoch = state.previous_epoch(spec); - // Ensure all of the caches are built. state.build_epoch_cache(RelativeEpoch::Previous, spec)?; state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Next, spec)?; - let attesters = calculate_attester_sets(&state, spec)?; - - let active_validator_indices = calculate_active_validator_indices(&state, spec); - - let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec); - - let previous_total_balance = state.get_total_balance( - &get_active_validator_indices(&state.validator_registry, previous_epoch)[..], - spec, - ); + let mut statuses = initialize_validator_statuses(&state, spec)?; process_eth1_data(state, spec); - process_justification( - state, - current_total_balance, - previous_total_balance, - attesters.previous_epoch_boundary.balance, - attesters.current_epoch_boundary.balance, - spec, - ); + process_justification(state, &statuses.total_balances, spec); // Crosslinks let winning_root_for_shards = process_crosslinks(state, spec)?; // Rewards and Penalities - process_rewards_and_penalities( - state, - &active_validator_indices, - &attesters, - previous_total_balance, - &winning_root_for_shards, - spec, - )?; + process_rewards_and_penalities(state, &mut statuses, &winning_root_for_shards, spec)?; // Ejections state.process_ejections(spec); @@ -105,11 +77,15 @@ pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) /// - etc. 
/// /// Spec v0.4.0 -pub fn calculate_attester_sets( +pub fn initialize_validator_statuses( state: &BeaconState, spec: &ChainSpec, -) -> Result { - AttesterSets::new(&state, spec) +) -> Result { + let mut statuses = ValidatorStatuses::new(state, spec); + + statuses.process_attestations(&state, &state.latest_attestations, spec)?; + + Ok(statuses) } /// Spec v0.4.0 @@ -137,10 +113,7 @@ pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { /// Spec v0.4.0 pub fn process_justification( state: &mut BeaconState, - current_total_balance: u64, - previous_total_balance: u64, - previous_epoch_boundary_attesting_balance: u64, - current_epoch_boundary_attesting_balance: u64, + total_balances: &TotalBalances, spec: &ChainSpec, ) { let previous_epoch = state.previous_epoch(spec); @@ -153,7 +126,8 @@ pub fn process_justification( // // - Set the 2nd bit of the bitfield. // - Set the previous epoch to be justified. - if (3 * previous_epoch_boundary_attesting_balance) >= (2 * previous_total_balance) { + if (3 * total_balances.previous_epoch_boundary_attesters) >= (2 * total_balances.previous_epoch) + { state.justification_bitfield |= 2; new_justified_epoch = previous_epoch; } @@ -161,7 +135,7 @@ pub fn process_justification( // // - Set the 1st bit of the bitfield. // - Set the current epoch to be justified. - if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) { + if (3 * total_balances.current_epoch_boundary_attesters) >= (2 * total_balances.current_epoch) { state.justification_bitfield |= 1; new_justified_epoch = current_epoch; } @@ -283,126 +257,79 @@ pub fn process_crosslinks( /// Spec v0.4.0 pub fn process_rewards_and_penalities( state: &mut BeaconState, - active_validator_indices: &[usize], - attesters: &AttesterSets, - previous_total_balance: u64, + statuses: &mut ValidatorStatuses, winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { let next_epoch = state.next_epoch(spec); - let active_validator_indices: FnvHashSet = - FnvHashSet::from_iter(active_validator_indices.iter().cloned()); + statuses.process_winning_roots(state, winning_root_for_shards, spec)?; - let previous_epoch_attestations: Vec<&PendingAttestation> = state - .latest_attestations - .par_iter() - .filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec)) - .collect(); + let total_balances = &statuses.total_balances; - let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; + let base_reward_quotient = + total_balances.previous_epoch.integer_sqrt() / spec.base_reward_quotient; + // Guard against a divide-by-zero during the validator balance update. if base_reward_quotient == 0 { return Err(Error::BaseRewardQuotientIsZero); } - if previous_total_balance == 0 { + // Guard against a divide-by-zero during the validator balance update. 
+ if total_balances.previous_epoch == 0 { return Err(Error::PreviousTotalBalanceIsZero); } - - // Map is ValidatorIndex -> ProposerIndex - let mut inclusion_slots: FnvHashMap = FnvHashMap::default(); - for a in &previous_epoch_attestations { - let participants = - state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; - let inclusion_distance = (a.inclusion_slot - a.data.slot).as_u64(); - for participant in participants { - if let Some((existing_distance, _)) = inclusion_slots.get(&participant) { - if *existing_distance <= inclusion_distance { - continue; - } - } - let proposer_index = state - .get_beacon_proposer_index(a.data.slot, spec) - .map_err(|_| Error::UnableToDetermineProducer)?; - inclusion_slots.insert( - participant, - (Slot::from(inclusion_distance), proposer_index), - ); - } + // Guard against an out-of-bounds during the validator balance update. + if statuses.statuses.len() != state.validator_balances.len() { + return Err(Error::ValidatorStatusesInconsistent); } // Justification and finalization let epochs_since_finality = next_epoch - state.finalized_epoch; - if epochs_since_finality <= 4 { - state.validator_balances = state - .validator_balances - .par_iter() - .enumerate() - .map(|(index, &balance)| { - let mut balance = balance; - let base_reward = state.base_reward(index, base_reward_quotient, spec); + state.validator_balances = state + .validator_balances + .par_iter() + .enumerate() + .map(|(index, &balance)| { + let mut balance = balance; + let status = &statuses.statuses[index]; + let base_reward = state.base_reward(index, base_reward_quotient, spec); + if epochs_since_finality <= 4 { // Expected FFG source - if attesters.previous_epoch.indices.contains(&index) { + if status.is_previous_epoch_attester { safe_add_assign!( balance, - base_reward * attesters.previous_epoch.balance / previous_total_balance + base_reward * total_balances.previous_epoch_attesters + / total_balances.previous_epoch ); - } else if active_validator_indices.contains(&index) { + } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); } // Expected FFG target - if attesters.previous_epoch_boundary.indices.contains(&index) { + if status.is_previous_epoch_boundary_attester { safe_add_assign!( balance, - base_reward * attesters.previous_epoch_boundary.balance - / previous_total_balance + base_reward * total_balances.previous_epoch_boundary_attesters + / total_balances.previous_epoch ); - } else if active_validator_indices.contains(&index) { + } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); } // Expected beacon chain head - if attesters.previous_epoch_head.indices.contains(&index) { + if status.is_previous_epoch_head_attester { safe_add_assign!( balance, - base_reward * attesters.previous_epoch_head.balance - / previous_total_balance + base_reward * total_balances.previous_epoch_head_attesters + / total_balances.previous_epoch ); - } else if active_validator_indices.contains(&index) { + } else if status.is_active_in_previous_epoch { safe_sub_assign!(balance, base_reward); }; - - if attesters.previous_epoch.indices.contains(&index) { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - - let (inclusion_distance, _) = inclusion_slots - .get(&index) - .expect("Inconsistent inclusion_slots."); - - if *inclusion_distance > 0 { - safe_add_assign!( - balance, - base_reward * spec.min_attestation_inclusion_delay - / inclusion_distance.as_u64() - ) - } - } - - balance - }) - .collect(); - } else 
{ - state.validator_balances = state - .validator_balances - .par_iter() - .enumerate() - .map(|(index, &balance)| { - let mut balance = balance; - + } else { let inactivity_penalty = state.inactivity_penalty( index, epochs_since_finality, @@ -410,14 +337,14 @@ pub fn process_rewards_and_penalities( spec, ); - if active_validator_indices.contains(&index) { - if !attesters.previous_epoch.indices.contains(&index) { + if status.is_active_in_previous_epoch { + if !status.is_previous_epoch_attester { safe_sub_assign!(balance, inactivity_penalty); } - if !attesters.previous_epoch_boundary.indices.contains(&index) { + if !status.is_previous_epoch_boundary_attester { safe_sub_assign!(balance, inactivity_penalty); } - if !attesters.previous_epoch_head.indices.contains(&index) { + if !status.is_previous_epoch_head_attester { safe_sub_assign!(balance, inactivity_penalty); } @@ -426,91 +353,45 @@ pub fn process_rewards_and_penalities( safe_sub_assign!(balance, 2 * inactivity_penalty + base_reward); } } + } - if attesters.previous_epoch.indices.contains(&index) { - let base_reward = state.base_reward(index, base_reward_quotient, spec); + // Crosslinks - let (inclusion_distance, _) = inclusion_slots - .get(&index) - .expect("Inconsistent inclusion_slots."); + if let Some(ref info) = status.winning_root_info { + safe_add_assign!( + balance, + base_reward * info.total_attesting_balance / info.total_committee_balance + ); + } else { + safe_sub_assign!(balance, base_reward); + } - if *inclusion_distance > 0 { - safe_add_assign!( - balance, - base_reward * spec.min_attestation_inclusion_delay - / inclusion_distance.as_u64() - ) - } - } - - balance - }) - .collect(); - } + balance + }) + .collect(); // Attestation inclusion - // - for &index in &attesters.previous_epoch.indices { - let (_, proposer_index) = inclusion_slots - .get(&index) - .ok_or_else(|| Error::InclusionSlotsInconsistent(index))?; - - let base_reward = state.base_reward(*proposer_index, base_reward_quotient, spec); - - safe_add_assign!( - state.validator_balances[*proposer_index], - base_reward / spec.attestation_inclusion_reward_quotient - ); + // Guard against an out-of-bounds during the attester inclusion balance update. + if statuses.statuses.len() != state.validator_registry.len() { + return Err(Error::ValidatorStatusesInconsistent); } - //Crosslinks + for (index, _validator) in state.validator_registry.iter().enumerate() { + let status = &statuses.statuses[index]; - for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) { - // Clone removes the borrow which becomes an issue when mutating `state.balances`. - let crosslink_committees_at_slot = - state.get_crosslink_committees_at_slot(slot, spec)?.clone(); + if status.is_previous_epoch_attester { + let proposer_index = status.inclusion_info.proposer_index; + let inclusion_distance = status.inclusion_info.distance; - for (crosslink_committee, shard) in crosslink_committees_at_slot { - let shard = shard as u64; + let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec); - // Note: I'm a little uncertain of the logic here -- I am waiting for spec v0.5.0 to - // clear it up. - // - // What happens here is: - // - // - If there was some crosslink root elected by the super-majority of this committee, - // then we reward all who voted for that root and penalize all that did not. - // - However, if there _was not_ some super-majority-voted crosslink root, then penalize - // all the validators. 
- // - // I'm not quite sure that the second case (no super-majority crosslink) is correct. - if let Some(winning_root) = winning_root_for_shards.get(&shard) { - // Hash set de-dedups and (hopefully) offers a speed improvement from faster - // lookups. - let attesting_validator_indices: FnvHashSet = - FnvHashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned()); - - for &index in &crosslink_committee { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - - let total_balance = state.get_total_balance(&crosslink_committee, spec); - - if attesting_validator_indices.contains(&index) { - safe_add_assign!( - state.validator_balances[index], - base_reward * winning_root.total_attesting_balance / total_balance - ); - } else { - safe_sub_assign!(state.validator_balances[index], base_reward); - } - } - } else { - for &index in &crosslink_committee { - let base_reward = state.base_reward(index, base_reward_quotient, spec); - - safe_sub_assign!(state.validator_balances[index], base_reward); - } + if inclusion_distance > 0 && inclusion_distance < Slot::max_value() { + safe_add_assign!( + state.validator_balances[proposer_index], + base_reward * spec.min_attestation_inclusion_delay + / inclusion_distance.as_u64() + ) } } } diff --git a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs b/eth2/state_processing/src/per_epoch_processing/attester_sets.rs deleted file mode 100644 index 03f49c1d3..000000000 --- a/eth2/state_processing/src/per_epoch_processing/attester_sets.rs +++ /dev/null @@ -1,133 +0,0 @@ -use fnv::FnvHashSet; -use types::*; - -/// A set of validator indices, along with the total balance of all those attesters. -#[derive(Default)] -pub struct Attesters { - /// A set of validator indices. - pub indices: FnvHashSet, - /// The total balance of all validators in `self.indices`. - pub balance: u64, -} - -impl Attesters { - /// Add the given indices to the set, incrementing the sets balance by the provided balance. - fn add(&mut self, additional_indices: &[usize], additional_balance: u64) { - self.indices.reserve(additional_indices.len()); - for i in additional_indices { - self.indices.insert(*i); - } - self.balance = self.balance.saturating_add(additional_balance); - } -} - -/// A collection of `Attester` objects, representing set of attesters that are rewarded/penalized -/// during an epoch transition. -pub struct AttesterSets { - /// All validators who attested during the state's current epoch. - pub current_epoch: Attesters, - /// All validators who attested that the beacon block root of the first slot of the state's - /// current epoch is the same as the one stored in this state. - /// - /// In short validators who agreed with the state about the first slot of the current epoch. - pub current_epoch_boundary: Attesters, - /// All validators who attested during the state's previous epoch. - pub previous_epoch: Attesters, - /// All validators who attested that the beacon block root of the first slot of the state's - /// previous epoch is the same as the one stored in this state. - /// - /// In short, validators who agreed with the state about the first slot of the previous epoch. - pub previous_epoch_boundary: Attesters, - /// All validators who attested that the beacon block root at the pending attestation's slot is - /// the same as the one stored in this state. - /// - /// In short, validators who agreed with the state about the current beacon block root when - /// they attested. 
- pub previous_epoch_head: Attesters, -} - -impl AttesterSets { - /// Loop through all attestations in the state and instantiate a complete `AttesterSets` struct. - /// - /// Spec v0.4.0 - pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { - let mut current_epoch = Attesters::default(); - let mut current_epoch_boundary = Attesters::default(); - let mut previous_epoch = Attesters::default(); - let mut previous_epoch_boundary = Attesters::default(); - let mut previous_epoch_head = Attesters::default(); - - for a in &state.latest_attestations { - let attesting_indices = - state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; - let attesting_balance = state.get_total_balance(&attesting_indices, spec); - - if is_from_epoch(a, state.current_epoch(spec), spec) { - current_epoch.add(&attesting_indices, attesting_balance); - - if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? { - current_epoch_boundary.add(&attesting_indices, attesting_balance); - } - } else if is_from_epoch(a, state.previous_epoch(spec), spec) { - previous_epoch.add(&attesting_indices, attesting_balance); - - if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { - previous_epoch_boundary.add(&attesting_indices, attesting_balance); - } - - if has_common_beacon_block_root(a, state, spec)? { - previous_epoch_head.add(&attesting_indices, attesting_balance); - } - } - } - - Ok(Self { - current_epoch, - current_epoch_boundary, - previous_epoch, - previous_epoch_boundary, - previous_epoch_head, - }) - } -} - -/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. -/// -/// Spec v0.4.0 -fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { - a.data.slot.epoch(spec.slots_per_epoch) == epoch -} - -/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for -/// the first slot of the given epoch. -/// -/// Spec v0.4.0 -fn has_common_epoch_boundary_root( - a: &PendingAttestation, - state: &BeaconState, - epoch: Epoch, - spec: &ChainSpec, -) -> Result { - let slot = epoch.start_slot(spec.slots_per_epoch); - let state_boundary_root = *state - .get_block_root(slot, spec) - .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; - - Ok(a.data.epoch_boundary_root == state_boundary_root) -} - -/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for -/// the current slot of the `PendingAttestation`. -/// -/// Spec v0.4.0 -fn has_common_beacon_block_root( - a: &PendingAttestation, - state: &BeaconState, - spec: &ChainSpec, -) -> Result { - let state_block_root = *state - .get_block_root(a.data.slot, spec) - .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; - - Ok(a.data.beacon_block_root == state_block_root) -} diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index c60e00cae..94fc0cca5 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -8,6 +8,7 @@ pub enum EpochProcessingError { NoRandaoSeed, PreviousTotalBalanceIsZero, InclusionDistanceZero, + ValidatorStatusesInconsistent, /// Unable to get the inclusion distance for a validator that should have an inclusion /// distance. This indicates an internal inconsistency. 
/// diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs new file mode 100644 index 000000000..f76900f3b --- /dev/null +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -0,0 +1,319 @@ +use super::WinningRootHashSet; +use types::*; + +/// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self` +/// as is. +macro_rules! set_self_if_other_is_true { + ($self_: ident, $other: ident, $var: ident) => { + if $other.$var { + $self_.$var = true; + } + }; +} + +/// The information required to reward some validator for their participation in a "winning" +/// crosslink root. +#[derive(Default, Clone)] +pub struct WinningRootInfo { + /// The total balance of the crosslink committee. + pub total_committee_balance: u64, + /// The total balance of the crosslink committee that attested for the "winning" root. + pub total_attesting_balance: u64, +} + +/// The information required to reward a block producer for including an attestation in a block. +#[derive(Clone)] +pub struct InclusionInfo { + /// The earliest slot a validator had an attestation included in the previous epoch. + pub slot: Slot, + /// The distance between the attestation slot and the slot that attestation was included in a + /// block. + pub distance: Slot, + /// The index of the proposer at the slot where the attestation was included. + pub proposer_index: usize, +} + +impl Default for InclusionInfo { + /// Defaults to `slot` and `distance` at their maximum values and `proposer_index` at zero. + fn default() -> Self { + Self { + slot: Slot::max_value(), + distance: Slot::max_value(), + proposer_index: 0, + } + } +} + +impl InclusionInfo { + /// Tests if some `other` `InclusionInfo` has a lower inclusion slot than `self`. If so, + /// replaces `self` with `other`. + pub fn update(&mut self, other: &Self) { + if other.slot < self.slot { + self.slot = other.slot; + self.distance = other.distance; + self.proposer_index = other.proposer_index; + } + } +} + +/// Information required to reward some validator during the current and previous epoch. +#[derive(Default, Clone)] +pub struct AttesterStatus { + /// True if the validator was active in the state's _current_ epoch. + pub is_active_in_current_epoch: bool, + /// True if the validator was active in the state's _previous_ epoch. + pub is_active_in_previous_epoch: bool, + + /// True if the validator had an attestation included in the _current_ epoch. + pub is_current_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _current_ + /// epoch matches the block root known to the state. + pub is_current_epoch_boundary_attester: bool, + /// True if the validator had an attestation included in the _previous_ epoch. + pub is_previous_epoch_attester: bool, + /// True if the validator's beacon block root attestation for the first slot of the _previous_ + /// epoch matches the block root known to the state. + pub is_previous_epoch_boundary_attester: bool, + /// True if the validator's beacon block root attestation in the _previous_ epoch at the + /// attestation's slot (`attestation_data.slot`) matches the block root known to the state. + pub is_previous_epoch_head_attester: bool, + + /// Information used to reward the block producer of this validators earliest-included + /// attestation. 
+ pub inclusion_info: InclusionInfo, + /// Information used to reward/penalize the validator if they voted in the super-majority for + /// some shard block. + pub winning_root_info: Option, +} + +impl AttesterStatus { + /// Accepts some `other` `AttesterStatus` and updates `self` if required. + /// + /// Will never set one of the `bool` fields to `false`, it will only set it to `true` if other + /// contains a `true` field. + /// + /// Note: does not update the winning root info, this is done manually. + pub fn update(&mut self, other: &Self) { + // Update all the bool fields, only updating `self` if `other` is true (never setting + // `self` to false). + set_self_if_other_is_true!(self, other, is_active_in_current_epoch); + set_self_if_other_is_true!(self, other, is_active_in_previous_epoch); + set_self_if_other_is_true!(self, other, is_current_epoch_attester); + set_self_if_other_is_true!(self, other, is_current_epoch_boundary_attester); + set_self_if_other_is_true!(self, other, is_previous_epoch_attester); + set_self_if_other_is_true!(self, other, is_previous_epoch_boundary_attester); + set_self_if_other_is_true!(self, other, is_previous_epoch_head_attester); + + self.inclusion_info.update(&other.inclusion_info); + } +} + +/// The total effective balances for different sets of validators during the previous and current +/// epochs. +#[derive(Default, Clone)] +pub struct TotalBalances { + /// The total effective balance of all active validators during the _current_ epoch. + pub current_epoch: u64, + /// The total effective balance of all active validators during the _previous_ epoch. + pub previous_epoch: u64, + /// The total effective balance of all validators who attested during the _current_ epoch. + pub current_epoch_attesters: u64, + /// The total effective balance of all validators who attested during the _current_ epoch and + /// agreed with the state about the beacon block at the first slot of the _current_ epoch. + pub current_epoch_boundary_attesters: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch. + pub previous_epoch_attesters: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the first slot of the _previous_ epoch. + pub previous_epoch_boundary_attesters: u64, + /// The total effective balance of all validators who attested during the _previous_ epoch and + /// agreed with the state about the beacon block at the time of attestation. + pub previous_epoch_head_attesters: u64, +} + +/// Summarised information about validator participation in the _previous and _current_ epochs of +/// some `BeaconState`. +#[derive(Clone)] +pub struct ValidatorStatuses { + /// Information about each individual validator from the state's validator registy. + pub statuses: Vec, + /// Summed balances for various sets of validators. + pub total_balances: TotalBalances, +} + +impl ValidatorStatuses { + /// Initializes a new instance, determining: + /// + /// - Active validators + /// - Total balances for the current and previous epochs. 
+ /// + /// Spec v0.4.0 + pub fn new(state: &BeaconState, spec: &ChainSpec) -> Self { + let mut statuses = Vec::with_capacity(state.validator_registry.len()); + let mut total_balances = TotalBalances::default(); + + for (i, validator) in state.validator_registry.iter().enumerate() { + let mut status = AttesterStatus::default(); + + if validator.is_active_at(state.current_epoch(spec)) { + status.is_active_in_current_epoch = true; + total_balances.current_epoch += state.get_effective_balance(i, spec); + } + + if validator.is_active_at(state.previous_epoch(spec)) { + status.is_active_in_previous_epoch = true; + total_balances.previous_epoch += state.get_effective_balance(i, spec); + } + + statuses.push(status); + } + + Self { + statuses, + total_balances, + } + } + + /// Process some attestations from the given `state` updating the `statuses` and + /// `total_balances` fields. + /// + /// Spec v0.4.0 + pub fn process_attestations( + &mut self, + state: &BeaconState, + attestations: &[PendingAttestation], + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { + for a in attestations { + let attesting_indices = + state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; + let attesting_balance = state.get_total_balance(&attesting_indices, spec); + + let mut status = AttesterStatus::default(); + + // Profile this attestation, updating the total balances and generating an + // `AttesterStatus` object that applies to all participants in the attestation. + if is_from_epoch(a, state.current_epoch(spec), spec) { + self.total_balances.current_epoch_attesters += attesting_balance; + status.is_current_epoch_attester = true; + + if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? { + self.total_balances.current_epoch_boundary_attesters += attesting_balance; + status.is_current_epoch_boundary_attester = true; + } + } else if is_from_epoch(a, state.previous_epoch(spec), spec) { + self.total_balances.previous_epoch_attesters += attesting_balance; + status.is_previous_epoch_attester = true; + + // The inclusion slot and distance are only required for previous epoch attesters. + status.inclusion_info = InclusionInfo { + slot: a.inclusion_slot, + distance: inclusion_distance(a), + proposer_index: state.get_beacon_proposer_index(a.inclusion_slot, spec)?, + }; + + if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? { + self.total_balances.previous_epoch_boundary_attesters += attesting_balance; + status.is_previous_epoch_boundary_attester = true; + } + + if has_common_beacon_block_root(a, state, spec)? { + self.total_balances.previous_epoch_head_attesters += attesting_balance; + status.is_previous_epoch_head_attester = true; + } + } + + // Loop through the participating validator indices and update the status vec. + for validator_index in attesting_indices { + self.statuses[validator_index].update(&status); + } + } + + Ok(()) + } + + /// Update the `statuses` for each validator based upon whether or not they attested to the + /// "winning" shard block root for the previous epoch. + /// + /// Spec v0.4.0 + pub fn process_winning_roots( + &mut self, + state: &BeaconState, + winning_roots: &WinningRootHashSet, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { + // Loop through each slot in the previous epoch. + for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) { + let crosslink_committees_at_slot = + state.get_crosslink_committees_at_slot(slot, spec)?; + + // Loop through each committee in the slot. 
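+            // Validators that do not appear in any winning root keep `winning_root_info`
+            // as `None` and are penalised by `base_reward` in `process_rewards_and_penalities`.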
+ for (crosslink_committee, shard) in crosslink_committees_at_slot { + // If there was some winning crosslink root for the committee's shard. + if let Some(winning_root) = winning_roots.get(&shard) { + let total_committee_balance = + state.get_total_balance(&crosslink_committee, spec); + for &validator_index in &winning_root.attesting_validator_indices { + // Take note of the balance information for the winning root, it will be + // used later to calculate rewards for that validator. + self.statuses[validator_index].winning_root_info = Some(WinningRootInfo { + total_committee_balance, + total_attesting_balance: winning_root.total_attesting_balance, + }) + } + } + } + } + + Ok(()) + } +} + +/// Returns the distance between when the attestation was created and when it was included in a +/// block. +/// +/// Spec v0.4.0 +fn inclusion_distance(a: &PendingAttestation) -> Slot { + a.inclusion_slot - a.data.slot +} + +/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. +/// +/// Spec v0.4.0 +fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { + a.data.slot.epoch(spec.slots_per_epoch) == epoch +} + +/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for +/// the first slot of the given epoch. +/// +/// Spec v0.4.0 +fn has_common_epoch_boundary_root( + a: &PendingAttestation, + state: &BeaconState, + epoch: Epoch, + spec: &ChainSpec, +) -> Result { + let slot = epoch.start_slot(spec.slots_per_epoch); + let state_boundary_root = *state + .get_block_root(slot, spec) + .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; + + Ok(a.data.epoch_boundary_root == state_boundary_root) +} + +/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for +/// the current slot of the `PendingAttestation`. 
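As a quick illustration of the two helpers above: an attestation's epoch is its slot integer-divided by the slots-per-epoch constant, and the inclusion distance is the gap between the attestation slot and the slot of the block that first included it. A plain-integer sketch, assuming 64 slots per epoch purely for the example:

// Plain-integer mirror of `is_from_epoch` and `inclusion_distance`; the value
// of 64 slots per epoch is an assumption made only for this example.
fn main() {
    let slots_per_epoch = 64u64;
    let attestation_slot = 130u64; // 130 / 64 == 2, so the attestation is from epoch 2
    let inclusion_slot = 134u64;   // slot of the block that first included it

    assert_eq!(attestation_slot / slots_per_epoch, 2); // what `is_from_epoch` checks
    assert_eq!(inclusion_slot - attestation_slot, 4);  // what `inclusion_distance` returns
}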
+/// +/// Spec v0.4.0 +fn has_common_beacon_block_root( + a: &PendingAttestation, + state: &BeaconState, + spec: &ChainSpec, +) -> Result { + let state_block_root = *state + .get_block_root(a.data.slot, spec) + .ok_or_else(|| BeaconStateError::InsufficientBlockRoots)?; + + Ok(a.data.beacon_block_root == state_block_root) +} diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 03ef8ce48..dcc4c1fda 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -20,29 +20,6 @@ pub struct Attestation { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Attestation::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Attestation::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Attestation); } diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 1dfadfb1d..6e3cb3891 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -38,29 +38,6 @@ impl Eq for AttestationData {} #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttestationData::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttestationData::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(AttestationData); } diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 83018c194..020b07d28 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -25,31 +25,6 @@ impl TestRandom for AttestationDataAndCustodyBit { #[cfg(test)] mod test { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - - let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttestationDataAndCustodyBit::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + 
ssz_tests!(AttestationDataAndCustodyBit); } diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 7a0752b6a..f437d41f2 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -16,29 +16,6 @@ pub struct AttesterSlashing { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttesterSlashing::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = AttesterSlashing::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(AttesterSlashing); } diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index b67c866a4..615d9f928 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -70,29 +70,6 @@ impl BeaconBlock { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlock::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlock::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(BeaconBlock); } diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index e7dec2e4b..70ce24dbe 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -21,29 +21,6 @@ pub struct BeaconBlockBody { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlockBody::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconBlockBody::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(BeaconBlockBody); } diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index b4faa6a49..878d13b86 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -1195,42 +1195,34 @@ impl Decodable for BeaconState { } impl TreeHash for BeaconState { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { let 
mut result: Vec = vec![]; - result.append(&mut self.slot.hash_tree_root_internal()); - result.append(&mut self.genesis_time.hash_tree_root_internal()); - result.append(&mut self.fork.hash_tree_root_internal()); - result.append(&mut self.validator_registry.hash_tree_root_internal()); - result.append(&mut self.validator_balances.hash_tree_root_internal()); - result.append( - &mut self - .validator_registry_update_epoch - .hash_tree_root_internal(), - ); - result.append(&mut self.latest_randao_mixes.hash_tree_root_internal()); - result.append( - &mut self - .previous_shuffling_start_shard - .hash_tree_root_internal(), - ); - result.append(&mut self.current_shuffling_start_shard.hash_tree_root_internal()); - result.append(&mut self.previous_shuffling_epoch.hash_tree_root_internal()); - result.append(&mut self.current_shuffling_epoch.hash_tree_root_internal()); - result.append(&mut self.previous_shuffling_seed.hash_tree_root_internal()); - result.append(&mut self.current_shuffling_seed.hash_tree_root_internal()); - result.append(&mut self.previous_justified_epoch.hash_tree_root_internal()); - result.append(&mut self.justified_epoch.hash_tree_root_internal()); - result.append(&mut self.justification_bitfield.hash_tree_root_internal()); - result.append(&mut self.finalized_epoch.hash_tree_root_internal()); - result.append(&mut self.latest_crosslinks.hash_tree_root_internal()); - result.append(&mut self.latest_block_roots.hash_tree_root_internal()); - result.append(&mut self.latest_active_index_roots.hash_tree_root_internal()); - result.append(&mut self.latest_slashed_balances.hash_tree_root_internal()); - result.append(&mut self.latest_attestations.hash_tree_root_internal()); - result.append(&mut self.batched_block_roots.hash_tree_root_internal()); - result.append(&mut self.latest_eth1_data.hash_tree_root_internal()); - result.append(&mut self.eth1_data_votes.hash_tree_root_internal()); - result.append(&mut self.deposit_index.hash_tree_root_internal()); + result.append(&mut self.slot.hash_tree_root()); + result.append(&mut self.genesis_time.hash_tree_root()); + result.append(&mut self.fork.hash_tree_root()); + result.append(&mut self.validator_registry.hash_tree_root()); + result.append(&mut self.validator_balances.hash_tree_root()); + result.append(&mut self.validator_registry_update_epoch.hash_tree_root()); + result.append(&mut self.latest_randao_mixes.hash_tree_root()); + result.append(&mut self.previous_shuffling_start_shard.hash_tree_root()); + result.append(&mut self.current_shuffling_start_shard.hash_tree_root()); + result.append(&mut self.previous_shuffling_epoch.hash_tree_root()); + result.append(&mut self.current_shuffling_epoch.hash_tree_root()); + result.append(&mut self.previous_shuffling_seed.hash_tree_root()); + result.append(&mut self.current_shuffling_seed.hash_tree_root()); + result.append(&mut self.previous_justified_epoch.hash_tree_root()); + result.append(&mut self.justified_epoch.hash_tree_root()); + result.append(&mut self.justification_bitfield.hash_tree_root()); + result.append(&mut self.finalized_epoch.hash_tree_root()); + result.append(&mut self.latest_crosslinks.hash_tree_root()); + result.append(&mut self.latest_block_roots.hash_tree_root()); + result.append(&mut self.latest_active_index_roots.hash_tree_root()); + result.append(&mut self.latest_slashed_balances.hash_tree_root()); + result.append(&mut self.latest_attestations.hash_tree_root()); + result.append(&mut self.batched_block_roots.hash_tree_root()); + result.append(&mut self.latest_eth1_data.hash_tree_root()); 
+ result.append(&mut self.eth1_data_votes.hash_tree_root()); + result.append(&mut self.deposit_index.hash_tree_root()); hash(&result) } } diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 61f3c03b0..1e1a555fd 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -4,7 +4,6 @@ use super::*; use crate::test_utils::TestingBeaconStateBuilder; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::{BeaconState, ChainSpec}; -use ssz::{ssz_encode, Decodable}; /// Tests that `get_attestation_participants` is consistent with the result of /// get_crosslink_committees_at_slot` with a full bitfield. @@ -51,25 +50,4 @@ pub fn get_attestation_participants_consistency() { } } -#[test] -pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconState::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); -} - -#[test] -pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = BeaconState::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 -} +ssz_tests!(BeaconState); diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index f49195a75..5db5e20a6 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -19,29 +19,6 @@ pub struct Crosslink { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Crosslink::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Crosslink::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Crosslink); } diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index 91c6ef2ac..14eb19ad6 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -18,29 +18,6 @@ pub struct Deposit { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Deposit::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Deposit::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Deposit); } diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 61b82f4b3..9d6c1bda7 100644 --- 
a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -18,29 +18,6 @@ pub struct DepositData { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositData::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositData::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(DepositData); } diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 1b506894d..9a9031901 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -66,29 +66,6 @@ impl DepositInput { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositInput::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = DepositInput::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(DepositInput); } diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index 2c817ca38..c4b2b1894 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -17,29 +17,6 @@ pub struct Eth1Data { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1Data::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1Data::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Eth1Data); } diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index 898145575..4788833bd 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -17,29 +17,6 @@ pub struct Eth1DataVote { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1DataVote::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = 
<_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Eth1DataVote::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Eth1DataVote); } diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 0acd6da90..f3b62f5a8 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -29,29 +29,6 @@ impl Fork { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Fork::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Fork::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Fork); } diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 76fcb43ed..e7be732eb 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -1,3 +1,4 @@ +#[macro_use] pub mod test_utils; pub mod attestation; diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 0430d18ba..68dd1c345 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -19,29 +19,6 @@ pub struct PendingAttestation { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = PendingAttestation::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = PendingAttestation::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(PendingAttestation); } diff --git a/eth2/types/src/proposal.rs b/eth2/types/src/proposal.rs index b1fd737a0..59d6370e1 100644 --- a/eth2/types/src/proposal.rs +++ b/eth2/types/src/proposal.rs @@ -23,30 +23,7 @@ pub struct Proposal { mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, SignedRoot, TreeHash}; - - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Proposal::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Proposal::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); 
- - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + use ssz::{SignedRoot, TreeHash}; #[derive(TreeHash)] struct SignedProposal { @@ -75,4 +52,5 @@ mod tests { assert_eq!(original.signed_root(), other.hash_tree_root()); } + ssz_tests!(Proposal); } diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 394c55a01..26c3d67a7 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -18,29 +18,6 @@ pub struct ProposerSlashing { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ProposerSlashing::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ProposerSlashing::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(ProposerSlashing); } diff --git a/eth2/types/src/shard_reassignment_record.rs b/eth2/types/src/shard_reassignment_record.rs index f5dfa8676..9f1705f16 100644 --- a/eth2/types/src/shard_reassignment_record.rs +++ b/eth2/types/src/shard_reassignment_record.rs @@ -14,29 +14,6 @@ pub struct ShardReassignmentRecord { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ShardReassignmentRecord::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = ShardReassignmentRecord::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(ShardReassignmentRecord); } diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index 20ba76cdb..56c9dfc2f 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -46,7 +46,6 @@ mod tests { use crate::chain_spec::ChainSpec; use crate::slot_epoch::{Epoch, Slot}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; #[test] pub fn test_is_double_vote_true() { @@ -120,28 +119,7 @@ mod tests { ); } - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = SlashableAttestation::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = SlashableAttestation::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - 
assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(SlashableAttestation); fn create_slashable_attestation( slot_factor: u64, diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 2af7f5196..c40b6badf 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -103,8 +103,6 @@ impl<'a> Iterator for SlotIter<'a> { #[cfg(test)] mod slot_tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::ssz_encode; all_tests!(Slot); } @@ -112,8 +110,6 @@ mod slot_tests { #[cfg(test)] mod epoch_tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::ssz_encode; all_tests!(Epoch); diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 2148b6cc2..300ad3f6f 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -207,9 +207,9 @@ macro_rules! impl_ssz { } impl TreeHash for $type { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { let mut result: Vec = vec![]; - result.append(&mut self.0.hash_tree_root_internal()); + result.append(&mut self.0.hash_tree_root()); hash(&result) } } @@ -248,7 +248,7 @@ macro_rules! impl_common { } // test macros -#[allow(unused_macros)] +#[cfg(test)] macro_rules! new_tests { ($type: ident) => { #[test] @@ -260,7 +260,7 @@ macro_rules! new_tests { }; } -#[allow(unused_macros)] +#[cfg(test)] macro_rules! from_into_tests { ($type: ident, $other: ident) => { #[test] @@ -286,7 +286,7 @@ macro_rules! from_into_tests { }; } -#[allow(unused_macros)] +#[cfg(test)] macro_rules! math_between_tests { ($type: ident, $other: ident) => { #[test] @@ -434,7 +434,7 @@ macro_rules! math_between_tests { }; } -#[allow(unused_macros)] +#[cfg(test)] macro_rules! math_tests { ($type: ident) => { #[test] @@ -528,35 +528,7 @@ macro_rules! math_tests { }; } -#[allow(unused_macros)] -macro_rules! ssz_tests { - ($type: ident) => { - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = $type::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = $type::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } - }; -} - -#[allow(unused_macros)] +#[cfg(test)] macro_rules! all_tests { ($type: ident) => { new_tests!($type); diff --git a/eth2/types/src/slot_height.rs b/eth2/types/src/slot_height.rs index 1739227a4..4a783d4a0 100644 --- a/eth2/types/src/slot_height.rs +++ b/eth2/types/src/slot_height.rs @@ -33,11 +33,8 @@ impl SlotHeight { } #[cfg(test)] - mod slot_height_tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::ssz_encode; all_tests!(SlotHeight); } diff --git a/eth2/types/src/test_utils/macros.rs b/eth2/types/src/test_utils/macros.rs index b7c0a6522..f5b2fd87c 100644 --- a/eth2/types/src/test_utils/macros.rs +++ b/eth2/types/src/test_utils/macros.rs @@ -17,14 +17,14 @@ macro_rules! 
ssz_tests { } #[test] - pub fn test_hash_tree_root_internal() { + pub fn test_hash_tree_root() { use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use ssz::TreeHash; let mut rng = XorShiftRng::from_seed([42; 16]); let original = $type::random_for_test(&mut rng); - let result = original.hash_tree_root_internal(); + let result = original.hash_tree_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 6fdbe53ad..9d04d1ca7 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -1,3 +1,5 @@ +#[macro_use] +mod macros; mod generate_deterministic_keypairs; mod keypairs_file; mod test_random; diff --git a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs index 232de87ec..92c7fe814 100644 --- a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs @@ -67,13 +67,15 @@ impl TestingAttesterSlashingBuilder { }; let add_signatures = |attestation: &mut SlashableAttestation| { + // All validators sign with a `false` custody bit. + let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { + data: attestation.data.clone(), + custody_bit: false, + }; + let message = attestation_data_and_custody_bit.hash_tree_root(); + for (i, validator_index) in validator_indices.iter().enumerate() { attestation.custody_bitfield.set(i, false); - let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { - data: attestation.data.clone(), - custody_bit: attestation.custody_bitfield.get(i).unwrap(), - }; - let message = attestation_data_and_custody_bit.hash_tree_root(); let signature = signer(*validator_index, &message[..], epoch, Domain::Attestation); attestation.aggregate_signature.add(&signature); } diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index b2cf28c8a..c116cd1b7 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -74,6 +74,22 @@ impl TestingBeaconStateBuilder { TestingBeaconStateBuilder::from_keypairs(keypairs, spec) } + /// Uses the given keypair for all validators. + pub fn from_single_keypair( + validator_count: usize, + keypair: &Keypair, + spec: &ChainSpec, + ) -> Self { + debug!("Generating {} cloned keypairs...", validator_count); + + let mut keypairs = Vec::with_capacity(validator_count); + for _ in 0..validator_count { + keypairs.push(keypair.clone()) + } + + TestingBeaconStateBuilder::from_keypairs(keypairs, spec) + } + /// Creates the builder from an existing set of keypairs. 
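A possible use of the `from_single_keypair` constructor added above, for tests where signature validity is irrelevant but a populated validator registry is needed. This is a sketch only; `ChainSpec::foundation()`, `Keypair::random()` and the import paths are assumptions about the surrounding crates rather than part of this diff:

// Sketch only: builds a small state in which every validator shares one keypair.
// `ChainSpec::foundation()`, `Keypair::random()` and these paths are assumed.
use bls::Keypair;
use types::test_utils::TestingBeaconStateBuilder;
use types::ChainSpec;

fn tiny_state_builder() -> TestingBeaconStateBuilder {
    let spec = ChainSpec::foundation();
    let keypair = Keypair::random();
    // Eight validators, all controlled by the same key: useful when signatures
    // do not matter and keypair generation time should stay small.
    TestingBeaconStateBuilder::from_single_keypair(8, &keypair, &spec)
}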
pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { let validator_count = keypairs.len(); diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index 0382dee11..af3b18ef4 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -24,29 +24,6 @@ pub struct Transfer { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Transfer::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Transfer::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Transfer); } diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 59f6c5826..6d1936bfd 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -54,18 +54,6 @@ impl Default for Validator { mod tests { use super::*; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Validator::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } #[test] fn test_validator_can_be_active() { @@ -90,15 +78,5 @@ mod tests { } } - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = Validator::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(Validator); } diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 58c3ae4c2..38630a057 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -19,29 +19,6 @@ pub struct VoluntaryExit { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::{ssz_encode, Decodable, TreeHash}; - #[test] - pub fn test_ssz_round_trip() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = VoluntaryExit::random_for_test(&mut rng); - - let bytes = ssz_encode(&original); - let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); - - assert_eq!(original, decoded); - } - - #[test] - pub fn test_hash_tree_root_internal() { - let mut rng = XorShiftRng::from_seed([42; 16]); - let original = VoluntaryExit::random_for_test(&mut rng); - - let result = original.hash_tree_root_internal(); - - assert_eq!(result.len(), 32); - // TODO: Add further tests - // https://github.com/sigp/lighthouse/issues/170 - } + ssz_tests!(VoluntaryExit); } diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index af0879ec7..9c5ed0375 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -87,7 +87,7 @@ impl Serialize for AggregateSignature { } impl TreeHash for AggregateSignature { - fn 
hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) } } diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 777ccceaa..c85760bbf 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -98,7 +98,7 @@ impl<'de> Deserialize<'de> for PublicKey { } impl TreeHash for PublicKey { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) } } diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 06c968389..a3914310a 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -66,7 +66,7 @@ impl<'de> Deserialize<'de> for SecretKey { } impl TreeHash for SecretKey { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { self.0.as_bytes().clone() } } diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 760b0018a..47598bc66 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -79,7 +79,7 @@ impl Decodable for Signature { } impl TreeHash for Signature { - fn hash_tree_root_internal(&self) -> Vec { + fn hash_tree_root(&self) -> Vec { hash(&self.0.as_bytes()) } } diff --git a/eth2/utils/boolean-bitfield/fuzz/.gitignore b/eth2/utils/boolean-bitfield/fuzz/.gitignore new file mode 100644 index 000000000..572e03bdf --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/eth2/utils/boolean-bitfield/fuzz/Cargo.toml b/eth2/utils/boolean-bitfield/fuzz/Cargo.toml new file mode 100644 index 000000000..9769fc50e --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/Cargo.toml @@ -0,0 +1,33 @@ + +[package] +name = "boolean-bitfield-fuzz" +version = "0.0.1" +authors = ["Automatically generated"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies] +ssz = { path = "../../ssz" } + +[dependencies.boolean-bitfield] +path = ".." 
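The manifest continues below with the `libfuzzer-sys` dependency and one `[[bin]]` entry per target; the targets themselves fuzz decoding and encoding of `BooleanBitfield` in isolation. A round-trip target is a natural companion and is sketched here, assuming `BooleanBitfield` implements `PartialEq` and `Debug`; it would need its own `[[bin]]` entry to be built:

// Sketch of a round-trip target: whatever decodes successfully should
// re-encode and decode to an equal value. Assumes `BooleanBitfield`
// implements `PartialEq` and `Debug`, which is not shown in this diff.
#![no_main]
#[macro_use] extern crate libfuzzer_sys;
extern crate boolean_bitfield;
extern crate ssz;

use boolean_bitfield::BooleanBitfield;
use ssz::{ssz_encode, Decodable, DecodeError};

fuzz_target!(|data: &[u8]| {
    let result: Result<(BooleanBitfield, usize), DecodeError> = <_>::ssz_decode(data, 0);
    if let Ok((original, _)) = result {
        let bytes = ssz_encode(&original);
        let (decoded, _): (BooleanBitfield, usize) =
            <_>::ssz_decode(&bytes, 0).expect("re-encoded bitfield must decode");
        assert_eq!(original, decoded);
    }
});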
+[dependencies.libfuzzer-sys] +git = "https://github.com/rust-fuzz/libfuzzer-sys.git" + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_target_from_bytes" +path = "fuzz_targets/fuzz_target_from_bytes.rs" + +[[bin]] +name = "fuzz_target_ssz_decode" +path = "fuzz_targets/fuzz_target_ssz_decode.rs" + +[[bin]] +name = "fuzz_target_ssz_encode" +path = "fuzz_targets/fuzz_target_ssz_encode.rs" diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs new file mode 100644 index 000000000..0c71c6d68 --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs @@ -0,0 +1,9 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate boolean_bitfield; + +use boolean_bitfield::BooleanBitfield; + +fuzz_target!(|data: &[u8]| { + let _result = BooleanBitfield::from_bytes(data); +}); diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs new file mode 100644 index 000000000..14ddbb0a9 --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs @@ -0,0 +1,11 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate boolean_bitfield; +extern crate ssz; + +use boolean_bitfield::BooleanBitfield; +use ssz::{Decodable, DecodeError}; + +fuzz_target!(|data: &[u8]| { + let result: Result<(BooleanBitfield, usize), DecodeError> = <_>::ssz_decode(data, 0); +}); diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs new file mode 100644 index 000000000..0626e5db7 --- /dev/null +++ b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs @@ -0,0 +1,13 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate boolean_bitfield; +extern crate ssz; + +use boolean_bitfield::BooleanBitfield; +use ssz::SszStream; + +fuzz_target!(|data: &[u8]| { + let bitfield = BooleanBitfield::from_bytes(data); + let mut ssz = SszStream::new(); + ssz.append(&bitfield); +}); diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index fb3a78e7a..a0fce1f0a 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -187,8 +187,8 @@ impl Serialize for BooleanBitfield { } impl ssz::TreeHash for BooleanBitfield { - fn hash_tree_root_internal(&self) -> Vec { - self.to_bytes().hash_tree_root_internal() + fn hash_tree_root(&self) -> Vec { + self.to_bytes().hash_tree_root() } } diff --git a/eth2/utils/hashing/fuzz/.gitignore b/eth2/utils/hashing/fuzz/.gitignore new file mode 100644 index 000000000..572e03bdf --- /dev/null +++ b/eth2/utils/hashing/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/eth2/utils/hashing/fuzz/Cargo.toml b/eth2/utils/hashing/fuzz/Cargo.toml new file mode 100644 index 000000000..57e0172eb --- /dev/null +++ b/eth2/utils/hashing/fuzz/Cargo.toml @@ -0,0 +1,22 @@ + +[package] +name = "hashing-fuzz" +version = "0.0.1" +authors = ["Automatically generated"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies.hashing] +path = ".." 
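The hashing fuzz crate configured here drives `hashing::hash` with arbitrary input and, in the target that follows, only checks that the call does not panic. A slightly stronger variant, sketched below under the assumption that `hash` returns a fixed 32-byte digest (the same length the `ssz_tests!` assertions elsewhere in this diff expect from tree hashes), could also check length and determinism:

// Sketch of a stronger target for the same crate: besides "does not panic",
// it checks digest length and determinism. The 32-byte length is an assumption
// consistent with the tree-hash length assertions in this diff.
#![no_main]
#[macro_use] extern crate libfuzzer_sys;
extern crate hashing;

use hashing::hash;

fuzz_target!(|data: &[u8]| {
    let first = hash(data);
    let second = hash(data);
    assert_eq!(first.len(), 32); // fixed-size digest
    assert_eq!(first, second);   // hashing is deterministic
});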
+[dependencies.libfuzzer-sys] +git = "https://github.com/rust-fuzz/libfuzzer-sys.git" + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_target_hash" +path = "fuzz_targets/fuzz_target_hash.rs" diff --git a/eth2/utils/hashing/fuzz/fuzz_targets/fuzz_target_hash.rs b/eth2/utils/hashing/fuzz/fuzz_targets/fuzz_target_hash.rs new file mode 100644 index 000000000..dd78d1ac8 --- /dev/null +++ b/eth2/utils/hashing/fuzz/fuzz_targets/fuzz_target_hash.rs @@ -0,0 +1,9 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate hashing; + +use hashing::hash; + +fuzz_target!(|data: &[u8]| { + let _result = hash(data); +}); diff --git a/eth2/utils/ssz/fuzz/.gitignore b/eth2/utils/ssz/fuzz/.gitignore new file mode 100644 index 000000000..572e03bdf --- /dev/null +++ b/eth2/utils/ssz/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml new file mode 100644 index 000000000..055d031a0 --- /dev/null +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -0,0 +1,105 @@ + +[package] +name = "ssz-fuzz" +version = "0.0.1" +authors = ["Automatically generated"] +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies] +ethereum-types = "0.4.0" + +[dependencies.ssz] +path = ".." +[dependencies.libfuzzer-sys] +git = "https://github.com/rust-fuzz/libfuzzer-sys.git" + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_target_bool_decode" +path = "fuzz_targets/fuzz_target_bool_decode.rs" + +[[bin]] +name = "fuzz_target_bool_encode" +path = "fuzz_targets/fuzz_target_bool_encode.rs" + +[[bin]] +name = "fuzz_target_u8_decode" +path = "fuzz_targets/fuzz_target_u8_decode.rs" + +[[bin]] +name = "fuzz_target_u8_encode" +path = "fuzz_targets/fuzz_target_u8_encode.rs" + +[[bin]] +name = "fuzz_target_u16_decode" +path = "fuzz_targets/fuzz_target_u16_decode.rs" + +[[bin]] +name = "fuzz_target_u16_encode" +path = "fuzz_targets/fuzz_target_u16_encode.rs" + +[[bin]] +name = "fuzz_target_u32_decode" +path = "fuzz_targets/fuzz_target_u32_decode.rs" + +[[bin]] +name = "fuzz_target_u32_encode" +path = "fuzz_targets/fuzz_target_u32_encode.rs" + +[[bin]] +name = "fuzz_target_u64_decode" +path = "fuzz_targets/fuzz_target_u64_decode.rs" + +[[bin]] +name = "fuzz_target_u64_encode" +path = "fuzz_targets/fuzz_target_u64_encode.rs" + +[[bin]] +name = "fuzz_target_usize_decode" +path = "fuzz_targets/fuzz_target_usize_decode.rs" + +[[bin]] +name = "fuzz_target_usize_encode" +path = "fuzz_targets/fuzz_target_usize_encode.rs" + +[[bin]] +name = "fuzz_target_hash256_decode" +path = "fuzz_targets/fuzz_target_hash256_decode.rs" + +[[bin]] +name = "fuzz_target_hash256_encode" +path = "fuzz_targets/fuzz_target_hash256_encode.rs" + +[[bin]] +name = "fuzz_target_address_decode" +path = "fuzz_targets/fuzz_target_address_decode.rs" + +[[bin]] +name = "fuzz_target_address_encode" +path = "fuzz_targets/fuzz_target_address_encode.rs" + +[[bin]] +name = "fuzz_target_vec_decode" +path = "fuzz_targets/fuzz_target_vec_decode.rs" + +[[bin]] +name = "fuzz_target_vec_address_decode" +path = "fuzz_targets/fuzz_target_vec_address_decode.rs" + +[[bin]] +name = "fuzz_target_vec_u64_decode" +path = "fuzz_targets/fuzz_target_vec_u64_decode.rs" + +[[bin]] +name = "fuzz_target_vec_bool_decode" +path = "fuzz_targets/fuzz_target_vec_bool_decode.rs" + +[[bin]] +name = "fuzz_target_vec_encode" +path = "fuzz_targets/fuzz_target_vec_encode.rs" diff --git 
a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs new file mode 100644 index 000000000..c49be500a --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_decode.rs @@ -0,0 +1,21 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::Address; +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(Address, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 20 { + // Should have valid result + let (address, index) = result.unwrap(); + assert_eq!(index, 20); + assert_eq!(address, Address::from_slice(&data[..20])); + } else { + // Length of less than 32 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs new file mode 100644 index 000000000..0e51e00ac --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_address_encode.rs @@ -0,0 +1,20 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::Address; +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + if data.len() >= 20 { + let hash = Address::from_slice(&data[..20]); + ssz.append(&hash); + let ssz = ssz.drain(); + + assert_eq!(data[..20], ssz[..20]); + assert_eq!(ssz.len(), 20); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs new file mode 100644 index 000000000..4fb1052b1 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_decode.rs @@ -0,0 +1,28 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(bool, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 1 { + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + if data[0] == u8::pow(2,7) { + let (val_bool, index) = result.unwrap(); + assert!(val_bool); + assert_eq!(index, 1); + } else if data[0] == 0 { + let (val_bool, index) = result.unwrap(); + assert!(!val_bool); + assert_eq!(index, 1); + } else { + assert_eq!(result, Err(DecodeError::Invalid)); + } + } else { + // Length of 0 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs new file mode 100644 index 000000000..4f344cb7d --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_bool_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut val_bool = 0; + if data.len() >= 1 { + val_bool = data[0] % u8::pow(2, 6); + } + + ssz.append(&val_bool); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(val_bool, ssz[0] % u8::pow(2, 6)); + assert_eq!(ssz.len(), 1); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs 
b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs new file mode 100644 index 000000000..e4ccc56a4 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_decode.rs @@ -0,0 +1,21 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::H256; +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(H256, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 32 { + // Should have valid result + let (hash, index) = result.unwrap(); + assert_eq!(index, 32); + assert_eq!(hash, H256::from_slice(&data[..32])); + } else { + // Length of less than 32 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs new file mode 100644 index 000000000..537d9cdf9 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_hash256_encode.rs @@ -0,0 +1,20 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ethereum_types; +extern crate ssz; + +use ethereum_types::H256; +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + if data.len() >= 32 { + let hash = H256::from_slice(&data[..32]); + ssz.append(&hash); + let ssz = ssz.drain(); + + assert_eq!(data[..32], ssz[..32]); + assert_eq!(ssz.len(), 32); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs new file mode 100644 index 000000000..73395f3af --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_decode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(u16, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 2 { + // Valid result + let (number_u16, index) = result.unwrap(); + assert_eq!(index, 2); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u16::from_be_bytes([data[0], data[1]]); + assert_eq!(number_u16, val); + } else { + // Length of 0 or 1 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs new file mode 100644 index 000000000..ce8a51845 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u16_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u16 = 0; + if data.len() >= 2 { + number_u16 = u16::from_be_bytes([data[0], data[1]]); + } + + ssz.append(&number_u16); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(ssz.len(), 2); + assert_eq!(number_u16, u16::from_be_bytes([ssz[0], ssz[1]])); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs new file mode 100644 index 000000000..e99bf2fad --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_decode.rs 
@@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(u32, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 4 { + // Valid result + let (number_u32, index) = result.unwrap(); + assert_eq!(index, 4); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u32::from_be_bytes([data[0], data[1], data[2], data[3]]); + assert_eq!(number_u32, val); + } else { + // Length less then 4 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs new file mode 100644 index 000000000..c71bcecaf --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u32_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u32 = 0; + if data.len() >= 4 { + number_u32 = u32::from_be_bytes([data[0], data[1], data[2], data[3]]); + } + + ssz.append(&number_u32); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(ssz.len(), 4); + assert_eq!(number_u32, u32::from_be_bytes([ssz[0], ssz[1], ssz[2], ssz[3]])); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs new file mode 100644 index 000000000..63eb60f55 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_decode.rs @@ -0,0 +1,31 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(u64, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 8 { + // Valid result + let (number_u64, index) = result.unwrap(); + assert_eq!(index, 8); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]); + assert_eq!(number_u64, val); + } else { + // Length less then 8 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs new file mode 100644 index 000000000..68616e0da --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u64_encode.rs @@ -0,0 +1,40 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u64 = 0; + if data.len() >= 8 { + number_u64 = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]); + } + + ssz.append(&number_u64); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(ssz.len(), 8); + assert_eq!(number_u64, u64::from_be_bytes([ + ssz[0], + ssz[1], + ssz[2], + ssz[3], + ssz[4], + ssz[5], + ssz[6], + ssz[7], + ])); +}); diff --git 
a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs new file mode 100644 index 000000000..6f17a4c85 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_decode.rs @@ -0,0 +1,21 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + let result: Result<(u8, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 1 { + // Should have valid result + let (number_u8, index) = result.unwrap(); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(index, 1); + assert_eq!(number_u8, data[0]); + } else { + // Length of 0 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs new file mode 100644 index 000000000..a135f2cd5 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_u8_encode.rs @@ -0,0 +1,22 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_u8 = 0; + if data.len() >= 1 { + number_u8 = data[0]; + } + + ssz.append(&number_u8); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + assert_eq!(number_u8, ssz[0]); + assert_eq!(ssz.len(), 1); +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs new file mode 100644 index 000000000..1458bfae9 --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_decode.rs @@ -0,0 +1,32 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::{DecodeError, Decodable}; + +// Fuzz ssz_decode() +fuzz_target!(|data: &[u8]| { + // Note: we assume architecture is 64 bit -> usize == 64 bits + let result: Result<(usize, usize), DecodeError> = Decodable::ssz_decode(data, 0); + if data.len() >= 8 { + // Valid result + let (number_usize, index) = result.unwrap(); + assert_eq!(index, 8); + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + let val = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]); + assert_eq!(number_usize, val as usize); + } else { + // Length less then 8 should return error + assert_eq!(result, Err(DecodeError::TooShort)); + } +}); diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs new file mode 100644 index 000000000..d5aa9751f --- /dev/null +++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_usize_encode.rs @@ -0,0 +1,40 @@ +#![no_main] +#[macro_use] extern crate libfuzzer_sys; +extern crate ssz; + +use ssz::SszStream; + +// Fuzz ssz_encode (via ssz_append) +fuzz_target!(|data: &[u8]| { + let mut ssz = SszStream::new(); + let mut number_usize = 0; + if data.len() >= 8 { + number_usize = u64::from_be_bytes([ + data[0], + data[1], + data[2], + data[3], + data[4], + data[5], + data[6], + data[7], + ]) as usize; + } + + ssz.append(&number_usize); + let ssz = ssz.drain(); + + // TODO: change to little endian bytes + // https://github.com/sigp/lighthouse/issues/215 + 
+    assert_eq!(ssz.len(), 8);
+    assert_eq!(number_usize, u64::from_be_bytes([
+        ssz[0],
+        ssz[1],
+        ssz[2],
+        ssz[3],
+        ssz[4],
+        ssz[5],
+        ssz[6],
+        ssz[7],
+    ]) as usize);
+});
diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs
new file mode 100644
index 000000000..6c686df1a
--- /dev/null
+++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_address_decode.rs
@@ -0,0 +1,12 @@
+#![no_main]
+#[macro_use] extern crate libfuzzer_sys;
+extern crate ethereum_types;
+extern crate ssz;
+
+use ethereum_types::{Address};
+use ssz::{DecodeError, Decodable};
+
+// Fuzz ssz_decode()
+fuzz_target!(|data: &[u8]| {
+    let _result: Result<(Vec<Address>, usize), DecodeError> = Decodable::ssz_decode(data, 0);
+});
diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs
new file mode 100644
index 000000000..25017ef25
--- /dev/null
+++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_bool_decode.rs
@@ -0,0 +1,10 @@
+#![no_main]
+#[macro_use] extern crate libfuzzer_sys;
+extern crate ssz;
+
+use ssz::{DecodeError, Decodable};
+
+// Fuzz ssz_decode()
+fuzz_target!(|data: &[u8]| {
+    let _result: Result<(Vec<bool>, usize), DecodeError> = Decodable::ssz_decode(data, 0);
+});
diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs
new file mode 100644
index 000000000..cc1dc09f5
--- /dev/null
+++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_decode.rs
@@ -0,0 +1,12 @@
+#![no_main]
+#[macro_use] extern crate libfuzzer_sys;
+extern crate ethereum_types;
+extern crate ssz;
+
+use ethereum_types::{Address, H256};
+use ssz::{DecodeError, Decodable};
+
+// Fuzz ssz_decode()
+fuzz_target!(|data: &[u8]| {
+    let _result: Result<(Vec<u8>, usize), DecodeError> = Decodable::ssz_decode(data, 0);
+});
diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs
new file mode 100644
index 000000000..39500b782
--- /dev/null
+++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_encode.rs
@@ -0,0 +1,15 @@
+#![no_main]
+#[macro_use] extern crate libfuzzer_sys;
+extern crate ethereum_types;
+extern crate ssz;
+
+use ethereum_types::{Address, H256};
+use ssz::SszStream;
+
+// Fuzz ssz_encode()
+fuzz_target!(|data: &[u8]| {
+
+    let mut ssz = SszStream::new();
+    let data_vec = data.to_vec();
+    ssz.append(&data_vec);
+});
diff --git a/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs
new file mode 100644
index 000000000..ee25a6378
--- /dev/null
+++ b/eth2/utils/ssz/fuzz/fuzz_targets/fuzz_target_vec_u64_decode.rs
@@ -0,0 +1,10 @@
+#![no_main]
+#[macro_use] extern crate libfuzzer_sys;
+extern crate ssz;
+
+use ssz::{DecodeError, Decodable};
+
+// Fuzz ssz_decode()
+fuzz_target!(|data: &[u8]| {
+    let _result: Result<(Vec<u64>, usize), DecodeError> = Decodable::ssz_decode(data, 0);
+});
diff --git a/eth2/utils/ssz/src/impl_tree_hash.rs b/eth2/utils/ssz/src/impl_tree_hash.rs
index 54bd7c139..03976f637 100644
--- a/eth2/utils/ssz/src/impl_tree_hash.rs
+++ b/eth2/utils/ssz/src/impl_tree_hash.rs
@@ -3,55 +3,55 @@ use super::{merkle_hash, ssz_encode, TreeHash};
 use hashing::hash;
 
 impl TreeHash for u8 {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for u16 {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for u32 {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for u64 {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for usize {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for bool {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for Address {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for H256 {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         ssz_encode(self)
     }
 }
 
 impl TreeHash for [u8] {
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
+    fn hash_tree_root(&self) -> Vec<u8> {
         if self.len() > 32 {
             return hash(&self);
         }
@@ -63,12 +63,12 @@ impl<T> TreeHash for Vec<T>
 where
     T: TreeHash,
 {
-    /// Returns the merkle_hash of a list of hash_tree_root_internal values created
+    /// Returns the merkle_hash of a list of hash_tree_root values created
     /// from the given list.
     /// Note: A byte vector, Vec<u8>, must be converted to a slice (as_slice())
    /// to be handled properly (i.e. hashed) as byte array.
-    fn hash_tree_root_internal(&self) -> Vec<u8> {
-        let mut tree_hashes = self.iter().map(|x| x.hash_tree_root_internal()).collect();
+    fn hash_tree_root(&self) -> Vec<u8> {
+        let mut tree_hashes = self.iter().map(|x| x.hash_tree_root()).collect();
         merkle_hash(&mut tree_hashes)
     }
 }
@@ -79,7 +79,7 @@ mod tests {
 
     #[test]
     fn test_impl_tree_hash_vec() {
-        let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root_internal();
+        let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root();
         assert_eq!(result.len(), 32);
     }
 }
diff --git a/eth2/utils/ssz/src/tree_hash.rs b/eth2/utils/ssz/src/tree_hash.rs
index 7c1ab35e9..85e56924c 100644
--- a/eth2/utils/ssz/src/tree_hash.rs
+++ b/eth2/utils/ssz/src/tree_hash.rs
@@ -1,44 +1,31 @@
 use hashing::hash;
 
-const SSZ_CHUNK_SIZE: usize = 128;
+const BYTES_PER_CHUNK: usize = 32;
 const HASHSIZE: usize = 32;
 
 pub trait TreeHash {
-    fn hash_tree_root_internal(&self) -> Vec<u8>;
-    fn hash_tree_root(&self) -> Vec<u8> {
-        let mut result = self.hash_tree_root_internal();
-        zpad(&mut result, HASHSIZE);
-        result
-    }
+    fn hash_tree_root(&self) -> Vec<u8>;
 }
 
 /// Returns a 32 byte hash of 'list' - a vector of byte vectors.
 /// Note that this will consume 'list'.
 pub fn merkle_hash(list: &mut Vec<Vec<u8>>) -> Vec<u8> {
     // flatten list
-    let (mut chunk_size, mut chunkz) = list_to_blob(list);
+    let mut chunkz = list_to_blob(list);
 
     // get data_len as bytes. It will hashed will the merkle root
     let mut datalen = list.len().to_le_bytes().to_vec();
     zpad(&mut datalen, 32);
 
-    // Tree-hash
+    // merklize
     while chunkz.len() > HASHSIZE {
         let mut new_chunkz: Vec<u8> = Vec::new();
 
-        for two_chunks in chunkz.chunks(chunk_size * 2) {
-            if two_chunks.len() == chunk_size {
-                // Odd number of chunks
-                let mut c = two_chunks.to_vec();
-                c.append(&mut vec![0; SSZ_CHUNK_SIZE]);
-                new_chunkz.append(&mut hash(&c));
-            } else {
-                // Hash two chuncks together
-                new_chunkz.append(&mut hash(two_chunks));
-            }
+        for two_chunks in chunkz.chunks(BYTES_PER_CHUNK * 2) {
+            // Hash two chunks together
+            new_chunkz.append(&mut hash(two_chunks));
         }
 
-        chunk_size = HASHSIZE;
         chunkz = new_chunkz;
     }
 
@@ -46,17 +33,13 @@ pub fn merkle_hash(list: &mut Vec<Vec<u8>>) -> Vec<u8> {
     hash(&chunkz)
 }
 
-fn list_to_blob(list: &mut Vec<Vec<u8>>) -> (usize, Vec<u8>) {
-    let chunk_size = if list.is_empty() || list[0].len() < SSZ_CHUNK_SIZE {
-        SSZ_CHUNK_SIZE
-    } else {
-        list[0].len()
-    };
-
+fn list_to_blob(list: &mut Vec<Vec<u8>>) -> Vec<u8> {
+    // pack - fit as many items per chunk as we can and then
+    // right pad to BYTES_PER_CHUNK
     let (items_per_chunk, chunk_count) = if list.is_empty() {
         (1, 1)
     } else {
-        let items_per_chunk = SSZ_CHUNK_SIZE / list[0].len();
+        let items_per_chunk = BYTES_PER_CHUNK / list[0].len();
         let chunk_count = list.len() / items_per_chunk;
         (items_per_chunk, chunk_count)
     };
@@ -64,20 +47,20 @@ fn list_to_blob(list: &mut Vec<Vec<u8>>) -> (usize, Vec<u8>) {
     let mut chunkz = Vec::new();
     if list.is_empty() {
         // handle and empty list
-        chunkz.append(&mut vec![0; SSZ_CHUNK_SIZE]);
-    } else if list[0].len() <= SSZ_CHUNK_SIZE {
+        chunkz.append(&mut vec![0; BYTES_PER_CHUNK * 2]);
+    } else if list[0].len() <= BYTES_PER_CHUNK {
         // just create a blob here; we'll divide into
         // chunked slices when we merklize
-        let mut chunk = Vec::with_capacity(chunk_size);
+        let mut chunk = Vec::with_capacity(BYTES_PER_CHUNK);
         let mut item_count_in_chunk = 0;
-        chunkz.reserve(chunk_count * chunk_size);
+        chunkz.reserve(chunk_count * BYTES_PER_CHUNK);
         for item in list.iter_mut() {
             item_count_in_chunk += 1;
             chunk.append(item);
 
             // completed chunk?
             if item_count_in_chunk == items_per_chunk {
-                zpad(&mut chunk, chunk_size);
+                zpad(&mut chunk, BYTES_PER_CHUNK);
                 chunkz.append(&mut chunk);
                 item_count_in_chunk = 0;
             }
@@ -85,18 +68,18 @@ fn list_to_blob(list: &mut Vec<Vec<u8>>) -> (usize, Vec<u8>) {
 
         // left-over uncompleted chunk?
         if item_count_in_chunk != 0 {
-            zpad(&mut chunk, chunk_size);
+            zpad(&mut chunk, BYTES_PER_CHUNK);
             chunkz.append(&mut chunk);
         }
-    } else {
-        // chunks larger than SSZ_CHUNK_SIZE
-        chunkz.reserve(chunk_count * chunk_size);
-        for item in list.iter_mut() {
-            chunkz.append(item);
-        }
     }
 
-    (chunk_size, chunkz)
+    // extend the number of chunks to a power of two if necessary
+    if !chunk_count.is_power_of_two() {
+        let zero_chunks_count = chunk_count.next_power_of_two() - chunk_count;
+        chunkz.append(&mut vec![0; zero_chunks_count * BYTES_PER_CHUNK]);
+    }
+
+    chunkz
 }
 
 /// right pads with zeros making 'bytes' 'size' in length
@@ -112,9 +95,9 @@ mod tests {
 
     #[test]
     fn test_merkle_hash() {
-        let data1 = vec![1; 100];
-        let data2 = vec![2; 100];
-        let data3 = vec![3; 100];
+        let data1 = vec![1; 32];
+        let data2 = vec![2; 32];
+        let data3 = vec![3; 32];
         let mut list = vec![data1, data2, data3];
         let result = merkle_hash(&mut list);
 
diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs
index 0d2e17f76..a7802a274 100644
--- a/eth2/utils/ssz_derive/src/lib.rs
+++ b/eth2/utils/ssz_derive/src/lib.rs
@@ -146,10 +146,10 @@ pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream {
 
     let output = quote! {
         impl ssz::TreeHash for #name {
-            fn hash_tree_root_internal(&self) -> Vec<u8> {
+            fn hash_tree_root(&self) -> Vec<u8> {
                 let mut list: Vec<Vec<u8>> = Vec::new();
                 #(
-                    list.push(self.#field_idents.hash_tree_root_internal());
+                    list.push(self.#field_idents.hash_tree_root());
                 )*
 
                 ssz::merkle_hash(&mut list)
@@ -224,7 +224,7 @@ pub fn ssz_signed_root_derive(input: TokenStream) -> TokenStream {
             fn signed_root(&self) -> Vec<u8> {
                 let mut list: Vec<Vec<u8>> = Vec::new();
                 #(
-                    list.push(self.#field_idents.hash_tree_root_internal());
+                    list.push(self.#field_idents.hash_tree_root());
                 )*
 
                 ssz::merkle_hash(&mut list)