use crate::metrics;
use environment::TaskExecutor;
use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
use eth2_hashing::hash;
use slog::{debug, error, trace, Logger};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use state_processing::per_block_processing::get_new_eth1_data;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::iter::DoubleEndedIterator;
use std::marker::PhantomData;
use store::{DBColumn, Error as StoreError, StoreItem};
use types::{
    BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
    DEPOSIT_TREE_DEPTH,
};

type BlockNumber = u64;
type Eth1DataVoteCount = HashMap<(Eth1Data, BlockNumber), u64>;

#[derive(Debug)]
pub enum Error {
    /// Unable to return an Eth1Data for the given epoch.
    EpochUnavailable,
    /// An error from the backend service (e.g., the web3 data fetcher).
    BackendError(String),
    /// The deposit index of the state is higher than the deposit contract. This is a critical
    /// consensus error.
    DepositIndexTooHigh,
    /// The current state was unable to return the root for the state at the start of the eth1
    /// voting period.
    UnableToGetPreviousStateRoot(BeaconStateError),
    /// The state required to find the previous eth1 block was not found in the store.
    PreviousStateNotInDB(Hash256),
    /// There was an error accessing an object in the database.
    StoreError(StoreError),
    /// The eth1 head block at the start of the eth1 voting period is unknown.
    ///
    /// The eth1 caches are likely stale.
    UnknownVotingPeriodHead,
    /// The block that was previously voted into the state is unknown.
    ///
    /// The eth1 caches are stale, or a junk value was voted into the chain.
    UnknownPreviousEth1BlockHash,
    /// An arithmetic error occurred.
    ArithError(safe_arith::ArithError),
}

impl From<safe_arith::ArithError> for Error {
    fn from(e: safe_arith::ArithError) -> Self {
        Self::ArithError(e)
    }
}

#[derive(Encode, Decode, Clone)]
pub struct SszEth1 {
    use_dummy_backend: bool,
    backend_bytes: Vec<u8>,
}

impl StoreItem for SszEth1 {
    fn db_column() -> DBColumn {
        DBColumn::Eth1Cache
    }

    fn as_store_bytes(&self) -> Vec<u8> {
        self.as_ssz_bytes()
    }

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
        Self::from_ssz_bytes(bytes).map_err(Into::into)
    }
}

/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`.
pub struct Eth1Chain<T, E>
where
    T: Eth1ChainBackend<E>,
    E: EthSpec,
{
    backend: T,
    /// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method
    /// will be used instead.
    pub use_dummy_backend: bool,
    _phantom: PhantomData<E>,
}

impl<T, E> Eth1Chain<T, E>
where
    T: Eth1ChainBackend<E>,
    E: EthSpec,
{
    pub fn new(backend: T) -> Self {
        Self {
            backend,
            use_dummy_backend: false,
            _phantom: PhantomData,
        }
    }

    /// Returns the `Eth1Data` that should be included in a block being produced for the given
    /// `state`.
    pub fn eth1_data_for_block_production(
        &self,
        state: &BeaconState<E>,
        spec: &ChainSpec,
    ) -> Result<Eth1Data, Error> {
        if self.use_dummy_backend {
            let dummy_backend: DummyEth1ChainBackend<E> = DummyEth1ChainBackend::default();
            dummy_backend.eth1_data(state, spec)
        } else {
            self.backend.eth1_data(state, spec)
        }
    }

    /// Returns a list of `Deposits` that may be included in a block.
    ///
    /// Including all of the returned `Deposits` in a block should _not_ cause it to become
    /// invalid (i.e., this function should respect the maximum).
    ///
    /// `eth1_data_vote` is the `Eth1Data` that the block producer would include in their
    /// block. This vote may change the `state.eth1_data` value, which would change the deposit
    /// count and therefore change the output of this function.
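    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled; `eth1_chain`, `state`, `vote` and `spec` are
    /// assumed to be in scope):
    ///
    /// ```ignore
    /// // If `vote` would become the winning `Eth1Data` for this voting period, the deposit
    /// // count it carries (rather than `state.eth1_data.deposit_count`) bounds the range of
    /// // deposits returned here.
    /// let deposits = eth1_chain.deposits_for_block_inclusion(&state, &vote, &spec)?;
    /// assert!(deposits.len() as u64 <= E::MaxDeposits::to_u64());
    /// ```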
    pub fn deposits_for_block_inclusion(
        &self,
        state: &BeaconState<E>,
        eth1_data_vote: &Eth1Data,
        spec: &ChainSpec,
    ) -> Result<Vec<Deposit>, Error> {
        if self.use_dummy_backend {
            let dummy_backend: DummyEth1ChainBackend<E> = DummyEth1ChainBackend::default();
            dummy_backend.queued_deposits(state, eth1_data_vote, spec)
        } else {
            self.backend.queued_deposits(state, eth1_data_vote, spec)
        }
    }

    /// Instantiate `Eth1Chain` from a persisted `SszEth1`.
    ///
    /// The `Eth1Chain` will have the same caches as the persisted `SszEth1`.
    pub fn from_ssz_container(
        ssz_container: &SszEth1,
        config: Eth1Config,
        log: &Logger,
        spec: ChainSpec,
    ) -> Result<Self, String> {
        let backend =
            Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone(), spec)?;
        Ok(Self {
            use_dummy_backend: ssz_container.use_dummy_backend,
            backend,
            _phantom: PhantomData,
        })
    }

    /// Return a `SszEth1` containing the state of `Eth1Chain`.
    pub fn as_ssz_container(&self) -> SszEth1 {
        SszEth1 {
            use_dummy_backend: self.use_dummy_backend,
            backend_bytes: self.backend.as_bytes(),
        }
    }

    /// Consumes `self`, returning the backend.
    pub fn into_backend(self) -> T {
        self.backend
    }
}

pub trait Eth1ChainBackend<T: EthSpec>: Sized + Send + Sync {
    /// Returns the `Eth1Data` that should be included in a block being produced for the given
    /// `state`.
    fn eth1_data(&self, beacon_state: &BeaconState<T>, spec: &ChainSpec)
        -> Result<Eth1Data, Error>;

    /// Returns all `Deposits` between `state.eth1_deposit_index` and
    /// `state.eth1_data.deposit_count`.
    ///
    /// # Note:
    ///
    /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may
    /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high.
    fn queued_deposits(
        &self,
        beacon_state: &BeaconState<T>,
        eth1_data_vote: &Eth1Data,
        spec: &ChainSpec,
    ) -> Result<Vec<Deposit>, Error>;

    /// Encode the `Eth1ChainBackend` instance to bytes.
    fn as_bytes(&self) -> Vec<u8>;

    /// Create a `Eth1ChainBackend` instance given encoded bytes.
    fn from_bytes(
        bytes: &[u8],
        config: Eth1Config,
        log: Logger,
        spec: ChainSpec,
    ) -> Result<Self, String>;
}

/// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data.
///
/// Never creates deposits, therefore the validator set is static.
///
/// This was used in the 2019 Canada interop workshops.
pub struct DummyEth1ChainBackend<T: EthSpec>(PhantomData<T>);

impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
    /// Produce some deterministic junk based upon the current epoch.
    fn eth1_data(&self, state: &BeaconState<T>, _spec: &ChainSpec) -> Result<Eth1Data, Error> {
        let current_epoch = state.current_epoch();
        let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64;
        let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period;

        let deposit_root = hash(&int_to_bytes32(current_voting_period));
        let block_hash = hash(&deposit_root);

        Ok(Eth1Data {
            deposit_root: Hash256::from_slice(&deposit_root),
            deposit_count: state.eth1_deposit_index,
            block_hash: Hash256::from_slice(&block_hash),
        })
    }

    /// The dummy back-end never produces deposits.
    fn queued_deposits(
        &self,
        _: &BeaconState<T>,
        _: &Eth1Data,
        _: &ChainSpec,
    ) -> Result<Vec<Deposit>, Error> {
        Ok(vec![])
    }

    /// Return empty Vec for dummy backend.
    fn as_bytes(&self) -> Vec<u8> {
        Vec::new()
    }

    /// Create dummy eth1 backend.
    fn from_bytes(
        _bytes: &[u8],
        _config: Eth1Config,
        _log: Logger,
        _spec: ChainSpec,
    ) -> Result<Self, String> {
        Ok(Self(PhantomData))
    }
}

impl<T: EthSpec> Default for DummyEth1ChainBackend<T> {
    fn default() -> Self {
        Self(PhantomData)
    }
}
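
// A minimal usage sketch (added for illustration; not part of the upstream test suite): the
// dummy backend is deterministic, so producing eth1 data twice for the same state yields
// identical results, and it never yields deposits. `MinimalEthSpec` is assumed for brevity.
#[cfg(test)]
mod dummy_backend_sketch {
    use super::*;
    use types::MinimalEthSpec;

    #[test]
    fn dummy_backend_is_deterministic_and_depositless() {
        let spec = MinimalEthSpec::default_spec();
        let state: BeaconState<MinimalEthSpec> = BeaconState::new(0, Eth1Data::default(), &spec);
        let backend: DummyEth1ChainBackend<MinimalEthSpec> = DummyEth1ChainBackend::default();

        let a = backend
            .eth1_data(&state, &spec)
            .expect("dummy backend should always produce eth1 data");
        let b = backend
            .eth1_data(&state, &spec)
            .expect("dummy backend should always produce eth1 data");
        assert_eq!(a, b, "dummy eth1 data should be deterministic");

        let deposits = backend
            .queued_deposits(&state, &Eth1Data::default(), &spec)
            .expect("dummy backend should never error");
        assert!(
            deposits.is_empty(),
            "dummy backend should never produce deposits"
        );
    }
}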

/// Maintains a cache of eth1 blocks and deposits and provides functions to allow block producers
/// to include new deposits and vote on `Eth1Data`.
///
/// The `core` connects to some external eth1 client (e.g., Parity/Geth) and polls it for
/// information.
#[derive(Clone)]
pub struct CachingEth1Backend<T: EthSpec> {
    pub core: HttpService,
    log: Logger,
    _phantom: PhantomData<T>,
}

impl<T: EthSpec> CachingEth1Backend<T> {
    /// Instantiates `self` with empty caches.
    ///
    /// Does not connect to the eth1 node or start any tasks to keep the cache updated.
    pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self {
        Self {
            core: HttpService::new(config, log.clone(), spec),
            log,
            _phantom: PhantomData,
        }
    }

    /// Starts the routine which connects to the external eth1 node and updates the caches.
    pub fn start(&self, handle: TaskExecutor) {
        HttpService::auto_update(self.core.clone(), handle);
    }

    /// Instantiates `self` from an existing service.
    pub fn from_service(service: HttpService) -> Self {
        Self {
            log: service.log.clone(),
            core: service,
            _phantom: PhantomData,
        }
    }
}

impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
    fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> {
        let period = T::SlotsPerEth1VotingPeriod::to_u64();
        let voting_period_start_slot = (state.slot / period) * period;
        let voting_period_start_seconds = slot_start_seconds::<T>(
            state.genesis_time,
            spec.milliseconds_per_slot,
            voting_period_start_slot,
        );

        let blocks = self.core.blocks().read();

        let votes_to_consider =
            get_votes_to_consider(blocks.iter(), voting_period_start_seconds, spec);

        trace!(
            self.log,
            "Found eth1 data votes_to_consider";
            "votes_to_consider" => votes_to_consider.len(),
        );
        let valid_votes = collect_valid_votes(state, &votes_to_consider);

        let eth1_data = if let Some(eth1_data) = find_winning_vote(valid_votes) {
            eth1_data
        } else {
            // In this case, there are no valid votes available.
            //
            // Here we choose the eth1_data corresponding to the latest block in our voting window.
            // If no votes exist, choose `state.eth1_data` as default vote.
            let default_vote = votes_to_consider
                .iter()
                .max_by(|(_, x), (_, y)| x.cmp(y))
                .map(|vote| {
                    let vote = vote.0.clone();
                    debug!(
                        self.log,
                        "No valid eth1_data votes";
                        "outcome" => "Casting vote corresponding to last candidate eth1 block",
                    );
                    vote
                })
                .unwrap_or_else(|| {
                    let vote = state.eth1_data.clone();
                    error!(
                        self.log,
                        "No valid eth1_data votes, `votes_to_consider` empty";
                        "lowest_block_number" => self.core.lowest_block_number(),
                        "earliest_block_timestamp" => self.core.earliest_block_timestamp(),
                        "genesis_time" => state.genesis_time,
                        "outcome" => "casting `state.eth1_data` as eth1 vote"
                    );
                    metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES);
                    vote
                });

            default_vote
        };

        debug!(
            self.log,
            "Produced vote for eth1 chain";
            "deposit_root" => format!("{:?}", eth1_data.deposit_root),
            "deposit_count" => eth1_data.deposit_count,
            "block_hash" => format!("{:?}", eth1_data.block_hash),
        );

        Ok(eth1_data)
    }

    fn queued_deposits(
        &self,
        state: &BeaconState<T>,
        eth1_data_vote: &Eth1Data,
        _spec: &ChainSpec,
    ) -> Result<Vec<Deposit>, Error> {
        let deposit_index = state.eth1_deposit_index;
        let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)?
        {
            new_eth1_data.deposit_count
        } else {
            state.eth1_data.deposit_count
        };

        match deposit_index.cmp(&deposit_count) {
            Ordering::Greater => Err(Error::DepositIndexTooHigh),
            Ordering::Equal => Ok(vec![]),
            Ordering::Less => {
                let next = deposit_index;
                let last = std::cmp::min(deposit_count, next + T::MaxDeposits::to_u64());

                self.core
                    .deposits()
                    .read()
                    .cache
                    .get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH)
                    .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e)))
                    .map(|(_deposit_root, deposits)| deposits)
            }
        }
    }

    /// Return encoded byte representation of the block and deposit caches.
    fn as_bytes(&self) -> Vec<u8> {
        self.core.as_bytes()
    }

    /// Recover the cached backend from encoded bytes.
    fn from_bytes(
        bytes: &[u8],
        config: Eth1Config,
        log: Logger,
        spec: ChainSpec,
    ) -> Result<Self, String> {
        let inner = HttpService::from_bytes(bytes, config, log.clone(), spec)?;
        Ok(Self {
            core: inner,
            log,
            _phantom: PhantomData,
        })
    }
}

/// Get all votes from eth1 blocks which are in the list of candidate blocks for the
/// current eth1 voting period.
///
/// Returns a hashmap of `Eth1Data` to its associated eth1 `block_number`.
fn get_votes_to_consider<'a, I>(
    blocks: I,
    voting_period_start_seconds: u64,
    spec: &ChainSpec,
) -> HashMap<Eth1Data, BlockNumber>
where
    I: DoubleEndedIterator<Item = &'a Eth1Block> + Clone,
{
    blocks
        .rev()
        .skip_while(|eth1_block| !is_candidate_block(eth1_block, voting_period_start_seconds, spec))
        .take_while(|eth1_block| is_candidate_block(eth1_block, voting_period_start_seconds, spec))
        .filter_map(|eth1_block| {
            eth1_block
                .clone()
                .eth1_data()
                .map(|eth1_data| (eth1_data, eth1_block.number))
        })
        .collect()
}

/// Collect all valid votes that are cast during the current voting period.
/// Return hashmap with count of each vote cast.
fn collect_valid_votes<T: EthSpec>(
    state: &BeaconState<T>,
    votes_to_consider: &HashMap<Eth1Data, BlockNumber>,
) -> Eth1DataVoteCount {
    let mut valid_votes = HashMap::new();
    state
        .eth1_data_votes
        .iter()
        .filter_map(|vote| {
            if let Some(block_num) = votes_to_consider.get(vote) {
                Some((vote.clone(), *block_num))
            } else {
                None
            }
        })
        .for_each(|(eth1_data, block_number)| {
            valid_votes
                .entry((eth1_data, block_number))
                .and_modify(|count| *count += 1)
                .or_insert(1_u64);
        });
    valid_votes
}

/// Selects the winning vote from `valid_votes`.
fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option<Eth1Data> {
    valid_votes
        .iter()
        .max_by_key(|((_eth1_data, block_number), vote_count)| (*vote_count, block_number))
        .map(|((eth1_data, _), _)| eth1_data.clone())
}

/// Returns `int` as little-endian bytes with a length of 32.
fn int_to_bytes32(int: u64) -> Vec<u8> {
    let mut vec = int.to_le_bytes().to_vec();
    vec.resize(32, 0);
    vec
}

/// Returns the unix-epoch seconds at the start of the given `slot`.
fn slot_start_seconds<T: EthSpec>(
    genesis_unix_seconds: u64,
    milliseconds_per_slot: u64,
    slot: Slot,
) -> u64 {
    genesis_unix_seconds + slot.as_u64() * milliseconds_per_slot / 1_000
}

/// Returns a boolean denoting if a given `Eth1Block` is a candidate for `Eth1Data` calculation
/// at the timestamp `period_start`.
///
/// Note: `period_start` needs to be at least (`spec.seconds_per_eth1_block * spec.eth1_follow_distance * 2`)
/// for this function to return meaningful values.
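///
/// # Example
///
/// An illustrative sketch with assumed (non-spec) values:
///
/// ```ignore
/// // With seconds_per_eth1_block = 14 and eth1_follow_distance = 1024,
/// // follow = 14 * 1024 = 14_336 seconds. For period_start = 100_000, a block is a
/// // candidate iff 71_328 <= block.timestamp <= 85_664.
/// let follow = spec.seconds_per_eth1_block * spec.eth1_follow_distance;
/// let is_candidate =
///     block.timestamp <= period_start - follow && block.timestamp >= period_start - 2 * follow;
/// ```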
fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) -> bool {
    block.timestamp
        <= period_start.saturating_sub(spec.seconds_per_eth1_block * spec.eth1_follow_distance)
        && block.timestamp
            >= period_start
                .saturating_sub(spec.seconds_per_eth1_block * spec.eth1_follow_distance * 2)
}

#[cfg(test)]
mod test {
    use super::*;
    use environment::null_logger;
    use std::iter::FromIterator;
    use types::{test_utils::DepositTestTask, MinimalEthSpec};

    type E = MinimalEthSpec;

    fn get_eth1_data(i: u64) -> Eth1Data {
        Eth1Data {
            block_hash: Hash256::from_low_u64_be(i),
            deposit_root: Hash256::from_low_u64_be(u64::max_value() - i),
            deposit_count: i,
        }
    }

    fn get_voting_period_start_seconds(state: &BeaconState<E>, spec: &ChainSpec) -> u64 {
        let period = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
        let voting_period_start_slot = (state.slot / period) * period;
        slot_start_seconds::<E>(
            state.genesis_time,
            spec.milliseconds_per_slot,
            voting_period_start_slot,
        )
    }

    #[test]
    fn slot_start_time() {
        let zero_sec = 0;
        assert_eq!(slot_start_seconds::<E>(100, zero_sec, Slot::new(2)), 100);

        let half_sec = 500;
        assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(0)), 100);
        assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(1)), 100);
        assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(2)), 101);
        assert_eq!(slot_start_seconds::<E>(100, half_sec, Slot::new(3)), 101);

        let one_sec = 1_000;
        assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(0)), 100);
        assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(1)), 101);
        assert_eq!(slot_start_seconds::<E>(100, one_sec, Slot::new(2)), 102);

        let three_sec = 3_000;
        assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(0)), 100);
        assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(1)), 103);
        assert_eq!(slot_start_seconds::<E>(100, three_sec, Slot::new(2)), 106);
    }

    fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block {
        Eth1Block {
            number,
            timestamp,
            hash: Hash256::from_low_u64_be(number),
            deposit_root: Some(Hash256::from_low_u64_be(number)),
            deposit_count: Some(number),
        }
    }

    mod eth1_chain_json_backend {
        use super::*;
        use eth1::DepositLog;
        use types::{
            test_utils::{generate_deterministic_keypair, TestingDepositBuilder},
            EthSpec, MainnetEthSpec,
        };

        fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E>, E> {
            let eth1_config = Eth1Config {
                ..Eth1Config::default()
            };

            let log = null_logger().unwrap();
            Eth1Chain::new(CachingEth1Backend::new(
                eth1_config,
                log,
                MainnetEthSpec::default_spec(),
            ))
        }

        fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog {
            let keypair = generate_deterministic_keypair(i as usize);
            let mut builder =
                TestingDepositBuilder::new(keypair.pk.clone(), spec.max_effective_balance);
            builder.sign(DepositTestTask::Valid, &keypair, spec);
            let deposit_data = builder.build().data;

            DepositLog {
                deposit_data,
                block_number: i,
                index: i,
                signature_is_valid: true,
            }
        }

        #[test]
        fn deposits_empty_cache() {
            let spec = &E::default_spec();

            let eth1_chain = get_eth1_chain();

            assert_eq!(
                eth1_chain.use_dummy_backend, false,
                "test should not use dummy backend"
            );

            let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
            state.eth1_deposit_index = 0;
            state.eth1_data.deposit_count = 0;

            assert!(
                eth1_chain
                    .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec)
                    .is_ok(),
                "should succeed if cache is empty but no deposits are required"
            );

            state.eth1_data.deposit_count = 1;

            assert!(
                eth1_chain
                    .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec)
                    .is_err(),
                "should fail to get deposits if required, but cache is empty"
            );
        }
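
        // An added illustrative test (not from the upstream suite): a state whose deposit
        // index exceeds its deposit count indicates a critical consensus problem, so block
        // production should fail with `Error::DepositIndexTooHigh`.
        #[test]
        fn deposit_index_above_count_is_error() {
            let spec = &E::default_spec();
            let eth1_chain = get_eth1_chain();

            let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
            state.eth1_deposit_index = 2;
            state.eth1_data.deposit_count = 1;

            match eth1_chain.deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) {
                Err(Error::DepositIndexTooHigh) => (),
                other => panic!("expected DepositIndexTooHigh, got {:?}", other),
            }
        }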

        #[test]
        fn deposits_with_cache() {
            let spec = &E::default_spec();

            let eth1_chain = get_eth1_chain();
            let max_deposits = <E as EthSpec>::MaxDeposits::to_u64();

            assert_eq!(
                eth1_chain.use_dummy_backend, false,
                "test should not use dummy backend"
            );

            let deposits: Vec<_> = (0..max_deposits + 2)
                .map(|i| get_deposit_log(i, spec))
                .inspect(|log| {
                    eth1_chain
                        .backend
                        .core
                        .deposits()
                        .write()
                        .cache
                        .insert_log(log.clone())
                        .expect("should insert log")
                })
                .collect();

            assert_eq!(
                eth1_chain.backend.core.deposits().write().cache.len(),
                deposits.len(),
                "cache should store all logs"
            );

            let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
            state.eth1_deposit_index = 0;
            state.eth1_data.deposit_count = 0;

            assert!(
                eth1_chain
                    .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec)
                    .is_ok(),
                "should succeed if no deposits are required"
            );

            (0..3).for_each(|initial_deposit_index| {
                state.eth1_deposit_index = initial_deposit_index as u64;

                (initial_deposit_index..deposits.len()).for_each(|i| {
                    state.eth1_data.deposit_count = i as u64;

                    let deposits_for_inclusion = eth1_chain
                        .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec)
                        .unwrap_or_else(|_| panic!("should find deposit for {}", i));

                    let expected_len =
                        std::cmp::min(i - initial_deposit_index, max_deposits as usize);

                    assert_eq!(
                        deposits_for_inclusion.len(),
                        expected_len,
                        "should find {} deposits",
                        expected_len
                    );

                    let deposit_data_for_inclusion: Vec<_> = deposits_for_inclusion
                        .into_iter()
                        .map(|deposit| deposit.data)
                        .collect();

                    let expected_deposit_data: Vec<_> = deposits[initial_deposit_index
                        ..std::cmp::min(initial_deposit_index + expected_len, deposits.len())]
                        .iter()
                        .map(|log| log.deposit_data.clone())
                        .collect();

                    assert_eq!(
                        deposit_data_for_inclusion, expected_deposit_data,
                        "should find the correct deposits for {}",
                        i
                    );
                });
            })
        }

        #[test]
        fn eth1_data_empty_cache() {
            let spec = &E::default_spec();

            let eth1_chain = get_eth1_chain();

            assert_eq!(
                eth1_chain.use_dummy_backend, false,
                "test should not use dummy backend"
            );

            let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);

            let a = eth1_chain
                .eth1_data_for_block_production(&state, &spec)
                .expect("should produce default eth1 data vote");
            assert_eq!(
                a, state.eth1_data,
                "default vote should be same as state.eth1_data"
            );
        }

        #[test]
        fn default_vote() {
            let spec = &E::default_spec();
            let slots_per_eth1_voting_period = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
            let eth1_follow_distance = spec.eth1_follow_distance;

            let eth1_chain = get_eth1_chain();

            assert_eq!(
                eth1_chain.use_dummy_backend, false,
                "test should not use dummy backend"
            );

            let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
            state.slot = Slot::from(slots_per_eth1_voting_period * 10);
            let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block;
            let voting_period_start = get_voting_period_start_seconds(&state, &spec);
            let start_eth1_block = voting_period_start - follow_distance_seconds * 2;
            let end_eth1_block = voting_period_start - follow_distance_seconds;

            // Populate blocks cache with candidate eth1 blocks
            let blocks = (start_eth1_block..end_eth1_block)
                .map(|i| get_eth1_block(i, i))
                .collect::<Vec<_>>();

            blocks.iter().for_each(|block| {
                eth1_chain
                    .backend
                    .core
                    .blocks()
                    .write()
                    .insert_root_or_child(block.clone())
                    .expect("should add blocks to cache");
            });

            let vote = eth1_chain
                .eth1_data_for_block_production(&state, &spec)
                .expect("should produce default eth1 data vote");

            assert_eq!(
                vote,
                blocks
                    .last()
                    .expect("should have blocks")
                    .clone()
                    .eth1_data()
                    .expect("should have valid eth1 data"),
                "default vote must correspond to last block in candidate blocks"
            );
        }
    }

    mod eth1_data_sets {
        use super::*;

        #[test]
        fn empty_cache() {
            let spec = &E::default_spec();
            let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);

            let blocks = vec![];

            assert_eq!(
                get_votes_to_consider(
                    blocks.iter(),
                    get_voting_period_start_seconds(&state, spec),
                    &spec,
                ),
                HashMap::new()
            );
        }

        #[test]
        fn ideal_scenario() {
            let spec = E::default_spec();

            let slots_per_eth1_voting_period = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
            let eth1_follow_distance = spec.eth1_follow_distance;

            let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), &spec);
            state.genesis_time = 0;
            state.slot = Slot::from(slots_per_eth1_voting_period * 10);

            let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block;
            let voting_period_start = get_voting_period_start_seconds(&state, &spec);
            let start_eth1_block = voting_period_start - follow_distance_seconds * 2;
            let end_eth1_block = voting_period_start - follow_distance_seconds;
            let blocks = (start_eth1_block..end_eth1_block)
                .map(|i| get_eth1_block(i, i))
                .collect::<Vec<_>>();

            let votes_to_consider =
                get_votes_to_consider(blocks.iter(), voting_period_start, &spec);
            assert_eq!(
                votes_to_consider.len() as u64,
                end_eth1_block - start_eth1_block,
                "all produced eth1 blocks should be in votes to consider"
            );

            (start_eth1_block..end_eth1_block)
                .map(|i| get_eth1_block(i, i))
                .for_each(|eth1_block| {
                    assert_eq!(
                        eth1_block.number,
                        *votes_to_consider
                            .get(&eth1_block.clone().eth1_data().unwrap())
                            .expect("votes_to_consider should have expected block numbers")
                    )
                });
        }
    }

    mod collect_valid_votes {
        use super::*;

        fn get_eth1_data_vec(n: u64, block_number_offset: u64) -> Vec<(Eth1Data, BlockNumber)> {
            (0..n)
                .map(|i| (get_eth1_data(i), i + block_number_offset))
                .collect()
        }
        macro_rules! assert_votes {
            ($votes: expr, $expected: expr, $text: expr) => {
                let expected: Vec<(Eth1Data, BlockNumber)> = $expected;

                assert_eq!(
                    $votes.len(),
                    expected.len(),
                    "map should have the same number of elements"
                );

                expected.iter().for_each(|(eth1_data, block_number)| {
                    $votes
                        .get(&(eth1_data.clone(), *block_number))
                        .expect("should contain eth1 data");
                })
            };
        }

        #[test]
        fn no_votes_in_state() {
            let slots = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
            let spec = &E::default_spec();
            let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);

            let votes_to_consider = get_eth1_data_vec(slots, 0);

            let votes = collect_valid_votes(
                &state,
                &HashMap::from_iter(votes_to_consider.clone().into_iter()),
            );
            assert_eq!(
                votes.len(),
                0,
                "should not find any votes when state has no votes"
            );
        }

        #[test]
        fn distinct_votes_in_state() {
            let slots = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
            let spec = &E::default_spec();
            let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);

            let votes_to_consider = get_eth1_data_vec(slots, 0);

            state.eth1_data_votes = votes_to_consider[0..slots as usize / 4]
                .iter()
                .map(|(eth1_data, _)| eth1_data)
                .cloned()
                .collect::<Vec<_>>()
                .into();

            let votes = collect_valid_votes(
                &state,
                &HashMap::from_iter(votes_to_consider.clone().into_iter()),
            );
            assert_votes!(
                votes,
                votes_to_consider[0..slots as usize / 4].to_vec(),
                "should find as many votes as were in the state"
            );
        }

        #[test]
        fn duplicate_votes_in_state() {
            let slots = <E as EthSpec>::SlotsPerEth1VotingPeriod::to_u64();
            let spec = &E::default_spec();
            let mut state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);

            let votes_to_consider = get_eth1_data_vec(slots, 0);

            let duplicate_eth1_data = votes_to_consider
                .last()
                .expect("should have some eth1 data")
                .clone();

            state.eth1_data_votes = vec![duplicate_eth1_data.clone(); 4]
                .iter()
                .map(|(eth1_data, _)| eth1_data)
                .cloned()
                .collect::<Vec<_>>()
                .into();

            let votes = collect_valid_votes(
                &state,
                &HashMap::from_iter(votes_to_consider.clone().into_iter()),
            );
            assert_votes!(
                votes,
                // There should only be one value if there's a duplicate
                vec![duplicate_eth1_data.clone()],
                "should find as many votes as were in the state"
            );
            assert_eq!(
                *votes
                    .get(&duplicate_eth1_data)
                    .expect("should contain vote"),
                4,
                "should have four votes"
            );
        }
    }

    mod winning_vote {
        use super::*;

        type Vote = ((Eth1Data, u64), u64);

        fn vote(block_number: u64, vote_count: u64) -> Vote {
            (
                (
                    Eth1Data {
                        deposit_root: Hash256::from_low_u64_be(block_number),
                        deposit_count: block_number,
                        block_hash: Hash256::from_low_u64_be(block_number),
                    },
                    block_number,
                ),
                vote_count,
            )
        }

        fn vote_data(vote: &Vote) -> Eth1Data {
            (vote.0).0.clone()
        }

        #[test]
        fn no_votes() {
            let no_votes = vec![vote(0, 0), vote(1, 0), vote(3, 0), vote(2, 0)];

            assert_eq!(
                // Favour the highest block number when there are no votes.
                vote_data(&no_votes[2]),
                find_winning_vote(Eth1DataVoteCount::from_iter(no_votes.into_iter()))
                    .expect("should find winner")
            );
        }

        #[test]
        fn equal_votes() {
            let votes = vec![vote(0, 1), vote(1, 1), vote(3, 1), vote(2, 1)];

            assert_eq!(
                // Favour the highest block number when there are equal votes.
                vote_data(&votes[2]),
                find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
                    .expect("should find winner")
            );
        }

        #[test]
        fn some_votes() {
            let votes = vec![vote(0, 0), vote(1, 1), vote(3, 1), vote(2, 2)];

            assert_eq!(
                // Favour the highest vote over the highest block number.
                vote_data(&votes[3]),
                find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
                    .expect("should find winner")
            );
        }

        #[test]
        fn tying_votes() {
            let votes = vec![vote(0, 0), vote(1, 1), vote(2, 2), vote(3, 2)];

            assert_eq!(
                // Favour the highest block number for tying votes.
                vote_data(&votes[3]),
                find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
                    .expect("should find winner")
            );
        }

        #[test]
        fn all_tying_votes() {
            let votes = vec![vote(3, 42), vote(2, 42), vote(1, 42), vote(0, 42)];

            assert_eq!(
                // Favour the highest block number for tying votes.
                vote_data(&votes[0]),
                find_winning_vote(Eth1DataVoteCount::from_iter(votes.into_iter()))
                    .expect("should find winner")
            );
        }
    }
}
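
// An added illustrative test module (not part of the upstream suite) exercising the
// candidate-window boundaries of `is_candidate_block` directly. `MinimalEthSpec` is used
// purely as an example configuration.
#[cfg(test)]
mod candidate_block_sketch {
    use super::*;
    use types::MinimalEthSpec;

    #[test]
    fn candidate_window_boundaries() {
        let spec = MinimalEthSpec::default_spec();
        let follow = spec.seconds_per_eth1_block * spec.eth1_follow_distance;
        let period_start = follow * 4;

        let block = |timestamp| Eth1Block {
            number: 0,
            timestamp,
            hash: Hash256::zero(),
            deposit_root: None,
            deposit_count: None,
        };

        // Candidates lie in `[period_start - 2 * follow, period_start - follow]`.
        assert!(is_candidate_block(
            &block(period_start - follow),
            period_start,
            &spec
        ));
        assert!(is_candidate_block(
            &block(period_start - 2 * follow),
            period_start,
            &spec
        ));

        // Blocks just outside the window on either side are rejected.
        assert!(!is_candidate_block(
            &block(period_start - follow + 1),
            period_start,
            &spec
        ));
        assert!(!is_candidate_block(
            &block(period_start - 2 * follow - 1),
            period_start,
            &spec
        ));
    }
}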