pub use crate::persisted_beacon_chain::PersistedBeaconChain;
pub use crate::{
    beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
    migrate::MigratorConfig,
    BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification,
};
use crate::{
    builder::{BeaconChainBuilder, Witness},
    eth1_chain::CachingEth1Backend,
    BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler,
    StateSkipConfig,
};
use bls::get_withdrawal_credentials;
use execution_layer::test_utils::DEFAULT_JWT_SECRET;
use execution_layer::{
    auth::JwtKey,
    test_utils::{
        ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK,
    },
    ExecutionLayer,
};
use fork_choice::CountUnrealized;
use futures::channel::mpsc::Receiver;
pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
use int_to_bytes::int_to_bytes32;
use merkle_proof::MerkleTree;
use parking_lot::Mutex;
use parking_lot::RwLockWriteGuard;
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
use rayon::prelude::*;
use sensitive_url::SensitiveUrl;
use slog::Logger;
use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::per_block_processing::compute_timestamp_at_slot;
use state_processing::{
    state_advance::{complete_state_advance, partial_state_advance},
    StateRootStrategy,
};
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore};
use task_executor::{test_utils::TestRuntime, ShutdownReason};
use tree_hash::TreeHash;
use types::sync_selection_proof::SyncSelectionProof;
pub use types::test_utils::generate_deterministic_keypairs;
use types::{typenum::U4294967296, *};

// 4th September 2019
pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;

// Environment variable to read if `fork_from_env` feature is enabled.
const FORK_NAME_ENV_VAR: &str = "FORK_NAME";

// Default target aggregators to set during testing, this ensures an aggregator at each slot.
//
// You should mutate the `ChainSpec` prior to initialising the harness if you would like to use
// a different value.
pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::max_value();

pub type BaseHarnessType<TEthSpec, THotStore, TColdStore> =
    Witness<TestingSlotClock, CachingEth1Backend<TEthSpec>, TEthSpec, THotStore, TColdStore>;

pub type DiskHarnessType<E> = BaseHarnessType<E, LevelDB<E>, LevelDB<E>>;
pub type EphemeralHarnessType<E> = BaseHarnessType<E, MemoryStore<E>, MemoryStore<E>>;

pub type BoxedMutator<E, Hot, Cold> = Box<
    dyn FnOnce(
        BeaconChainBuilder<BaseHarnessType<E, Hot, Cold>>,
    ) -> BeaconChainBuilder<BaseHarnessType<E, Hot, Cold>>,
>;

pub type AddBlocksResult<E> = (
    HashMap<Slot, SignedBeaconBlockHash>,
    HashMap<Slot, BeaconStateHash>,
    SignedBeaconBlockHash,
    BeaconState<E>,
);

/// Deprecated: Indicates how the `BeaconChainHarness` should produce blocks.
#[derive(Clone, Copy, Debug)]
pub enum BlockStrategy {
    /// Produce blocks upon the canonical head (normal case).
    OnCanonicalHead,
    /// Ignore the canonical head and produce blocks upon the block at the given slot.
    ///
    /// Useful for simulating forks.
    ForkCanonicalChainAt {
        /// The slot of the parent of the first block produced.
        previous_slot: Slot,
        /// The slot of the first block produced (must be higher than `previous_slot`).
        first_slot: Slot,
    },
}

/// Deprecated: Indicates how the `BeaconChainHarness` should produce attestations.
#[derive(Clone, Debug)]
pub enum AttestationStrategy {
    /// All validators attest to whichever block the `BeaconChainHarness` has produced.
    AllValidators,
    /// Only the given validators should attest. All others should fail to produce attestations.
    SomeValidators(Vec<usize>),
}

/// Indicates whether the `BeaconChainHarness` should use the `state.current_sync_committee` or
/// `state.next_sync_committee` when creating sync messages or contributions.
#[derive(Clone, Debug)]
pub enum RelativeSyncCommittee {
    Current,
    Next,
}

fn make_rng() -> Mutex<StdRng> {
    // Nondeterminism in tests is a highly undesirable thing. Seed the RNG to some arbitrary
    // but fixed value for reproducibility.
    Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64))
}

/// Return a `ChainSpec` suitable for test usage.
///
/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment
/// variable. Otherwise use the default spec.
pub fn test_spec<E: EthSpec>() -> ChainSpec {
    let mut spec = if cfg!(feature = "fork_from_env") {
        let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| {
            panic!(
                "{} env var must be defined when using fork_from_env: {:?}",
                FORK_NAME_ENV_VAR, e
            )
        });
        let fork = ForkName::from_str(fork_name.as_str()).unwrap();
        fork.make_genesis_spec(E::default_spec())
    } else {
        E::default_spec()
    };

    // Set target aggregators to a high value by default.
    spec.target_aggregators_per_committee = DEFAULT_TARGET_AGGREGATORS;
    spec
}

pub struct Builder<T: BeaconChainTypes> {
    eth_spec_instance: T::EthSpec,
    spec: Option<ChainSpec>,
    validator_keypairs: Option<Vec<Keypair>>,
    chain_config: Option<ChainConfig>,
    store_config: Option<StoreConfig>,
    #[allow(clippy::type_complexity)]
    store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
    initial_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>,
    store_mutator: Option<BoxedMutator<T::EthSpec, T::HotStore, T::ColdStore>>,
    execution_layer: Option<ExecutionLayer<T::EthSpec>>,
    mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>,
    mock_builder: Option<TestingBuilder<T::EthSpec>>,
    testing_slot_clock: Option<TestingSlotClock>,
    runtime: TestRuntime,
    log: Logger,
}

impl<E: EthSpec> Builder<EphemeralHarnessType<E>> {
    pub fn fresh_ephemeral_store(mut self) -> Self {
        let spec = self.spec.as_ref().expect("cannot build without spec");
        let validator_keypairs = self
            .validator_keypairs
            .clone()
            .expect("cannot build without validator keypairs");

        let store = Arc::new(
            HotColdDB::open_ephemeral(
                self.store_config.clone().unwrap_or_default(),
                spec.clone(),
                self.log.clone(),
            )
            .unwrap(),
        );
        let mutator = move |builder: BeaconChainBuilder<_>| {
            let genesis_state = interop_genesis_state::<E>(
                &validator_keypairs,
                HARNESS_GENESIS_TIME,
                Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
                None,
                builder.get_spec(),
            )
            .expect("should generate interop state");
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };

        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Create a new ephemeral store that uses the specified `genesis_state`.
    pub fn genesis_state_ephemeral_store(mut self, genesis_state: BeaconState<E>) -> Self {
        let spec = self.spec.as_ref().expect("cannot build without spec");

        let store = Arc::new(
            HotColdDB::open_ephemeral(
                self.store_config.clone().unwrap_or_default(),
                spec.clone(),
                self.log.clone(),
            )
            .unwrap(),
        );
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };

        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Manually restore from a given `MemoryStore`.
    pub fn resumed_ephemeral_store(
        mut self,
        store: Arc<HotColdDB<E, MemoryStore<E>, MemoryStore<E>>>,
    ) -> Self {
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .resume_from_db()
                .expect("should resume from database")
        };

        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }
}

impl<E: EthSpec> Builder<DiskHarnessType<E>> {
    /// Disk store, start from genesis.
    pub fn fresh_disk_store(mut self, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) -> Self {
        let validator_keypairs = self
            .validator_keypairs
            .clone()
            .expect("cannot build without validator keypairs");

        let mutator = move |builder: BeaconChainBuilder<_>| {
            let genesis_state = interop_genesis_state::<E>(
                &validator_keypairs,
                HARNESS_GENESIS_TIME,
                Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH),
                None,
                builder.get_spec(),
            )
            .expect("should generate interop state");
            builder
                .genesis_state(genesis_state)
                .expect("should build state using recent genesis")
        };

        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }

    /// Disk store, resume.
    pub fn resumed_disk_store(mut self, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) -> Self {
        let mutator = move |builder: BeaconChainBuilder<_>| {
            builder
                .resume_from_db()
                .expect("should resume from database")
        };

        self.store = Some(store);
        self.store_mutator(Box::new(mutator))
    }
}

impl<E, Hot, Cold> Builder<BaseHarnessType<E, Hot, Cold>>
where
    E: EthSpec,
    Hot: ItemStore<E>,
    Cold: ItemStore<E>,
{
    pub fn new(eth_spec_instance: E) -> Self {
        let runtime = TestRuntime::default();
        let log = runtime.log.clone();

        Self {
            eth_spec_instance,
            spec: None,
            validator_keypairs: None,
            chain_config: None,
            store_config: None,
            store: None,
            initial_mutator: None,
            store_mutator: None,
            execution_layer: None,
            mock_execution_layer: None,
            mock_builder: None,
            testing_slot_clock: None,
            runtime,
            log,
        }
    }

    pub fn deterministic_keypairs(self, num_keypairs: usize) -> Self {
        self.keypairs(types::test_utils::generate_deterministic_keypairs(
            num_keypairs,
        ))
    }

    pub fn keypairs(mut self, validator_keypairs: Vec<Keypair>) -> Self {
        self.validator_keypairs = Some(validator_keypairs);
        self
    }

    pub fn default_spec(self) -> Self {
        self.spec_or_default(None)
    }

    pub fn spec(self, spec: ChainSpec) -> Self {
        self.spec_or_default(Some(spec))
    }

    pub fn spec_or_default(mut self, spec: Option<ChainSpec>) -> Self {
        self.spec = Some(spec.unwrap_or_else(test_spec::<E>));
        self
    }

    pub fn logger(mut self, log: Logger) -> Self {
        self.log = log.clone();
        self.runtime.set_logger(log);
        self
    }

    /// This mutator will be run before the `store_mutator`.
    pub fn initial_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
        assert!(
            self.initial_mutator.is_none(),
            "initial mutator already set"
        );
        self.initial_mutator = Some(mutator);
        self
    }

    /// This mutator will be run after the `initial_mutator`.
    pub fn store_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
        assert!(self.store_mutator.is_none(), "store mutator already set");
        self.store_mutator = Some(mutator);
        self
    }

    /// Purposefully replace the `store_mutator`.
    pub fn override_store_mutator(mut self, mutator: BoxedMutator<E, Hot, Cold>) -> Self {
        assert!(self.store_mutator.is_some(), "store mutator not set");
        self.store_mutator = Some(mutator);
        self
    }

    pub fn chain_config(mut self, chain_config: ChainConfig) -> Self {
        self.chain_config = Some(chain_config);
        self
    }

    pub fn execution_layer(mut self, urls: &[&str]) -> Self {
        assert!(
            self.execution_layer.is_none(),
            "execution layer already defined"
        );

        let urls: Vec<SensitiveUrl> = urls
            .iter()
            .map(|s| SensitiveUrl::parse(s))
            .collect::<Result<_, _>>()
            .unwrap();

        let config = execution_layer::Config {
            execution_endpoints: urls,
            secret_files: vec![],
            suggested_fee_recipient: Some(Address::repeat_byte(42)),
            ..Default::default()
        };
        let execution_layer = ExecutionLayer::from_config(
            config,
            self.runtime.task_executor.clone(),
            self.log.clone(),
        )
        .unwrap();

        self.execution_layer = Some(execution_layer);
        self
    }

    pub fn mock_execution_layer(mut self) -> Self {
        let spec = self.spec.clone().expect("cannot build without spec");
        let mock = MockExecutionLayer::new(
            self.runtime.task_executor.clone(),
            spec.terminal_total_difficulty,
            DEFAULT_TERMINAL_BLOCK,
            spec.terminal_block_hash,
            spec.terminal_block_hash_activation_epoch,
            Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
            None,
        );
        self.execution_layer = Some(mock.el.clone());
        self.mock_execution_layer = Some(mock);
        self
    }

    pub fn mock_execution_layer_with_builder(mut self, beacon_url: SensitiveUrl) -> Self {
        // Get a random unused port.
        let port = unused_port::unused_tcp_port().unwrap();
        let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap();

        let spec = self.spec.clone().expect("cannot build without spec");
        let mock_el = MockExecutionLayer::new(
            self.runtime.task_executor.clone(),
            spec.terminal_total_difficulty,
            DEFAULT_TERMINAL_BLOCK,
            spec.terminal_block_hash,
            spec.terminal_block_hash_activation_epoch,
            Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
            Some(builder_url.clone()),
        )
        .move_to_terminal_block();

        let mock_el_url = SensitiveUrl::parse(mock_el.server.url().as_str()).unwrap();

        self.mock_builder = Some(TestingBuilder::new(
            mock_el_url,
            builder_url,
            beacon_url,
            spec,
            self.runtime.task_executor.clone(),
        ));
        self.execution_layer = Some(mock_el.el.clone());
        self.mock_execution_layer = Some(mock_el);

        self
    }

    /// Instruct the mock execution engine to always return a "valid" response to any payload it is
    /// asked to execute.
    pub fn mock_execution_layer_all_payloads_valid(self) -> Self {
        self.mock_execution_layer
            .as_ref()
            .expect("requires mock execution layer")
            .server
            .all_payloads_valid();
        self
    }

    pub fn testing_slot_clock(mut self, slot_clock: TestingSlotClock) -> Self {
        self.testing_slot_clock = Some(slot_clock);
        self
    }

    pub fn build(self) -> BeaconChainHarness<BaseHarnessType<E, Hot, Cold>> {
        let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);

        let log = self.log;
        let spec = self.spec.expect("cannot build without spec");
        let seconds_per_slot = spec.seconds_per_slot;
        let validator_keypairs = self
            .validator_keypairs
            .expect("cannot build without validator keypairs");

        let mut builder = BeaconChainBuilder::new(self.eth_spec_instance)
            .logger(log.clone())
            .custom_spec(spec)
            .store(self.store.expect("cannot build without store"))
            .store_migrator_config(MigratorConfig::default().blocking())
            .task_executor(self.runtime.task_executor.clone())
            .execution_layer(self.execution_layer)
            .dummy_eth1_backend()
            .expect("should build dummy backend")
            .shutdown_sender(shutdown_tx)
            .chain_config(self.chain_config.unwrap_or_default())
            .event_handler(Some(ServerSentEventHandler::new_with_capacity(
                log.clone(),
                5,
            )))
            .monitor_validators(true, vec![], log);

        builder = if let Some(mutator) = self.initial_mutator {
            mutator(builder)
        } else {
            builder
        };

        builder = if let Some(mutator) = self.store_mutator {
            mutator(builder)
        } else {
            builder
        };

        // Initialize the slot clock only if it hasn't already been initialized.
        builder = if let Some(testing_slot_clock) = self.testing_slot_clock {
            builder.slot_clock(testing_slot_clock)
        } else if builder.get_slot_clock().is_none() {
            builder
                .testing_slot_clock(Duration::from_secs(seconds_per_slot))
                .expect("should configure testing slot clock")
        } else {
            builder
        };

        let chain = builder.build().expect("should build");

        BeaconChainHarness {
            spec: chain.spec.clone(),
            chain: Arc::new(chain),
            validator_keypairs,
            shutdown_receiver: Arc::new(Mutex::new(shutdown_receiver)),
            runtime: self.runtime,
            mock_execution_layer: self.mock_execution_layer,
            mock_builder: self.mock_builder.map(Arc::new),
            rng: make_rng(),
        }
    }
}

/// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and
/// attestations.
///
/// Used for testing.
pub struct BeaconChainHarness { pub validator_keypairs: Vec, pub chain: Arc>, pub spec: ChainSpec, pub shutdown_receiver: Arc>>, pub runtime: TestRuntime, pub mock_execution_layer: Option>, pub mock_builder: Option>>, pub rng: Mutex, } pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>; pub type HarnessAttestations = Vec<(CommitteeAttestations, Option>)>; pub type HarnessSyncContributions = Vec<( Vec<(SyncCommitteeMessage, usize)>, Option>, )>; impl BeaconChainHarness> where E: EthSpec, Hot: ItemStore, Cold: ItemStore, { pub fn builder(eth_spec_instance: E) -> Builder> { Builder::new(eth_spec_instance) } pub fn logger(&self) -> &slog::Logger { &self.chain.log } pub fn execution_block_generator(&self) -> RwLockWriteGuard<'_, ExecutionBlockGenerator> { self.mock_execution_layer .as_ref() .expect("harness was not built with mock execution layer") .server .execution_block_generator() } pub fn get_all_validators(&self) -> Vec { (0..self.validator_keypairs.len()).collect() } pub fn slots_per_epoch(&self) -> u64 { E::slots_per_epoch() } pub fn epoch_start_slot(&self, epoch: u64) -> u64 { let epoch = Epoch::new(epoch); epoch.start_slot(E::slots_per_epoch()).into() } pub fn shutdown_reasons(&self) -> Vec { let mutex = self.shutdown_receiver.clone(); let mut receiver = mutex.lock(); std::iter::from_fn(move || match receiver.try_next() { Ok(Some(s)) => Some(s), Ok(None) => panic!("shutdown sender dropped"), Err(_) => None, }) .collect() } pub fn get_current_state(&self) -> BeaconState { self.chain.head_beacon_state_cloned() } pub fn get_timestamp_at_slot(&self) -> u64 { let state = self.get_current_state(); compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap() } pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); ( head.beacon_state.clone_with_only_committee_caches(), state_root, ) } pub fn head_slot(&self) -> Slot { self.chain.canonical_head.cached_head().head_slot() } pub fn head_block_root(&self) -> Hash256 { self.chain.canonical_head.cached_head().head_block_root() } pub fn finalized_checkpoint(&self) -> Checkpoint { self.chain .canonical_head .cached_head() .finalized_checkpoint() } pub fn justified_checkpoint(&self) -> Checkpoint { self.chain .canonical_head .cached_head() .justified_checkpoint() } pub fn get_current_slot(&self) -> Slot { self.chain.slot().unwrap() } pub fn get_block( &self, block_hash: SignedBeaconBlockHash, ) -> Option>> { self.chain.get_blinded_block(&block_hash.into()).unwrap() } pub fn block_exists(&self, block_hash: SignedBeaconBlockHash) -> bool { self.get_block(block_hash).is_some() } pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain .store .load_hot_state(&state_hash.into(), StateRootStrategy::Accurate) .unwrap() } pub fn get_cold_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain .store .load_cold_state(&state_hash.into()) .unwrap() } pub fn hot_state_exists(&self, state_hash: BeaconStateHash) -> bool { self.get_hot_state(state_hash).is_some() } pub fn cold_state_exists(&self, state_hash: BeaconStateHash) -> bool { self.get_cold_state(state_hash).is_some() } pub fn is_skipped_slot(&self, state: &BeaconState, slot: Slot) -> bool { state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap() } pub async fn make_block( &self, mut state: BeaconState, slot: Slot, ) -> (SignedBeaconBlock, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); 
complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); state .build_all_caches(&self.spec) .expect("should build caches"); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); // If we produce two blocks for the same slot, they hash up to the same value and // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); let (block, state) = self .chain .produce_block_on_state( state, None, slot, randao_reveal, Some(graffiti), ProduceBlockVerification::VerifyRandao, ) .await .unwrap(); let signed_block = block.sign( &self.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); (signed_block, state) } /// Useful for the `per_block_processing` tests. Creates a block, and returns the state after /// caches are built but before the generated block is processed. pub async fn make_block_return_pre_state( &self, mut state: BeaconState, slot: Slot, ) -> (SignedBeaconBlock, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); complete_state_advance(&mut state, None, slot, &self.spec) .expect("should be able to advance state to slot"); state .build_all_caches(&self.spec) .expect("should build caches"); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); // If we produce two blocks for the same slot, they hash up to the same value and // BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().gen::<[u8; 32]>()); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); let pre_state = state.clone(); let (block, state) = self .chain .produce_block_on_state( state, None, slot, randao_reveal, Some(graffiti), ProduceBlockVerification::VerifyRandao, ) .await .unwrap(); let signed_block = block.sign( &self.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); (signed_block, pre_state) } /// Create a randao reveal for a block at `slot`. pub fn sign_randao_reveal( &self, state: &BeaconState, proposer_index: usize, slot: Slot, ) -> Signature { let epoch = slot.epoch(E::slots_per_epoch()); let domain = self.spec.get_domain( epoch, Domain::Randao, &state.fork(), state.genesis_validators_root(), ); let message = epoch.signing_root(domain); let sk = &self.validator_keypairs[proposer_index].sk; sk.sign(message) } /// Sign a beacon block using the proposer's key. pub fn sign_beacon_block( &self, block: BeaconBlock, state: &BeaconState, ) -> SignedBeaconBlock { let proposer_index = block.proposer_index() as usize; block.sign( &self.validator_keypairs[proposer_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ) } /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. /// /// The attestation doesn't _really_ have anything about it that makes it unaggregated per say, /// however this function is only required in the context of forming an unaggregated /// attestation. 
It would be an (undetectable) violation of the protocol to create a /// `SignedAggregateAndProof` based upon the output of this function. /// /// This function will produce attestations to optimistic blocks, which is against the /// specification but useful during testing. pub fn produce_unaggregated_attestation_for_block( &self, slot: Slot, index: CommitteeIndex, beacon_block_root: Hash256, mut state: Cow>, state_root: Hash256, ) -> Result, BeaconChainError> { let epoch = slot.epoch(E::slots_per_epoch()); if state.slot() > slot { return Err(BeaconChainError::CannotAttestToFutureState); } else if state.current_epoch() < epoch { let mut_state = state.to_mut(); // Only perform a "partial" state advance since we do not require the state roots to be // accurate. partial_state_advance( mut_state, Some(state_root), epoch.start_slot(E::slots_per_epoch()), &self.spec, )?; mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } let committee_len = state.get_beacon_committee(slot, index)?.committee.len(); let target_slot = epoch.start_slot(E::slots_per_epoch()); let target_root = if state.slot() <= target_slot { beacon_block_root } else { *state.get_block_root(target_slot)? }; Ok(Attestation { aggregation_bits: BitList::with_capacity(committee_len)?, data: AttestationData { slot, index, beacon_block_root, source: state.current_justified_checkpoint(), target: Checkpoint { epoch, root: target_root, }, }, signature: AggregateSignature::empty(), }) } /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. For example, if the return value is /// called `all_attestations`, then all attestations in `all_attestations[0]` will be for /// committee 0, whilst all in `all_attestations[1]` will be for committee 1. 
pub fn make_unaggregated_attestations( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, ) -> Vec> { self.make_unaggregated_attestations_with_limit( attesting_validators, state, state_root, head_block_root, attestation_slot, None, ) .0 } pub fn make_unaggregated_attestations_with_limit( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, head_block_root: SignedBeaconBlockHash, attestation_slot: Slot, limit: Option, ) -> (Vec>, Vec) { let committee_count = state.get_committee_count_at_slot(state.slot()).unwrap(); let fork = self .spec .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); let attesters = Mutex::new(vec![]); let attestations = state .get_beacon_committees_at_slot(attestation_slot) .expect("should get committees") .iter() .map(|bc| { bc.committee .par_iter() .enumerate() .filter_map(|(i, validator_index)| { if !attesting_validators.contains(validator_index) { return None; } let mut attesters = attesters.lock(); if let Some(limit) = limit { if attesters.len() >= limit { return None; } } attesters.push(*validator_index); let mut attestation = self .produce_unaggregated_attestation_for_block( attestation_slot, bc.index, head_block_root.into(), Cow::Borrowed(state), state_root, ) .unwrap(); attestation.aggregation_bits.set(i, true).unwrap(); attestation.signature = { let domain = self.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, state.genesis_validators_root(), ); let message = attestation.data.signing_root(domain); let mut agg_sig = AggregateSignature::infinity(); agg_sig.add_assign( &self.validator_keypairs[*validator_index].sk.sign(message), ); agg_sig }; let subnet_id = SubnetId::compute_subnet_for_attestation_data::( &attestation.data, committee_count, &self.chain.spec, ) .unwrap(); Some((attestation, subnet_id)) }) .collect::>() }) .collect::>(); let attesters = attesters.into_inner(); if let Some(limit) = limit { assert_eq!( limit, attesters.len(), "failed to generate `limit` attestations" ); } (attestations, attesters) } /// A list of sync messages for the given state. pub fn make_sync_committee_messages( &self, state: &BeaconState, head_block_root: Hash256, message_slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> Vec> { let sync_committee: Arc> = match relative_sync_committee { RelativeSyncCommittee::Current => state .current_sync_committee() .expect("should be called on altair beacon state") .clone(), RelativeSyncCommittee::Next => state .next_sync_committee() .expect("should be called on altair beacon state") .clone(), }; let fork = self .spec .fork_at_epoch(message_slot.epoch(E::slots_per_epoch())); sync_committee .pubkeys .as_ref() .chunks(E::sync_subcommittee_size()) .map(|subcommittee| { subcommittee .iter() .enumerate() .map(|(subcommittee_position, pubkey)| { let validator_index = self .chain .validator_index(pubkey) .expect("should find validator index") .expect("pubkey should exist in the beacon chain"); let sync_message = SyncCommitteeMessage::new::( message_slot, head_block_root, validator_index as u64, &self.validator_keypairs[validator_index].sk, &fork, state.genesis_validators_root(), &self.spec, ); (sync_message, subcommittee_position) }) .collect() }) .collect() } /// Deprecated: Use make_unaggregated_attestations() instead. /// /// A list of attestations for each committee for the given slot. /// /// The first layer of the Vec is organised per committee. 
For example, if the return value is /// called `all_attestations`, then all attestations in `all_attestations[0]` will be for /// committee 0, whilst all in `all_attestations[1]` will be for committee 1. pub fn get_unaggregated_attestations( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, state_root: Hash256, head_block_root: Hash256, attestation_slot: Slot, ) -> Vec, SubnetId)>> { let validators: Vec = match attestation_strategy { AttestationStrategy::AllValidators => self.get_all_validators(), AttestationStrategy::SomeValidators(vals) => vals.clone(), }; self.make_unaggregated_attestations( &validators, state, state_root, head_block_root.into(), attestation_slot, ) } pub fn make_attestations( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, block_hash: SignedBeaconBlockHash, slot: Slot, ) -> HarnessAttestations { self.make_attestations_with_limit( attesting_validators, state, state_root, block_hash, slot, None, ) .0 } /// Produce exactly `limit` attestations. /// /// Return attestations and vec of validator indices that attested. pub fn make_attestations_with_limit( &self, attesting_validators: &[usize], state: &BeaconState, state_root: Hash256, block_hash: SignedBeaconBlockHash, slot: Slot, limit: Option, ) -> (HarnessAttestations, Vec) { let (unaggregated_attestations, attesters) = self .make_unaggregated_attestations_with_limit( attesting_validators, state, state_root, block_hash, slot, limit, ); let fork = self.spec.fork_at_epoch(slot.epoch(E::slots_per_epoch())); let aggregated_attestations: Vec>> = unaggregated_attestations .iter() .map(|committee_attestations| { // If there are any attestations in this committee, create an aggregate. if let Some((attestation, _)) = committee_attestations.first() { let bc = state .get_beacon_committee(attestation.data.slot, attestation.data.index) .unwrap(); // Find an aggregator if one exists. Return `None` if there are no // aggregators. let aggregator_index = bc .committee .iter() .find(|&validator_index| { if !attesters.contains(validator_index) { return false; } let selection_proof = SelectionProof::new::( slot, &self.validator_keypairs[*validator_index].sk, &fork, state.genesis_validators_root(), &self.spec, ); selection_proof .is_aggregator(bc.committee.len(), &self.spec) .unwrap_or(false) }) .copied()?; // If the chain is able to produce an aggregate, use that. Otherwise, build an // aggregate locally. let aggregate = self .chain .get_aggregated_attestation(&attestation.data) .unwrap() .unwrap_or_else(|| { committee_attestations.iter().skip(1).fold( attestation.clone(), |mut agg, (att, _)| { agg.aggregate(att); agg }, ) }); let signed_aggregate = SignedAggregateAndProof::from_aggregate( aggregator_index as u64, aggregate, None, &self.validator_keypairs[aggregator_index].sk, &fork, state.genesis_validators_root(), &self.spec, ); Some(signed_aggregate) } else { None } }) .collect(); ( unaggregated_attestations .into_iter() .zip(aggregated_attestations) .collect(), attesters, ) } pub fn make_sync_contributions( &self, state: &BeaconState, block_hash: Hash256, slot: Slot, relative_sync_committee: RelativeSyncCommittee, ) -> HarnessSyncContributions { let sync_messages = self.make_sync_committee_messages(state, block_hash, slot, relative_sync_committee); let sync_contributions: Vec>> = sync_messages .iter() .enumerate() .map(|(subnet_id, committee_messages)| { // If there are any sync messages in this committee, create an aggregate. 
if let Some((sync_message, subcommittee_position)) = committee_messages.first() { let sync_committee: Arc> = state .current_sync_committee() .expect("should be called on altair beacon state") .clone(); let aggregator_index = sync_committee .get_subcommittee_pubkeys(subnet_id) .unwrap() .iter() .find_map(|pubkey| { let validator_index = self .chain .validator_index(pubkey) .expect("should find validator index") .expect("pubkey should exist in the beacon chain"); let selection_proof = SyncSelectionProof::new::( slot, subnet_id as u64, &self.validator_keypairs[validator_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); selection_proof .is_aggregator::() .expect("should determine aggregator") .then_some(validator_index) })?; let default = SyncCommitteeContribution::from_message( sync_message, subnet_id as u64, *subcommittee_position, ) .expect("should derive sync contribution"); let aggregate = committee_messages.iter().skip(1).fold( default, |mut agg, (sig, position)| { let contribution = SyncCommitteeContribution::from_message( sig, subnet_id as u64, *position, ) .expect("should derive sync contribution"); agg.aggregate(&contribution); agg }, ); let signed_aggregate = SignedContributionAndProof::from_aggregate( aggregator_index as u64, aggregate, None, &self.validator_keypairs[aggregator_index].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); Some(signed_aggregate) } else { None } }) .collect(); sync_messages.into_iter().zip(sync_contributions).collect() } pub fn make_attester_slashing(&self, validator_indices: Vec) -> AttesterSlashing { self.make_attester_slashing_with_epochs(validator_indices, None, None, None, None) } pub fn make_attester_slashing_with_epochs( &self, validator_indices: Vec, source1: Option, target1: Option, source2: Option, target2: Option, ) -> AttesterSlashing { let fork = self.chain.canonical_head.cached_head().head_fork(); let mut attestation_1 = IndexedAttestation { attesting_indices: VariableList::new(validator_indices).unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, beacon_block_root: Hash256::zero(), target: Checkpoint { root: Hash256::zero(), epoch: target1.unwrap_or(fork.epoch), }, source: Checkpoint { root: Hash256::zero(), epoch: source1.unwrap_or(Epoch::new(0)), }, }, signature: AggregateSignature::infinity(), }; let mut attestation_2 = attestation_1.clone(); attestation_2.data.index += 1; attestation_2.data.source.epoch = source2.unwrap_or(Epoch::new(0)); attestation_2.data.target.epoch = target2.unwrap_or(fork.epoch); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, genesis_validators_root, ); let message = attestation.data.signing_root(domain); attestation.signature.add_assign(&sk.sign(message)); } } AttesterSlashing { attestation_1, attestation_2, } } pub fn make_attester_slashing_different_indices( &self, validator_indices_1: Vec, validator_indices_2: Vec, ) -> AttesterSlashing { let data = AttestationData { slot: Slot::new(0), index: 0, beacon_block_root: Hash256::zero(), target: Checkpoint { root: Hash256::zero(), epoch: Epoch::new(0), }, source: Checkpoint { root: Hash256::zero(), epoch: Epoch::new(0), }, }; let mut attestation_1 = IndexedAttestation { attesting_indices: VariableList::new(validator_indices_1).unwrap(), 
data: data.clone(), signature: AggregateSignature::infinity(), }; let mut attestation_2 = IndexedAttestation { attesting_indices: VariableList::new(validator_indices_2).unwrap(), data, signature: AggregateSignature::infinity(), }; attestation_2.data.index += 1; let fork = self.chain.canonical_head.cached_head().head_fork(); for attestation in &mut [&mut attestation_1, &mut attestation_2] { for &i in &attestation.attesting_indices { let sk = &self.validator_keypairs[i as usize].sk; let genesis_validators_root = self.chain.genesis_validators_root; let domain = self.chain.spec.get_domain( attestation.data.target.epoch, Domain::BeaconAttester, &fork, genesis_validators_root, ); let message = attestation.data.signing_root(domain); attestation.signature.add_assign(&sk.sign(message)); } } AttesterSlashing { attestation_1, attestation_2, } } pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing { self.make_proposer_slashing_at_slot(validator_index, None) } pub fn make_proposer_slashing_at_slot( &self, validator_index: u64, slot_override: Option, ) -> ProposerSlashing { let mut block_header_1 = self.chain.head_beacon_block().message().block_header(); block_header_1.proposer_index = validator_index; if let Some(slot) = slot_override { block_header_1.slot = slot; } let mut block_header_2 = block_header_1.clone(); block_header_2.state_root = Hash256::zero(); let sk = &self.validator_keypairs[validator_index as usize].sk; let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; let mut signed_block_headers = vec![block_header_1, block_header_2] .into_iter() .map(|block_header| { block_header.sign::(sk, &fork, genesis_validators_root, &self.chain.spec) }) .collect::>(); ProposerSlashing { signed_header_2: signed_block_headers.remove(1), signed_header_1: signed_block_headers.remove(0), } } pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit { let sk = &self.validator_keypairs[validator_index as usize].sk; let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; VoluntaryExit { epoch, validator_index, } .sign(sk, &fork, genesis_validators_root, &self.chain.spec) } pub fn add_voluntary_exit( &self, block: &mut BeaconBlock, validator_index: u64, epoch: Epoch, ) { let exit = self.make_voluntary_exit(validator_index, epoch); block.body_mut().voluntary_exits_mut().push(exit).unwrap(); } /// Create a new block, apply `block_modifier` to it, sign it and return it. /// /// The state returned is a pre-block state at the same slot as the produced block. 
pub async fn make_block_with_modifier( &self, state: BeaconState, slot: Slot, block_modifier: impl FnOnce(&mut BeaconBlock), ) -> (SignedBeaconBlock, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); let (block, state) = self.make_block_return_pre_state(state, slot).await; let (mut block, _) = block.deconstruct(); block_modifier(&mut block); let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap(); let signed_block = block.sign( &self.validator_keypairs[proposer_index as usize].sk, &state.fork(), state.genesis_validators_root(), &self.spec, ); (signed_block, state) } pub fn make_deposits<'a>( &self, state: &'a mut BeaconState, num_deposits: usize, invalid_pubkey: Option, invalid_signature: Option, ) -> (Vec, &'a mut BeaconState) { let mut datas = vec![]; for _ in 0..num_deposits { let keypair = Keypair::random(); let pubkeybytes = PublicKeyBytes::from(keypair.pk.clone()); let mut data = DepositData { pubkey: pubkeybytes, withdrawal_credentials: Hash256::from_slice( &get_withdrawal_credentials(&keypair.pk, self.spec.bls_withdrawal_prefix_byte) [..], ), amount: self.spec.min_deposit_amount, signature: SignatureBytes::empty(), }; data.signature = data.create_signature(&keypair.sk, &self.spec); if let Some(invalid_pubkey) = invalid_pubkey { data.pubkey = invalid_pubkey; } if let Some(invalid_signature) = invalid_signature.clone() { data.signature = invalid_signature; } datas.push(data); } // Vector containing all leaves let leaves = datas .iter() .map(|data| data.tree_hash_root()) .collect::>(); // Building a VarList from leaves let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone()); // Setting the deposit_root to be the tree_hash_root of the VarList state.eth1_data_mut().deposit_root = deposit_data_list.tree_hash_root(); state.eth1_data_mut().deposit_count = num_deposits as u64; *state.eth1_deposit_index_mut() = 0; // Building the merkle tree used for generating proofs let tree = MerkleTree::create(&leaves[..], self.spec.deposit_contract_tree_depth as usize); // Building proofs let mut proofs = vec![]; for i in 0..leaves.len() { let (_, mut proof) = tree .generate_proof(i, self.spec.deposit_contract_tree_depth as usize) .expect("should generate proof"); proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); proofs.push(proof); } // Building deposits let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) .map(|(data, proof)| (data, proof.into())) .map(|(data, proof)| Deposit { proof, data }) .collect::>(); // Pushing deposits to block body (deposits, state) } pub async fn process_block( &self, slot: Slot, block_root: Hash256, block: SignedBeaconBlock, ) -> Result> { self.set_current_slot(slot); let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, Arc::new(block), CountUnrealized::True, NotifyExecutionLayer::Yes, ) .await? .into(); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } pub async fn process_block_result( &self, block: SignedBeaconBlock, ) -> Result> { let block_hash: SignedBeaconBlockHash = self .chain .process_block( block.canonical_root(), Arc::new(block), CountUnrealized::True, NotifyExecutionLayer::Yes, ) .await? 
.into(); self.chain.recompute_head_at_current_slot().await; Ok(block_hash) } pub fn process_attestations(&self, attestations: HarnessAttestations) { let num_validators = self.validator_keypairs.len(); let mut unaggregated = Vec::with_capacity(num_validators); // This is an over-allocation, but it should be fine. It won't be *that* memory hungry and // it's nice to have fast tests. let mut aggregated = Vec::with_capacity(num_validators); for (unaggregated_attestations, maybe_signed_aggregate) in attestations.iter() { for (attn, subnet) in unaggregated_attestations { unaggregated.push((attn, Some(*subnet))); } if let Some(a) = maybe_signed_aggregate { aggregated.push(a) } } for result in self .chain .batch_verify_unaggregated_attestations_for_gossip(unaggregated.into_iter()) .unwrap() { let verified = result.unwrap(); self.chain.add_to_naive_aggregation_pool(&verified).unwrap(); } for result in self .chain .batch_verify_aggregated_attestations_for_gossip(aggregated.into_iter()) .unwrap() { let verified = result.unwrap(); self.chain .apply_attestation_to_fork_choice(&verified) .unwrap(); self.chain.add_to_block_inclusion_pool(verified).unwrap(); } } pub fn set_current_slot(&self, slot: Slot) { let current_slot = self.chain.slot().unwrap(); let current_epoch = current_slot.epoch(E::slots_per_epoch()); let epoch = slot.epoch(E::slots_per_epoch()); assert!( epoch >= current_epoch, "Jumping backwards to an earlier epoch isn't well defined. \ Please generate test blocks epoch-by-epoch instead." ); self.chain.slot_clock.set_slot(slot.into()); } pub async fn add_block_at_slot( &self, slot: Slot, state: BeaconState, ) -> Result<(SignedBeaconBlockHash, SignedBeaconBlock, BeaconState), BlockError> { self.set_current_slot(slot); let (block, new_state) = self.make_block(state, slot).await; let block_hash = self .process_block(slot, block.canonical_root(), block.clone()) .await?; Ok((block_hash, block, new_state)) } pub fn attest_block( &self, state: &BeaconState, state_root: Hash256, block_hash: SignedBeaconBlockHash, block: &SignedBeaconBlock, validators: &[usize], ) { let attestations = self.make_attestations(validators, state, state_root, block_hash, block.slot()); self.process_attestations(attestations); } pub async fn add_attested_block_at_slot( &self, slot: Slot, state: BeaconState, state_root: Hash256, validators: &[usize], ) -> Result<(SignedBeaconBlockHash, BeaconState), BlockError> { let (block_hash, block, state) = self.add_block_at_slot(slot, state).await?; self.attest_block(&state, state_root, block_hash, &block, validators); Ok((block_hash, state)) } pub async fn add_attested_blocks_at_slots( &self, state: BeaconState, state_root: Hash256, slots: &[Slot], validators: &[usize], ) -> AddBlocksResult { assert!(!slots.is_empty()); self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None) .await } async fn add_attested_blocks_at_slots_given_lbh( &self, mut state: BeaconState, state_root: Hash256, slots: &[Slot], validators: &[usize], mut latest_block_hash: Option, ) -> AddBlocksResult { assert!( slots.windows(2).all(|w| w[0] <= w[1]), "Slots have to be sorted" ); // slice.is_sorted() isn't stabilized at the moment of writing this let mut block_hash_from_slot: HashMap = HashMap::new(); let mut state_hash_from_slot: HashMap = HashMap::new(); for slot in slots { let (block_hash, new_state) = self .add_attested_block_at_slot(*slot, state, state_root, validators) .await .unwrap(); state = new_state; block_hash_from_slot.insert(*slot, block_hash); 
state_hash_from_slot.insert(*slot, state.tree_hash_root().into()); latest_block_hash = Some(block_hash); } ( block_hash_from_slot, state_hash_from_slot, latest_block_hash.unwrap(), state, ) } /// A monstrosity of great usefulness. /// /// Calls `add_attested_blocks_at_slots` for each of the chains in `chains`, /// taking care to batch blocks by epoch so that the slot clock gets advanced one /// epoch at a time. /// /// Chains is a vec of `(state, slots, validators)` tuples. pub async fn add_blocks_on_multiple_chains( &self, chains: Vec<(BeaconState, Vec, Vec)>, ) -> Vec> { let slots_per_epoch = E::slots_per_epoch(); let min_epoch = chains .iter() .map(|(_, slots, _)| slots.iter().min().unwrap()) .min() .unwrap() .epoch(slots_per_epoch); let max_epoch = chains .iter() .map(|(_, slots, _)| slots.iter().max().unwrap()) .max() .unwrap() .epoch(slots_per_epoch); let mut chains = chains .into_iter() .map(|(state, slots, validators)| { ( state, slots, validators, HashMap::new(), HashMap::new(), SignedBeaconBlockHash::from(Hash256::zero()), ) }) .collect::>(); for epoch in min_epoch.as_u64()..=max_epoch.as_u64() { let mut new_chains = vec![]; for ( mut head_state, slots, validators, mut block_hashes, mut state_hashes, head_block, ) in chains { let epoch_slots = slots .iter() .filter(|s| s.epoch(slots_per_epoch).as_u64() == epoch) .copied() .collect::>(); let head_state_root = head_state.update_tree_hash_cache().unwrap(); let (new_block_hashes, new_state_hashes, new_head_block, new_head_state) = self .add_attested_blocks_at_slots_given_lbh( head_state, head_state_root, &epoch_slots, &validators, Some(head_block), ) .await; block_hashes.extend(new_block_hashes); state_hashes.extend(new_state_hashes); new_chains.push(( new_head_state, slots, validators, block_hashes, state_hashes, new_head_block, )); } chains = new_chains; } chains .into_iter() .map(|(state, _, _, block_hashes, state_hashes, head_block)| { (block_hashes, state_hashes, head_block, state) }) .collect() } pub fn get_finalized_checkpoints(&self) -> HashSet { let chain_dump = self.chain.chain_dump().unwrap(); chain_dump .iter() .cloned() .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) .filter(|block_hash| *block_hash != Hash256::zero().into()) .collect() } /// Deprecated: Do not modify the slot clock manually; rely on add_attested_blocks_at_slots() /// instead /// /// Advance the slot of the `BeaconChain`. /// /// Does not produce blocks or attestations. pub fn advance_slot(&self) { self.chain.slot_clock.advance_slot(); } /// Advance the clock to `lookahead` before the start of `slot`. pub fn advance_to_slot_lookahead(&self, slot: Slot, lookahead: Duration) { let time = self.chain.slot_clock.start_of(slot).unwrap() - lookahead; self.chain.slot_clock.set_current_time(time); } /// Deprecated: Use make_block() instead /// /// Returns a newly created block, signed by the proposer for the given slot. pub async fn build_block( &self, state: BeaconState, slot: Slot, _block_strategy: BlockStrategy, ) -> (SignedBeaconBlock, BeaconState) { self.make_block(state, slot).await } /// Uses `Self::extend_chain` to build the chain out to the `target_slot`. 
pub async fn extend_to_slot(&self, target_slot: Slot) -> Hash256 { if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } let num_slots = target_slot .as_usize() .checked_sub(self.chain.slot().unwrap().as_usize()) .expect("target_slot must be >= current_slot") .checked_add(1) .unwrap(); self.extend_slots(num_slots).await } /// Uses `Self::extend_chain` to `num_slots` blocks. /// /// Utilizes: /// /// - BlockStrategy::OnCanonicalHead, /// - AttestationStrategy::AllValidators, pub async fn extend_slots(&self, num_slots: usize) -> Hash256 { if self.chain.slot().unwrap() == self.chain.canonical_head.cached_head().head_slot() { self.advance_slot(); } self.extend_chain( num_slots, BlockStrategy::OnCanonicalHead, AttestationStrategy::AllValidators, ) .await } /// Deprecated: Use add_attested_blocks_at_slots() instead /// /// Extend the `BeaconChain` with some blocks and attestations. Returns the root of the /// last-produced block (the head of the chain). /// /// Chain will be extended by `num_blocks` blocks. /// /// The `block_strategy` dictates where the new blocks will be placed. /// /// The `attestation_strategy` dictates which validators will attest to the newly created /// blocks. pub async fn extend_chain( &self, num_blocks: usize, block_strategy: BlockStrategy, attestation_strategy: AttestationStrategy, ) -> Hash256 { let (mut state, slots) = match block_strategy { BlockStrategy::OnCanonicalHead => { let current_slot: u64 = self.get_current_slot().into(); let slots: Vec = (current_slot..(current_slot + (num_blocks as u64))) .map(Slot::new) .collect(); let state = self.get_current_state(); (state, slots) } BlockStrategy::ForkCanonicalChainAt { previous_slot, first_slot, } => { let first_slot_: u64 = first_slot.into(); let slots: Vec = (first_slot_..(first_slot_ + (num_blocks as u64))) .map(Slot::new) .collect(); let state = self .chain .state_at_slot(previous_slot, StateSkipConfig::WithStateRoots) .unwrap(); (state, slots) } }; let validators = match attestation_strategy { AttestationStrategy::AllValidators => self.get_all_validators(), AttestationStrategy::SomeValidators(vals) => vals, }; let state_root = state.update_tree_hash_cache().unwrap(); let (_, _, last_produced_block_hash, _) = self .add_attested_blocks_at_slots(state, state_root, &slots, &validators) .await; last_produced_block_hash.into() } /// Deprecated: Use add_attested_blocks_at_slots() instead /// /// Creates two forks: /// /// - The "honest" fork: created by the `honest_validators` who have built `honest_fork_blocks` /// on the head /// - The "faulty" fork: created by the `faulty_validators` who skipped a slot and /// then built `faulty_fork_blocks`. /// /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. pub async fn generate_two_forks_by_skipping_a_block( &self, honest_validators: &[usize], faulty_validators: &[usize], honest_fork_blocks: usize, faulty_fork_blocks: usize, ) -> (Hash256, Hash256) { let initial_head_slot = self.chain.head_snapshot().beacon_block.slot(); // Move to the next slot so we may produce some more blocks on the head. self.advance_slot(); // Extend the chain with blocks where only honest validators agree. let honest_head = self .extend_chain( honest_fork_blocks, BlockStrategy::OnCanonicalHead, AttestationStrategy::SomeValidators(honest_validators.to_vec()), ) .await; // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes // agree. 
        let faulty_head = self
            .extend_chain(
                faulty_fork_blocks,
                BlockStrategy::ForkCanonicalChainAt {
                    previous_slot: initial_head_slot,
                    // `initial_head_slot + 2` means one slot is skipped.
                    first_slot: initial_head_slot + 2,
                },
                AttestationStrategy::SomeValidators(faulty_validators.to_vec()),
            )
            .await;

        assert_ne!(honest_head, faulty_head, "forks should be distinct");

        (honest_head, faulty_head)
    }
}

// Junk `Debug` impl to satisfy certain trait bounds during testing.
impl<T: BeaconChainTypes> fmt::Debug for BeaconChainHarness<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BeaconChainHarness")
    }
}
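
// The module below is an illustrative sketch only and is not part of the upstream harness. It
// shows the typical way the `Builder`/`BeaconChainHarness` API defined above is driven from a
// test: build an ephemeral harness with deterministic keypairs, extend the chain by a few slots
// with all validators attesting, and inspect the resulting head. It assumes `MinimalEthSpec` is
// available via the `types::*` glob import and that `#[tokio::test]` can supply the async
// runtime; if the configured spec schedules Bellatrix at genesis (e.g. via `fork_from_env`),
// `.mock_execution_layer()` would also be required before `.build()`.
#[cfg(test)]
mod harness_usage_example {
    use super::*;

    /// Kept small so the example runs quickly; any validator count that satisfies the spec's
    /// committee requirements will do.
    const VALIDATOR_COUNT: usize = 24;

    #[tokio::test]
    async fn build_and_extend_a_chain() {
        // Ephemeral (in-memory) harness with deterministic keypairs for reproducibility.
        let harness = BeaconChainHarness::builder(MinimalEthSpec)
            .default_spec()
            .deterministic_keypairs(VALIDATOR_COUNT)
            .fresh_ephemeral_store()
            .build();

        // Produce one block per slot for a full epoch. `extend_slots` advances the slot clock
        // and has every validator attest to each new block.
        let num_slots = MinimalEthSpec::slots_per_epoch() as usize;
        let head_root = harness.extend_slots(num_slots).await;

        // The chain head should now be the last block produced, at the expected slot.
        assert_eq!(harness.head_block_root(), head_root);
        assert_eq!(harness.head_slot(), Slot::new(num_slots as u64));
    }
}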