diff --git a/Cargo.lock b/Cargo.lock
index 1c5ca52a9..c72a23957 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7073,6 +7073,7 @@ dependencies = [
  "rand 0.7.3",
  "rayon",
  "ring",
+ "safe_arith",
  "scrypt",
  "serde",
  "serde_derive",
diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs
index 7d4340774..7960c55ba 100644
--- a/account_manager/src/validator/create.rs
+++ b/account_manager/src/validator/create.rs
@@ -229,7 +229,7 @@ pub fn cli_run(
         })?;

         slashing_protection
-            .register_validator(&voting_pubkey)
+            .register_validator(voting_pubkey.compress())
             .map_err(|e| {
                 format!(
                     "Error registering validator {}: {:?}",
diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs
index 79d939e38..7ce73076c 100644
--- a/account_manager/src/validator/import.rs
+++ b/account_manager/src/validator/import.rs
@@ -236,7 +236,7 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin
             .public_key()
             .ok_or_else(|| format!("Keystore public key is invalid: {}", keystore.pubkey()))?;
         slashing_protection
-            .register_validator(&voting_pubkey)
+            .register_validator(voting_pubkey.compress())
             .map_err(|e| {
                 format!(
                     "Error registering validator {}: {:?}",
diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs
index c748bd0e5..11eb4bbdb 100644
--- a/beacon_node/beacon_chain/src/attestation_verification.rs
+++ b/beacon_node/beacon_chain/src/attestation_verification.rs
@@ -1073,7 +1073,7 @@ where
     }

     chain
-        .with_committee_cache(target.root, attestation_epoch, |committee_cache| {
+        .with_committee_cache(target.root, attestation_epoch, |committee_cache, _| {
             let committees_per_slot = committee_cache.committees_per_slot();

             Ok(committee_cache
diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 28256dc5b..144f08550 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -42,8 +42,11 @@ use slasher::Slasher;
 use slog::{crit, debug, error, info, trace, warn, Logger};
 use slot_clock::SlotClock;
 use state_processing::{
-    common::get_indexed_attestation, per_block_processing,
-    per_block_processing::errors::AttestationValidationError, per_slot_processing,
+    common::get_indexed_attestation,
+    per_block_processing,
+    per_block_processing::errors::AttestationValidationError,
+    per_slot_processing,
+    state_advance::{complete_state_advance, partial_state_advance},
     BlockSignatureStrategy, SigVerifiedOp,
 };
 use std::borrow::Cow;
@@ -156,6 +159,7 @@ pub struct HeadInfo {
     pub fork: Fork,
     pub genesis_time: u64,
     pub genesis_validators_root: Hash256,
+    pub proposer_shuffling_decision_root: Hash256,
 }

 pub trait BeaconChainTypes: Send + Sync + 'static {
@@ -243,7 +247,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     /// Caches the attester shuffling for a given epoch and shuffling key root.
     pub(crate) shuffling_cache: TimeoutRwLock<ShufflingCache>,
     /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root.
-    pub(crate) beacon_proposer_cache: Mutex<BeaconProposerCache>,
+    pub beacon_proposer_cache: Mutex<BeaconProposerCache>,
     /// Caches a map of `validator_index -> validator_pubkey`.
     pub(crate) validator_pubkey_cache: TimeoutRwLock<ValidatorPubkeyCache<T>>,
     /// A list of any hard-coded forks that have been disabled.
@@ -606,6 +610,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// A summarized version of `Self::head` that involves less cloning.
     pub fn head_info(&self) -> Result<HeadInfo, Error> {
         self.with_head(|head| {
+            let proposer_shuffling_decision_root = head
+                .beacon_state
+                .proposer_shuffling_decision_root(head.beacon_block_root)?;
+
             Ok(HeadInfo {
                 slot: head.beacon_block.slot(),
                 block_root: head.beacon_block_root,
@@ -615,6 +623,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 fork: head.beacon_state.fork,
                 genesis_time: head.beacon_state.genesis_time,
                 genesis_validators_root: head.beacon_state.genesis_validators_root,
+                proposer_shuffling_decision_root,
             })
         })
     }
@@ -773,6 +782,42 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         Ok(pubkey_cache.get(validator_index).cloned())
     }

+    /// As per `Self::validator_pubkey`, but returns `PublicKeyBytes`.
+    pub fn validator_pubkey_bytes(
+        &self,
+        validator_index: usize,
+    ) -> Result<Option<PublicKeyBytes>, Error> {
+        let pubkey_cache = self
+            .validator_pubkey_cache
+            .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
+            .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?;
+
+        Ok(pubkey_cache.get_pubkey_bytes(validator_index).copied())
+    }
+
+    /// As per `Self::validator_pubkey_bytes` but will resolve multiple indices at once to avoid
+    /// bouncing the read-lock on the pubkey cache.
+    ///
+    /// Returns a map that may have a length less than `validator_indices.len()` if some indices
+    /// could not be resolved.
+    pub fn validator_pubkey_bytes_many(
+        &self,
+        validator_indices: &[usize],
+    ) -> Result<HashMap<usize, PublicKeyBytes>, Error> {
+        let pubkey_cache = self
+            .validator_pubkey_cache
+            .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
+            .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?;
+
+        let mut map = HashMap::with_capacity(validator_indices.len());
+        for &validator_index in validator_indices {
+            if let Some(pubkey) = pubkey_cache.get_pubkey_bytes(validator_index) {
+                map.insert(validator_index, *pubkey);
+            }
+        }
+        Ok(map)
+    }
+
     /// Returns the block canonical root of the current canonical chain at a given slot.
     ///
     /// Returns `None` if the given slot doesn't exist in the chain.
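The batch accessor above exists to avoid re-acquiring the pubkey-cache read-lock once per validator. A minimal usage sketch, assuming a `chain: BeaconChain<T>` handle and an illustrative index list (neither binding is part of this diff):

    // One lock acquisition resolves every index. Indices unknown to the cache
    // are simply absent from the returned map, so the caller decides whether a
    // missing key is an error.
    let indices: Vec<usize> = vec![0, 1, 2];
    let pubkeys = chain.validator_pubkey_bytes_many(&indices)?;
    let resolved = indices
        .iter()
        .filter_map(|i| pubkeys.get(i).copied())
        .count();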
@@ -803,19 +848,35 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         })
     }

-    /// Returns the attestation duties for a given validator index.
+    /// Returns the attestation duties for the given validator indices using the shuffling cache.
     ///
-    /// Information is read from the current state, so only information from the present and prior
-    /// epoch is available.
-    pub fn validator_attestation_duty(
+    /// An error may be returned if `head_block_root` is a finalized block; this function is only
+    /// designed for operations at the head of the chain.
+    ///
+    /// The returned `Vec` will have the same length as `validator_indices`; any
+    /// non-existing/inactive validators will have `None` values.
+    ///
+    /// ## Notes
+    ///
+    /// This function will try to use the shuffling cache to return the value. If the value is not
+    /// in the shuffling cache, it will be added. Care should be taken not to wash out the
+    /// shuffling cache with historical/useless values.
+    pub fn validator_attestation_duties(
         &self,
-        validator_index: usize,
+        validator_indices: &[u64],
         epoch: Epoch,
-    ) -> Result<Option<AttestationDuty>, Error> {
-        let head_block_root = self.head_beacon_block_root()?;
+        head_block_root: Hash256,
+    ) -> Result<(Vec<Option<AttestationDuty>>, Hash256), Error> {
+        self.with_committee_cache(head_block_root, epoch, |committee_cache, dependent_root| {
+            let duties = validator_indices
+                .iter()
+                .map(|validator_index| {
+                    let validator_index = *validator_index as usize;
+                    committee_cache.get_attestation_duties(validator_index)
+                })
+                .collect();

-        self.with_committee_cache(head_block_root, epoch, |committee_cache| {
-            Ok(committee_cache.get_attestation_duties(validator_index))
+            Ok((duties, dependent_root))
         })
     }

@@ -867,6 +928,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 index,
                 head.beacon_block_root,
                 Cow::Borrowed(&head.beacon_state),
+                head.beacon_state_root(),
             )
         } else {
             // We disallow producing attestations *prior* to the current head since such an
@@ -902,6 +964,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         index: CommitteeIndex,
         beacon_block_root: Hash256,
         mut state: Cow<BeaconState<T::EthSpec>>,
+        state_root: Hash256,
     ) -> Result<Attestation<T::EthSpec>, Error> {
         let epoch = slot.epoch(T::EthSpec::slots_per_epoch());

@@ -909,13 +972,14 @@
             return Err(Error::CannotAttestToFutureState);
         } else if state.current_epoch() < epoch {
             let mut_state = state.to_mut();
-            while mut_state.current_epoch() < epoch {
-                // Note: here we provide `Hash256::zero()` as the root of the current state. This
-                // has the effect of setting the values of all historic state roots to the zero
-                // hash. This is an optimization, we don't need the state roots so why calculate
-                // them?
-                per_slot_processing(mut_state, Some(Hash256::zero()), &self.spec)?;
-            }
+            // Only perform a "partial" state advance since we do not require the state roots to be
+            // accurate.
+            partial_state_advance(
+                mut_state,
+                Some(state_root),
+                epoch.start_slot(T::EthSpec::slots_per_epoch()),
+                &self.spec,
+            )?;
             mut_state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
         }

@@ -1861,7 +1925,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn produce_block_on_state(
         &self,
         mut state: BeaconState<T::EthSpec>,
-        mut state_root_opt: Option<Hash256>,
+        state_root_opt: Option<Hash256>,
         produce_at_slot: Slot,
         randao_reveal: Signature,
         validator_graffiti: Option<Graffiti>,
@@ -1880,15 +1944,10 @@
         }

         let slot_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_SLOT_PROCESS_TIMES);
-        // If required, transition the new state to the present slot.
-        //
-        // Note: supplying some `state_root` when it it is known would be a cheap and easy
-        // optimization.
-        while state.slot < produce_at_slot {
-            // Using `state_root.take()` here ensures that we consume the `state_root` on the first
-            // iteration and never use it again.
-            per_slot_processing(&mut state, state_root_opt.take(), &self.spec)?;
-        }
+
+        // Ensure the state has performed a complete transition into the required slot.
+        complete_state_advance(&mut state, state_root_opt, produce_at_slot, &self.spec)?;
+
         drop(slot_timer);

         state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
@@ -2363,7 +2422,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     }

     /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head
-    /// `head_block_root`.
+    /// `head_block_root`. The `map_fn` will be supplied two values:
+    ///
+    /// - `&CommitteeCache`: the committee cache that serves the given parameters.
+    /// - `Hash256`: the "shuffling decision root" which uniquely identifies the `CommitteeCache`.
     ///
     /// It's not necessary that `head_block_root` matches our current view of the chain, it can be
     /// any block that is:
@@ -2376,7 +2438,7 @@
     ///
     /// ## Important
     ///
-    /// This function is **not** suitable for determining proposer duties.
+    /// This function is **not** suitable for determining proposer duties (only attester duties).
     ///
     /// ## Notes
     ///
@@ -2394,7 +2456,7 @@
         map_fn: F,
     ) -> Result<R, Error>
     where
-        F: Fn(&CommitteeCache) -> Result<R, Error>,
+        F: Fn(&CommitteeCache, Hash256) -> Result<R, Error>,
     {
         let head_block = self
             .fork_choice
@@ -2425,7 +2487,7 @@
         metrics::stop_timer(cache_wait_timer);

         if let Some(committee_cache) = shuffling_cache.get(&shuffling_id) {
-            map_fn(committee_cache)
+            map_fn(committee_cache, shuffling_id.shuffling_decision_block)
         } else {
             // Drop the shuffling cache to avoid holding the lock for any longer than
             // required.
@@ -2434,33 +2496,80 @@
             debug!(
                 self.log,
                 "Committee cache miss";
-                "shuffling_epoch" => shuffling_epoch.as_u64(),
+                "shuffling_epoch" => ?shuffling_epoch,
                 "head_block_root" => head_block_root.to_string(),
             );

             let state_read_timer =
                 metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);

-            let mut state = self
-                .store
-                .get_inconsistent_state_for_attestation_verification_only(
-                    &head_block.state_root,
-                    Some(head_block.slot),
-                )?
-                .ok_or(Error::MissingBeaconState(head_block.state_root))?;
+            // If the head of the chain can serve this request, use it.
+            //
+            // This code is a little awkward because we need to ensure that the head we read and
+            // the head we copy are identical. Taking one lock to read the head values and another
+            // to copy the head is liable to race conditions.
+            let head_state_opt = self.with_head(|head| {
+                if head.beacon_block_root == head_block_root {
+                    Ok(Some((
+                        head.beacon_state
+                            .clone_with(CloneConfig::committee_caches_only()),
+                        head.beacon_state_root(),
+                    )))
+                } else {
+                    Ok::<_, Error>(None)
+                }
+            })?;
+
+            // If the head state is useful for this request, use it. Otherwise, read a state from
+            // disk.
+            let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt {
+                (state, state_root)
+            } else {
+                let state_root = head_block.state_root;
+                let state = self
+                    .store
+                    .get_inconsistent_state_for_attestation_verification_only(
+                        &state_root,
+                        Some(head_block.slot),
+                    )?
+                    .ok_or(Error::MissingBeaconState(head_block.state_root))?;
+                (state, state_root)
+            };
+
+            /*
+             * IMPORTANT
+             *
+             * Since it's possible that
+             * `Store::get_inconsistent_state_for_attestation_verification_only` was used to obtain
+             * the state, we cannot rely upon the following fields:
+             *
+             * - `state.state_roots`
+             * - `state.block_roots`
+             *
+             * These fields should not be used for the rest of this function.
+             */

             metrics::stop_timer(state_read_timer);
             let state_skip_timer =
                 metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES);

-            while state.current_epoch() + 1 < shuffling_epoch {
-                // Here we tell `per_slot_processing` to skip hashing the state and just
-                // use the zero hash instead.
-                //
-                // The state roots are not useful for the shuffling, so there's no need to
-                // compute them.
-                per_slot_processing(&mut state, Some(Hash256::zero()), &self.spec)
-                    .map_err(Error::from)?;
+            // If the state is in an earlier epoch, advance it. If it's from a later epoch, reject
+            // it.
+            if state.current_epoch() + 1 < shuffling_epoch {
+                // Since there's a one-epoch look-ahead on the attester shuffling, it suffices to
+                // only advance into the slot prior to the `shuffling_epoch`.
+                let target_slot = shuffling_epoch
+                    .saturating_sub(1_u64)
+                    .start_slot(T::EthSpec::slots_per_epoch());
+
+                // Advance the state into the required slot, using the "partial" method since the
+                // state roots are not relevant for the shuffling.
+                partial_state_advance(&mut state, Some(state_root), target_slot, &self.spec)?;
+            } else if state.current_epoch() > shuffling_epoch {
+                return Err(Error::InvalidStateForShuffling {
+                    state_epoch: state.current_epoch(),
+                    shuffling_epoch,
+                });
             }

             metrics::stop_timer(state_skip_timer);
@@ -2473,6 +2582,7 @@
             state.build_committee_cache(relative_epoch, &self.spec)?;

             let committee_cache = state.committee_cache(relative_epoch)?;
+            let shuffling_decision_block = shuffling_id.shuffling_decision_block;

             self.shuffling_cache
                 .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
@@ -2481,7 +2591,7 @@

             metrics::stop_timer(committee_building_timer);

-            map_fn(&committee_cache)
+            map_fn(&committee_cache, shuffling_decision_block)
         }
     }

diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
index 212071010..646884b60 100644
--- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
+++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
@@ -59,7 +59,7 @@ impl Default for BeaconProposerCache {
 impl BeaconProposerCache {
     /// If it is cached, returns the proposer for the block at `slot` where the block has the
     /// ancestor block root of `shuffling_decision_block` at `end_slot(slot.epoch() - 1)`.
-    pub fn get<T: EthSpec>(
+    pub fn get_slot<T: EthSpec>(
         &mut self,
         shuffling_decision_block: Hash256,
         slot: Slot,
@@ -84,6 +84,20 @@ impl BeaconProposerCache {
         }
     }

+    /// As per `Self::get_slot`, but returns all proposers in all slots for the given `epoch`.
+    ///
+    /// The nth element in the returned `SmallVec` corresponds to the nth slot in the given
+    /// `epoch`. E.g., if `epoch == 1` then `smallvec[0]` refers to slot 32 (assuming
+    /// `SLOTS_PER_EPOCH == 32`).
+    pub fn get_epoch<T: EthSpec>(
+        &mut self,
+        shuffling_decision_block: Hash256,
+        epoch: Epoch,
+    ) -> Option<&SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>> {
+        let key = (epoch, shuffling_decision_block);
+        self.cache.get(&key).map(|cache| &cache.proposers)
+    }
+
     /// Insert the proposers into the cache.
     ///
     /// See `Self::get` for a description of `shuffling_decision_block`.
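The `(epoch, decision_block)` key means a hit is only possible when the caller derives the same decision root, so a re-org that changes the block at the end of the prior epoch is automatically a cache miss. A hedged lookup sketch; the `cache` and `decision_root` bindings are illustrative, not part of this diff:

    // `proposers[n]` is the proposer index for the nth slot of `epoch`: for
    // epoch 1 with 32-slot epochs, `proposers[0]` refers to slot 32.
    let epoch = Epoch::new(1);
    if let Some(proposers) = cache.get_epoch::<MainnetEthSpec>(decision_root, epoch) {
        let _first_slot_proposer: usize = proposers[0];
    }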
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index a3e5c3195..038695857 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -60,7 +60,9 @@ use state_processing::{
     block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
     per_block_processing,
     per_epoch_processing::EpochProcessingSummary,
-    per_slot_processing, BlockProcessingError, BlockSignatureStrategy, SlotProcessingError,
+    per_slot_processing,
+    state_advance::partial_state_advance,
+    BlockProcessingError, BlockSignatureStrategy, SlotProcessingError,
 };
 use std::borrow::Cow;
 use std::convert::TryFrom;
@@ -351,8 +353,12 @@ pub fn signature_verify_chain_segment(
         .map(|(_, block)| block.slot())
         .unwrap_or_else(|| slot);

-    let state =
-        cheap_state_advance_to_obtain_committees(&mut parent.pre_state, highest_slot, &chain.spec)?;
+    let state = cheap_state_advance_to_obtain_committees(
+        &mut parent.pre_state,
+        parent.beacon_state_root,
+        highest_slot,
+        &chain.spec,
+    )?;
     let pubkey_cache = get_validator_pubkey_cache(chain)?;
     let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);

@@ -564,7 +570,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         let proposer_opt = chain
             .beacon_proposer_cache
             .lock()
-            .get::<T::EthSpec>(proposer_shuffling_decision_block, block.slot());
+            .get_slot::<T::EthSpec>(proposer_shuffling_decision_block, block.slot());
         let (expected_proposer, fork, parent, block) = if let Some(proposer) = proposer_opt {
             // The proposer index was cached and we can return it without needing to load the
             // parent.
@@ -586,6 +592,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
             // The state produced is only valid for determining proposer/attester shuffling indices.
             let state = cheap_state_advance_to_obtain_committees(
                 &mut parent.pre_state,
+                parent.beacon_state_root,
                 block.slot(),
                 &chain.spec,
             )?;
@@ -694,6 +701,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {

         let state = cheap_state_advance_to_obtain_committees(
             &mut parent.pre_state,
+            parent.beacon_state_root,
             block.slot(),
             &chain.spec,
         )?;
@@ -738,6 +746,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {

         let state = cheap_state_advance_to_obtain_committees(
             &mut parent.pre_state,
+            parent.beacon_state_root,
             block.slot(),
             &chain.spec,
         )?;
@@ -1280,6 +1289,7 @@ fn load_parent(
             beacon_block: parent_block,
             beacon_block_root: root,
             pre_state: parent_state,
+            beacon_state_root: Some(parent_state_root),
         },
         block,
     ))
@@ -1303,6 +1313,7 @@ fn load_parent(
 /// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build).
 fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
     state: &'a mut BeaconState<E>,
+    state_root_opt: Option<Hash256>,
     block_slot: Slot,
     spec: &ChainSpec,
 ) -> Result<Cow<'a, BeaconState<E>>, BlockError<E>> {
@@ -1319,14 +1330,12 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
         })
     } else {
         let mut state = state.clone_with(CloneConfig::committee_caches_only());
+        let target_slot = block_epoch.start_slot(E::slots_per_epoch());

-        while state.current_epoch() < block_epoch {
-            // Don't calculate state roots since they aren't required for calculating
-            // shuffling (achieved by providing Hash256::zero()).
-            per_slot_processing(&mut state, Some(Hash256::zero()), spec).map_err(|e| {
-                BlockError::BeaconChainError(BeaconChainError::SlotProcessingError(e))
-            })?;
-        }
+        // Advance the state into the same epoch as the block. Use the "partial" method since state
+        // roots are not important for proposer/attester shuffling.
+        partial_state_advance(&mut state, state_root_opt, target_slot, spec)
+            .map_err(|e| BlockError::BeaconChainError(BeaconChainError::from(e)))?;

         state.build_committee_cache(RelativeEpoch::Current, spec)?;

diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs
index 395392971..1f5662a62 100644
--- a/beacon_node/beacon_chain/src/errors.rs
+++ b/beacon_node/beacon_chain/src/errors.rs
@@ -17,6 +17,7 @@ use state_processing::{
         ProposerSlashingValidationError,
     },
     signature_sets::Error as SignatureSetError,
+    state_advance::Error as StateAdvanceError,
     BlockProcessingError, SlotProcessingError,
 };
 use std::time::Duration;
@@ -51,6 +52,7 @@ pub enum BeaconChainError {
     MissingBeaconBlock(Hash256),
     MissingBeaconState(Hash256),
     SlotProcessingError(SlotProcessingError),
+    StateAdvanceError(StateAdvanceError),
     UnableToAdvanceState(String),
     NoStateForAttestation {
         beacon_block_root: Hash256,
@@ -81,6 +83,7 @@ pub enum BeaconChainError {
     BlockSignatureVerifierError(state_processing::block_signature_verifier::Error),
     DuplicateValidatorPublicKey,
     ValidatorPubkeyCacheFileError(String),
+    ValidatorIndexUnknown(usize),
     OpPoolError(OpPoolError),
     NaiveAggregationError(NaiveAggregationError),
     ObservedAttestationsError(ObservedAttestationsError),
@@ -105,6 +108,10 @@ pub enum BeaconChainError {
         block_slot: Slot,
         state_slot: Slot,
     },
+    InvalidStateForShuffling {
+        state_epoch: Epoch,
+        shuffling_epoch: Epoch,
+    },
 }

 easy_from_to!(SlotProcessingError, BeaconChainError);
@@ -122,6 +129,7 @@ easy_from_to!(BlockSignatureVerifierError, BeaconChainError);
 easy_from_to!(PruningError, BeaconChainError);
 easy_from_to!(ArithError, BeaconChainError);
 easy_from_to!(ForkChoiceStoreError, BeaconChainError);
+easy_from_to!(StateAdvanceError, BeaconChainError);

 #[derive(Debug)]
 pub enum BlockProductionError {
@@ -133,6 +141,7 @@ pub enum BlockProductionError {
     BlockProcessingError(BlockProcessingError),
     Eth1ChainError(Eth1ChainError),
     BeaconStateError(BeaconStateError),
+    StateAdvanceError(StateAdvanceError),
     OpPoolError(OpPoolError),
     /// The `BeaconChain` was explicitly configured _without_ a connection to eth1, therefore it
     /// cannot produce blocks.
@@ -147,3 +156,4 @@ easy_from_to!(BlockProcessingError, BlockProductionError);
 easy_from_to!(BeaconStateError, BlockProductionError);
 easy_from_to!(SlotProcessingError, BlockProductionError);
 easy_from_to!(Eth1ChainError, BlockProductionError);
+easy_from_to!(StateAdvanceError, BlockProductionError);
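The call-sites above lean on the new `state_advance` module. A rough statement of the intended contract, assuming only the two signatures used in this diff:

    // Complete advance: every skipped slot receives a correct state root;
    // required wherever the resulting state will be hashed or built upon
    // (e.g. block production).
    complete_state_advance(&mut state, state_root_opt, target_slot, spec)?;

    // Partial advance: skipped-slot state roots are placeholders, so this is
    // only sound when the caller needs shufflings/duties and never reads
    // `state.state_roots` afterwards (see the IMPORTANT comment in
    // `with_committee_cache` above).
    partial_state_advance(&mut state, state_root_opt, target_slot, spec)?;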
diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs
index 119f0172a..435e4b6f8 100644
--- a/beacon_node/beacon_chain/src/snapshot_cache.rs
+++ b/beacon_node/beacon_chain/src/snapshot_cache.rs
@@ -14,14 +14,18 @@ pub struct PreProcessingSnapshot<T: EthSpec> {
     /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for
     /// the application of another block.
     pub pre_state: BeaconState<T>,
+    /// This value is only set to `Some` if the `pre_state` was *not* advanced forward.
+    pub beacon_state_root: Option<Hash256>,
     pub beacon_block: SignedBeaconBlock<T>,
     pub beacon_block_root: Hash256,
 }

 impl<T: EthSpec> From<BeaconSnapshot<T>> for PreProcessingSnapshot<T> {
     fn from(snapshot: BeaconSnapshot<T>) -> Self {
+        let beacon_state_root = Some(snapshot.beacon_state_root());
         Self {
             pre_state: snapshot.beacon_state,
+            beacon_state_root,
             beacon_block: snapshot.beacon_block,
             beacon_block_root: snapshot.beacon_block_root,
         }
     }
 }
@@ -47,10 +51,15 @@ impl<T: EthSpec> CacheItem<T> {
     }

     pub fn into_pre_state(self) -> PreProcessingSnapshot<T> {
+        // Do not include the beacon state root if the state has been advanced.
+        let beacon_state_root =
+            Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none());
+
         PreProcessingSnapshot {
             beacon_block: self.beacon_block,
             beacon_block_root: self.beacon_block_root,
             pre_state: self.pre_state.unwrap_or(self.beacon_state),
+            beacon_state_root,
         }
     }
 }
diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs
index 64b540641..fe6e05a8b 100644
--- a/beacon_node/beacon_chain/src/state_advance_timer.rs
+++ b/beacon_node/beacon_chain/src/state_advance_timer.rs
@@ -15,8 +15,9 @@
 //! 2. There's a possibility that the head block is never built upon, causing wasted CPU cycles.
 use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
 use crate::{
-    beacon_chain::BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, snapshot_cache::StateAdvance, BeaconChain,
-    BeaconChainError, BeaconChainTypes,
+    beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT},
+    snapshot_cache::StateAdvance,
+    BeaconChain, BeaconChainError, BeaconChainTypes,
 };
 use slog::{debug, error, warn, Logger};
 use slot_clock::SlotClock;
@@ -27,7 +28,7 @@ use std::sync::{
 };
 use task_executor::TaskExecutor;
 use tokio::time::sleep;
-use types::{EthSpec, Hash256, Slot};
+use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot};

 /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform
 /// the state advancement.
@@ -252,16 +253,22 @@ fn advance_head<T: BeaconChainTypes>(
         "current_slot" => current_slot,
     );

-    // If the advanced state is in a later epoch than where it started, pre-emptively add the
-    // proposer shuffling for the new epoch into the cache.
-    if state.current_epoch() > initial_epoch {
-        debug!(
-            log,
-            "Priming proposer cache";
-            "head_root" => ?head_root,
-            "state_epoch" => state.current_epoch(),
-            "current_epoch" => current_slot.epoch(T::EthSpec::slots_per_epoch()),
-        );
+    // Build the current epoch cache, to prepare to compute proposer duties.
+    state
+        .build_committee_cache(RelativeEpoch::Current, &beacon_chain.spec)
+        .map_err(BeaconChainError::from)?;
+    // Build the next epoch cache, to prepare to compute attester duties.
+    state
+        .build_committee_cache(RelativeEpoch::Next, &beacon_chain.spec)
+        .map_err(BeaconChainError::from)?;
+
+    // If the state has advanced into a later epoch than where it started, pre-emptively add the
+    // proposer shuffling for the state's current epoch and the committee cache for the state's
+    // next epoch.
+    if initial_epoch < state.current_epoch() {
+        // Update the proposer cache.
+        //
+        // We supply the `head_root` as the decision block since the prior `if` statement
+        // guarantees the head root is the latest block from the prior epoch.
         beacon_chain
             .beacon_proposer_cache
             .lock()
@@ -274,6 +281,27 @@ fn advance_head<T: BeaconChainTypes>(
                 state.fork,
             )
             .map_err(BeaconChainError::from)?;
+
+        // Update the attester cache.
+        let shuffling_id = AttestationShufflingId::new(head_root, &state, RelativeEpoch::Next)
+            .map_err(BeaconChainError::from)?;
+        let committee_cache = state
+            .committee_cache(RelativeEpoch::Next)
+            .map_err(BeaconChainError::from)?;
+        beacon_chain
+            .shuffling_cache
+            .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+            .ok_or(BeaconChainError::AttestationCacheLockTimeout)?
+            .insert(shuffling_id.clone(), committee_cache);
+
+        debug!(
+            log,
+            "Primed proposer and attester caches";
+            "head_root" => ?head_root,
+            "next_epoch_shuffling_root" => ?shuffling_id.shuffling_decision_block,
+            "state_epoch" => state.current_epoch(),
+            "current_epoch" => current_slot.epoch(T::EthSpec::slots_per_epoch()),
+        );
     }

     let final_slot = state.slot;
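After this change the timer leaves both duty-related caches warm, so the first duties request of the new epoch should avoid a state read entirely. A condensed probe of that invariant, assuming a `beacon_chain` handle is in scope (illustrative only, error handling elided):

    // The attester cache is keyed by the same `AttestationShufflingId` the
    // timer just inserted, so an equivalent lookup immediately afterwards
    // should hit.
    let shuffling_id = AttestationShufflingId::new(head_root, &state, RelativeEpoch::Next)?;
    let is_primed = beacon_chain
        .shuffling_cache
        .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
        .map_or(false, |mut cache| cache.get(&shuffling_id).is_some());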
diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs
index c04fcc3d6..9c6b3079a 100644
--- a/beacon_node/beacon_chain/src/test_utils.rs
+++ b/beacon_node/beacon_chain/src/test_utils.rs
@@ -19,7 +19,7 @@ use rand::SeedableRng;
 use rayon::prelude::*;
 use slog::Logger;
 use slot_clock::TestingSlotClock;
-use state_processing::per_slot_processing;
+use state_processing::state_advance::complete_state_advance;
 use std::borrow::Cow;
 use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
@@ -331,6 +331,12 @@ where
         self.chain.head().unwrap().beacon_state
     }

+    pub fn get_current_state_and_root(&self) -> (BeaconState<E>, Hash256) {
+        let head = self.chain.head().unwrap();
+        let state_root = head.beacon_state_root();
+        (head.beacon_state, state_root)
+    }
+
     pub fn get_current_slot(&self) -> Slot {
         self.chain.slot().unwrap()
     }
@@ -377,10 +383,8 @@ where
         assert_ne!(slot, 0, "can't produce a block at slot 0");
         assert!(slot >= state.slot);

-        while state.slot < slot {
-            per_slot_processing(&mut state, None, &self.spec)
-                .expect("should be able to advance state to slot");
-        }
+        complete_state_advance(&mut state, None, slot, &self.spec)
+            .expect("should be able to advance state to slot");

         state
             .build_all_caches(&self.spec)
@@ -430,6 +434,7 @@ where
         &self,
         attesting_validators: &[usize],
         state: &BeaconState<E>,
+        state_root: Hash256,
         head_block_root: SignedBeaconBlockHash,
         attestation_slot: Slot,
     ) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
@@ -454,6 +459,7 @@ where
                         bc.index,
                         head_block_root.into(),
                         Cow::Borrowed(state),
+                        state_root,
                     )
                     .unwrap();

@@ -503,6 +509,7 @@ where
         &self,
         attestation_strategy: &AttestationStrategy,
         state: &BeaconState<E>,
+        state_root: Hash256,
         head_block_root: Hash256,
         attestation_slot: Slot,
     ) -> Vec<Vec<(Attestation<E>, SubnetId)>> {
@@ -513,6 +520,7 @@ where
         self.make_unaggregated_attestations(
             &validators,
             state,
+            state_root,
             head_block_root.into(),
             attestation_slot,
         )
@@ -522,11 +530,17 @@ where
         &self,
         attesting_validators: &[usize],
         state: &BeaconState<E>,
+        state_root: Hash256,
         block_hash: SignedBeaconBlockHash,
         slot: Slot,
     ) -> HarnessAttestations<E> {
-        let unaggregated_attestations =
-            self.make_unaggregated_attestations(&attesting_validators, &state, block_hash, slot);
+        let unaggregated_attestations = self.make_unaggregated_attestations(
+            &attesting_validators,
+            &state,
+            state_root,
+            block_hash,
+            slot,
+        );

         let aggregated_attestations: Vec<Option<SignedAggregateAndProof<E>>> =
             unaggregated_attestations
                 .iter()
@@ -754,12 +768,18 @@ where
     pub fn attest_block(
         &self,
         state: &BeaconState<E>,
+        state_root: Hash256,
         block_hash: SignedBeaconBlockHash,
         block: &SignedBeaconBlock<E>,
         validators: &[usize],
     ) {
-        let attestations =
-            self.make_attestations(validators, &state, block_hash, block.message.slot);
+        let attestations = self.make_attestations(
+            validators,
+            &state,
+            state_root,
+            block_hash,
+            block.message.slot,
+        );
         self.process_attestations(attestations);
     }

@@ -767,26 +787,29 @@ where
         &self,
         slot: Slot,
         state: BeaconState<E>,
+        state_root: Hash256,
         validators: &[usize],
     ) -> Result<(SignedBeaconBlockHash, BeaconState<E>), BlockError<E>> {
         let (block_hash, block, state) = self.add_block_at_slot(slot, state)?;
-        self.attest_block(&state, block_hash, &block, validators);
+        self.attest_block(&state, state_root, block_hash, &block, validators);
         Ok((block_hash, state))
     }

     pub fn add_attested_blocks_at_slots(
         &self,
         state: BeaconState<E>,
+        state_root: Hash256,
         slots: &[Slot],
         validators: &[usize],
     ) -> AddBlocksResult<E> {
         assert!(!slots.is_empty());
-        self.add_attested_blocks_at_slots_given_lbh(state, slots, validators, None)
+        self.add_attested_blocks_at_slots_given_lbh(state, state_root, slots, validators, None)
     }

     fn add_attested_blocks_at_slots_given_lbh(
         &self,
         mut state: BeaconState<E>,
+        state_root: Hash256,
         slots: &[Slot],
         validators: &[usize],
         mut latest_block_hash: Option<SignedBeaconBlockHash>,
@@ -799,7 +822,7 @@ where
         let mut state_hash_from_slot: HashMap<Slot, SignedBeaconStateHash> = HashMap::new();
         for slot in slots {
             let (block_hash, new_state) = self
-                .add_attested_block_at_slot(*slot, state, validators)
+                .add_attested_block_at_slot(*slot, state, state_root, validators)
                 .unwrap();
             state = new_state;
             block_hash_from_slot.insert(*slot, block_hash);
@@ -857,8 +880,14 @@ where
         for epoch in min_epoch.as_u64()..=max_epoch.as_u64() {
             let mut new_chains = vec![];

-            for (head_state, slots, validators, mut block_hashes, mut state_hashes, head_block) in
-                chains
+            for (
+                mut head_state,
+                slots,
+                validators,
+                mut block_hashes,
+                mut state_hashes,
+                head_block,
+            ) in chains
             {
                 let epoch_slots = slots
                     .iter()
                     .copied()
                     .collect::<Vec<_>>();

+                let head_state_root = head_state.update_tree_hash_cache().unwrap();
                 let (new_block_hashes, new_state_hashes, new_head_block, new_head_state) = self
                     .add_attested_blocks_at_slots_given_lbh(
                         head_state,
+                        head_state_root,
                         &epoch_slots,
                         &validators,
                         Some(head_block),
@@ -947,7 +978,7 @@ where
         block_strategy: BlockStrategy,
         attestation_strategy: AttestationStrategy,
     ) -> Hash256 {
-        let (state, slots) = match block_strategy {
+        let (mut state, slots) = match block_strategy {
             BlockStrategy::OnCanonicalHead => {
                 let current_slot: u64 = self.get_current_slot().into();
                 let slots: Vec<Slot> = (current_slot..(current_slot + (num_blocks as u64)))
@@ -975,8 +1006,9 @@ where
             AttestationStrategy::AllValidators => self.get_all_validators(),
             AttestationStrategy::SomeValidators(vals) => vals,
         };
+        let state_root = state.update_tree_hash_cache().unwrap();
         let (_, _, last_produced_block_hash, _) =
-            self.add_attested_blocks_at_slots(state, &slots, &validators);
+            self.add_attested_blocks_at_slots(state, state_root, &slots, &validators);
         last_produced_block_hash.into()
     }

diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
index 28bcbf389..86c358fcc 100644
--- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
+++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
@@ -23,6 +23,7 @@ use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes};
 pub struct ValidatorPubkeyCache<T: BeaconChainTypes> {
     pubkeys: Vec<PublicKey>,
     indices: HashMap<PublicKeyBytes, usize>,
+    pubkey_bytes: Vec<PublicKeyBytes>,
     backing: PubkeyCacheBacking<T>,
 }
@@ -46,6 +47,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         let mut cache = Self {
             pubkeys: vec![],
             indices: HashMap::new(),
+            pubkey_bytes: vec![],
             backing: PubkeyCacheBacking::Database(store),
         };

@@ -58,12 +60,14 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
     pub fn load_from_store(store: BeaconStore<T>) -> Result<Self, BeaconChainError> {
         let mut pubkeys = vec![];
         let mut indices = HashMap::new();
+        let mut pubkey_bytes = vec![];

         for validator_index in 0.. {
             if let Some(DatabasePubkey(pubkey)) =
                 store.get_item(&DatabasePubkey::key_for_index(validator_index))?
             {
                 pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?);
+                pubkey_bytes.push(pubkey);
                 indices.insert(pubkey, validator_index);
             } else {
                 break;
@@ -73,6 +77,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         Ok(ValidatorPubkeyCache {
             pubkeys,
             indices,
+            pubkey_bytes,
             backing: PubkeyCacheBacking::Database(store),
         })
     }
@@ -91,6 +96,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         let mut result = ValidatorPubkeyCache {
             pubkeys: Vec::with_capacity(existing_cache.pubkeys.len()),
             indices: HashMap::with_capacity(existing_cache.indices.len()),
+            pubkey_bytes: Vec::with_capacity(existing_cache.indices.len()),
             backing: PubkeyCacheBacking::Database(store),
         };
         result.import(existing_cache.pubkeys.iter().map(PublicKeyBytes::from))?;
@@ -120,6 +126,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
     where
         I: Iterator<Item = PublicKeyBytes> + ExactSizeIterator,
     {
+        self.pubkey_bytes.reserve(validator_keys.len());
         self.pubkeys.reserve(validator_keys.len());
         self.indices.reserve(validator_keys.len());

@@ -153,6 +160,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
                     .try_into()
                     .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?,
             );
+            self.pubkey_bytes.push(pubkey);
             self.indices.insert(pubkey, i);
         }

@@ -165,6 +173,11 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         self.pubkeys.get(i)
     }

+    /// Get the public key (in bytes form) for a validator with index `i`.
+    pub fn get_pubkey_bytes(&self, i: usize) -> Option<&PublicKeyBytes> {
+        self.pubkey_bytes.get(i)
+    }
+
     /// Get the index of a validator with `pubkey`.
     pub fn get_index(&self, pubkey: &PublicKeyBytes) -> Option<usize> {
         self.indices.get(pubkey).copied()
     }
@@ -264,13 +277,15 @@ impl ValidatorPubkeyCacheFile {
         let mut last = None;
         let mut pubkeys = Vec::with_capacity(list.len());
-        let mut indices = HashMap::new();
+        let mut indices = HashMap::with_capacity(list.len());
+        let mut pubkey_bytes = Vec::with_capacity(list.len());

         for (index, pubkey) in list {
             let expected = last.map(|n| n + 1);
             if expected.map_or(true, |expected| index == expected) {
                 last = Some(index);
                 pubkeys.push((&pubkey).try_into().map_err(Error::PubkeyDecode)?);
+                pubkey_bytes.push(pubkey);
                 indices.insert(pubkey, index);
             } else {
                 return Err(Error::InconsistentIndex {
@@ -283,6 +298,7 @@ impl ValidatorPubkeyCacheFile {
         Ok(ValidatorPubkeyCache {
             pubkeys,
             indices,
+            pubkey_bytes,
             backing: PubkeyCacheBacking::File(self),
         })
     }
diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs
index 4d512d841..580307ddc 100644
--- a/beacon_node/beacon_chain/tests/attestation_verification.rs
+++ b/beacon_node/beacon_chain/tests/attestation_verification.rs
@@ -909,10 +909,13 @@ fn attestation_that_skips_epochs() {
         per_slot_processing(&mut state, None, &harness.spec).expect("should process slot");
     }

+    let state_root = state.update_tree_hash_cache().unwrap();
+
     let (attestation, subnet_id) = harness
         .get_unaggregated_attestations(
             &AttestationStrategy::AllValidators,
             &state,
+            state_root,
             earlier_block.canonical_root(),
             current_slot,
         )
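The test changes that follow all use one pattern: compute the state root once, then thread it through the harness alongside the state. `update_tree_hash_cache` both returns the canonical state root and memoizes the hashing work, which is what keeps the extra argument cheap. Condensed from the changes below:

    let state_root = state.update_tree_hash_cache().unwrap();
    harness.add_attested_blocks_at_slots(state, state_root, &slots, &validators);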
diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs
index 7a7ea49c7..a25663178 100644
--- a/beacon_node/beacon_chain/tests/store_tests.rs
+++ b/beacon_node/beacon_chain/tests/store_tests.rs
@@ -276,6 +276,7 @@ fn epoch_boundary_state_attestation_processing() {
         late_attestations.extend(harness.get_unaggregated_attestations(
             &AttestationStrategy::SomeValidators(late_validators.clone()),
             &head.beacon_state,
+            head.beacon_state_root(),
             head.beacon_block_root,
             head.beacon_block.slot(),
         ));
@@ -353,9 +354,9 @@ fn delete_blocks_and_states() {
     // Finalize an initial portion of the chain.
     let initial_slots: Vec<Slot> = (1..=unforked_blocks).map(Into::into).collect();
-    let state = harness.get_current_state();
+    let (state, state_root) = harness.get_current_state_and_root();
     let all_validators = harness.get_all_validators();
-    harness.add_attested_blocks_at_slots(state, &initial_slots, &all_validators);
+    harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators);

     // Create a fork post-finalization.
     let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2;
@@ -478,9 +479,9 @@ fn multi_epoch_fork_valid_blocks_test(
     // Create the initial portion of the chain
     if initial_blocks > 0 {
         let initial_slots: Vec<Slot> = (1..=initial_blocks).map(Into::into).collect();
-        let state = harness.get_current_state();
+        let (state, state_root) = harness.get_current_state_and_root();
         let all_validators = harness.get_all_validators();
-        harness.add_attested_blocks_at_slots(state, &initial_slots, &all_validators);
+        harness.add_attested_blocks_at_slots(state, state_root, &initial_slots, &all_validators);
     }

     assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT);
@@ -759,19 +760,26 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
     let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
     let slots_per_epoch = rig.slots_per_epoch();
-    let mut state = rig.get_current_state();
+    let (mut state, state_root) = rig.get_current_state_and_root();

     let canonical_chain_slots: Vec<Slot> = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect();
-    let (canonical_chain_blocks_pre_finalization, _, _, new_state) =
-        rig.add_attested_blocks_at_slots(state, &canonical_chain_slots, &honest_validators);
+    let (canonical_chain_blocks_pre_finalization, _, _, new_state) = rig
+        .add_attested_blocks_at_slots(
+            state,
+            state_root,
+            &canonical_chain_slots,
+            &honest_validators,
+        );
     state = new_state;
     let canonical_chain_slot: u64 = rig.get_current_slot().into();

     let stray_slots: Vec<Slot> = (canonical_chain_slot + 1..rig.epoch_start_slot(2))
         .map(Slot::new)
         .collect();
+    let (current_state, current_state_root) = rig.get_current_state_and_root();
     let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots(
-        rig.get_current_state(),
+        current_state,
+        current_state_root,
         &stray_slots,
         &adversarial_validators,
     );
@@ -803,8 +811,13 @@ fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
         ..=(canonical_chain_slot + slots_per_epoch * 5))
         .map(Slot::new)
         .collect();
-    let (canonical_chain_blocks_post_finalization, _, _, _) =
-        rig.add_attested_blocks_at_slots(state, &finalization_slots, &honest_validators);
+    let state_root = state.update_tree_hash_cache().unwrap();
+    let (canonical_chain_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots(
+        state,
+        state_root,
+        &finalization_slots,
+        &honest_validators,
+    );

     // Postcondition: New blocks got finalized
     assert_eq!(
@@ -852,13 +865,14 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
     let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
     let slots_per_epoch = rig.slots_per_epoch();
-    let state = rig.get_current_state();
+    let (state, state_root) = rig.get_current_state_and_root();

     // Fill up 0th epoch
     let canonical_chain_slots_zeroth_epoch: Vec<Slot> =
         (1..rig.epoch_start_slot(1)).map(Slot::new).collect();
-    let (_, _, _, state) = rig.add_attested_blocks_at_slots(
+    let (_, _, _, mut state) = rig.add_attested_blocks_at_slots(
         state,
+        state_root,
         &canonical_chain_slots_zeroth_epoch,
         &honest_validators,
     );
@@ -868,9 +882,11 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
         ..=rig.epoch_start_slot(1) + 1)
         .map(Slot::new)
         .collect();
-    let (canonical_chain_blocks_first_epoch, _, shared_head, state) = rig
+    let state_root = state.update_tree_hash_cache().unwrap();
+    let (canonical_chain_blocks_first_epoch, _, shared_head, mut state) = rig
         .add_attested_blocks_at_slots(
             state.clone(),
+            state_root,
             &canonical_chain_slots_first_epoch,
             &honest_validators,
         );
@@ -880,8 +896,10 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
         ..=rig.epoch_start_slot(1) + 2)
         .map(Slot::new)
         .collect();
+    let state_root = state.update_tree_hash_cache().unwrap();
     let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots(
         state.clone(),
+        state_root,
         &stray_chain_slots_first_epoch,
         &adversarial_validators,
     );
@@ -917,8 +935,13 @@ fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
         ..=(canonical_chain_slot + slots_per_epoch * 5))
         .map(Slot::new)
         .collect();
-    let (canonical_chain_blocks, _, _, _) =
-        rig.add_attested_blocks_at_slots(state, &finalization_slots, &honest_validators);
+    let state_root = state.update_tree_hash_cache().unwrap();
+    let (canonical_chain_blocks, _, _, _) = rig.add_attested_blocks_at_slots(
+        state,
+        state_root,
+        &finalization_slots,
+        &honest_validators,
+    );

     // Postconditions
     assert_eq!(
@@ -967,12 +990,16 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() {
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
     let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
     let slots_per_epoch = rig.slots_per_epoch();
-    let mut state = rig.get_current_state();
+    let (mut state, state_root) = rig.get_current_state_and_root();

     // Fill up 0th epoch with canonical chain blocks
     let zeroth_epoch_slots: Vec<Slot> = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect();
-    let (canonical_chain_blocks, _, _, new_state) =
-        rig.add_attested_blocks_at_slots(state, &zeroth_epoch_slots, &honest_validators);
+    let (canonical_chain_blocks, _, _, new_state) = rig.add_attested_blocks_at_slots(
+        state,
+        state_root,
+        &zeroth_epoch_slots,
+        &honest_validators,
+    );
     state = new_state;
     let canonical_chain_slot: u64 = rig.get_current_slot().into();

@@ -980,8 +1007,10 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() {
     let first_epoch_slots: Vec<Slot> = ((rig.epoch_start_slot(1) + 1)..(rig.epoch_start_slot(2)))
         .map(Slot::new)
         .collect();
+    let state_root = state.update_tree_hash_cache().unwrap();
     let (stray_blocks, stray_states, stray_head, _) = rig.add_attested_blocks_at_slots(
         state.clone(),
+        state_root,
         &first_epoch_slots,
         &adversarial_validators,
     );
@@ -1011,7 +1040,9 @@ fn pruning_does_not_touch_blocks_prior_to_finalization() {
         ..=(canonical_chain_slot + slots_per_epoch * 4))
         .map(Slot::new)
         .collect();
-    let (_, _, _, _) = rig.add_attested_blocks_at_slots(state, &slots, &honest_validators);
+    let state_root = state.update_tree_hash_cache().unwrap();
+    let (_, _, _, _) =
+        rig.add_attested_blocks_at_slots(state, state_root, &slots, &honest_validators);

     // Postconditions
     assert_eq!(
@@ -1048,30 +1079,42 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
     let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
-    let state = rig.get_current_state();
+    let (state, state_root) = rig.get_current_state_and_root();

     // Fill up 0th epoch with canonical chain blocks
     let zeroth_epoch_slots: Vec<Slot> = (1..=rig.epoch_start_slot(1)).map(Slot::new).collect();
-    let (canonical_blocks_zeroth_epoch, _, _, state) =
-        rig.add_attested_blocks_at_slots(state, &zeroth_epoch_slots, &honest_validators);
+    let (canonical_blocks_zeroth_epoch, _, _, mut state) = rig.add_attested_blocks_at_slots(
+        state,
+        state_root,
+        &zeroth_epoch_slots,
+        &honest_validators,
+    );

     // Fill up 1st epoch. Contains a fork.
     let slots_first_epoch: Vec<Slot> = (rig.epoch_start_slot(1) + 1..rig.epoch_start_slot(2))
         .map(Into::into)
         .collect();
-    let (stray_blocks_first_epoch, stray_states_first_epoch, _, stray_state) = rig
-        .add_attested_blocks_at_slots(state.clone(), &slots_first_epoch, &adversarial_validators);
-    let (canonical_blocks_first_epoch, _, _, canonical_state) =
-        rig.add_attested_blocks_at_slots(state, &slots_first_epoch, &honest_validators);
+    let state_root = state.update_tree_hash_cache().unwrap();
+    let (stray_blocks_first_epoch, stray_states_first_epoch, _, mut stray_state) = rig
+        .add_attested_blocks_at_slots(
+            state.clone(),
+            state_root,
+            &slots_first_epoch,
+            &adversarial_validators,
+        );
+    let (canonical_blocks_first_epoch, _, _, mut canonical_state) =
+        rig.add_attested_blocks_at_slots(state, state_root, &slots_first_epoch, &honest_validators);

     // Fill up 2nd epoch. Extends both the canonical chain and the fork.
     let stray_slots_second_epoch: Vec<Slot> = (rig.epoch_start_slot(2)
         ..=rig.epoch_start_slot(2) + 1)
         .map(Into::into)
         .collect();
+    let stray_state_root = stray_state.update_tree_hash_cache().unwrap();
     let (stray_blocks_second_epoch, stray_states_second_epoch, stray_head, _) = rig
         .add_attested_blocks_at_slots(
             stray_state,
+            stray_state_root,
             &stray_slots_second_epoch,
             &adversarial_validators,
         );
@@ -1113,8 +1156,13 @@ fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
     let canonical_slots: Vec<Slot> = (rig.epoch_start_slot(2)..=rig.epoch_start_slot(6))
         .map(Into::into)
         .collect();
-    let (canonical_blocks, _, _, _) =
-        rig.add_attested_blocks_at_slots(canonical_state, &canonical_slots, &honest_validators);
+    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
+    let (canonical_blocks, _, _, _) = rig.add_attested_blocks_at_slots(
+        canonical_state,
+        canonical_state_root,
+        &canonical_slots,
+        &honest_validators,
+    );

     // Postconditions
     let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
@@ -1169,23 +1217,27 @@ fn prunes_skipped_slots_states() {
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
     let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
-    let state = rig.get_current_state();
+    let (state, state_root) = rig.get_current_state_and_root();

     let canonical_slots_zeroth_epoch: Vec<Slot> =
         (1..=rig.epoch_start_slot(1)).map(Into::into).collect();
-    let (canonical_blocks_zeroth_epoch, _, _, canonical_state) = rig.add_attested_blocks_at_slots(
-        state.clone(),
-        &canonical_slots_zeroth_epoch,
-        &honest_validators,
-    );
+    let (canonical_blocks_zeroth_epoch, _, _, mut canonical_state) = rig
+        .add_attested_blocks_at_slots(
+            state.clone(),
+            state_root,
+            &canonical_slots_zeroth_epoch,
+            &honest_validators,
+        );

     let skipped_slot: Slot = (rig.epoch_start_slot(1) + 1).into();

     let stray_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(2))
         .map(Into::into)
         .collect();
+    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
     let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots(
         canonical_state.clone(),
+        canonical_state_root,
         &stray_slots,
         &adversarial_validators,
     );
@@ -1225,8 +1277,13 @@ fn prunes_skipped_slots_states() {
     let canonical_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(7))
         .map(Into::into)
         .collect();
-    let (canonical_blocks_post_finalization, _, _, _) =
-        rig.add_attested_blocks_at_slots(canonical_state, &canonical_slots, &honest_validators);
+    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
+    let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots(
+        canonical_state,
+        canonical_state_root,
+        &canonical_slots,
+        &honest_validators,
+    );

     // Postconditions
     let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
@@ -1279,23 +1336,27 @@ fn finalizes_non_epoch_start_slot() {
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
     let rig = BeaconChainHarness::new(MinimalEthSpec, validators_keypairs);
-    let state = rig.get_current_state();
+    let (state, state_root) = rig.get_current_state_and_root();

     let canonical_slots_zeroth_epoch: Vec<Slot> =
         (1..rig.epoch_start_slot(1)).map(Into::into).collect();
-    let (canonical_blocks_zeroth_epoch, _, _, canonical_state) = rig.add_attested_blocks_at_slots(
-        state.clone(),
-        &canonical_slots_zeroth_epoch,
-        &honest_validators,
-    );
+    let (canonical_blocks_zeroth_epoch, _, _, mut canonical_state) = rig
+        .add_attested_blocks_at_slots(
+            state.clone(),
+            state_root,
+            &canonical_slots_zeroth_epoch,
+            &honest_validators,
+        );

     let skipped_slot: Slot = rig.epoch_start_slot(1).into();

     let stray_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(2))
         .map(Into::into)
         .collect();
+    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
     let (stray_blocks, stray_states, _, stray_state) = rig.add_attested_blocks_at_slots(
         canonical_state.clone(),
+        canonical_state_root,
         &stray_slots,
         &adversarial_validators,
     );
@@ -1335,8 +1396,13 @@ fn finalizes_non_epoch_start_slot() {
     let canonical_slots: Vec<Slot> = ((skipped_slot + 1).into()..rig.epoch_start_slot(7))
         .map(Into::into)
         .collect();
-    let (canonical_blocks_post_finalization, _, _, _) =
-        rig.add_attested_blocks_at_slots(canonical_state, &canonical_slots, &honest_validators);
+    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
+    let (canonical_blocks_post_finalization, _, _, _) = rig.add_attested_blocks_at_slots(
+        canonical_state,
+        canonical_state_root,
+        &canonical_slots,
+        &honest_validators,
+    );

     // Postconditions
     let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
@@ -1530,8 +1596,10 @@ fn pruning_test(
     let start_slot = Slot::new(1);
     let divergence_slot = start_slot + num_initial_blocks;
+    let (state, state_root) = harness.get_current_state_and_root();
     let (_, _, _, divergence_state) = harness.add_attested_blocks_at_slots(
-        harness.get_current_state(),
+        state,
+        state_root,
         &slots(start_slot, num_initial_blocks)[..],
         &honest_validators,
     );
@@ -1553,7 +1621,7 @@ fn pruning_test(
             faulty_validators,
         ),
     ]);
-    let (_, _, _, canonical_state) = chains.remove(0);
+    let (_, _, _, mut canonical_state) = chains.remove(0);
     let (stray_blocks, stray_states, _, stray_head_state) = chains.remove(0);
     let stray_head_slot = divergence_slot + num_fork_skips + num_fork_blocks - 1;
@@ -1577,8 +1645,10 @@ fn pruning_test(
     // Trigger finalization
     let num_finalization_blocks = 4 * E::slots_per_epoch();
     let canonical_slot = divergence_slot + num_canonical_skips + num_canonical_middle_blocks;
+    let canonical_state_root = canonical_state.update_tree_hash_cache().unwrap();
     harness.add_attested_blocks_at_slots(
         canonical_state,
+        canonical_state_root,
         &slots(canonical_slot, num_finalization_blocks),
         &honest_validators,
     );
diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs
index 9281b1a5e..616f2b544 100644
--- a/beacon_node/beacon_chain/tests/tests.rs
+++ b/beacon_node/beacon_chain/tests/tests.rs
@@ -436,23 +436,16 @@ fn attestations_with_increasing_slots() {
             AttestationStrategy::SomeValidators(vec![]),
         );

-        attestations.extend(
-            harness.get_unaggregated_attestations(
-                &AttestationStrategy::AllValidators,
-                &harness.chain.head().expect("should get head").beacon_state,
-                harness
-                    .chain
-                    .head()
-                    .expect("should get head")
-                    .beacon_block_root,
-                harness
-                    .chain
-                    .head()
-                    .expect("should get head")
-                    .beacon_block
-                    .slot(),
-            ),
-        );
+        let head = harness.chain.head().unwrap();
+        let head_state_root = head.beacon_state_root();
+
+        attestations.extend(harness.get_unaggregated_attestations(
+            &AttestationStrategy::AllValidators,
+            &head.beacon_state,
+            head_state_root,
+            head.beacon_block_root,
+            head.beacon_block.slot(),
+        ));

         harness.advance_slot();
     }
diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs
new file mode 100644
index 000000000..52400f003
--- /dev/null
+++ b/beacon_node/http_api/src/attester_duties.rs
@@ -0,0 +1,201 @@
+//! Contains the handler for the `GET validator/duties/attester/{epoch}` endpoint.
+
+use crate::state_id::StateId;
+use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use eth2::types::{self as api_types};
+use state_processing::state_advance::partial_state_advance;
+use types::{
+    AttestationDuty, BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, RelativeEpoch,
+};
+
+/// The struct that is returned to the requesting HTTP client.
+type ApiDuties = api_types::DutiesResponse<Vec<api_types::AttesterData>>;
+
+/// Handles a request from the HTTP API for attester duties.
+pub fn attester_duties<T: BeaconChainTypes>(
+    request_epoch: Epoch,
+    request_indices: &[u64],
+    chain: &BeaconChain<T>,
+) -> Result<ApiDuties, warp::reject::Rejection> {
+    let current_epoch = chain
+        .epoch()
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+    let next_epoch = current_epoch + 1;
+
+    if request_epoch > next_epoch {
+        Err(warp_utils::reject::custom_bad_request(format!(
+            "request epoch {} is more than one epoch past the current epoch {}",
+            request_epoch, current_epoch
+        )))
+    } else if request_epoch == current_epoch || request_epoch == next_epoch {
+        cached_attestation_duties(request_epoch, request_indices, chain)
+    } else {
+        compute_historic_attester_duties(request_epoch, request_indices, chain)
+    }
+}
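A worked restatement of the dispatch window above; this helper is hypothetical and for illustration only, not part of the diff:

    fn route(request_epoch: Epoch, current_epoch: Epoch) -> &'static str {
        let next_epoch = current_epoch + 1;
        if request_epoch > next_epoch {
            // E.g. current epoch 10, request 12: rejected as a bad request.
            "reject"
        } else if request_epoch >= current_epoch {
            // E.g. requests for epoch 10 or 11: served from the shuffling cache.
            "cached"
        } else {
            // E.g. epoch 9 or earlier: computed from a state, ignoring the cache.
            "historic"
        }
    }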
+
+fn cached_attestation_duties<T: BeaconChainTypes>(
+    request_epoch: Epoch,
+    request_indices: &[u64],
+    chain: &BeaconChain<T>,
+) -> Result<ApiDuties, warp::reject::Rejection> {
+    let head = chain
+        .head_info()
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    let (duties, dependent_root) = chain
+        .validator_attestation_duties(&request_indices, request_epoch, head.block_root)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    convert_to_api_response(duties, request_indices, dependent_root, chain)
+}
+
+/// Compute some attester duties by reading a `BeaconState` from disk, completely ignoring the
+/// shuffling cache.
+fn compute_historic_attester_duties<T: BeaconChainTypes>(
+    request_epoch: Epoch,
+    request_indices: &[u64],
+    chain: &BeaconChain<T>,
+) -> Result<ApiDuties, warp::reject::Rejection> {
+    // If the head is quite old then it might still be relevant for a historical request.
+    //
+    // Use the `with_head` function to read & clone in a single call to avoid race conditions.
+    let state_opt = chain
+        .with_head(|head| {
+            if head.beacon_state.current_epoch() <= request_epoch {
+                Ok(Some((
+                    head.beacon_state_root(),
+                    head.beacon_state
+                        .clone_with(CloneConfig::committee_caches_only()),
+                )))
+            } else {
+                Ok(None)
+            }
+        })
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    let mut state = if let Some((state_root, mut state)) = state_opt {
+        // If we've loaded the head state, it might be from a previous epoch; ensure it's in a
+        // suitable epoch.
+        ensure_state_knows_attester_duties_for_epoch(
+            &mut state,
+            state_root,
+            request_epoch,
+            &chain.spec,
+        )?;
+        state
+    } else {
+        StateId::slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch())).state(&chain)?
+    };
+
+    // Sanity-check the state lookup.
+    if !(state.current_epoch() == request_epoch || state.current_epoch() + 1 == request_epoch) {
+        return Err(warp_utils::reject::custom_server_error(format!(
+            "state epoch {} not suitable for request epoch {}",
+            state.current_epoch(),
+            request_epoch
+        )));
+    }
+
+    let relative_epoch =
+        RelativeEpoch::from_epoch(state.current_epoch(), request_epoch).map_err(|e| {
+            warp_utils::reject::custom_server_error(format!("invalid epoch for state: {:?}", e))
+        })?;
+
+    state
+        .build_committee_cache(relative_epoch, &chain.spec)
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    let dependent_root = state
+        // The only block which decides its own shuffling is the genesis block.
+        .attester_shuffling_decision_root(chain.genesis_block_root, relative_epoch)
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    let duties = request_indices
+        .iter()
+        .map(|&validator_index| {
+            state
+                .get_attestation_duties(validator_index as usize, relative_epoch)
+                .map_err(BeaconChainError::from)
+        })
+        .collect::<Result<Vec<_>, _>>()
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    convert_to_api_response(duties, request_indices, dependent_root, chain)
+}
+
+fn ensure_state_knows_attester_duties_for_epoch<E: EthSpec>(
+    state: &mut BeaconState<E>,
+    state_root: Hash256,
+    target_epoch: Epoch,
+    spec: &ChainSpec,
+) -> Result<(), warp::reject::Rejection> {
+    // Protect against an inconsistent slot clock.
+    if state.current_epoch() > target_epoch {
+        return Err(warp_utils::reject::custom_server_error(format!(
+            "state epoch {} is later than target epoch {}",
+            state.current_epoch(),
+            target_epoch
+        )));
+    } else if state.current_epoch() + 1 < target_epoch {
+        // Since there's a one-epoch look-ahead on attester duties, it suffices to only advance to
+        // the prior epoch.
+        let target_slot = target_epoch
+            .saturating_sub(1_u64)
+            .start_slot(E::slots_per_epoch());
+
+        // A "partial" state advance is adequate since attester duties don't rely on state roots.
+        partial_state_advance(state, Some(state_root), target_slot, spec)
+            .map_err(BeaconChainError::from)
+            .map_err(warp_utils::reject::beacon_chain_error)?;
+    }
+
+    Ok(())
+}
+
+/// Convert the internal representation of attester duties into the format returned to the HTTP
+/// client.
+fn convert_to_api_response<T: BeaconChainTypes>(
+    duties: Vec<Option<AttestationDuty>>,
+    indices: &[u64],
+    dependent_root: Hash256,
+    chain: &BeaconChain<T>,
+) -> Result<ApiDuties, warp::reject::Rejection> {
+    // Sanity check: the caller always supplies one duty (possibly `None`) per requested index.
+    if duties.len() != indices.len() {
+        return Err(warp_utils::reject::custom_server_error(format!(
+            "duties length {} does not match indices length {}",
+            duties.len(),
+            indices.len()
+        )));
+    }
+
+    let usize_indices = indices.iter().map(|i| *i as usize).collect::<Vec<usize>>();
+    let index_to_pubkey_map = chain
+        .validator_pubkey_bytes_many(&usize_indices)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    let data = duties
+        .into_iter()
+        .zip(indices)
+        .filter_map(|(duty_opt, &validator_index)| {
+            let duty = duty_opt?;
+            Some(api_types::AttesterData {
+                pubkey: *index_to_pubkey_map.get(&(validator_index as usize))?,
+                validator_index,
+                committees_at_slot: duty.committees_at_slot,
+                committee_index: duty.index,
+                committee_length: duty.committee_len as u64,
+                validator_committee_index: duty.committee_position as u64,
+                slot: duty.slot,
+            })
+        })
+        .collect::<Vec<_>>();
+
+    Ok(api_types::DutiesResponse {
+        dependent_root,
+        data,
+    })
+}
diff --git a/beacon_node/http_api/src/beacon_proposer_cache.rs b/beacon_node/http_api/src/beacon_proposer_cache.rs
deleted file mode 100644
index 4347ec951..000000000
--- a/beacon_node/http_api/src/beacon_proposer_cache.rs
+++ /dev/null
@@ -1,186 +0,0 @@
-use crate::metrics;
-use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
-use eth2::types::ProposerData;
-use fork_choice::ProtoBlock;
-use slot_clock::SlotClock;
-use state_processing::per_slot_processing;
-use types::{BeaconState, Epoch, EthSpec, Hash256, PublicKeyBytes};
-
-/// This sets a maximum bound on the number of epochs to skip whilst instantiating the cache for
-/// the first time.
-const EPOCHS_TO_SKIP: u64 = 2;
-
-/// Caches the beacon block proposers for a given `epoch` and `epoch_boundary_root`.
-///
-/// This cache is only able to contain a single set of proposers and is only
-/// intended to cache the proposers for the current epoch according to the head
-/// of the chain. A change in epoch or re-org to a different chain may cause a
-/// cache miss and rebuild.
-pub struct BeaconProposerCache {
-    epoch: Epoch,
-    decision_block_root: Hash256,
-    proposers: Vec<ProposerData>,
-}
-
-impl BeaconProposerCache {
-    /// Create a new cache for the current epoch of the `chain`.
-    pub fn new<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> Result<Self, BeaconChainError> {
-        let head_root = chain.head_beacon_block_root()?;
-        let head_block = chain
-            .fork_choice
-            .read()
-            .get_block(&head_root)
-            .ok_or(BeaconChainError::MissingBeaconBlock(head_root))?;
-
-        // If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at
-        // the epoch of the head. This prevents doing a massive amount of skip slots when starting
-        // a new database from genesis.
-        let epoch = {
-            let epoch_now = chain
-                .epoch()
-                .unwrap_or_else(|_| chain.spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()));
-            let head_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch());
-            if epoch_now > head_epoch + EPOCHS_TO_SKIP {
-                head_epoch
-            } else {
-                epoch_now
-            }
-        };
-
-        Self::for_head_block(chain, epoch, head_root, head_block)
-    }
-
-    /// Create a new cache that contains the shuffling for `current_epoch`,
-    /// assuming that `head_root` and `head_block` represents the most recent
-    /// canonical block.
-    fn for_head_block<T: BeaconChainTypes>(
-        chain: &BeaconChain<T>,
-        current_epoch: Epoch,
-        head_root: Hash256,
-        head_block: ProtoBlock,
-    ) -> Result<Self, BeaconChainError> {
-        let _timer = metrics::start_timer(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_TIMES);
-
-        let mut head_state = chain
-            .get_state(&head_block.state_root, Some(head_block.slot))?
-            .ok_or(BeaconChainError::MissingBeaconState(head_block.state_root))?;
-
-        let decision_block_root = Self::decision_block_root(current_epoch, head_root, &head_state)?;
-
-        // We *must* skip forward to the current epoch to obtain valid proposer
-        // duties. We cannot skip to the previous epoch, like we do with
-        // attester duties.
-        while head_state.current_epoch() < current_epoch {
-            // Skip slots until the current epoch, providing `Hash256::zero()` as the state root
-            // since we don't require it to be valid to identify producers.
-            per_slot_processing(&mut head_state, Some(Hash256::zero()), &chain.spec)?;
-        }
-
-        let proposers = current_epoch
-            .slot_iter(T::EthSpec::slots_per_epoch())
-            .map(|slot| {
-                head_state
-                    .get_beacon_proposer_index(slot, &chain.spec)
-                    .map_err(BeaconChainError::from)
-                    .and_then(|i| {
-                        let pubkey = chain
-                            .validator_pubkey(i)?
-                            .ok_or(BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?;
-
-                        Ok(ProposerData {
-                            pubkey: PublicKeyBytes::from(pubkey),
-                            validator_index: i as u64,
-                            slot,
-                        })
-                    })
-            })
-            .collect::<Result<_, _>>()?;
-
-        Ok(Self {
-            epoch: current_epoch,
-            decision_block_root,
-            proposers,
-        })
-    }
-
-    /// Returns a block root which can be used to key the shuffling obtained from the following
-    /// parameters:
-    ///
-    /// - `shuffling_epoch`: the epoch for which the shuffling pertains.
-    /// - `head_block_root`: the block root at the head of the chain.
-    /// - `head_block_state`: the state of `head_block_root`.
-    pub fn decision_block_root<E: EthSpec>(
-        shuffling_epoch: Epoch,
-        head_block_root: Hash256,
-        head_block_state: &BeaconState<E>,
-    ) -> Result<Hash256, BeaconChainError> {
-        let decision_slot = shuffling_epoch
-            .start_slot(E::slots_per_epoch())
-            .saturating_sub(1_u64);
-
-        // If decision slot is equal to or ahead of the head, the block root is the head block root
-        if decision_slot >= head_block_state.slot {
-            Ok(head_block_root)
-        } else {
-            head_block_state
-                .get_block_root(decision_slot)
-                .map(|root| *root)
-                .map_err(Into::into)
-        }
-    }
-
-    /// Return the proposers for the given `Epoch`.
-    ///
-    /// The cache may be rebuilt if:
-    ///
-    /// - The epoch has changed since the last cache build.
-    /// - There has been a re-org that crosses an epoch boundary.
-    pub fn get_proposers<T: BeaconChainTypes>(
-        &mut self,
-        chain: &BeaconChain<T>,
-        epoch: Epoch,
-    ) -> Result<Vec<ProposerData>, warp::Rejection> {
-        let current_epoch = chain
-            .slot_clock
-            .now_or_genesis()
-            .ok_or_else(|| {
-                warp_utils::reject::custom_server_error("unable to read slot clock".to_string())
-            })?
-            .epoch(T::EthSpec::slots_per_epoch());
-
-        // Disallow requests that are outside the current epoch. This ensures the cache doesn't get
-        // washed-out with old values.
-        if current_epoch != epoch {
-            return Err(warp_utils::reject::custom_bad_request(format!(
-                "requested epoch is {} but only current epoch {} is allowed",
-                epoch, current_epoch
-            )));
-        }
-
-        let (head_block_root, head_decision_block_root) = chain
-            .with_head(|head| {
-                Self::decision_block_root(current_epoch, head.beacon_block_root, &head.beacon_state)
-                    .map(|decision_root| (head.beacon_block_root, decision_root))
-            })
-            .map_err(warp_utils::reject::beacon_chain_error)?;
-
-        let head_block = chain
-            .fork_choice
-            .read()
-            .get_block(&head_block_root)
-            .ok_or(BeaconChainError::MissingBeaconBlock(head_block_root))
-            .map_err(warp_utils::reject::beacon_chain_error)?;
-
-        // Rebuild the cache if this call causes a cache-miss.
- if self.epoch != current_epoch || self.decision_block_root != head_decision_block_root { - metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL); - - *self = Self::for_head_block(chain, current_epoch, head_block_root, head_block) - .map_err(warp_utils::reject::beacon_chain_error)?; - } else { - metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL); - } - - Ok(self.proposers.clone()) - } -} diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 341048e31..653820367 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -5,9 +5,10 @@ //! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are //! used for development. -mod beacon_proposer_cache; +mod attester_duties; mod block_id; mod metrics; +mod proposer_duties; mod state_id; mod validator_inclusion; @@ -17,19 +18,16 @@ use beacon_chain::{ validator_monitor::{get_block_delay_ms, timestamp_now}, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, }; -use beacon_proposer_cache::BeaconProposerCache; use block_id::BlockId; use eth2::types::{self as api_types, ValidatorId}; use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use network::NetworkMessage; -use parking_lot::Mutex; use serde::{Deserialize, Serialize}; use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_id::StateId; -use state_processing::per_slot_processing; use std::borrow::Cow; use std::convert::TryInto; use std::future::Future; @@ -38,9 +36,8 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{wrappers::BroadcastStream, StreamExt}; use types::{ - Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec, - Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof, - SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, + Attestation, AttesterSlashing, CommitteeCache, Epoch, EthSpec, ProposerSlashing, RelativeEpoch, + SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, }; use warp::http::StatusCode; use warp::sse::Event; @@ -240,30 +237,6 @@ pub fn serve( let eth1_v1 = warp::path(API_PREFIX).and(warp::path(API_VERSION)); - // Instantiate the beacon proposer cache. - let beacon_proposer_cache = ctx - .chain - .as_ref() - .map(|chain| BeaconProposerCache::new(&chain)) - .transpose() - .map_err(|e| format!("Unable to initialize beacon proposer cache: {:?}", e))? - .map(Mutex::new) - .map(Arc::new); - - // Create a `warp` filter that provides access to the proposer cache. - let beacon_proposer_cache = || { - warp::any() - .map(move || beacon_proposer_cache.clone()) - .and_then(|beacon_proposer_cache| async move { - match beacon_proposer_cache { - Some(cache) => Ok(cache), - None => Err(warp_utils::reject::custom_not_found( - "Beacon proposer cache is not initialized.".to_string(), - )), - } - }) - }; - // Create a `warp` filter that provides access to the network globals. 
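
The deleted `beacon_proposer_cache()` filter above used the standard `warp` idiom for injecting shared state into handlers, the same idiom behind the `network_globals` filter mentioned just above. A minimal self-contained sketch of that idiom, assuming `warp` and `tokio` (with the `macros` feature) as dependencies and a hypothetical shared counter as the state; the code that follows in the diff wires the real `network_globals` through the same pattern.

use std::sync::{Arc, Mutex};
use warp::Filter;

#[tokio::main]
async fn main() {
    // Shared state to inject into every request handler.
    let counter = Arc::new(Mutex::new(0u64));

    // `warp::any().map(move || x.clone())` is the standard way to turn a
    // value into a filter that hands each handler its own clone.
    let with_counter = warp::any().map(move || counter.clone());

    let route = warp::path("hits")
        .and(with_counter)
        .map(|c: Arc<Mutex<u64>>| {
            let mut n = c.lock().unwrap();
            *n += 1;
            format!("hits: {}", n)
        });

    warp::serve(route).run(([127, 0, 0, 1], 3030)).await;
}
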
let inner_network_globals = ctx.network_globals.clone(); let network_globals = warp::any() @@ -1674,89 +1647,10 @@ pub fn serve( .and(warp::path::end()) .and(not_while_syncing_filter.clone()) .and(chain_filter.clone()) - .and(beacon_proposer_cache()) - .and_then( - |epoch: Epoch, - chain: Arc>, - beacon_proposer_cache: Arc>| { - blocking_json_task(move || { - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; - - if epoch > current_epoch { - return Err(warp_utils::reject::custom_bad_request(format!( - "request epoch {} is ahead of the current epoch {}", - epoch, current_epoch - ))); - } - - if epoch == current_epoch { - let dependent_root_slot = current_epoch - .start_slot(T::EthSpec::slots_per_epoch()) - 1; - let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? { - chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)? - } else { - chain - .root_at_slot(dependent_root_slot) - .map_err(warp_utils::reject::beacon_chain_error)? - .unwrap_or(chain.genesis_block_root) - }; - - beacon_proposer_cache - .lock() - .get_proposers(&chain, epoch) - .map(|duties| api_types::DutiesResponse { data: duties, dependent_root }) - } else { - let state = - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) - .state(&chain)?; - - let dependent_root_slot = state.current_epoch() - .start_slot(T::EthSpec::slots_per_epoch()) - 1; - let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? { - chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)? - } else { - chain - .root_at_slot(dependent_root_slot) - .map_err(warp_utils::reject::beacon_chain_error)? - .unwrap_or(chain.genesis_block_root) - }; - - epoch - .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| { - state - .get_beacon_proposer_index(slot, &chain.spec) - .map_err(warp_utils::reject::beacon_state_error) - .and_then(|i| { - let pubkey = - chain.validator_pubkey(i) - .map_err(warp_utils::reject::beacon_chain_error)? - .ok_or_else(|| - warp_utils::reject::beacon_chain_error( - BeaconChainError::ValidatorPubkeyCacheIncomplete(i) - ) - )?; - - Ok(api_types::ProposerData { - pubkey: PublicKeyBytes::from(pubkey), - validator_index: i as u64, - slot, - }) - }) - }) - .collect::, _>>() - .map(|duties| { - api_types::DutiesResponse { - dependent_root, - data: duties, - } - }) - } - }) - }, - ); + .and(log_filter.clone()) + .and_then(|epoch: Epoch, chain: Arc>, log: Logger| { + blocking_json_task(move || proposer_duties::proposer_duties(epoch, &chain, &log)) + }); // GET validator/blocks/{slot} let get_validator_blocks = eth1_v1 @@ -1865,188 +1759,7 @@ pub fn serve( .and_then( |epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc>| { blocking_json_task(move || { - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; - - if epoch > current_epoch + 1 { - return Err(warp_utils::reject::custom_bad_request(format!( - "request epoch {} is more than one epoch past the current epoch {}", - epoch, current_epoch - ))); - } - - let validator_count = StateId::head() - .map_state(&chain, |state| Ok(state.validators.len() as u64))?; - - let pubkeys = indices - .0 - .iter() - .filter(|i| **i < validator_count as u64) - .map(|i| { - let pubkey = chain - .validator_pubkey(*i as usize) - .map_err(warp_utils::reject::beacon_chain_error)? 
- .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "unknown validator index {}", - *i - )) - })?; - - Ok((*i, pubkey)) - }) - .collect::, warp::Rejection>>()?; - - // Converts the internal Lighthouse `AttestationDuty` struct into an - // API-conforming `AttesterData` struct. - let convert = |validator_index: u64, - pubkey: PublicKey, - duty: AttestationDuty| - -> api_types::AttesterData { - api_types::AttesterData { - pubkey: pubkey.into(), - validator_index, - committees_at_slot: duty.committees_at_slot, - committee_index: duty.index, - committee_length: duty.committee_len as u64, - validator_committee_index: duty.committee_position as u64, - slot: duty.slot, - } - }; - - // Here we have two paths: - // - // ## Fast - // - // If the request epoch is the current epoch, use the cached beacon chain - // method. - // - // ## Slow - // - // If the request epoch is prior to the current epoch, load a beacon state from - // disk - // - // The idea is to stop historical requests from washing out the cache on the - // beacon chain, whilst allowing a VC to request duties quickly. - let (duties, dependent_root) = if epoch == current_epoch { - // Fast path. - let duties = pubkeys - .into_iter() - // Exclude indices which do not represent a known public key and a - // validator duty. - .filter_map(|(i, pubkey)| { - Some( - chain - .validator_attestation_duty(i as usize, epoch) - .transpose()? - .map_err(warp_utils::reject::beacon_chain_error) - .map(|duty| convert(i, pubkey, duty)), - ) - }) - .collect::, warp::Rejection>>()?; - - let dependent_root_slot = - (epoch - 1).start_slot(T::EthSpec::slots_per_epoch()) - 1; - let dependent_root = if dependent_root_slot - > chain - .best_slot() - .map_err(warp_utils::reject::beacon_chain_error)? - { - chain - .head_beacon_block_root() - .map_err(warp_utils::reject::beacon_chain_error)? - } else { - chain - .root_at_slot(dependent_root_slot) - .map_err(warp_utils::reject::beacon_chain_error)? - .unwrap_or(chain.genesis_block_root) - }; - - (duties, dependent_root) - } else { - // If the head state is equal to or earlier than the request epoch, use it. - let mut state = chain - .with_head(|head| { - if head.beacon_state.current_epoch() <= epoch { - Ok(Some( - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), - )) - } else { - Ok(None) - } - }) - .map_err(warp_utils::reject::beacon_chain_error)? - .map(Result::Ok) - .unwrap_or_else(|| { - StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) - .state(&chain) - })?; - - // Only skip forward to the epoch prior to the request, since we have a - // one-epoch look-ahead on shuffling. - while state - .next_epoch() - .map_err(warp_utils::reject::beacon_state_error)? - < epoch - { - // Don't calculate state roots since they aren't required for calculating - // shuffling (achieved by providing Hash256::zero()). - per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec) - .map_err(warp_utils::reject::slot_processing_error)?; - } - - let relative_epoch = - RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err( - |e| { - warp_utils::reject::custom_server_error(format!( - "unable to obtain suitable state: {:?}", - e - )) - }, - )?; - - state - .build_committee_cache(relative_epoch, &chain.spec) - .map_err(warp_utils::reject::beacon_state_error)?; - let duties = pubkeys - .into_iter() - .filter_map(|(i, pubkey)| { - Some( - state - .get_attestation_duties(i as usize, relative_epoch) - .transpose()? 
-                                .map_err(warp_utils::reject::beacon_state_error)
-                                .map(|duty| convert(i, pubkey, duty)),
-                        )
-                    })
-                    .collect::, warp::Rejection>>()?;
-
-                let dependent_root_slot =
-                    (epoch - 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
-                let dependent_root = if dependent_root_slot
-                    > chain
-                        .best_slot()
-                        .map_err(warp_utils::reject::beacon_chain_error)?
-                {
-                    chain
-                        .head_beacon_block_root()
-                        .map_err(warp_utils::reject::beacon_chain_error)?
-                } else {
-                    chain
-                        .root_at_slot(dependent_root_slot)
-                        .map_err(warp_utils::reject::beacon_chain_error)?
-                        .unwrap_or(chain.genesis_block_root)
-                };
-
-                (duties, dependent_root)
-            };
-
-            Ok(api_types::DutiesResponse {
-                dependent_root,
-                data: duties,
-            })
+                    attester_duties::attester_duties(epoch, &indices.0, &chain)
                })
            },
        );
diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs
new file mode 100644
index 000000000..7505aa66d
--- /dev/null
+++ b/beacon_node/http_api/src/proposer_duties.rs
@@ -0,0 +1,280 @@
+//! Contains the handler for the `GET validator/duties/proposer/{epoch}` endpoint.
+
+use crate::state_id::StateId;
+use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
+use eth2::types::{self as api_types};
+use slog::{debug, Logger};
+use state_processing::state_advance::partial_state_advance;
+use std::cmp::Ordering;
+use types::{BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, Slot};
+
+/// The struct that is returned to the requesting HTTP client.
+type ApiDuties = api_types::DutiesResponse>;
+
+/// Handles a request from the HTTP API for proposer duties.
+pub fn proposer_duties(
+    request_epoch: Epoch,
+    chain: &BeaconChain,
+    log: &Logger,
+) -> Result {
+    let current_epoch = chain
+        .epoch()
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    match request_epoch.cmp(&current_epoch) {
+        // request_epoch > current_epoch
+        //
+        // Reject queries about the future, since they're very expensive and there's no
+        // look-ahead for proposer duties.
+        Ordering::Greater => Err(warp_utils::reject::custom_bad_request(format!(
+            "request epoch {} is ahead of the current epoch {}",
+            request_epoch, current_epoch
+        ))),
+        // request_epoch == current_epoch
+        //
+        // Queries about the current epoch should attempt to find the value in the cache. If it
+        // can't be found, it should be computed and then stored in the cache for future gains.
+        Ordering::Equal => {
+            if let Some(duties) = try_proposer_duties_from_cache(request_epoch, chain)? {
+                Ok(duties)
+            } else {
+                debug!(
+                    log,
+                    "Proposer cache miss";
+                    "request_epoch" => request_epoch,
+                );
+                compute_and_cache_proposer_duties(request_epoch, chain)
+            }
+        }
+        // request_epoch < current_epoch
+        //
+        // Queries about the past are handled with a slow path.
+        Ordering::Less => compute_historic_proposer_duties(request_epoch, chain),
+    }
+}
+
+/// Attempt to load the proposer duties from the `chain.beacon_proposer_cache`, returning `Ok(None)`
+/// if there is a cache miss.
+///
+/// ## Notes
+///
+/// The `current_epoch` value should equal the current epoch on the slot clock, otherwise we risk
+/// washing out the proposer cache at the expense of block processing.
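
The new handler below differs from the deleted HTTP-layer cache above in how it keys entries: the deleted cache held a single epoch's proposers and rebuilt itself on any miss, whereas `chain.beacon_proposer_cache` is keyed by the pair (shuffling decision root, epoch), so entries for different forks and epochs can coexist. A std-only toy stand-in (hypothetical 32-byte roots, not the real LRU-backed cache) showing why that pair is a stable identity for a duty set:

use std::collections::HashMap;

type Root = [u8; 32]; // stand-in for Hash256
type Epoch = u64;

/// Toy proposer cache: the decision root pins down the chain of blocks that
/// fed the shuffling seed, and the epoch pins down which shuffling of that
/// chain is meant. Together they uniquely identify the duty set.
struct ProposerCache {
    map: HashMap<(Root, Epoch), Vec<usize>>, // validator indices, one per slot
}

impl ProposerCache {
    fn get(&self, dependent_root: Root, epoch: Epoch) -> Option<&Vec<usize>> {
        self.map.get(&(dependent_root, epoch))
    }
    fn insert(&mut self, dependent_root: Root, epoch: Epoch, proposers: Vec<usize>) {
        self.map.insert((dependent_root, epoch), proposers);
    }
}

fn main() {
    let mut cache = ProposerCache { map: HashMap::new() };
    let fork_a = [0xaa; 32];
    let fork_b = [0xbb; 32];
    cache.insert(fork_a, 10, vec![3, 1, 4]);
    cache.insert(fork_b, 10, vec![2, 7, 1]); // same epoch, different fork: distinct entry
    assert_ne!(cache.get(fork_a, 10), cache.get(fork_b, 10));
    assert!(cache.get(fork_a, 11).is_none()); // same fork, different epoch: miss
    println!("keyed cache sketch ok");
}
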
+fn try_proposer_duties_from_cache(
+    current_epoch: Epoch,
+    chain: &BeaconChain,
+) -> Result, warp::reject::Rejection> {
+    let head = chain
+        .head_info()
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+    let head_epoch = head.slot.epoch(T::EthSpec::slots_per_epoch());
+
+    let dependent_root = match head_epoch.cmp(&current_epoch) {
+        // head_epoch == current_epoch
+        Ordering::Equal => head.proposer_shuffling_decision_root,
+        // head_epoch < current_epoch
+        Ordering::Less => head.block_root,
+        // head_epoch > current_epoch
+        Ordering::Greater => {
+            return Err(warp_utils::reject::custom_server_error(format!(
+                "head epoch {} is later than current epoch {}",
+                head_epoch, current_epoch
+            )))
+        }
+    };
+
+    chain
+        .beacon_proposer_cache
+        .lock()
+        .get_epoch::(dependent_root, current_epoch)
+        .cloned()
+        .map(|indices| {
+            convert_to_api_response(chain, current_epoch, dependent_root, indices.to_vec())
+        })
+        .transpose()
+}
+
+/// Compute the proposer duties using the head state, add the duties to the proposer cache and
+/// return the proposers.
+///
+/// This method does *not* attempt to read the values from the cache before computing them. See
+/// `try_proposer_duties_from_cache` to read values.
+///
+/// ## Notes
+///
+/// The `current_epoch` value should equal the current epoch on the slot clock, otherwise we risk
+/// washing out the proposer cache at the expense of block processing.
+fn compute_and_cache_proposer_duties(
+    current_epoch: Epoch,
+    chain: &BeaconChain,
+) -> Result {
+    // Take a copy of the head of the chain.
+    let head = chain
+        .head()
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+    let mut state = head.beacon_state;
+    let head_state_root = head.beacon_block.state_root();
+
+    // Advance the state into the requested epoch.
+    ensure_state_is_in_epoch(&mut state, head_state_root, current_epoch, &chain.spec)?;
+
+    let indices = state
+        .get_beacon_proposer_indices(&chain.spec)
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    let dependent_root = state
+        // The only block which decides its own shuffling is the genesis block.
+        .proposer_shuffling_decision_root(chain.genesis_block_root)
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    // Prime the proposer shuffling cache with the newly-learned value.
+    chain
+        .beacon_proposer_cache
+        .lock()
+        .insert(
+            state.current_epoch(),
+            dependent_root,
+            indices.clone(),
+            state.fork,
+        )
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    convert_to_api_response(chain, current_epoch, dependent_root, indices)
+}
+
+/// Compute some proposer duties by reading a `BeaconState` from disk, completely ignoring the
+/// `beacon_proposer_cache`.
+fn compute_historic_proposer_duties(
+    epoch: Epoch,
+    chain: &BeaconChain,
+) -> Result {
+    // If the head is quite old then it might still be relevant for a historical request.
+    //
+    // Use the `with_head` function to read & clone in a single call to avoid race conditions.
+    let state_opt = chain
+        .with_head(|head| {
+            if head.beacon_state.current_epoch() <= epoch {
+                Ok(Some((
+                    head.beacon_state_root(),
+                    head.beacon_state
+                        .clone_with(CloneConfig::committee_caches_only()),
+                )))
+            } else {
+                Ok(None)
+            }
+        })
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    let state = if let Some((state_root, mut state)) = state_opt {
+        // If we've loaded the head state, it might be from a previous epoch; ensure it's in a
+        // suitable epoch.
+        ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec)?;
+        state
+    } else {
+        StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(&chain)?
+    };
+
+    // Ensure the state lookup was correct.
+    if state.current_epoch() != epoch {
+        return Err(warp_utils::reject::custom_server_error(format!(
+            "state epoch {} not equal to request epoch {}",
+            state.current_epoch(),
+            epoch
+        )));
+    }
+
+    let indices = state
+        .get_beacon_proposer_indices(&chain.spec)
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    // We can supply the genesis block root as the block root since we know that the only block
+    // that decides its own shuffling is the genesis block.
+    let dependent_root = state
+        .proposer_shuffling_decision_root(chain.genesis_block_root)
+        .map_err(BeaconChainError::from)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    convert_to_api_response(chain, epoch, dependent_root, indices)
+}
+
+/// If required, advance `state` to `target_epoch`.
+///
+/// ## Details
+///
+/// - Returns an error if `state.current_epoch() > target_epoch`.
+/// - No-op if `state.current_epoch() == target_epoch`.
+/// - It must be the case that `state.canonical_root() == state_root`, but this function will not
+///   check that.
+fn ensure_state_is_in_epoch(
+    state: &mut BeaconState,
+    state_root: Hash256,
+    target_epoch: Epoch,
+    spec: &ChainSpec,
+) -> Result<(), warp::reject::Rejection> {
+    match state.current_epoch().cmp(&target_epoch) {
+        // Protects against an inconsistent slot clock.
+        Ordering::Greater => Err(warp_utils::reject::custom_server_error(format!(
+            "state epoch {} is later than target epoch {}",
+            state.current_epoch(),
+            target_epoch
+        ))),
+        // The state needs to be advanced.
+        Ordering::Less => {
+            let target_slot = target_epoch.start_slot(E::slots_per_epoch());
+            partial_state_advance(state, Some(state_root), target_slot, spec)
+                .map_err(BeaconChainError::from)
+                .map_err(warp_utils::reject::beacon_chain_error)
+        }
+        // The state is suitable, nothing to do.
+        Ordering::Equal => Ok(()),
+    }
+}
+
+/// Converts the internal representation of proposer duties into one that is compatible with the
+/// standard API.
+fn convert_to_api_response(
+    chain: &BeaconChain,
+    epoch: Epoch,
+    dependent_root: Hash256,
+    indices: Vec,
+) -> Result {
+    let index_to_pubkey_map = chain
+        .validator_pubkey_bytes_many(&indices)
+        .map_err(warp_utils::reject::beacon_chain_error)?;
+
+    // Map our internal data structure into the API structure.
+    let proposer_data = indices
+        .iter()
+        .enumerate()
+        .filter_map(|(i, &validator_index)| {
+            // Offset the index in `indices` to determine the slot for which these
+            // duties apply.
+            let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()) + Slot::from(i);
+
+            Some(api_types::ProposerData {
+                pubkey: *index_to_pubkey_map.get(&validator_index)?,
+                validator_index: validator_index as u64,
+                slot,
+            })
+        })
+        .collect::>();
+
+    // Consistency check.
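
The slot arithmetic in `convert_to_api_response` above relies on `get_beacon_proposer_indices` returning one validator index per slot of the epoch, ordered by slot, so position `i` in the vector maps to `start_slot + i`. A worked standalone sketch of that mapping (bare `u64`s, 32 slots per epoch assumed); the consistency check that follows in the diff enforces the one-proposer-per-slot invariant:

const SLOTS_PER_EPOCH: u64 = 32;

fn main() {
    let epoch: u64 = 5;
    // Stand-in for `get_beacon_proposer_indices`: one validator index per
    // slot of the epoch, ordered by slot.
    let proposer_indices: Vec<u64> = (0..SLOTS_PER_EPOCH).map(|i| (i * 7) % 101).collect();

    let start_slot = epoch * SLOTS_PER_EPOCH; // epoch 5 starts at slot 160
    let duties: Vec<(u64, u64)> = proposer_indices
        .iter()
        .enumerate()
        .map(|(i, &validator_index)| (start_slot + i as u64, validator_index))
        .collect();

    // Position 0 maps to the epoch's first slot, position 31 to its last.
    assert_eq!(duties.first().unwrap().0, 160);
    assert_eq!(duties.last().unwrap().0, 191);
    // One proposer per slot is exactly what the consistency check enforces.
    assert_eq!(duties.len() as u64, SLOTS_PER_EPOCH);
    println!("{:?}", &duties[..2]);
}
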
+ let slots_per_epoch = T::EthSpec::slots_per_epoch() as usize; + if proposer_data.len() != slots_per_epoch { + Err(warp_utils::reject::custom_server_error(format!( + "{} proposers is not enough for {} slots", + proposer_data.len(), + slots_per_epoch, + ))) + } else { + Ok(api_types::DutiesResponse { + dependent_root, + data: proposer_data, + }) + } +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 979158f01..6ae779f91 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -103,10 +103,12 @@ impl ApiTester { let (next_block, _next_state) = harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let head_state_root = head.beacon_state_root(); let attestations = harness .get_unaggregated_attestations( &AttestationStrategy::AllValidators, &head.beacon_state, + head_state_root, head.beacon_block_root, harness.chain.slot().unwrap(), ) @@ -234,10 +236,12 @@ impl ApiTester { let (next_block, _next_state) = harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let head_state_root = head.beacon_state_root(); let attestations = harness .get_unaggregated_attestations( &AttestationStrategy::AllValidators, &head.beacon_state, + head_state_root, head.beacon_block_root, harness.chain.slot().unwrap(), ) @@ -1563,50 +1567,117 @@ impl ApiTester { pub async fn test_get_validator_duties_proposer(self) -> Self { let current_epoch = self.chain.epoch().unwrap(); - let dependent_root = self - .chain - .root_at_slot(current_epoch.start_slot(E::slots_per_epoch()) - 1) - .unwrap() - .unwrap_or(self.chain.head_beacon_block_root().unwrap()); + for epoch in 0..=self.chain.epoch().unwrap().as_u64() { + let epoch = Epoch::from(epoch); - let result = self - .client - .get_validator_duties_proposer(current_epoch) - .await - .unwrap(); + let dependent_root = self + .chain + .root_at_slot(epoch.start_slot(E::slots_per_epoch()) - 1) + .unwrap() + .unwrap_or(self.chain.head_beacon_block_root().unwrap()); - let mut state = self.chain.head_beacon_state().unwrap(); + // Presently, the beacon chain harness never runs the code that primes the proposer + // cache. If this changes in the future then we'll need some smarter logic here, but + // this is succinct and effective for the time being. + assert!( + self.chain + .beacon_proposer_cache + .lock() + .get_epoch::(dependent_root, epoch) + .is_none(), + "the proposer cache should miss initially" + ); - while state.current_epoch() < current_epoch { - per_slot_processing(&mut state, None, &self.chain.spec).unwrap(); + let result = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap(); + + // Check that current-epoch requests prime the proposer cache, whilst non-current + // requests don't. 
+ if epoch == current_epoch { + assert!( + self.chain + .beacon_proposer_cache + .lock() + .get_epoch::(dependent_root, epoch) + .is_some(), + "a current-epoch request should prime the proposer cache" + ); + } else { + assert!( + self.chain + .beacon_proposer_cache + .lock() + .get_epoch::(dependent_root, epoch) + .is_none(), + "a non-current-epoch request should not prime the proposer cache" + ); + } + + let mut state = self + .chain + .state_at_slot( + epoch.start_slot(E::slots_per_epoch()), + StateSkipConfig::WithStateRoots, + ) + .unwrap(); + + state + .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) + .unwrap(); + + let expected_duties = epoch + .slot_iter(E::slots_per_epoch()) + .map(|slot| { + let index = state + .get_beacon_proposer_index(slot, &self.chain.spec) + .unwrap(); + let pubkey = state.validators[index].pubkey.clone().into(); + + ProposerData { + pubkey, + validator_index: index as u64, + slot, + } + }) + .collect::>(); + + let expected = DutiesResponse { + data: expected_duties, + dependent_root, + }; + + assert_eq!(result, expected); + + // If it's the current epoch, check the function with a primed proposer cache. + if epoch == current_epoch { + // This is technically a double-check, but it's defensive. + assert!( + self.chain + .beacon_proposer_cache + .lock() + .get_epoch::(dependent_root, epoch) + .is_some(), + "the request should prime the proposer cache" + ); + + let result = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap(); + + assert_eq!(result, expected); + } } - state - .build_committee_cache(RelativeEpoch::Current, &self.chain.spec) - .unwrap(); - - let expected_duties = current_epoch - .slot_iter(E::slots_per_epoch()) - .map(|slot| { - let index = state - .get_beacon_proposer_index(slot, &self.chain.spec) - .unwrap(); - let pubkey = state.validators[index].pubkey.clone().into(); - - ProposerData { - pubkey, - validator_index: index as u64, - slot, - } - }) - .collect::>(); - - let expected = DutiesResponse { - data: expected_duties, - dependent_root, - }; - - assert_eq!(result, expected); + // Requests to future epochs should fail. + self.client + .get_validator_duties_proposer(current_epoch + 1) + .await + .unwrap_err(); self } diff --git a/beacon_node/network/src/beacon_processor/tests.rs b/beacon_node/network/src/beacon_processor/tests.rs index 69fefe219..6549a0263 100644 --- a/beacon_node/network/src/beacon_processor/tests.rs +++ b/beacon_node/network/src/beacon_processor/tests.rs @@ -92,15 +92,16 @@ impl TestRig { let (next_block, _next_state) = harness.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()); + let head_state_root = head.beacon_state_root(); let attestations = harness .get_unaggregated_attestations( &AttestationStrategy::AllValidators, &head.beacon_state, + head_state_root, head.beacon_block_root, harness.chain.slot().unwrap(), ) .into_iter() - // .map(|vec| vec.into_iter().map(|(attestation, _subnet_id)| attestation)) .flatten() .collect::>(); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6eb2fe792..ea456acb9 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -333,7 +333,11 @@ impl, Cold: ItemStore> HotColdDB /// ## Warning /// /// The returned state **is not a valid beacon state**, it can only be used for obtaining - /// shuffling to process attestations. + /// shuffling to process attestations. 
At least the following components of the state will be
+    /// broken/invalid:
+    ///
+    /// - `state.state_roots`
+    /// - `state.block_roots`
     pub fn get_inconsistent_state_for_attestation_verification_only(
         &self,
         state_root: &Hash256,
diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs
index 32675002a..5c0db4ebc 100644
--- a/consensus/fork_choice/tests/tests.rs
+++ b/consensus/fork_choice/tests/tests.rs
@@ -183,8 +183,13 @@ impl ForkChoiceTest {
                 break;
             }
             if let Ok(block_hash) = self.harness.process_block_result(block.clone()) {
-                self.harness
-                    .attest_block(&state, block_hash, &block, &validators);
+                self.harness.attest_block(
+                    &state,
+                    block.state_root(),
+                    block_hash,
+                    &block,
+                    &validators,
+                );
                 self.harness.advance_slot();
             } else {
                 return Err(self);
diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs
index 3ae90f885..b0a755281 100644
--- a/consensus/state_processing/src/lib.rs
+++ b/consensus/state_processing/src/lib.rs
@@ -9,6 +9,7 @@ pub mod genesis;
 pub mod per_block_processing;
 pub mod per_epoch_processing;
 pub mod per_slot_processing;
+pub mod state_advance;
 pub mod test_utils;
 pub mod verify_operation;
diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs
new file mode 100644
index 000000000..24cf99019
--- /dev/null
+++ b/consensus/state_processing/src/state_advance.rs
@@ -0,0 +1,105 @@
+//! This module contains functions for advancing a `BeaconState` forward some number of slots
+//! without blocks (i.e., skip slots).
+//!
+//! These functions are not in the specification; however, they're defined here to reduce code
+//! duplication and protect against some easy-to-make mistakes when performing state advances.
+
+use crate::*;
+use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot};
+
+#[derive(Debug, PartialEq)]
+pub enum Error {
+    BadTargetSlot { target_slot: Slot, state_slot: Slot },
+    PerSlotProcessing(per_slot_processing::Error),
+    StateRootNotProvided,
+}
+
+/// Advances the `state` to the given `target_slot`, assuming that there were no blocks between
+/// these slots.
+///
+/// ## Errors
+///
+/// - If `state.slot > target_slot`, an error will be returned.
+///
+/// ## Notes
+///
+/// This state advance method is "complete"; it outputs a perfectly valid `BeaconState` and doesn't
+/// do anything hacky like the "partial" method (see `partial_state_advance`).
+pub fn complete_state_advance(
+    state: &mut BeaconState,
+    mut state_root_opt: Option,
+    target_slot: Slot,
+    spec: &ChainSpec,
+) -> Result<(), Error> {
+    check_target_slot(state.slot, target_slot)?;
+
+    while state.slot < target_slot {
+        // Use the initial state root on the first iteration of the loop, then use `None` for any
+        // future iterations.
+        let state_root_opt = state_root_opt.take();
+
+        per_slot_processing(state, state_root_opt, spec).map_err(Error::PerSlotProcessing)?;
+    }
+
+    Ok(())
+}
+
+/// Advances the `state` to the given `target_slot`, assuming that there were no blocks between
+/// these slots.
+///
+/// This is a "partial" state advance which outputs an **invalid** `BeaconState`. The state is
+/// invalid because the intermediate state roots are not computed. Avoiding computing state roots
+/// saves *a lot* of compute time and can be a useful optimization when a state only needs to be
+/// advanced to obtain proposer/attester shuffling as they are indifferent to state roots.
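+///
+/// ## Illustrative usage
+///
+/// A hedged sketch (not part of this diff), assuming a `state` whose root is known and that is
+/// only needed for duty/shuffling lookups:
+///
+/// ```ignore
+/// let target_slot = target_epoch.start_slot(E::slots_per_epoch());
+/// partial_state_advance(&mut state, Some(state_root), target_slot, spec)?;
+/// // `state` is now usable for proposer/attester shuffling, but its `state_roots`
+/// // contain zeroed entries: never hash, persist, or build proofs from it.
+/// ```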
+/// +/// For clarity, **be careful with this function as it produces invalid states**. +/// +/// ## Errors +/// +/// - If `state.slot > target_slot`, an error will be returned. +/// - If `state_root_opt.is_none()` but the latest block header requires a state root. +pub fn partial_state_advance( + state: &mut BeaconState, + state_root_opt: Option, + target_slot: Slot, + spec: &ChainSpec, +) -> Result<(), Error> { + check_target_slot(state.slot, target_slot)?; + + // The only time that a state root is mandatory is if a block has been applied to the state + // without it yet being advanced another slot. + // + // Failing to provide a state root in this scenario would result in corrupting the + // `state.block_roots` array, since the `state.latest_block_header` would contain an invalid + // (all-zeros) state root. + let mut initial_state_root = Some(if state.slot > state.latest_block_header.slot { + state_root_opt.unwrap_or_else(Hash256::zero) + } else { + state_root_opt.ok_or(Error::StateRootNotProvided)? + }); + + while state.slot < target_slot { + // Use the initial state root on the first iteration of the loop, then use `[0; 32]` for any + // later iterations. + // + // Failing to provide the correct state root on the initial iteration may result in + // corrupting the `state.block_roots` array since the latest block header may not be updated + // with the correct state root. + let state_root = initial_state_root.take().unwrap_or_else(Hash256::zero); + + per_slot_processing(state, Some(state_root), spec).map_err(Error::PerSlotProcessing)?; + } + + Ok(()) +} + +fn check_target_slot(state_slot: Slot, target_slot: Slot) -> Result<(), Error> { + if state_slot > target_slot { + Err(Error::BadTargetSlot { + target_slot, + state_slot, + }) + } else { + Ok(()) + } +} diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 69419dfac..6f1b59552 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -444,6 +444,62 @@ impl BeaconState { cache.get_all_beacon_committees() } + /// Returns the block root which decided the proposer shuffling for the current epoch. This root + /// can be used to key this proposer shuffling. + /// + /// ## Notes + /// + /// The `block_root` covers the one-off scenario where the genesis block decides its own + /// shuffling. It should be set to the latest block applied to `self` or the genesis block root. + pub fn proposer_shuffling_decision_root(&self, block_root: Hash256) -> Result { + let decision_slot = self.proposer_shuffling_decision_slot(); + if self.slot == decision_slot { + Ok(block_root) + } else { + self.get_block_root(decision_slot).map(|root| *root) + } + } + + /// Returns the slot at which the proposer shuffling was decided. The block root at this slot + /// can be used to key the proposer shuffling for the current epoch. + fn proposer_shuffling_decision_slot(&self) -> Slot { + self.current_epoch() + .start_slot(T::slots_per_epoch()) + .saturating_sub(1_u64) + } + + /// Returns the block root which decided the attester shuffling for the given `relative_epoch`. + /// This root can be used to key that attester shuffling. + /// + /// ## Notes + /// + /// The `block_root` covers the one-off scenario where the genesis block decides its own + /// shuffling. It should be set to the latest block applied to `self` or the genesis block root. 
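
To make the decision-slot arithmetic concrete before the attester variant that follows: with 32 slots per epoch, the proposer shuffling for epoch `e` is decided at the last slot of epoch `e - 1`, while the attester shuffling for epoch `e` is decided one full epoch earlier, at the last slot of epoch `e - 2`. A standalone sketch (bare `u64`s, hypothetical helper names) that mirrors both methods:

const SLOTS_PER_EPOCH: u64 = 32;

fn start_slot(epoch: u64) -> u64 {
    epoch * SLOTS_PER_EPOCH
}

// Proposer shuffling for the state's current epoch: decided at the slot just
// before that epoch begins.
fn proposer_decision_slot(current_epoch: u64) -> u64 {
    start_slot(current_epoch).saturating_sub(1)
}

// Attester shuffling for epoch `e`: decided at the slot just before the start
// of `e - 1`, reflecting the one-epoch look-ahead on attester shuffling.
fn attester_decision_slot(shuffling_epoch: u64) -> u64 {
    start_slot(shuffling_epoch.saturating_sub(1)).saturating_sub(1)
}

fn main() {
    // State in epoch 10 (slots 320..=351):
    assert_eq!(proposer_decision_slot(10), 319); // last slot of epoch 9
    assert_eq!(attester_decision_slot(11), 319); // RelativeEpoch::Next
    assert_eq!(attester_decision_slot(10), 287); // RelativeEpoch::Current
    assert_eq!(attester_decision_slot(9), 255); // RelativeEpoch::Previous
    println!("decision slots ok");
}
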
+    pub fn attester_shuffling_decision_root(
+        &self,
+        block_root: Hash256,
+        relative_epoch: RelativeEpoch,
+    ) -> Result {
+        let decision_slot = self.attester_shuffling_decision_slot(relative_epoch);
+        if self.slot == decision_slot {
+            Ok(block_root)
+        } else {
+            self.get_block_root(decision_slot).map(|root| *root)
+        }
+    }
+
+    /// Returns the slot at which the attester shuffling was decided. The block root at this slot
+    /// can be used to key the attester shuffling for the given `relative_epoch`.
+    fn attester_shuffling_decision_slot(&self, relative_epoch: RelativeEpoch) -> Slot {
+        match relative_epoch {
+            RelativeEpoch::Next => self.current_epoch(),
+            RelativeEpoch::Current => self.previous_epoch(),
+            RelativeEpoch::Previous => self.previous_epoch().saturating_sub(1_u64),
+        }
+        .start_slot(T::slots_per_epoch())
+        .saturating_sub(1_u64)
+    }
+
     /// Compute the proposer (not necessarily for the Beacon chain) from a list of indices.
     ///
     /// Spec v0.12.1
diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/shuffling_id.rs
index d2c501083..120d744a5 100644
--- a/consensus/types/src/shuffling_id.rs
+++ b/consensus/types/src/shuffling_id.rs
@@ -35,16 +35,8 @@ impl AttestationShufflingId {
     ) -> Result {
         let shuffling_epoch = relative_epoch.into_epoch(state.current_epoch());

-        let shuffling_decision_slot = shuffling_epoch
-            .saturating_sub(1_u64)
-            .start_slot(E::slots_per_epoch())
-            .saturating_sub(1_u64);
-
-        let shuffling_decision_block = if state.slot == shuffling_decision_slot {
-            block_root
-        } else {
-            *state.get_block_root(shuffling_decision_slot)?
-        };
+        let shuffling_decision_block =
+            state.attester_shuffling_decision_root(block_root, relative_epoch)?;

         Ok(Self {
             shuffling_epoch,
diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs
index c5f164bb0..2e4833b4e 100644
--- a/crypto/bls/src/generic_public_key.rs
+++ b/crypto/bls/src/generic_public_key.rs
@@ -1,3 +1,4 @@
+use crate::generic_public_key_bytes::GenericPublicKeyBytes;
 use crate::Error;
 use serde::de::{Deserialize, Deserializer};
 use serde::ser::{Serialize, Serializer};
@@ -54,6 +55,11 @@ where
         format!("{:?}", self)
     }

+    /// Returns `self` in the compressed `PublicKeyBytes` representation.
+    pub fn compress(&self) -> GenericPublicKeyBytes {
+        GenericPublicKeyBytes::from(self)
+    }
+
     /// Serialize `self` as compressed bytes.
     pub fn serialize(&self) -> [u8; PUBLIC_KEY_BYTES_LEN] {
         self.point.serialize()
diff --git a/crypto/bls/src/generic_public_key_bytes.rs b/crypto/bls/src/generic_public_key_bytes.rs
index ea7ed30d8..d341e907a 100644
--- a/crypto/bls/src/generic_public_key_bytes.rs
+++ b/crypto/bls/src/generic_public_key_bytes.rs
@@ -69,6 +69,11 @@ impl GenericPublicKeyBytes {
         self.bytes
     }

+    /// Returns `self.serialize()` as a `0x`-prefixed hex string.
+    pub fn to_hex_string(&self) -> String {
+        format!("{:?}", self)
+    }
+
     /// Instantiates `Self` from bytes.
     ///
     /// The bytes are not fully verified (i.e., they may not represent a valid BLS point).
Only the diff --git a/lighthouse/tests/account_manager.rs b/lighthouse/tests/account_manager.rs index 74b5a5ba8..b5b012998 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -599,6 +599,8 @@ fn check_slashing_protection(validator_dir: &TempDir, pubkeys: impl Iterator, pub signed_attestations: Vec, } diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index 9c1ab770f..b5a4b86ee 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -5,7 +5,7 @@ use crate::{ }; use serde_derive::{Deserialize, Serialize}; use tempfile::tempdir; -use types::{Epoch, Hash256, PublicKey, Slot}; +use types::{Epoch, Hash256, PublicKeyBytes, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] pub struct MultiTestCase { @@ -25,7 +25,7 @@ pub struct TestCase { #[derive(Debug, Clone, Deserialize, Serialize)] pub struct TestBlock { - pub pubkey: PublicKey, + pub pubkey: PublicKeyBytes, pub slot: Slot, pub signing_root: Hash256, pub should_succeed: bool, @@ -33,7 +33,7 @@ pub struct TestBlock { #[derive(Debug, Clone, Deserialize, Serialize)] pub struct TestAttestation { - pub pubkey: PublicKey, + pub pubkey: PublicKeyBytes, pub source_epoch: Epoch, pub target_epoch: Epoch, pub signing_root: Hash256, diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 06c117ab1..8f6bdb50e 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -17,7 +17,7 @@ pub use crate::slashing_database::{ use rusqlite::Error as SQLError; use std::io::{Error as IOError, ErrorKind}; use std::string::ToString; -use types::{Hash256, PublicKey}; +use types::{Hash256, PublicKeyBytes}; /// The filename within the `validators` directory that contains the slashing protection DB. pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; @@ -27,7 +27,7 @@ pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; /// This could be because it's slashable, or because an error occurred. #[derive(PartialEq, Debug)] pub enum NotSafe { - UnregisteredValidator(PublicKey), + UnregisteredValidator(PublicKeyBytes), InvalidBlock(InvalidBlock), InvalidAttestation(InvalidAttestation), IOError(ErrorKind), diff --git a/validator_client/slashing_protection/src/parallel_tests.rs b/validator_client/slashing_protection/src/parallel_tests.rs index bfe7526d4..e3cc1a0d5 100644 --- a/validator_client/slashing_protection/src/parallel_tests.rs +++ b/validator_client/slashing_protection/src/parallel_tests.rs @@ -16,7 +16,7 @@ fn block_same_slot() { let pk = pubkey(0); - slashing_db.register_validator(&pk).unwrap(); + slashing_db.register_validator(pk).unwrap(); // A stream of blocks all with the same slot. let num_blocks = 10; @@ -37,7 +37,7 @@ fn attestation_same_target() { let pk = pubkey(0); - slashing_db.register_validator(&pk).unwrap(); + slashing_db.register_validator(pk).unwrap(); // A stream of attestations all with the same target. let num_attestations = 10; @@ -64,7 +64,7 @@ fn attestation_surround_fest() { let pk = pubkey(0); - slashing_db.register_validator(&pk).unwrap(); + slashing_db.register_validator(pk).unwrap(); // A stream of attestations that all surround each other. 
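
The `PublicKey` to `PublicKeyBytes` migration running through the slashing-protection changes here swaps a decompressed, validity-checked point for its 48-byte compressed encoding: `Copy`, cheap to compare, and sufficient as a database key because the slashing DB never performs curve arithmetic. A std-only toy (hypothetical types, not the real `bls` crate) illustrating the shape of that trade-off; the parallel tests continuing below register keys in exactly this compressed form:

/// Toy stand-in for a decompressed BLS point: expensive to obtain, since
/// deserialization must check that the bytes lie on the curve.
struct PublicKey {
    compressed: [u8; 48],
}

/// Toy stand-in for `PublicKeyBytes`: just bytes, no validity guarantee,
/// but `Copy` and usable as a map/database key.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct PublicKeyBytes([u8; 48]);

impl PublicKey {
    /// Mirrors the `compress()` helper added in this diff: drop down to the
    /// compressed byte representation.
    fn compress(&self) -> PublicKeyBytes {
        PublicKeyBytes(self.compressed)
    }
}

fn main() {
    let pk = PublicKey { compressed: [7u8; 48] };
    let bytes = pk.compress();
    let copy = bytes; // `Copy`: no clone() needed, unlike the full point
    assert_eq!(bytes, copy);
    println!("compressed key: {:02x?}...", &bytes.0[..4]);
}
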
let num_attestations = 10; diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 01a9210c5..79bcec7a9 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -10,7 +10,7 @@ use rusqlite::{params, OptionalExtension, Transaction, TransactionBehavior}; use std::fs::{File, OpenOptions}; use std::path::Path; use std::time::Duration; -use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKey, SignedRoot, Slot}; +use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKeyBytes, SignedRoot, Slot}; type Pool = r2d2::Pool; @@ -147,14 +147,14 @@ impl SlashingDatabase { /// /// This allows the validator to record their signatures in the database, and check /// for slashings. - pub fn register_validator(&self, validator_pk: &PublicKey) -> Result<(), NotSafe> { - self.register_validators(std::iter::once(validator_pk)) + pub fn register_validator(&self, validator_pk: PublicKeyBytes) -> Result<(), NotSafe> { + self.register_validators(std::iter::once(&validator_pk)) } /// Register multiple validators with the slashing protection database. pub fn register_validators<'a>( &self, - public_keys: impl Iterator, + public_keys: impl Iterator, ) -> Result<(), NotSafe> { let mut conn = self.conn_pool.get()?; let txn = conn.transaction()?; @@ -168,7 +168,7 @@ impl SlashingDatabase { /// The caller must commit the transaction for the changes to be persisted. pub fn register_validators_in_txn<'a>( &self, - public_keys: impl Iterator, + public_keys: impl Iterator, txn: &Transaction, ) -> Result<(), NotSafe> { let mut stmt = txn.prepare("INSERT INTO validators (public_key) VALUES (?1)")?; @@ -183,7 +183,7 @@ impl SlashingDatabase { /// Check that all of the given validators are registered. pub fn check_validator_registrations<'a>( &self, - mut public_keys: impl Iterator, + mut public_keys: impl Iterator, ) -> Result<(), NotSafe> { let mut conn = self.conn_pool.get()?; let txn = conn.transaction()?; @@ -195,7 +195,7 @@ impl SlashingDatabase { /// /// This is NOT the same as a validator index, and depends on the ordering that validators /// are registered with the slashing protection database (and may vary between machines). - pub fn get_validator_id(&self, public_key: &PublicKey) -> Result { + pub fn get_validator_id(&self, public_key: &PublicKeyBytes) -> Result { let mut conn = self.conn_pool.get()?; let txn = conn.transaction()?; self.get_validator_id_in_txn(&txn, public_key) @@ -204,17 +204,17 @@ impl SlashingDatabase { fn get_validator_id_in_txn( &self, txn: &Transaction, - public_key: &PublicKey, + public_key: &PublicKeyBytes, ) -> Result { self.get_validator_id_opt(txn, public_key)? - .ok_or_else(|| NotSafe::UnregisteredValidator(public_key.clone())) + .ok_or_else(|| NotSafe::UnregisteredValidator(*public_key)) } /// Optional version of `get_validator_id`. 
fn get_validator_id_opt( &self, txn: &Transaction, - public_key: &PublicKey, + public_key: &PublicKeyBytes, ) -> Result, NotSafe> { Ok(txn .query_row( @@ -229,7 +229,7 @@ impl SlashingDatabase { fn check_block_proposal( &self, txn: &Transaction, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, slot: Slot, signing_root: SigningRoot, ) -> Result { @@ -278,7 +278,7 @@ impl SlashingDatabase { fn check_attestation( &self, txn: &Transaction, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, att_source_epoch: Epoch, att_target_epoch: Epoch, att_signing_root: SigningRoot, @@ -408,7 +408,7 @@ impl SlashingDatabase { fn insert_block_proposal( &self, txn: &Transaction, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, slot: Slot, signing_root: SigningRoot, ) -> Result<(), NotSafe> { @@ -429,7 +429,7 @@ impl SlashingDatabase { fn insert_attestation( &self, txn: &Transaction, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, att_source_epoch: Epoch, att_target_epoch: Epoch, att_signing_root: SigningRoot, @@ -457,7 +457,7 @@ impl SlashingDatabase { /// This is the safe, externally-callable interface for checking block proposals. pub fn check_and_insert_block_proposal( &self, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, block_header: &BeaconBlockHeader, domain: Hash256, ) -> Result { @@ -471,7 +471,7 @@ impl SlashingDatabase { /// As for `check_and_insert_block_proposal` but without requiring the whole `BeaconBlockHeader`. pub fn check_and_insert_block_signing_root( &self, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, slot: Slot, signing_root: SigningRoot, ) -> Result { @@ -490,7 +490,7 @@ impl SlashingDatabase { /// Transactional variant of `check_and_insert_block_signing_root`. pub fn check_and_insert_block_signing_root_txn( &self, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, slot: Slot, signing_root: SigningRoot, txn: &Transaction, @@ -511,7 +511,7 @@ impl SlashingDatabase { /// This is the safe, externally-callable interface for checking attestations. pub fn check_and_insert_attestation( &self, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, attestation: &AttestationData, domain: Hash256, ) -> Result { @@ -527,7 +527,7 @@ impl SlashingDatabase { /// As for `check_and_insert_attestation` but without requiring the whole `AttestationData`. pub fn check_and_insert_attestation_signing_root( &self, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, att_source_epoch: Epoch, att_target_epoch: Epoch, att_signing_root: SigningRoot, @@ -548,7 +548,7 @@ impl SlashingDatabase { /// Transactional variant of `check_and_insert_attestation_signing_root`. fn check_and_insert_attestation_signing_root_txn( &self, - validator_pubkey: &PublicKey, + validator_pubkey: &PublicKeyBytes, att_source_epoch: Epoch, att_target_epoch: Epoch, att_signing_root: SigningRoot, @@ -600,7 +600,7 @@ impl SlashingDatabase { let mut import_outcomes = vec![]; for record in interchange.data { - let pubkey = record.pubkey.clone(); + let pubkey = record.pubkey; let txn = conn.transaction()?; match self.import_interchange_record(record, &txn) { Ok(summary) => { @@ -757,7 +757,7 @@ impl SlashingDatabase { /// Remove all blocks for `public_key` with slots less than `new_min_slot`. 
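
A standalone illustration of what the pruning helpers around here (e.g. `prune_signed_blocks`, declared just below) boil down to: a guarded `DELETE` over per-validator rows. This sketch assumes the `rusqlite` crate and a deliberately simplified schema; the real table layout and bookkeeping in this file are more involved.

use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE signed_blocks (
             validator_id INTEGER NOT NULL,
             slot INTEGER NOT NULL
         );",
    )?;

    // One validator with blocks at slots 10, 20, 30.
    for slot in [10i64, 20, 30] {
        conn.execute(
            "INSERT INTO signed_blocks (validator_id, slot) VALUES (?1, ?2)",
            params![1i64, slot],
        )?;
    }

    // Prune everything below the new minimum slot, keeping newer entries so
    // future proposals can still be checked against them.
    let new_min_slot = 25i64;
    conn.execute(
        "DELETE FROM signed_blocks WHERE validator_id = ?1 AND slot < ?2",
        params![1i64, new_min_slot],
    )?;

    let remaining: i64 =
        conn.query_row("SELECT COUNT(*) FROM signed_blocks", [], |row| row.get(0))?;
    assert_eq!(remaining, 1); // only slot 30 survives
    Ok(())
}
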
fn prune_signed_blocks( &self, - public_key: &PublicKey, + public_key: &PublicKeyBytes, new_min_slot: Slot, txn: &Transaction, ) -> Result<(), NotSafe> { @@ -780,7 +780,7 @@ impl SlashingDatabase { /// Prune the signed blocks table for the given public keys. pub fn prune_all_signed_blocks<'a>( &self, - mut public_keys: impl Iterator, + mut public_keys: impl Iterator, new_min_slot: Slot, ) -> Result<(), NotSafe> { let mut conn = self.conn_pool.get()?; @@ -803,7 +803,7 @@ impl SlashingDatabase { /// attestations in the database. fn prune_signed_attestations( &self, - public_key: &PublicKey, + public_key: &PublicKeyBytes, new_min_target: Epoch, txn: &Transaction, ) -> Result<(), NotSafe> { @@ -830,7 +830,7 @@ impl SlashingDatabase { /// Prune the signed attestations table for the given validator keys. pub fn prune_all_signed_attestations<'a>( &self, - mut public_keys: impl Iterator, + mut public_keys: impl Iterator, new_min_target: Epoch, ) -> Result<(), NotSafe> { let mut conn = self.conn_pool.get()?; @@ -853,7 +853,7 @@ impl SlashingDatabase { /// Get a summary of a validator's slashing protection data for consumption by the user. pub fn validator_summary( &self, - public_key: &PublicKey, + public_key: &PublicKeyBytes, txn: &Transaction, ) -> Result { let validator_id = self.get_validator_id_in_txn(txn, public_key)?; @@ -906,11 +906,11 @@ pub struct ValidatorSummary { #[derive(Debug)] pub enum InterchangeImportOutcome { Success { - pubkey: PublicKey, + pubkey: PublicKeyBytes, summary: ValidatorSummary, }, Failure { - pubkey: PublicKey, + pubkey: PublicKeyBytes, error: NotSafe, }, } @@ -981,7 +981,7 @@ mod tests { let _db1 = SlashingDatabase::create(&file).unwrap(); let db2 = SlashingDatabase::open(&file).unwrap(); - db2.register_validator(&pubkey(0)).unwrap_err(); + db2.register_validator(pubkey(0)).unwrap_err(); } // Attempting to create the same database twice should error. diff --git a/validator_client/slashing_protection/src/test_utils.rs b/validator_client/slashing_protection/src/test_utils.rs index db7676ed4..aa82c6f58 100644 --- a/validator_client/slashing_protection/src/test_utils.rs +++ b/validator_client/slashing_protection/src/test_utils.rs @@ -2,18 +2,19 @@ use crate::*; use tempfile::{tempdir, TempDir}; use types::{ test_utils::generate_deterministic_keypair, AttestationData, BeaconBlockHeader, Hash256, + PublicKeyBytes, }; pub const DEFAULT_VALIDATOR_INDEX: usize = 0; pub const DEFAULT_DOMAIN: Hash256 = Hash256::zero(); pub const DEFAULT_GENESIS_VALIDATORS_ROOT: Hash256 = Hash256::zero(); -pub fn pubkey(index: usize) -> PublicKey { - generate_deterministic_keypair(index).pk +pub fn pubkey(index: usize) -> PublicKeyBytes { + generate_deterministic_keypair(index).pk.compress() } pub struct Test { - pubkey: PublicKey, + pubkey: PublicKeyBytes, data: T, domain: Hash256, expected: Result, @@ -24,7 +25,7 @@ impl Test { Self::with_pubkey(pubkey(DEFAULT_VALIDATOR_INDEX), data) } - pub fn with_pubkey(pubkey: PublicKey, data: T) -> Self { + pub fn with_pubkey(pubkey: PublicKeyBytes, data: T) -> Self { Self { pubkey, data, @@ -58,7 +59,7 @@ impl Test { pub struct StreamTest { /// Validators to register. - pub registered_validators: Vec, + pub registered_validators: Vec, /// Vector of cases and the value expected when calling `check_and_insert_X`. 
pub cases: Vec>, } @@ -89,7 +90,7 @@ impl StreamTest { let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); for pubkey in &self.registered_validators { - slashing_db.register_validator(pubkey).unwrap(); + slashing_db.register_validator(*pubkey).unwrap(); } for (i, test) in self.cases.iter().enumerate() { @@ -112,7 +113,7 @@ impl StreamTest { let slashing_db = SlashingDatabase::create(&slashing_db_file).unwrap(); for pubkey in &self.registered_validators { - slashing_db.register_validator(pubkey).unwrap(); + slashing_db.register_validator(*pubkey).unwrap(); } for (i, test) in self.cases.iter().enumerate() { diff --git a/validator_client/src/attestation_service.rs b/validator_client/src/attestation_service.rs index 1a1b8ae84..9c79e79b4 100644 --- a/validator_client/src/attestation_service.rs +++ b/validator_client/src/attestation_service.rs @@ -11,7 +11,7 @@ use slot_clock::SlotClock; use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; -use tokio::time::{interval_at, sleep_until, Duration, Instant}; +use tokio::time::{sleep, sleep_until, Duration, Instant}; use tree_hash::TreeHash; use types::{ AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, CommitteeIndex, EthSpec, @@ -20,7 +20,7 @@ use types::{ /// Builds an `AttestationService`. pub struct AttestationServiceBuilder { - duties_service: Option>, + duties_service: Option>>, validator_store: Option>, slot_clock: Option, beacon_nodes: Option>>, @@ -38,7 +38,7 @@ impl AttestationServiceBuilder { } } - pub fn duties_service(mut self, service: DutiesService) -> Self { + pub fn duties_service(mut self, service: Arc>) -> Self { self.duties_service = Some(service); self } @@ -88,7 +88,7 @@ impl AttestationServiceBuilder { /// Helper to minimise `Arc` usage. pub struct Inner { - duties_service: DutiesService, + duties_service: Arc>, validator_store: ValidatorStore, slot_clock: T, beacon_nodes: Arc>, @@ -137,32 +137,31 @@ impl AttestationService { "next_update_millis" => duration_to_next_slot.as_millis() ); - let mut interval = { - // Note: `interval_at` panics if `slot_duration` is 0 - interval_at( - Instant::now() + duration_to_next_slot + slot_duration / 3, - slot_duration, - ) - }; - let executor = self.context.executor.clone(); let interval_fut = async move { loop { - interval.tick().await; - let log = self.context.log(); + if let Some(duration_to_next_slot) = self.slot_clock.duration_to_next_slot() { + sleep(duration_to_next_slot + slot_duration / 3).await; + let log = self.context.log(); - if let Err(e) = self.spawn_attestation_tasks(slot_duration) { - crit!( - log, - "Failed to spawn attestation tasks"; - "error" => e - ) + if let Err(e) = self.spawn_attestation_tasks(slot_duration) { + crit!( + log, + "Failed to spawn attestation tasks"; + "error" => e + ) + } else { + trace!( + log, + "Spawned attestation tasks"; + ) + } } else { - trace!( - log, - "Spawned attestation tasks"; - ) + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. 
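
The rewrite here replaces a fixed `interval_at` ticker with a fresh `duration_to_next_slot()` query on every iteration, so the service re-aligns with the wall clock each slot instead of drifting, and wakes a third of a slot after the boundary (the attestation deadline). A self-contained `tokio` sketch of that pattern, with a hypothetical genesis time and 12-second slots standing in for the real slot clock; the `sleep(slot_duration)` fallback that follows in the diff is the wait-one-slot retry for an unreadable clock.

use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::time::sleep;

const SLOT_DURATION: Duration = Duration::from_secs(12);
const GENESIS_UNIX_SECS: u64 = 1_606_824_023; // mainnet genesis, for illustration only

/// Time until the next slot boundary, `None` if the clock reads pre-genesis.
fn duration_to_next_slot() -> Option<Duration> {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?;
    let since_genesis = now.checked_sub(Duration::from_secs(GENESIS_UNIX_SECS))?;
    let into_slot =
        Duration::from_nanos((since_genesis.as_nanos() % SLOT_DURATION.as_nanos()) as u64);
    Some(SLOT_DURATION - into_slot)
}

#[tokio::main]
async fn main() {
    for _ in 0..2 {
        match duration_to_next_slot() {
            Some(to_next) => {
                // Re-align each iteration: wake a third of the way into the
                // next slot (the attestation deadline), immune to drift.
                sleep(to_next + SLOT_DURATION / 3).await;
                println!("do per-slot work here");
            }
            None => sleep(SLOT_DURATION).await, // clock unreadable: wait a slot
        }
    }
}
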
+ sleep(slot_duration).await; + continue; } } }; @@ -192,12 +191,9 @@ impl AttestationService { .attesters(slot) .into_iter() .fold(HashMap::new(), |mut map, duty_and_proof| { - if let Some(committee_index) = duty_and_proof.duty.attestation_committee_index { - let validator_duties = map.entry(committee_index).or_insert_with(Vec::new); - - validator_duties.push(duty_and_proof); - } - + map.entry(duty_and_proof.duty.committee_index) + .or_insert_with(Vec::new) + .push(duty_and_proof); map }); @@ -355,43 +351,27 @@ impl AttestationService { let mut attestations = Vec::with_capacity(validator_duties.len()); - for duty in validator_duties { - // Ensure that all required fields are present in the validator duty. - let ( - duty_slot, - duty_committee_index, - validator_committee_position, - _, - _, - committee_length, - ) = if let Some(tuple) = duty.attestation_duties() { - tuple - } else { - crit!( - log, - "Missing validator duties when signing"; - "duties" => format!("{:?}", duty) - ); - continue; - }; + for duty_and_proof in validator_duties { + let duty = &duty_and_proof.duty; // Ensure that the attestation matches the duties. - if duty_slot != attestation_data.slot || duty_committee_index != attestation_data.index + #[allow(clippy::suspicious_operation_groupings)] + if duty.slot != attestation_data.slot || duty.committee_index != attestation_data.index { crit!( log, "Inconsistent validator duties during signing"; - "validator" => format!("{:?}", duty.validator_pubkey()), - "duty_slot" => duty_slot, + "validator" => ?duty.pubkey, + "duty_slot" => duty.slot, "attestation_slot" => attestation_data.slot, - "duty_index" => duty_committee_index, + "duty_index" => duty.committee_index, "attestation_index" => attestation_data.index, ); continue; } let mut attestation = Attestation { - aggregation_bits: BitList::with_capacity(committee_length as usize).unwrap(), + aggregation_bits: BitList::with_capacity(duty.committee_length as usize).unwrap(), data: attestation_data.clone(), signature: AggregateSignature::infinity(), }; @@ -399,8 +379,8 @@ impl AttestationService { if self .validator_store .sign_attestation( - duty.validator_pubkey(), - validator_committee_position, + &duty.pubkey, + duty.validator_committee_index as usize, &mut attestation, current_epoch, ) @@ -490,6 +470,8 @@ impl AttestationService { let mut signed_aggregate_and_proofs = Vec::new(); for duty_and_proof in validator_duties { + let duty = &duty_and_proof.duty; + let selection_proof = if let Some(proof) = duty_and_proof.selection_proof.as_ref() { proof } else { @@ -497,26 +479,18 @@ impl AttestationService { // subscribed aggregators. 
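// (The election itself happened earlier, in the duties service: per the spec's
// `is_aggregator` check, a validator aggregates when the first 8 bytes of the hash of its
// selection proof, taken modulo
// `max(1, committee_length / TARGET_AGGREGATORS_PER_COMMITTEE)`, equal zero, so
// `selection_proof` is `Some` only for elected aggregators.)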
continue; }; - let (duty_slot, duty_committee_index, _, validator_index, _, _) = - if let Some(tuple) = duty_and_proof.attestation_duties() { - tuple - } else { - crit!(log, "Missing duties when signing aggregate"); - continue; - }; - let pubkey = &duty_and_proof.duty.validator_pubkey; let slot = attestation_data.slot; let committee_index = attestation_data.index; - if duty_slot != slot || duty_committee_index != committee_index { + if duty.slot != slot || duty.committee_index != committee_index { crit!(log, "Inconsistent validator duties during signing"); continue; } if let Some(aggregate) = self.validator_store.produce_signed_aggregate_and_proof( - pubkey, - validator_index, + &duty.pubkey, + duty.validator_index, aggregated_attestation.clone(), selection_proof.clone(), ) { diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index d302e428c..48f9e7489 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -5,13 +5,13 @@ use crate::{ use crate::{http_metrics::metrics, validator_store::ValidatorStore}; use environment::RuntimeContext; use eth2::types::Graffiti; -use futures::channel::mpsc::Receiver; -use futures::{StreamExt, TryFutureExt}; +use futures::TryFutureExt; use slog::{crit, debug, error, info, trace, warn}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use types::{EthSpec, PublicKey, Slot}; +use tokio::sync::mpsc; +use types::{EthSpec, PublicKeyBytes, Slot}; /// Builds a `BlockService`. pub struct BlockServiceBuilder { @@ -121,13 +121,13 @@ impl Deref for BlockService { /// Notification from the duties service that we should try to produce a block. pub struct BlockServiceNotification { pub slot: Slot, - pub block_proposers: Vec, + pub block_proposers: Vec, } impl BlockService { pub fn start_update_service( self, - notification_rx: Receiver, + mut notification_rx: mpsc::Receiver, ) -> Result<(), String> { let log = self.context.log().clone(); @@ -135,14 +135,16 @@ impl BlockService { let executor = self.inner.context.executor.clone(); - let block_service_fut = notification_rx.for_each(move |notif| { - let service = self.clone(); + executor.spawn( async move { - service.do_update(notif).await.ok(); - } - }); - - executor.spawn(block_service_fut, "block_service"); + while let Some(notif) = notification_rx.recv().await { + let service = self.clone(); + service.do_update(notif).await.ok(); + } + debug!(log, "Block service shutting down"); + }, + "block_service", + ); Ok(()) } @@ -222,7 +224,11 @@ impl BlockService { } /// Produce a block at the given slot for validator_pubkey - async fn publish_block(self, slot: Slot, validator_pubkey: PublicKey) -> Result<(), String> { + async fn publish_block( + self, + slot: Slot, + validator_pubkey: PublicKeyBytes, + ) -> Result<(), String> { let log = self.context.log(); let _timer = metrics::start_timer_vec(&metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK]); diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 0c69a0c25..0ad85a0dd 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -1,428 +1,114 @@ +//! The `DutiesService` contains the attester/proposer duties for all local validators. +//! +//! It learns of the local validator via the `crate::ValidatorStore` struct. It keeps the duties +//! up-to-date by polling the beacon node on regular intervals. +//! +//! 
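Internally it maintains three `RwLock`-guarded maps: attester duties keyed by
+//! validator pubkey and epoch, proposer duties keyed by epoch, and a map from public key to
+//! validator index.
+//!
+//!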
The `DutiesService` is also responsible for sending events to the `BlockService` which trigger +//! block production. + use crate::beacon_node_fallback::{BeaconNodeFallback, RequireSynced}; use crate::{ - block_service::BlockServiceNotification, http_metrics::metrics, validator_duty::ValidatorDuty, - validator_store::ValidatorStore, + block_service::BlockServiceNotification, http_metrics::metrics, validator_store::ValidatorStore, }; use environment::RuntimeContext; -use futures::channel::mpsc::Sender; -use futures::SinkExt; +use eth2::types::{AttesterData, BeaconCommitteeSubscription, ProposerData, StateId, ValidatorId}; use parking_lot::RwLock; -use slog::{debug, error, trace, warn}; +use safe_arith::ArithError; +use slog::{debug, error, info, warn, Logger}; use slot_clock::SlotClock; -use std::collections::HashMap; -use std::ops::Deref; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use tokio::time::{interval_at, Duration, Instant}; -use types::{ChainSpec, CommitteeIndex, Epoch, EthSpec, PublicKey, SelectionProof, Slot}; +use tokio::{sync::mpsc::Sender, time::sleep}; +use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; -/// Delay this period of time after the slot starts. This allows the node to process the new slot. -const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); +/// Since the BN does not like it when we subscribe to slots that are close to the current time, we +/// will only subscribe to slots which are further than `SUBSCRIPTION_BUFFER_SLOTS` away. +/// +/// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the +/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid +/// bringing in the entire crate. +const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2; -/// Remove any duties where the `duties_epoch < current_epoch - PRUNE_DEPTH`. -const PRUNE_DEPTH: u64 = 4; +/// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch. +const HISTORICAL_DUTIES_EPOCHS: u64 = 2; -type BaseHashMap = HashMap>; +#[derive(Debug)] +pub enum Error { + UnableToReadSlotClock, + FailedToDownloadAttesters(String), + FailedToProduceSelectionProof, + InvalidModulo(ArithError), +} -#[derive(Debug, Clone)] +/// Neatly joins the server-generated `AttesterData` with the locally-generated `selection_proof`. +#[derive(Clone)] pub struct DutyAndProof { - /// The validator duty. - pub duty: ValidatorDuty, - /// Stores the selection proof if the duty elects the validator to be an aggregator. + pub duty: AttesterData, + /// This value is only set to `Some` if the proof indicates that the validator is an aggregator. pub selection_proof: Option, } impl DutyAndProof { - /// Computes the selection proof for `self.validator_pubkey` and `self.duty.attestation_slot`, - /// storing it in `self.selection_proof` _if_ the validator is an aggregator. If the validator - /// is not an aggregator, `self.selection_proof` is set to `None`. - /// - /// ## Errors - /// - /// - `self.validator_pubkey` is not known in `validator_store`. - /// - There's an arith error during computation. - pub fn compute_selection_proof( - &mut self, + /// Instantiate `Self`, computing the selection proof as well. 
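+    ///
+    /// A usage sketch (`attester_data`, `store` and `spec` are illustrative local names):
+    ///
+    /// ```ignore
+    /// let duty_and_proof = DutyAndProof::new(attester_data, &store, &spec)?;
+    /// if duty_and_proof.selection_proof.is_some() {
+    ///     // This validator was also elected to aggregate for its committee at this slot.
+    /// }
+    /// ```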
+ pub fn new( + duty: AttesterData, validator_store: &ValidatorStore, spec: &ChainSpec, - ) -> Result<(), String> { - let (committee_length, slot) = if let (Some(count), Some(slot)) = - (self.duty.committee_length, self.duty.attestation_slot) - { - (count as usize, slot) - } else { - // If there are no attester duties we assume the validator is inactive. - self.selection_proof = None; - return Ok(()); - }; - + ) -> Result { let selection_proof = validator_store - .produce_selection_proof(&self.duty.validator_pubkey, slot) - .ok_or("Failed to produce selection proof")?; + .produce_selection_proof(&duty.pubkey, duty.slot) + .ok_or(Error::FailedToProduceSelectionProof)?; - self.selection_proof = selection_proof - .is_aggregator(committee_length, spec) - .map_err(|e| format!("Invalid modulo: {:?}", e)) + let selection_proof = selection_proof + .is_aggregator(duty.committee_length as usize, spec) + .map_err(Error::InvalidModulo) .map(|is_aggregator| { if is_aggregator { Some(selection_proof) } else { + // Don't bother storing the selection proof if the validator isn't an + // aggregator, we won't need it. None } })?; - Ok(()) - } - - /// Returns `true` if the two `Self` instances would result in the same beacon subscription. - pub fn subscription_eq(&self, other: &Self) -> bool { - self.selection_proof_eq(other) - && self.duty.validator_index == other.duty.validator_index - && self.duty.attestation_committee_index == other.duty.attestation_committee_index - && self.duty.attestation_slot == other.duty.attestation_slot - } - - /// Returns `true` if the selection proof between `self` and `other` _should_ be equal. - /// - /// It's important to note that this doesn't actually check `self.selection_proof`, instead it - /// checks to see if the inputs to computing the selection proof are equal. - fn selection_proof_eq(&self, other: &Self) -> bool { - self.duty.committee_count_at_slot == other.duty.committee_count_at_slot - && self.duty.attestation_slot == other.duty.attestation_slot - } - - /// Returns the information required for an attesting validator, if they are scheduled to - /// attest. - pub fn attestation_duties(&self) -> Option<(Slot, CommitteeIndex, usize, u64, u64, u64)> { - Some(( - self.duty.attestation_slot?, - self.duty.attestation_committee_index?, - self.duty.attestation_committee_position?, - self.duty.validator_index?, - self.duty.committee_count_at_slot?, - self.duty.committee_length?, - )) - } - - pub fn validator_pubkey(&self) -> &PublicKey { - &self.duty.validator_pubkey - } - - pub fn validator_index(&self) -> Option { - self.duty.validator_index - } -} - -impl Into for ValidatorDuty { - fn into(self) -> DutyAndProof { - DutyAndProof { - duty: self, - selection_proof: None, - } - } -} - -/// The outcome of inserting some `ValidatorDuty` into the `DutiesStore`. -#[derive(PartialEq, Debug, Clone)] -enum InsertOutcome { - /// These are the first duties received for this validator. - NewValidator, - /// The duties for this given epoch were previously unknown and have been stored. - NewEpoch, - /// The duties were identical to some already in the store. - Identical, - /// The duties informed us of new proposal slots but were otherwise identical. - NewProposalSlots, - /// There were duties for this validator and epoch in the store that were different to the ones - /// provided. The existing duties were replaced. - Replaced { should_resubscribe: bool }, - /// The given duties were invalid. 
- Invalid, -} - -impl InsertOutcome { - /// Returns `true` if the outcome indicates that the validator _might_ require a subscription. - pub fn is_subscription_candidate(&self) -> bool { - match self { - InsertOutcome::Replaced { should_resubscribe } => *should_resubscribe, - InsertOutcome::NewValidator | InsertOutcome::NewEpoch => true, - InsertOutcome::Identical | InsertOutcome::Invalid | InsertOutcome::NewProposalSlots => { - false - } - } - } -} - -#[derive(Default)] -pub struct DutiesStore { - store: RwLock, -} - -impl DutiesStore { - /// Returns the total number of validators that should propose in the given epoch. - fn proposer_count(&self, epoch: Epoch) -> usize { - self.store - .read() - .iter() - .filter(|(_validator_pubkey, validator_map)| { - validator_map - .get(&epoch) - .map(|duties| { - duties - .duty - .block_proposal_slots - .as_ref() - .map_or(false, |proposal_slots| !proposal_slots.is_empty()) - }) - .unwrap_or(false) - }) - .count() - } - - /// Returns the total number of validators that should attest in the given epoch. - fn attester_count(&self, epoch: Epoch) -> usize { - self.store - .read() - .iter() - .filter(|(_validator_pubkey, validator_map)| { - validator_map - .get(&epoch) - .map(|duties| duties.duty.attestation_slot.is_some()) - .unwrap_or_else(|| false) - }) - .count() - } - - fn block_proposers(&self, slot: Slot, slots_per_epoch: u64) -> Vec { - self.store - .read() - .iter() - // As long as a `HashMap` iterator does not return duplicate keys, neither will this - // function. - .filter_map(|(_validator_pubkey, validator_map)| { - let epoch = slot.epoch(slots_per_epoch); - - validator_map.get(&epoch).and_then(|duties| { - if duties.duty.block_proposal_slots.as_ref()?.contains(&slot) { - Some(duties.duty.validator_pubkey.clone()) - } else { - None - } - }) - }) - .collect() - } - - fn attesters(&self, slot: Slot, slots_per_epoch: u64) -> Vec { - self.store - .read() - .iter() - // As long as a `HashMap` iterator does not return duplicate keys, neither will this - // function. - .filter_map(|(_validator_pubkey, validator_map)| { - let epoch = slot.epoch(slots_per_epoch); - - validator_map.get(&epoch).and_then(|duties| { - if duties.duty.attestation_slot == Some(slot) { - Some(duties) - } else { - None - } - }) - }) - .cloned() - .collect() - } - - fn get_index(&self, pubkey: &PublicKey, epoch: Epoch) -> Option { - self.store - .read() - .get(pubkey)? - .get(&epoch)? - .validator_index() - } - - fn is_aggregator(&self, validator_pubkey: &PublicKey, epoch: Epoch) -> Option { - Some( - self.store - .read() - .get(validator_pubkey)? - .get(&epoch)? - .selection_proof - .is_some(), - ) - } - - fn insert( - &self, - epoch: Epoch, - mut duties: DutyAndProof, - slots_per_epoch: u64, - validator_store: &ValidatorStore, - spec: &ChainSpec, - ) -> Result { - let mut store = self.store.write(); - - if !duties_match_epoch(&duties.duty, epoch, slots_per_epoch) { - return Ok(InsertOutcome::Invalid); - } - - // TODO: refactor with Entry. 
- - if let Some(validator_map) = store.get_mut(&duties.duty.validator_pubkey) { - if let Some(known_duties) = validator_map.get_mut(&epoch) { - if known_duties.duty.eq_ignoring_proposal_slots(&duties.duty) { - if known_duties.duty.block_proposal_slots == duties.duty.block_proposal_slots { - Ok(InsertOutcome::Identical) - } else if duties.duty.block_proposal_slots.is_some() { - known_duties.duty.block_proposal_slots = duties.duty.block_proposal_slots; - Ok(InsertOutcome::NewProposalSlots) - } else { - Ok(InsertOutcome::Invalid) - } - } else { - // Compute the selection proof. - duties.compute_selection_proof(validator_store, spec)?; - - // Determine if a re-subscription is required. - let should_resubscribe = !duties.subscription_eq(known_duties); - - // Replace the existing duties. - *known_duties = duties; - - Ok(InsertOutcome::Replaced { should_resubscribe }) - } - } else { - // Compute the selection proof. - duties.compute_selection_proof(validator_store, spec)?; - - validator_map.insert(epoch, duties); - - Ok(InsertOutcome::NewEpoch) - } - } else { - // Compute the selection proof. - duties.compute_selection_proof(validator_store, spec)?; - - let validator_pubkey = duties.duty.validator_pubkey.clone(); - - let mut validator_map = HashMap::new(); - validator_map.insert(epoch, duties); - - store.insert(validator_pubkey, validator_map); - - Ok(InsertOutcome::NewValidator) - } - } - - fn prune(&self, prior_to: Epoch) { - self.store - .write() - .retain(|_validator_pubkey, validator_map| { - validator_map.retain(|epoch, _duties| *epoch >= prior_to); - !validator_map.is_empty() - }); - } -} - -pub struct DutiesServiceBuilder { - validator_store: Option>, - slot_clock: Option, - beacon_nodes: Option>>, - allow_unsynced_beacon_node: bool, - context: Option>, -} - -impl DutiesServiceBuilder { - pub fn new() -> Self { - Self { - validator_store: None, - slot_clock: None, - beacon_nodes: None, - allow_unsynced_beacon_node: false, - context: None, - } - } - - pub fn validator_store(mut self, store: ValidatorStore) -> Self { - self.validator_store = Some(store); - self - } - - pub fn slot_clock(mut self, slot_clock: T) -> Self { - self.slot_clock = Some(slot_clock); - self - } - - pub fn beacon_nodes(mut self, beacon_nodes: Arc>) -> Self { - self.beacon_nodes = Some(beacon_nodes); - self - } - - pub fn allow_unsynced_beacon_node(mut self, allow_unsynced_beacon_node: bool) -> Self { - self.allow_unsynced_beacon_node = allow_unsynced_beacon_node; - self - } - - pub fn runtime_context(mut self, context: RuntimeContext) -> Self { - self.context = Some(context); - self - } - - pub fn build(self) -> Result, String> { - Ok(DutiesService { - inner: Arc::new(Inner { - store: Arc::new(DutiesStore::default()), - validator_store: self - .validator_store - .ok_or("Cannot build DutiesService without validator_store")?, - slot_clock: self - .slot_clock - .ok_or("Cannot build DutiesService without slot_clock")?, - beacon_nodes: self - .beacon_nodes - .ok_or("Cannot build DutiesService without beacon_node")?, - allow_unsynced_beacon_node: self.allow_unsynced_beacon_node, - context: self - .context - .ok_or("Cannot build DutiesService without runtime_context")?, - }), + Ok(Self { + duty, + selection_proof, }) } } -/// Helper to minimise `Arc` usage. 
-pub struct Inner {
-    store: Arc,
-    validator_store: ValidatorStore,
-    pub(crate) slot_clock: T,
-    pub(crate) beacon_nodes: Arc>,
-    allow_unsynced_beacon_node: bool,
-    context: RuntimeContext,
-}
+/// To assist with readability, the dependent root for attester/proposer duties.
+type DependentRoot = Hash256;

-/// Maintains a store of the duties for all voting validators in the `validator_store`.
-///
-/// Polls the beacon node at the start of each slot, collecting duties for the current and next
-/// epoch. The duties service notifies the block production service to run each time it completes,
-/// so it *must* be run every slot.
+type AttesterMap = HashMap>;
+type ProposerMap = HashMap)>;
+type IndicesMap = HashMap;
+
+/// See the module-level documentation.
 pub struct DutiesService {
-    inner: Arc>,
-}
-
-impl Clone for DutiesService {
-    fn clone(&self) -> Self {
-        Self {
-            inner: self.inner.clone(),
-        }
-    }
-}
-
-impl Deref for DutiesService {
-    type Target = Inner;
-
-    fn deref(&self) -> &Self::Target {
-        self.inner.deref()
-    }
+    /// Maps a validator public key to their duties for each epoch.
+    pub attesters: RwLock,
+    /// Maps an epoch to all *local* proposers in this epoch. Notably, this does not contain
+    /// proposals for any validators which are not registered locally.
+    pub proposers: RwLock,
+    /// Maps a public key to a validator index. There is a task which ensures this map is kept
+    /// up-to-date.
+    pub indices: RwLock,
+    /// Provides the canonical list of locally-managed validators.
+    pub validator_store: ValidatorStore,
+    /// Tracks the current slot.
+    pub slot_clock: T,
+    /// Provides HTTP access to remote beacon nodes.
+    pub beacon_nodes: Arc>,
+    /// Controls whether or not this service will refuse to interact with non-synced beacon nodes.
+    ///
+    /// This functionality is a little redundant since most BNs will likely reject duties when they
+    /// aren't synced, but we keep it around for an emergency.
+    pub require_synced: RequireSynced,
+    pub context: RuntimeContext,
+    pub spec: ChainSpec,
 }

 impl DutiesService {
@@ -433,326 +119,634 @@ impl DutiesService {
     /// Returns the total number of validators that should propose in the given epoch.
     pub fn proposer_count(&self, epoch: Epoch) -> usize {
-        self.store.proposer_count(epoch)
+        self.proposers
+            .read()
+            .get(&epoch)
+            .map_or(0, |(_, proposers)| proposers.len())
     }

     /// Returns the total number of validators that should attest in the given epoch.
     pub fn attester_count(&self, epoch: Epoch) -> usize {
-        self.store.attester_count(epoch)
+        self.attesters
+            .read()
+            .iter()
+            .filter(|(_, map)| map.contains_key(&epoch))
+            .count()
     }

     /// Returns the pubkeys of the validators which are assigned to propose in the given slot.
     ///
     /// It is possible that multiple validators have an identical proposal slot, however that is
     /// likely the result of heavy forking (lol) or inconsistent beacon node connections.
-    pub fn block_proposers(&self, slot: Slot) -> Vec {
-        self.store.block_proposers(slot, E::slots_per_epoch())
+    pub fn block_proposers(&self, slot: Slot) -> HashSet {
+        let epoch = slot.epoch(E::slots_per_epoch());
+
+        self.proposers
+            .read()
+            .get(&epoch)
+            .map(|(_, proposers)| {
+                proposers
+                    .iter()
+                    .filter(|proposer_data| proposer_data.slot == slot)
+                    .map(|proposer_data| proposer_data.pubkey)
+                    .collect()
+            })
+            .unwrap_or_default()
     }

     /// Returns all `DutyAndProof` for the given `slot`.
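    ///
    /// The underlying storage is roughly `pubkey -> epoch -> (dependent_root, DutyAndProof)`,
    /// so this is a filter over every local validator's entry for `slot`'s epoch. A sketch of
    /// the per-validator lookup (names illustrative):
    ///
    /// ```ignore
    /// let epoch = slot.epoch(E::slots_per_epoch());
    /// attesters
    ///     .get(&pubkey)
    ///     .and_then(|map| map.get(&epoch))
    ///     .filter(|(_, dap)| dap.duty.slot == slot);
    /// ```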
pub fn attesters(&self, slot: Slot) -> Vec { - self.store.attesters(slot, E::slots_per_epoch()) + let epoch = slot.epoch(E::slots_per_epoch()); + + self.attesters + .read() + .iter() + .filter_map(|(_, map)| map.get(&epoch)) + .map(|(_, duty_and_proof)| duty_and_proof) + .filter(|duty_and_proof| duty_and_proof.duty.slot == slot) + .cloned() + .collect() } +} - /// Start the service that periodically polls the beacon node for validator duties. - pub fn start_update_service( - self, - mut block_service_tx: Sender, - spec: Arc, - ) -> Result<(), String> { - let duration_to_next_slot = self - .slot_clock - .duration_to_next_slot() - .ok_or("Unable to determine duration to next slot")?; - - let mut interval = { - let slot_duration = Duration::from_secs(spec.seconds_per_slot); - // Note: `interval_at` panics if `slot_duration` is 0 - interval_at( - Instant::now() + duration_to_next_slot + TIME_DELAY_FROM_SLOT, - slot_duration, - ) - }; - - // Run an immediate update before starting the updater service. - let duties_service = self.clone(); - let mut block_service_tx_clone = block_service_tx.clone(); - let inner_spec = spec.clone(); - self.inner.context.executor.spawn( - async move { - duties_service - .do_update(&mut block_service_tx_clone, &inner_spec) - .await - }, - "duties update", - ); - - let executor = self.inner.context.executor.clone(); - - let interval_fut = async move { +/// Start the service that periodically polls the beacon node for validator duties. This will start +/// several sub-services. +/// +/// ## Notes +/// +/// The loops in this function are structured such that a new instance of that task will only start +/// once the current one is finished. This means that if a task happens to take more than one slot +/// to run, we might skip a slot. This is unfortunate, however the alternative is to *always* +/// process every slot, which has the chance of creating a theoretically unlimited backlog of tasks. +/// It was a conscious decision to choose to drop tasks on an overloaded/latent system rather than +/// overload it even more. +pub fn start_update_service( + core_duties_service: Arc>, + mut block_service_tx: Sender, +) { + /* + * Spawn the task which updates the map of pubkey to validator index. + */ + let duties_service = core_duties_service.clone(); + core_duties_service.context.executor.spawn( + async move { loop { - interval.tick().await; - self.clone().do_update(&mut block_service_tx, &spec).await; - } - }; + // Run this poll before the wait, this should hopefully download all the indices + // before the block/attestation tasks need them. + poll_validator_indices(&duties_service).await; - executor.spawn(interval_fut, "duties_service"); - - Ok(()) - } - - /// Attempt to download the duties of all managed validators for this epoch and the next. 
- async fn do_update( - self, - block_service_tx: &mut Sender, - spec: &ChainSpec, - ) { - let log = self.context.log(); - let _timer = - metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::FULL_UPDATE]); - - let slot = if let Some(slot) = self.slot_clock.now() { - slot - } else { - error!(log, "Duties manager failed to read slot clock"); - return; - }; - - let current_epoch = slot.epoch(E::slots_per_epoch()); - - if slot % E::slots_per_epoch() == 0 { - let prune_below = current_epoch - PRUNE_DEPTH; - - trace!( - log, - "Pruning duties cache"; - "pruning_below" => prune_below.as_u64(), - "current_epoch" => current_epoch.as_u64(), - ); - - self.store.prune(prune_below); - } - - // Update duties for the current epoch, but keep running if there's an error: - // block production or the next epoch update could still succeed. - if let Err(e) = self - .clone() - .update_epoch(current_epoch, current_epoch, spec) - .await - { - error!( - log, - "Failed to get current epoch duties"; - "http_error" => format!("{:?}", e) - ); - } - - // Notify the block service to produce a block. - if let Err(e) = block_service_tx - .send(BlockServiceNotification { - slot, - block_proposers: self.block_proposers(slot), - }) - .await - { - error!( - log, - "Failed to notify block service"; - "error" => format!("{:?}", e) - ); - }; - - // Update duties for the next epoch. - if let Err(e) = self - .clone() - .update_epoch(current_epoch, current_epoch + 1, spec) - .await - { - error!( - log, - "Failed to get next epoch duties"; - "http_error" => format!("{:?}", e) - ); - } - } - - /// Attempt to download the duties of all managed validators for the given `request_epoch`. The - /// `current_epoch` should be a local reading of the slot clock. - async fn update_epoch( - self, - current_epoch: Epoch, - request_epoch: Epoch, - spec: &ChainSpec, - ) -> Result<(), String> { - let log = self.context.log(); - - let maybe_require_synced = if self.allow_unsynced_beacon_node { - RequireSynced::No - } else { - RequireSynced::Yes - }; - - let mut new_validator = 0; - let mut new_epoch = 0; - let mut new_proposal_slots = 0; - let mut identical = 0; - let mut replaced = 0; - let mut invalid = 0; - - // Determine which pubkeys we already know the index of by checking the duties store for - // the current epoch. - let pubkeys: Vec<(PublicKey, Option)> = self - .validator_store - .voting_pubkeys() - .into_iter() - .map(|pubkey| { - let index = self.store.get_index(&pubkey, current_epoch); - (pubkey, index) - }) - .collect(); - - let mut validator_subscriptions = vec![]; - let pubkeys_ref = &pubkeys; - let remote_duties: Vec = match self - .beacon_nodes - .first_success(maybe_require_synced, |beacon_node| async move { - ValidatorDuty::download( - &beacon_node, - current_epoch, - request_epoch, - pubkeys_ref.clone(), - &log, - ) - .await - }) - .await - { - Ok(duties) => duties, - Err(e) => { - error!( - log, - "Failed to download validator duties"; - "error" => %e - ); - vec![] - } - }; - - remote_duties.iter().for_each(|remote_duty| { - // Convert the remote duties into our local representation. - let duties: DutyAndProof = remote_duty.clone().into(); - - let validator_pubkey = duties.duty.validator_pubkey.clone(); - - // Attempt to update our local store. 
- match self.store.insert( - request_epoch, - duties, - E::slots_per_epoch(), - &self.validator_store, - spec, - ) { - Ok(outcome) => { - match &outcome { - InsertOutcome::NewValidator => { - debug!( - log, - "First duty assignment for validator"; - "proposal_slots" => format!("{:?}", &remote_duty.block_proposal_slots), - "attestation_slot" => format!("{:?}", &remote_duty.attestation_slot), - "validator" => format!("{:?}", &remote_duty.validator_pubkey) - ); - new_validator += 1; - } - InsertOutcome::NewProposalSlots => new_proposal_slots += 1, - InsertOutcome::NewEpoch => new_epoch += 1, - InsertOutcome::Identical => identical += 1, - InsertOutcome::Replaced { .. } => replaced += 1, - InsertOutcome::Invalid => invalid += 1, - } - - if let Some(is_aggregator) = - self.store.is_aggregator(&validator_pubkey, request_epoch) - { - if outcome.is_subscription_candidate() { - if let Some(subscription) = remote_duty.subscription(is_aggregator) { - validator_subscriptions.push(subscription) - } - } - } + if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() { + sleep(duration).await; + } else { + // Just sleep for one slot if we are unable to read the system clock, this gives + // us an opportunity for the clock to eventually come good. + sleep(duties_service.slot_clock.slot_duration()).await; } - Err(e) => error!( - log, - "Unable to store duties"; - "error" => e - ), } - }); + }, + "duties_service_indices", + ); - if invalid > 0 { - error!( - log, - "Received invalid duties from beacon node"; - "bad_duty_count" => invalid, - "info" => "Duties are from wrong epoch." - ) - } + /* + * Spawn the task which keeps track of local block proposal duties. + */ + let duties_service = core_duties_service.clone(); + let log = core_duties_service.context.log().clone(); + core_duties_service.context.executor.spawn( + async move { + loop { + if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() { + sleep(duration).await; + } else { + // Just sleep for one slot if we are unable to read the system clock, this gives + // us an opportunity for the clock to eventually come good. + sleep(duties_service.slot_clock.slot_duration()).await; + continue; + } - trace!( - log, - "Performed duties update"; - "identical" => identical, - "new_epoch" => new_epoch, - "new_proposal_slots" => new_proposal_slots, - "new_validator" => new_validator, - "replaced" => replaced, - "epoch" => format!("{}", request_epoch) - ); + if let Err(e) = poll_beacon_proposers(&duties_service, &mut block_service_tx).await + { + error!( + log, + "Failed to poll beacon proposers"; + "error" => ?e + ) + } + } + }, + "duties_service_proposers", + ); - if replaced > 0 { - warn!( - log, - "Duties changed during routine update"; - "info" => "Chain re-org likely occurred", - "replaced" => replaced, - ) - } + /* + * Spawn the task which keeps track of local attestation duties. + */ + let duties_service = core_duties_service.clone(); + let log = core_duties_service.context.log().clone(); + core_duties_service.context.executor.spawn( + async move { + loop { + if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() { + sleep(duration).await; + } else { + // Just sleep for one slot if we are unable to read the system clock, this gives + // us an opportunity for the clock to eventually come good. 
+ sleep(duties_service.slot_clock.slot_duration()).await; + continue; + } - let log = self.context.log().clone(); - let count = validator_subscriptions.len(); + if let Err(e) = poll_beacon_attesters(&duties_service).await { + error!( + log, + "Failed to poll beacon attesters"; + "error" => ?e + ); + } + } + }, + "duties_service_attesters", + ); +} - if count == 0 { - debug!(log, "No new subscriptions required"); - } else { - let validator_subscriptions_ref = &validator_subscriptions; - self.beacon_nodes - .first_success(RequireSynced::No, |beacon_node| async move { +/// Iterate through all the voting pubkeys in the `ValidatorStore` and attempt to learn any unknown +/// validator indices. +async fn poll_validator_indices( + duties_service: &DutiesService, +) { + let _timer = + metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_INDICES]); + + let log = duties_service.context.log(); + for pubkey in duties_service.validator_store.voting_pubkeys() { + // This is on its own line to avoid some weirdness with locks and if statements. + let is_known = duties_service.indices.read().contains_key(&pubkey); + + if !is_known { + // Query the remote BN to resolve a pubkey to a validator index. + let download_result = duties_service + .beacon_nodes + .first_success(duties_service.require_synced, |beacon_node| async move { beacon_node - .post_validator_beacon_committee_subscriptions(validator_subscriptions_ref) + .get_beacon_states_validator_id( + StateId::Head, + &ValidatorId::PublicKey(pubkey), + ) .await }) - .await - .map_err(|e| format!("Failed to subscribe validators: {}", e))?; + .await; - debug!( - log, - "Successfully subscribed validators"; - "count" => count - ); + match download_result { + Ok(Some(response)) => { + info!( + log, + "Validator exists in beacon chain"; + "pubkey" => ?pubkey, + "validator_index" => response.data.index + ); + duties_service + .indices + .write() + .insert(pubkey, response.data.index); + } + // This is not necessarily an error, it just means the validator is not yet known to + // the beacon chain. + Ok(None) => { + debug!( + log, + "Validator without index"; + "pubkey" => ?pubkey + ) + } + // Don't exit early on an error, keep attempting to resolve other indices. + Err(e) => { + error!( + log, + "Failed to resolve pubkey to index"; + "error" => %e, + "pubkey" => ?pubkey, + ) + } + } } - - Ok(()) } } -/// Returns `true` if the slots in the `duties` are from the given `epoch` -fn duties_match_epoch(duties: &ValidatorDuty, epoch: Epoch, slots_per_epoch: u64) -> bool { - duties - .attestation_slot - .map_or(true, |slot| slot.epoch(slots_per_epoch) == epoch) - && duties.block_proposal_slots.as_ref().map_or(true, |slots| { - slots - .iter() - .all(|slot| slot.epoch(slots_per_epoch) == epoch) - }) +/// Query the beacon node for attestation duties for any known validators. +/// +/// This function will perform (in the following order): +/// +/// 1. Poll for current-epoch duties and update the local `duties_service.attesters` map. +/// 2. As above, but for the next-epoch. +/// 3. Push out any attestation subnet subscriptions to the BN. +/// 4. Prune old entries from `duties_service.attesters`. 
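+///
+/// For example, with `HISTORICAL_DUTIES_EPOCHS == 2` and a current epoch of 10, step 4
+/// retains entries for epochs 8, 9 and 10 (and any future ones), since an entry for epoch
+/// `e` survives while `e + 2 >= 10`.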
+async fn poll_beacon_attesters(
+    duties_service: &DutiesService,
+) -> Result<(), Error> {
+    let current_epoch_timer = metrics::start_timer_vec(
+        &metrics::DUTIES_SERVICE_TIMES,
+        &[metrics::UPDATE_ATTESTERS_CURRENT_EPOCH],
+    );
+
+    let log = duties_service.context.log();
+
+    let current_slot = duties_service
+        .slot_clock
+        .now()
+        .ok_or(Error::UnableToReadSlotClock)?;
+    let current_epoch = current_slot.epoch(E::slots_per_epoch());
+    let next_epoch = current_epoch + 1;
+
+    let local_pubkeys: HashSet = duties_service
+        .validator_store
+        .voting_pubkeys()
+        .into_iter()
+        .collect();
+
+    let local_indices = {
+        let mut local_indices = Vec::with_capacity(local_pubkeys.len());
+        let indices_map = duties_service.indices.read();
+        for &pubkey in &local_pubkeys {
+            if let Some(validator_index) = indices_map.get(&pubkey) {
+                local_indices.push(*validator_index)
+            }
+        }
+        local_indices
+    };
+
+    // Download the duties and update the duties for the current epoch.
+    if let Err(e) = poll_beacon_attesters_for_epoch(
+        &duties_service,
+        current_epoch,
+        &local_indices,
+        &local_pubkeys,
+    )
+    .await
+    {
+        error!(
+            log,
+            "Failed to download attester duties";
+            "current_epoch" => current_epoch,
+            "request_epoch" => current_epoch,
+            "err" => ?e,
+        )
+    }
+
+    drop(current_epoch_timer);
+    let next_epoch_timer = metrics::start_timer_vec(
+        &metrics::DUTIES_SERVICE_TIMES,
+        &[metrics::UPDATE_ATTESTERS_NEXT_EPOCH],
+    );
+
+    // Download the duties and update the duties for the next epoch.
+    if let Err(e) =
+        poll_beacon_attesters_for_epoch(&duties_service, next_epoch, &local_indices, &local_pubkeys)
+            .await
+    {
+        error!(
+            log,
+            "Failed to download attester duties";
+            "current_epoch" => current_epoch,
+            "request_epoch" => next_epoch,
+            "err" => ?e,
+        )
+    }
+
+    drop(next_epoch_timer);
+    let subscriptions_timer =
+        metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]);
+
+    // This vector is likely to be a little oversized, but it won't reallocate.
+    let mut subscriptions = Vec::with_capacity(local_pubkeys.len() * 2);
+
+    // For this epoch and the next epoch, produce any beacon committee subscriptions.
+    //
+    // We are *always* pushing out subscriptions, even if we've subscribed before. This is
+    // potentially excessive on the BN in normal cases, but it will help with fast re-subscriptions
+    // if the BN goes offline or we swap to a different one.
+    for epoch in &[current_epoch, next_epoch] {
+        duties_service
+            .attesters
+            .read()
+            .iter()
+            .filter_map(|(_, map)| map.get(epoch))
+            // The BN logs a warning if we try and subscribe to current or near-by slots. Give it a
+            // buffer.
+            .filter(|(_, duty_and_proof)| {
+                current_slot + SUBSCRIPTION_BUFFER_SLOTS < duty_and_proof.duty.slot
+            })
+            .for_each(|(_, duty_and_proof)| {
+                let duty = &duty_and_proof.duty;
+                let is_aggregator = duty_and_proof.selection_proof.is_some();
+
+                subscriptions.push(BeaconCommitteeSubscription {
+                    validator_index: duty.validator_index,
+                    committee_index: duty.committee_index,
+                    committees_at_slot: duty.committees_at_slot,
+                    slot: duty.slot,
+                    is_aggregator,
+                })
+            });
+    }
+
+    // If there are any subscriptions, push them out to the beacon node.
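+    // (Note the buffer arithmetic above: with `SUBSCRIPTION_BUFFER_SLOTS == 2`, a duty at
+    // slot 105 is only pushed while the current slot is at most 102, since inclusion
+    // requires `current_slot + 2 < 105`.)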
+ if !subscriptions.is_empty() { + let subscriptions_ref = &subscriptions; + if let Err(e) = duties_service + .beacon_nodes + .first_success(duties_service.require_synced, |beacon_node| async move { + beacon_node + .post_validator_beacon_committee_subscriptions(subscriptions_ref) + .await + }) + .await + { + error!( + log, + "Failed to subscribe validators"; + "error" => %e + ) + } + } + + drop(subscriptions_timer); + + // Prune old duties. + duties_service + .attesters + .write() + .iter_mut() + .for_each(|(_, map)| { + map.retain(|&epoch, _| epoch + HISTORICAL_DUTIES_EPOCHS >= current_epoch) + }); + + Ok(()) +} + +/// For the given `local_indices` and `local_pubkeys`, download the duties for the given `epoch` and +/// store them in `duties_service.attesters`. +async fn poll_beacon_attesters_for_epoch( + duties_service: &DutiesService, + epoch: Epoch, + local_indices: &[u64], + local_pubkeys: &HashSet, +) -> Result<(), Error> { + let log = duties_service.context.log(); + + // No need to bother the BN if we don't have any validators. + if local_indices.is_empty() { + debug!( + duties_service.context.log(), + "No validators, not downloading duties"; + "epoch" => epoch, + ); + return Ok(()); + } + + let fetch_timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::UPDATE_ATTESTERS_FETCH], + ); + + let response = duties_service + .beacon_nodes + .first_success(duties_service.require_synced, |beacon_node| async move { + beacon_node + .post_validator_duties_attester(epoch, local_indices) + .await + }) + .await + .map_err(|e| Error::FailedToDownloadAttesters(e.to_string()))?; + + drop(fetch_timer); + let _store_timer = metrics::start_timer_vec( + &metrics::DUTIES_SERVICE_TIMES, + &[metrics::UPDATE_ATTESTERS_STORE], + ); + + let dependent_root = response.dependent_root; + + let relevant_duties = response + .data + .into_iter() + .filter(|attester_duty| local_pubkeys.contains(&attester_duty.pubkey)) + .collect::>(); + + debug!( + log, + "Downloaded attester duties"; + "dependent_root" => %dependent_root, + "num_relevant_duties" => relevant_duties.len(), + ); + + let mut already_warned = Some(()); + let mut attesters_map = duties_service.attesters.write(); + for duty in relevant_duties { + let attesters_map = attesters_map.entry(duty.pubkey).or_default(); + + // Only update the duties if either is true: + // + // - There were no known duties for this epoch. + // - The dependent root has changed, signalling a re-org. + if attesters_map + .get(&epoch) + .map_or(true, |(prior, _)| *prior != dependent_root) + { + let duty_and_proof = + DutyAndProof::new(duty, &duties_service.validator_store, &duties_service.spec)?; + + if let Some((prior_dependent_root, _)) = + attesters_map.insert(epoch, (dependent_root, duty_and_proof)) + { + // Using `already_warned` avoids excessive logs. + if dependent_root != prior_dependent_root && already_warned.take().is_some() { + warn!( + log, + "Attester duties re-org"; + "prior_dependent_root" => %prior_dependent_root, + "dependent_root" => %dependent_root, + "msg" => "this may happen from time to time" + ) + } + } + } + } + // Drop the write-lock. + // + // This is strictly unnecessary since the function ends immediately afterwards, but we remain + // defensive regardless. + drop(attesters_map); + + Ok(()) +} + +/// Download the proposer duties for the current epoch and store them in `duties_service.proposers`. +/// If there are any proposer for this slot, send out a notification to the block proposers. 
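+/// (Proposer duties are cached per epoch as `epoch -> (dependent_root, Vec<ProposerData>)`,
+/// mirroring the attester map; a change in `dependent_root` between polls indicates that the
+/// proposer shuffling has been re-orged.)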
+///
+/// ## Note
+///
+/// This function will potentially send *two* notifications to the `BlockService`; it will send a
+/// notification initially, then it will download the latest duties and send a *second* notification
+/// if those duties have changed. This behaviour simultaneously achieves the following:
+///
+/// 1. Block production can happen immediately and does not have to wait for the proposer duties to
+///    download.
+/// 2. We won't miss a block if the duties for the current slot happen to change with this poll.
+///
+/// This sounds great, but is it safe? Firstly, the additional notification will only contain block
+/// producers that were not included in the first notification. This should be safe enough.
+/// However, we also have the slashing protection as a second line of defence. These two factors
+/// provide an acceptable level of safety.
+///
+/// It's important to note that since there is a 0-epoch look-ahead (i.e., no look-ahead) for block
+/// proposers then it's very likely that a proposal for the first slot of the epoch will need to go
+/// through the slow path every time. I.e., the proposal will only happen after we've been able to
+/// download and process the duties from the BN. This means it is very important to ensure this
+/// function is as fast as possible.
+async fn poll_beacon_proposers(
+    duties_service: &DutiesService,
+    block_service_tx: &mut Sender,
+) -> Result<(), Error> {
+    let _timer =
+        metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_PROPOSERS]);
+
+    let log = duties_service.context.log();
+
+    let current_slot = duties_service
+        .slot_clock
+        .now()
+        .ok_or(Error::UnableToReadSlotClock)?;
+    let current_epoch = current_slot.epoch(E::slots_per_epoch());
+
+    // Notify the block proposal service for any proposals that we have in our cache.
+    //
+    // See the function-level documentation for more information.
+    let initial_block_proposers = duties_service.block_proposers(current_slot);
+    notify_block_production_service(
+        current_slot,
+        &initial_block_proposers,
+        block_service_tx,
+        &log,
+    )
+    .await;
+
+    let local_pubkeys: HashSet = duties_service
+        .validator_store
+        .voting_pubkeys()
+        .into_iter()
+        .collect();
+
+    // Only download duties and push out additional block production events if we have some
+    // validators.
+    if !local_pubkeys.is_empty() {
+        let download_result = duties_service
+            .beacon_nodes
+            .first_success(duties_service.require_synced, |beacon_node| async move {
+                beacon_node
+                    .get_validator_duties_proposer(current_epoch)
+                    .await
+            })
+            .await;
+
+        match download_result {
+            Ok(response) => {
+                let dependent_root = response.dependent_root;
+
+                let relevant_duties = response
+                    .data
+                    .into_iter()
+                    .filter(|proposer_duty| local_pubkeys.contains(&proposer_duty.pubkey))
+                    .collect::>();
+
+                debug!(
+                    log,
+                    "Downloaded proposer duties";
+                    "dependent_root" => %dependent_root,
+                    "num_relevant_duties" => relevant_duties.len(),
+                );
+
+                if let Some((prior_dependent_root, _)) = duties_service
+                    .proposers
+                    .write()
+                    .insert(current_epoch, (dependent_root, relevant_duties))
+                {
+                    if dependent_root != prior_dependent_root {
+                        warn!(
+                            log,
+                            "Proposer duties re-org";
+                            "prior_dependent_root" => %prior_dependent_root,
+                            "dependent_root" => %dependent_root,
+                            "msg" => "this may happen from time to time"
+                        )
+                    }
+                }
+            }
+            // Don't return early here, we still want to try and produce blocks using the cached values.
+ Err(e) => error!( + log, + "Failed to download proposer duties"; + "err" => %e, + ), + } + + // Compute the block proposers for this slot again, now that we've received an update from + // the BN. + // + // Then, compute the difference between these two sets to obtain a set of block proposers + // which were not included in the initial notification to the `BlockService`. + let additional_block_producers = duties_service + .block_proposers(current_slot) + .difference(&initial_block_proposers) + .copied() + .collect::>(); + + // If there are any new proposers for this slot, send a notification so they produce a + // block. + // + // See the function-level documentation for more reasoning about this behaviour. + if !additional_block_producers.is_empty() { + notify_block_production_service( + current_slot, + &additional_block_producers, + block_service_tx, + &log, + ) + .await; + debug!( + log, + "Detected new block proposer"; + "current_slot" => current_slot, + ); + metrics::inc_counter(&metrics::PROPOSAL_CHANGED); + } + } + + // Prune old duties. + duties_service + .proposers + .write() + .retain(|&epoch, _| epoch + HISTORICAL_DUTIES_EPOCHS >= current_epoch); + + Ok(()) +} + +/// Notify the block service if it should produce a block. +async fn notify_block_production_service( + current_slot: Slot, + block_proposers: &HashSet, + block_service_tx: &mut Sender, + log: &Logger, +) { + if let Err(e) = block_service_tx + .send(BlockServiceNotification { + slot: current_slot, + block_proposers: block_proposers.iter().copied().collect(), + }) + .await + { + error!( + log, + "Failed to notify block service"; + "current_slot" => current_slot, + "error" => %e + ); + }; } diff --git a/validator_client/src/fork_service.rs b/validator_client/src/fork_service.rs index 2a9bf78ef..a69edef26 100644 --- a/validator_client/src/fork_service.rs +++ b/validator_client/src/fork_service.rs @@ -4,12 +4,12 @@ use environment::RuntimeContext; use eth2::types::StateId; use futures::future::FutureExt; use parking_lot::RwLock; -use slog::Logger; use slog::{debug, trace}; +use slog::{error, Logger}; use slot_clock::SlotClock; use std::ops::Deref; use std::sync::Arc; -use tokio::time::{interval_at, Duration, Instant}; +use tokio::time::{sleep, Duration}; use types::{EthSpec, Fork}; /// Delay this period of time after the slot starts. This allows the node to process the new slot. @@ -139,33 +139,31 @@ impl ForkService { /// Starts the service that periodically polls for the `Fork`. pub fn start_update_service(self, context: &RuntimeContext) -> Result<(), String> { - let spec = &context.eth2_config.spec; - - let duration_to_next_epoch = self - .slot_clock - .duration_to_next_epoch(E::slots_per_epoch()) - .ok_or("Unable to determine duration to next epoch")?; - - let mut interval = { - let slot_duration = Duration::from_secs(spec.seconds_per_slot); - // Note: interval_at panics if `slot_duration * E::slots_per_epoch()` = 0 - interval_at( - Instant::now() + duration_to_next_epoch + TIME_DELAY_FROM_SLOT, - slot_duration * E::slots_per_epoch() as u32, - ) - }; - // Run an immediate update before starting the updater service. context .executor .spawn(self.clone().do_update().map(|_| ()), "fork service update"); let executor = context.executor.clone(); + let log = context.log().clone(); + let spec = E::default_spec(); let interval_fut = async move { loop { - interval.tick().await; + // Run this poll before the wait, this should hopefully download the fork before the + // other services need them. 
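+            // A sketch of the resulting cadence, assuming mainnet timing (12-second slots,
+            // 32-slot epochs): poll immediately, then sleep until `TIME_DELAY_FROM_SLOT`
+            // past the next epoch boundary (~6.4 minutes away), so the cached fork is
+            // refreshed once per epoch.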
self.clone().do_update().await.ok(); + + if let Some(duration_to_next_epoch) = + self.slot_clock.duration_to_next_epoch(E::slots_per_epoch()) + { + sleep(duration_to_next_epoch + TIME_DELAY_FROM_SLOT).await; + } else { + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(Duration::from_secs(spec.seconds_per_slot)).await; + continue; + } } }; diff --git a/validator_client/src/graffiti_file.rs b/validator_client/src/graffiti_file.rs index a16646d2f..75b75b148 100644 --- a/validator_client/src/graffiti_file.rs +++ b/validator_client/src/graffiti_file.rs @@ -5,7 +5,7 @@ use std::io::{prelude::*, BufReader}; use std::path::PathBuf; use std::str::FromStr; -use bls::blst_implementations::PublicKey; +use bls::blst_implementations::PublicKeyBytes; use types::{graffiti::GraffitiString, Graffiti}; #[derive(Debug)] @@ -26,7 +26,7 @@ pub enum Error { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GraffitiFile { graffiti_path: PathBuf, - graffitis: HashMap, + graffitis: HashMap, default: Option, } @@ -44,7 +44,10 @@ impl GraffitiFile { /// default graffiti. /// /// Returns an error if loading from the graffiti file fails. - pub fn load_graffiti(&mut self, public_key: &PublicKey) -> Result, Error> { + pub fn load_graffiti( + &mut self, + public_key: &PublicKeyBytes, + ) -> Result, Error> { self.read_graffiti_file()?; Ok(self.graffitis.get(public_key).copied().or(self.default)) } @@ -78,7 +81,7 @@ impl GraffitiFile { /// `Ok((None, graffiti))` represents the graffiti for the default key. /// `Ok((Some(pk), graffiti))` represents graffiti for the public key `pk`. /// Returns an error if the line is in the wrong format or does not contain a valid public key or graffiti. -fn read_line(line: &str) -> Result<(Option, Graffiti), Error> { +fn read_line(line: &str) -> Result<(Option, Graffiti), Error> { if let Some(i) = line.find(':') { let (key, value) = line.split_at(i); // Note: `value.len() >=1` so `value[1..]` is safe @@ -88,7 +91,7 @@ fn read_line(line: &str) -> Result<(Option, Graffiti), Error> { if key == "default" { Ok((None, graffiti)) } else { - let pk = PublicKey::from_str(&key).map_err(Error::InvalidPublicKey)?; + let pk = PublicKeyBytes::from_str(&key).map_err(Error::InvalidPublicKey)?; Ok((Some(pk), graffiti)) } } else { @@ -114,9 +117,9 @@ mod tests { // Create a graffiti file in the required format and return a path to the file. 
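// The format parsed by `read_line` above is one `key: graffiti` pair per line, where `key`
// is either the literal `default` or a 0x-prefixed compressed BLS public key (48 bytes,
// i.e. 96 hex characters), e.g. (values illustrative):
//
//     default: <fallback graffiti>
//     0x<96 hex chars>: <per-validator graffiti>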
fn create_graffiti_file() -> PathBuf { let temp = TempDir::new().unwrap(); - let pk1 = PublicKey::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKey::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - let pk3 = PublicKey::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); + let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); + let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); + let pk3 = PublicKeyBytes::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); let file_name = temp.into_path().join("graffiti.txt"); @@ -143,9 +146,9 @@ mod tests { let graffiti_file_path = create_graffiti_file(); let mut gf = GraffitiFile::new(graffiti_file_path); - let pk1 = PublicKey::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); - let pk2 = PublicKey::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); - let pk3 = PublicKey::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); + let pk1 = PublicKeyBytes::deserialize(&hex::decode(&PK1[2..]).unwrap()).unwrap(); + let pk2 = PublicKeyBytes::deserialize(&hex::decode(&PK2[2..]).unwrap()).unwrap(); + let pk3 = PublicKeyBytes::deserialize(&hex::decode(&PK3[2..]).unwrap()).unwrap(); // Read once gf.read_graffiti_file().unwrap(); @@ -165,7 +168,7 @@ mod tests { ); // Random pk should return the default graffiti - let random_pk = Keypair::random().pk; + let random_pk = Keypair::random().pk.compress(); assert_eq!( gf.load_graffiti(&random_pk).unwrap().unwrap(), GraffitiString::from_str(DEFAULT_GRAFFITI).unwrap().into() diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index 5a6b993ab..31a5efd3c 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -13,6 +13,13 @@ pub const ATTESTATIONS: &str = "attestations"; pub const AGGREGATES: &str = "aggregates"; pub const CURRENT_EPOCH: &str = "current_epoch"; pub const NEXT_EPOCH: &str = "next_epoch"; +pub const UPDATE_INDICES: &str = "update_indices"; +pub const UPDATE_ATTESTERS_CURRENT_EPOCH: &str = "update_attesters_current_epoch"; +pub const UPDATE_ATTESTERS_NEXT_EPOCH: &str = "update_attesters_next_epoch"; +pub const UPDATE_ATTESTERS_FETCH: &str = "update_attesters_fetch"; +pub const UPDATE_ATTESTERS_STORE: &str = "update_attesters_store"; +pub const UPDATE_PROPOSERS: &str = "update_proposers"; +pub const SUBSCRIPTIONS: &str = "subscriptions"; pub use lighthouse_metrics::*; @@ -84,6 +91,10 @@ lazy_static::lazy_static! { "Number of attesters on this host", &["task"] ); + pub static ref PROPOSAL_CHANGED: Result = try_create_int_counter( + "vc_beacon_block_proposal_changed", + "A duties update discovered a new block proposer for the current slot", + ); /* * Endpoint metrics */ diff --git a/validator_client/src/http_metrics/mod.rs b/validator_client/src/http_metrics/mod.rs index 1e1c63640..bb80e20f4 100644 --- a/validator_client/src/http_metrics/mod.rs +++ b/validator_client/src/http_metrics/mod.rs @@ -36,7 +36,7 @@ impl From for Error { /// Contains objects which have shared access from inside/outside of the metrics server. 
pub struct Shared { pub validator_store: Option>, - pub duties_service: Option>, + pub duties_service: Option>>, pub genesis_time: Option, } diff --git a/validator_client/src/initialized_validators.rs b/validator_client/src/initialized_validators.rs index 731a0b1a9..1475cd995 100644 --- a/validator_client/src/initialized_validators.rs +++ b/validator_client/src/initialized_validators.rs @@ -20,7 +20,7 @@ use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io; use std::path::PathBuf; -use types::{Graffiti, Keypair, PublicKey}; +use types::{Graffiti, Keypair, PublicKey, PublicKeyBytes}; use crate::key_cache; use crate::key_cache::KeyCache; @@ -284,7 +284,7 @@ pub struct InitializedValidators { /// The directory that the `self.definitions` will be saved into. validators_dir: PathBuf, /// The canonical set of validators. - validators: HashMap, + validators: HashMap, /// For logging via `slog`. log: Logger, } @@ -317,13 +317,13 @@ impl InitializedValidators { } /// Iterate through all **enabled** voting public keys in `self`. - pub fn iter_voting_pubkeys(&self) -> impl Iterator { + pub fn iter_voting_pubkeys(&self) -> impl Iterator { self.validators.iter().map(|(pubkey, _)| pubkey) } /// Returns the voting `Keypair` for a given voting `PublicKey`, if that validator is known to /// `self` **and** the validator is enabled. - pub fn voting_keypair(&self, voting_public_key: &PublicKey) -> Option<&Keypair> { + pub fn voting_keypair(&self, voting_public_key: &PublicKeyBytes) -> Option<&Keypair> { self.validators .get(voting_public_key) .map(|v| v.voting_keypair()) @@ -366,7 +366,7 @@ impl InitializedValidators { } /// Returns the `graffiti` for a given public key specified in the `ValidatorDefinitions`. - pub fn graffiti(&self, public_key: &PublicKey) -> Option { + pub fn graffiti(&self, public_key: &PublicKeyBytes) -> Option { self.validators.get(public_key).and_then(|v| v.graffiti) } @@ -513,7 +513,9 @@ impl InitializedValidators { voting_keystore_path, .. 
} => {
-                if self.validators.contains_key(&def.voting_public_key) {
+                let pubkey_bytes = def.voting_public_key.compress();
+
+                if self.validators.contains_key(&pubkey_bytes) {
                     continue;
                 }
@@ -536,7 +538,7 @@ impl InitializedValidators {
                     .map(|l| l.path().to_owned());
                 self.validators
-                    .insert(init.voting_public_key().clone(), init);
+                    .insert(init.voting_public_key().compress(), init);
                 info!(
                     self.log,
                     "Enabled validator";
@@ -569,7 +571,7 @@ impl InitializedValidators {
                     }
                 }
             } else {
-                self.validators.remove(&def.voting_public_key);
+                self.validators.remove(&def.voting_public_key.compress());
                 match &def.signing_definition {
                     SigningDefinition::LocalKeystore {
                         voting_keystore_path,
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs
index fb3e81e33..1541fce27 100644
--- a/validator_client/src/lib.rs
+++ b/validator_client/src/lib.rs
@@ -11,7 +11,6 @@ mod http_metrics;
 mod initialized_validators;
 mod key_cache;
 mod notifier;
-mod validator_duty;
 mod validator_store;

 pub mod http_api;
@@ -26,12 +25,11 @@ use account_utils::validator_definitions::ValidatorDefinitions;
 use attestation_service::{AttestationService, AttestationServiceBuilder};
 use block_service::{BlockService, BlockServiceBuilder};
 use clap::ArgMatches;
-use duties_service::{DutiesService, DutiesServiceBuilder};
+use duties_service::DutiesService;
 use environment::RuntimeContext;
 use eth2::types::StateId;
 use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, StatusCode, Url};
 use fork_service::{ForkService, ForkServiceBuilder};
-use futures::channel::mpsc;
 use http_api::ApiSecret;
 use initialized_validators::InitializedValidators;
 use notifier::spawn_notifier;
@@ -44,7 +42,10 @@ use std::marker::PhantomData;
 use std::net::SocketAddr;
 use std::sync::Arc;
 use std::time::{SystemTime, UNIX_EPOCH};
-use tokio::time::{sleep, Duration};
+use tokio::{
+    sync::mpsc,
+    time::{sleep, Duration},
+};
 use types::{EthSpec, Fork, Hash256};
 use validator_store::ValidatorStore;

@@ -60,7 +61,7 @@ const HTTP_TIMEOUT: Duration = Duration::from_secs(12);
 #[derive(Clone)]
 pub struct ProductionValidatorClient {
     context: RuntimeContext,
-    duties_service: DutiesService,
+    duties_service: Arc>,
     fork_service: ForkService,
     block_service: BlockService,
     attestation_service: AttestationService,
@@ -285,13 +286,22 @@ impl ProductionValidatorClient {
         validator_store.prune_slashing_protection_db(slot.epoch(T::slots_per_epoch()), true);
     }

-        let duties_service = DutiesServiceBuilder::new()
-            .slot_clock(slot_clock.clone())
-            .validator_store(validator_store.clone())
-            .beacon_nodes(beacon_nodes.clone())
-            .allow_unsynced_beacon_node(config.allow_unsynced_beacon_node)
-            .runtime_context(context.service_context("duties".into()))
-            .build()?;
+        let duties_context = context.service_context("duties".into());
+        let duties_service = Arc::new(DutiesService {
+            attesters: <_>::default(),
+            proposers: <_>::default(),
+            indices: <_>::default(),
+            slot_clock: slot_clock.clone(),
+            beacon_nodes: beacon_nodes.clone(),
+            validator_store: validator_store.clone(),
+            require_synced: if config.allow_unsynced_beacon_node {
+                RequireSynced::No
+            } else {
+                RequireSynced::Yes
+            },
+            spec: context.eth2_config.spec.clone(),
+            context: duties_context,
+        });

         // Update the metrics server.
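        // (`<_>::default()` above initialises each duty map as an empty `RwLock`-wrapped
        // `HashMap`; the polling tasks spawned later by `duties_service::start_update_service`
        // populate them. The freshly-built service is also shared with the metrics server below.)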
diff --git a/validator_client/src/notifier.rs b/validator_client/src/notifier.rs
index beaa5b697..8b6d523cc 100644
--- a/validator_client/src/notifier.rs
+++ b/validator_client/src/notifier.rs
@@ -1,7 +1,7 @@
-use crate::ProductionValidatorClient;
-use slog::{error, info};
+use crate::{DutiesService, ProductionValidatorClient};
+use slog::{error, info, Logger};
 use slot_clock::SlotClock;
-use tokio::time::{interval_at, Duration, Instant};
+use tokio::time::{sleep, Duration};
 use types::EthSpec;

 /// Spawns a notifier service which periodically logs information about the node.
@@ -11,86 +11,19 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
     let duties_service = client.duties_service.clone();

     let slot_duration = Duration::from_secs(context.eth2_config.spec.seconds_per_slot);
-    let duration_to_next_slot = duties_service
-        .slot_clock
-        .duration_to_next_slot()
-        .ok_or("slot_notifier unable to determine time to next slot")?;
-
-    // Run the notifier half way through each slot.
-    let start_instant = Instant::now() + duration_to_next_slot + (slot_duration / 2);
-    let mut interval = interval_at(start_instant, slot_duration);

     let interval_fut = async move {
         let log = context.log();

         loop {
-            interval.tick().await;
-
-            let num_available = duties_service.beacon_nodes.num_available().await;
-            let num_synced = duties_service.beacon_nodes.num_synced().await;
-            let num_total = duties_service.beacon_nodes.num_total().await;
-            if num_synced > 0 {
-                info!(
-                    log,
-                    "Connected to beacon node(s)";
-                    "total" => num_total,
-                    "available" => num_available,
-                    "synced" => num_synced,
-                )
+            if let Some(duration_to_next_slot) = duties_service.slot_clock.duration_to_next_slot() {
+                sleep(duration_to_next_slot + slot_duration / 2).await;
+                notify(&duties_service, &log).await;
             } else {
-                error!(
-                    log,
-                    "No synced beacon nodes";
-                    "total" => num_total,
-                    "available" => num_available,
-                    "synced" => num_synced,
-                )
-            }
-
-            if let Some(slot) = duties_service.slot_clock.now() {
-                let epoch = slot.epoch(T::slots_per_epoch());
-
-                let total_validators = duties_service.total_validator_count();
-                let proposing_validators = duties_service.proposer_count(epoch);
-                let attesting_validators = duties_service.attester_count(epoch);
-
-                if total_validators == 0 {
-                    info!(
-                        log,
-                        "No validators present";
-                        "msg" => "see `lighthouse account validator create --help` \
-                                  or the HTTP API documentation"
-                    )
-                } else if total_validators == attesting_validators {
-                    info!(
-                        log,
-                        "All validators active";
-                        "proposers" => proposing_validators,
-                        "active_validators" => attesting_validators,
-                        "total_validators" => total_validators,
-                        "epoch" => format!("{}", epoch),
-                        "slot" => format!("{}", slot),
-                    );
-                } else if attesting_validators > 0 {
-                    info!(
-                        log,
-                        "Some validators active";
-                        "proposers" => proposing_validators,
-                        "active_validators" => attesting_validators,
-                        "total_validators" => total_validators,
-                        "epoch" => format!("{}", epoch),
-                        "slot" => format!("{}", slot),
-                    );
-                } else {
-                    info!(
-                        log,
-                        "Awaiting activation";
-                        "validators" => total_validators,
-                        "epoch" => format!("{}", epoch),
-                        "slot" => format!("{}", slot),
-                    );
-                }
-            } else {
-                error!(log, "Unable to read slot clock");
+                error!(log, "Failed to read slot clock");
+                // If we can't read the slot clock, just wait another slot.
+                sleep(slot_duration).await;
+                continue;
             }
         }
     };
@@ -98,3 +31,77 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
     executor.spawn(interval_fut, "validator_notifier");
     Ok(())
 }
+
+/// Performs a single notification routine.
+async fn notify<T: SlotClock + 'static, E: EthSpec>(
+    duties_service: &DutiesService<T, E>,
+    log: &Logger,
+) {
+    let num_available = duties_service.beacon_nodes.num_available().await;
+    let num_synced = duties_service.beacon_nodes.num_synced().await;
+    let num_total = duties_service.beacon_nodes.num_total().await;
+    if num_synced > 0 {
+        info!(
+            log,
+            "Connected to beacon node(s)";
+            "total" => num_total,
+            "available" => num_available,
+            "synced" => num_synced,
+        )
+    } else {
+        error!(
+            log,
+            "No synced beacon nodes";
+            "total" => num_total,
+            "available" => num_available,
+            "synced" => num_synced,
+        )
+    }
+
+    if let Some(slot) = duties_service.slot_clock.now() {
+        let epoch = slot.epoch(E::slots_per_epoch());
+
+        let total_validators = duties_service.total_validator_count();
+        let proposing_validators = duties_service.proposer_count(epoch);
+        let attesting_validators = duties_service.attester_count(epoch);
+
+        if total_validators == 0 {
+            info!(
+                log,
+                "No validators present";
+                "msg" => "see `lighthouse account validator create --help` \
+                          or the HTTP API documentation"
+            )
+        } else if total_validators == attesting_validators {
+            info!(
+                log,
+                "All validators active";
+                "proposers" => proposing_validators,
+                "active_validators" => attesting_validators,
+                "total_validators" => total_validators,
+                "epoch" => format!("{}", epoch),
+                "slot" => format!("{}", slot),
+            );
+        } else if attesting_validators > 0 {
+            info!(
+                log,
+                "Some validators active";
+                "proposers" => proposing_validators,
+                "active_validators" => attesting_validators,
+                "total_validators" => total_validators,
+                "epoch" => format!("{}", epoch),
+                "slot" => format!("{}", slot),
+            );
+        } else {
+            info!(
+                log,
+                "Awaiting activation";
+                "validators" => total_validators,
+                "epoch" => format!("{}", epoch),
+                "slot" => format!("{}", slot),
+            );
+        }
+    } else {
+        error!(log, "Unable to read slot clock");
+    }
+}
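The notifier now re-reads the slot clock on every iteration and sleeps to the half-slot point, rather than ticking a fixed `interval_at` timer, so it re-aligns after clock adjustments instead of drifting. A runnable sketch of the loop shape, assuming the tokio runtime (`rt`, `macros` and `time` features) and an illustrative `duration_to_next_slot` helper:

    use std::time::Duration;
    use tokio::time::sleep;

    // Illustrative stand-in for a slot-clock read: time left in the current slot.
    fn duration_to_next_slot() -> Option<Duration> {
        Some(Duration::from_millis(150))
    }

    #[tokio::main]
    async fn main() {
        // Shortened slot for demonstration; mainnet slots are 12 seconds.
        let slot_duration = Duration::from_millis(400);

        for _ in 0..3 {
            if let Some(to_next) = duration_to_next_slot() {
                // Sleep to the half-slot point, re-reading the clock each pass.
                sleep(to_next + slot_duration / 2).await;
                println!("half-slot tick");
            } else {
                // Clock unreadable: back off for a full slot and retry.
                sleep(slot_duration).await;
            }
        }
    }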
activation"; - "validators" => total_validators, - "epoch" => format!("{}", epoch), - "slot" => format!("{}", slot), - ); - } - } else { - error!(log, "Unable to read slot clock"); + error!(log, "Failed to read slot clock"); + // If we can't read the slot clock, just wait another slot. + sleep(slot_duration).await; + continue; } } }; @@ -98,3 +31,77 @@ pub fn spawn_notifier(client: &ProductionValidatorClient) -> Resu executor.spawn(interval_fut, "validator_notifier"); Ok(()) } + +/// Performs a single notification routine. +async fn notify( + duties_service: &DutiesService, + log: &Logger, +) { + let num_available = duties_service.beacon_nodes.num_available().await; + let num_synced = duties_service.beacon_nodes.num_synced().await; + let num_total = duties_service.beacon_nodes.num_total().await; + if num_synced > 0 { + info!( + log, + "Connected to beacon node(s)"; + "total" => num_total, + "available" => num_available, + "synced" => num_synced, + ) + } else { + error!( + log, + "No synced beacon nodes"; + "total" => num_total, + "available" => num_available, + "synced" => num_synced, + ) + } + + if let Some(slot) = duties_service.slot_clock.now() { + let epoch = slot.epoch(E::slots_per_epoch()); + + let total_validators = duties_service.total_validator_count(); + let proposing_validators = duties_service.proposer_count(epoch); + let attesting_validators = duties_service.attester_count(epoch); + + if total_validators == 0 { + info!( + log, + "No validators present"; + "msg" => "see `lighthouse account validator create --help` \ + or the HTTP API documentation" + ) + } else if total_validators == attesting_validators { + info!( + log, + "All validators active"; + "proposers" => proposing_validators, + "active_validators" => attesting_validators, + "total_validators" => total_validators, + "epoch" => format!("{}", epoch), + "slot" => format!("{}", slot), + ); + } else if attesting_validators > 0 { + info!( + log, + "Some validators active"; + "proposers" => proposing_validators, + "active_validators" => attesting_validators, + "total_validators" => total_validators, + "epoch" => format!("{}", epoch), + "slot" => format!("{}", slot), + ); + } else { + info!( + log, + "Awaiting activation"; + "validators" => total_validators, + "epoch" => format!("{}", epoch), + "slot" => format!("{}", slot), + ); + } + } else { + error!(log, "Unable to read slot clock"); + } +} diff --git a/validator_client/src/validator_duty.rs b/validator_client/src/validator_duty.rs deleted file mode 100644 index b87a9dbc3..000000000 --- a/validator_client/src/validator_duty.rs +++ /dev/null @@ -1,188 +0,0 @@ -use eth2::{ - types::{BeaconCommitteeSubscription, StateId, ValidatorId}, - BeaconNodeHttpClient, -}; -use serde::{Deserialize, Serialize}; -use slog::{error, Logger}; -use std::collections::HashMap; -use types::{CommitteeIndex, Epoch, PublicKey, PublicKeyBytes, Slot}; - -/// This struct is being used as a shim since we deprecated the `rest_api` in favour of `http_api`. -/// -/// Tracking issue: https://github.com/sigp/lighthouse/issues/1643 -// NOTE: if you add or remove fields, please adjust `eq_ignoring_proposal_slots` -#[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] -pub struct ValidatorDuty { - /// The validator's BLS public key, uniquely identifying them. - pub validator_pubkey: PublicKey, - /// The validator's index in `state.validators` - pub validator_index: Option, - /// The slot at which the validator must attest. 
-        // Query for all block proposer duties in the current epoch and map the response by index.
-        let proposal_slots_by_index: HashMap<u64, Vec<Slot>> = if current_epoch == request_epoch {
-            beacon_node
-                .get_validator_duties_proposer(current_epoch)
-                .await
-                .map(|resp| resp.data)
-                // Exit early if there's an error.
-                .map_err(|e| format!("Failed to get proposer indices: {:?}", e))?
-                .into_iter()
-                .fold(
-                    HashMap::with_capacity(pubkeys.len()),
-                    |mut map, proposer_data| {
-                        map.entry(proposer_data.validator_index)
-                            .or_insert_with(Vec::new)
-                            .push(proposer_data.slot);
-                        map
-                    },
-                )
-        } else {
-            HashMap::new()
-        };
-
-        let query_indices = pubkeys
-            .iter()
-            .filter_map(|(_, index_opt)| *index_opt)
-            .collect::<Vec<_>>();
-        let attester_data_map = beacon_node
-            .post_validator_duties_attester(request_epoch, query_indices.as_slice())
-            .await
-            .map(|resp| resp.data)
-            // Exit early if there's an error.
-            .map_err(|e| format!("Failed to get attester duties: {:?}", e))?
-            .into_iter()
-            .fold(
-                HashMap::with_capacity(pubkeys.len()),
-                |mut map, attester_data| {
-                    map.insert(attester_data.validator_index, attester_data);
-                    map
-                },
-            );
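Both downloads above collapse an API response into a `HashMap` with `fold`; the proposer query additionally groups multiple proposal slots under a single validator index. That grouping in isolation:

    use std::collections::HashMap;

    fn main() {
        // (validator_index, slot) pairs, like a proposer-duties response.
        let duties = vec![(3u64, 96u64), (7, 97), (3, 99)];

        let by_index: HashMap<u64, Vec<u64>> =
            duties
                .into_iter()
                .fold(HashMap::new(), |mut map, (index, slot)| {
                    // One validator may propose in several slots per epoch.
                    map.entry(index).or_insert_with(Vec::new).push(slot);
                    map
                });

        assert_eq!(by_index[&3], vec![96, 99]);
        assert_eq!(by_index[&7], vec![97]);
    }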
-
-        let duties = pubkeys
-            .into_iter()
-            .map(|(pubkey, index_opt)| {
-                if let Some(index) = index_opt {
-                    if let Some(attester_data) = attester_data_map.get(&index) {
-                        match attester_data.pubkey.decompress() {
-                            Ok(pubkey) => ValidatorDuty {
-                                validator_pubkey: pubkey,
-                                validator_index: Some(attester_data.validator_index),
-                                attestation_slot: Some(attester_data.slot),
-                                attestation_committee_index: Some(attester_data.committee_index),
-                                attestation_committee_position: Some(
-                                    attester_data.validator_committee_index as usize,
-                                ),
-                                committee_count_at_slot: Some(attester_data.committees_at_slot),
-                                committee_length: Some(attester_data.committee_length),
-                                block_proposal_slots: proposal_slots_by_index
-                                    .get(&attester_data.validator_index)
-                                    .cloned(),
-                            },
-                            Err(e) => {
-                                error!(
-                                    log,
-                                    "Could not deserialize validator public key";
-                                    "error" => format!("{:?}", e),
-                                    "validator_index" => attester_data.validator_index
-                                );
-                                Self::no_duties(pubkey, Some(index))
-                            }
-                        }
-                    } else {
-                        Self::no_duties(pubkey, Some(index))
-                    }
-                } else {
-                    Self::no_duties(pubkey, None)
-                }
-            })
-            .collect();
-
-        Ok(duties)
-    }
-
-    /// Return `true` if these validator duties are equal, ignoring their `block_proposal_slots`.
-    pub fn eq_ignoring_proposal_slots(&self, other: &Self) -> bool {
-        self.validator_pubkey == other.validator_pubkey
-            && self.validator_index == other.validator_index
-            && self.attestation_slot == other.attestation_slot
-            && self.attestation_committee_index == other.attestation_committee_index
-            && self.attestation_committee_position == other.attestation_committee_position
-            && self.committee_count_at_slot == other.committee_count_at_slot
-            && self.committee_length == other.committee_length
-    }
-
-    /// Generate a subscription for `self`, if `self` has appropriate attestation duties.
-    pub fn subscription(&self, is_aggregator: bool) -> Option<BeaconCommitteeSubscription> {
-        Some(BeaconCommitteeSubscription {
-            validator_index: self.validator_index?,
-            committee_index: self.attestation_committee_index?,
-            committees_at_slot: self.committee_count_at_slot?,
-            slot: self.attestation_slot?,
-            is_aggregator,
-        })
-    }
-}
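The `subscription` helper leans on `?` inside an `Option`-returning function: the `BeaconCommitteeSubscription` is only built when every required duty field is present, and construction short-circuits to `None` otherwise. In miniature, with simplified stand-in types:

    #[derive(Debug, PartialEq)]
    struct Subscription {
        validator_index: u64,
        slot: u64,
        is_aggregator: bool,
    }

    struct Duty {
        validator_index: Option<u64>,
        attestation_slot: Option<u64>,
    }

    impl Duty {
        // `?` on each `Option` field returns `None` as soon as anything is missing.
        fn subscription(&self, is_aggregator: bool) -> Option<Subscription> {
            Some(Subscription {
                validator_index: self.validator_index?,
                slot: self.attestation_slot?,
                is_aggregator,
            })
        }
    }

    fn main() {
        let complete = Duty { validator_index: Some(1), attestation_slot: Some(32) };
        let partial = Duty { validator_index: Some(1), attestation_slot: None };
        assert!(complete.subscription(false).is_some());
        assert!(partial.subscription(false).is_none());
    }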
diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs
index c23cc1ba5..760f4ce49 100644
--- a/validator_client/src/validator_store.rs
+++ b/validator_client/src/validator_store.rs
@@ -11,7 +11,7 @@ use std::sync::Arc;
 use tempfile::TempDir;
 use types::{
     graffiti::GraffitiString, Attestation, BeaconBlock, ChainSpec, Domain, Epoch, EthSpec, Fork,
-    Graffiti, Hash256, Keypair, PublicKey, SelectionProof, Signature, SignedAggregateAndProof,
+    Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof,
     SignedBeaconBlock, SignedRoot, Slot,
 };
 use validator_dir::ValidatorDir;
@@ -106,7 +106,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
             .map_err(|e| format!("failed to create validator definitions: {:?}", e))?;

         self.slashing_protection
-            .register_validator(&validator_def.voting_public_key)
+            .register_validator(validator_def.voting_public_key.compress())
             .map_err(|e| format!("failed to register validator: {:?}", e))?;

         validator_def.enabled = enable;
@@ -120,7 +120,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         Ok(validator_def)
     }

-    pub fn voting_pubkeys(&self) -> Vec<PublicKey> {
+    pub fn voting_pubkeys(&self) -> Vec<PublicKeyBytes> {
         self.validators
             .read()
             .iter_voting_pubkeys()
@@ -136,7 +136,11 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         self.fork_service.fork()
     }

-    pub fn randao_reveal(&self, validator_pubkey: &PublicKey, epoch: Epoch) -> Option<Signature> {
+    pub fn randao_reveal(
+        &self,
+        validator_pubkey: &PublicKeyBytes,
+        epoch: Epoch,
+    ) -> Option<Signature> {
         self.validators
             .read()
             .voting_keypair(validator_pubkey)
@@ -153,13 +157,13 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
         })
     }

-    pub fn graffiti(&self, validator_pubkey: &PublicKey) -> Option<Graffiti> {
+    pub fn graffiti(&self, validator_pubkey: &PublicKeyBytes) -> Option<Graffiti> {
         self.validators.read().graffiti(validator_pubkey)
     }

     pub fn sign_block(
         &self,
-        validator_pubkey: &PublicKey,
+        validator_pubkey: &PublicKeyBytes,
         block: BeaconBlock<E>,
         current_slot: Slot,
     ) -> Option<SignedBeaconBlock<E>> {
@@ -236,7 +240,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {

     pub fn sign_attestation(
         &self,
-        validator_pubkey: &PublicKey,
+        validator_pubkey: &PublicKeyBytes,
         validator_committee_position: usize,
         attestation: &mut Attestation<E>,
         current_epoch: Epoch,
@@ -334,7 +338,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
     /// modified by actors other than the signing validator.
     pub fn produce_signed_aggregate_and_proof(
         &self,
-        validator_pubkey: &PublicKey,
+        validator_pubkey: &PublicKeyBytes,
         validator_index: u64,
         aggregate: Attestation<E>,
         selection_proof: SelectionProof,
@@ -359,7 +363,7 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
     /// `validator_pubkey`.
     pub fn produce_selection_proof(
         &self,
-        validator_pubkey: &PublicKey,
+        validator_pubkey: &PublicKeyBytes,
         slot: Slot,
     ) -> Option<SelectionProof> {
         let validators = self.validators.read();