//! The `DutiesService` contains the attester/proposer duties for all local validators.
//!
//! It learns of the local validators via the `crate::ValidatorStore` struct. It keeps the duties
//! up-to-date by polling the beacon node at regular intervals.
//!
//! The `DutiesService` is also responsible for sending events to the `BlockService` which trigger
//! block production.
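//!
//! As an informal sketch only (not a compiling example), the service is assembled once at startup
//! and then driven entirely by `start_update_service`. Here `build_duties_service` is a
//! hypothetical stand-in for constructing the struct, and `block_service_tx` is the sending half
//! of the channel consumed by the `BlockService`:
//!
//! ```ignore
//! // Assemble the service (field construction elided) and share it behind an `Arc`.
//! let duties_service: Arc<DutiesService<T, E>> = build_duties_service();
//!
//! // Spawn the polling tasks; duties are then kept up-to-date in the background.
//! start_update_service(duties_service, block_service_tx);
//! ```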
mod sync;

use crate::beacon_node_fallback::{BeaconNodeFallback, OfflineOnFailure, RequireSynced};
use crate::{
    block_service::BlockServiceNotification,
    http_metrics::metrics,
    validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore},
};
use environment::RuntimeContext;
use eth2::types::{AttesterData, BeaconCommitteeSubscription, ProposerData, StateId, ValidatorId};
use futures::future::join_all;
use parking_lot::RwLock;
use safe_arith::ArithError;
use slog::{debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use sync::poll_sync_committee_duties;
use sync::SyncDutiesMap;
use tokio::{sync::mpsc::Sender, time::sleep};
use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot};

/// Since the BN does not like it when we subscribe to slots that are close to the current time, we
/// will only subscribe to slots which are further than `SUBSCRIPTION_BUFFER_SLOTS` away.
///
/// This number is based upon the `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the
/// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid
/// bringing in the entire crate.
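///
/// For example, with a buffer of 2 the check `current_slot + SUBSCRIPTION_BUFFER_SLOTS < duty.slot`
/// means a duty at slot 102 is only included in subscriptions sent at slot 99 or earlier.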
const SUBSCRIPTION_BUFFER_SLOTS: u64 = 2;
/// Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch.
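///
/// For example, with a value of 2 and a current epoch of 10, the retention check
/// `epoch + HISTORICAL_DUTIES_EPOCHS >= current_epoch` keeps epochs 8 and 9 and prunes anything
/// older.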
const HISTORICAL_DUTIES_EPOCHS: u64 = 2;

#[derive(Debug)]
pub enum Error {
    UnableToReadSlotClock,
    FailedToDownloadAttesters(String),
    FailedToProduceSelectionProof(ValidatorStoreError),
    InvalidModulo(ArithError),
    Arith(ArithError),
    SyncDutiesNotFound(u64),
}

impl From<ArithError> for Error {
    fn from(e: ArithError) -> Self {
        Self::Arith(e)
    }
}

/// Neatly joins the server-generated `AttesterData` with the locally-generated `selection_proof`.
#[derive(Clone)]
pub struct DutyAndProof {
    pub duty: AttesterData,
    /// This value is only set to `Some` if the proof indicates that the validator is an aggregator.
    pub selection_proof: Option<SelectionProof>,
}

impl DutyAndProof {
    /// Instantiate `Self`, computing the selection proof as well.
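    ///
    /// As an informal usage sketch (mirroring the call in `poll_beacon_attesters_for_epoch`
    /// below), where `duty`, `validator_store` and `spec` are assumed to already be in scope:
    ///
    /// ```ignore
    /// let duty_and_proof = DutyAndProof::new(duty, &validator_store, &spec).await?;
    ///
    /// // `selection_proof` is `Some` only if this validator is an aggregator for the duty.
    /// let is_aggregator = duty_and_proof.selection_proof.is_some();
    /// ```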
    pub async fn new<T: SlotClock + 'static, E: EthSpec>(
        duty: AttesterData,
        validator_store: &ValidatorStore<T, E>,
        spec: &ChainSpec,
    ) -> Result<Self, Error> {
        let selection_proof = validator_store
            .produce_selection_proof(duty.pubkey, duty.slot)
            .await
            .map_err(Error::FailedToProduceSelectionProof)?;

        let selection_proof = selection_proof
            .is_aggregator(duty.committee_length as usize, spec)
            .map_err(Error::InvalidModulo)
            .map(|is_aggregator| {
                if is_aggregator {
                    Some(selection_proof)
                } else {
                    // Don't bother storing the selection proof if the validator isn't an
                    // aggregator, we won't need it.
                    None
                }
            })?;

        Ok(Self {
            duty,
            selection_proof,
        })
    }
}

/// To assist with readability, the dependent root for attester/proposer duties.
type DependentRoot = Hash256;

type AttesterMap = HashMap<PublicKeyBytes, HashMap<Epoch, (DependentRoot, DutyAndProof)>>;
type ProposerMap = HashMap<Epoch, (DependentRoot, Vec<ProposerData>)>;

/// See the module-level documentation.
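///
/// As an informal sketch (not a compiling example), the per-slot lookups performed against this
/// struct by the other services look roughly like the following, assuming `duties_service` is an
/// `Arc<DutiesService<T, E>>` and `slot` is the current slot:
///
/// ```ignore
/// // Pubkeys of local validators due to propose at `slot`.
/// let proposers = duties_service.block_proposers(slot);
///
/// // Attestation duties (and optional selection proofs) for `slot`.
/// let attesters = duties_service.attesters(slot);
/// ```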
pub struct DutiesService<T, E: EthSpec> {
    /// Maps a validator public key to their duties for each epoch.
    pub attesters: RwLock<AttesterMap>,
    /// Maps an epoch to all *local* proposers in this epoch. Notably, this does not contain
    /// proposals for any validators which are not registered locally.
    pub proposers: RwLock<ProposerMap>,
    /// Map from validator index to sync committee duties.
    pub sync_duties: SyncDutiesMap,
    /// Provides the canonical list of locally-managed validators.
    pub validator_store: Arc<ValidatorStore<T, E>>,
    /// Tracks the current slot.
    pub slot_clock: T,
    /// Provides HTTP access to remote beacon nodes.
    pub beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
    /// Controls whether or not this service will refuse to interact with non-synced beacon nodes.
    ///
    /// This functionality is a little redundant since most BNs will likely reject duties when they
    /// aren't synced, but we keep it around for an emergency.
    pub require_synced: RequireSynced,
    pub context: RuntimeContext<E>,
    pub spec: ChainSpec,
}

impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
    /// Returns the total number of validators known to the duties service.
    pub fn total_validator_count(&self) -> usize {
        self.validator_store.num_voting_validators()
    }

    /// Returns the total number of validators that should propose in the given epoch.
    pub fn proposer_count(&self, epoch: Epoch) -> usize {
        // Only collect validators that are considered safe in terms of doppelganger protection.
        let signing_pubkeys: HashSet<_> = self
            .validator_store
            .voting_pubkeys(DoppelgangerStatus::only_safe);

        self.proposers
            .read()
            .get(&epoch)
            .map_or(0, |(_, proposers)| {
                proposers
                    .iter()
                    .filter(|proposer_data| signing_pubkeys.contains(&proposer_data.pubkey))
                    .count()
            })
    }

    /// Returns the total number of validators that should attest in the given epoch.
    pub fn attester_count(&self, epoch: Epoch) -> usize {
        // Only collect validators that are considered safe in terms of doppelganger protection.
        let signing_pubkeys: HashSet<_> = self
            .validator_store
            .voting_pubkeys(DoppelgangerStatus::only_safe);

        self.attesters
            .read()
            .iter()
            .filter_map(|(_, map)| map.get(&epoch))
            .map(|(_, duty_and_proof)| duty_and_proof)
            .filter(|duty_and_proof| signing_pubkeys.contains(&duty_and_proof.duty.pubkey))
            .count()
    }

    /// Returns the total number of validators that are in a doppelganger detection period.
    pub fn doppelganger_detecting_count(&self) -> usize {
        self.validator_store
            .voting_pubkeys::<HashSet<_>, _>(DoppelgangerStatus::only_unsafe)
            .len()
    }

    /// Returns the pubkeys of the validators which are assigned to propose in the given slot.
    ///
    /// It is possible that multiple validators have an identical proposal slot; however, that is
    /// likely the result of heavy forking (lol) or inconsistent beacon node connections.
    pub fn block_proposers(&self, slot: Slot) -> HashSet<PublicKeyBytes> {
        let epoch = slot.epoch(E::slots_per_epoch());

        // Only collect validators that are considered safe in terms of doppelganger protection.
        let signing_pubkeys: HashSet<_> = self
            .validator_store
            .voting_pubkeys(DoppelgangerStatus::only_safe);

        self.proposers
            .read()
            .get(&epoch)
            .map(|(_, proposers)| {
                proposers
                    .iter()
                    .filter(|proposer_data| {
                        proposer_data.slot == slot
                            && signing_pubkeys.contains(&proposer_data.pubkey)
                    })
                    .map(|proposer_data| proposer_data.pubkey)
                    .collect()
            })
            .unwrap_or_default()
    }

    /// Returns all attestation duties (`DutyAndProof`) for the given `slot`.
    pub fn attesters(&self, slot: Slot) -> Vec<DutyAndProof> {
        let epoch = slot.epoch(E::slots_per_epoch());

        // Only collect validators that are considered safe in terms of doppelganger protection.
        let signing_pubkeys: HashSet<_> = self
            .validator_store
            .voting_pubkeys(DoppelgangerStatus::only_safe);

        self.attesters
            .read()
            .iter()
            .filter_map(|(_, map)| map.get(&epoch))
            .map(|(_, duty_and_proof)| duty_and_proof)
            .filter(|duty_and_proof| {
                duty_and_proof.duty.slot == slot
                    && signing_pubkeys.contains(&duty_and_proof.duty.pubkey)
            })
            .cloned()
            .collect()
    }
}

/// Start the service that periodically polls the beacon node for validator duties. This will start
/// several sub-services.
///
/// ## Notes
///
/// The loops in this function are structured such that a new instance of a task will only start
/// once the current one has finished. This means that if a task happens to take more than one slot
/// to run, we might skip a slot. This is unfortunate; however, the alternative is to *always*
/// process every slot, which has the chance of creating a theoretically unlimited backlog of tasks.
/// It was a conscious decision to choose to drop tasks on an overloaded/latent system rather than
/// overload it even more.
pub fn start_update_service<T: SlotClock + 'static, E: EthSpec>(
    core_duties_service: Arc<DutiesService<T, E>>,
    mut block_service_tx: Sender<BlockServiceNotification>,
) {
    /*
     * Spawn the task which updates the map of pubkey to validator index.
     */
    let duties_service = core_duties_service.clone();
    core_duties_service.context.executor.spawn(
        async move {
            loop {
                // Run this poll before the wait so that, hopefully, all the indices are downloaded
                // before the block/attestation tasks need them.
                poll_validator_indices(&duties_service).await;

                if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() {
                    sleep(duration).await;
                } else {
                    // Just sleep for one slot if we are unable to read the system clock; this gives
                    // us an opportunity for the clock to eventually come good.
                    sleep(duties_service.slot_clock.slot_duration()).await;
                }
            }
        },
        "duties_service_indices",
    );

    /*
     * Spawn the task which keeps track of local block proposal duties.
     */
    let duties_service = core_duties_service.clone();
    let log = core_duties_service.context.log().clone();
    core_duties_service.context.executor.spawn(
        async move {
            loop {
                if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() {
                    sleep(duration).await;
                } else {
                    // Just sleep for one slot if we are unable to read the system clock; this gives
                    // us an opportunity for the clock to eventually come good.
                    sleep(duties_service.slot_clock.slot_duration()).await;
                    continue;
                }

                if let Err(e) = poll_beacon_proposers(&duties_service, &mut block_service_tx).await
                {
                    error!(
                        log,
                        "Failed to poll beacon proposers";
                        "error" => ?e
                    )
                }
            }
        },
        "duties_service_proposers",
    );

    /*
     * Spawn the task which keeps track of local attestation duties.
     */
    let duties_service = core_duties_service.clone();
    let log = core_duties_service.context.log().clone();
    core_duties_service.context.executor.spawn(
        async move {
            loop {
                if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() {
                    sleep(duration).await;
                } else {
                    // Just sleep for one slot if we are unable to read the system clock; this gives
                    // us an opportunity for the clock to eventually come good.
                    sleep(duties_service.slot_clock.slot_duration()).await;
                    continue;
                }

                if let Err(e) = poll_beacon_attesters(&duties_service).await {
                    error!(
                        log,
                        "Failed to poll beacon attesters";
                        "error" => ?e
                    );
                }
            }
        },
        "duties_service_attesters",
    );

    // Spawn the task which keeps track of local sync committee duties.
    let duties_service = core_duties_service.clone();
    let log = core_duties_service.context.log().clone();
    core_duties_service.context.executor.spawn(
        async move {
            loop {
                if let Err(e) = poll_sync_committee_duties(&duties_service).await {
                    error!(
                        log,
                        "Failed to poll sync committee duties";
                        "error" => ?e
                    );
                }

                // Wait until the next slot before polling again.
                // This doesn't mean that the beacon node will get polled every slot
                // as the sync duties service will return early if it deems it already has
                // enough information.
                if let Some(duration) = duties_service.slot_clock.duration_to_next_slot() {
                    sleep(duration).await;
                } else {
                    // Just sleep for one slot if we are unable to read the system clock; this gives
                    // us an opportunity for the clock to eventually come good.
                    sleep(duties_service.slot_clock.slot_duration()).await;
                    continue;
                }
            }
        },
        "duties_service_sync_committee",
    );
}

/// Iterate through all the voting pubkeys in the `ValidatorStore` and attempt to learn any unknown
/// validator indices.
async fn poll_validator_indices<T: SlotClock + 'static, E: EthSpec>(
    duties_service: &DutiesService<T, E>,
) {
    let _timer =
        metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_INDICES]);

    let log = duties_service.context.log();

    // Collect *all* pubkeys for resolving indices, even those undergoing doppelganger protection.
    //
    // Since doppelganger protection queries rely on validator indices it is important to ensure we
    // collect those indices.
    let all_pubkeys: Vec<_> = duties_service
        .validator_store
        .voting_pubkeys(DoppelgangerStatus::ignored);

    for pubkey in all_pubkeys {
        // This is on its own line to avoid some weirdness with locks and if statements.
        let is_known = duties_service
            .validator_store
            .initialized_validators()
            .read()
            .get_index(&pubkey)
            .is_some();

        if !is_known {
            // Query the remote BN to resolve a pubkey to a validator index.
            let download_result = duties_service
                .beacon_nodes
                .first_success(
                    duties_service.require_synced,
                    OfflineOnFailure::Yes,
                    |beacon_node| async move {
                        let _timer = metrics::start_timer_vec(
                            &metrics::DUTIES_SERVICE_TIMES,
                            &[metrics::VALIDATOR_ID_HTTP_GET],
                        );
                        beacon_node
                            .get_beacon_states_validator_id(
                                StateId::Head,
                                &ValidatorId::PublicKey(pubkey),
                            )
                            .await
                    },
                )
                .await;

            let fee_recipient = duties_service
                .validator_store
                .get_fee_recipient(&pubkey)
                .map(|fr| fr.to_string())
                .unwrap_or_else(|| {
                    "Fee recipient for validator not set in validator_definitions.yml \
                    or provided with the `--suggested-fee-recipient` flag"
                        .to_string()
                });

            match download_result {
                Ok(Some(response)) => {
                    info!(
                        log,
                        "Validator exists in beacon chain";
                        "pubkey" => ?pubkey,
                        "validator_index" => response.data.index,
                        "fee_recipient" => fee_recipient
                    );
                    duties_service
                        .validator_store
                        .initialized_validators()
                        .write()
                        .set_index(&pubkey, response.data.index);
                }
                // This is not necessarily an error, it just means the validator is not yet known to
                // the beacon chain.
                Ok(None) => {
                    debug!(
                        log,
                        "Validator without index";
                        "pubkey" => ?pubkey,
                        "fee_recipient" => fee_recipient
                    )
                }
                // Don't exit early on an error, keep attempting to resolve other indices.
                Err(e) => {
                    error!(
                        log,
                        "Failed to resolve pubkey to index";
                        "error" => %e,
                        "pubkey" => ?pubkey,
                        "fee_recipient" => fee_recipient
                    )
                }
            }
        }
    }
}

/// Query the beacon node for attestation duties for any known validators.
///
/// This function will perform (in the following order):
///
/// 1. Poll for current-epoch duties and update the local `duties_service.attesters` map.
/// 2. As above, but for the next epoch.
/// 3. Push out any attestation subnet subscriptions to the BN.
/// 4. Prune old entries from `duties_service.attesters`.
async fn poll_beacon_attesters<T: SlotClock + 'static, E: EthSpec>(
    duties_service: &DutiesService<T, E>,
) -> Result<(), Error> {
    let current_epoch_timer = metrics::start_timer_vec(
        &metrics::DUTIES_SERVICE_TIMES,
        &[metrics::UPDATE_ATTESTERS_CURRENT_EPOCH],
    );

    let log = duties_service.context.log();

    let current_slot = duties_service
        .slot_clock
        .now()
        .ok_or(Error::UnableToReadSlotClock)?;
    let current_epoch = current_slot.epoch(E::slots_per_epoch());
    let next_epoch = current_epoch + 1;

    // Collect *all* pubkeys, even those undergoing doppelganger protection.
    //
    // We must know the duties for doppelganger validators so that we can subscribe to their subnets
    // and get more information about other running instances.
    let local_pubkeys: HashSet<_> = duties_service
        .validator_store
        .voting_pubkeys(DoppelgangerStatus::ignored);

    let local_indices = {
        let mut local_indices = Vec::with_capacity(local_pubkeys.len());

        let vals_ref = duties_service.validator_store.initialized_validators();
        let vals = vals_ref.read();
        for &pubkey in &local_pubkeys {
            if let Some(validator_index) = vals.get_index(&pubkey) {
                local_indices.push(validator_index)
            }
        }
        local_indices
    };

    // Download the duties and update the duties for the current epoch.
    if let Err(e) = poll_beacon_attesters_for_epoch(
        duties_service,
        current_epoch,
        &local_indices,
        &local_pubkeys,
    )
    .await
    {
        error!(
            log,
            "Failed to download attester duties";
            "current_epoch" => current_epoch,
            "request_epoch" => current_epoch,
            "err" => ?e,
        )
    }

    drop(current_epoch_timer);
    let next_epoch_timer = metrics::start_timer_vec(
        &metrics::DUTIES_SERVICE_TIMES,
        &[metrics::UPDATE_ATTESTERS_NEXT_EPOCH],
    );

    // Download the duties and update the duties for the next epoch.
    if let Err(e) =
        poll_beacon_attesters_for_epoch(duties_service, next_epoch, &local_indices, &local_pubkeys)
            .await
    {
        error!(
            log,
            "Failed to download attester duties";
            "current_epoch" => current_epoch,
            "request_epoch" => next_epoch,
            "err" => ?e,
        )
    }

    drop(next_epoch_timer);
    let subscriptions_timer =
        metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::SUBSCRIPTIONS]);

    // This vector is likely to be a little oversized, but it won't reallocate.
    let mut subscriptions = Vec::with_capacity(local_pubkeys.len() * 2);

    // For this epoch and the next epoch, produce any beacon committee subscriptions.
    //
    // We are *always* pushing out subscriptions, even if we've subscribed before. This is
    // potentially excessive on the BN in normal cases, but it will help with fast re-subscriptions
    // if the BN goes offline or we swap to a different one.
    for epoch in &[current_epoch, next_epoch] {
        duties_service
            .attesters
            .read()
            .iter()
            .filter_map(|(_, map)| map.get(epoch))
            // The BN logs a warning if we try and subscribe to current or near-by slots. Give it a
            // buffer.
            .filter(|(_, duty_and_proof)| {
                current_slot + SUBSCRIPTION_BUFFER_SLOTS < duty_and_proof.duty.slot
            })
            .for_each(|(_, duty_and_proof)| {
                let duty = &duty_and_proof.duty;
                let is_aggregator = duty_and_proof.selection_proof.is_some();

                subscriptions.push(BeaconCommitteeSubscription {
                    validator_index: duty.validator_index,
                    committee_index: duty.committee_index,
                    committees_at_slot: duty.committees_at_slot,
                    slot: duty.slot,
                    is_aggregator,
                })
            });
    }

    // If there are any subscriptions, push them out to the beacon nodes.
    if !subscriptions.is_empty() {
        let subscriptions_ref = &subscriptions;
        if let Err(e) = duties_service
            .beacon_nodes
            .run(
                duties_service.require_synced,
                OfflineOnFailure::Yes,
                |beacon_node| async move {
                    let _timer = metrics::start_timer_vec(
                        &metrics::DUTIES_SERVICE_TIMES,
                        &[metrics::SUBSCRIPTIONS_HTTP_POST],
                    );
                    beacon_node
                        .post_validator_beacon_committee_subscriptions(subscriptions_ref)
                        .await
                },
            )
            .await
        {
            error!(
                log,
                "Failed to subscribe validators";
                "error" => %e
            )
        }
    }

    drop(subscriptions_timer);

    // Prune old duties.
    duties_service
        .attesters
        .write()
        .iter_mut()
        .for_each(|(_, map)| {
            map.retain(|&epoch, _| epoch + HISTORICAL_DUTIES_EPOCHS >= current_epoch)
        });

    Ok(())
}

/// For the given `local_indices` and `local_pubkeys`, download the duties for the given `epoch` and
/// store them in `duties_service.attesters`.
async fn poll_beacon_attesters_for_epoch<T: SlotClock + 'static, E: EthSpec>(
    duties_service: &DutiesService<T, E>,
    epoch: Epoch,
    local_indices: &[u64],
    local_pubkeys: &HashSet<PublicKeyBytes>,
) -> Result<(), Error> {
    let log = duties_service.context.log();

    // No need to bother the BN if we don't have any validators.
    if local_indices.is_empty() {
        debug!(
            duties_service.context.log(),
            "No validators, not downloading duties";
            "epoch" => epoch,
        );
        return Ok(());
    }

    let fetch_timer = metrics::start_timer_vec(
        &metrics::DUTIES_SERVICE_TIMES,
        &[metrics::UPDATE_ATTESTERS_FETCH],
    );

    let response = duties_service
        .beacon_nodes
        .first_success(
            duties_service.require_synced,
            OfflineOnFailure::Yes,
            |beacon_node| async move {
                let _timer = metrics::start_timer_vec(
                    &metrics::DUTIES_SERVICE_TIMES,
                    &[metrics::ATTESTER_DUTIES_HTTP_POST],
                );
                beacon_node
                    .post_validator_duties_attester(epoch, local_indices)
                    .await
            },
        )
        .await
        .map_err(|e| Error::FailedToDownloadAttesters(e.to_string()))?;

    drop(fetch_timer);
    let _store_timer = metrics::start_timer_vec(
        &metrics::DUTIES_SERVICE_TIMES,
        &[metrics::UPDATE_ATTESTERS_STORE],
    );

    let dependent_root = response.dependent_root;

    // Filter out any duties that are not relevant or are already known.
    let new_duties = {
        // Avoid holding the read-lock for any longer than required.
        let attesters = duties_service.attesters.read();
        response
            .data
            .into_iter()
            .filter(|duty| {
                local_pubkeys.contains(&duty.pubkey) && {
                    // Only update the duties if either is true:
                    //
                    // - There were no known duties for this epoch.
                    // - The dependent root has changed, signalling a re-org.
                    attesters.get(&duty.pubkey).map_or(true, |duties| {
                        duties
                            .get(&epoch)
                            .map_or(true, |(prior, _)| *prior != dependent_root)
                    })
                }
            })
            .collect::<Vec<_>>()
    };

    debug!(
        log,
        "Downloaded attester duties";
        "dependent_root" => %dependent_root,
        "num_new_duties" => new_duties.len(),
    );

    // Produce the `DutyAndProof` messages in parallel.
    let duty_and_proof_results = join_all(new_duties.into_iter().map(|duty| {
        DutyAndProof::new(duty, &duties_service.validator_store, &duties_service.spec)
    }))
    .await;

    // Update the duties service with the new `DutyAndProof` messages.
    let mut attesters = duties_service.attesters.write();
    // Emit the re-org warning at most once per call; `take()` empties this after the first warning.
    let mut already_warned = Some(());
    for result in duty_and_proof_results {
        let duty_and_proof = match result {
            Ok(duty_and_proof) => duty_and_proof,
            Err(e) => {
                error!(
                    log,
                    "Failed to produce duty and proof";
                    "error" => ?e,
                    "msg" => "may impair attestation duties"
                );
                // Do not abort the entire batch for a single failure.
                continue;
            }
        };

        let attester_map = attesters.entry(duty_and_proof.duty.pubkey).or_default();

        if let Some((prior_dependent_root, _)) =
            attester_map.insert(epoch, (dependent_root, duty_and_proof))
        {
            // Using `already_warned` avoids excessive logs.
            if dependent_root != prior_dependent_root && already_warned.take().is_some() {
                warn!(
                    log,
                    "Attester duties re-org";
                    "prior_dependent_root" => %prior_dependent_root,
                    "dependent_root" => %dependent_root,
                    "msg" => "this may happen from time to time"
                )
            }
        }
    }
    drop(attesters);

    Ok(())
}

/// Download the proposer duties for the current epoch and store them in `duties_service.proposers`.
/// If there are any proposers for this slot, send out a notification to the `BlockService`.
///
/// ## Note
///
/// This function will potentially send *two* notifications to the `BlockService`; it will send a
/// notification initially, then it will download the latest duties and send a *second* notification
/// if those duties have changed. This behaviour simultaneously achieves the following:
///
/// 1. Block production can happen immediately and does not have to wait for the proposer duties to
///    download.
/// 2. We won't miss a block if the duties for the current slot happen to change with this poll.
///
/// This sounds great, but is it safe? Firstly, the additional notification will only contain block
/// producers that were not included in the first notification. This should be safe enough.
/// However, we also have the slashing protection as a second line of defence. These two factors
/// provide an acceptable level of safety.
///
/// It's important to note that since there is a 0-epoch look-ahead (i.e., no look-ahead) for block
/// proposers then it's very likely that a proposal for the first slot of the epoch will need to go
/// through the slow path every time. I.e., the proposal will only happen after we've been able to
/// download and process the duties from the BN. This means it is very important to ensure this
/// function is as fast as possible.
async fn poll_beacon_proposers<T: SlotClock + 'static, E: EthSpec>(
    duties_service: &DutiesService<T, E>,
    block_service_tx: &mut Sender<BlockServiceNotification>,
) -> Result<(), Error> {
    let _timer =
        metrics::start_timer_vec(&metrics::DUTIES_SERVICE_TIMES, &[metrics::UPDATE_PROPOSERS]);

    let log = duties_service.context.log();

    let current_slot = duties_service
        .slot_clock
        .now()
        .ok_or(Error::UnableToReadSlotClock)?;
    let current_epoch = current_slot.epoch(E::slots_per_epoch());

    // Notify the block proposal service for any proposals that we have in our cache.
    //
    // See the function-level documentation for more information.
    let initial_block_proposers = duties_service.block_proposers(current_slot);
    notify_block_production_service(
        current_slot,
        &initial_block_proposers,
        block_service_tx,
        &duties_service.validator_store,
        log,
    )
    .await;

    // Collect *all* pubkeys, even those undergoing doppelganger protection.
    //
    // It is useful to keep the duties for all validators around, so they're on hand when
    // doppelganger finishes.
    let local_pubkeys: HashSet<_> = duties_service
        .validator_store
        .voting_pubkeys(DoppelgangerStatus::ignored);

    // Only download duties and push out additional block production events if we have some
    // validators.
    if !local_pubkeys.is_empty() {
        let download_result = duties_service
            .beacon_nodes
            .first_success(
                duties_service.require_synced,
                OfflineOnFailure::Yes,
                |beacon_node| async move {
                    let _timer = metrics::start_timer_vec(
                        &metrics::DUTIES_SERVICE_TIMES,
                        &[metrics::PROPOSER_DUTIES_HTTP_GET],
                    );
                    beacon_node
                        .get_validator_duties_proposer(current_epoch)
                        .await
                },
            )
            .await;

        match download_result {
            Ok(response) => {
                let dependent_root = response.dependent_root;

                let relevant_duties = response
                    .data
                    .into_iter()
                    .filter(|proposer_duty| local_pubkeys.contains(&proposer_duty.pubkey))
                    .collect::<Vec<_>>();

                debug!(
                    log,
                    "Downloaded proposer duties";
                    "dependent_root" => %dependent_root,
                    "num_relevant_duties" => relevant_duties.len(),
                );

                if let Some((prior_dependent_root, _)) = duties_service
                    .proposers
                    .write()
                    .insert(current_epoch, (dependent_root, relevant_duties))
                {
                    if dependent_root != prior_dependent_root {
                        warn!(
                            log,
                            "Proposer duties re-org";
                            "prior_dependent_root" => %prior_dependent_root,
                            "dependent_root" => %dependent_root,
                            "msg" => "this may happen from time to time"
                        )
                    }
                }
            }
            // Don't return early here; we still want to try and produce blocks using the cached
            // values.
            Err(e) => error!(
                log,
                "Failed to download proposer duties";
                "err" => %e,
            ),
        }

        // Compute the block proposers for this slot again, now that we've received an update from
        // the BN.
        //
        // Then, compute the difference between these two sets to obtain a set of block proposers
        // which were not included in the initial notification to the `BlockService`.
        let additional_block_producers = duties_service
            .block_proposers(current_slot)
            .difference(&initial_block_proposers)
            .copied()
            .collect::<HashSet<PublicKeyBytes>>();

        // If there are any new proposers for this slot, send a notification so they produce a
        // block.
        //
        // See the function-level documentation for more reasoning about this behaviour.
        if !additional_block_producers.is_empty() {
            notify_block_production_service(
                current_slot,
                &additional_block_producers,
                block_service_tx,
                &duties_service.validator_store,
                log,
            )
            .await;
            debug!(
                log,
                "Detected new block proposer";
                "current_slot" => current_slot,
            );
            metrics::inc_counter(&metrics::PROPOSAL_CHANGED);
        }
    }

    // Prune old duties.
    duties_service
        .proposers
        .write()
        .retain(|&epoch, _| epoch + HISTORICAL_DUTIES_EPOCHS >= current_epoch);

    Ok(())
}

/// Notify the block service if it should produce a block.
async fn notify_block_production_service<T: SlotClock + 'static, E: EthSpec>(
    current_slot: Slot,
    block_proposers: &HashSet<PublicKeyBytes>,
    block_service_tx: &mut Sender<BlockServiceNotification>,
    validator_store: &ValidatorStore<T, E>,
    log: &Logger,
) {
    let non_doppelganger_proposers = block_proposers
        .iter()
        .filter(|pubkey| validator_store.doppelganger_protection_allows_signing(**pubkey))
        .copied()
        .collect::<Vec<_>>();

    if !non_doppelganger_proposers.is_empty() {
        if let Err(e) = block_service_tx
            .send(BlockServiceNotification {
                slot: current_slot,
                block_proposers: non_doppelganger_proposers,
            })
            .await
        {
            error!(
                log,
                "Failed to notify block service";
                "current_slot" => current_slot,
                "error" => %e
            );
        };
    }
}