Merge branch 'remove-into-gossip-verified-block' of https://github.com/realbigsean/lighthouse into merge-unstable-deneb-june-6th

realbigsean 2023-07-06 16:51:35 -04:00
commit cfe2452533
No known key found for this signature in database
GPG Key ID: BE1B3DB104F6C788
78 changed files with 3075 additions and 407 deletions

Cargo.lock generated
View File

@@ -684,7 +684,7 @@ dependencies = [
 [[package]]
 name = "beacon_node"
-version = "4.2.0"
+version = "4.3.0"
 dependencies = [
  "beacon_chain",
  "clap",
@@ -891,7 +891,7 @@ dependencies = [
 [[package]]
 name = "boot_node"
-version = "4.2.0"
+version = "4.3.0"
 dependencies = [
  "beacon_node",
  "clap",
@@ -4165,7 +4165,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 [[package]]
 name = "lcli"
-version = "4.2.0"
+version = "4.3.0"
 dependencies = [
  "account_utils",
  "beacon_chain",
@@ -4812,7 +4812,7 @@ dependencies = [
 [[package]]
 name = "lighthouse"
-version = "4.2.0"
+version = "4.3.0"
 dependencies = [
  "account_manager",
  "account_utils",
@@ -9108,6 +9108,7 @@ dependencies = [
  "smallvec",
  "ssz_types",
  "state_processing",
+ "strum",
  "superstruct 0.6.0",
  "swap_or_not_shuffle",
  "tempfile",

View File

@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "4.2.0"
+version = "4.3.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2021"

View File

@@ -2664,6 +2664,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 signature_verified_block.block_root(),
                 signature_verified_block,
                 notify_execution_layer,
+                || Ok(()),
             )
             .await
         {
@@ -2783,6 +2784,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         block_root: Hash256,
         unverified_block: B,
         notify_execution_layer: NotifyExecutionLayer,
+        publish_fn: impl FnOnce() -> Result<(), BlockError<T::EthSpec>> + Send + 'static,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         // Start the Prometheus timer.
         let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
@@ -2798,6 +2800,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             notify_execution_layer,
         )?;
+        //TODO(sean) error handling?
+        publish_fn()?;
         let executed_block = self
             .clone()
             .into_executed_block(execution_pending)
@@ -3073,7 +3078,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 block_delay,
                 &state,
                 payload_verification_status,
+                self.config.progressive_balances_mode,
                 &self.spec,
+                &self.log,
             )
             .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         }
@@ -6012,13 +6019,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Since we are likely calling this during the slot we are going to propose in, don't take into
     /// account the current slot when accounting for skips.
     pub fn is_healthy(&self, parent_root: &Hash256) -> Result<ChainHealth, Error> {
+        let cached_head = self.canonical_head.cached_head();
         // Check if the merge has been finalized.
-        if let Some(finalized_hash) = self
-            .canonical_head
-            .cached_head()
-            .forkchoice_update_parameters()
-            .finalized_hash
-        {
+        if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash {
             if ExecutionBlockHash::zero() == finalized_hash {
                 return Ok(ChainHealth::PreMerge);
             }
@@ -6045,17 +6048,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // Check slots at the head of the chain.
         let prev_slot = current_slot.saturating_sub(Slot::new(1));
-        let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot());
+        let head_skips = prev_slot.saturating_sub(cached_head.head_slot());
         let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips;
         // Check if finalization is advancing.
         let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
-        let epochs_since_finalization = current_epoch.saturating_sub(
-            self.canonical_head
-                .cached_head()
-                .finalized_checkpoint()
-                .epoch,
-        );
+        let epochs_since_finalization =
+            current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch);
         let finalization_check = epochs_since_finalization.as_usize()
             <= self.config.builder_fallback_epochs_since_finalization;
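
The change above threads a new `publish_fn` hook through `BeaconChain::process_block`. As a rough orientation only (this sketch is not part of the commit; the surrounding `chain`, `block_root` and `wrapped_block` bindings are assumed), a caller that has nothing of its own to publish passes a no-op closure, exactly as the updated tests later in this diff do:

    // `publish_fn` is invoked once the block has passed the pre-import verification
    // inside `process_block` and before the block is imported, so returning an error
    // from it aborts the import.
    let status = chain
        .process_block(
            block_root,
            wrapped_block,
            NotifyExecutionLayer::Yes,
            || Ok(()), // no-op publish hook, as used by the test harness
        )
        .await?;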

View File

@@ -57,6 +57,7 @@ use crate::execution_payload::{
     is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
     AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
 };
+use crate::observed_block_producers::SeenBlock;
 use crate::snapshot_cache::PreProcessingSnapshot;
 use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
 use crate::validator_pubkey_cache::ValidatorPubkeyCache;
@@ -189,13 +190,6 @@ pub enum BlockError<T: EthSpec> {
     ///
     /// The block is valid and we have already imported a block with this hash.
     BlockIsAlreadyKnown,
-    /// A block for this proposer and slot has already been observed.
-    ///
-    /// ## Peer scoring
-    ///
-    /// The `proposer` has already proposed a block at this slot. The existing block may or may not
-    /// be equal to the given block.
-    RepeatProposal { proposer: u64, slot: Slot },
     /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
     ///
     /// ## Peer scoring
@@ -291,6 +285,14 @@ pub enum BlockError<T: EthSpec> {
     /// problems to worry about than losing peers, and we're doing the network a favour by
     /// disconnecting.
     ParentExecutionPayloadInvalid { parent_root: Hash256 },
+    /// The block is a slashable equivocation from the proposer.
+    ///
+    /// ## Peer scoring
+    ///
+    /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
+    /// we penalise them with a mid-tolerance error.
+    Slashable,
+    //TODO(sean) peer scoring docs
     /// A blob alone failed validation.
     BlobValidation(BlobError<T>),
     /// The block and blob together failed validation.
@@ -892,19 +894,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
             return Err(BlockError::BlockIsAlreadyKnown);
         }
-        // Check that we have not already received a block with a valid signature for this slot.
-        if chain
-            .observed_block_producers
-            .read()
-            .proposer_has_been_observed(block.message())
-            .map_err(|e| BlockError::BeaconChainError(e.into()))?
-        {
-            return Err(BlockError::RepeatProposal {
-                proposer: block.message().proposer_index(),
-                slot: block.slot(),
-            });
-        }
         // Do not process a block that doesn't descend from the finalized root.
         //
         // We check this *before* we load the parent so that we can return a more detailed error.
@@ -1020,17 +1009,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         //
         // It's important to double-check that the proposer still hasn't been observed so we don't
         // have a race-condition when verifying two blocks simultaneously.
-        if chain
+        match chain
             .observed_block_producers
             .write()
-            .observe_proposer(block.message())
+            .observe_proposal(block_root, block.message())
             .map_err(|e| BlockError::BeaconChainError(e.into()))?
         {
-            return Err(BlockError::RepeatProposal {
-                proposer: block.message().proposer_index(),
-                slot: block.slot(),
-            });
-        }
+            SeenBlock::Slashable => return Err(BlockError::Slashable),
+            SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown),
+            SeenBlock::UniqueNonSlashable => {}
+        };
         if block.message().proposer_index() != expected_proposer as u64 {
             return Err(BlockError::IncorrectBlockProposer {
@@ -1293,6 +1281,12 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
         chain: &Arc<BeaconChain<T>>,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<Self, BlockError<T::EthSpec>> {
+        chain
+            .observed_block_producers
+            .write()
+            .observe_proposal(block_root, block.message())
+            .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         if let Some(parent) = chain
             .canonical_head
             .fork_choice_read_lock()
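
One consequence of removing `RepeatProposal` above: callers that previously matched on it now see either `BlockIsAlreadyKnown` (a benign duplicate of a block we already have) or the new `Slashable` variant (a second, conflicting block from the same proposer and slot). A small illustrative match, not taken from this commit and assuming `BlockError`/`EthSpec` are in scope:

    // Hypothetical downstream handler; the mapping mirrors the doc comments above.
    fn classify<E: EthSpec>(err: &BlockError<E>) -> &'static str {
        match err {
            BlockError::BlockIsAlreadyKnown => "duplicate, ignore",
            BlockError::Slashable => "equivocation, apply mid-tolerance peer penalty",
            _ => "handle as before",
        }
    }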

View File

@@ -343,7 +343,7 @@ where
         let beacon_block = genesis_block(&mut beacon_state, &self.spec)?;
         beacon_state
-            .build_all_caches(&self.spec)
+            .build_caches(&self.spec)
             .map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?;
         let beacon_state_root = beacon_block.message().state_root();
@@ -433,7 +433,7 @@ where
         // Prime all caches before storing the state in the database and computing the tree hash
         // root.
         weak_subj_state
-            .build_all_caches(&self.spec)
+            .build_caches(&self.spec)
             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;
         weak_subj_state
             .update_tree_hash_cache()
@@ -701,6 +701,8 @@ where
                 store.clone(),
                 Some(current_slot),
                 &self.spec,
+                self.chain_config.progressive_balances_mode,
+                &log,
             )?;
         }
@@ -714,7 +716,7 @@ where
         head_snapshot
             .beacon_state
-            .build_all_caches(&self.spec)
+            .build_caches(&self.spec)
             .map_err(|e| format!("Failed to build state caches: {:?}", e))?;
         // Perform a check to ensure that the finalization points of the head and fork choice are
@@ -840,9 +842,7 @@ where
             observed_sync_aggregators: <_>::default(),
             // TODO: allow for persisting and loading the pool from disk.
             observed_block_producers: <_>::default(),
-            // TODO: allow for persisting and loading the pool from disk.
             observed_blob_sidecars: <_>::default(),
-            // TODO: allow for persisting and loading the pool from disk.
             observed_voluntary_exits: <_>::default(),
             observed_proposer_slashings: <_>::default(),
             observed_attester_slashings: <_>::default(),

View File

@@ -1,7 +1,7 @@
 pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
 use serde_derive::{Deserialize, Serialize};
 use std::time::Duration;
-use types::{Checkpoint, Epoch};
+use types::{Checkpoint, Epoch, ProgressiveBalancesMode};

 pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20);
 pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2);
@@ -81,6 +81,8 @@ pub struct ChainConfig {
     pub always_prepare_payload: bool,
     /// Whether backfill sync processing should be rate-limited.
     pub enable_backfill_rate_limiting: bool,
+    /// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation.
+    pub progressive_balances_mode: ProgressiveBalancesMode,
 }

 impl Default for ChainConfig {
@@ -111,6 +113,7 @@ impl Default for ChainConfig {
             genesis_backfill: false,
             always_prepare_payload: false,
             enable_backfill_rate_limiting: true,
+            progressive_balances_mode: ProgressiveBalancesMode::Checked,
         }
     }
 }
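
For anyone wiring this up (an illustration, not part of the diff; the `beacon_chain::ChainConfig` re-export path is assumed, and only the `Checked` variant is shown anywhere in this commit), overriding the new field while keeping every other default looks like:

    use beacon_chain::ChainConfig;        // re-export path assumed
    use types::ProgressiveBalancesMode;

    // `Checked` is the default set above; any other variants are not shown in this diff.
    let chain_config = ChainConfig {
        progressive_balances_mode: ProgressiveBalancesMode::Checked,
        ..ChainConfig::default()
    };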

View File

@@ -216,6 +216,7 @@ pub enum BeaconChainError {
     BlsToExecutionConflictsWithPool,
     InconsistentFork(InconsistentFork),
     ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
+    UnableToPublish,
     AvailabilityCheckError(AvailabilityCheckError),
 }

View File

@@ -10,7 +10,10 @@ use state_processing::{
 use std::sync::Arc;
 use std::time::Duration;
 use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore};
-use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot};
+use types::{
+    BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock,
+    Slot,
+};

 const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \
                                   consider deleting it by running with the --purge-db flag.";
@@ -100,6 +103,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
     store: Arc<HotColdDB<E, Hot, Cold>>,
     current_slot: Option<Slot>,
     spec: &ChainSpec,
+    progressive_balances_mode: ProgressiveBalancesMode,
+    log: &Logger,
 ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
     // Fetch finalized block.
     let finalized_checkpoint = head_state.finalized_checkpoint();
@@ -197,7 +202,9 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
             Duration::from_secs(0),
             &state,
             payload_verification_status,
+            progressive_balances_mode,
             spec,
+            log,
         )
         .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
     }

View File

@@ -1,9 +1,10 @@
 //! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from
 //! validators that have already produced a block.

+use std::collections::hash_map::Entry;
 use std::collections::{HashMap, HashSet};
 use std::marker::PhantomData;
-use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned};
+use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned};

 #[derive(Debug, PartialEq)]
 pub enum Error {
@@ -14,6 +15,12 @@ pub enum Error {
     ValidatorIndexTooHigh(u64),
 }

+#[derive(Eq, Hash, PartialEq, Debug, Default)]
+struct ProposalKey {
+    slot: Slot,
+    proposer: u64,
+}
+
 /// Maintains a cache of observed `(block.slot, block.proposer)`.
 ///
 /// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you
@@ -27,7 +34,7 @@ pub enum Error {
 /// known_distinct_shufflings` which is much smaller.
 pub struct ObservedBlockProducers<E: EthSpec> {
     finalized_slot: Slot,
-    items: HashMap<Slot, HashSet<u64>>,
+    items: HashMap<ProposalKey, HashSet<Hash256>>,
     _phantom: PhantomData<E>,
 }
@@ -42,6 +49,24 @@ impl<E: EthSpec> Default for ObservedBlockProducers<E> {
     }
 }

+pub enum SeenBlock {
+    Duplicate,
+    Slashable,
+    UniqueNonSlashable,
+}
+
+impl SeenBlock {
+    pub fn proposer_previously_observed(self) -> bool {
+        match self {
+            Self::Duplicate | Self::Slashable => true,
+            Self::UniqueNonSlashable => false,
+        }
+    }
+
+    pub fn is_slashable(&self) -> bool {
+        matches!(self, Self::Slashable)
+    }
+}
+
 impl<E: EthSpec> ObservedBlockProducers<E> {
     /// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will
     /// update `self` so future calls to it indicate that this block is known.
@@ -52,16 +77,44 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
     ///
     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
-    pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
+    pub fn observe_proposal(
+        &mut self,
+        block_root: Hash256,
+        block: BeaconBlockRef<'_, E>,
+    ) -> Result<SeenBlock, Error> {
         self.sanitize_block(block)?;
-        let did_not_exist = self
-            .items
-            .entry(block.slot())
-            .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize()))
-            .insert(block.proposer_index());
-        Ok(!did_not_exist)
+        let key = ProposalKey {
+            slot: block.slot(),
+            proposer: block.proposer_index(),
+        };
+
+        let entry = self.items.entry(key);
+
+        let slashable_proposal = match entry {
+            Entry::Occupied(mut occupied_entry) => {
+                let block_roots = occupied_entry.get_mut();
+                let newly_inserted = block_roots.insert(block_root);
+
+                let is_equivocation = block_roots.len() > 1;
+
+                if is_equivocation {
+                    SeenBlock::Slashable
+                } else if !newly_inserted {
+                    SeenBlock::Duplicate
+                } else {
+                    SeenBlock::UniqueNonSlashable
+                }
+            }
+            Entry::Vacant(vacant_entry) => {
+                let block_roots = HashSet::from([block_root]);
+                vacant_entry.insert(block_roots);
+
+                SeenBlock::UniqueNonSlashable
+            }
+        };
+
+        Ok(slashable_proposal)
     }

     /// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not
@@ -72,15 +125,33 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
     ///
     /// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
     /// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
-    pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
+    pub fn proposer_has_been_observed(
+        &self,
+        block: BeaconBlockRef<'_, E>,
+        block_root: Hash256,
+    ) -> Result<SeenBlock, Error> {
         self.sanitize_block(block)?;
-        let exists = self
-            .items
-            .get(&block.slot())
-            .map_or(false, |set| set.contains(&block.proposer_index()));
-        Ok(exists)
+        let key = ProposalKey {
+            slot: block.slot(),
+            proposer: block.proposer_index(),
+        };
+
+        if let Some(block_roots) = self.items.get(&key) {
+            let block_already_known = block_roots.contains(&block_root);
+            let no_prev_known_blocks =
+                block_roots.difference(&HashSet::from([block_root])).count() == 0;
+
+            if !no_prev_known_blocks {
+                Ok(SeenBlock::Slashable)
+            } else if block_already_known {
+                Ok(SeenBlock::Duplicate)
+            } else {
+                Ok(SeenBlock::UniqueNonSlashable)
+            }
+        } else {
+            Ok(SeenBlock::UniqueNonSlashable)
+        }
     }

     /// Returns `Ok(())` if the given `block` is sane.
@@ -112,15 +183,15 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
         }
         self.finalized_slot = finalized_slot;
-        self.items.retain(|slot, _set| *slot > finalized_slot);
+        self.items.retain(|key, _| key.slot > finalized_slot);
     }

     /// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`.
     ///
     /// This is useful for doppelganger detection.
     pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool {
-        self.items.iter().any(|(slot, producers)| {
-            slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index)
+        self.items.iter().any(|(key, _)| {
+            key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index
         })
     }
 }
@@ -148,9 +219,12 @@ mod tests {
         // Slot 0, proposer 0
         let block_a = get_block(0, 0);
+        let block_root = block_a.canonical_root();

         assert_eq!(
-            cache.observe_proposer(block_a.to_ref()),
+            cache
+                .observe_proposal(block_root, block_a.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(false),
             "can observe proposer, indicates proposer unobserved"
         );
@@ -164,7 +238,10 @@ mod tests {
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(0))
+                .get(&ProposalKey {
+                    slot: Slot::new(0),
+                    proposer: 0
+                })
                 .expect("slot zero should be present")
                 .len(),
             1,
@@ -182,7 +259,10 @@ mod tests {
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(0))
+                .get(&ProposalKey {
+                    slot: Slot::new(0),
+                    proposer: 0
+                })
                 .expect("slot zero should be present")
                 .len(),
             1,
@@ -207,9 +287,12 @@ mod tests {
         // First slot of finalized epoch, proposer 0
         let block_b = get_block(E::slots_per_epoch(), 0);
+        let block_root_b = block_b.canonical_root();

         assert_eq!(
-            cache.observe_proposer(block_b.to_ref()),
+            cache
+                .observe_proposal(block_root_b, block_b.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Err(Error::FinalizedBlock {
                 slot: E::slots_per_epoch().into(),
                 finalized_slot: E::slots_per_epoch().into(),
@@ -229,7 +312,9 @@ mod tests {
         let block_b = get_block(three_epochs, 0);

         assert_eq!(
-            cache.observe_proposer(block_b.to_ref()),
+            cache
+                .observe_proposal(block_root_b, block_b.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(false),
             "can insert non-finalized block"
         );
@@ -238,7 +323,10 @@ mod tests {
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(three_epochs))
+                .get(&ProposalKey {
+                    slot: Slot::new(three_epochs),
+                    proposer: 0
+                })
                 .expect("the three epochs slot should be present")
                 .len(),
             1,
@@ -262,7 +350,10 @@ mod tests {
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(three_epochs))
+                .get(&ProposalKey {
+                    slot: Slot::new(three_epochs),
+                    proposer: 0
+                })
                 .expect("the three epochs slot should be present")
                 .len(),
             1,
@@ -276,24 +367,33 @@ mod tests {
         // Slot 0, proposer 0
         let block_a = get_block(0, 0);
+        let block_root_a = block_a.canonical_root();

         assert_eq!(
-            cache.proposer_has_been_observed(block_a.to_ref()),
+            cache
+                .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root())
+                .map(|x| x.proposer_previously_observed()),
             Ok(false),
             "no observation in empty cache"
         );
         assert_eq!(
-            cache.observe_proposer(block_a.to_ref()),
+            cache
+                .observe_proposal(block_root_a, block_a.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(false),
             "can observe proposer, indicates proposer unobserved"
         );
         assert_eq!(
-            cache.proposer_has_been_observed(block_a.to_ref()),
+            cache
+                .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root())
+                .map(|x| x.proposer_previously_observed()),
             Ok(true),
             "observed block is indicated as true"
         );
         assert_eq!(
-            cache.observe_proposer(block_a.to_ref()),
+            cache
+                .observe_proposal(block_root_a, block_a.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(true),
             "observing again indicates true"
         );
@@ -303,7 +403,10 @@ mod tests {
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(0))
+                .get(&ProposalKey {
+                    slot: Slot::new(0),
+                    proposer: 0
+                })
                 .expect("slot zero should be present")
                 .len(),
             1,
@@ -312,24 +415,33 @@ mod tests {
         // Slot 1, proposer 0
         let block_b = get_block(1, 0);
+        let block_root_b = block_b.canonical_root();

         assert_eq!(
-            cache.proposer_has_been_observed(block_b.to_ref()),
+            cache
+                .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root())
+                .map(|x| x.proposer_previously_observed()),
             Ok(false),
             "no observation for new slot"
        );
         assert_eq!(
-            cache.observe_proposer(block_b.to_ref()),
+            cache
+                .observe_proposal(block_root_b, block_b.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(false),
             "can observe proposer for new slot, indicates proposer unobserved"
         );
         assert_eq!(
-            cache.proposer_has_been_observed(block_b.to_ref()),
+            cache
+                .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root())
+                .map(|x| x.proposer_previously_observed()),
             Ok(true),
             "observed block in slot 1 is indicated as true"
         );
         assert_eq!(
-            cache.observe_proposer(block_b.to_ref()),
+            cache
+                .observe_proposal(block_root_b, block_b.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(true),
             "observing slot 1 again indicates true"
         );
@@ -339,7 +451,10 @@ mod tests {
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(0))
+                .get(&ProposalKey {
+                    slot: Slot::new(0),
+                    proposer: 0
+                })
                 .expect("slot zero should be present")
                 .len(),
             1,
@@ -348,7 +463,10 @@ mod tests {
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(1))
+                .get(&ProposalKey {
+                    slot: Slot::new(1),
+                    proposer: 0
+                })
                 .expect("slot zero should be present")
                 .len(),
             1,
@@ -357,45 +475,54 @@ mod tests {
         // Slot 0, proposer 1
         let block_c = get_block(0, 1);
+        let block_root_c = block_c.canonical_root();

         assert_eq!(
-            cache.proposer_has_been_observed(block_c.to_ref()),
+            cache
+                .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root())
+                .map(|x| x.proposer_previously_observed()),
             Ok(false),
             "no observation for new proposer"
         );
         assert_eq!(
-            cache.observe_proposer(block_c.to_ref()),
+            cache
+                .observe_proposal(block_root_c, block_c.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(false),
             "can observe new proposer, indicates proposer unobserved"
         );
         assert_eq!(
-            cache.proposer_has_been_observed(block_c.to_ref()),
+            cache
+                .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root())
+                .map(|x| x.proposer_previously_observed()),
             Ok(true),
             "observed new proposer block is indicated as true"
         );
         assert_eq!(
-            cache.observe_proposer(block_c.to_ref()),
+            cache
+                .observe_proposal(block_root_c, block_c.to_ref())
+                .map(SeenBlock::proposer_previously_observed),
             Ok(true),
             "observing new proposer again indicates true"
         );

         assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
-        assert_eq!(cache.items.len(), 2, "two slots should be present");
+        assert_eq!(cache.items.len(), 3, "three slots should be present");
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(0))
-                .expect("slot zero should be present")
-                .len(),
+                .iter()
+                .filter(|(k, _)| k.slot == cache.finalized_slot)
+                .count(),
             2,
             "two proposers should be present in slot 0"
         );
         assert_eq!(
             cache
                 .items
-                .get(&Slot::new(1))
-                .expect("slot zero should be present")
-                .len(),
+                .iter()
+                .filter(|(k, _)| k.slot == Slot::new(1))
+                .count(),
             1,
             "only one proposer should be present in slot 1"
         );
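
To make the new cache semantics concrete (a sketch in the style of the tests above, reusing their `E` alias and `get_block` helper; the conflicting root is fabricated for illustration): re-observing the same root for a `(slot, proposer)` pair is a `Duplicate`, while a second, different root for the same pair is `Slashable`.

    let mut cache = ObservedBlockProducers::<E>::default();
    let block = get_block(0, 0); // slot 0, proposer 0 (test helper defined above)
    let root = block.canonical_root();
    let conflicting_root = Hash256::repeat_byte(0xff); // any second root for the same proposal

    // First observation of this (slot, proposer): unique and non-slashable.
    assert!(matches!(
        cache.observe_proposal(root, block.to_ref()),
        Ok(SeenBlock::UniqueNonSlashable)
    ));
    // The same root again is only a duplicate, not an equivocation.
    assert!(matches!(
        cache.observe_proposal(root, block.to_ref()),
        Ok(SeenBlock::Duplicate)
    ));
    // A different root for the same slot and proposer is a slashable equivocation.
    assert!(matches!(
        cache.observe_proposal(conflicting_root, block.to_ref()),
        Ok(SeenBlock::Slashable)
    ));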

View File

@@ -808,6 +808,15 @@ where
         state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap()
     }

+    pub async fn make_blinded_block(
+        &self,
+        state: BeaconState<E>,
+        slot: Slot,
+    ) -> (SignedBlindedBeaconBlock<E>, BeaconState<E>) {
+        let (unblinded, new_state) = self.make_block(state, slot).await;
+        (unblinded.into(), new_state)
+    }
+
     /// Returns a newly created block, signed by the proposer for the given slot.
     pub async fn make_block(
         &self,
@@ -820,9 +829,7 @@ where
         complete_state_advance(&mut state, None, slot, &self.spec)
             .expect("should be able to advance state to slot");
-        state
-            .build_all_caches(&self.spec)
-            .expect("should build caches");
+        state.build_caches(&self.spec).expect("should build caches");
         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@@ -906,9 +913,7 @@ where
         complete_state_advance(&mut state, None, slot, &self.spec)
             .expect("should be able to advance state to slot");
-        state
-            .build_all_caches(&self.spec)
-            .expect("should build caches");
+        state.build_caches(&self.spec).expect("should build caches");
         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@@ -1626,6 +1631,36 @@ where
             .sign(sk, &fork, genesis_validators_root, &self.chain.spec)
     }

+    pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> {
+        let propposer_slashing = self.make_proposer_slashing(validator_index);
+        if let ObservationOutcome::New(verified_proposer_slashing) = self
+            .chain
+            .verify_proposer_slashing_for_gossip(propposer_slashing)
+            .expect("should verify proposer slashing for gossip")
+        {
+            self.chain
+                .import_proposer_slashing(verified_proposer_slashing);
+            Ok(())
+        } else {
+            Err("should observe new proposer slashing".to_string())
+        }
+    }
+
+    pub fn add_attester_slashing(&self, validator_indices: Vec<u64>) -> Result<(), String> {
+        let attester_slashing = self.make_attester_slashing(validator_indices);
+        if let ObservationOutcome::New(verified_attester_slashing) = self
+            .chain
+            .verify_attester_slashing_for_gossip(attester_slashing)
+            .expect("should verify attester slashing for gossip")
+        {
+            self.chain
+                .import_attester_slashing(verified_attester_slashing);
+            Ok(())
+        } else {
+            Err("should observe new attester slashing".to_string())
+        }
+    }
+
     pub fn add_bls_to_execution_change(
         &self,
         validator_index: u64,
@@ -1804,7 +1839,7 @@ where
         self.set_current_slot(slot);
         let block_hash: SignedBeaconBlockHash = self
             .chain
-            .process_block(block_root, block.into(), NotifyExecutionLayer::Yes)
+            .process_block(block_root, block.into(), NotifyExecutionLayer::Yes, || Ok(()))
             .await?
             .try_into()
             .unwrap();
@@ -1823,6 +1858,7 @@ where
                 wrapped_block.canonical_root(),
                 wrapped_block,
                 NotifyExecutionLayer::Yes,
+                || Ok(()),
             )
             .await?
             .try_into()

View File

@@ -459,6 +459,7 @@ async fn assert_invalid_signature(
                 chain_segment_blobs[block_index].clone(),
             ),
             NotifyExecutionLayer::Yes,
+            || Ok(()),
         )
         .await;
     assert!(
@@ -526,6 +527,7 @@ async fn invalid_signature_gossip_block() {
                 signed_block.canonical_root(),
                 Arc::new(signed_block),
                 NotifyExecutionLayer::Yes,
+                || Ok(()),
             )
             .await,
         Err(BlockError::InvalidSignature)
@@ -859,6 +861,7 @@ async fn block_gossip_verification() {
                 gossip_verified.block_root,
                 gossip_verified,
                 NotifyExecutionLayer::Yes,
+                || Ok(()),
             )
             .await
             .expect("should import valid gossip verified block");
@@ -1069,11 +1072,7 @@ async fn block_gossip_verification() {
     assert!(
         matches!(
             unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
-            BlockError::RepeatProposal {
-                proposer,
-                slot,
-            }
-            if proposer == other_proposer && slot == block.message().slot()
+            BlockError::BlockIsAlreadyKnown,
         ),
         "should register any valid signature against the proposer, even if the block failed later verification"
     );
@@ -1106,11 +1105,7 @@ async fn block_gossip_verification() {
                 .await
                 .err()
                 .expect("should error when processing known block"),
-            BlockError::RepeatProposal {
-                proposer,
-                slot,
-            }
-            if proposer == block.message().proposer_index() && slot == block.message().slot()
+            BlockError::BlockIsAlreadyKnown
         ),
         "the second proposal by this validator should be rejected"
     );
@@ -1159,6 +1154,7 @@ async fn verify_block_for_gossip_slashing_detection() {
            verified_block.block_root,
            verified_block,
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .unwrap();
@@ -1198,6 +1194,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
            verified_block.block_root,
            verified_block,
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .unwrap();
@@ -1345,6 +1342,7 @@ async fn add_base_block_to_altair_chain() {
            base_block.canonical_root(),
            Arc::new(base_block.clone()),
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .err()
@@ -1479,6 +1477,7 @@ async fn add_altair_block_to_base_chain() {
            altair_block.canonical_root(),
            Arc::new(altair_block.clone()),
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .err()

View File

@@ -133,13 +133,8 @@ async fn base_altair_merge_capella() {
     for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() {
         harness.extend_slots(1).await;
         let block = &harness.chain.head_snapshot().beacon_block;
-        let full_payload: FullPayload<E> = block
-            .message()
-            .body()
-            .execution_payload()
-            .unwrap()
-            .clone()
-            .into();
+        let full_payload: FullPayload<E> =
+            block.message().body().execution_payload().unwrap().into();
         // pre-capella shouldn't have withdrawals
         assert!(full_payload.withdrawals_root().is_err());
         execution_payloads.push(full_payload);
@@ -151,13 +146,8 @@ async fn base_altair_merge_capella() {
     for _ in 0..16 {
         harness.extend_slots(1).await;
         let block = &harness.chain.head_snapshot().beacon_block;
-        let full_payload: FullPayload<E> = block
-            .message()
-            .body()
-            .execution_payload()
-            .unwrap()
-            .clone()
-            .into();
+        let full_payload: FullPayload<E> =
+            block.message().body().execution_payload().unwrap().into();
         // post-capella should have withdrawals
         assert!(full_payload.withdrawals_root().is_ok());
         execution_payloads.push(full_payload);

View File

@@ -698,6 +698,7 @@ async fn invalidates_all_descendants() {
            fork_block.canonical_root(),
            Arc::new(fork_block),
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .unwrap()
@@ -797,6 +798,7 @@ async fn switches_heads() {
            fork_block.canonical_root(),
            Arc::new(fork_block),
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .unwrap()
@@ -1055,7 +1057,9 @@ async fn invalid_parent() {
     // Ensure the block built atop an invalid payload is invalid for import.
     assert!(matches!(
-        rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await,
+        rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes,
+            || Ok(()),
+        ).await,
         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
         if invalid_root == parent_root
     ));
@@ -1069,8 +1073,9 @@ async fn invalid_parent() {
            Duration::from_secs(0),
            &state,
            PayloadVerificationStatus::Optimistic,
+            rig.harness.chain.config.progressive_balances_mode,
            &rig.harness.chain.spec,
+            rig.harness.logger()
        ),
        Err(ForkChoiceError::ProtoArrayStringError(message))
        if message.contains(&format!(
@@ -1341,7 +1346,12 @@ async fn build_optimistic_chain(
     for block in blocks {
         rig.harness
             .chain
-            .process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes)
+            .process_block(
+                block.canonical_root(),
+                block,
+                NotifyExecutionLayer::Yes,
+                || Ok(()),
+            )
             .await
             .unwrap();
     }
@@ -1901,6 +1911,7 @@ async fn recover_from_invalid_head_by_importing_blocks() {
            fork_block.canonical_root(),
            fork_block.clone(),
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .unwrap();

View File

@@ -2178,6 +2178,7 @@ async fn weak_subjectivity_sync() {
            full_block.canonical_root(),
            BlockWrapper::new(Arc::new(full_block), blobs),
            NotifyExecutionLayer::Yes,
+            || Ok(()),
        )
        .await
        .unwrap();

View File

@@ -686,6 +686,7 @@ async fn run_skip_slot_test(skip_slots: u64) {
            harness_a.chain.head_snapshot().beacon_block_root,
            harness_a.get_head_block(),
            NotifyExecutionLayer::Yes,
+            || Ok(())
        )
        .await
        .unwrap();

View File

@@ -49,7 +49,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>(
        .map_err(beacon_chain_error)?;
     state
-        .build_all_caches(&chain.spec)
+        .build_caches(&chain.spec)
        .map_err(beacon_state_error)?;
     let mut reward_cache = Default::default();

View File

@@ -32,7 +32,7 @@ use beacon_chain::{
 pub use block_id::BlockId;
 use directory::DEFAULT_ROOT_DIR;
 use eth2::types::{
-    self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SignedBlockContents,
+    self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, SignedBlockContents,
     SkipRandaoVerification, ValidatorId, ValidatorStatus,
 };
 use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
@@ -41,7 +41,9 @@ use logging::SSELoggingComponents;
 use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
 use operation_pool::ReceivedPreCapella;
 use parking_lot::RwLock;
-use publish_blocks::ProvenancedBlock;
+pub use publish_blocks::{
+    publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock,
+};
 use serde::{Deserialize, Serialize};
 use slog::{crit, debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
@@ -325,6 +327,7 @@ pub fn serve<T: BeaconChainTypes>(
     };

     let eth_v1 = single_version(V1);
+    let eth_v2 = single_version(V2);

     // Create a `warp` filter that provides access to the network globals.
     let inner_network_globals = ctx.network_globals.clone();
@@ -1223,16 +1226,59 @@ pub fn serve<T: BeaconChainTypes>(
             log: Logger| async move {
                publish_blocks::publish_block(
                    None,
-                   ProvenancedBlock::Local(block_contents),
+                   ProvenancedBlock::local(block_contents),
                    chain,
                    &network_tx,
                    log,
+                   BroadcastValidation::default(),
                )
                .await
                .map(|()| warp::reply().into_response())
            },
        );

+    let post_beacon_blocks_v2 = eth_v2
+        .and(warp::path("beacon"))
+        .and(warp::path("blocks"))
+        .and(warp::query::<api_types::BroadcastValidationQuery>())
+        .and(warp::path::end())
+        .and(warp::body::json())
+        .and(chain_filter.clone())
+        .and(network_tx_filter.clone())
+        .and(log_filter.clone())
+        .then(
+            |validation_level: api_types::BroadcastValidationQuery,
+             block_contents: SignedBlockContents<T::EthSpec>,
+             chain: Arc<BeaconChain<T>>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             log: Logger| async move {
+                match publish_blocks::publish_block(
+                    None,
+                    ProvenancedBlock::local(block_contents),
+                    chain,
+                    &network_tx,
+                    log,
+                    validation_level.broadcast_validation,
+                )
+                .await
+                {
+                    Ok(()) => warp::reply().into_response(),
+                    Err(e) => match warp_utils::reject::handle_rejection(e).await {
+                        Ok(reply) => reply.into_response(),
+                        Err(_) => warp::reply::with_status(
+                            StatusCode::INTERNAL_SERVER_ERROR,
+                            eth2::StatusCode::INTERNAL_SERVER_ERROR,
+                        )
+                        .into_response(),
+                    },
+                }
+            },
+        );
+
+    /*
+     * beacon/blocks
+     */
+
     // POST beacon/blinded_blocks
     let post_beacon_blinded_blocks = eth_v1
         .and(warp::path("beacon"))
@@ -1247,9 +1293,52 @@ pub fn serve<T: BeaconChainTypes>(
             chain: Arc<BeaconChain<T>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| async move {
-                publish_blocks::publish_blinded_block(block, chain, &network_tx, log)
-                    .await
-                    .map(|()| warp::reply().into_response())
+                publish_blocks::publish_blinded_block(
+                    block,
+                    chain,
+                    &network_tx,
+                    log,
+                    BroadcastValidation::default(),
+                )
+                .await
+                .map(|()| warp::reply().into_response())
             },
         );

+    let post_beacon_blinded_blocks_v2 = eth_v2
+        .and(warp::path("beacon"))
+        .and(warp::path("blinded_blocks"))
+        .and(warp::query::<api_types::BroadcastValidationQuery>())
+        .and(warp::path::end())
+        .and(warp::body::json())
+        .and(chain_filter.clone())
+        .and(network_tx_filter.clone())
+        .and(log_filter.clone())
+        .then(
+            |validation_level: api_types::BroadcastValidationQuery,
+             block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<_>>,
+             chain: Arc<BeaconChain<T>>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             log: Logger| async move {
+                match publish_blocks::publish_blinded_block(
+                    block_contents,
+                    chain,
+                    &network_tx,
+                    log,
+                    validation_level.broadcast_validation,
+                )
+                .await
+                {
+                    Ok(()) => warp::reply().into_response(),
+                    Err(e) => match warp_utils::reject::handle_rejection(e).await {
+                        Ok(reply) => reply.into_response(),
+                        Err(_) => warp::reply::with_status(
+                            StatusCode::INTERNAL_SERVER_ERROR,
+                            eth2::StatusCode::INTERNAL_SERVER_ERROR,
+                        )
+                        .into_response(),
+                    },
+                }
+            },
+        );
@@ -2369,24 +2458,41 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path("health"))
         .and(warp::path::end())
         .and(network_globals.clone())
-        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
-            blocking_response_task(move || match *network_globals.sync_state.read() {
-                SyncState::SyncingFinalized { .. }
-                | SyncState::SyncingHead { .. }
-                | SyncState::SyncTransition
-                | SyncState::BackFillSyncing { .. } => Ok(warp::reply::with_status(
-                    warp::reply(),
-                    warp::http::StatusCode::PARTIAL_CONTENT,
-                )),
-                SyncState::Synced => Ok(warp::reply::with_status(
-                    warp::reply(),
-                    warp::http::StatusCode::OK,
-                )),
-                SyncState::Stalled => Err(warp_utils::reject::not_synced(
-                    "sync stalled, beacon chain may not yet be initialized.".to_string(),
-                )),
-            })
-        });
+        .and(chain_filter.clone())
+        .and_then(
+            |network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
+                async move {
+                    let el_offline = if let Some(el) = &chain.execution_layer {
+                        el.is_offline_or_erroring().await
+                    } else {
+                        true
+                    };
+
+                    blocking_response_task(move || {
+                        let is_optimistic = chain
+                            .is_optimistic_or_invalid_head()
+                            .map_err(warp_utils::reject::beacon_chain_error)?;
+
+                        let is_syncing = !network_globals.sync_state.read().is_synced();
+
+                        if el_offline {
+                            Err(warp_utils::reject::not_synced("execution layer is offline".to_string()))
+                        } else if is_syncing || is_optimistic {
+                            Ok(warp::reply::with_status(
+                                warp::reply(),
+                                warp::http::StatusCode::PARTIAL_CONTENT,
+                            ))
+                        } else {
+                            Ok(warp::reply::with_status(
+                                warp::reply(),
+                                warp::http::StatusCode::OK,
+                            ))
+                        }
+                    })
+                    .await
+                }
+            },
+        );

     // GET node/peers/{peer_id}
     let get_node_peers_by_id = eth_v1
@@ -3866,6 +3972,8 @@ pub fn serve<T: BeaconChainTypes>(
         warp::post().and(
             post_beacon_blocks
                 .uor(post_beacon_blinded_blocks)
+                .uor(post_beacon_blocks_v2)
+                .uor(post_beacon_blinded_blocks_v2)
                 .uor(post_beacon_pool_attestations)
                 .uor(post_beacon_pool_attester_slashings)
                 .uor(post_beacon_pool_proposer_slashings)
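
For context on the two new routes registered above (hedged: the exact wire format of the query is an assumption inferred from the `BroadcastValidationQuery` filter, not something shown in this diff): they accept a `broadcast_validation` query parameter whose level controls when the block is gossiped, while the v1 routes keep passing `BroadcastValidation::default()`. A compressed sketch of what each level means, using the variant names from the `publish_blocks` changes later in this diff:

    use eth2::types::BroadcastValidation;

    // Not part of the commit; a summary of the behaviour implemented in publish_block().
    fn describe(level: BroadcastValidation) -> &'static str {
        match level {
            // Publish as soon as the block passes gossip verification.
            BroadcastValidation::Gossip => "gossip-checked, publish immediately",
            // Hold publication until the block passes the full import checks.
            BroadcastValidation::Consensus => "publish only after successful verification",
            // Additionally refuse to publish a slashable equivocation.
            BroadcastValidation::ConsensusAndEquivocation => {
                "publish only if valid and not equivocating"
            }
        }
    }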

View File

@ -2,9 +2,12 @@ use crate::metrics;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
use beacon_chain::{AvailabilityProcessingStatus, NotifyExecutionLayer}; use beacon_chain::{
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError}; BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, GossipVerifiedBlock,
NotifyExecutionLayer, AvailabilityProcessingStatus
};
use eth2::types::SignedBlockContents; use eth2::types::SignedBlockContents;
use eth2::types::BroadcastValidation;
use execution_layer::ProvenancedPayload; use execution_layer::ProvenancedPayload;
use lighthouse_network::PubsubMessage; use lighthouse_network::PubsubMessage;
use network::NetworkMessage; use network::NetworkMessage;
@ -29,6 +32,16 @@ pub enum ProvenancedBlock<T: EthSpec> {
Builder(SignedBlockContents<T, FullPayload<T>>), Builder(SignedBlockContents<T, FullPayload<T>>),
} }
impl<T: EthSpec> ProvenancedBlock<T> {
pub fn local(block: Arc<SignedBeaconBlock<T>>) -> Self {
Self::Local(block)
}
pub fn builder(block: Arc<SignedBeaconBlock<T>>) -> Self {
Self::Builder(block)
}
}
/// Handles a request from the HTTP API for full blocks. /// Handles a request from the HTTP API for full blocks.
pub async fn publish_block<T: BeaconChainTypes>( pub async fn publish_block<T: BeaconChainTypes>(
block_root: Option<Hash256>, block_root: Option<Hash256>,
@ -36,6 +49,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>, chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>, network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger, log: Logger,
validation_level: BroadcastValidation,
) -> Result<(), Rejection> { ) -> Result<(), Rejection> {
let seen_timestamp = timestamp_now(); let seen_timestamp = timestamp_now();
let (block, maybe_blobs, is_locally_built_block) = match provenanced_block { let (block, maybe_blobs, is_locally_built_block) = match provenanced_block {
@ -48,17 +62,24 @@ pub async fn publish_block<T: BeaconChainTypes>(
(Arc::new(block), maybe_blobs, false) (Arc::new(block), maybe_blobs, false)
} }
}; };
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); let beacon_block = block.clone();
let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock);
//FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message. //FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message.
//this may skew metrics //this may skew metrics
let block_root = block_root.unwrap_or_else(|| block.canonical_root()); let block_root = block_root.unwrap_or_else(|| block.canonical_root());
debug!( debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot());
log,
"Signed block published to HTTP API";
"slot" => block.slot()
);
/* actually publish a block */
let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
sender,
log,
seen_timestamp| {
let publish_timestamp = timestamp_now();
let publish_delay = publish_timestamp
.checked_sub(seen_timestamp)
.unwrap_or_else(|| Duration::from_secs(0));
info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay);
// Send the block, regardless of whether or not it is valid. The API // Send the block, regardless of whether or not it is valid. The API
// specification is very clear that this is the desired behaviour. // specification is very clear that this is the desired behaviour.
let wrapped_block: BlockWrapper<T::EthSpec> = match block.as_ref() { let wrapped_block: BlockWrapper<T::EthSpec> = match block.as_ref() {
@ -88,13 +109,70 @@ pub async fn publish_block<T: BeaconChainTypes>(
} }
} }
}; };
// Determine the delay after the start of the slot, register it with metrics. let message = PubsubMessage::BeaconBlock(block);
crate::publish_pubsub_message(&sender, message)
.map_err(|_| BeaconChainError::UnableToPublish.into())
};
/* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */
let gossip_verified_block = GossipVerifiedBlock::new(block, &chain).map_err(|e| {
warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e);
warp_utils::reject::custom_bad_request(e.to_string())
})?;
let block_root = block_root.unwrap_or(gossip_verified_block.block_root);
if let BroadcastValidation::Gossip = validation_level {
publish_block(
beacon_block.clone(),
network_tx.clone(),
log.clone(),
seen_timestamp,
)
.map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?;
}
/* only publish if gossip- and consensus-valid and equivocation-free */
let chain_clone = chain.clone();
let block_clone = beacon_block.clone();
let log_clone = log.clone();
let sender_clone = network_tx.clone();
let publish_fn = move || match validation_level {
BroadcastValidation::Gossip => Ok(()),
BroadcastValidation::Consensus => {
publish_block(block_clone, sender_clone, log_clone, seen_timestamp)
}
BroadcastValidation::ConsensusAndEquivocation => {
if chain_clone
.observed_block_producers
.read()
.proposer_has_been_observed(block_clone.message(), block_root)
.map_err(|e| BlockError::BeaconChainError(e.into()))?
.is_slashable()
{
warn!(
log_clone,
"Not publishing equivocating block";
"slot" => block_clone.slot()
);
Err(BlockError::Slashable)
} else {
publish_block(block_clone, sender_clone, log_clone, seen_timestamp)
}
}
};
    let block_clone = wrapped_block.block_cloned();
    let slot = block_clone.message().slot();
    let proposer_index = block_clone.message().proposer_index();

    match chain
        .process_block(
            block_root,
            gossip_verified_block,
            NotifyExecutionLayer::Yes,
            publish_fn,
        )
        .await
    {
        Ok(AvailabilityProcessingStatus::Imported(root)) => {
@ -144,35 +222,32 @@ pub async fn publish_block<T: BeaconChainTypes>(
            );
            Err(warp_utils::reject::broadcast_without_import(msg))
        }
        Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => {
            Err(warp_utils::reject::custom_server_error(
                "unable to publish to network channel".to_string(),
            ))
        }
        Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request(
            "proposal for this slot and proposer has already been seen".to_string(),
        )),
        Err(BlockError::BlockIsAlreadyKnown) => {
            info!(log, "Block from HTTP API already known"; "block" => ?block_root);
            Ok(())
        }
        Err(e) => {
            if let BroadcastValidation::Gossip = validation_level {
                Err(warp_utils::reject::broadcast_without_import(format!("{e}")))
            } else {
                let msg = format!("{:?}", e);
                error!(
                    log,
                    "Invalid block provided to HTTP API";
                    "reason" => &msg
                );
                Err(warp_utils::reject::custom_bad_request(format!(
                    "Invalid block: {e}"
                )))
            }
        }
    }
}
@ -184,21 +259,31 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
    log: Logger,
    validation_level: BroadcastValidation,
) -> Result<(), Rejection> {
    let block_root = block.canonical_root();
    let full_block: ProvenancedBlock<T> =
        reconstruct_block(chain.clone(), block_root, block, log.clone()).await?;
publish_block::<T>(
Some(block_root),
full_block,
chain,
network_tx,
log,
validation_level,
)
.await
}
/// Deconstruct the given blinded block, and construct a full block. This attempts to use the
/// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve
/// the full payload.
pub async fn reconstruct_block<T: BeaconChainTypes>(
    chain: Arc<BeaconChain<T>>,
    block_root: Hash256,
    block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
    log: Logger,
) -> Result<ProvenancedBlock<T>, Rejection> {
    let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() {
        let el = chain.execution_layer.as_ref().ok_or_else(|| {
            warp_utils::reject::custom_server_error("Missing execution layer".to_string())
@ -264,15 +349,15 @@ async fn reconstruct_block<T: BeaconChainTypes>(
        None => block
            .try_into_full_block(None)
            .map(SignedBlockContents::Block)
            .map(ProvenancedBlock::local),
        Some(ProvenancedPayload::Local(full_payload)) => block
            .try_into_full_block(Some(full_payload))
            .map(SignedBlockContents::Block)
            .map(ProvenancedBlock::local),
        Some(ProvenancedPayload::Builder(full_payload)) => block
            .try_into_full_block(Some(full_payload))
            .map(SignedBlockContents::Block)
            .map(ProvenancedBlock::builder),
    }
    .ok_or_else(|| {
        warp_utils::reject::custom_server_error("Unable to add payload to block".to_string())



@ -1,5 +1,6 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug.

pub mod broadcast_validation_tests;
pub mod fork_tests;
pub mod interactive_tests;
pub mod status_tests;


@ -3,6 +3,7 @@ use beacon_chain::{
    test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
    BlockError,
};
use eth2::StatusCode;
use execution_layer::{PayloadStatusV1, PayloadStatusV1Status};
use http_api::test_utils::InteractiveTester;
use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot};
@ -149,3 +150,82 @@ async fn el_error_on_new_payload() {
    assert_eq!(api_response.is_optimistic, Some(false));
    assert_eq!(api_response.is_syncing, false);
}
/// Check `node health` endpoint when the EL is offline.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_offline() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL offline
mock_el.server.set_syncing_response(Err("offline".into()));
mock_el.el.upcheck().await;
let status = tester.client.get_node_health().await;
match status {
Ok(_) => {
panic!("should return 503 error status code");
}
Err(e) => {
assert_eq!(e.status().unwrap(), 503);
}
}
}
/// Check `node health` endpoint when the EL is online and synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_synced() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL synced
mock_el.server.set_syncing_response(Ok(false));
mock_el.el.upcheck().await;
let status = tester.client.get_node_health().await;
match status {
Ok(response) => {
assert_eq!(response, StatusCode::OK);
}
Err(_) => {
panic!("should return 200 status code");
}
}
}
/// Check `node health` endpoint when the EL is online but not synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_not_synced() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL not synced
harness.advance_slot();
mock_el.server.all_payloads_syncing(true);
harness
.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
let status = tester.client.get_node_health().await;
match status {
Ok(response) => {
assert_eq!(response, StatusCode::PARTIAL_CONTENT);
}
Err(_) => {
panic!("should return 206 status code");
}
}
}


@ -8,7 +8,7 @@ use eth2::{
    mixin::{RequestAccept, ResponseForkName, ResponseOptional},
    reqwest::RequestBuilder,
    types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *},
    BeaconNodeHttpClient, Error, Timeouts,
};
use execution_layer::test_utils::TestingBuilder;
use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI;
@ -160,7 +160,7 @@ impl ApiTester {
        // `make_block` adds random graffiti, so this will produce an alternate block
        let (reorg_block, _reorg_state) = harness
            .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1)
            .await;
        let reorg_block = SignedBlockContents::from(reorg_block);
@ -1252,18 +1252,23 @@ impl ApiTester {
    }

    pub async fn test_post_beacon_blocks_invalid(mut self) -> Self {
        let block = self
            .harness
            .make_block_with_modifier(
                self.harness.get_current_state(),
                self.harness.get_current_slot(),
                |b| {
                    *b.state_root_mut() = Hash256::zero();
                },
            )
            .await
            .0;

        assert!(self.client.post_beacon_blocks(&SignedBlockContents::from(block)).await.is_err());

        assert!(
            self.network_rx.network_recv.recv().await.is_some(),
            "gossip valid blocks should be sent to network"
        );

        self
@ -1761,9 +1766,15 @@ impl ApiTester {
    }

    pub async fn test_get_node_health(self) -> Self {
        let status = self.client.get_node_health().await;
        match status {
            Ok(_) => {
                panic!("should return 503 error status code");
            }
            Err(e) => {
                assert_eq!(e.status().unwrap(), 503);
            }
        }
        self
    }
@ -4142,7 +4153,7 @@ impl ApiTester {
            .unwrap();

        let expected_reorg = EventKind::ChainReorg(SseChainReorg {
            slot: self.reorg_block.slot(),
            depth: 1,
            old_head_block: self.next_block.signed_block().canonical_root(),
            old_head_state: self.next_block.signed_block().state_root(),
@ -4156,6 +4167,8 @@ impl ApiTester {
            execution_optimistic: false,
        });

        self.harness.advance_slot();
        self.client
            .post_beacon_blocks(&self.reorg_block)
            .await


@ -836,6 +836,24 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
    }
}
pub struct BeaconProcessorSend<T: BeaconChainTypes>(pub mpsc::Sender<WorkEvent<T>>);
impl<T: BeaconChainTypes> BeaconProcessorSend<T> {
pub fn try_send(&self, message: WorkEvent<T>) -> Result<(), Box<TrySendError<WorkEvent<T>>>> {
let work_type = message.work_type();
match self.0.try_send(message) {
Ok(res) => Ok(res),
Err(e) => {
metrics::inc_counter_vec(
&metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE,
&[work_type],
);
Err(Box::new(e))
}
}
}
}
/// A consensus message (or multiple) from the network that requires processing.
#[derive(Derivative)]
#[derivative(Debug(bound = "T: BeaconChainTypes"))]


@ -929,6 +929,20 @@ impl<T: BeaconChainTypes> Worker<T> {
                verified_block
            }
Err(e @ BlockError::Slashable) => {
warn!(
self.log,
"Received equivocating block from peer";
"error" => ?e
);
/* punish peer for submitting an equivocation, but not too harshly as honest peers may conceivably forward equivocating blocks to us from time to time */
self.gossip_penalize_peer(
peer_id,
PeerAction::MidToleranceError,
"gossip_block_mid",
);
return None;
}
            Err(BlockError::ParentUnknown(block)) => {
                debug!(
                    self.log,
@ -950,7 +964,6 @@ impl<T: BeaconChainTypes> Worker<T> {
            Err(e @ BlockError::FutureSlot { .. })
            | Err(e @ BlockError::WouldRevertFinalizedSlot { .. })
            | Err(e @ BlockError::BlockIsAlreadyKnown)
            | Err(e @ BlockError::NotFinalizedDescendant { .. }) => {
                debug!(self.log, "Could not verify block for gossip. Ignoring the block";
                    "error" => %e);
@ -1103,7 +1116,12 @@ impl<T: BeaconChainTypes> Worker<T> {
        let result = self
            .chain
            .process_block(
                block_root,
                verified_block,
                NotifyExecutionLayer::Yes,
                || Ok(()),
            )
            .await;

        match &result {


@ -103,33 +103,21 @@ impl<T: BeaconChainTypes> Worker<T> {
        });

        // Checks if a block from this proposer is already known.
        let block_equivocates = || {
            match self
                .chain
                .observed_block_producers
                .read()
                .proposer_has_been_observed(block.message(), block.canonical_root())
            {
                Ok(seen_status) => seen_status.is_slashable(),
                // Both of these blocks will be rejected, so reject them now rather
                // than re-queuing them.
                Err(ObserveError::FinalizedBlock { .. })
                | Err(ObserveError::ValidatorIndexTooHigh { .. }) => false,
            }
        };

        // If we've already seen a block from this proposer *and* the block
        // arrived before the attestation deadline, requeue it to ensure it is
        // imported late enough that it won't receive a proposer boost.
@ -137,7 +125,7 @@ impl<T: BeaconChainTypes> Worker<T> {
        // Don't requeue blocks if they're already known to fork choice, just
        // push them through to block processing so they can be handled through
        // the normal channels.
        if !block_is_late && block_equivocates() {
            debug!(
                self.log,
                "Delaying processing of duplicate RPC block";
@ -171,7 +159,7 @@ impl<T: BeaconChainTypes> Worker<T> {
        let result = self
            .chain
            .process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(()))
            .await;

        metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);


@ -301,6 +301,12 @@ lazy_static! {
"Gossipsub light_client_optimistic_update errors per error type", "Gossipsub light_client_optimistic_update errors per error type",
&["type"] &["type"]
); );
pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result<IntCounterVec> =
try_create_int_counter_vec(
"beacon_processor_send_error_per_work_type",
"Total number of beacon processor send error per work type",
&["type"]
);
/*


@ -6,7 +6,8 @@
#![allow(clippy::unit_arg)]

use crate::beacon_processor::{
    BeaconProcessor, BeaconProcessorSend, InvalidBlockStorage, WorkEvent as BeaconWorkEvent,
    MAX_WORK_EVENT_QUEUE_LEN,
};
use crate::error;
use crate::service::{NetworkMessage, RequestId};
@ -14,11 +15,14 @@ use crate::status::status_message;
use crate::sync::manager::RequestId as SyncId;
use crate::sync::SyncMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use futures::prelude::*;
use lighthouse_network::rpc::*;
use lighthouse_network::{
    MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response,
};
use logging::TimeLatch;
use slog::{debug, o, trace};
use slog::{error, warn};
use std::cmp;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
@ -37,9 +41,11 @@ pub struct Router<T: BeaconChainTypes> {
    /// A network context to return and handle RPC requests.
    network: HandlerNetworkContext<T::EthSpec>,
    /// A multi-threaded, non-blocking processor for applying messages to the beacon chain.
    beacon_processor_send: BeaconProcessorSend<T>,
    /// The `Router` logger.
    log: slog::Logger,
/// Provides de-bounce functionality for logging.
logger_debounce: TimeLatch,
}

/// Types of messages the router can receive.
@ -99,7 +105,7 @@ impl<T: BeaconChainTypes> Router<T> {
            beacon_chain.clone(),
            network_globals.clone(),
            network_send.clone(),
            BeaconProcessorSend(beacon_processor_send.clone()),
            sync_logger,
        );
@ -123,8 +129,9 @@ impl<T: BeaconChainTypes> Router<T> {
            chain: beacon_chain,
            sync_send,
            network: HandlerNetworkContext::new(network_send, log.clone()),
            beacon_processor_send: BeaconProcessorSend(beacon_processor_send),
            log: message_handler_log,
            logger_debounce: TimeLatch::default(),
        };

        // spawn handler task and move the message handler instance into the spawned thread
@ -570,12 +577,15 @@ impl<T: BeaconChainTypes> Router<T> {
        self.beacon_processor_send
            .try_send(work)
            .unwrap_or_else(|e| {
                let work_type = match &*e {
                    mpsc::error::TrySendError::Closed(work)
                    | mpsc::error::TrySendError::Full(work) => work.work_type(),
                };

                if self.logger_debounce.elapsed() {
                    error!(&self.log, "Unable to send message to the beacon processor";
                        "error" => %e, "type" => work_type)
                }
            })
    }
}


@ -1,6 +1,7 @@
#![cfg(feature = "spec-minimal")]
use std::sync::Arc;

use crate::beacon_processor::BeaconProcessorSend;
use crate::service::RequestId;
use crate::sync::manager::RequestId as SyncId;
use crate::NetworkMessage;
@ -80,7 +81,7 @@ impl TestRig {
        SyncNetworkContext::new(
            network_tx,
            globals,
            BeaconProcessorSend(beacon_processor_tx),
            chain,
            log.new(slog::o!("component" => "network_context")),
        )


@ -38,7 +38,7 @@ use super::block_lookups::{BlockLookups, PeerShouldHave};
use super::network_context::{BlockOrBlob, SyncNetworkContext};
use super::peer_sync_info::{remote_sync_type, PeerSyncType};
use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
use crate::beacon_processor::{BeaconProcessorSend, ChainSegmentProcessId};
use crate::service::NetworkMessage;
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::delayed_lookup;
@ -238,7 +238,7 @@ pub fn spawn<T: BeaconChainTypes>(
    beacon_chain: Arc<BeaconChain<T>>,
    network_globals: Arc<NetworkGlobals<T::EthSpec>>,
    network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
    beacon_processor_send: BeaconProcessorSend<T>,
    log: slog::Logger,
) -> mpsc::UnboundedSender<SyncMessage<T::EthSpec>> {
    assert!(


@ -4,7 +4,7 @@
use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo;
use super::manager::{Id, RequestId as SyncRequestId};
use super::range_sync::{BatchId, ByRangeRequestType, ChainId};
use crate::beacon_processor::BeaconProcessorSend;
use crate::service::{NetworkMessage, RequestId};
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::{BlobRequestId, BlockRequestId};
@ -60,7 +60,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
    execution_engine_state: EngineState,

    /// Channel to send work to the beacon processor.
    beacon_processor_send: BeaconProcessorSend<T>,

    pub chain: Arc<BeaconChain<T>>,
@ -90,7 +90,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
    pub fn new(
        network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
        network_globals: Arc<NetworkGlobals<T::EthSpec>>,
        beacon_processor_send: BeaconProcessorSend<T>,
        chain: Arc<BeaconChain<T>>,
        log: slog::Logger,
    ) -> Self {
@ -564,12 +564,12 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
        })
    }

    pub fn processor_channel_if_enabled(&self) -> Option<&BeaconProcessorSend<T>> {
        self.is_execution_engine_online()
            .then_some(&self.beacon_processor_send)
    }

    pub fn processor_channel(&self) -> &BeaconProcessorSend<T> {
        &self.beacon_processor_send
    }


@ -381,7 +381,7 @@ where
mod tests {
    use super::*;
    use crate::beacon_processor::{BeaconProcessorSend, WorkEvent as BeaconWorkEvent};
    use crate::service::RequestId;
    use crate::NetworkMessage;
    use beacon_chain::{
@ -610,7 +610,7 @@ mod tests {
        let cx = SyncNetworkContext::new(
            network_tx,
            globals.clone(),
            BeaconProcessorSend(beacon_processor_tx),
            chain,
            log.new(o!("component" => "network_context")),
        );


@ -1,5 +1,6 @@
use clap::{App, Arg};
use strum::VariantNames;
use types::ProgressiveBalancesMode;

pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
    App::new("beacon_node")
@ -1159,4 +1160,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                    developers. This directory is not pruned, users should be careful to avoid \
                    filling up their disks.")
        )
.arg(
Arg::with_name("progressive-balances")
.long("progressive-balances")
.value_name("MODE")
.help("Options to enable or disable the progressive balances cache for \
unrealized FFG progression calculation. The default `checked` mode compares \
the progressive balances from the cache against results from the existing \
method. If there is a mismatch, it falls back to the existing method. The \
optimized mode (`fast`) is faster but is still experimental, and is \
not recommended for mainnet usage at this time.")
.takes_value(true)
.possible_values(ProgressiveBalancesMode::VARIANTS)
)
}


@ -837,6 +837,12 @@ pub fn get_config<E: EthSpec>(
        client_config.network.invalid_block_storage = Some(path);
    }
if let Some(progressive_balances_mode) =
clap_utils::parse_optional(cli_args, "progressive-balances")?
{
client_config.chain.progressive_balances_mode = progressive_balances_mode;
}
    Ok(client_config)
}


@ -395,6 +395,7 @@ macro_rules! impl_try_into_beacon_state {
            // Caching
            total_active_balance: <_>::default(),
            progressive_balances_cache: <_>::default(),
            committee_caches: <_>::default(),
            pubkey_cache: <_>::default(),
            exit_cache: <_>::default(),


@ -63,7 +63,7 @@ where
            .load_cold_state_by_slot(lower_limit_slot)?
            .ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?;
        state.build_caches(&self.spec)?;

        process_results(block_root_iter, |iter| -> Result<(), Error> {
            let mut io_batch = vec![];


@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "4.3.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"


@ -322,6 +322,26 @@ impl BeaconNodeHttpClient {
        ok_or_error(response).await
    }
/// Generic POST function supporting arbitrary responses and timeouts.
async fn post_generic_with_consensus_version<T: Serialize, U: IntoUrl>(
&self,
url: U,
body: &T,
timeout: Option<Duration>,
fork: ForkName,
) -> Result<Response, Error> {
let mut builder = self.client.post(url);
if let Some(timeout) = timeout {
builder = builder.timeout(timeout);
}
let response = builder
.header(CONSENSUS_VERSION_HEADER, fork.to_string())
.json(body)
.send()
.await?;
ok_or_error(response).await
}
    /// `GET beacon/genesis`
    ///
    /// ## Errors
@ -654,6 +674,76 @@ impl BeaconNodeHttpClient {
        Ok(())
    }
pub fn post_beacon_blocks_v2_path(
&self,
validation_level: Option<BroadcastValidation>,
) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;
path.path_segments_mut()
.map_err(|_| Error::InvalidUrl(self.server.clone()))?
.extend(&["beacon", "blocks"]);
path.set_query(
validation_level
.map(|v| format!("broadcast_validation={}", v))
.as_deref(),
);
Ok(path)
}
pub fn post_beacon_blinded_blocks_v2_path(
&self,
validation_level: Option<BroadcastValidation>,
) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;
path.path_segments_mut()
.map_err(|_| Error::InvalidUrl(self.server.clone()))?
.extend(&["beacon", "blinded_blocks"]);
path.set_query(
validation_level
.map(|v| format!("broadcast_validation={}", v))
.as_deref(),
);
Ok(path)
}
/// `POST v2/beacon/blocks`
pub async fn post_beacon_blocks_v2<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBeaconBlock<T, Payload>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
self.post_beacon_blocks_v2_path(validation_level)?,
block,
Some(self.timeouts.proposal),
block.message().body().fork_name(),
)
.await?;
Ok(())
}
/// `POST v2/beacon/blinded_blocks`
pub async fn post_beacon_blinded_blocks_v2<T: EthSpec>(
&self,
block: &SignedBlindedBeaconBlock<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
self.post_beacon_blinded_blocks_v2_path(validation_level)?,
block,
Some(self.timeouts.proposal),
block.message().body().fork_name(),
)
.await?;
Ok(())
}
    /// Path for `v2/beacon/blocks`
    pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> {
        let mut path = self.eth_path(V2)?;
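As a usage sketch for reviewers (not part of this diff): the new `post_beacon_blocks_v2` method is what a caller uses to opt into the stricter broadcast validation levels added above. The helper below is hypothetical and assumes an already-constructed `BeaconNodeHttpClient` and a signed block; only the method call and the `BroadcastValidation` variant come from this change.

use eth2::types::{BroadcastValidation, MainnetEthSpec, SignedBeaconBlock};
use eth2::{BeaconNodeHttpClient, Error};

/// Hypothetical helper: publish a block and only report success once the beacon
/// node has checked it as gossip- and consensus-valid and equivocation-free.
async fn publish_with_strict_validation(
    client: &BeaconNodeHttpClient,
    block: &SignedBeaconBlock<MainnetEthSpec>,
) -> Result<(), Error> {
    client
        .post_beacon_blocks_v2(block, Some(BroadcastValidation::ConsensusAndEquivocation))
        .await
}

Passing `None` omits the query parameter, so the server falls back to the default `gossip` level.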


@ -7,7 +7,7 @@ use mediatype::{names, MediaType, MediaTypeList};
use serde::{Deserialize, Serialize};
use ssz_derive::Encode;
use std::convert::TryFrom;
use std::fmt::{self, Display};
use std::str::{from_utf8, FromStr};
use std::time::Duration;
pub use types::*;
@ -1261,6 +1261,50 @@ pub struct ForkChoiceNode {
    pub execution_block_hash: Option<Hash256>,
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum BroadcastValidation {
Gossip,
Consensus,
ConsensusAndEquivocation,
}
impl Default for BroadcastValidation {
fn default() -> Self {
Self::Gossip
}
}
impl Display for BroadcastValidation {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Gossip => write!(f, "gossip"),
Self::Consensus => write!(f, "consensus"),
Self::ConsensusAndEquivocation => write!(f, "consensus_and_equivocation"),
}
}
}
impl FromStr for BroadcastValidation {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"gossip" => Ok(Self::Gossip),
"consensus" => Ok(Self::Consensus),
"consensus_and_equivocation" => Ok(Self::ConsensusAndEquivocation),
_ => Err("Invalid broadcast validation level"),
}
}
}
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub struct BroadcastValidationQuery {
#[serde(default)]
pub broadcast_validation: BroadcastValidation,
}
#[cfg(test)]
mod tests {
    use super::*;
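A small illustrative check of the round-trip behaviour above (a sketch, not part of this diff; it assumes the `BroadcastValidation` enum defined above is in scope): `Display` is what the HTTP client uses to build the `broadcast_validation` query value, while `FromStr` and the serde default cover parsing and the fallback to `gossip`.

    #[test]
    fn broadcast_validation_round_trip_sketch() {
        // Parse a query-string value into a validation level.
        let level = BroadcastValidation::from_str("consensus_and_equivocation").unwrap();
        assert_eq!(level, BroadcastValidation::ConsensusAndEquivocation);

        // `Display` produces the string the client appends as `?broadcast_validation=...`.
        assert_eq!(level.to_string(), "consensus_and_equivocation");

        // Omitting the parameter falls back to the default `gossip` level.
        assert_eq!(BroadcastValidation::default(), BroadcastValidation::Gossip);
    }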


@ -1,6 +1,8 @@
# Lighthouse Team (Sigma Prime)
- enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo - enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I
- enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo - enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I
- enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I
- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I
# EF Team
- enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg - enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg
- enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg - enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg


@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
    // NOTE: using --match instead of --exclude for compatibility with old Git
    "--match=thiswillnevermatchlol"
    ],
    prefix = "Lighthouse/v4.3.0-",
    fallback = "Lighthouse/v4.3.0"
);

/// Returns `VERSION`, but with platform information appended to the end.


@ -1,10 +1,15 @@
use crate::{ForkChoiceStore, InvalidationOperation};
use per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError;
use proto_array::{
    Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError,
    ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold,
};
use slog::{crit, debug, error, warn, Logger};
use ssz_derive::{Decode, Encode};
use state_processing::per_epoch_processing::altair::ParticipationCache;
use state_processing::per_epoch_processing::{
    weigh_justification_and_finalization, JustificationAndFinalizationState,
};
use state_processing::{
    per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing,
};
@ -18,6 +23,7 @@ use types::{
    EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch,
    SignedBeaconBlock, Slot,
};
use types::{ProgressiveBalancesCache, ProgressiveBalancesMode};
#[derive(Debug)]
pub enum Error<T> {
@ -72,7 +78,9 @@ pub enum Error<T> {
    },
    UnrealizedVoteProcessing(state_processing::EpochProcessingError),
    ParticipationCacheBuild(BeaconStateError),
    ParticipationCacheError(ParticipationCacheError),
    ValidatorStatuses(BeaconStateError),
    ProgressiveBalancesCacheCheckFailed(String),
}
impl<T> From<InvalidAttestation> for Error<T> {
@ -93,6 +101,18 @@ impl<T> From<state_processing::EpochProcessingError> for Error<T> {
    }
}
impl<T> From<BeaconStateError> for Error<T> {
fn from(e: BeaconStateError) -> Self {
Error::BeaconStateError(e)
}
}
impl<T> From<ParticipationCacheError> for Error<T> {
fn from(e: ParticipationCacheError) -> Self {
Error::ParticipationCacheError(e)
}
}
#[derive(Debug, Clone, Copy)]
/// Controls how fork choice should behave when restoring from a persisted fork choice.
pub enum ResetPayloadStatuses {
@ -645,7 +665,9 @@ where
        block_delay: Duration,
        state: &BeaconState<E>,
        payload_verification_status: PayloadVerificationStatus,
        progressive_balances_mode: ProgressiveBalancesMode,
        spec: &ChainSpec,
        log: &Logger,
    ) -> Result<(), Error<T::Error>> {
        // If this block has already been processed we do not need to reprocess it.
        // We check this immediately in case re-processing the block mutates some property of the
@ -739,46 +761,84 @@ where
                parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch
            });

        let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some((
            parent_justified,
            parent_finalized,
        )) = parent_checkpoints
        {
            (parent_justified, parent_finalized)
        } else {
            let justification_and_finalization_state = match block {
                BeaconBlockRef::Deneb(_)
                | BeaconBlockRef::Capella(_)
                | BeaconBlockRef::Merge(_)
                | BeaconBlockRef::Altair(_) => match progressive_balances_mode {
                    ProgressiveBalancesMode::Disabled => {
                        let participation_cache = ParticipationCache::new(state, spec)
                            .map_err(Error::ParticipationCacheBuild)?;
                        per_epoch_processing::altair::process_justification_and_finalization(
                            state,
                            &participation_cache,
                        )?
                    }
                    ProgressiveBalancesMode::Fast
                    | ProgressiveBalancesMode::Checked
                    | ProgressiveBalancesMode::Strict => {
                        let maybe_participation_cache = progressive_balances_mode
                            .perform_comparative_checks()
                            .then(|| {
                                ParticipationCache::new(state, spec)
                                    .map_err(Error::ParticipationCacheBuild)
                            })
                            .transpose()?;

                        process_justification_and_finalization_from_progressive_cache::<E, T>(
                            state,
                            maybe_participation_cache.as_ref(),
                        )
                        .or_else(|e| {
                            if progressive_balances_mode != ProgressiveBalancesMode::Strict {
                                error!(
                                    log,
                                    "Processing with progressive balances cache failed";
                                    "info" => "falling back to the non-optimized processing method",
                                    "error" => ?e,
                                );
                                let participation_cache = maybe_participation_cache
                                    .map(Ok)
                                    .unwrap_or_else(|| ParticipationCache::new(state, spec))
                                    .map_err(Error::ParticipationCacheBuild)?;
                                per_epoch_processing::altair::process_justification_and_finalization(
                                    state,
                                    &participation_cache,
                                )
                                .map_err(Error::from)
                            } else {
                                Err(e)
                            }
                        })?
                    }
                },
                BeaconBlockRef::Base(_) => {
                    let mut validator_statuses =
                        per_epoch_processing::base::ValidatorStatuses::new(state, spec)
                            .map_err(Error::ValidatorStatuses)?;
                    validator_statuses
                        .process_attestations(state)
                        .map_err(Error::ValidatorStatuses)?;
                    per_epoch_processing::base::process_justification_and_finalization(
                        state,
                        &validator_statuses.total_balances,
                        spec,
                    )?
                }
            };

            (
                justification_and_finalization_state.current_justified_checkpoint(),
                justification_and_finalization_state.finalized_checkpoint(),
            )
        };

        // Update best known unrealized justified & finalized checkpoints
        if unrealized_justified_checkpoint.epoch
            > self.fc_store.unrealized_justified_checkpoint().epoch
@ -1504,6 +1564,92 @@ where
    }
}
/// Process justification and finalization using progressive cache. Also performs a comparative
/// check against the `ParticipationCache` if it is supplied.
///
/// Returns an error if the cache is not initialized or if there is a mismatch on the comparative check.
fn process_justification_and_finalization_from_progressive_cache<E, T>(
state: &BeaconState<E>,
maybe_participation_cache: Option<&ParticipationCache>,
) -> Result<JustificationAndFinalizationState<E>, Error<T::Error>>
where
E: EthSpec,
T: ForkChoiceStore<E>,
{
let justification_and_finalization_state = JustificationAndFinalizationState::new(state);
if state.current_epoch() <= E::genesis_epoch() + 1 {
return Ok(justification_and_finalization_state);
}
// Load cached balances
let progressive_balances_cache: &ProgressiveBalancesCache = state.progressive_balances_cache();
let previous_target_balance =
progressive_balances_cache.previous_epoch_target_attesting_balance()?;
let current_target_balance =
progressive_balances_cache.current_epoch_target_attesting_balance()?;
let total_active_balance = state.get_total_active_balance()?;
if let Some(participation_cache) = maybe_participation_cache {
check_progressive_balances::<E, T>(
state,
participation_cache,
previous_target_balance,
current_target_balance,
total_active_balance,
)?;
}
weigh_justification_and_finalization(
justification_and_finalization_state,
total_active_balance,
previous_target_balance,
current_target_balance,
)
.map_err(Error::from)
}
/// Perform comparative checks against `ParticipationCache`, will return error if there's a mismatch.
fn check_progressive_balances<E, T>(
state: &BeaconState<E>,
participation_cache: &ParticipationCache,
cached_previous_target_balance: u64,
cached_current_target_balance: u64,
cached_total_active_balance: u64,
) -> Result<(), Error<T::Error>>
where
E: EthSpec,
T: ForkChoiceStore<E>,
{
let slot = state.slot();
let epoch = state.current_epoch();
// Check previous epoch target balances
let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?;
if previous_target_balance != cached_previous_target_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Previous epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, previous_target_balance, cached_previous_target_balance)
));
}
// Check current epoch target balances
let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?;
if current_target_balance != cached_current_target_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Current epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, current_target_balance, cached_current_target_balance)
));
}
// Check current epoch total balances
let total_active_balance = participation_cache.current_epoch_total_active_balance();
if total_active_balance != cached_total_active_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Current epoch total active balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, total_active_balance, cached_total_active_balance)
));
}
Ok(())
}
/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes.
///
/// This is used when persisting the state of the fork choice to disk.


@ -17,12 +17,13 @@ use fork_choice::{
use store::MemoryStore;
use types::{
    test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint,
    Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, ProgressiveBalancesMode,
    RelativeEpoch, SignedBeaconBlock, Slot, SubnetId,
};

pub type E = MainnetEthSpec;

pub const VALIDATOR_COUNT: usize = 64;

/// Defines some delay between when an attestation is created and when it is mutated.
pub enum MutationDelay {
@ -68,6 +69,24 @@ impl ForkChoiceTest {
        Self { harness }
    }
/// Creates a new tester with the specified `ProgressiveBalancesMode` and genesis from latest fork.
fn new_with_progressive_balances_mode(mode: ProgressiveBalancesMode) -> ForkChoiceTest {
// genesis with latest fork (at least altair required to test the cache)
let spec = ForkName::latest().make_genesis_spec(ChainSpec::default());
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec)
.chain_config(ChainConfig {
progressive_balances_mode: mode,
..ChainConfig::default()
})
.deterministic_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
Self { harness }
}
    /// Get a value from the `ForkChoice` instantiation.
    fn get<T, U>(&self, func: T) -> U
    where
@ -212,6 +231,39 @@ impl ForkChoiceTest {
        self
    }
/// Slash a validator from the previous epoch committee.
pub async fn add_previous_epoch_attester_slashing(self) -> Self {
let state = self.harness.get_current_state();
let previous_epoch_shuffling = state.get_shuffling(RelativeEpoch::Previous).unwrap();
let validator_indices = previous_epoch_shuffling
.iter()
.map(|idx| *idx as u64)
.take(1)
.collect();
self.harness
.add_attester_slashing(validator_indices)
.unwrap();
self
}
/// Slash the proposer of a block in the previous epoch.
pub async fn add_previous_epoch_proposer_slashing(self, slots_per_epoch: u64) -> Self {
let previous_epoch_slot = self.harness.get_current_slot() - slots_per_epoch;
let previous_epoch_block = self
.harness
.chain
.block_at_slot(previous_epoch_slot, WhenSlotSkipped::None)
.unwrap()
.unwrap();
let proposer_index: u64 = previous_epoch_block.message().proposer_index();
self.harness.add_proposer_slashing(proposer_index).unwrap();
self
}
    /// Apply `count` blocks to the chain (without attestations).
    pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self {
        self.harness.advance_slot();
@ -286,7 +338,9 @@ impl ForkChoiceTest {
                Duration::from_secs(0),
                &state,
                PayloadVerificationStatus::Verified,
                self.harness.chain.config.progressive_balances_mode,
                &self.harness.chain.spec,
                self.harness.logger(),
            )
            .unwrap();
        self
@ -328,7 +382,9 @@ impl ForkChoiceTest {
                Duration::from_secs(0),
                &state,
                PayloadVerificationStatus::Verified,
                self.harness.chain.config.progressive_balances_mode,
                &self.harness.chain.spec,
                self.harness.logger(),
            )
            .err()
            .expect("on_block did not return an error");
@ -1287,3 +1343,49 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
        .assert_finalized_epoch_is_less_than(checkpoint.epoch)
        .assert_shutdown_signal_sent();
}
/// Checks that `ProgressiveBalancesCache` is updated correctly after an attester slashing event,
/// where the slashed validator is a target attester in previous / current epoch.
#[tokio::test]
async fn progressive_balances_cache_attester_slashing() {
ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict)
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.await
.unwrap()
.add_previous_epoch_attester_slashing()
.await
// expect fork choice to import blocks successfully after a previous epoch attester is
// slashed, i.e. the slashed attester's balance is correctly excluded from
// the previous epoch total balance in `ProgressiveBalancesCache`.
.apply_blocks(1)
.await
// expect fork choice to import another epoch of blocks successfully - the slashed
// attester's balance should be excluded from the current epoch total balance in
// `ProgressiveBalancesCache` as well.
.apply_blocks(MainnetEthSpec::slots_per_epoch() as usize)
.await;
}
/// Checks that `ProgressiveBalancesCache` is updated correctly after a proposer slashing event,
/// where the slashed validator is a target attester in previous / current epoch.
#[tokio::test]
async fn progressive_balances_cache_proposer_slashing() {
ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict)
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.await
.unwrap()
.add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch())
.await
// expect fork choice to import blocks successfully after a previous epoch proposer is
// slashed, i.e. the slashed proposer's balance is correctly excluded from
// the previous epoch total balance in `ProgressiveBalancesCache`.
.apply_blocks(1)
.await
// expect fork choice to import another epoch of blocks successfully - the slashed
// proposer's balance should be excluded from the current epoch total balance in
// `ProgressiveBalancesCache` as well.
.apply_blocks(MainnetEthSpec::slots_per_epoch() as usize)
.await;
}


@ -7,6 +7,7 @@ mod slash_validator;
pub mod altair;
pub mod base;
pub mod update_progressive_balances_cache;

pub use deposit_data_tree::DepositDataTree;
pub use get_attestation_participation::get_attestation_participation_flag_indices;


@ -1,3 +1,4 @@
use crate::common::update_progressive_balances_cache::update_progressive_balances_on_slashing;
use crate::{
    common::{decrease_balance, increase_balance, initiate_validator_exit},
    per_block_processing::errors::BlockProcessingError,
@ -43,6 +44,8 @@ pub fn slash_validator<T: EthSpec>(
            .safe_div(spec.min_slashing_penalty_quotient_for_state(state))?,
    )?;

    update_progressive_balances_on_slashing(state, slashed_index)?;

    // Apply proposer and whistleblower rewards
    let proposer_index = ctxt.get_proposer_index(state, spec)? as usize;
    let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index);


@ -0,0 +1,142 @@
/// A collection of all functions that mutate the `ProgressiveBalancesCache`.
use crate::metrics::{
PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
};
use crate::per_epoch_processing::altair::ParticipationCache;
use crate::{BlockProcessingError, EpochProcessingError};
use lighthouse_metrics::set_gauge;
use ssz_types::VariableList;
use std::borrow::Cow;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
use types::{
is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec,
ParticipationFlags, ProgressiveBalancesCache,
};
/// Initializes the `ProgressiveBalancesCache` cache using balance values from the
/// `ParticipationCache`. If the optional `&ParticipationCache` is not supplied, it will be computed
/// from the `BeaconState`.
pub fn initialize_progressive_balances_cache<E: EthSpec>(
state: &mut BeaconState<E>,
maybe_participation_cache: Option<&ParticipationCache>,
spec: &ChainSpec,
) -> Result<(), BeaconStateError> {
if !is_progressive_balances_enabled(state)
|| state.progressive_balances_cache().is_initialized()
{
return Ok(());
}
let participation_cache = match maybe_participation_cache {
Some(cache) => Cow::Borrowed(cache),
None => Cow::Owned(ParticipationCache::new(state, spec)?),
};
let previous_epoch_target_attesting_balance = participation_cache
.previous_epoch_target_attesting_balance_raw()
.map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?;
let current_epoch_target_attesting_balance = participation_cache
.current_epoch_target_attesting_balance_raw()
.map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?;
let current_epoch = state.current_epoch();
state.progressive_balances_cache_mut().initialize(
current_epoch,
previous_epoch_target_attesting_balance,
current_epoch_target_attesting_balance,
);
update_progressive_balances_metrics(state.progressive_balances_cache())?;
Ok(())
}
/// Updates the `ProgressiveBalancesCache` when a new target attestation has been processed.
pub fn update_progressive_balances_on_attestation<T: EthSpec>(
state: &mut BeaconState<T>,
epoch: Epoch,
validator_index: usize,
) -> Result<(), BlockProcessingError> {
if is_progressive_balances_enabled(state) {
let validator = state.get_validator(validator_index)?;
if !validator.slashed {
let validator_effective_balance = validator.effective_balance;
state
.progressive_balances_cache_mut()
.on_new_target_attestation(epoch, validator_effective_balance)?;
}
}
Ok(())
}
/// Updates the `ProgressiveBalancesCache` when a target attester has been slashed.
pub fn update_progressive_balances_on_slashing<T: EthSpec>(
state: &mut BeaconState<T>,
validator_index: usize,
) -> Result<(), BlockProcessingError> {
if is_progressive_balances_enabled(state) {
let previous_epoch_participation = state.previous_epoch_participation()?;
let is_previous_epoch_target_attester =
is_target_attester_in_epoch::<T>(previous_epoch_participation, validator_index)?;
let current_epoch_participation = state.current_epoch_participation()?;
let is_current_epoch_target_attester =
is_target_attester_in_epoch::<T>(current_epoch_participation, validator_index)?;
let validator_effective_balance = state.get_effective_balance(validator_index)?;
state.progressive_balances_cache_mut().on_slashing(
is_previous_epoch_target_attester,
is_current_epoch_target_attester,
validator_effective_balance,
)?;
}
Ok(())
}
/// Updates the `ProgressiveBalancesCache` on epoch transition.
pub fn update_progressive_balances_on_epoch_transition<T: EthSpec>(
state: &mut BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), EpochProcessingError> {
if is_progressive_balances_enabled(state) {
state
.progressive_balances_cache_mut()
.on_epoch_transition(spec)?;
update_progressive_balances_metrics(state.progressive_balances_cache())?;
}
Ok(())
}
pub fn update_progressive_balances_metrics(
cache: &ProgressiveBalancesCache,
) -> Result<(), BeaconStateError> {
set_gauge(
&PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
cache.previous_epoch_target_attesting_balance()? as i64,
);
set_gauge(
&PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
cache.current_epoch_target_attesting_balance()? as i64,
);
Ok(())
}
fn is_target_attester_in_epoch<T: EthSpec>(
epoch_participation: &VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
validator_index: usize,
) -> Result<bool, BlockProcessingError> {
let participation_flags = epoch_participation
.get(validator_index)
.ok_or(BeaconStateError::UnknownValidator(validator_index))?;
participation_flags
.has_flag(TIMELY_TARGET_FLAG_INDEX)
.map_err(|e| e.into())
}
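
The helper above keys off the per-validator participation bitfield; the following is a minimal, illustrative sketch of that flag check in isolation, assuming the `add_flag`/`has_flag` helpers on `ParticipationFlags` and the `ArithError` type from `safe_arith` (it is not part of this diff):

use safe_arith::ArithError;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
use types::ParticipationFlags;

// Illustrative only: the timely-target bit that `is_target_attester_in_epoch`
// inspects is a single flag in the per-validator participation byte.
fn target_flag_demo() -> Result<(), ArithError> {
    let mut flags = ParticipationFlags::default();
    assert!(!flags.has_flag(TIMELY_TARGET_FLAG_INDEX)?);
    flags.add_flag(TIMELY_TARGET_FLAG_INDEX)?;
    assert!(flags.has_flag(TIMELY_TARGET_FLAG_INDEX)?);
    Ok(())
}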

View File

@ -111,7 +111,7 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
} }
// Now that we have our validators, initialize the caches (including the committees) // Now that we have our validators, initialize the caches (including the committees)
state.build_all_caches(spec)?; state.build_caches(spec)?;
// Set genesis validators root for domain separation and chain versioning // Set genesis validators root for domain separation and chain versioning
*state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?; *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?;
@ -134,7 +134,7 @@ pub fn process_activations<T: EthSpec>(
state: &mut BeaconState<T>, state: &mut BeaconState<T>,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (validators, balances) = state.validators_and_balances_mut(); let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter_mut().enumerate() { for (index, validator) in validators.iter_mut().enumerate() {
let balance = balances let balance = balances
.get(index) .get(index)

View File

@ -23,4 +23,15 @@ lazy_static! {
"beacon_participation_prev_epoch_active_gwei_total", "beacon_participation_prev_epoch_active_gwei_total",
"Total effective balance (gwei) of validators active in the previous epoch" "Total effective balance (gwei) of validators active in the previous epoch"
); );
/*
* Participation Metrics (progressive balances)
*/
pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_participation_prev_epoch_target_attesting_gwei_progressive_total",
"Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch"
);
pub static ref PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_participation_curr_epoch_target_attesting_gwei_progressive_total",
"Progressive total effective balance (gwei) of validators who attested to the target in the current epoch"
);
} }

View File

@ -42,6 +42,9 @@ mod verify_proposer_slashing;
use crate::common::decrease_balance; use crate::common::decrease_balance;
use crate::StateProcessingStrategy; use crate::StateProcessingStrategy;
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_metrics,
};
#[cfg(feature = "arbitrary-fuzz")] #[cfg(feature = "arbitrary-fuzz")]
use arbitrary::Arbitrary; use arbitrary::Arbitrary;
@ -115,6 +118,8 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
.fork_name(spec) .fork_name(spec)
.map_err(BlockProcessingError::InconsistentStateFork)?; .map_err(BlockProcessingError::InconsistentStateFork)?;
initialize_progressive_balances_cache(state, None, spec)?;
let verify_signatures = match block_signature_strategy { let verify_signatures = match block_signature_strategy {
BlockSignatureStrategy::VerifyBulk => { BlockSignatureStrategy::VerifyBulk => {
// Verify all signatures in the block at once. // Verify all signatures in the block at once.
@ -183,6 +188,10 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
)?; )?;
} }
if is_progressive_balances_enabled(state) {
update_progressive_balances_metrics(state.progressive_balances_cache())?;
}
Ok(()) Ok(())
} }
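
In short, block processing now brackets the existing logic with cache maintenance: the cache is initialized up front and the gauges are refreshed at the end. A rough sketch of that call order, using the module paths from this diff with the body elided (a hypothetical wrapper, not the actual `per_block_processing` signature):

use state_processing::common::update_progressive_balances_cache::{
    initialize_progressive_balances_cache, update_progressive_balances_metrics,
};
use state_processing::BlockProcessingError;
use types::{is_progressive_balances_enabled, BeaconState, ChainSpec, EthSpec};

fn cache_bracketing<E: EthSpec>(
    state: &mut BeaconState<E>,
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
    // Build the cache (a no-op pre-Altair or if it is already initialized).
    initialize_progressive_balances_cache(state, None, spec)?;
    // ... the existing per-block processing (attestations, slashings, ...) runs
    // here and feeds the cache via the helpers added earlier in this diff ...
    if is_progressive_balances_enabled(state) {
        update_progressive_balances_metrics(state.progressive_balances_cache())?;
    }
    Ok(())
}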

View File

@ -1,6 +1,8 @@
use super::signature_sets::Error as SignatureSetError; use super::signature_sets::Error as SignatureSetError;
use crate::per_epoch_processing::altair::participation_cache;
use crate::ContextError; use crate::ContextError;
use merkle_proof::MerkleTreeError; use merkle_proof::MerkleTreeError;
use participation_cache::Error as ParticipationCacheError;
use safe_arith::ArithError; use safe_arith::ArithError;
use ssz::DecodeError; use ssz::DecodeError;
use types::*; use types::*;
@ -99,6 +101,7 @@ pub enum BlockProcessingError {
length: usize, length: usize,
}, },
WithdrawalCredentialsInvalid, WithdrawalCredentialsInvalid,
ParticipationCacheError(ParticipationCacheError),
} }
impl From<BeaconStateError> for BlockProcessingError { impl From<BeaconStateError> for BlockProcessingError {
@ -156,6 +159,12 @@ impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
} }
} }
impl From<ParticipationCacheError> for BlockProcessingError {
fn from(e: ParticipationCacheError) -> Self {
BlockProcessingError::ParticipationCacheError(e)
}
}
/// A conversion that consumes `self` and adds an `index` variable to resulting struct. /// A conversion that consumes `self` and adds an `index` variable to resulting struct.
/// ///
/// Used here to allow converting an error into an upstream error that points to the object that /// Used here to allow converting an error into an upstream error that points to the object that

View File

@ -97,6 +97,8 @@ pub mod base {
pub mod altair { pub mod altair {
use super::*; use super::*;
use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
pub fn process_attestations<T: EthSpec>( pub fn process_attestations<T: EthSpec>(
state: &mut BeaconState<T>, state: &mut BeaconState<T>,
@ -163,6 +165,14 @@ pub mod altair {
get_base_reward(state, index, base_reward_per_increment, spec)? get_base_reward(state, index, base_reward_per_increment, spec)?
.safe_mul(weight)?, .safe_mul(weight)?,
)?; )?;
if flag_index == TIMELY_TARGET_FLAG_INDEX {
update_progressive_balances_on_attestation(
state,
data.target.epoch,
index,
)?;
}
} }
} }
} }
@ -235,6 +245,7 @@ pub fn process_attester_slashings<T: EthSpec>(
Ok(()) Ok(())
} }
/// Wrapper function to handle calling the correct version of `process_attestations` based on /// Wrapper function to handle calling the correct version of `process_attestations` based on
/// the fork. /// the fork.
pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>( pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>(

View File

@ -1,4 +1,7 @@
use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error}; use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error};
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
};
use crate::per_epoch_processing::{ use crate::per_epoch_processing::{
effective_balance_updates::process_effective_balance_updates, effective_balance_updates::process_effective_balance_updates,
historical_roots_update::process_historical_roots_update, historical_roots_update::process_historical_roots_update,
@ -31,6 +34,7 @@ pub fn process_epoch<T: EthSpec>(
// Pre-compute participating indices and total balances. // Pre-compute participating indices and total balances.
let participation_cache = ParticipationCache::new(state, spec)?; let participation_cache = ParticipationCache::new(state, spec)?;
let sync_committee = state.current_sync_committee()?.clone(); let sync_committee = state.current_sync_committee()?.clone();
initialize_progressive_balances_cache::<T>(state, Some(&participation_cache), spec)?;
// Justification and finalization. // Justification and finalization.
let justification_and_finalization_state = let justification_and_finalization_state =
@ -56,7 +60,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?; process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag). // Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?; process_effective_balance_updates(state, Some(&participation_cache), spec)?;
// Reset slashings // Reset slashings
process_slashings_reset(state)?; process_slashings_reset(state)?;
@ -75,6 +79,8 @@ pub fn process_epoch<T: EthSpec>(
// Rotate the epoch caches to suit the epoch transition. // Rotate the epoch caches to suit the epoch transition.
state.advance_caches(spec)?; state.advance_caches(spec)?;
update_progressive_balances_on_epoch_transition(state, spec)?;
Ok(EpochProcessingSummary::Altair { Ok(EpochProcessingSummary::Altair {
participation_cache, participation_cache,
sync_committee, sync_committee,

View File

@ -11,49 +11,23 @@
//! Additionally, this cache is returned from the `altair::process_epoch` function and can be used //! Additionally, this cache is returned from the `altair::process_epoch` function and can be used
//! to get useful summaries about the validator participation in an epoch. //! to get useful summaries about the validator participation in an epoch.
use safe_arith::{ArithError, SafeArith};
use types::{ use types::{
consts::altair::{ consts::altair::{
NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX,
TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX,
}, },
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch, Balance, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags,
RelativeEpoch,
}; };
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq, Clone)]
pub enum Error { pub enum Error {
InvalidFlagIndex(usize), InvalidFlagIndex(usize),
InvalidValidatorIndex(usize), InvalidValidatorIndex(usize),
} }
/// A balance which will never be below the specified `minimum`.
///
/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
#[derive(PartialEq, Debug, Clone, Copy)]
struct Balance {
raw: u64,
minimum: u64,
}
impl Balance {
/// Initialize the balance to `0`, or the given `minimum`.
pub fn zero(minimum: u64) -> Self {
Self { raw: 0, minimum }
}
/// Returns the balance with respect to the initialization `minimum`.
pub fn get(&self) -> u64 {
std::cmp::max(self.raw, self.minimum)
}
/// Add-assign to the balance.
pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
self.raw.safe_add_assign(other)
}
}
/// Caches the participation values for one epoch (either the previous or current). /// Caches the participation values for one epoch (either the previous or current).
#[derive(PartialEq, Debug)] #[derive(PartialEq, Debug, Clone)]
struct SingleEpochParticipationCache { struct SingleEpochParticipationCache {
/// Maps an active validator index to their participation flags. /// Maps an active validator index to their participation flags.
/// ///
@ -95,6 +69,14 @@ impl SingleEpochParticipationCache {
.ok_or(Error::InvalidFlagIndex(flag_index)) .ok_or(Error::InvalidFlagIndex(flag_index))
} }
/// Returns the raw total balance of attesters who have `flag_index` set.
fn total_flag_balance_raw(&self, flag_index: usize) -> Result<Balance, Error> {
self.total_flag_balances
.get(flag_index)
.copied()
.ok_or(Error::InvalidFlagIndex(flag_index))
}
/// Returns `true` if `val_index` is active, unslashed and has `flag_index` set. /// Returns `true` if `val_index` is active, unslashed and has `flag_index` set.
/// ///
/// ## Errors /// ## Errors
@ -173,7 +155,7 @@ impl SingleEpochParticipationCache {
} }
/// Maintains a cache to be used during `altair::process_epoch`. /// Maintains a cache to be used during `altair::process_epoch`.
#[derive(PartialEq, Debug)] #[derive(PartialEq, Debug, Clone)]
pub struct ParticipationCache { pub struct ParticipationCache {
current_epoch: Epoch, current_epoch: Epoch,
/// Caches information about active validators pertaining to `self.current_epoch`. /// Caches information about active validators pertaining to `self.current_epoch`.
@ -291,6 +273,11 @@ impl ParticipationCache {
.total_flag_balance(TIMELY_TARGET_FLAG_INDEX) .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
} }
pub fn current_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
self.current_epoch_participation
.total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
}
pub fn previous_epoch_total_active_balance(&self) -> u64 { pub fn previous_epoch_total_active_balance(&self) -> u64 {
self.previous_epoch_participation.total_active_balance.get() self.previous_epoch_participation.total_active_balance.get()
} }
@ -300,6 +287,11 @@ impl ParticipationCache {
.total_flag_balance(TIMELY_TARGET_FLAG_INDEX) .total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
} }
pub fn previous_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
self.previous_epoch_participation
.total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
}
pub fn previous_epoch_source_attesting_balance(&self) -> Result<u64, Error> { pub fn previous_epoch_source_attesting_balance(&self) -> Result<u64, Error> {
self.previous_epoch_participation self.previous_epoch_participation
.total_flag_balance(TIMELY_SOURCE_FLAG_INDEX) .total_flag_balance(TIMELY_SOURCE_FLAG_INDEX)

View File

@ -52,7 +52,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?; process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag). // Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?; process_effective_balance_updates(state, None, spec)?;
// Reset slashings // Reset slashings
process_slashings_reset(state)?; process_slashings_reset(state)?;

View File

@ -11,6 +11,9 @@ use crate::per_epoch_processing::{
}; };
use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch}; use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch};
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
};
pub use historical_summaries_update::process_historical_summaries_update; pub use historical_summaries_update::process_historical_summaries_update;
mod historical_summaries_update; mod historical_summaries_update;
@ -27,6 +30,7 @@ pub fn process_epoch<T: EthSpec>(
// Pre-compute participating indices and total balances. // Pre-compute participating indices and total balances.
let participation_cache = ParticipationCache::new(state, spec)?; let participation_cache = ParticipationCache::new(state, spec)?;
let sync_committee = state.current_sync_committee()?.clone(); let sync_committee = state.current_sync_committee()?.clone();
initialize_progressive_balances_cache(state, Some(&participation_cache), spec)?;
// Justification and finalization. // Justification and finalization.
let justification_and_finalization_state = let justification_and_finalization_state =
@ -52,7 +56,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?; process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag). // Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?; process_effective_balance_updates(state, Some(&participation_cache), spec)?;
// Reset slashings // Reset slashings
process_slashings_reset(state)?; process_slashings_reset(state)?;
@ -71,6 +75,8 @@ pub fn process_epoch<T: EthSpec>(
// Rotate the epoch caches to suit the epoch transition. // Rotate the epoch caches to suit the epoch transition.
state.advance_caches(spec)?; state.advance_caches(spec)?;
update_progressive_balances_on_epoch_transition(state, spec)?;
Ok(EpochProcessingSummary::Altair { Ok(EpochProcessingSummary::Altair {
participation_cache, participation_cache,
sync_committee, sync_committee,

View File

@ -1,11 +1,13 @@
use super::errors::EpochProcessingError; use super::errors::EpochProcessingError;
use crate::per_epoch_processing::altair::ParticipationCache;
use safe_arith::SafeArith; use safe_arith::SafeArith;
use types::beacon_state::BeaconState; use types::beacon_state::BeaconState;
use types::chain_spec::ChainSpec; use types::chain_spec::ChainSpec;
use types::{BeaconStateError, EthSpec}; use types::{BeaconStateError, EthSpec, ProgressiveBalancesCache};
pub fn process_effective_balance_updates<T: EthSpec>( pub fn process_effective_balance_updates<T: EthSpec>(
state: &mut BeaconState<T>, state: &mut BeaconState<T>,
maybe_participation_cache: Option<&ParticipationCache>,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), EpochProcessingError> { ) -> Result<(), EpochProcessingError> {
let hysteresis_increment = spec let hysteresis_increment = spec
@ -13,7 +15,8 @@ pub fn process_effective_balance_updates<T: EthSpec>(
.safe_div(spec.hysteresis_quotient)?; .safe_div(spec.hysteresis_quotient)?;
let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?;
let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?;
let (validators, balances) = state.validators_and_balances_mut(); let (validators, balances, progressive_balances_cache) =
state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter_mut().enumerate() { for (index, validator) in validators.iter_mut().enumerate() {
let balance = balances let balance = balances
.get(index) .get(index)
@ -23,11 +26,43 @@ pub fn process_effective_balance_updates<T: EthSpec>(
if balance.safe_add(downward_threshold)? < validator.effective_balance if balance.safe_add(downward_threshold)? < validator.effective_balance
|| validator.effective_balance.safe_add(upward_threshold)? < balance || validator.effective_balance.safe_add(upward_threshold)? < balance
{ {
validator.effective_balance = std::cmp::min( let old_effective_balance = validator.effective_balance;
let new_effective_balance = std::cmp::min(
balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?,
spec.max_effective_balance, spec.max_effective_balance,
); );
if let Some(participation_cache) = maybe_participation_cache {
update_progressive_balances(
participation_cache,
progressive_balances_cache,
index,
old_effective_balance,
new_effective_balance,
)?;
}
validator.effective_balance = new_effective_balance;
} }
} }
Ok(()) Ok(())
} }
fn update_progressive_balances(
participation_cache: &ParticipationCache,
progressive_balances_cache: &mut ProgressiveBalancesCache,
index: usize,
old_effective_balance: u64,
new_effective_balance: u64,
) -> Result<(), EpochProcessingError> {
if old_effective_balance != new_effective_balance {
let is_current_epoch_target_attester =
participation_cache.is_current_epoch_timely_target_attester(index)?;
progressive_balances_cache.on_effective_balance_change(
is_current_epoch_target_attester,
old_effective_balance,
new_effective_balance,
)?;
}
Ok(())
}
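
For a concrete sense of the adjustment above, here is a small sketch using the cache API introduced later in this diff; the epoch and balance figures are made up, and `Balance`/`ProgressiveBalancesCache` are assumed to be reachable from the `types` crate root as the re-exports below suggest:

use types::{Balance, BeaconStateError, Epoch, ProgressiveBalancesCache};

fn effective_balance_change_demo() -> Result<(), BeaconStateError> {
    // EFFECTIVE_BALANCE_INCREMENT on mainnet: 1 ETH, in gwei.
    let increment = 1_000_000_000;
    let mut cache = ProgressiveBalancesCache::default();
    cache.initialize(
        Epoch::new(3),
        Balance::zero(increment),
        Balance::zero(increment),
    );
    // A 32 ETH current-epoch target attester...
    cache.on_new_target_attestation(Epoch::new(3), 32_000_000_000)?;
    // ...drops to 31 ETH effective balance, so the cached total shrinks by 1 ETH.
    cache.on_effective_balance_change(true, 32_000_000_000, 31_000_000_000)?;
    assert_eq!(cache.current_epoch_target_attesting_balance()?, 31_000_000_000);
    Ok(())
}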

View File

@ -16,7 +16,7 @@ pub fn process_slashings<T: EthSpec>(
total_balance, total_balance,
); );
let (validators, balances) = state.validators_and_balances_mut(); let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter().enumerate() { for (index, validator) in validators.iter().enumerate() {
if validator.slashed if validator.slashed
&& epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?

View File

@ -1,3 +1,4 @@
use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache;
use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices}; use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices};
use std::mem; use std::mem;
use std::sync::Arc; use std::sync::Arc;
@ -101,6 +102,7 @@ pub fn upgrade_to_altair<E: EthSpec>(
next_sync_committee: temp_sync_committee, // not read next_sync_committee: temp_sync_committee, // not read
// Caches // Caches
total_active_balance: pre.total_active_balance, total_active_balance: pre.total_active_balance,
progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
committee_caches: mem::take(&mut pre.committee_caches), committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache), pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache), exit_cache: mem::take(&mut pre.exit_cache),
@ -110,6 +112,8 @@ pub fn upgrade_to_altair<E: EthSpec>(
// Fill in previous epoch participation from the pre state's pending attestations. // Fill in previous epoch participation from the pre state's pending attestations.
translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?; translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?;
initialize_progressive_balances_cache(&mut post, None, spec)?;
// Fill in sync committees // Fill in sync committees
// Note: A duplicate committee is assigned for the current and next committee at the fork // Note: A duplicate committee is assigned for the current and next committee at the fork
// boundary // boundary

View File

@ -62,6 +62,7 @@ pub fn upgrade_to_capella<E: EthSpec>(
historical_summaries: VariableList::default(), historical_summaries: VariableList::default(),
// Caches // Caches
total_active_balance: pre.total_active_balance, total_active_balance: pre.total_active_balance,
progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
committee_caches: mem::take(&mut pre.committee_caches), committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache), pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache), exit_cache: mem::take(&mut pre.exit_cache),

View File

@ -60,6 +60,7 @@ pub fn upgrade_to_bellatrix<E: EthSpec>(
latest_execution_payload_header: <ExecutionPayloadHeaderMerge<E>>::default(), latest_execution_payload_header: <ExecutionPayloadHeaderMerge<E>>::default(),
// Caches // Caches
total_active_balance: pre.total_active_balance, total_active_balance: pre.total_active_balance,
progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
committee_caches: mem::take(&mut pre.committee_caches), committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache), pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache), exit_cache: mem::take(&mut pre.exit_cache),

View File

@ -53,6 +53,7 @@ serde_json = "1.0.74"
smallvec = "1.8.0" smallvec = "1.8.0"
serde_with = "1.13.0" serde_with = "1.13.0"
maplit = "1.0.2" maplit = "1.0.2"
strum = { version = "0.24.0", features = ["derive"] }
[dev-dependencies] [dev-dependencies]
criterion = "0.3.3" criterion = "0.3.3"

View File

@ -51,7 +51,7 @@ fn all_benches(c: &mut Criterion) {
let spec = Arc::new(MainnetEthSpec::default_spec()); let spec = Arc::new(MainnetEthSpec::default_spec());
let mut state = get_state::<MainnetEthSpec>(validator_count); let mut state = get_state::<MainnetEthSpec>(validator_count);
state.build_all_caches(&spec).expect("should build caches"); state.build_caches(&spec).expect("should build caches");
let state_bytes = state.as_ssz_bytes(); let state_bytes = state.as_ssz_bytes();
let inner_state = state.clone(); let inner_state = state.clone();

View File

@ -98,7 +98,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T,
} }
} }
impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> { impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T, Payload> {
/// Get the fork_name of this object /// Get the fork_name of this object
pub fn fork_name(self) -> ForkName { pub fn fork_name(self) -> ForkName {
match self { match self {

View File

@ -26,6 +26,8 @@ pub use self::committee_cache::{
compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
CommitteeCache, CommitteeCache,
}; };
pub use crate::beacon_state::balance::Balance;
pub use crate::beacon_state::progressive_balances_cache::*;
use crate::historical_summary::HistoricalSummary; use crate::historical_summary::HistoricalSummary;
pub use clone_config::CloneConfig; pub use clone_config::CloneConfig;
pub use eth_spec::*; pub use eth_spec::*;
@ -34,9 +36,11 @@ pub use tree_hash_cache::BeaconTreeHashCache;
#[macro_use] #[macro_use]
mod committee_cache; mod committee_cache;
mod balance;
mod clone_config; mod clone_config;
mod exit_cache; mod exit_cache;
mod iter; mod iter;
mod progressive_balances_cache;
mod pubkey_cache; mod pubkey_cache;
mod tests; mod tests;
mod tree_hash_cache; mod tree_hash_cache;
@ -101,6 +105,9 @@ pub enum Error {
SszTypesError(ssz_types::Error), SszTypesError(ssz_types::Error),
TreeHashCacheNotInitialized, TreeHashCacheNotInitialized,
NonLinearTreeHashCacheHistory, NonLinearTreeHashCacheHistory,
ParticipationCacheError(String),
ProgressiveBalancesCacheNotInitialized,
ProgressiveBalancesCacheInconsistent,
TreeHashCacheSkippedSlot { TreeHashCacheSkippedSlot {
cache: Slot, cache: Slot,
state: Slot, state: Slot,
@ -322,6 +329,12 @@ where
#[tree_hash(skip_hashing)] #[tree_hash(skip_hashing)]
#[test_random(default)] #[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))] #[derivative(Clone(clone_with = "clone_default"))]
pub progressive_balances_cache: ProgressiveBalancesCache,
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub committee_caches: [CommitteeCache; CACHED_EPOCHS], pub committee_caches: [CommitteeCache; CACHED_EPOCHS],
#[serde(skip_serializing, skip_deserializing)] #[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)]
@ -398,6 +411,7 @@ impl<T: EthSpec> BeaconState<T> {
// Caching (not in spec) // Caching (not in spec)
total_active_balance: None, total_active_balance: None,
progressive_balances_cache: <_>::default(),
committee_caches: [ committee_caches: [
CommitteeCache::default(), CommitteeCache::default(),
CommitteeCache::default(), CommitteeCache::default(),
@ -776,7 +790,7 @@ impl<T: EthSpec> BeaconState<T> {
Ok(signature_hash_int.safe_rem(modulo)? == 0) Ok(signature_hash_int.safe_rem(modulo)? == 0)
} }
/// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`.
/// ///
/// Spec v0.12.1 /// Spec v0.12.1
pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result<usize, Error> { pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result<usize, Error> {
@ -1169,13 +1183,35 @@ impl<T: EthSpec> BeaconState<T> {
} }
/// Convenience accessor for validators and balances simultaneously. /// Convenience accessor for validators and balances simultaneously.
pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) { pub fn validators_and_balances_and_progressive_balances_mut(
&mut self,
) -> (&mut [Validator], &mut [u64], &mut ProgressiveBalancesCache) {
match self { match self {
BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Base(state) => (
BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), &mut state.validators,
BeaconState::Merge(state) => (&mut state.validators, &mut state.balances), &mut state.balances,
BeaconState::Capella(state) => (&mut state.validators, &mut state.balances), &mut state.progressive_balances_cache,
BeaconState::Deneb(state) => (&mut state.validators, &mut state.balances), ),
BeaconState::Altair(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Merge(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Capella(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Deneb(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
} }
} }
@ -1402,7 +1438,7 @@ impl<T: EthSpec> BeaconState<T> {
} }
/// Build all caches (except the tree hash cache), if they need to be built. /// Build all caches (except the tree hash cache), if they need to be built.
pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
self.build_all_committee_caches(spec)?; self.build_all_committee_caches(spec)?;
self.update_pubkey_cache()?; self.update_pubkey_cache()?;
self.build_exit_cache(spec)?; self.build_exit_cache(spec)?;
@ -1434,6 +1470,7 @@ impl<T: EthSpec> BeaconState<T> {
self.drop_committee_cache(RelativeEpoch::Next)?; self.drop_committee_cache(RelativeEpoch::Next)?;
self.drop_pubkey_cache(); self.drop_pubkey_cache();
self.drop_tree_hash_cache(); self.drop_tree_hash_cache();
self.drop_progressive_balances_cache();
*self.exit_cache_mut() = ExitCache::default(); *self.exit_cache_mut() = ExitCache::default();
Ok(()) Ok(())
} }
@ -1630,6 +1667,11 @@ impl<T: EthSpec> BeaconState<T> {
*self.pubkey_cache_mut() = PubkeyCache::default() *self.pubkey_cache_mut() = PubkeyCache::default()
} }
/// Completely drops the `progressive_balances_cache` cache, replacing it with a new, empty cache.
fn drop_progressive_balances_cache(&mut self) {
*self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default();
}
/// Initialize but don't fill the tree hash cache, if it isn't already initialized. /// Initialize but don't fill the tree hash cache, if it isn't already initialized.
pub fn initialize_tree_hash_cache(&mut self) { pub fn initialize_tree_hash_cache(&mut self) {
if !self.tree_hash_cache().is_initialized() { if !self.tree_hash_cache().is_initialized() {
@ -1702,6 +1744,9 @@ impl<T: EthSpec> BeaconState<T> {
if config.tree_hash_cache { if config.tree_hash_cache {
*res.tree_hash_cache_mut() = self.tree_hash_cache().clone(); *res.tree_hash_cache_mut() = self.tree_hash_cache().clone();
} }
if config.progressive_balances_cache {
*res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone();
}
res res
} }
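
A sketch of how the new three-way accessor is meant to be used; the caller here is hypothetical, the real call sites being `process_effective_balance_updates` and `process_slashings` above:

use types::{BeaconState, EthSpec};

fn walk_validators<E: EthSpec>(state: &mut BeaconState<E>) {
    // One mutable borrow yields validators, balances and the progressive
    // balances cache together, avoiding a second `&mut state` borrow.
    let (validators, balances, _progressive_balances) =
        state.validators_and_balances_and_progressive_balances_mut();
    for (index, validator) in validators.iter_mut().enumerate() {
        if let Some(balance) = balances.get(index) {
            // e.g. compare `*balance` with `validator.effective_balance` and, on a
            // change, mirror it via `_progressive_balances.on_effective_balance_change(...)`.
            let _ = (*balance, validator.effective_balance);
        }
    }
}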

View File

@ -0,0 +1,33 @@
use arbitrary::Arbitrary;
use safe_arith::{ArithError, SafeArith};
/// A balance which will never be below the specified `minimum`.
///
/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
#[derive(PartialEq, Debug, Clone, Copy, Arbitrary)]
pub struct Balance {
raw: u64,
minimum: u64,
}
impl Balance {
/// Initialize the balance to `0`, or the given `minimum`.
pub fn zero(minimum: u64) -> Self {
Self { raw: 0, minimum }
}
/// Returns the balance with respect to the initialization `minimum`.
pub fn get(&self) -> u64 {
std::cmp::max(self.raw, self.minimum)
}
/// Add-assign to the balance.
pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
self.raw.safe_add_assign(other)
}
/// Sub-assign to the balance.
pub fn safe_sub_assign(&mut self, other: u64) -> Result<(), ArithError> {
self.raw.safe_sub_assign(other)
}
}
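
The `minimum` only floors reads, it does not clamp the raw value. A small illustration, assuming `Balance` is re-exported from the `types` crate root via the `pub use` added in `beacon_state/mod.rs` above:

use safe_arith::ArithError;
use types::Balance;

fn balance_floor_demo() -> Result<(), ArithError> {
    // EFFECTIVE_BALANCE_INCREMENT on mainnet: 1 ETH, in gwei.
    let increment = 1_000_000_000;
    let mut balance = Balance::zero(increment);
    assert_eq!(balance.get(), increment); // raw is 0, reads are floored to the minimum
    balance.safe_add_assign(2_500_000_000)?;
    assert_eq!(balance.get(), 2_500_000_000); // raw value above the floor wins
    balance.safe_sub_assign(2_000_000_000)?;
    assert_eq!(balance.get(), increment); // raw dropped to 0.5 ETH, floored again
    Ok(())
}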

View File

@ -5,6 +5,7 @@ pub struct CloneConfig {
pub pubkey_cache: bool, pub pubkey_cache: bool,
pub exit_cache: bool, pub exit_cache: bool,
pub tree_hash_cache: bool, pub tree_hash_cache: bool,
pub progressive_balances_cache: bool,
} }
impl CloneConfig { impl CloneConfig {
@ -14,6 +15,7 @@ impl CloneConfig {
pubkey_cache: true, pubkey_cache: true,
exit_cache: true, exit_cache: true,
tree_hash_cache: true, tree_hash_cache: true,
progressive_balances_cache: true,
} }
} }

View File

@ -0,0 +1,184 @@
use crate::beacon_state::balance::Balance;
use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec};
use arbitrary::Arbitrary;
use safe_arith::SafeArith;
use serde_derive::{Deserialize, Serialize};
use strum::{Display, EnumString, EnumVariantNames};
/// This cache keeps track of the accumulated target attestation balance for the current & previous
/// epochs. The cached values can be utilised by fork choice to calculate unrealized justification
/// and finalization instead of converting epoch participation arrays to balances for each block we
/// process.
#[derive(Default, Debug, PartialEq, Arbitrary, Clone)]
pub struct ProgressiveBalancesCache {
inner: Option<Inner>,
}
#[derive(Debug, PartialEq, Arbitrary, Clone)]
struct Inner {
pub current_epoch: Epoch,
pub previous_epoch_target_attesting_balance: Balance,
pub current_epoch_target_attesting_balance: Balance,
}
impl ProgressiveBalancesCache {
pub fn initialize(
&mut self,
current_epoch: Epoch,
previous_epoch_target_attesting_balance: Balance,
current_epoch_target_attesting_balance: Balance,
) {
self.inner = Some(Inner {
current_epoch,
previous_epoch_target_attesting_balance,
current_epoch_target_attesting_balance,
});
}
pub fn is_initialized(&self) -> bool {
self.inner.is_some()
}
    /// When a new target attestation has been processed, we add the validator's effective balance
    /// to the cached target attesting balance of the attestation's epoch (current or previous).
/// If the epoch is neither the current epoch nor the previous epoch, an error is returned.
pub fn on_new_target_attestation(
&mut self,
epoch: Epoch,
validator_effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if epoch == cache.current_epoch {
cache
.current_epoch_target_attesting_balance
.safe_add_assign(validator_effective_balance)?;
} else if epoch.safe_add(1)? == cache.current_epoch {
cache
.previous_epoch_target_attesting_balance
.safe_add_assign(validator_effective_balance)?;
} else {
return Err(BeaconStateError::ProgressiveBalancesCacheInconsistent);
}
Ok(())
}
    /// When a validator is slashed, we remove its effective balance from the previous and/or
    /// current epoch target attesting balances, to exclude the slashed validator's weight.
pub fn on_slashing(
&mut self,
is_previous_epoch_target_attester: bool,
is_current_epoch_target_attester: bool,
effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if is_previous_epoch_target_attester {
cache
.previous_epoch_target_attesting_balance
.safe_sub_assign(effective_balance)?;
}
if is_current_epoch_target_attester {
cache
.current_epoch_target_attesting_balance
.safe_sub_assign(effective_balance)?;
}
Ok(())
}
    /// When a current epoch target attester has its effective balance changed, we adjust its
    /// share of the target attesting balance in the cache.
pub fn on_effective_balance_change(
&mut self,
is_current_epoch_target_attester: bool,
old_effective_balance: u64,
new_effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if is_current_epoch_target_attester {
if new_effective_balance > old_effective_balance {
cache
.current_epoch_target_attesting_balance
.safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?;
} else {
cache
.current_epoch_target_attesting_balance
.safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?;
}
}
Ok(())
}
    /// On epoch transition, the balance from the current epoch is shifted to the previous epoch,
    /// and the current epoch balance is reset to 0.
pub fn on_epoch_transition(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
cache.current_epoch.safe_add_assign(1)?;
cache.previous_epoch_target_attesting_balance =
cache.current_epoch_target_attesting_balance;
cache.current_epoch_target_attesting_balance =
Balance::zero(spec.effective_balance_increment);
Ok(())
}
pub fn previous_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
Ok(self
.get_inner()?
.previous_epoch_target_attesting_balance
.get())
}
pub fn current_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
Ok(self
.get_inner()?
.current_epoch_target_attesting_balance
.get())
}
fn get_inner_mut(&mut self) -> Result<&mut Inner, BeaconStateError> {
self.inner
.as_mut()
.ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
}
fn get_inner(&self) -> Result<&Inner, BeaconStateError> {
self.inner
.as_ref()
.ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
}
}
#[derive(
Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize, Display, EnumString, EnumVariantNames,
)]
#[strum(serialize_all = "lowercase")]
pub enum ProgressiveBalancesMode {
/// Disable the usage of progressive cache, and use the existing `ParticipationCache` calculation.
Disabled,
    /// Enable the usage of progressive cache, with checks against the `ParticipationCache`, falling
    /// back to the existing calculation if there is a balance mismatch.
Checked,
/// Enable the usage of progressive cache, with checks against the `ParticipationCache`. Errors
/// if there is a balance mismatch. Used in testing only.
Strict,
/// Enable the usage of progressive cache, with no comparative checks against the
    /// `ParticipationCache`. This is the fastest mode but is experimental; use with caution.
Fast,
}
impl ProgressiveBalancesMode {
pub fn perform_comparative_checks(&self) -> bool {
match self {
Self::Disabled | Self::Fast => false,
Self::Checked | Self::Strict => true,
}
}
}
/// `ProgressiveBalancesCache` is only enabled from `Altair` as it requires `ParticipationCache`.
pub fn is_progressive_balances_enabled<E: EthSpec>(state: &BeaconState<E>) -> bool {
match state {
BeaconState::Base(_) => false,
        BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => true,
}
}
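
Putting the pieces together, a minimal walkthrough of the cache API defined above; epochs and balances are illustrative, and `Balance`/`ProgressiveBalancesCache` are assumed to be re-exported from `types`:

use types::{
    Balance, BeaconStateError, Epoch, EthSpec, MainnetEthSpec, ProgressiveBalancesCache,
};

fn cache_walkthrough() -> Result<(), BeaconStateError> {
    let spec = MainnetEthSpec::default_spec();
    let increment = spec.effective_balance_increment;

    let mut cache = ProgressiveBalancesCache::default();
    assert!(!cache.is_initialized());
    cache.initialize(
        Epoch::new(10),
        Balance::zero(increment),
        Balance::zero(increment),
    );

    // A 32 ETH validator attests to the current epoch's target.
    cache.on_new_target_attestation(Epoch::new(10), 32_000_000_000)?;
    assert_eq!(cache.current_epoch_target_attesting_balance()?, 32_000_000_000);

    // Slashing that validator removes its weight from the current epoch total.
    cache.on_slashing(false, true, 32_000_000_000)?;

    // The epoch boundary shifts the current balance to previous and resets current.
    cache.on_epoch_transition(&spec)?;
    assert_eq!(cache.current_epoch_target_attesting_balance()?, increment); // floored at the minimum
    Ok(())
}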

View File

@ -219,17 +219,18 @@ async fn clone_config() {
let mut state = build_state::<MinimalEthSpec>(16).await; let mut state = build_state::<MinimalEthSpec>(16).await;
state.build_all_caches(&spec).unwrap(); state.build_caches(&spec).unwrap();
state state
.update_tree_hash_cache() .update_tree_hash_cache()
.expect("should update tree hash cache"); .expect("should update tree hash cache");
let num_caches = 4; let num_caches = 5;
let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig { let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig {
committee_caches: (i & 1) != 0, committee_caches: (i & 1) != 0,
pubkey_cache: ((i >> 1) & 1) != 0, pubkey_cache: ((i >> 1) & 1) != 0,
exit_cache: ((i >> 2) & 1) != 0, exit_cache: ((i >> 2) & 1) != 0,
tree_hash_cache: ((i >> 3) & 1) != 0, tree_hash_cache: ((i >> 3) & 1) != 0,
progressive_balances_cache: ((i >> 4) & 1) != 0,
}); });
for config in all_configs { for config in all_configs {

View File

@ -1,7 +1,7 @@
[package] [package]
name = "lcli" name = "lcli"
description = "Lighthouse CLI (modeled after zcli)" description = "Lighthouse CLI (modeled after zcli)"
version = "4.2.0" version = "4.3.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2021" edition = "2021"

View File

@ -329,7 +329,7 @@ fn initialize_state_with_validators<T: EthSpec>(
} }
// Now that we have our validators, initialize the caches (including the committees) // Now that we have our validators, initialize the caches (including the committees)
state.build_all_caches(spec).unwrap(); state.build_caches(spec).unwrap();
// Set genesis validators root for domain separation and chain versioning // Set genesis validators root for domain separation and chain versioning
*state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap(); *state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap();

View File

@ -109,7 +109,7 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(),
let target_slot = initial_slot + slots; let target_slot = initial_slot + slots;
state state
.build_all_caches(spec) .build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?; .map_err(|e| format!("Unable to build caches: {:?}", e))?;
let state_root = if let Some(root) = cli_state_root.or(state_root) { let state_root = if let Some(root) = cli_state_root.or(state_root) {

View File

@ -205,7 +205,7 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(),
if config.exclude_cache_builds { if config.exclude_cache_builds {
pre_state pre_state
.build_all_caches(spec) .build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?; .map_err(|e| format!("Unable to build caches: {:?}", e))?;
let state_root = pre_state let state_root = pre_state
.update_tree_hash_cache() .update_tree_hash_cache()
@ -303,7 +303,7 @@ fn do_transition<T: EthSpec>(
if !config.exclude_cache_builds { if !config.exclude_cache_builds {
let t = Instant::now(); let t = Instant::now();
pre_state pre_state
.build_all_caches(spec) .build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?; .map_err(|e| format!("Unable to build caches: {:?}", e))?;
debug!("Build caches: {:?}", t.elapsed()); debug!("Build caches: {:?}", t.elapsed());
@ -335,7 +335,7 @@ fn do_transition<T: EthSpec>(
let t = Instant::now(); let t = Instant::now();
pre_state pre_state
.build_all_caches(spec) .build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?; .map_err(|e| format!("Unable to build caches: {:?}", e))?;
debug!("Build all caches (again): {:?}", t.elapsed()); debug!("Build all caches (again): {:?}", t.elapsed());

View File

@ -1,6 +1,6 @@
[package] [package]
name = "lighthouse" name = "lighthouse"
version = "4.2.0" version = "4.3.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"] authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021" edition = "2021"
autotests = false autotests = false

View File

@ -16,7 +16,10 @@ use std::str::FromStr;
use std::string::ToString; use std::string::ToString;
use std::time::Duration; use std::time::Duration;
use tempfile::TempDir; use tempfile::TempDir;
use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec}; use types::{
Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec,
ProgressiveBalancesMode,
};
use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port}; use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port};
const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/"; const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/";
@ -1125,48 +1128,13 @@ fn default_backfill_rate_limiting_flag() {
} }
#[test] #[test]
fn default_boot_nodes() { fn default_boot_nodes() {
let mainnet = vec![ let number_of_boot_nodes = 15;
// Lighthouse Team (Sigma Prime)
"enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo",
"enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo",
// EF Team
"enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg",
"enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg",
"enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg",
"enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg",
// Teku team (Consensys)
"enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA",
"enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA",
// Prysm team (Prysmatic Labs)
"enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg",
"enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA",
"enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg",
// Nimbus team
"enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM",
"enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM"
];
CommandLineTest::new() CommandLineTest::new()
.run_with_zero_port() .run_with_zero_port()
.with_config(|config| { .with_config(|config| {
// Lighthouse Team (Sigma Prime) // Lighthouse Team (Sigma Prime)
assert_eq!(config.network.boot_nodes_enr[0].to_base64(), mainnet[0]); assert_eq!(config.network.boot_nodes_enr.len(), number_of_boot_nodes);
assert_eq!(config.network.boot_nodes_enr[1].to_base64(), mainnet[1]);
// EF Team
assert_eq!(config.network.boot_nodes_enr[2].to_base64(), mainnet[2]);
assert_eq!(config.network.boot_nodes_enr[3].to_base64(), mainnet[3]);
assert_eq!(config.network.boot_nodes_enr[4].to_base64(), mainnet[4]);
assert_eq!(config.network.boot_nodes_enr[5].to_base64(), mainnet[5]);
// Teku team (Consensys)
assert_eq!(config.network.boot_nodes_enr[6].to_base64(), mainnet[6]);
assert_eq!(config.network.boot_nodes_enr[7].to_base64(), mainnet[7]);
// Prysm team (Prysmatic Labs)
assert_eq!(config.network.boot_nodes_enr[8].to_base64(), mainnet[8]);
assert_eq!(config.network.boot_nodes_enr[9].to_base64(), mainnet[9]);
assert_eq!(config.network.boot_nodes_enr[10].to_base64(), mainnet[10]);
// Nimbus team
assert_eq!(config.network.boot_nodes_enr[11].to_base64(), mainnet[11]);
assert_eq!(config.network.boot_nodes_enr[12].to_base64(), mainnet[12]);
}); });
} }
#[test] #[test]
@ -2323,3 +2291,28 @@ fn invalid_gossip_verified_blocks_path() {
) )
}); });
} }
#[test]
fn progressive_balances_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.chain.progressive_balances_mode,
ProgressiveBalancesMode::Checked
)
});
}
#[test]
fn progressive_balances_fast() {
CommandLineTest::new()
.flag("progressive-balances", Some("fast"))
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.chain.progressive_balances_mode,
ProgressiveBalancesMode::Fast
)
});
}
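
The tests above pin down the CLI surface: the mode defaults to `checked` and is selected with `--progressive-balances <disabled|checked|strict|fast>` (lowercase, per the `strum` attribute). A sketch of how consuming code might branch on the mode; this helper is hypothetical and not part of this diff:

use types::ProgressiveBalancesMode;

// Whether fork choice should consume the progressive cache at all, and whether
// it should cross-check the result against the `ParticipationCache` first.
fn cache_usage(mode: ProgressiveBalancesMode) -> (bool, bool) {
    let use_cache = !matches!(mode, ProgressiveBalancesMode::Disabled);
    let cross_check = mode.perform_comparative_checks();
    (use_cache, cross_check)
}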

View File

@ -6,9 +6,9 @@ use crate::type_name;
use crate::type_name::TypeName; use crate::type_name::TypeName;
use serde_derive::Deserialize; use serde_derive::Deserialize;
use state_processing::per_epoch_processing::capella::process_historical_summaries_update; use state_processing::per_epoch_processing::capella::process_historical_summaries_update;
use state_processing::per_epoch_processing::effective_balance_updates::process_effective_balance_updates;
use state_processing::per_epoch_processing::{ use state_processing::per_epoch_processing::{
altair, base, altair, base,
effective_balance_updates::process_effective_balance_updates,
historical_roots_update::process_historical_roots_update, historical_roots_update::process_historical_roots_update,
process_registry_updates, process_slashings, process_registry_updates, process_slashings,
resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset}, resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset},
@ -180,7 +180,7 @@ impl<E: EthSpec> EpochTransition<E> for Eth1DataReset {
impl<E: EthSpec> EpochTransition<E> for EffectiveBalanceUpdates { impl<E: EthSpec> EpochTransition<E> for EffectiveBalanceUpdates {
fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> { fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> {
process_effective_balance_updates(state, spec) process_effective_balance_updates(state, None, spec)
} }
} }

View File

@ -18,7 +18,8 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use types::{ use types::{
Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec, Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec,
ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, ProgressiveBalancesMode,
SignedBeaconBlock, Slot, Uint256,
}; };
#[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)]
@ -382,6 +383,7 @@ impl<E: EthSpec> Tester<E> {
block_root, block_root,
block.clone(), block.clone(),
NotifyExecutionLayer::Yes, NotifyExecutionLayer::Yes,
|| Ok(()),
))?; ))?;
if result.is_ok() != valid { if result.is_ok() != valid {
return Err(Error::DidntFail(format!( return Err(Error::DidntFail(format!(
@ -439,7 +441,9 @@ impl<E: EthSpec> Tester<E> {
block_delay, block_delay,
&state, &state,
PayloadVerificationStatus::Irrelevant, PayloadVerificationStatus::Irrelevant,
ProgressiveBalancesMode::Strict,
&self.harness.chain.spec, &self.harness.chain.spec,
self.harness.logger(),
); );
if result.is_ok() { if result.is_ok() {

View File

@ -4,6 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches;
use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file};
use crate::testing_spec; use crate::testing_spec;
use serde_derive::Deserialize; use serde_derive::Deserialize;
use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache;
use ssz::Decode; use ssz::Decode;
use state_processing::{ use state_processing::{
per_block_processing::{ per_block_processing::{
@ -97,10 +98,8 @@ impl<E: EthSpec> Operation<E> for Attestation<E> {
&mut ctxt, &mut ctxt,
spec, spec,
), ),
BeaconState::Altair(_) BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_)| BeaconState::Deneb(_) => {
| BeaconState::Merge(_) initialize_progressive_balances_cache(state, None, spec)?;
| BeaconState::Capella(_)
| BeaconState::Deneb(_) => {
altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec) altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec)
} }
} }
@ -123,6 +122,7 @@ impl<E: EthSpec> Operation<E> for AttesterSlashing<E> {
_: &Operations<E, Self>, _: &Operations<E, Self>,
) -> Result<(), BlockProcessingError> { ) -> Result<(), BlockProcessingError> {
let mut ctxt = ConsensusContext::new(state.slot()); let mut ctxt = ConsensusContext::new(state.slot());
initialize_progressive_balances_cache(state, None, spec)?;
process_attester_slashings( process_attester_slashings(
state, state,
&[self.clone()], &[self.clone()],
@ -173,6 +173,7 @@ impl<E: EthSpec> Operation<E> for ProposerSlashing {
_: &Operations<E, Self>, _: &Operations<E, Self>,
) -> Result<(), BlockProcessingError> { ) -> Result<(), BlockProcessingError> {
let mut ctxt = ConsensusContext::new(state.slot()); let mut ctxt = ConsensusContext::new(state.slot());
initialize_progressive_balances_cache(state, None, spec)?;
process_proposer_slashings( process_proposer_slashings(
state, state,
&[self.clone()], &[self.clone()],

View File

@ -67,7 +67,7 @@ impl<E: EthSpec> Case for SanityBlocks<E> {
let spec = &testing_spec::<E>(fork_name); let spec = &testing_spec::<E>(fork_name);
// Processing requires the epoch cache. // Processing requires the epoch cache.
bulk_state.build_all_caches(spec).unwrap(); bulk_state.build_caches(spec).unwrap();
// Spawning a second state to call the VerifyIndiviual strategy to avoid bitrot. // Spawning a second state to call the VerifyIndiviual strategy to avoid bitrot.
// See https://github.com/sigp/lighthouse/issues/742. // See https://github.com/sigp/lighthouse/issues/742.

View File

@ -61,7 +61,7 @@ impl<E: EthSpec> Case for SanitySlots<E> {
let spec = &testing_spec::<E>(fork_name); let spec = &testing_spec::<E>(fork_name);
// Processing requires the epoch cache. // Processing requires the epoch cache.
state.build_all_caches(spec).unwrap(); state.build_caches(spec).unwrap();
let mut result = (0..self.slots) let mut result = (0..self.slots)
.try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ())) .try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ()))