Merge branch 'remove-into-gossip-verified-block' of https://github.com/realbigsean/lighthouse into merge-unstable-deneb-june-6th

realbigsean 2023-07-06 16:51:35 -04:00
commit cfe2452533
No known key found for this signature in database
GPG Key ID: BE1B3DB104F6C788
78 changed files with 3075 additions and 407 deletions

Cargo.lock (generated)

@ -684,7 +684,7 @@ dependencies = [
[[package]]
name = "beacon_node"
version = "4.2.0"
version = "4.3.0"
dependencies = [
"beacon_chain",
"clap",
@ -891,7 +891,7 @@ dependencies = [
[[package]]
name = "boot_node"
version = "4.2.0"
version = "4.3.0"
dependencies = [
"beacon_node",
"clap",
@ -4165,7 +4165,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
[[package]]
name = "lcli"
version = "4.2.0"
version = "4.3.0"
dependencies = [
"account_utils",
"beacon_chain",
@ -4812,7 +4812,7 @@ dependencies = [
[[package]]
name = "lighthouse"
version = "4.2.0"
version = "4.3.0"
dependencies = [
"account_manager",
"account_utils",
@ -9108,6 +9108,7 @@ dependencies = [
"smallvec",
"ssz_types",
"state_processing",
"strum",
"superstruct 0.6.0",
"swap_or_not_shuffle",
"tempfile",


@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "4.2.0"
version = "4.3.0"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
edition = "2021"


@ -2664,6 +2664,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
signature_verified_block.block_root(),
signature_verified_block,
notify_execution_layer,
|| Ok(()),
)
.await
{
@ -2783,6 +2784,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256,
unverified_block: B,
notify_execution_layer: NotifyExecutionLayer,
publish_fn: impl FnOnce() -> Result<(), BlockError<T::EthSpec>> + Send + 'static,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
// Start the Prometheus timer.
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
@ -2798,6 +2800,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
notify_execution_layer,
)?;
//TODO(sean) error handling?
publish_fn()?;
let executed_block = self
.clone()
.into_executed_block(execution_pending)
@ -3073,7 +3078,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_delay,
&state,
payload_verification_status,
self.config.progressive_balances_mode,
&self.spec,
&self.log,
)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
}
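The new `publish_fn` argument threads a fallible hook into `process_block`: it runs after consensus verification (note the `publish_fn()?;` call above) but before import, so an `Err` aborts the import. A minimal caller sketch, assuming `chain`, `block_root` and `unverified_block` are in scope; `publish_to_network` is a hypothetical stand-in for a real gossip send, and the tests in this commit simply pass `|| Ok(())`:

let result = chain
    .process_block(
        block_root,
        unverified_block,
        NotifyExecutionLayer::Yes,
        || {
            // Hypothetical publish step; returning Err here aborts the import.
            publish_to_network()
                .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))
        },
    )
    .await;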
@ -6012,13 +6019,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Since we are likely calling this during the slot we are going to propose in, don't take into
/// account the current slot when accounting for skips.
pub fn is_healthy(&self, parent_root: &Hash256) -> Result<ChainHealth, Error> {
+let cached_head = self.canonical_head.cached_head();
// Check if the merge has been finalized.
-if let Some(finalized_hash) = self
-    .canonical_head
-    .cached_head()
-    .forkchoice_update_parameters()
-    .finalized_hash
-{
+if let Some(finalized_hash) = cached_head.forkchoice_update_parameters().finalized_hash {
if ExecutionBlockHash::zero() == finalized_hash {
return Ok(ChainHealth::PreMerge);
}
@ -6045,17 +6048,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Check slots at the head of the chain.
let prev_slot = current_slot.saturating_sub(Slot::new(1));
-let head_skips = prev_slot.saturating_sub(self.canonical_head.cached_head().head_slot());
+let head_skips = prev_slot.saturating_sub(cached_head.head_slot());
let head_skips_check = head_skips.as_usize() <= self.config.builder_fallback_skips;
// Check if finalization is advancing.
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
-let epochs_since_finalization = current_epoch.saturating_sub(
-    self.canonical_head
-        .cached_head()
-        .finalized_checkpoint()
-        .epoch,
-);
+let epochs_since_finalization =
+    current_epoch.saturating_sub(cached_head.finalized_checkpoint().epoch);
let finalization_check = epochs_since_finalization.as_usize()
<= self.config.builder_fallback_epochs_since_finalization;


@ -57,6 +57,7 @@ use crate::execution_payload::{
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
};
use crate::observed_block_producers::SeenBlock;
use crate::snapshot_cache::PreProcessingSnapshot;
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
@ -189,13 +190,6 @@ pub enum BlockError<T: EthSpec> {
///
/// The block is valid and we have already imported a block with this hash.
BlockIsAlreadyKnown,
-/// A block for this proposer and slot has already been observed.
-///
-/// ## Peer scoring
-///
-/// The `proposer` has already proposed a block at this slot. The existing block may or may not
-/// be equal to the given block.
-RepeatProposal { proposer: u64, slot: Slot },
/// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
///
/// ## Peer scoring
@ -291,6 +285,14 @@ pub enum BlockError<T: EthSpec> {
/// problems to worry about than losing peers, and we're doing the network a favour by
/// disconnecting.
ParentExecutionPayloadInvalid { parent_root: Hash256 },
/// The block is a slashable equivocation from the proposer.
///
/// ## Peer scoring
///
/// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
/// we penalise them with a mid-tolerance error.
Slashable,
//TODO(sean) peer scoring docs
/// A blob alone failed validation.
BlobValidation(BlobError<T>),
/// The block and blob together failed validation.
@ -892,19 +894,6 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
return Err(BlockError::BlockIsAlreadyKnown);
}
-// Check that we have not already received a block with a valid signature for this slot.
-if chain
-    .observed_block_producers
-    .read()
-    .proposer_has_been_observed(block.message())
-    .map_err(|e| BlockError::BeaconChainError(e.into()))?
-{
-    return Err(BlockError::RepeatProposal {
-        proposer: block.message().proposer_index(),
-        slot: block.slot(),
-    });
-}
// Do not process a block that doesn't descend from the finalized root.
//
// We check this *before* we load the parent so that we can return a more detailed error.
@ -1020,17 +1009,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
//
// It's important to double-check that the proposer still hasn't been observed so we don't
// have a race-condition when verifying two blocks simultaneously.
-if chain
+match chain
    .observed_block_producers
    .write()
-    .observe_proposer(block.message())
+    .observe_proposal(block_root, block.message())
    .map_err(|e| BlockError::BeaconChainError(e.into()))?
{
-    return Err(BlockError::RepeatProposal {
-        proposer: block.message().proposer_index(),
-        slot: block.slot(),
-    });
-}
+    SeenBlock::Slashable => return Err(BlockError::Slashable),
+    SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown),
+    SeenBlock::UniqueNonSlashable => {}
+};
if block.message().proposer_index() != expected_proposer as u64 {
return Err(BlockError::IncorrectBlockProposer {
@ -1293,6 +1281,12 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
chain: &Arc<BeaconChain<T>>,
notify_execution_layer: NotifyExecutionLayer,
) -> Result<Self, BlockError<T::EthSpec>> {
chain
.observed_block_producers
.write()
.observe_proposal(block_root, block.message())
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
if let Some(parent) = chain
.canonical_head
.fork_choice_read_lock()


@ -343,7 +343,7 @@ where
let beacon_block = genesis_block(&mut beacon_state, &self.spec)?;
beacon_state
-.build_all_caches(&self.spec)
+.build_caches(&self.spec)
.map_err(|e| format!("Failed to build genesis state caches: {:?}", e))?;
let beacon_state_root = beacon_block.message().state_root();
@ -433,7 +433,7 @@ where
// Prime all caches before storing the state in the database and computing the tree hash
// root.
weak_subj_state
-.build_all_caches(&self.spec)
+.build_caches(&self.spec)
.map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;
weak_subj_state
.update_tree_hash_cache()
@ -701,6 +701,8 @@ where
store.clone(),
Some(current_slot),
&self.spec,
self.chain_config.progressive_balances_mode,
&log,
)?;
}
@ -714,7 +716,7 @@ where
head_snapshot
.beacon_state
-.build_all_caches(&self.spec)
+.build_caches(&self.spec)
.map_err(|e| format!("Failed to build state caches: {:?}", e))?;
// Perform a check to ensure that the finalization points of the head and fork choice are
@ -840,9 +842,7 @@ where
observed_sync_aggregators: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_block_producers: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_blob_sidecars: <_>::default(),
// TODO: allow for persisting and loading the pool from disk.
observed_voluntary_exits: <_>::default(),
observed_proposer_slashings: <_>::default(),
observed_attester_slashings: <_>::default(),


@ -1,7 +1,7 @@
pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
use serde_derive::{Deserialize, Serialize};
use std::time::Duration;
-use types::{Checkpoint, Epoch};
+use types::{Checkpoint, Epoch, ProgressiveBalancesMode};
pub const DEFAULT_RE_ORG_THRESHOLD: ReOrgThreshold = ReOrgThreshold(20);
pub const DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION: Epoch = Epoch::new(2);
@ -81,6 +81,8 @@ pub struct ChainConfig {
pub always_prepare_payload: bool,
/// Whether backfill sync processing should be rate-limited.
pub enable_backfill_rate_limiting: bool,
/// Whether to use `ProgressiveBalancesCache` in unrealized FFG progression calculation.
pub progressive_balances_mode: ProgressiveBalancesMode,
}
impl Default for ChainConfig {
@ -111,6 +113,7 @@ impl Default for ChainConfig {
genesis_backfill: false,
always_prepare_payload: false,
enable_backfill_rate_limiting: true,
progressive_balances_mode: ProgressiveBalancesMode::Checked,
}
}
}
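With `progressive_balances_mode` defaulting to `Checked`, a non-default mode would be set when the config is built. A minimal sketch using struct-update syntax; the `Disabled` variant is an assumption here, since only `Checked` appears in this diff:

let chain_config = ChainConfig {
    progressive_balances_mode: ProgressiveBalancesMode::Disabled, // assumed variant
    ..ChainConfig::default()
};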


@ -216,6 +216,7 @@ pub enum BeaconChainError {
BlsToExecutionConflictsWithPool,
InconsistentFork(InconsistentFork),
ProposerHeadForkChoiceError(fork_choice::Error<proto_array::Error>),
UnableToPublish,
AvailabilityCheckError(AvailabilityCheckError),
}


@ -10,7 +10,10 @@ use state_processing::{
use std::sync::Arc;
use std::time::Duration;
use store::{iter::ParentRootBlockIterator, HotColdDB, ItemStore};
-use types::{BeaconState, ChainSpec, EthSpec, ForkName, Hash256, SignedBeaconBlock, Slot};
+use types::{
+    BeaconState, ChainSpec, EthSpec, ForkName, Hash256, ProgressiveBalancesMode, SignedBeaconBlock,
+    Slot,
+};
const CORRUPT_DB_MESSAGE: &str = "The database could be corrupt. Check its file permissions or \
consider deleting it by running with the --purge-db flag.";
@ -100,6 +103,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
store: Arc<HotColdDB<E, Hot, Cold>>,
current_slot: Option<Slot>,
spec: &ChainSpec,
progressive_balances_mode: ProgressiveBalancesMode,
log: &Logger,
) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
// Fetch finalized block.
let finalized_checkpoint = head_state.finalized_checkpoint();
@ -197,7 +202,9 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
Duration::from_secs(0),
&state,
payload_verification_status,
progressive_balances_mode,
spec,
log,
)
.map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
}


@ -1,9 +1,10 @@
//! Provides the `ObservedBlockProducers` struct which allows for rejecting gossip blocks from
//! validators that have already produced a block.
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
-use types::{BeaconBlockRef, Epoch, EthSpec, Slot, Unsigned};
+use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned};
#[derive(Debug, PartialEq)]
pub enum Error {
@ -14,6 +15,12 @@ pub enum Error {
ValidatorIndexTooHigh(u64),
}
#[derive(Eq, Hash, PartialEq, Debug, Default)]
struct ProposalKey {
slot: Slot,
proposer: u64,
}
/// Maintains a cache of observed `(block.slot, block.proposer)`.
///
/// The cache supports pruning based upon the finalized epoch. It does not automatically prune, you
@ -27,7 +34,7 @@ pub enum Error {
/// known_distinct_shufflings` which is much smaller.
pub struct ObservedBlockProducers<E: EthSpec> {
finalized_slot: Slot,
-items: HashMap<Slot, HashSet<u64>>,
+items: HashMap<ProposalKey, HashSet<Hash256>>,
_phantom: PhantomData<E>,
}
@ -42,6 +49,24 @@ impl<E: EthSpec> Default for ObservedBlockProducers<E> {
}
}
pub enum SeenBlock {
Duplicate,
Slashable,
UniqueNonSlashable,
}
impl SeenBlock {
pub fn proposer_previously_observed(self) -> bool {
match self {
Self::Duplicate | Self::Slashable => true,
Self::UniqueNonSlashable => false,
}
}
pub fn is_slashable(&self) -> bool {
matches!(self, Self::Slashable)
}
}
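Callers are expected to branch on all three variants; a minimal sketch, where `seen_block` is assumed to be the value returned by `observe_proposal` (this mirrors the gossip verification match earlier in this diff):

match seen_block {
    SeenBlock::Slashable => return Err(BlockError::Slashable),
    SeenBlock::Duplicate => return Err(BlockError::BlockIsAlreadyKnown),
    SeenBlock::UniqueNonSlashable => {}
}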
impl<E: EthSpec> ObservedBlockProducers<E> {
/// Observe that the `block` was produced by `block.proposer_index` at `block.slot`. This will
/// update `self` so future calls to it indicate that this block is known.
@ -52,16 +77,44 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
///
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
-pub fn observe_proposer(&mut self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
+pub fn observe_proposal(
+    &mut self,
+    block_root: Hash256,
+    block: BeaconBlockRef<'_, E>,
+) -> Result<SeenBlock, Error> {
self.sanitize_block(block)?;
-let did_not_exist = self
-    .items
-    .entry(block.slot())
-    .or_insert_with(|| HashSet::with_capacity(E::SlotsPerEpoch::to_usize()))
-    .insert(block.proposer_index());
+let key = ProposalKey {
+    slot: block.slot(),
+    proposer: block.proposer_index(),
+};
-Ok(!did_not_exist)
+let entry = self.items.entry(key);
+let slashable_proposal = match entry {
+    Entry::Occupied(mut occupied_entry) => {
+        let block_roots = occupied_entry.get_mut();
+        let newly_inserted = block_roots.insert(block_root);
+        let is_equivocation = block_roots.len() > 1;
+        if is_equivocation {
+            SeenBlock::Slashable
+        } else if !newly_inserted {
+            SeenBlock::Duplicate
+        } else {
+            SeenBlock::UniqueNonSlashable
+        }
+    }
+    Entry::Vacant(vacant_entry) => {
+        let block_roots = HashSet::from([block_root]);
+        vacant_entry.insert(block_roots);
+        SeenBlock::UniqueNonSlashable
+    }
+};
+Ok(slashable_proposal)
}
/// Returns `Ok(true)` if the `block` has been observed before, `Ok(false)` if not. Does not
@ -72,15 +125,33 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
///
/// - `block.proposer_index` is greater than `VALIDATOR_REGISTRY_LIMIT`.
/// - `block.slot` is equal to or less than the latest pruned `finalized_slot`.
-pub fn proposer_has_been_observed(&self, block: BeaconBlockRef<'_, E>) -> Result<bool, Error> {
+pub fn proposer_has_been_observed(
+    &self,
+    block: BeaconBlockRef<'_, E>,
+    block_root: Hash256,
+) -> Result<SeenBlock, Error> {
self.sanitize_block(block)?;
-let exists = self
-    .items
-    .get(&block.slot())
-    .map_or(false, |set| set.contains(&block.proposer_index()));
+let key = ProposalKey {
+    slot: block.slot(),
+    proposer: block.proposer_index(),
+};
-Ok(exists)
+if let Some(block_roots) = self.items.get(&key) {
+    let block_already_known = block_roots.contains(&block_root);
+    let no_prev_known_blocks =
+        block_roots.difference(&HashSet::from([block_root])).count() == 0;
+    if !no_prev_known_blocks {
+        Ok(SeenBlock::Slashable)
+    } else if block_already_known {
+        Ok(SeenBlock::Duplicate)
+    } else {
+        Ok(SeenBlock::UniqueNonSlashable)
+    }
+} else {
+    Ok(SeenBlock::UniqueNonSlashable)
+}
}
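A hedged usage sketch of the semantics above: re-checking a root that has already been observed is merely a `Duplicate`, while a second, distinct root for the same `(slot, proposer)` is `Slashable`. Here `block_a` and `block_b` are assumed to be two different blocks from the same proposer at the same slot:

let root_a = block_a.canonical_root();
let root_b = block_b.canonical_root(); // same slot and proposer, different root
cache.observe_proposal(root_a, block_a.to_ref())?; // SeenBlock::UniqueNonSlashable
assert!(cache
    .proposer_has_been_observed(block_a.to_ref(), root_a)?
    .proposer_previously_observed()); // Duplicate: same root seen again
assert!(cache
    .proposer_has_been_observed(block_b.to_ref(), root_b)?
    .is_slashable()); // Slashable: second distinct root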
/// Returns `Ok(())` if the given `block` is sane.
@ -112,15 +183,15 @@ impl<E: EthSpec> ObservedBlockProducers<E> {
}
self.finalized_slot = finalized_slot;
-self.items.retain(|slot, _set| *slot > finalized_slot);
+self.items.retain(|key, _| key.slot > finalized_slot);
}
/// Returns `true` if the given `validator_index` has been stored in `self` at `epoch`.
///
/// This is useful for doppelganger detection.
pub fn index_seen_at_epoch(&self, validator_index: u64, epoch: Epoch) -> bool {
-self.items.iter().any(|(slot, producers)| {
-    slot.epoch(E::slots_per_epoch()) == epoch && producers.contains(&validator_index)
+self.items.iter().any(|(key, _)| {
+    key.slot.epoch(E::slots_per_epoch()) == epoch && key.proposer == validator_index
})
}
}
@ -148,9 +219,12 @@ mod tests {
// Slot 0, proposer 0
let block_a = get_block(0, 0);
let block_root = block_a.canonical_root();
assert_eq!(
-cache.observe_proposer(block_a.to_ref()),
+cache
+    .observe_proposal(block_root, block_a.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(false),
"can observe proposer, indicates proposer unobserved"
);
@ -164,7 +238,10 @@ mod tests {
assert_eq!(
cache
.items
-.get(&Slot::new(0))
+.get(&ProposalKey {
+    slot: Slot::new(0),
+    proposer: 0
+})
.expect("slot zero should be present")
.len(),
1,
@ -182,7 +259,10 @@ mod tests {
assert_eq!(
cache
.items
-.get(&Slot::new(0))
+.get(&ProposalKey {
+    slot: Slot::new(0),
+    proposer: 0
+})
.expect("slot zero should be present")
.len(),
1,
@ -207,9 +287,12 @@ mod tests {
// First slot of finalized epoch, proposer 0
let block_b = get_block(E::slots_per_epoch(), 0);
let block_root_b = block_b.canonical_root();
assert_eq!(
-cache.observe_proposer(block_b.to_ref()),
+cache
+    .observe_proposal(block_root_b, block_b.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Err(Error::FinalizedBlock {
slot: E::slots_per_epoch().into(),
finalized_slot: E::slots_per_epoch().into(),
@ -229,7 +312,9 @@ mod tests {
let block_b = get_block(three_epochs, 0);
assert_eq!(
-cache.observe_proposer(block_b.to_ref()),
+cache
+    .observe_proposal(block_root_b, block_b.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(false),
"can insert non-finalized block"
);
@ -238,7 +323,10 @@ mod tests {
assert_eq!(
cache
.items
-.get(&Slot::new(three_epochs))
+.get(&ProposalKey {
+    slot: Slot::new(three_epochs),
+    proposer: 0
+})
.expect("the three epochs slot should be present")
.len(),
1,
@ -262,7 +350,10 @@ mod tests {
assert_eq!(
cache
.items
-.get(&Slot::new(three_epochs))
+.get(&ProposalKey {
+    slot: Slot::new(three_epochs),
+    proposer: 0
+})
.expect("the three epochs slot should be present")
.len(),
1,
@ -276,24 +367,33 @@ mod tests {
// Slot 0, proposer 0
let block_a = get_block(0, 0);
let block_root_a = block_a.canonical_root();
assert_eq!(
-cache.proposer_has_been_observed(block_a.to_ref()),
+cache
+    .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root())
+    .map(|x| x.proposer_previously_observed()),
Ok(false),
"no observation in empty cache"
);
assert_eq!(
-cache.observe_proposer(block_a.to_ref()),
+cache
+    .observe_proposal(block_root_a, block_a.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(false),
"can observe proposer, indicates proposer unobserved"
);
assert_eq!(
-cache.proposer_has_been_observed(block_a.to_ref()),
+cache
+    .proposer_has_been_observed(block_a.to_ref(), block_a.canonical_root())
+    .map(|x| x.proposer_previously_observed()),
Ok(true),
"observed block is indicated as true"
);
assert_eq!(
-cache.observe_proposer(block_a.to_ref()),
+cache
+    .observe_proposal(block_root_a, block_a.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(true),
"observing again indicates true"
);
@ -303,7 +403,10 @@ mod tests {
assert_eq!(
cache
.items
-.get(&Slot::new(0))
+.get(&ProposalKey {
+    slot: Slot::new(0),
+    proposer: 0
+})
.expect("slot zero should be present")
.len(),
1,
@ -312,24 +415,33 @@ mod tests {
// Slot 1, proposer 0
let block_b = get_block(1, 0);
let block_root_b = block_b.canonical_root();
assert_eq!(
-cache.proposer_has_been_observed(block_b.to_ref()),
+cache
+    .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root())
+    .map(|x| x.proposer_previously_observed()),
Ok(false),
"no observation for new slot"
);
assert_eq!(
-cache.observe_proposer(block_b.to_ref()),
+cache
+    .observe_proposal(block_root_b, block_b.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(false),
"can observe proposer for new slot, indicates proposer unobserved"
);
assert_eq!(
-cache.proposer_has_been_observed(block_b.to_ref()),
+cache
+    .proposer_has_been_observed(block_b.to_ref(), block_b.canonical_root())
+    .map(|x| x.proposer_previously_observed()),
Ok(true),
"observed block in slot 1 is indicated as true"
);
assert_eq!(
-cache.observe_proposer(block_b.to_ref()),
+cache
+    .observe_proposal(block_root_b, block_b.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(true),
"observing slot 1 again indicates true"
);
@ -339,7 +451,10 @@ mod tests {
assert_eq!(
cache
.items
-.get(&Slot::new(0))
+.get(&ProposalKey {
+    slot: Slot::new(0),
+    proposer: 0
+})
.expect("slot zero should be present")
.len(),
1,
@ -348,7 +463,10 @@ mod tests {
assert_eq!(
cache
.items
-.get(&Slot::new(1))
+.get(&ProposalKey {
+    slot: Slot::new(1),
+    proposer: 0
+})
.expect("slot zero should be present")
.len(),
1,
@ -357,45 +475,54 @@ mod tests {
// Slot 0, proposer 1
let block_c = get_block(0, 1);
let block_root_c = block_c.canonical_root();
assert_eq!(
-cache.proposer_has_been_observed(block_c.to_ref()),
+cache
+    .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root())
+    .map(|x| x.proposer_previously_observed()),
Ok(false),
"no observation for new proposer"
);
assert_eq!(
-cache.observe_proposer(block_c.to_ref()),
+cache
+    .observe_proposal(block_root_c, block_c.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(false),
"can observe new proposer, indicates proposer unobserved"
);
assert_eq!(
-cache.proposer_has_been_observed(block_c.to_ref()),
+cache
+    .proposer_has_been_observed(block_c.to_ref(), block_c.canonical_root())
+    .map(|x| x.proposer_previously_observed()),
Ok(true),
"observed new proposer block is indicated as true"
);
assert_eq!(
-cache.observe_proposer(block_c.to_ref()),
+cache
+    .observe_proposal(block_root_c, block_c.to_ref())
+    .map(SeenBlock::proposer_previously_observed),
Ok(true),
"observing new proposer again indicates true"
);
assert_eq!(cache.finalized_slot, 0, "finalized slot is zero");
-assert_eq!(cache.items.len(), 2, "two slots should be present");
+assert_eq!(cache.items.len(), 3, "three slots should be present");
assert_eq!(
cache
.items
-.get(&Slot::new(0))
-.expect("slot zero should be present")
-.len(),
+.iter()
+.filter(|(k, _)| k.slot == cache.finalized_slot)
+.count(),
2,
"two proposers should be present in slot 0"
);
assert_eq!(
cache
.items
-.get(&Slot::new(1))
-.expect("slot zero should be present")
-.len(),
+.iter()
+.filter(|(k, _)| k.slot == Slot::new(1))
+.count(),
1,
"only one proposer should be present in slot 1"
);


@ -808,6 +808,15 @@ where
state.get_block_root(slot).unwrap() == state.get_block_root(slot - 1).unwrap()
}
pub async fn make_blinded_block(
&self,
state: BeaconState<E>,
slot: Slot,
) -> (SignedBlindedBeaconBlock<E>, BeaconState<E>) {
let (unblinded, new_state) = self.make_block(state, slot).await;
(unblinded.into(), new_state)
}
/// Returns a newly created block, signed by the proposer for the given slot.
pub async fn make_block(
&self,
@ -820,9 +829,7 @@ where
complete_state_advance(&mut state, None, slot, &self.spec)
.expect("should be able to advance state to slot");
-state
-    .build_all_caches(&self.spec)
-    .expect("should build caches");
+state.build_caches(&self.spec).expect("should build caches");
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@ -906,9 +913,7 @@ where
complete_state_advance(&mut state, None, slot, &self.spec)
.expect("should be able to advance state to slot");
-state
-    .build_all_caches(&self.spec)
-    .expect("should build caches");
+state.build_caches(&self.spec).expect("should build caches");
let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();
@ -1626,6 +1631,36 @@ where
.sign(sk, &fork, genesis_validators_root, &self.chain.spec)
}
pub fn add_proposer_slashing(&self, validator_index: u64) -> Result<(), String> {
let proposer_slashing = self.make_proposer_slashing(validator_index);
if let ObservationOutcome::New(verified_proposer_slashing) = self
.chain
.verify_proposer_slashing_for_gossip(proposer_slashing)
.expect("should verify proposer slashing for gossip")
{
self.chain
.import_proposer_slashing(verified_proposer_slashing);
Ok(())
} else {
Err("should observe new proposer slashing".to_string())
}
}
pub fn add_attester_slashing(&self, validator_indices: Vec<u64>) -> Result<(), String> {
let attester_slashing = self.make_attester_slashing(validator_indices);
if let ObservationOutcome::New(verified_attester_slashing) = self
.chain
.verify_attester_slashing_for_gossip(attester_slashing)
.expect("should verify attester slashing for gossip")
{
self.chain
.import_attester_slashing(verified_attester_slashing);
Ok(())
} else {
Err("should observe new attester slashing".to_string())
}
}
pub fn add_bls_to_execution_change(
&self,
validator_index: u64,
@ -1804,7 +1839,7 @@ where
self.set_current_slot(slot);
let block_hash: SignedBeaconBlockHash = self
.chain
-.process_block(block_root, block.into(), NotifyExecutionLayer::Yes)
+.process_block(block_root, block.into(), NotifyExecutionLayer::Yes, || Ok(()))
.await?
.try_into()
.unwrap();
@ -1823,6 +1858,7 @@ where
wrapped_block.canonical_root(),
wrapped_block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await?
.try_into()


@ -459,6 +459,7 @@ async fn assert_invalid_signature(
chain_segment_blobs[block_index].clone(),
),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await;
assert!(
@ -526,6 +527,7 @@ async fn invalid_signature_gossip_block() {
signed_block.canonical_root(),
Arc::new(signed_block),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await,
Err(BlockError::InvalidSignature)
@ -859,6 +861,7 @@ async fn block_gossip_verification() {
gossip_verified.block_root,
gossip_verified,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.expect("should import valid gossip verified block");
@ -1069,11 +1072,7 @@ async fn block_gossip_verification() {
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
-BlockError::RepeatProposal {
-    proposer,
-    slot,
-}
-if proposer == other_proposer && slot == block.message().slot()
+BlockError::BlockIsAlreadyKnown,
),
"should register any valid signature against the proposer, even if the block failed later verification"
);
@ -1106,11 +1105,7 @@ async fn block_gossip_verification() {
.await
.err()
.expect("should error when processing known block"),
-BlockError::RepeatProposal {
-    proposer,
-    slot,
-}
-if proposer == block.message().proposer_index() && slot == block.message().slot()
+BlockError::BlockIsAlreadyKnown
),
"the second proposal by this validator should be rejected"
);
@ -1159,6 +1154,7 @@ async fn verify_block_for_gossip_slashing_detection() {
verified_block.block_root,
verified_block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap();
@ -1198,6 +1194,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
verified_block.block_root,
verified_block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap();
@ -1345,6 +1342,7 @@ async fn add_base_block_to_altair_chain() {
base_block.canonical_root(),
Arc::new(base_block.clone()),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.err()
@ -1479,6 +1477,7 @@ async fn add_altair_block_to_base_chain() {
altair_block.canonical_root(),
Arc::new(altair_block.clone()),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.err()


@ -133,13 +133,8 @@ async fn base_altair_merge_capella() {
for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() {
harness.extend_slots(1).await;
let block = &harness.chain.head_snapshot().beacon_block;
-let full_payload: FullPayload<E> = block
-    .message()
-    .body()
-    .execution_payload()
-    .unwrap()
-    .clone()
-    .into();
+let full_payload: FullPayload<E> =
+    block.message().body().execution_payload().unwrap().into();
// pre-capella shouldn't have withdrawals
assert!(full_payload.withdrawals_root().is_err());
execution_payloads.push(full_payload);
@ -151,13 +146,8 @@ async fn base_altair_merge_capella() {
for _ in 0..16 {
harness.extend_slots(1).await;
let block = &harness.chain.head_snapshot().beacon_block;
-let full_payload: FullPayload<E> = block
-    .message()
-    .body()
-    .execution_payload()
-    .unwrap()
-    .clone()
-    .into();
+let full_payload: FullPayload<E> =
+    block.message().body().execution_payload().unwrap().into();
// post-capella should have withdrawals
assert!(full_payload.withdrawals_root().is_ok());
execution_payloads.push(full_payload);


@ -698,6 +698,7 @@ async fn invalidates_all_descendants() {
fork_block.canonical_root(),
Arc::new(fork_block),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap()
@ -797,6 +798,7 @@ async fn switches_heads() {
fork_block.canonical_root(),
Arc::new(fork_block),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap()
@ -1055,7 +1057,9 @@ async fn invalid_parent() {
// Ensure the block built atop an invalid payload is invalid for import.
assert!(matches!(
-rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes).await,
+rig.harness.chain.process_block(block.canonical_root(), block.clone(), NotifyExecutionLayer::Yes,
+    || Ok(()),
+).await,
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
if invalid_root == parent_root
));
@ -1069,8 +1073,9 @@ async fn invalid_parent() {
Duration::from_secs(0),
&state,
PayloadVerificationStatus::Optimistic,
rig.harness.chain.config.progressive_balances_mode,
&rig.harness.chain.spec,
rig.harness.logger()
),
Err(ForkChoiceError::ProtoArrayStringError(message))
if message.contains(&format!(
@ -1341,7 +1346,12 @@ async fn build_optimistic_chain(
for block in blocks {
rig.harness
.chain
-.process_block(block.canonical_root(), block, NotifyExecutionLayer::Yes)
+.process_block(
+    block.canonical_root(),
+    block,
+    NotifyExecutionLayer::Yes,
+    || Ok(()),
+)
.await
.unwrap();
}
@ -1901,6 +1911,7 @@ async fn recover_from_invalid_head_by_importing_blocks() {
fork_block.canonical_root(),
fork_block.clone(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap();


@ -2178,6 +2178,7 @@ async fn weak_subjectivity_sync() {
full_block.canonical_root(),
BlockWrapper::new(Arc::new(full_block), blobs),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.await
.unwrap();


@ -686,6 +686,7 @@ async fn run_skip_slot_test(skip_slots: u64) {
harness_a.chain.head_snapshot().beacon_block_root,
harness_a.get_head_block(),
NotifyExecutionLayer::Yes,
|| Ok(())
)
.await
.unwrap();


@ -49,7 +49,7 @@ pub fn get_block_rewards<T: BeaconChainTypes>(
.map_err(beacon_chain_error)?;
state
-.build_all_caches(&chain.spec)
+.build_caches(&chain.spec)
.map_err(beacon_state_error)?;
let mut reward_cache = Default::default();


@ -32,7 +32,7 @@ use beacon_chain::{
pub use block_id::BlockId;
use directory::DEFAULT_ROOT_DIR;
use eth2::types::{
-self as api_types, EndpointVersion, ForkChoice, ForkChoiceNode, SignedBlockContents,
+self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, SignedBlockContents,
SkipRandaoVerification, ValidatorId, ValidatorStatus,
};
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
@ -41,7 +41,9 @@ use logging::SSELoggingComponents;
use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
use operation_pool::ReceivedPreCapella;
use parking_lot::RwLock;
-use publish_blocks::ProvenancedBlock;
+pub use publish_blocks::{
+    publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock,
+};
use serde::{Deserialize, Serialize};
use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock;
@ -325,6 +327,7 @@ pub fn serve<T: BeaconChainTypes>(
};
let eth_v1 = single_version(V1);
let eth_v2 = single_version(V2);
// Create a `warp` filter that provides access to the network globals.
let inner_network_globals = ctx.network_globals.clone();
@ -1223,16 +1226,59 @@ pub fn serve<T: BeaconChainTypes>(
log: Logger| async move {
publish_blocks::publish_block(
None,
-ProvenancedBlock::Local(block_contents),
+ProvenancedBlock::local(block_contents),
chain,
&network_tx,
log,
BroadcastValidation::default(),
)
.await
.map(|()| warp::reply().into_response())
},
);
let post_beacon_blocks_v2 = eth_v2
.and(warp::path("beacon"))
.and(warp::path("blocks"))
.and(warp::query::<api_types::BroadcastValidationQuery>())
.and(warp::path::end())
.and(warp::body::json())
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
match publish_blocks::publish_block(
None,
ProvenancedBlock::local(block_contents),
chain,
&network_tx,
log,
validation_level.broadcast_validation,
)
.await
{
Ok(()) => warp::reply().into_response(),
Err(e) => match warp_utils::reject::handle_rejection(e).await {
Ok(reply) => reply.into_response(),
Err(_) => warp::reply::with_status(
StatusCode::INTERNAL_SERVER_ERROR,
eth2::StatusCode::INTERNAL_SERVER_ERROR,
)
.into_response(),
},
}
},
);
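For reference, the v2 route above reads the validation level from a query parameter, defaulting to gossip-level checks. The wire values below are assumptions inferred from the `BroadcastValidation` variants used in this commit (`Gossip`, `Consensus`, `ConsensusAndEquivocation`), following the usual snake_case serialization of the beacon API:

// Hypothetical request shape for the endpoint added above:
// POST /eth/v2/beacon/blocks?broadcast_validation=consensus_and_equivocation
// with the same SignedBlockContents JSON body as the v1 endpoint.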
/*
* beacon/blocks
*/
// POST beacon/blinded_blocks
let post_beacon_blinded_blocks = eth_v1
.and(warp::path("beacon"))
@ -1247,9 +1293,52 @@ pub fn serve<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
-publish_blocks::publish_blinded_block(block, chain, &network_tx, log)
-    .await
-    .map(|()| warp::reply().into_response())
+publish_blocks::publish_blinded_block(
+    block,
+    chain,
+    &network_tx,
+    log,
+    BroadcastValidation::default(),
+)
+.await
+.map(|()| warp::reply().into_response())
},
);
let post_beacon_blinded_blocks_v2 = eth_v2
.and(warp::path("beacon"))
.and(warp::path("blinded_blocks"))
.and(warp::query::<api_types::BroadcastValidationQuery>())
.and(warp::path::end())
.and(warp::body::json())
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
|validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<_>>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
match publish_blocks::publish_blinded_block(
block_contents,
chain,
&network_tx,
log,
validation_level.broadcast_validation,
)
.await
{
Ok(()) => warp::reply().into_response(),
Err(e) => match warp_utils::reject::handle_rejection(e).await {
Ok(reply) => reply.into_response(),
Err(_) => warp::reply::with_status(
StatusCode::INTERNAL_SERVER_ERROR,
eth2::StatusCode::INTERNAL_SERVER_ERROR,
)
.into_response(),
},
}
},
);
@ -2369,24 +2458,41 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path("health"))
.and(warp::path::end())
.and(network_globals.clone())
-.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
-    blocking_response_task(move || match *network_globals.sync_state.read() {
-        SyncState::SyncingFinalized { .. }
-        | SyncState::SyncingHead { .. }
-        | SyncState::SyncTransition
-        | SyncState::BackFillSyncing { .. } => Ok(warp::reply::with_status(
-            warp::reply(),
-            warp::http::StatusCode::PARTIAL_CONTENT,
-        )),
-        SyncState::Synced => Ok(warp::reply::with_status(
-            warp::reply(),
-            warp::http::StatusCode::OK,
-        )),
-        SyncState::Stalled => Err(warp_utils::reject::not_synced(
-            "sync stalled, beacon chain may not yet be initialized.".to_string(),
-        )),
-    })
-});
+.and(chain_filter.clone())
+.and_then(
+    |network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
+        async move {
+            let el_offline = if let Some(el) = &chain.execution_layer {
+                el.is_offline_or_erroring().await
+            } else {
+                true
+            };
+            blocking_response_task(move || {
+                let is_optimistic = chain
+                    .is_optimistic_or_invalid_head()
+                    .map_err(warp_utils::reject::beacon_chain_error)?;
+                let is_syncing = !network_globals.sync_state.read().is_synced();
+                if el_offline {
+                    Err(warp_utils::reject::not_synced("execution layer is offline".to_string()))
+                } else if is_syncing || is_optimistic {
+                    Ok(warp::reply::with_status(
+                        warp::reply(),
+                        warp::http::StatusCode::PARTIAL_CONTENT,
+                    ))
+                } else {
+                    Ok(warp::reply::with_status(
+                        warp::reply(),
+                        warp::http::StatusCode::OK,
+                    ))
+                }
+            })
+            .await
+        }
+    },
+);
// GET node/peers/{peer_id}
let get_node_peers_by_id = eth_v1
@ -3866,6 +3972,8 @@ pub fn serve<T: BeaconChainTypes>(
warp::post().and(
post_beacon_blocks
.uor(post_beacon_blinded_blocks)
.uor(post_beacon_blocks_v2)
.uor(post_beacon_blinded_blocks_v2)
.uor(post_beacon_pool_attestations)
.uor(post_beacon_pool_attester_slashings)
.uor(post_beacon_pool_proposer_slashings)


@ -2,9 +2,12 @@ use crate::metrics;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
-use beacon_chain::{AvailabilityProcessingStatus, NotifyExecutionLayer};
-use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError};
+use beacon_chain::{
+    BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, GossipVerifiedBlock,
+    NotifyExecutionLayer, AvailabilityProcessingStatus
+};
use eth2::types::SignedBlockContents;
use eth2::types::BroadcastValidation;
use execution_layer::ProvenancedPayload;
use lighthouse_network::PubsubMessage;
use network::NetworkMessage;
@ -29,6 +32,16 @@ pub enum ProvenancedBlock<T: EthSpec> {
Builder(SignedBlockContents<T, FullPayload<T>>),
}
impl<T: EthSpec> ProvenancedBlock<T> {
pub fn local(block: Arc<SignedBeaconBlock<T>>) -> Self {
Self::Local(block)
}
pub fn builder(block: Arc<SignedBeaconBlock<T>>) -> Self {
Self::Builder(block)
}
}
/// Handles a request from the HTTP API for full blocks.
pub async fn publish_block<T: BeaconChainTypes>(
block_root: Option<Hash256>,
@ -36,6 +49,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
validation_level: BroadcastValidation,
) -> Result<(), Rejection> {
let seen_timestamp = timestamp_now();
let (block, maybe_blobs, is_locally_built_block) = match provenanced_block {
@ -48,17 +62,24 @@ pub async fn publish_block<T: BeaconChainTypes>(
(Arc::new(block), maybe_blobs, false)
}
};
-let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
-//FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message.
+let beacon_block = block.clone();
+let delay = get_block_delay_ms(seen_timestamp, beacon_block.message(), &chain.slot_clock);
+//FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message.
//this may skew metrics
-let block_root = block_root.unwrap_or_else(|| block.canonical_root());
-debug!(
-    log,
-    "Signed block published to HTTP API";
-    "slot" => block.slot()
-);
+debug!(log, "Signed block received in HTTP API"; "slot" => beacon_block.slot());
/* actually publish a block */
let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
sender,
log,
seen_timestamp| {
let publish_timestamp = timestamp_now();
let publish_delay = publish_timestamp
.checked_sub(seen_timestamp)
.unwrap_or_else(|| Duration::from_secs(0));
info!(log, "Signed block published to network via HTTP API"; "slot" => block.slot(), "publish_delay" => ?publish_delay);
// Send the block, regardless of whether or not it is valid. The API
// specification is very clear that this is the desired behaviour.
let wrapped_block: BlockWrapper<T::EthSpec> = match block.as_ref() {
@ -88,13 +109,70 @@ pub async fn publish_block<T: BeaconChainTypes>(
}
}
};
// Determine the delay after the start of the slot, register it with metrics.
let message = PubsubMessage::BeaconBlock(block);
crate::publish_pubsub_message(&sender, message)
.map_err(|_| BeaconChainError::UnableToPublish.into())
};
/* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */
let gossip_verified_block = GossipVerifiedBlock::new(block, &chain).map_err(|e| {
warn!(log, "Not publishing block, not gossip verified"; "slot" => beacon_block.slot(), "error" => ?e);
warp_utils::reject::custom_bad_request(e.to_string())
})?;
let block_root = block_root.unwrap_or(gossip_verified_block.block_root);
if let BroadcastValidation::Gossip = validation_level {
publish_block(
beacon_block.clone(),
network_tx.clone(),
log.clone(),
seen_timestamp,
)
.map_err(|_| warp_utils::reject::custom_server_error("unable to publish".into()))?;
}
/* only publish if gossip- and consensus-valid and equivocation-free */
let chain_clone = chain.clone();
let block_clone = beacon_block.clone();
let log_clone = log.clone();
let sender_clone = network_tx.clone();
let publish_fn = move || match validation_level {
BroadcastValidation::Gossip => Ok(()),
BroadcastValidation::Consensus => {
publish_block(block_clone, sender_clone, log_clone, seen_timestamp)
}
BroadcastValidation::ConsensusAndEquivocation => {
if chain_clone
.observed_block_producers
.read()
.proposer_has_been_observed(block_clone.message(), block_root)
.map_err(|e| BlockError::BeaconChainError(e.into()))?
.is_slashable()
{
warn!(
log_clone,
"Not publishing equivocating block";
"slot" => block_clone.slot()
);
Err(BlockError::Slashable)
} else {
publish_block(block_clone, sender_clone, log_clone, seen_timestamp)
}
}
};
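In short: `Gossip` publishes as soon as the block is gossip-verified (handled just above), `Consensus` defers publication until `process_block` has fully verified the block, and `ConsensusAndEquivocation` additionally re-checks the observed-proposers cache and refuses to publish a slashable second proposal for the same slot and proposer.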
let block_clone = wrapped_block.block_cloned();
let slot = block_clone.message().slot();
let proposer_index = block_clone.message().proposer_index();
match chain
-.process_block(block_root, wrapped_block, NotifyExecutionLayer::Yes)
+.process_block(
+    block_root,
+    gossip_verified_block,
+    NotifyExecutionLayer::Yes,
+    publish_fn,
+)
.await
{
Ok(AvailabilityProcessingStatus::Imported(root)) => {
@ -144,35 +222,32 @@ pub async fn publish_block<T: BeaconChainTypes>(
);
Err(warp_utils::reject::broadcast_without_import(msg))
}
-Err(BlockError::BlockIsAlreadyKnown) => {
-    info!(
-        log,
-        "Block from HTTP API already known";
-        "block" => ?block_root,
-        "slot" => slot,
-    );
-    Ok(())
+Err(BlockError::BeaconChainError(BeaconChainError::UnableToPublish)) => {
+    Err(warp_utils::reject::custom_server_error(
+        "unable to publish to network channel".to_string(),
+    ))
+}
-Err(BlockError::RepeatProposal { proposer, slot }) => {
-    warn!(
-        log,
-        "Block ignored due to repeat proposal";
-        "msg" => "this can happen when a VC uses fallback BNs. \
-        whilst this is not necessarily an error, it can indicate issues with a BN \
-        or between the VC and BN.",
-        "slot" => slot,
-        "proposer" => proposer,
-    );
+Err(BlockError::Slashable) => Err(warp_utils::reject::custom_bad_request(
+    "proposal for this slot and proposer has already been seen".to_string(),
+)),
+Err(BlockError::BlockIsAlreadyKnown) => {
+    info!(log, "Block from HTTP API already known"; "block" => ?block_root);
+    Ok(())
}
Err(e) => {
let msg = format!("{:?}", e);
error!(
log,
"Invalid block provided to HTTP API";
"reason" => &msg
);
Err(warp_utils::reject::broadcast_without_import(msg))
if let BroadcastValidation::Gossip = validation_level {
Err(warp_utils::reject::broadcast_without_import(format!("{e}")))
} else {
let msg = format!("{:?}", e);
error!(
log,
"Invalid block provided to HTTP API";
"reason" => &msg
);
Err(warp_utils::reject::custom_bad_request(format!(
"Invalid block: {e}"
)))
}
}
}
}
@ -184,21 +259,31 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
validation_level: BroadcastValidation,
) -> Result<(), Rejection> {
let block_root = block.canonical_root();
-let full_block = reconstruct_block(chain.clone(), block_root, block, log.clone()).await?;
-publish_block::<T>(Some(block_root), full_block, chain, network_tx, log).await
+let full_block: ProvenancedBlock<T> =
+    reconstruct_block(chain.clone(), block_root, block, log.clone()).await?;
+publish_block::<T>(
+    Some(block_root),
+    full_block,
+    chain,
+    network_tx,
+    log,
+    validation_level,
+)
+.await
}
/// Deconstruct the given blinded block, and construct a full block. This attempts to use the
/// execution layer's payload cache, and if that misses, attempts a blind block proposal to retrieve
/// the full payload.
-async fn reconstruct_block<T: BeaconChainTypes>(
+pub async fn reconstruct_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
block_root: Hash256,
block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
log: Logger,
-) -> Result<ProvenancedBlock<T::EthSpec>, Rejection> {
+) -> Result<ProvenancedBlock<T>, Rejection> {
let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() {
let el = chain.execution_layer.as_ref().ok_or_else(|| {
warp_utils::reject::custom_server_error("Missing execution layer".to_string())
@ -264,15 +349,15 @@ async fn reconstruct_block<T: BeaconChainTypes>(
None => block
.try_into_full_block(None)
.map(SignedBlockContents::Block)
-.map(ProvenancedBlock::Local),
+.map(ProvenancedBlock::local),
Some(ProvenancedPayload::Local(full_payload)) => block
.try_into_full_block(Some(full_payload))
.map(SignedBlockContents::Block)
-.map(ProvenancedBlock::Local),
+.map(ProvenancedBlock::local),
Some(ProvenancedPayload::Builder(full_payload)) => block
.try_into_full_block(Some(full_payload))
.map(SignedBlockContents::Block)
-.map(ProvenancedBlock::Builder),
+.map(ProvenancedBlock::builder),
}
.ok_or_else(|| {
warp_utils::reject::custom_server_error("Unable to add payload to block".to_string())

File diff suppressed because it is too large


@ -1,5 +1,6 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug.
pub mod broadcast_validation_tests;
pub mod fork_tests;
pub mod interactive_tests;
pub mod status_tests;


@ -3,6 +3,7 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
BlockError,
};
use eth2::StatusCode;
use execution_layer::{PayloadStatusV1, PayloadStatusV1Status};
use http_api::test_utils::InteractiveTester;
use types::{EthSpec, ExecPayload, ForkName, MinimalEthSpec, Slot};
@ -149,3 +150,82 @@ async fn el_error_on_new_payload() {
assert_eq!(api_response.is_optimistic, Some(false));
assert_eq!(api_response.is_syncing, false);
}
/// Check `node health` endpoint when the EL is offline.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_offline() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL offline
mock_el.server.set_syncing_response(Err("offline".into()));
mock_el.el.upcheck().await;
let status = tester.client.get_node_health().await;
match status {
Ok(_) => {
panic!("should return 503 error status code");
}
Err(e) => {
assert_eq!(e.status().unwrap(), 503);
}
}
}
/// Check `node health` endpoint when the EL is online and synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_synced() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL synced
mock_el.server.set_syncing_response(Ok(false));
mock_el.el.upcheck().await;
let status = tester.client.get_node_health().await;
match status {
Ok(response) => {
assert_eq!(response, StatusCode::OK);
}
Err(_) => {
panic!("should return 200 status code");
}
}
}
/// Check `node health` endpoint when the EL is online but not synced.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_health_el_online_and_not_synced() {
let num_blocks = E::slots_per_epoch() / 2;
let num_validators = E::slots_per_epoch();
let tester = post_merge_tester(num_blocks, num_validators).await;
let harness = &tester.harness;
let mock_el = harness.mock_execution_layer.as_ref().unwrap();
// EL not synced
harness.advance_slot();
mock_el.server.all_payloads_syncing(true);
harness
.extend_chain(
1,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
let status = tester.client.get_node_health().await;
match status {
Ok(response) => {
assert_eq!(response, StatusCode::PARTIAL_CONTENT);
}
Err(_) => {
panic!("should return 206 status code");
}
}
}


@ -8,7 +8,7 @@ use eth2::{
mixin::{RequestAccept, ResponseForkName, ResponseOptional},
reqwest::RequestBuilder,
types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *},
-BeaconNodeHttpClient, Error, StatusCode, Timeouts,
+BeaconNodeHttpClient, Error, Timeouts,
};
use execution_layer::test_utils::TestingBuilder;
use execution_layer::test_utils::DEFAULT_BUILDER_THRESHOLD_WEI;
@ -160,7 +160,7 @@ impl ApiTester {
// `make_block` adds random graffiti, so this will produce an alternate block
let (reorg_block, _reorg_state) = harness
-.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
+.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1)
.await;
let reorg_block = SignedBlockContents::from(reorg_block);
@ -1252,18 +1252,23 @@ impl ApiTester {
}
pub async fn test_post_beacon_blocks_invalid(mut self) -> Self {
-let mut next_block = self.next_block.clone().deconstruct().0;
-*next_block.message_mut().proposer_index_mut() += 1;
-assert!(self
-    .client
-    .post_beacon_blocks(&SignedBlockContents::from(next_block))
+let block = self
+    .harness
+    .make_block_with_modifier(
+        self.harness.get_current_state(),
+        self.harness.get_current_slot(),
+        |b| {
+            *b.state_root_mut() = Hash256::zero();
+        },
+    )
    .await
-    .is_err());
+    .0;
+assert!(self.client.post_beacon_blocks(&SignedBlockContents::from(block)).await.is_err());
assert!(
self.network_rx.network_recv.recv().await.is_some(),
"invalid blocks should be sent to network"
"gossip valid blocks should be sent to network"
);
self
@ -1761,9 +1766,15 @@ impl ApiTester {
}
pub async fn test_get_node_health(self) -> Self {
-let status = self.client.get_node_health().await.unwrap();
-assert_eq!(status, StatusCode::OK);
+let status = self.client.get_node_health().await;
+match status {
+    Ok(_) => {
+        panic!("should return 503 error status code");
+    }
+    Err(e) => {
+        assert_eq!(e.status().unwrap(), 503);
+    }
+}
self
}
@ -4142,7 +4153,7 @@ impl ApiTester {
.unwrap();
let expected_reorg = EventKind::ChainReorg(SseChainReorg {
-slot: self.next_block.signed_block().slot(),
+slot: self.reorg_block.slot(),
depth: 1,
old_head_block: self.next_block.signed_block().canonical_root(),
old_head_state: self.next_block.signed_block().state_root(),
@ -4156,6 +4167,8 @@ impl ApiTester {
execution_optimistic: false,
});
self.harness.advance_slot();
self.client
.post_beacon_blocks(&self.reorg_block)
.await


@ -836,6 +836,24 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
}
}
pub struct BeaconProcessorSend<T: BeaconChainTypes>(pub mpsc::Sender<WorkEvent<T>>);
impl<T: BeaconChainTypes> BeaconProcessorSend<T> {
pub fn try_send(&self, message: WorkEvent<T>) -> Result<(), Box<TrySendError<WorkEvent<T>>>> {
let work_type = message.work_type();
match self.0.try_send(message) {
Ok(res) => Ok(res),
Err(e) => {
metrics::inc_counter_vec(
&metrics::BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE,
&[work_type],
);
Err(Box::new(e))
}
}
}
}
/// A consensus message (or multiple) from the network that requires processing.
#[derive(Derivative)]
#[derivative(Debug(bound = "T: BeaconChainTypes"))]


@ -929,6 +929,20 @@ impl<T: BeaconChainTypes> Worker<T> {
verified_block
}
Err(e @ BlockError::Slashable) => {
warn!(
self.log,
"Received equivocating block from peer";
"error" => ?e
);
// Punish the peer for submitting an equivocation, but not too harshly, as
// honest peers may conceivably forward equivocating blocks to us from time to time.
self.gossip_penalize_peer(
peer_id,
PeerAction::MidToleranceError,
"gossip_block_mid",
);
return None;
}
Err(BlockError::ParentUnknown(block)) => {
debug!(
self.log,
@ -950,7 +964,6 @@ impl<T: BeaconChainTypes> Worker<T> {
Err(e @ BlockError::FutureSlot { .. })
| Err(e @ BlockError::WouldRevertFinalizedSlot { .. })
| Err(e @ BlockError::BlockIsAlreadyKnown)
-| Err(e @ BlockError::RepeatProposal { .. })
| Err(e @ BlockError::NotFinalizedDescendant { .. }) => {
debug!(self.log, "Could not verify block for gossip. Ignoring the block";
"error" => %e);
@ -1103,7 +1116,12 @@ impl<T: BeaconChainTypes> Worker<T> {
let result = self
.chain
-.process_block(block_root, verified_block, NotifyExecutionLayer::Yes)
+.process_block(
+    block_root,
+    verified_block,
+    NotifyExecutionLayer::Yes,
+    || Ok(()),
+)
.await;
match &result {


@ -103,33 +103,21 @@ impl<T: BeaconChainTypes> Worker<T> {
});
// Checks if a block from this proposer is already known.
-let proposal_already_known = || {
+let block_equivocates = || {
match self
.chain
.observed_block_producers
.read()
-.proposer_has_been_observed(block.message())
+.proposer_has_been_observed(block.message(), block.canonical_root())
{
-Ok(is_observed) => is_observed,
-// Both of these blocks will be rejected, so reject them now rather
+Ok(seen_status) => seen_status.is_slashable(),
+// Both of these blocks will be rejected, so reject them now rather
// than re-queuing them.
Err(ObserveError::FinalizedBlock { .. })
| Err(ObserveError::ValidatorIndexTooHigh { .. }) => false,
}
};
-// Returns `true` if the block is already known to fork choice. Notably,
-// this will return `false` for blocks that we've already imported but
-// ancestors of the finalized checkpoint. That should not be an issue
-// for our use here since finalized blocks will always be late and won't
-// be requeued anyway.
-let block_is_already_known = || {
-    self.chain
-        .canonical_head
-        .fork_choice_read_lock()
-        .contains_block(&block_root)
-};
// If we've already seen a block from this proposer *and* the block
// arrived before the attestation deadline, requeue it to ensure it is
// imported late enough that it won't receive a proposer boost.
@ -137,7 +125,7 @@ impl<T: BeaconChainTypes> Worker<T> {
// Don't requeue blocks if they're already known to fork choice, just
// push them through to block processing so they can be handled through
// the normal channels.
-if !block_is_late && proposal_already_known() && !block_is_already_known() {
+if !block_is_late && block_equivocates() {
debug!(
self.log,
"Delaying processing of duplicate RPC block";
@ -171,7 +159,7 @@ impl<T: BeaconChainTypes> Worker<T> {
let result = self
.chain
-.process_block(block_root, block, NotifyExecutionLayer::Yes)
+.process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(()))
.await;
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);


@ -301,6 +301,12 @@ lazy_static! {
"Gossipsub light_client_optimistic_update errors per error type",
&["type"]
);
pub static ref BEACON_PROCESSOR_SEND_ERROR_PER_WORK_TYPE: Result<IntCounterVec> =
try_create_int_counter_vec(
"beacon_processor_send_error_per_work_type",
"Total number of beacon processor send error per work type",
&["type"]
);
/*


@ -6,7 +6,8 @@
#![allow(clippy::unit_arg)]
use crate::beacon_processor::{
-BeaconProcessor, InvalidBlockStorage, WorkEvent as BeaconWorkEvent, MAX_WORK_EVENT_QUEUE_LEN,
+BeaconProcessor, BeaconProcessorSend, InvalidBlockStorage, WorkEvent as BeaconWorkEvent,
+MAX_WORK_EVENT_QUEUE_LEN,
};
use crate::error;
use crate::service::{NetworkMessage, RequestId};
@ -14,11 +15,14 @@ use crate::status::status_message;
use crate::sync::manager::RequestId as SyncId;
use crate::sync::SyncMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes};
-use futures::{future, StreamExt};
-use lighthouse_network::{rpc::*, PubsubMessage};
-use lighthouse_network::{MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response};
-use slog::{debug, error, o, trace, warn};
+use futures::prelude::*;
+use lighthouse_network::rpc::*;
+use lighthouse_network::{
+    MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response,
+};
+use logging::TimeLatch;
+use slog::{debug, o, trace};
+use slog::{error, warn};
use std::cmp;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
@ -37,9 +41,11 @@ pub struct Router<T: BeaconChainTypes> {
/// A network context to return and handle RPC requests.
network: HandlerNetworkContext<T::EthSpec>,
/// A multi-threaded, non-blocking processor for applying messages to the beacon chain.
-beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>,
+beacon_processor_send: BeaconProcessorSend<T>,
/// The `Router` logger.
log: slog::Logger,
/// Provides de-bounce functionality for logging.
logger_debounce: TimeLatch,
}
/// Types of messages the router can receive.
@ -99,7 +105,7 @@ impl<T: BeaconChainTypes> Router<T> {
beacon_chain.clone(),
network_globals.clone(),
network_send.clone(),
-beacon_processor_send.clone(),
+BeaconProcessorSend(beacon_processor_send.clone()),
sync_logger,
);
@ -123,8 +129,9 @@ impl<T: BeaconChainTypes> Router<T> {
chain: beacon_chain,
sync_send,
network: HandlerNetworkContext::new(network_send, log.clone()),
-beacon_processor_send,
+beacon_processor_send: BeaconProcessorSend(beacon_processor_send),
log: message_handler_log,
logger_debounce: TimeLatch::default(),
};
// spawn handler task and move the message handler instance into the spawned thread
@ -570,12 +577,15 @@ impl<T: BeaconChainTypes> Router<T> {
self.beacon_processor_send
.try_send(work)
.unwrap_or_else(|e| {
-let work_type = match &e {
+let work_type = match &*e {
    mpsc::error::TrySendError::Closed(work)
    | mpsc::error::TrySendError::Full(work) => work.work_type(),
};
-error!(&self.log, "Unable to send message to the beacon processor";
-    "error" => %e, "type" => work_type)
+if self.logger_debounce.elapsed() {
+    error!(&self.log, "Unable to send message to the beacon processor";
+        "error" => %e, "type" => work_type)
+}
})
}
}

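The debounce guard above prevents a flood of identical error logs when the beacon processor channel is full or closed. A minimal, self-contained sketch of that pattern, assuming a simplified `TimeLatch` (the real one lives in the `logging` crate):

use std::time::{Duration, Instant};

/// Simplified stand-in for `logging::TimeLatch`: `elapsed` returns `true` at
/// most once per interval, so repeated failures produce a single log line.
struct TimeLatch {
    last: Option<Instant>,
    interval: Duration,
}

impl TimeLatch {
    fn new(interval: Duration) -> Self {
        Self { last: None, interval }
    }

    fn elapsed(&mut self) -> bool {
        let now = Instant::now();
        if self.last.map_or(true, |t| now.duration_since(t) >= self.interval) {
            self.last = Some(now);
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut latch = TimeLatch::new(Duration::from_secs(30));
    for _ in 0..3 {
        if latch.elapsed() {
            // Fires once; the two follow-up attempts within 30s are suppressed.
            eprintln!("Unable to send message to the beacon processor");
        }
    }
}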
View File

@ -1,6 +1,7 @@
#![cfg(feature = "spec-minimal")]
use std::sync::Arc;
use crate::beacon_processor::BeaconProcessorSend;
use crate::service::RequestId;
use crate::sync::manager::RequestId as SyncId;
use crate::NetworkMessage;
@ -80,7 +81,7 @@ impl TestRig {
SyncNetworkContext::new(
network_tx,
globals,
beacon_processor_tx,
BeaconProcessorSend(beacon_processor_tx),
chain,
log.new(slog::o!("component" => "network_context")),
)

View File

@ -38,7 +38,7 @@ use super::block_lookups::{BlockLookups, PeerShouldHave};
use super::network_context::{BlockOrBlob, SyncNetworkContext};
use super::peer_sync_info::{remote_sync_type, PeerSyncType};
use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
use crate::beacon_processor::{BeaconProcessorSend, ChainSegmentProcessId};
use crate::service::NetworkMessage;
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::delayed_lookup;
@ -238,7 +238,7 @@ pub fn spawn<T: BeaconChainTypes>(
beacon_chain: Arc<BeaconChain<T>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
beacon_processor_send: mpsc::Sender<BeaconWorkEvent<T>>,
beacon_processor_send: BeaconProcessorSend<T>,
log: slog::Logger,
) -> mpsc::UnboundedSender<SyncMessage<T::EthSpec>> {
assert!(

View File

@ -4,7 +4,7 @@
use super::block_sidecar_coupling::BlocksAndBlobsRequestInfo;
use super::manager::{Id, RequestId as SyncRequestId};
use super::range_sync::{BatchId, ByRangeRequestType, ChainId};
use crate::beacon_processor::WorkEvent;
use crate::beacon_processor::BeaconProcessorSend;
use crate::service::{NetworkMessage, RequestId};
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::{BlobRequestId, BlockRequestId};
@ -60,7 +60,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
execution_engine_state: EngineState,
/// Channel to send work to the beacon processor.
beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
beacon_processor_send: BeaconProcessorSend<T>,
pub chain: Arc<BeaconChain<T>>,
@ -90,7 +90,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
pub fn new(
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
beacon_processor_send: BeaconProcessorSend<T>,
chain: Arc<BeaconChain<T>>,
log: slog::Logger,
) -> Self {
@ -564,12 +564,12 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
})
}
pub fn processor_channel_if_enabled(&self) -> Option<&mpsc::Sender<WorkEvent<T>>> {
pub fn processor_channel_if_enabled(&self) -> Option<&BeaconProcessorSend<T>> {
self.is_execution_engine_online()
.then_some(&self.beacon_processor_send)
}
pub fn processor_channel(&self) -> &mpsc::Sender<WorkEvent<T>> {
pub fn processor_channel(&self) -> &BeaconProcessorSend<T> {
&self.beacon_processor_send
}

View File

@ -381,7 +381,7 @@ where
mod tests {
use super::*;
use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
use crate::beacon_processor::{BeaconProcessorSend, WorkEvent as BeaconWorkEvent};
use crate::service::RequestId;
use crate::NetworkMessage;
use beacon_chain::{
@ -610,7 +610,7 @@ mod tests {
let cx = SyncNetworkContext::new(
network_tx,
globals.clone(),
beacon_processor_tx,
BeaconProcessorSend(beacon_processor_tx),
chain,
log.new(o!("component" => "network_context")),
);

View File

@ -1,5 +1,6 @@
use clap::{App, Arg};
use strum::VariantNames;
use types::ProgressiveBalancesMode;
pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
App::new("beacon_node")
@ -1159,4 +1160,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
developers. This directory is not pruned, users should be careful to avoid \
filling up their disks.")
)
.arg(
Arg::with_name("progressive-balances")
.long("progressive-balances")
.value_name("MODE")
.help("Options to enable or disable the progressive balances cache for \
unrealized FFG progression calculation. The default `checked` mode compares \
the progressive balances from the cache against results from the existing \
method. If there is a mismatch, it falls back to the existing method. The \
optimized mode (`fast`) is faster but is still experimental, and is \
not recommended for mainnet usage at this time.")
.takes_value(true)
.possible_values(ProgressiveBalancesMode::VARIANTS)
)
}

View File

@ -837,6 +837,12 @@ pub fn get_config<E: EthSpec>(
client_config.network.invalid_block_storage = Some(path);
}
if let Some(progressive_balances_mode) =
clap_utils::parse_optional(cli_args, "progressive-balances")?
{
client_config.chain.progressive_balances_mode = progressive_balances_mode;
}
Ok(client_config)
}

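Because `ProgressiveBalancesMode` derives `EnumString` and `EnumVariantNames` with lowercase serialization (see the `types` changes below), both the clap `possible_values` list and `clap_utils::parse_optional` reduce to plain string parsing. A hedged sketch of that round trip:

use std::str::FromStr;
use strum::VariantNames;
use types::ProgressiveBalancesMode;

fn main() {
    // The values advertised via `.possible_values(ProgressiveBalancesMode::VARIANTS)`.
    assert!(ProgressiveBalancesMode::VARIANTS.contains(&"checked"));
    // The parse that `clap_utils::parse_optional` ultimately performs.
    assert_eq!(
        ProgressiveBalancesMode::from_str("fast"),
        Ok(ProgressiveBalancesMode::Fast)
    );
}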
View File

@ -395,6 +395,7 @@ macro_rules! impl_try_into_beacon_state {
// Caching
total_active_balance: <_>::default(),
progressive_balances_cache: <_>::default(),
committee_caches: <_>::default(),
pubkey_cache: <_>::default(),
exit_cache: <_>::default(),

View File

@ -63,7 +63,7 @@ where
.load_cold_state_by_slot(lower_limit_slot)?
.ok_or(HotColdDBError::MissingLowerLimitState(lower_limit_slot))?;
state.build_all_caches(&self.spec)?;
state.build_caches(&self.spec)?;
process_results(block_root_iter, |iter| -> Result<(), Error> {
let mut io_batch = vec![];

View File

@ -1,6 +1,6 @@
[package]
name = "boot_node"
version = "4.2.0"
version = "4.3.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"

View File

@ -322,6 +322,26 @@ impl BeaconNodeHttpClient {
ok_or_error(response).await
}
/// Generic POST function supporting arbitrary responses and timeouts.
async fn post_generic_with_consensus_version<T: Serialize, U: IntoUrl>(
&self,
url: U,
body: &T,
timeout: Option<Duration>,
fork: ForkName,
) -> Result<Response, Error> {
let mut builder = self.client.post(url);
if let Some(timeout) = timeout {
builder = builder.timeout(timeout);
}
let response = builder
.header(CONSENSUS_VERSION_HEADER, fork.to_string())
.json(body)
.send()
.await?;
ok_or_error(response).await
}
/// `GET beacon/genesis`
///
/// ## Errors
@ -654,6 +674,76 @@ impl BeaconNodeHttpClient {
Ok(())
}
pub fn post_beacon_blocks_v2_path(
&self,
validation_level: Option<BroadcastValidation>,
) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;
path.path_segments_mut()
.map_err(|_| Error::InvalidUrl(self.server.clone()))?
.extend(&["beacon", "blocks"]);
path.set_query(
validation_level
.map(|v| format!("broadcast_validation={}", v))
.as_deref(),
);
Ok(path)
}
pub fn post_beacon_blinded_blocks_v2_path(
&self,
validation_level: Option<BroadcastValidation>,
) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;
path.path_segments_mut()
.map_err(|_| Error::InvalidUrl(self.server.clone()))?
.extend(&["beacon", "blinded_blocks"]);
path.set_query(
validation_level
.map(|v| format!("broadcast_validation={}", v))
.as_deref(),
);
Ok(path)
}
/// `POST v2/beacon/blocks`
pub async fn post_beacon_blocks_v2<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBeaconBlock<T, Payload>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
self.post_beacon_blocks_v2_path(validation_level)?,
block,
Some(self.timeouts.proposal),
block.message().body().fork_name(),
)
.await?;
Ok(())
}
/// `POST v2/beacon/blinded_blocks`
pub async fn post_beacon_blinded_blocks_v2<T: EthSpec>(
&self,
block: &SignedBlindedBeaconBlock<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
self.post_beacon_blinded_blocks_v2_path(validation_level)?,
block,
Some(self.timeouts.proposal),
block.message().body().fork_name(),
)
.await?;
Ok(())
}
/// Path for `v2/beacon/blocks`
pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;

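A hedged usage sketch of the new publish wrapper; the client, block, async runtime, and the exact module paths are assumed from the caller's context:

use eth2::{BeaconNodeHttpClient, Error};
use eth2::types::{BroadcastValidation, MainnetEthSpec, SignedBeaconBlock};

// Publish a block, asking the node for full consensus checks plus
// equivocation detection before it broadcasts on gossip.
async fn publish_with_strict_validation(
    client: &BeaconNodeHttpClient,
    block: &SignedBeaconBlock<MainnetEthSpec>,
) -> Result<(), Error> {
    client
        .post_beacon_blocks_v2(block, Some(BroadcastValidation::ConsensusAndEquivocation))
        .await
}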
View File

@ -7,7 +7,7 @@ use mediatype::{names, MediaType, MediaTypeList};
use serde::{Deserialize, Serialize};
use ssz_derive::Encode;
use std::convert::TryFrom;
use std::fmt;
use std::fmt::{self, Display};
use std::str::{from_utf8, FromStr};
use std::time::Duration;
pub use types::*;
@ -1261,6 +1261,50 @@ pub struct ForkChoiceNode {
pub execution_block_hash: Option<Hash256>,
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum BroadcastValidation {
Gossip,
Consensus,
ConsensusAndEquivocation,
}
impl Default for BroadcastValidation {
fn default() -> Self {
Self::Gossip
}
}
impl Display for BroadcastValidation {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Gossip => write!(f, "gossip"),
Self::Consensus => write!(f, "consensus"),
Self::ConsensusAndEquivocation => write!(f, "consensus_and_equivocation"),
}
}
}
impl FromStr for BroadcastValidation {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"gossip" => Ok(Self::Gossip),
"consensus" => Ok(Self::Consensus),
"consensus_and_equivocation" => Ok(Self::ConsensusAndEquivocation),
_ => Err("Invalid broadcast validation level"),
}
}
}
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub struct BroadcastValidationQuery {
#[serde(default)]
pub broadcast_validation: BroadcastValidation,
}
#[cfg(test)]
mod tests {
use super::*;

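A small doctest-style sketch of the `Display`/`FromStr` pair above, which is what keeps the query string (`broadcast_validation=...`) and the parsed query in sync (assumes the enum is in scope):

use std::str::FromStr;

fn main() {
    let v = BroadcastValidation::ConsensusAndEquivocation;
    // `Display` produces the snake_case wire form used in the query string...
    assert_eq!(v.to_string(), "consensus_and_equivocation");
    // ...and `FromStr` accepts the same form back.
    assert_eq!(
        BroadcastValidation::from_str("gossip"),
        Ok(BroadcastValidation::Gossip)
    );
    // Omitting the query parameter falls back to the least strict level.
    assert_eq!(BroadcastValidation::default(), BroadcastValidation::Gossip);
}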
View File

@ -1,6 +1,8 @@
# Lighthouse Team (Sigma Prime)
- enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo
- enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo
- enr:-Le4QPUXJS2BTORXxyx2Ia-9ae4YqA_JWX3ssj4E_J-3z1A-HmFGrU8BpvpqhNabayXeOZ2Nq_sbeDgtzMJpLLnXFgAChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISsaa0Zg2lwNpAkAIkHAAAAAPA8kv_-awoTiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMohHVkcDaCI4I
- enr:-Le4QLHZDSvkLfqgEo8IWGG96h6mxwe_PsggC20CL3neLBjfXLGAQFOPSltZ7oP6ol54OvaNqO02Rnvb8YmDR274uq8ChGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLosQxg2lwNpAqAX4AAAAAAPA8kv_-ax65iXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMohHVkcDaCI4I
- enr:-Le4QH6LQrusDbAHPjU_HcKOuMeXfdEB5NJyXgHWFadfHgiySqeDyusQMvfphdYWOzuSZO9Uq2AMRJR5O4ip7OvVma8BhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY9ncg2lwNpAkAh8AgQIBAAAAAAAAAAmXiXNlY3AyNTZrMaECDYCZTZEksF-kmgPholqgVt8IXr-8L7Nu7YrZ7HUpgxmDdWRwgiMohHVkcDaCI4I
- enr:-Le4QIqLuWybHNONr933Lk0dcMmAB5WgvGKRyDihy1wHDIVlNuuztX62W51voT4I8qD34GcTEOTmag1bcdZ_8aaT4NUBhGV0aDKQtTA_KgEAAAAAIgEAAAAAAIJpZIJ2NIJpcISLY04ng2lwNpAkAh8AgAIBAAAAAAAAAA-fiXNlY3AyNTZrMaEDscnRV6n1m-D9ID5UsURk0jsoKNXt1TIrj8uKOGW6iluDdWRwgiMohHVkcDaCI4I
# EF Team
- enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg
- enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg
@ -15,4 +17,4 @@
- enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg
# Nimbus team
- enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM
- enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM

View File

@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
// NOTE: using --match instead of --exclude for compatibility with old Git
"--match=thiswillnevermatchlol"
],
prefix = "Lighthouse/v4.2.0-",
fallback = "Lighthouse/v4.2.0"
prefix = "Lighthouse/v4.3.0-",
fallback = "Lighthouse/v4.3.0"
);
/// Returns `VERSION`, but with platform information appended to the end.

View File

@ -1,10 +1,15 @@
use crate::{ForkChoiceStore, InvalidationOperation};
use per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError;
use proto_array::{
Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, ProposerHeadError,
ProposerHeadInfo, ProtoArrayForkChoice, ReOrgThreshold,
};
use slog::{crit, debug, warn, Logger};
use slog::{crit, debug, error, warn, Logger};
use ssz_derive::{Decode, Encode};
use state_processing::per_epoch_processing::altair::ParticipationCache;
use state_processing::per_epoch_processing::{
weigh_justification_and_finalization, JustificationAndFinalizationState,
};
use state_processing::{
per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing,
};
@ -18,6 +23,7 @@ use types::{
EthSpec, ExecPayload, ExecutionBlockHash, Hash256, IndexedAttestation, RelativeEpoch,
SignedBeaconBlock, Slot,
};
use types::{ProgressiveBalancesCache, ProgressiveBalancesMode};
#[derive(Debug)]
pub enum Error<T> {
@ -72,7 +78,9 @@ pub enum Error<T> {
},
UnrealizedVoteProcessing(state_processing::EpochProcessingError),
ParticipationCacheBuild(BeaconStateError),
ParticipationCacheError(ParticipationCacheError),
ValidatorStatuses(BeaconStateError),
ProgressiveBalancesCacheCheckFailed(String),
}
impl<T> From<InvalidAttestation> for Error<T> {
@ -93,6 +101,18 @@ impl<T> From<state_processing::EpochProcessingError> for Error<T> {
}
}
impl<T> From<BeaconStateError> for Error<T> {
fn from(e: BeaconStateError) -> Self {
Error::BeaconStateError(e)
}
}
impl<T> From<ParticipationCacheError> for Error<T> {
fn from(e: ParticipationCacheError) -> Self {
Error::ParticipationCacheError(e)
}
}
#[derive(Debug, Clone, Copy)]
/// Controls how fork choice should behave when restoring from a persisted fork choice.
pub enum ResetPayloadStatuses {
@ -645,7 +665,9 @@ where
block_delay: Duration,
state: &BeaconState<E>,
payload_verification_status: PayloadVerificationStatus,
progressive_balances_mode: ProgressiveBalancesMode,
spec: &ChainSpec,
log: &Logger,
) -> Result<(), Error<T::Error>> {
// If this block has already been processed we do not need to reprocess it.
// We check this immediately in case re-processing the block mutates some property of the
@ -739,46 +761,84 @@ where
parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch
});
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) =
if let Some((parent_justified, parent_finalized)) = parent_checkpoints {
(parent_justified, parent_finalized)
} else {
let justification_and_finalization_state = match block {
// TODO(deneb): Ensure that the final specification
// does not substantially modify per epoch processing.
BeaconBlockRef::Deneb(_)
| BeaconBlockRef::Capella(_)
| BeaconBlockRef::Merge(_)
| BeaconBlockRef::Altair(_) => {
let participation_cache =
per_epoch_processing::altair::ParticipationCache::new(state, spec)
.map_err(Error::ParticipationCacheBuild)?;
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some((
parent_justified,
parent_finalized,
)) =
parent_checkpoints
{
(parent_justified, parent_finalized)
} else {
let justification_and_finalization_state = match block {
BeaconBlockRef::Deneb(_) | BeaconBlockRef::Capella(_)
| BeaconBlockRef::Merge(_)
| BeaconBlockRef::Altair(_) => match progressive_balances_mode {
ProgressiveBalancesMode::Disabled => {
let participation_cache = ParticipationCache::new(state, spec)
.map_err(Error::ParticipationCacheBuild)?;
per_epoch_processing::altair::process_justification_and_finalization(
state,
&participation_cache,
)?
}
BeaconBlockRef::Base(_) => {
let mut validator_statuses =
per_epoch_processing::base::ValidatorStatuses::new(state, spec)
.map_err(Error::ValidatorStatuses)?;
validator_statuses
.process_attestations(state)
.map_err(Error::ValidatorStatuses)?;
per_epoch_processing::base::process_justification_and_finalization(
state,
&validator_statuses.total_balances,
spec,
)?
}
};
ProgressiveBalancesMode::Fast
| ProgressiveBalancesMode::Checked
| ProgressiveBalancesMode::Strict => {
let maybe_participation_cache = progressive_balances_mode
.perform_comparative_checks()
.then(|| {
ParticipationCache::new(state, spec)
.map_err(Error::ParticipationCacheBuild)
})
.transpose()?;
(
justification_and_finalization_state.current_justified_checkpoint(),
justification_and_finalization_state.finalized_checkpoint(),
)
process_justification_and_finalization_from_progressive_cache::<E, T>(
state,
maybe_participation_cache.as_ref(),
)
.or_else(|e| {
if progressive_balances_mode != ProgressiveBalancesMode::Strict {
error!(
log,
"Processing with progressive balances cache failed";
"info" => "falling back to the non-optimized processing method",
"error" => ?e,
);
let participation_cache = maybe_participation_cache
.map(Ok)
.unwrap_or_else(|| ParticipationCache::new(state, spec))
.map_err(Error::ParticipationCacheBuild)?;
per_epoch_processing::altair::process_justification_and_finalization(
state,
&participation_cache,
).map_err(Error::from)
} else {
Err(e)
}
})?
}
},
BeaconBlockRef::Base(_) => {
let mut validator_statuses =
per_epoch_processing::base::ValidatorStatuses::new(state, spec)
.map_err(Error::ValidatorStatuses)?;
validator_statuses
.process_attestations(state)
.map_err(Error::ValidatorStatuses)?;
per_epoch_processing::base::process_justification_and_finalization(
state,
&validator_statuses.total_balances,
spec,
)?
}
};
(
justification_and_finalization_state.current_justified_checkpoint(),
justification_and_finalization_state.finalized_checkpoint(),
)
};
// Update best known unrealized justified & finalized checkpoints
if unrealized_justified_checkpoint.epoch
> self.fc_store.unrealized_justified_checkpoint().epoch
@ -1504,6 +1564,92 @@ where
}
}
/// Processes justification and finalization using the progressive balances cache. Also performs a
/// comparative check against the `ParticipationCache` if it is supplied.
///
/// Returns an error if the cache is not initialized or if there is a mismatch on the comparative check.
fn process_justification_and_finalization_from_progressive_cache<E, T>(
state: &BeaconState<E>,
maybe_participation_cache: Option<&ParticipationCache>,
) -> Result<JustificationAndFinalizationState<E>, Error<T::Error>>
where
E: EthSpec,
T: ForkChoiceStore<E>,
{
let justification_and_finalization_state = JustificationAndFinalizationState::new(state);
if state.current_epoch() <= E::genesis_epoch() + 1 {
return Ok(justification_and_finalization_state);
}
// Load cached balances
let progressive_balances_cache: &ProgressiveBalancesCache = state.progressive_balances_cache();
let previous_target_balance =
progressive_balances_cache.previous_epoch_target_attesting_balance()?;
let current_target_balance =
progressive_balances_cache.current_epoch_target_attesting_balance()?;
let total_active_balance = state.get_total_active_balance()?;
if let Some(participation_cache) = maybe_participation_cache {
check_progressive_balances::<E, T>(
state,
participation_cache,
previous_target_balance,
current_target_balance,
total_active_balance,
)?;
}
weigh_justification_and_finalization(
justification_and_finalization_state,
total_active_balance,
previous_target_balance,
current_target_balance,
)
.map_err(Error::from)
}
/// Performs comparative checks against the `ParticipationCache`, returning an error if there's a mismatch.
fn check_progressive_balances<E, T>(
state: &BeaconState<E>,
participation_cache: &ParticipationCache,
cached_previous_target_balance: u64,
cached_current_target_balance: u64,
cached_total_active_balance: u64,
) -> Result<(), Error<T::Error>>
where
E: EthSpec,
T: ForkChoiceStore<E>,
{
let slot = state.slot();
let epoch = state.current_epoch();
// Check previous epoch target balances
let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?;
if previous_target_balance != cached_previous_target_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Previous epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, previous_target_balance, cached_previous_target_balance)
));
}
// Check current epoch target balances
let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?;
if current_target_balance != cached_current_target_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Current epoch target attesting balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, current_target_balance, cached_current_target_balance)
));
}
// Check current epoch total balances
let total_active_balance = participation_cache.current_epoch_total_active_balance();
if total_active_balance != cached_total_active_balance {
return Err(Error::ProgressiveBalancesCacheCheckFailed(
format!("Current epoch total active balance mismatch, slot: {}, epoch: {}, actual: {}, cached: {}", slot, epoch, total_active_balance, cached_total_active_balance)
));
}
Ok(())
}
/// Helper struct that is used to encode/decode the state of the `ForkChoice` as SSZ bytes.
///
/// This is used when persisting the state of the fork choice to disk.

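For orientation, a deliberately simplified sketch of the mode dispatch above (placeholder types, not the real fork-choice code): `Fast`/`Checked` fall back to the `ParticipationCache` path when the progressive cache errors, while `Strict` propagates the error:

#[derive(Debug, PartialEq, Clone, Copy)]
enum Mode { Disabled, Fast, Checked, Strict }

// Stand-in for the progressive-cache path; pretend it failed.
fn from_progressive_cache() -> Result<u64, String> {
    Err("progressive balances cache not initialized".into())
}

// Stand-in for the existing `ParticipationCache` path.
fn from_participation_cache() -> Result<u64, String> {
    Ok(42)
}

fn unrealized_balances(mode: Mode) -> Result<u64, String> {
    match mode {
        Mode::Disabled => from_participation_cache(),
        Mode::Fast | Mode::Checked | Mode::Strict => {
            from_progressive_cache().or_else(|e| {
                if mode != Mode::Strict {
                    eprintln!("progressive cache failed, falling back: {e}");
                    from_participation_cache()
                } else {
                    Err(e)
                }
            })
        }
    }
}

fn main() {
    assert_eq!(unrealized_balances(Mode::Checked), Ok(42));
    assert!(unrealized_balances(Mode::Strict).is_err());
}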
View File

@ -17,12 +17,13 @@ use fork_choice::{
use store::MemoryStore;
use types::{
test_utils::generate_deterministic_keypair, BeaconBlockRef, BeaconState, ChainSpec, Checkpoint,
Epoch, EthSpec, Hash256, IndexedAttestation, MainnetEthSpec, SignedBeaconBlock, Slot, SubnetId,
Epoch, EthSpec, ForkName, Hash256, IndexedAttestation, MainnetEthSpec, ProgressiveBalancesMode,
RelativeEpoch, SignedBeaconBlock, Slot, SubnetId,
};
pub type E = MainnetEthSpec;
pub const VALIDATOR_COUNT: usize = 32;
pub const VALIDATOR_COUNT: usize = 64;
/// Defines some delay between when an attestation is created and when it is mutated.
pub enum MutationDelay {
@ -68,6 +69,24 @@ impl ForkChoiceTest {
Self { harness }
}
/// Creates a new tester with the specified `ProgressiveBalancesMode` and genesis from latest fork.
fn new_with_progressive_balances_mode(mode: ProgressiveBalancesMode) -> ForkChoiceTest {
// Genesis with the latest fork (at least Altair is required to test the cache).
let spec = ForkName::latest().make_genesis_spec(ChainSpec::default());
let harness = BeaconChainHarness::builder(MainnetEthSpec)
.spec(spec)
.chain_config(ChainConfig {
progressive_balances_mode: mode,
..ChainConfig::default()
})
.deterministic_keypairs(VALIDATOR_COUNT)
.fresh_ephemeral_store()
.mock_execution_layer()
.build();
Self { harness }
}
/// Get a value from the `ForkChoice` instantiation.
fn get<T, U>(&self, func: T) -> U
where
@ -212,6 +231,39 @@ impl ForkChoiceTest {
self
}
/// Slash a validator from the previous epoch committee.
pub async fn add_previous_epoch_attester_slashing(self) -> Self {
let state = self.harness.get_current_state();
let previous_epoch_shuffling = state.get_shuffling(RelativeEpoch::Previous).unwrap();
let validator_indices = previous_epoch_shuffling
.iter()
.map(|idx| *idx as u64)
.take(1)
.collect();
self.harness
.add_attester_slashing(validator_indices)
.unwrap();
self
}
/// Slash the proposer of a block in the previous epoch.
pub async fn add_previous_epoch_proposer_slashing(self, slots_per_epoch: u64) -> Self {
let previous_epoch_slot = self.harness.get_current_slot() - slots_per_epoch;
let previous_epoch_block = self
.harness
.chain
.block_at_slot(previous_epoch_slot, WhenSlotSkipped::None)
.unwrap()
.unwrap();
let proposer_index: u64 = previous_epoch_block.message().proposer_index();
self.harness.add_proposer_slashing(proposer_index).unwrap();
self
}
/// Apply `count` blocks to the chain (without attestations).
pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self {
self.harness.advance_slot();
@ -286,7 +338,9 @@ impl ForkChoiceTest {
Duration::from_secs(0),
&state,
PayloadVerificationStatus::Verified,
self.harness.chain.config.progressive_balances_mode,
&self.harness.chain.spec,
self.harness.logger(),
)
.unwrap();
self
@ -328,7 +382,9 @@ impl ForkChoiceTest {
Duration::from_secs(0),
&state,
PayloadVerificationStatus::Verified,
self.harness.chain.config.progressive_balances_mode,
&self.harness.chain.spec,
self.harness.logger(),
)
.err()
.expect("on_block did not return an error");
@ -1287,3 +1343,49 @@ async fn weak_subjectivity_check_epoch_boundary_is_skip_slot_failure() {
.assert_finalized_epoch_is_less_than(checkpoint.epoch)
.assert_shutdown_signal_sent();
}
/// Checks that `ProgressiveBalancesCache` is updated correctly after an attester slashing event,
/// where the slashed validator is a target attester in the previous / current epoch.
#[tokio::test]
async fn progressive_balances_cache_attester_slashing() {
ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict)
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.await
.unwrap()
.add_previous_epoch_attester_slashing()
.await
// expect fork choice to import blocks successfully after a previous epoch attester is
// slashed, i.e. the slashed attester's balance is correctly excluded from
// the previous epoch total balance in `ProgressiveBalancesCache`.
.apply_blocks(1)
.await
// expect fork choice to import another epoch of blocks successfully - the slashed
// attester's balance should be excluded from the current epoch total balance in
// `ProgressiveBalancesCache` as well.
.apply_blocks(MainnetEthSpec::slots_per_epoch() as usize)
.await;
}
/// Checks that `ProgressiveBalancesCache` is updated correctly after a proposer slashing event,
/// where the slashed validator is a target attester in the previous / current epoch.
#[tokio::test]
async fn progressive_balances_cache_proposer_slashing() {
ForkChoiceTest::new_with_progressive_balances_mode(ProgressiveBalancesMode::Strict)
// first two epochs
.apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0)
.await
.unwrap()
.add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch())
.await
// expect fork choice to import blocks successfully after a previous epoch proposer is
// slashed, i.e. the slashed proposer's balance is correctly excluded from
// the previous epoch total balance in `ProgressiveBalancesCache`.
.apply_blocks(1)
.await
// expect fork choice to import another epoch of blocks successfully - the slashed
// proposer's balance should be excluded from the current epoch total balance in
// `ProgressiveBalancesCache` as well.
.apply_blocks(MainnetEthSpec::slots_per_epoch() as usize)
.await;
}

View File

@ -7,6 +7,7 @@ mod slash_validator;
pub mod altair;
pub mod base;
pub mod update_progressive_balances_cache;
pub use deposit_data_tree::DepositDataTree;
pub use get_attestation_participation::get_attestation_participation_flag_indices;

View File

@ -1,3 +1,4 @@
use crate::common::update_progressive_balances_cache::update_progressive_balances_on_slashing;
use crate::{
common::{decrease_balance, increase_balance, initiate_validator_exit},
per_block_processing::errors::BlockProcessingError,
@ -43,6 +44,8 @@ pub fn slash_validator<T: EthSpec>(
.safe_div(spec.min_slashing_penalty_quotient_for_state(state))?,
)?;
update_progressive_balances_on_slashing(state, slashed_index)?;
// Apply proposer and whistleblower rewards
let proposer_index = ctxt.get_proposer_index(state, spec)? as usize;
let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index);

View File

@ -0,0 +1,142 @@
/// A collection of all functions that mutate the `ProgressiveBalancesCache`.
use crate::metrics::{
PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
};
use crate::per_epoch_processing::altair::ParticipationCache;
use crate::{BlockProcessingError, EpochProcessingError};
use lighthouse_metrics::set_gauge;
use ssz_types::VariableList;
use std::borrow::Cow;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
use types::{
is_progressive_balances_enabled, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec,
ParticipationFlags, ProgressiveBalancesCache,
};
/// Initializes the `ProgressiveBalancesCache` using balance values from the
/// `ParticipationCache`. If the optional `&ParticipationCache` is not supplied, it will be
/// computed from the `BeaconState`.
pub fn initialize_progressive_balances_cache<E: EthSpec>(
state: &mut BeaconState<E>,
maybe_participation_cache: Option<&ParticipationCache>,
spec: &ChainSpec,
) -> Result<(), BeaconStateError> {
if !is_progressive_balances_enabled(state)
|| state.progressive_balances_cache().is_initialized()
{
return Ok(());
}
let participation_cache = match maybe_participation_cache {
Some(cache) => Cow::Borrowed(cache),
None => Cow::Owned(ParticipationCache::new(state, spec)?),
};
let previous_epoch_target_attesting_balance = participation_cache
.previous_epoch_target_attesting_balance_raw()
.map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?;
let current_epoch_target_attesting_balance = participation_cache
.current_epoch_target_attesting_balance_raw()
.map_err(|e| BeaconStateError::ParticipationCacheError(format!("{e:?}")))?;
let current_epoch = state.current_epoch();
state.progressive_balances_cache_mut().initialize(
current_epoch,
previous_epoch_target_attesting_balance,
current_epoch_target_attesting_balance,
);
update_progressive_balances_metrics(state.progressive_balances_cache())?;
Ok(())
}
/// Updates the `ProgressiveBalancesCache` when a new target attestation has been processed.
pub fn update_progressive_balances_on_attestation<T: EthSpec>(
state: &mut BeaconState<T>,
epoch: Epoch,
validator_index: usize,
) -> Result<(), BlockProcessingError> {
if is_progressive_balances_enabled(state) {
let validator = state.get_validator(validator_index)?;
if !validator.slashed {
let validator_effective_balance = validator.effective_balance;
state
.progressive_balances_cache_mut()
.on_new_target_attestation(epoch, validator_effective_balance)?;
}
}
Ok(())
}
/// Updates the `ProgressiveBalancesCache` when a target attester has been slashed.
pub fn update_progressive_balances_on_slashing<T: EthSpec>(
state: &mut BeaconState<T>,
validator_index: usize,
) -> Result<(), BlockProcessingError> {
if is_progressive_balances_enabled(state) {
let previous_epoch_participation = state.previous_epoch_participation()?;
let is_previous_epoch_target_attester =
is_target_attester_in_epoch::<T>(previous_epoch_participation, validator_index)?;
let current_epoch_participation = state.current_epoch_participation()?;
let is_current_epoch_target_attester =
is_target_attester_in_epoch::<T>(current_epoch_participation, validator_index)?;
let validator_effective_balance = state.get_effective_balance(validator_index)?;
state.progressive_balances_cache_mut().on_slashing(
is_previous_epoch_target_attester,
is_current_epoch_target_attester,
validator_effective_balance,
)?;
}
Ok(())
}
/// Updates the `ProgressiveBalancesCache` on epoch transition.
pub fn update_progressive_balances_on_epoch_transition<T: EthSpec>(
state: &mut BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), EpochProcessingError> {
if is_progressive_balances_enabled(state) {
state
.progressive_balances_cache_mut()
.on_epoch_transition(spec)?;
update_progressive_balances_metrics(state.progressive_balances_cache())?;
}
Ok(())
}
pub fn update_progressive_balances_metrics(
cache: &ProgressiveBalancesCache,
) -> Result<(), BeaconStateError> {
set_gauge(
&PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
cache.previous_epoch_target_attesting_balance()? as i64,
);
set_gauge(
&PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL,
cache.current_epoch_target_attesting_balance()? as i64,
);
Ok(())
}
fn is_target_attester_in_epoch<T: EthSpec>(
epoch_participation: &VariableList<ParticipationFlags, T::ValidatorRegistryLimit>,
validator_index: usize,
) -> Result<bool, BlockProcessingError> {
let participation_flags = epoch_participation
.get(validator_index)
.ok_or(BeaconStateError::UnknownValidator(validator_index))?;
participation_flags
.has_flag(TIMELY_TARGET_FLAG_INDEX)
.map_err(|e| e.into())
}

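The slashing hook above hinges on `is_target_attester_in_epoch`, i.e. on the `TIMELY_TARGET_FLAG_INDEX` bit of a validator's `ParticipationFlags`. A hedged sketch of that predicate, assuming the `types` API behaves as used above:

use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
use types::ParticipationFlags;

fn main() -> Result<(), safe_arith::ArithError> {
    let mut flags = ParticipationFlags::default();
    assert!(!flags.has_flag(TIMELY_TARGET_FLAG_INDEX)?);
    flags.add_flag(TIMELY_TARGET_FLAG_INDEX)?;
    // This is the bit that decides whether a slashed validator's balance
    // must be subtracted from the cached target attesting totals.
    assert!(flags.has_flag(TIMELY_TARGET_FLAG_INDEX)?);
    Ok(())
}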
View File

@ -111,7 +111,7 @@ pub fn initialize_beacon_state_from_eth1<T: EthSpec>(
}
// Now that we have our validators, initialize the caches (including the committees)
state.build_all_caches(spec)?;
state.build_caches(spec)?;
// Set genesis validators root for domain separation and chain versioning
*state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache()?;
@ -134,7 +134,7 @@ pub fn process_activations<T: EthSpec>(
state: &mut BeaconState<T>,
spec: &ChainSpec,
) -> Result<(), Error> {
let (validators, balances) = state.validators_and_balances_mut();
let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter_mut().enumerate() {
let balance = balances
.get(index)

View File

@ -23,4 +23,15 @@ lazy_static! {
"beacon_participation_prev_epoch_active_gwei_total",
"Total effective balance (gwei) of validators active in the previous epoch"
);
/*
* Participation Metrics (progressive balances)
*/
pub static ref PARTICIPATION_PREV_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_participation_prev_epoch_target_attesting_gwei_progressive_total",
"Progressive total effective balance (gwei) of validators who attested to the target in the previous epoch"
);
pub static ref PARTICIPATION_CURR_EPOCH_TARGET_ATTESTING_GWEI_PROGRESSIVE_TOTAL: Result<IntGauge> = try_create_int_gauge(
"beacon_participation_curr_epoch_target_attesting_gwei_progressive_total",
"Progressive total effective balance (gwei) of validators who attested to the target in the current epoch"
);
}

View File

@ -42,6 +42,9 @@ mod verify_proposer_slashing;
use crate::common::decrease_balance;
use crate::StateProcessingStrategy;
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_metrics,
};
#[cfg(feature = "arbitrary-fuzz")]
use arbitrary::Arbitrary;
@ -115,6 +118,8 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
.fork_name(spec)
.map_err(BlockProcessingError::InconsistentStateFork)?;
initialize_progressive_balances_cache(state, None, spec)?;
let verify_signatures = match block_signature_strategy {
BlockSignatureStrategy::VerifyBulk => {
// Verify all signatures in the block at once.
@ -183,6 +188,10 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
)?;
}
if is_progressive_balances_enabled(state) {
update_progressive_balances_metrics(state.progressive_balances_cache())?;
}
Ok(())
}

View File

@ -1,6 +1,8 @@
use super::signature_sets::Error as SignatureSetError;
use crate::per_epoch_processing::altair::participation_cache;
use crate::ContextError;
use merkle_proof::MerkleTreeError;
use participation_cache::Error as ParticipationCacheError;
use safe_arith::ArithError;
use ssz::DecodeError;
use types::*;
@ -99,6 +101,7 @@ pub enum BlockProcessingError {
length: usize,
},
WithdrawalCredentialsInvalid,
ParticipationCacheError(ParticipationCacheError),
}
impl From<BeaconStateError> for BlockProcessingError {
@ -156,6 +159,12 @@ impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
}
}
impl From<ParticipationCacheError> for BlockProcessingError {
fn from(e: ParticipationCacheError) -> Self {
BlockProcessingError::ParticipationCacheError(e)
}
}
/// A conversion that consumes `self` and adds an `index` variable to resulting struct.
///
/// Used here to allow converting an error into an upstream error that points to the object that

View File

@ -97,6 +97,8 @@ pub mod base {
pub mod altair {
use super::*;
use crate::common::update_progressive_balances_cache::update_progressive_balances_on_attestation;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
pub fn process_attestations<T: EthSpec>(
state: &mut BeaconState<T>,
@ -163,6 +165,14 @@ pub mod altair {
get_base_reward(state, index, base_reward_per_increment, spec)?
.safe_mul(weight)?,
)?;
if flag_index == TIMELY_TARGET_FLAG_INDEX {
update_progressive_balances_on_attestation(
state,
data.target.epoch,
index,
)?;
}
}
}
}
@ -235,6 +245,7 @@ pub fn process_attester_slashings<T: EthSpec>(
Ok(())
}
/// Wrapper function to handle calling the correct version of `process_attestations` based on
/// the fork.
pub fn process_attestations<T: EthSpec, Payload: AbstractExecPayload<T>>(

View File

@ -1,4 +1,7 @@
use super::{process_registry_updates, process_slashings, EpochProcessingSummary, Error};
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
};
use crate::per_epoch_processing::{
effective_balance_updates::process_effective_balance_updates,
historical_roots_update::process_historical_roots_update,
@ -31,6 +34,7 @@ pub fn process_epoch<T: EthSpec>(
// Pre-compute participating indices and total balances.
let participation_cache = ParticipationCache::new(state, spec)?;
let sync_committee = state.current_sync_committee()?.clone();
initialize_progressive_balances_cache::<T>(state, Some(&participation_cache), spec)?;
// Justification and finalization.
let justification_and_finalization_state =
@ -56,7 +60,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?;
process_effective_balance_updates(state, Some(&participation_cache), spec)?;
// Reset slashings
process_slashings_reset(state)?;
@ -75,6 +79,8 @@ pub fn process_epoch<T: EthSpec>(
// Rotate the epoch caches to suit the epoch transition.
state.advance_caches(spec)?;
update_progressive_balances_on_epoch_transition(state, spec)?;
Ok(EpochProcessingSummary::Altair {
participation_cache,
sync_committee,

View File

@ -11,49 +11,23 @@
//! Additionally, this cache is returned from the `altair::process_epoch` function and can be used
//! to get useful summaries about the validator participation in an epoch.
use safe_arith::{ArithError, SafeArith};
use types::{
consts::altair::{
NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX,
TIMELY_TARGET_FLAG_INDEX,
},
BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch,
Balance, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags,
RelativeEpoch,
};
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
InvalidFlagIndex(usize),
InvalidValidatorIndex(usize),
}
/// A balance which will never be below the specified `minimum`.
///
/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
#[derive(PartialEq, Debug, Clone, Copy)]
struct Balance {
raw: u64,
minimum: u64,
}
impl Balance {
/// Initialize the balance to `0`, or the given `minimum`.
pub fn zero(minimum: u64) -> Self {
Self { raw: 0, minimum }
}
/// Returns the balance with respect to the initialization `minimum`.
pub fn get(&self) -> u64 {
std::cmp::max(self.raw, self.minimum)
}
/// Add-assign to the balance.
pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
self.raw.safe_add_assign(other)
}
}
/// Caches the participation values for one epoch (either the previous or current).
#[derive(PartialEq, Debug)]
#[derive(PartialEq, Debug, Clone)]
struct SingleEpochParticipationCache {
/// Maps an active validator index to their participation flags.
///
@ -95,6 +69,14 @@ impl SingleEpochParticipationCache {
.ok_or(Error::InvalidFlagIndex(flag_index))
}
/// Returns the raw total balance of attesters who have `flag_index` set.
fn total_flag_balance_raw(&self, flag_index: usize) -> Result<Balance, Error> {
self.total_flag_balances
.get(flag_index)
.copied()
.ok_or(Error::InvalidFlagIndex(flag_index))
}
/// Returns `true` if `val_index` is active, unslashed and has `flag_index` set.
///
/// ## Errors
@ -173,7 +155,7 @@ impl SingleEpochParticipationCache {
}
/// Maintains a cache to be used during `altair::process_epoch`.
#[derive(PartialEq, Debug)]
#[derive(PartialEq, Debug, Clone)]
pub struct ParticipationCache {
current_epoch: Epoch,
/// Caches information about active validators pertaining to `self.current_epoch`.
@ -291,6 +273,11 @@ impl ParticipationCache {
.total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
}
pub fn current_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
self.current_epoch_participation
.total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
}
pub fn previous_epoch_total_active_balance(&self) -> u64 {
self.previous_epoch_participation.total_active_balance.get()
}
@ -300,6 +287,11 @@ impl ParticipationCache {
.total_flag_balance(TIMELY_TARGET_FLAG_INDEX)
}
pub fn previous_epoch_target_attesting_balance_raw(&self) -> Result<Balance, Error> {
self.previous_epoch_participation
.total_flag_balance_raw(TIMELY_TARGET_FLAG_INDEX)
}
pub fn previous_epoch_source_attesting_balance(&self) -> Result<u64, Error> {
self.previous_epoch_participation
.total_flag_balance(TIMELY_SOURCE_FLAG_INDEX)

View File

@ -52,7 +52,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?;
process_effective_balance_updates(state, None, spec)?;
// Reset slashings
process_slashings_reset(state)?;

View File

@ -11,6 +11,9 @@ use crate::per_epoch_processing::{
};
use types::{BeaconState, ChainSpec, EthSpec, RelativeEpoch};
use crate::common::update_progressive_balances_cache::{
initialize_progressive_balances_cache, update_progressive_balances_on_epoch_transition,
};
pub use historical_summaries_update::process_historical_summaries_update;
mod historical_summaries_update;
@ -27,6 +30,7 @@ pub fn process_epoch<T: EthSpec>(
// Pre-compute participating indices and total balances.
let participation_cache = ParticipationCache::new(state, spec)?;
let sync_committee = state.current_sync_committee()?.clone();
initialize_progressive_balances_cache(state, Some(&participation_cache), spec)?;
// Justification and finalization.
let justification_and_finalization_state =
@ -52,7 +56,7 @@ pub fn process_epoch<T: EthSpec>(
process_eth1_data_reset(state)?;
// Update effective balances with hysteresis (lag).
process_effective_balance_updates(state, spec)?;
process_effective_balance_updates(state, Some(&participation_cache), spec)?;
// Reset slashings
process_slashings_reset(state)?;
@ -71,6 +75,8 @@ pub fn process_epoch<T: EthSpec>(
// Rotate the epoch caches to suit the epoch transition.
state.advance_caches(spec)?;
update_progressive_balances_on_epoch_transition(state, spec)?;
Ok(EpochProcessingSummary::Altair {
participation_cache,
sync_committee,

View File

@ -1,11 +1,13 @@
use super::errors::EpochProcessingError;
use crate::per_epoch_processing::altair::ParticipationCache;
use safe_arith::SafeArith;
use types::beacon_state::BeaconState;
use types::chain_spec::ChainSpec;
use types::{BeaconStateError, EthSpec};
use types::{BeaconStateError, EthSpec, ProgressiveBalancesCache};
pub fn process_effective_balance_updates<T: EthSpec>(
state: &mut BeaconState<T>,
maybe_participation_cache: Option<&ParticipationCache>,
spec: &ChainSpec,
) -> Result<(), EpochProcessingError> {
let hysteresis_increment = spec
@ -13,7 +15,8 @@ pub fn process_effective_balance_updates<T: EthSpec>(
.safe_div(spec.hysteresis_quotient)?;
let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?;
let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?;
let (validators, balances) = state.validators_and_balances_mut();
let (validators, balances, progressive_balances_cache) =
state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter_mut().enumerate() {
let balance = balances
.get(index)
@ -23,11 +26,43 @@ pub fn process_effective_balance_updates<T: EthSpec>(
if balance.safe_add(downward_threshold)? < validator.effective_balance
|| validator.effective_balance.safe_add(upward_threshold)? < balance
{
validator.effective_balance = std::cmp::min(
let old_effective_balance = validator.effective_balance;
let new_effective_balance = std::cmp::min(
balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?,
spec.max_effective_balance,
);
if let Some(participation_cache) = maybe_participation_cache {
update_progressive_balances(
participation_cache,
progressive_balances_cache,
index,
old_effective_balance,
new_effective_balance,
)?;
}
validator.effective_balance = new_effective_balance;
}
}
Ok(())
}
fn update_progressive_balances(
participation_cache: &ParticipationCache,
progressive_balances_cache: &mut ProgressiveBalancesCache,
index: usize,
old_effective_balance: u64,
new_effective_balance: u64,
) -> Result<(), EpochProcessingError> {
if old_effective_balance != new_effective_balance {
let is_current_epoch_target_attester =
participation_cache.is_current_epoch_timely_target_attester(index)?;
progressive_balances_cache.on_effective_balance_change(
is_current_epoch_target_attester,
old_effective_balance,
new_effective_balance,
)?;
}
Ok(())
}

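Numerically, the adjustment delegated to `on_effective_balance_change` is just a signed delta applied to the current-epoch total; a toy example with made-up gwei values:

fn main() {
    // Hysteresis lowers one attester's effective balance by 1 ETH (in gwei).
    let (old_eb, new_eb) = (32_000_000_000u64, 31_000_000_000u64);
    let mut current_epoch_target_attesting_balance = 640_000_000_000u64;
    if new_eb > old_eb {
        current_epoch_target_attesting_balance += new_eb - old_eb;
    } else {
        current_epoch_target_attesting_balance -= old_eb - new_eb;
    }
    assert_eq!(current_epoch_target_attesting_balance, 639_000_000_000);
}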
View File

@ -16,7 +16,7 @@ pub fn process_slashings<T: EthSpec>(
total_balance,
);
let (validators, balances) = state.validators_and_balances_mut();
let (validators, balances, _) = state.validators_and_balances_and_progressive_balances_mut();
for (index, validator) in validators.iter().enumerate() {
if validator.slashed
&& epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?

View File

@ -1,3 +1,4 @@
use crate::common::update_progressive_balances_cache::initialize_progressive_balances_cache;
use crate::common::{get_attestation_participation_flag_indices, get_attesting_indices};
use std::mem;
use std::sync::Arc;
@ -101,6 +102,7 @@ pub fn upgrade_to_altair<E: EthSpec>(
next_sync_committee: temp_sync_committee, // not read
// Caches
total_active_balance: pre.total_active_balance,
progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache),
@ -110,6 +112,8 @@ pub fn upgrade_to_altair<E: EthSpec>(
// Fill in previous epoch participation from the pre state's pending attestations.
translate_participation(&mut post, &pre.previous_epoch_attestations, spec)?;
initialize_progressive_balances_cache(&mut post, None, spec)?;
// Fill in sync committees
// Note: A duplicate committee is assigned for the current and next committee at the fork
// boundary

View File

@ -62,6 +62,7 @@ pub fn upgrade_to_capella<E: EthSpec>(
historical_summaries: VariableList::default(),
// Caches
total_active_balance: pre.total_active_balance,
progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache),

View File

@ -60,6 +60,7 @@ pub fn upgrade_to_bellatrix<E: EthSpec>(
latest_execution_payload_header: <ExecutionPayloadHeaderMerge<E>>::default(),
// Caches
total_active_balance: pre.total_active_balance,
progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache),
committee_caches: mem::take(&mut pre.committee_caches),
pubkey_cache: mem::take(&mut pre.pubkey_cache),
exit_cache: mem::take(&mut pre.exit_cache),

View File

@ -53,6 +53,7 @@ serde_json = "1.0.74"
smallvec = "1.8.0"
serde_with = "1.13.0"
maplit = "1.0.2"
strum = { version = "0.24.0", features = ["derive"] }
[dev-dependencies]
criterion = "0.3.3"

View File

@ -51,7 +51,7 @@ fn all_benches(c: &mut Criterion) {
let spec = Arc::new(MainnetEthSpec::default_spec());
let mut state = get_state::<MainnetEthSpec>(validator_count);
state.build_all_caches(&spec).expect("should build caches");
state.build_caches(&spec).expect("should build caches");
let state_bytes = state.as_ssz_bytes();
let inner_state = state.clone();

View File

@ -98,7 +98,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T,
}
}
impl<'a, T: EthSpec> BeaconBlockBodyRef<'a, T> {
impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T, Payload> {
/// Get the fork_name of this object
pub fn fork_name(self) -> ForkName {
match self {

View File

@ -26,6 +26,8 @@ pub use self::committee_cache::{
compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
CommitteeCache,
};
pub use crate::beacon_state::balance::Balance;
pub use crate::beacon_state::progressive_balances_cache::*;
use crate::historical_summary::HistoricalSummary;
pub use clone_config::CloneConfig;
pub use eth_spec::*;
@ -34,9 +36,11 @@ pub use tree_hash_cache::BeaconTreeHashCache;
#[macro_use]
mod committee_cache;
mod balance;
mod clone_config;
mod exit_cache;
mod iter;
mod progressive_balances_cache;
mod pubkey_cache;
mod tests;
mod tree_hash_cache;
@ -101,6 +105,9 @@ pub enum Error {
SszTypesError(ssz_types::Error),
TreeHashCacheNotInitialized,
NonLinearTreeHashCacheHistory,
ParticipationCacheError(String),
ProgressiveBalancesCacheNotInitialized,
ProgressiveBalancesCacheInconsistent,
TreeHashCacheSkippedSlot {
cache: Slot,
state: Slot,
@ -322,6 +329,12 @@ where
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub progressive_balances_cache: ProgressiveBalancesCache,
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
#[test_random(default)]
#[derivative(Clone(clone_with = "clone_default"))]
pub committee_caches: [CommitteeCache; CACHED_EPOCHS],
#[serde(skip_serializing, skip_deserializing)]
#[ssz(skip_serializing, skip_deserializing)]
@ -398,6 +411,7 @@ impl<T: EthSpec> BeaconState<T> {
// Caching (not in spec)
total_active_balance: None,
progressive_balances_cache: <_>::default(),
committee_caches: [
CommitteeCache::default(),
CommitteeCache::default(),
@ -776,7 +790,7 @@ impl<T: EthSpec> BeaconState<T> {
Ok(signature_hash_int.safe_rem(modulo)? == 0)
}
/// Returns the beacon proposer index for the `slot` in the given `relative_epoch`.
/// Returns the beacon proposer index for the `slot` in `self.current_epoch()`.
///
/// Spec v0.12.1
pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result<usize, Error> {
@ -1169,13 +1183,35 @@ impl<T: EthSpec> BeaconState<T> {
}
/// Convenience accessor for validators and balances simultaneously.
pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) {
pub fn validators_and_balances_and_progressive_balances_mut(
&mut self,
) -> (&mut [Validator], &mut [u64], &mut ProgressiveBalancesCache) {
match self {
BeaconState::Base(state) => (&mut state.validators, &mut state.balances),
BeaconState::Altair(state) => (&mut state.validators, &mut state.balances),
BeaconState::Merge(state) => (&mut state.validators, &mut state.balances),
BeaconState::Capella(state) => (&mut state.validators, &mut state.balances),
BeaconState::Deneb(state) => (&mut state.validators, &mut state.balances),
BeaconState::Base(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Altair(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Merge(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Capella(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
BeaconState::Deneb(state) => (
&mut state.validators,
&mut state.balances,
&mut state.progressive_balances_cache,
),
}
}
@ -1402,7 +1438,7 @@ impl<T: EthSpec> BeaconState<T> {
}
/// Build all caches (except the tree hash cache), if they need to be built.
pub fn build_all_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> {
self.build_all_committee_caches(spec)?;
self.update_pubkey_cache()?;
self.build_exit_cache(spec)?;
@ -1434,6 +1470,7 @@ impl<T: EthSpec> BeaconState<T> {
self.drop_committee_cache(RelativeEpoch::Next)?;
self.drop_pubkey_cache();
self.drop_tree_hash_cache();
self.drop_progressive_balances_cache();
*self.exit_cache_mut() = ExitCache::default();
Ok(())
}
@ -1630,6 +1667,11 @@ impl<T: EthSpec> BeaconState<T> {
*self.pubkey_cache_mut() = PubkeyCache::default()
}
/// Completely drops the `progressive_balances_cache`, replacing it with a new, empty cache.
fn drop_progressive_balances_cache(&mut self) {
*self.progressive_balances_cache_mut() = ProgressiveBalancesCache::default();
}
/// Initialize but don't fill the tree hash cache, if it isn't already initialized.
pub fn initialize_tree_hash_cache(&mut self) {
if !self.tree_hash_cache().is_initialized() {
@ -1702,6 +1744,9 @@ impl<T: EthSpec> BeaconState<T> {
if config.tree_hash_cache {
*res.tree_hash_cache_mut() = self.tree_hash_cache().clone();
}
if config.progressive_balances_cache {
*res.progressive_balances_cache_mut() = self.progressive_balances_cache().clone();
}
res
}

View File

@ -0,0 +1,33 @@
use arbitrary::Arbitrary;
use safe_arith::{ArithError, SafeArith};
/// A balance which will never be below the specified `minimum`.
///
/// This is an effort to ensure the `EFFECTIVE_BALANCE_INCREMENT` minimum is always respected.
#[derive(PartialEq, Debug, Clone, Copy, Arbitrary)]
pub struct Balance {
raw: u64,
minimum: u64,
}
impl Balance {
/// Initialize the balance to `0`, or the given `minimum`.
pub fn zero(minimum: u64) -> Self {
Self { raw: 0, minimum }
}
/// Returns the balance with respect to the initialization `minimum`.
pub fn get(&self) -> u64 {
std::cmp::max(self.raw, self.minimum)
}
/// Add-assign to the balance.
pub fn safe_add_assign(&mut self, other: u64) -> Result<(), ArithError> {
self.raw.safe_add_assign(other)
}
/// Sub-assign to the balance.
pub fn safe_sub_assign(&mut self, other: u64) -> Result<(), ArithError> {
self.raw.safe_sub_assign(other)
}
}

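A behaviour sketch for `Balance` with illustrative values: reads are clamped so they never fall below the configured minimum, which is what lets the cache respect `EFFECTIVE_BALANCE_INCREMENT` (assumes the re-export shown in the `beacon_state` changes above):

use types::Balance;

fn main() -> Result<(), safe_arith::ArithError> {
    let increment: u64 = 1_000_000_000;
    let mut balance = Balance::zero(increment);
    // A raw value of 0 still reads as the minimum.
    assert_eq!(balance.get(), increment);
    balance.safe_add_assign(3 * increment)?;
    balance.safe_sub_assign(increment)?;
    // Once the raw value exceeds the minimum, it dominates.
    assert_eq!(balance.get(), 2 * increment);
    Ok(())
}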
View File

@ -5,6 +5,7 @@ pub struct CloneConfig {
pub pubkey_cache: bool,
pub exit_cache: bool,
pub tree_hash_cache: bool,
pub progressive_balances_cache: bool,
}
impl CloneConfig {
@ -14,6 +15,7 @@ impl CloneConfig {
pubkey_cache: true,
exit_cache: true,
tree_hash_cache: true,
progressive_balances_cache: true,
}
}

View File

@ -0,0 +1,184 @@
use crate::beacon_state::balance::Balance;
use crate::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec};
use arbitrary::Arbitrary;
use safe_arith::SafeArith;
use serde_derive::{Deserialize, Serialize};
use strum::{Display, EnumString, EnumVariantNames};
/// This cache keeps track of the accumulated target attestation balance for the current & previous
/// epochs. The cached values can be utilised by fork choice to calculate unrealized justification
/// and finalization instead of converting epoch participation arrays to balances for each block we
/// process.
#[derive(Default, Debug, PartialEq, Arbitrary, Clone)]
pub struct ProgressiveBalancesCache {
inner: Option<Inner>,
}
#[derive(Debug, PartialEq, Arbitrary, Clone)]
struct Inner {
pub current_epoch: Epoch,
pub previous_epoch_target_attesting_balance: Balance,
pub current_epoch_target_attesting_balance: Balance,
}
impl ProgressiveBalancesCache {
pub fn initialize(
&mut self,
current_epoch: Epoch,
previous_epoch_target_attesting_balance: Balance,
current_epoch_target_attesting_balance: Balance,
) {
self.inner = Some(Inner {
current_epoch,
previous_epoch_target_attesting_balance,
current_epoch_target_attesting_balance,
});
}
pub fn is_initialized(&self) -> bool {
self.inner.is_some()
}
/// When a new target attestation has been processed, we update the cached target attesting
/// balance for the attestation's epoch to include the validator's effective balance.
/// If the epoch is neither the current epoch nor the previous epoch, an error is returned.
pub fn on_new_target_attestation(
&mut self,
epoch: Epoch,
validator_effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if epoch == cache.current_epoch {
cache
.current_epoch_target_attesting_balance
.safe_add_assign(validator_effective_balance)?;
} else if epoch.safe_add(1)? == cache.current_epoch {
cache
.previous_epoch_target_attesting_balance
.safe_add_assign(validator_effective_balance)?;
} else {
return Err(BeaconStateError::ProgressiveBalancesCacheInconsistent);
}
Ok(())
}
/// When a validator is slashed, we reduce the previous and/or current epoch target attesting
/// balances by the validator's effective balance, excluding the slashed validator's weight.
pub fn on_slashing(
&mut self,
is_previous_epoch_target_attester: bool,
is_current_epoch_target_attester: bool,
effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if is_previous_epoch_target_attester {
cache
.previous_epoch_target_attesting_balance
.safe_sub_assign(effective_balance)?;
}
if is_current_epoch_target_attester {
cache
.current_epoch_target_attesting_balance
.safe_sub_assign(effective_balance)?;
}
Ok(())
}
/// When a current epoch target attester has its effective balance changed, we adjust its
/// share of the target attesting balance in the cache.
pub fn on_effective_balance_change(
&mut self,
is_current_epoch_target_attester: bool,
old_effective_balance: u64,
new_effective_balance: u64,
) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
if is_current_epoch_target_attester {
if new_effective_balance > old_effective_balance {
cache
.current_epoch_target_attesting_balance
.safe_add_assign(new_effective_balance.safe_sub(old_effective_balance)?)?;
} else {
cache
.current_epoch_target_attesting_balance
.safe_sub_assign(old_effective_balance.safe_sub(new_effective_balance)?)?;
}
}
Ok(())
}
/// On epoch transition, the balance from the current epoch is shifted to the previous epoch,
/// and the current epoch balance is reset to zero.
pub fn on_epoch_transition(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> {
let cache = self.get_inner_mut()?;
cache.current_epoch.safe_add_assign(1)?;
cache.previous_epoch_target_attesting_balance =
cache.current_epoch_target_attesting_balance;
cache.current_epoch_target_attesting_balance =
Balance::zero(spec.effective_balance_increment);
Ok(())
}
pub fn previous_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
Ok(self
.get_inner()?
.previous_epoch_target_attesting_balance
.get())
}
pub fn current_epoch_target_attesting_balance(&self) -> Result<u64, BeaconStateError> {
Ok(self
.get_inner()?
.current_epoch_target_attesting_balance
.get())
}
fn get_inner_mut(&mut self) -> Result<&mut Inner, BeaconStateError> {
self.inner
.as_mut()
.ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
}
fn get_inner(&self) -> Result<&Inner, BeaconStateError> {
self.inner
.as_ref()
.ok_or(BeaconStateError::ProgressiveBalancesCacheNotInitialized)
}
}
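
A sketch of the intended call sequence (assumed usage, not part of this diff): initialize the cache for an epoch, accumulate a target attestation, then roll the totals over at the epoch boundary. The 32 ETH effective balance and epoch number below are illustrative.

// Hypothetical lifecycle demo; `spec` may be any `ChainSpec`.
fn cache_demo(spec: &ChainSpec) -> Result<(), BeaconStateError> {
    let mut cache = ProgressiveBalancesCache::default();
    cache.initialize(
        Epoch::new(10),
        Balance::zero(spec.effective_balance_increment),
        Balance::zero(spec.effective_balance_increment),
    );
    // A target attestation in the current epoch adds the attester's effective balance.
    cache.on_new_target_attestation(Epoch::new(10), 32_000_000_000)?;
    assert_eq!(cache.current_epoch_target_attesting_balance()?, 32_000_000_000);
    // At the epoch boundary, the current-epoch total becomes the previous-epoch total.
    cache.on_epoch_transition(spec)?;
    assert_eq!(cache.previous_epoch_target_attesting_balance()?, 32_000_000_000);
    Ok(())
}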
#[derive(
Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize, Display, EnumString, EnumVariantNames,
)]
#[strum(serialize_all = "lowercase")]
pub enum ProgressiveBalancesMode {
/// Disable the usage of the progressive balances cache and use the existing
/// `ParticipationCache` calculation.
Disabled,
/// Enable the usage of the progressive balances cache, with checks against the
/// `ParticipationCache`, falling back to the existing calculation if there is a balance
/// mismatch.
Checked,
/// Enable the usage of the progressive balances cache, with checks against the
/// `ParticipationCache`. Returns an error if there is a balance mismatch. Used in testing
/// only.
Strict,
/// Enable the usage of the progressive balances cache, with no comparative checks against
/// the `ParticipationCache`. This mode is fast but experimental; use with caution.
Fast,
}
impl ProgressiveBalancesMode {
pub fn perform_comparative_checks(&self) -> bool {
match self {
Self::Disabled | Self::Fast => false,
Self::Checked | Self::Strict => true,
}
}
}
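
Since the enum derives strum's `EnumString` with `serialize_all = "lowercase"`, CLI-style strings parse directly into a mode. A small sketch of assumed usage (not part of this diff):

use std::str::FromStr;

// Hypothetical parse-and-check demo; "checked" matches the lowercase strum attribute.
fn mode_demo() {
    let mode = ProgressiveBalancesMode::from_str("checked").unwrap();
    assert_eq!(mode, ProgressiveBalancesMode::Checked);
    assert!(mode.perform_comparative_checks());
    assert!(!ProgressiveBalancesMode::Fast.perform_comparative_checks());
}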
/// `ProgressiveBalancesCache` is only enabled from `Altair` onwards, as it requires the `ParticipationCache`.
pub fn is_progressive_balances_enabled<E: EthSpec>(state: &BeaconState<E>) -> bool {
match state {
BeaconState::Base(_) => false,
BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => true,
}
}

View File

@ -219,17 +219,18 @@ async fn clone_config() {
let mut state = build_state::<MinimalEthSpec>(16).await;
state.build_all_caches(&spec).unwrap();
state.build_caches(&spec).unwrap();
state
.update_tree_hash_cache()
.expect("should update tree hash cache");
let num_caches = 4;
let num_caches = 5;
let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig {
committee_caches: (i & 1) != 0,
pubkey_cache: ((i >> 1) & 1) != 0,
exit_cache: ((i >> 2) & 1) != 0,
tree_hash_cache: ((i >> 3) & 1) != 0,
progressive_balances_cache: ((i >> 4) & 1) != 0,
});
for config in all_configs {

View File

@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "4.2.0"
version = "4.3.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2021"

View File

@ -329,7 +329,7 @@ fn initialize_state_with_validators<T: EthSpec>(
}
// Now that we have our validators, initialize the caches (including the committees)
state.build_all_caches(spec).unwrap();
state.build_caches(spec).unwrap();
// Set genesis validators root for domain separation and chain versioning
*state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap();

View File

@ -109,7 +109,7 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(),
let target_slot = initial_slot + slots;
state
.build_all_caches(spec)
.build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?;
let state_root = if let Some(root) = cli_state_root.or(state_root) {

View File

@ -205,7 +205,7 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(),
if config.exclude_cache_builds {
pre_state
.build_all_caches(spec)
.build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?;
let state_root = pre_state
.update_tree_hash_cache()
@ -303,7 +303,7 @@ fn do_transition<T: EthSpec>(
if !config.exclude_cache_builds {
let t = Instant::now();
pre_state
.build_all_caches(spec)
.build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?;
debug!("Build caches: {:?}", t.elapsed());
@ -335,7 +335,7 @@ fn do_transition<T: EthSpec>(
let t = Instant::now();
pre_state
.build_all_caches(spec)
.build_caches(spec)
.map_err(|e| format!("Unable to build caches: {:?}", e))?;
debug!("Build all caches (again): {:?}", t.elapsed());

View File

@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "4.2.0"
version = "4.3.0"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"
autotests = false

View File

@ -16,7 +16,10 @@ use std::str::FromStr;
use std::string::ToString;
use std::time::Duration;
use tempfile::TempDir;
use types::{Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec};
use types::{
Address, Checkpoint, Epoch, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec,
ProgressiveBalancesMode,
};
use unused_port::{unused_tcp4_port, unused_tcp6_port, unused_udp4_port, unused_udp6_port};
const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545/";
@ -1125,48 +1128,13 @@ fn default_backfill_rate_limiting_flag() {
}
#[test]
fn default_boot_nodes() {
let mainnet = vec![
// Lighthouse Team (Sigma Prime)
"enr:-Jq4QItoFUuug_n_qbYbU0OY04-np2wT8rUCauOOXNi0H3BWbDj-zbfZb7otA7jZ6flbBpx1LNZK2TDebZ9dEKx84LYBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISsaa0ZiXNlY3AyNTZrMaEDHAD2JKYevx89W0CcFJFiskdcEzkH_Wdv9iW42qLK79ODdWRwgiMo",
"enr:-Jq4QN_YBsUOqQsty1OGvYv48PMaiEt1AzGD1NkYQHaxZoTyVGqMYXg0K9c0LPNWC9pkXmggApp8nygYLsQwScwAgfgBhGV0aDKQtTA_KgEAAAD__________4JpZIJ2NIJpcISLosQxiXNlY3AyNTZrMaEDBJj7_dLFACaxBfaI8KZTh_SSJUjhyAyfshimvSqo22WDdWRwgiMo",
// EF Team
"enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg",
"enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg",
"enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg",
"enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg",
// Teku team (Consensys)
"enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA",
"enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA",
// Prysm team (Prysmatic Labs)
"enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg",
"enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA",
"enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg",
// Nimbus team
"enr:-LK4QA8FfhaAjlb_BXsXxSfiysR7R52Nhi9JBt4F8SPssu8hdE1BXQQEtVDC3qStCW60LSO7hEsVHv5zm8_6Vnjhcn0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAN4aBKJc2VjcDI1NmsxoQJerDhsJ-KxZ8sHySMOCmTO6sHM3iCFQ6VMvLTe948MyYN0Y3CCI4yDdWRwgiOM",
"enr:-LK4QKWrXTpV9T78hNG6s8AM6IO4XH9kFT91uZtFg1GcsJ6dKovDOr1jtAAFPnS2lvNltkOGA9k29BUN7lFh_sjuc9QBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhANAdd-Jc2VjcDI1NmsxoQLQa6ai7y9PMN5hpLe5HmiJSlYzMuzP7ZhwRiwHvqNXdoN0Y3CCI4yDdWRwgiOM"
];
let number_of_boot_nodes = 15;
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| {
// Lighthouse Team (Sigma Prime)
assert_eq!(config.network.boot_nodes_enr[0].to_base64(), mainnet[0]);
assert_eq!(config.network.boot_nodes_enr[1].to_base64(), mainnet[1]);
// EF Team
assert_eq!(config.network.boot_nodes_enr[2].to_base64(), mainnet[2]);
assert_eq!(config.network.boot_nodes_enr[3].to_base64(), mainnet[3]);
assert_eq!(config.network.boot_nodes_enr[4].to_base64(), mainnet[4]);
assert_eq!(config.network.boot_nodes_enr[5].to_base64(), mainnet[5]);
// Teku team (Consensys)
assert_eq!(config.network.boot_nodes_enr[6].to_base64(), mainnet[6]);
assert_eq!(config.network.boot_nodes_enr[7].to_base64(), mainnet[7]);
// Prysm team (Prysmatic Labs)
assert_eq!(config.network.boot_nodes_enr[8].to_base64(), mainnet[8]);
assert_eq!(config.network.boot_nodes_enr[9].to_base64(), mainnet[9]);
assert_eq!(config.network.boot_nodes_enr[10].to_base64(), mainnet[10]);
// Nimbus team
assert_eq!(config.network.boot_nodes_enr[11].to_base64(), mainnet[11]);
assert_eq!(config.network.boot_nodes_enr[12].to_base64(), mainnet[12]);
assert_eq!(config.network.boot_nodes_enr.len(), number_of_boot_nodes);
});
}
#[test]
@ -2323,3 +2291,28 @@ fn invalid_gossip_verified_blocks_path() {
)
});
}
#[test]
fn progressive_balances_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.chain.progressive_balances_mode,
ProgressiveBalancesMode::Checked
)
});
}
#[test]
fn progressive_balances_fast() {
CommandLineTest::new()
.flag("progressive-balances", Some("fast"))
.run_with_zero_port()
.with_config(|config| {
assert_eq!(
config.chain.progressive_balances_mode,
ProgressiveBalancesMode::Fast
)
});
}

View File

@ -6,9 +6,9 @@ use crate::type_name;
use crate::type_name::TypeName;
use serde_derive::Deserialize;
use state_processing::per_epoch_processing::capella::process_historical_summaries_update;
use state_processing::per_epoch_processing::effective_balance_updates::process_effective_balance_updates;
use state_processing::per_epoch_processing::{
altair, base,
effective_balance_updates::process_effective_balance_updates,
historical_roots_update::process_historical_roots_update,
process_registry_updates, process_slashings,
resets::{process_eth1_data_reset, process_randao_mixes_reset, process_slashings_reset},
@ -180,7 +180,7 @@ impl<E: EthSpec> EpochTransition<E> for Eth1DataReset {
impl<E: EthSpec> EpochTransition<E> for EffectiveBalanceUpdates {
fn run(state: &mut BeaconState<E>, spec: &ChainSpec) -> Result<(), EpochProcessingError> {
process_effective_balance_updates(state, spec)
process_effective_balance_updates(state, None, spec)
}
}

View File

@ -18,7 +18,8 @@ use std::sync::Arc;
use std::time::Duration;
use types::{
Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec,
ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256,
ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, ProgressiveBalancesMode,
SignedBeaconBlock, Slot, Uint256,
};
#[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)]
@ -382,6 +383,7 @@ impl<E: EthSpec> Tester<E> {
block_root,
block.clone(),
NotifyExecutionLayer::Yes,
|| Ok(()),
))?;
if result.is_ok() != valid {
return Err(Error::DidntFail(format!(
@ -439,7 +441,9 @@ impl<E: EthSpec> Tester<E> {
block_delay,
&state,
PayloadVerificationStatus::Irrelevant,
ProgressiveBalancesMode::Strict,
&self.harness.chain.spec,
self.harness.logger(),
);
if result.is_ok() {

View File

@ -4,6 +4,7 @@ use crate::case_result::compare_beacon_state_results_without_caches;
use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file};
use crate::testing_spec;
use serde_derive::Deserialize;
use state_processing::common::update_progressive_balances_cache::initialize_progressive_balances_cache;
use ssz::Decode;
use state_processing::{
per_block_processing::{
@ -97,10 +98,8 @@ impl<E: EthSpec> Operation<E> for Attestation<E> {
&mut ctxt,
spec,
),
BeaconState::Altair(_)
| BeaconState::Merge(_)
| BeaconState::Capella(_)
| BeaconState::Deneb(_) => {
BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) | BeaconState::Deneb(_) => {
initialize_progressive_balances_cache(state, None, spec)?;
altair::process_attestation(state, self, 0, &mut ctxt, VerifySignatures::True, spec)
}
}
@ -123,6 +122,7 @@ impl<E: EthSpec> Operation<E> for AttesterSlashing<E> {
_: &Operations<E, Self>,
) -> Result<(), BlockProcessingError> {
let mut ctxt = ConsensusContext::new(state.slot());
initialize_progressive_balances_cache(state, None, spec)?;
process_attester_slashings(
state,
&[self.clone()],
@ -173,6 +173,7 @@ impl<E: EthSpec> Operation<E> for ProposerSlashing {
_: &Operations<E, Self>,
) -> Result<(), BlockProcessingError> {
let mut ctxt = ConsensusContext::new(state.slot());
initialize_progressive_balances_cache(state, None, spec)?;
process_proposer_slashings(
state,
&[self.clone()],

View File

@ -67,7 +67,7 @@ impl<E: EthSpec> Case for SanityBlocks<E> {
let spec = &testing_spec::<E>(fork_name);
// Processing requires the epoch cache.
bulk_state.build_all_caches(spec).unwrap();
bulk_state.build_caches(spec).unwrap();
// Spawning a second state to call the VerifyIndiviual strategy to avoid bitrot.
// See https://github.com/sigp/lighthouse/issues/742.

View File

@ -61,7 +61,7 @@ impl<E: EthSpec> Case for SanitySlots<E> {
let spec = &testing_spec::<E>(fork_name);
// Processing requires the epoch cache.
state.build_all_caches(spec).unwrap();
state.build_caches(spec).unwrap();
let mut result = (0..self.slots)
.try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ()))