Realized unrealized experimentation (#3322)

## Issue Addressed

Add a flag that optionally enables unrealized vote tracking. We would like to test it out on testnets and benchmark the differences between vote-tracking methods. This PR includes a DB schema upgrade to enable the new vote tracking style.


Co-authored-by: realbigsean <sean@sigmaprime.io>
Co-authored-by: Paul Hauner <paul@paulhauner.com>
Co-authored-by: sean <seananderson33@gmail.com>
Co-authored-by: Mac L <mjladson@pm.me>
This commit is contained in:
realbigsean 2022-07-25 23:53:26 +00:00
parent bb5a6d2cca
commit 20ebf1f3c1
47 changed files with 1254 additions and 338 deletions

2
Cargo.lock generated
View File

@ -2125,6 +2125,7 @@ dependencies = [
"eth2_ssz", "eth2_ssz",
"eth2_ssz_derive", "eth2_ssz_derive",
"proto_array", "proto_array",
"state_processing",
"store", "store",
"tokio", "tokio",
"types", "types",
@ -3008,6 +3009,7 @@ dependencies = [
"serde", "serde",
"serde_json", "serde_json",
"serde_yaml", "serde_yaml",
"snap",
"state_processing", "state_processing",
"tree_hash", "tree_hash",
"types", "types",

View File

@ -93,6 +93,7 @@ use types::beacon_state::CloneConfig;
use types::*; use types::*;
pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock};
pub use fork_choice::CountUnrealized;
pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>; pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
@ -1740,6 +1741,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.slot()?, self.slot()?,
verified.indexed_attestation(), verified.indexed_attestation(),
AttestationFromBlock::False, AttestationFromBlock::False,
&self.spec,
) )
.map_err(Into::into) .map_err(Into::into)
} }
@ -2220,6 +2222,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub async fn process_chain_segment( pub async fn process_chain_segment(
self: &Arc<Self>, self: &Arc<Self>,
chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>, chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
count_unrealized: CountUnrealized,
) -> ChainSegmentResult<T::EthSpec> { ) -> ChainSegmentResult<T::EthSpec> {
let mut imported_blocks = 0; let mut imported_blocks = 0;
@ -2284,7 +2287,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Import the blocks into the chain. // Import the blocks into the chain.
for signature_verified_block in signature_verified_blocks { for signature_verified_block in signature_verified_blocks {
match self.process_block(signature_verified_block).await { match self
.process_block(signature_verified_block, count_unrealized)
.await
{
Ok(_) => imported_blocks += 1, Ok(_) => imported_blocks += 1,
Err(error) => { Err(error) => {
return ChainSegmentResult::Failed { return ChainSegmentResult::Failed {
@ -2368,6 +2374,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub async fn process_block<B: IntoExecutionPendingBlock<T>>( pub async fn process_block<B: IntoExecutionPendingBlock<T>>(
self: &Arc<Self>, self: &Arc<Self>,
unverified_block: B, unverified_block: B,
count_unrealized: CountUnrealized,
) -> Result<Hash256, BlockError<T::EthSpec>> { ) -> Result<Hash256, BlockError<T::EthSpec>> {
// Start the Prometheus timer. // Start the Prometheus timer.
let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
@ -2383,7 +2390,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let import_block = async move { let import_block = async move {
let execution_pending = unverified_block.into_execution_pending_block(&chain)?; let execution_pending = unverified_block.into_execution_pending_block(&chain)?;
chain chain
.import_execution_pending_block(execution_pending) .import_execution_pending_block(execution_pending, count_unrealized)
.await .await
}; };
@ -2441,6 +2448,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
async fn import_execution_pending_block( async fn import_execution_pending_block(
self: Arc<Self>, self: Arc<Self>,
execution_pending_block: ExecutionPendingBlock<T>, execution_pending_block: ExecutionPendingBlock<T>,
count_unrealized: CountUnrealized,
) -> Result<Hash256, BlockError<T::EthSpec>> { ) -> Result<Hash256, BlockError<T::EthSpec>> {
let ExecutionPendingBlock { let ExecutionPendingBlock {
block, block,
@ -2499,6 +2507,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state, state,
confirmed_state_roots, confirmed_state_roots,
payload_verification_status, payload_verification_status,
count_unrealized,
) )
}, },
"payload_verification_handle", "payload_verification_handle",
@ -2520,6 +2529,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
mut state: BeaconState<T::EthSpec>, mut state: BeaconState<T::EthSpec>,
confirmed_state_roots: Vec<Hash256>, confirmed_state_roots: Vec<Hash256>,
payload_verification_status: PayloadVerificationStatus, payload_verification_status: PayloadVerificationStatus,
count_unrealized: CountUnrealized,
) -> Result<Hash256, BlockError<T::EthSpec>> { ) -> Result<Hash256, BlockError<T::EthSpec>> {
let current_slot = self.slot()?; let current_slot = self.slot()?;
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
@ -2665,6 +2675,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
&state, &state,
payload_verification_status, payload_verification_status,
&self.spec, &self.spec,
count_unrealized.and(self.config.count_unrealized.into()),
) )
.map_err(|e| BlockError::BeaconChainError(e.into()))?; .map_err(|e| BlockError::BeaconChainError(e.into()))?;
} }
@ -2690,6 +2701,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
current_slot, current_slot,
&indexed_attestation, &indexed_attestation,
AttestationFromBlock::True, AttestationFromBlock::True,
&self.spec,
) { ) {
Ok(()) => Ok(()), Ok(()) => Ok(()),
// Ignore invalid attestations whilst importing attestations from a block. The // Ignore invalid attestations whilst importing attestations from a block. The

View File

@ -155,6 +155,8 @@ pub struct BeaconForkChoiceStore<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<
justified_checkpoint: Checkpoint, justified_checkpoint: Checkpoint,
justified_balances: Vec<u64>, justified_balances: Vec<u64>,
best_justified_checkpoint: Checkpoint, best_justified_checkpoint: Checkpoint,
unrealized_justified_checkpoint: Checkpoint,
unrealized_finalized_checkpoint: Checkpoint,
proposer_boost_root: Hash256, proposer_boost_root: Hash256,
_phantom: PhantomData<E>, _phantom: PhantomData<E>,
} }
@ -201,6 +203,8 @@ where
justified_balances: anchor_state.balances().clone().into(), justified_balances: anchor_state.balances().clone().into(),
finalized_checkpoint, finalized_checkpoint,
best_justified_checkpoint: justified_checkpoint, best_justified_checkpoint: justified_checkpoint,
unrealized_justified_checkpoint: justified_checkpoint,
unrealized_finalized_checkpoint: finalized_checkpoint,
proposer_boost_root: Hash256::zero(), proposer_boost_root: Hash256::zero(),
_phantom: PhantomData, _phantom: PhantomData,
} }
@ -216,6 +220,8 @@ where
justified_checkpoint: self.justified_checkpoint, justified_checkpoint: self.justified_checkpoint,
justified_balances: self.justified_balances.clone(), justified_balances: self.justified_balances.clone(),
best_justified_checkpoint: self.best_justified_checkpoint, best_justified_checkpoint: self.best_justified_checkpoint,
unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
proposer_boost_root: self.proposer_boost_root, proposer_boost_root: self.proposer_boost_root,
} }
} }
@ -233,6 +239,8 @@ where
justified_checkpoint: persisted.justified_checkpoint, justified_checkpoint: persisted.justified_checkpoint,
justified_balances: persisted.justified_balances, justified_balances: persisted.justified_balances,
best_justified_checkpoint: persisted.best_justified_checkpoint, best_justified_checkpoint: persisted.best_justified_checkpoint,
unrealized_justified_checkpoint: persisted.unrealized_justified_checkpoint,
unrealized_finalized_checkpoint: persisted.unrealized_finalized_checkpoint,
proposer_boost_root: persisted.proposer_boost_root, proposer_boost_root: persisted.proposer_boost_root,
_phantom: PhantomData, _phantom: PhantomData,
}) })
@ -280,6 +288,14 @@ where
&self.finalized_checkpoint &self.finalized_checkpoint
} }
fn unrealized_justified_checkpoint(&self) -> &Checkpoint {
&self.unrealized_justified_checkpoint
}
fn unrealized_finalized_checkpoint(&self) -> &Checkpoint {
&self.unrealized_finalized_checkpoint
}
fn proposer_boost_root(&self) -> Hash256 { fn proposer_boost_root(&self) -> Hash256 {
self.proposer_boost_root self.proposer_boost_root
} }
@ -323,6 +339,14 @@ where
self.best_justified_checkpoint = checkpoint self.best_justified_checkpoint = checkpoint
} }
fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint) {
self.unrealized_justified_checkpoint = checkpoint;
}
fn set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint) {
self.unrealized_finalized_checkpoint = checkpoint;
}
fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) { fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256) {
self.proposer_boost_root = proposer_boost_root; self.proposer_boost_root = proposer_boost_root;
} }
@ -330,22 +354,26 @@ where
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database. /// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
#[superstruct( #[superstruct(
variants(V1, V7, V8), variants(V1, V7, V8, V10),
variant_attributes(derive(Encode, Decode)), variant_attributes(derive(Encode, Decode)),
no_enum no_enum
)] )]
pub struct PersistedForkChoiceStore { pub struct PersistedForkChoiceStore {
#[superstruct(only(V1, V7))] #[superstruct(only(V1, V7))]
pub balances_cache: BalancesCacheV1, pub balances_cache: BalancesCacheV1,
#[superstruct(only(V8))] #[superstruct(only(V8, V10))]
pub balances_cache: BalancesCacheV8, pub balances_cache: BalancesCacheV8,
pub time: Slot, pub time: Slot,
pub finalized_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint,
pub justified_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint,
pub justified_balances: Vec<u64>, pub justified_balances: Vec<u64>,
pub best_justified_checkpoint: Checkpoint, pub best_justified_checkpoint: Checkpoint,
#[superstruct(only(V7, V8))] #[superstruct(only(V10))]
pub unrealized_justified_checkpoint: Checkpoint,
#[superstruct(only(V10))]
pub unrealized_finalized_checkpoint: Checkpoint,
#[superstruct(only(V7, V8, V10))]
pub proposer_boost_root: Hash256, pub proposer_boost_root: Hash256,
} }
pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV8; pub type PersistedForkChoiceStore = PersistedForkChoiceStoreV10;

View File

@ -1416,6 +1416,10 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>(
block_root: Hash256, block_root: Hash256,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<(), BlockError<T::EthSpec>> { ) -> Result<(), BlockError<T::EthSpec>> {
// The finalized checkpoint is being read from fork choice, rather than the cached head.
//
// Fork choice has the most up-to-date view of finalization and there's no point importing a
// block which conflicts with the fork-choice view of finalization.
let finalized_slot = chain let finalized_slot = chain
.canonical_head .canonical_head
.cached_head() .cached_head()

View File

@ -647,6 +647,7 @@ where
store.clone(), store.clone(),
Some(current_slot), Some(current_slot),
&self.spec, &self.spec,
self.chain_config.count_unrealized.into(),
)?; )?;
} }

View File

@ -24,6 +24,7 @@ pub struct ChainConfig {
/// ///
/// If set to 0 then block proposal will not wait for fork choice at all. /// If set to 0 then block proposal will not wait for fork choice at all.
pub fork_choice_before_proposal_timeout_ms: u64, pub fork_choice_before_proposal_timeout_ms: u64,
pub count_unrealized: bool,
} }
impl Default for ChainConfig { impl Default for ChainConfig {
@ -35,6 +36,7 @@ impl Default for ChainConfig {
enable_lock_timeouts: true, enable_lock_timeouts: true,
max_network_size: 10 * 1_048_576, // 10M max_network_size: 10 * 1_048_576, // 10M
fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT, fork_choice_before_proposal_timeout_ms: DEFAULT_FORK_CHOICE_BEFORE_PROPOSAL_TIMEOUT,
count_unrealized: false,
} }
} }
} }

View File

@ -1,5 +1,5 @@
use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot};
use fork_choice::{ForkChoice, PayloadVerificationStatus}; use fork_choice::{CountUnrealized, ForkChoice, PayloadVerificationStatus};
use itertools::process_results; use itertools::process_results;
use slog::{info, warn, Logger}; use slog::{info, warn, Logger};
use state_processing::state_advance::complete_state_advance; use state_processing::state_advance::complete_state_advance;
@ -99,6 +99,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
store: Arc<HotColdDB<E, Hot, Cold>>, store: Arc<HotColdDB<E, Hot, Cold>>,
current_slot: Option<Slot>, current_slot: Option<Slot>,
spec: &ChainSpec, spec: &ChainSpec,
count_unrealized_config: CountUnrealized,
) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> { ) -> Result<ForkChoice<BeaconForkChoiceStore<E, Hot, Cold>, E>, String> {
// Fetch finalized block. // Fetch finalized block.
let finalized_checkpoint = head_state.finalized_checkpoint(); let finalized_checkpoint = head_state.finalized_checkpoint();
@ -163,7 +164,8 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
.map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?; .map_err(|e| format!("Error loading blocks to replay for fork choice: {:?}", e))?;
let mut state = finalized_snapshot.beacon_state; let mut state = finalized_snapshot.beacon_state;
for block in blocks { let blocks_len = blocks.len();
for (i, block) in blocks.into_iter().enumerate() {
complete_state_advance(&mut state, None, block.slot(), spec) complete_state_advance(&mut state, None, block.slot(), spec)
.map_err(|e| format!("State advance failed: {:?}", e))?; .map_err(|e| format!("State advance failed: {:?}", e))?;
@ -183,6 +185,15 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
// This scenario is so rare that it seems OK to double-verify some blocks. // This scenario is so rare that it seems OK to double-verify some blocks.
let payload_verification_status = PayloadVerificationStatus::Optimistic; let payload_verification_status = PayloadVerificationStatus::Optimistic;
// Because we are replaying a single chain of blocks, we only need to calculate unrealized
// justification for the last block in the chain.
let is_last_block = i + 1 == blocks_len;
let count_unrealized = if is_last_block {
count_unrealized_config
} else {
CountUnrealized::False
};
fork_choice fork_choice
.on_block( .on_block(
block.slot(), block.slot(),
@ -193,6 +204,7 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
&state, &state,
payload_verification_status, payload_verification_status,
spec, spec,
count_unrealized,
) )
.map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?; .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
} }

View File

@ -44,7 +44,7 @@ mod validator_pubkey_cache;
pub use self::beacon_chain::{ pub use self::beacon_chain::{
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, CountUnrealized, ForkChoiceError, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped,
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
}; };
pub use self::beacon_snapshot::BeaconSnapshot; pub use self::beacon_snapshot::BeaconSnapshot;

View File

@ -1,5 +1,6 @@
use crate::beacon_fork_choice_store::{ use crate::beacon_fork_choice_store::{
PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8, PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV7,
PersistedForkChoiceStoreV8,
}; };
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
@ -7,10 +8,10 @@ use store::{DBColumn, Error, StoreItem};
use superstruct::superstruct; use superstruct::superstruct;
// If adding a new version you should update this type alias and fix the breakages. // If adding a new version you should update this type alias and fix the breakages.
pub type PersistedForkChoice = PersistedForkChoiceV8; pub type PersistedForkChoice = PersistedForkChoiceV10;
#[superstruct( #[superstruct(
variants(V1, V7, V8), variants(V1, V7, V8, V10),
variant_attributes(derive(Encode, Decode)), variant_attributes(derive(Encode, Decode)),
no_enum no_enum
)] )]
@ -22,6 +23,8 @@ pub struct PersistedForkChoice {
pub fork_choice_store: PersistedForkChoiceStoreV7, pub fork_choice_store: PersistedForkChoiceStoreV7,
#[superstruct(only(V8))] #[superstruct(only(V8))]
pub fork_choice_store: PersistedForkChoiceStoreV8, pub fork_choice_store: PersistedForkChoiceStoreV8,
#[superstruct(only(V10))]
pub fork_choice_store: PersistedForkChoiceStoreV10,
} }
macro_rules! impl_store_item { macro_rules! impl_store_item {
@ -45,3 +48,4 @@ macro_rules! impl_store_item {
impl_store_item!(PersistedForkChoiceV1); impl_store_item!(PersistedForkChoiceV1);
impl_store_item!(PersistedForkChoiceV7); impl_store_item!(PersistedForkChoiceV7);
impl_store_item!(PersistedForkChoiceV8); impl_store_item!(PersistedForkChoiceV8);
impl_store_item!(PersistedForkChoiceV10);

View File

@ -1,4 +1,5 @@
//! Utilities for managing database schema changes. //! Utilities for managing database schema changes.
mod migration_schema_v10;
mod migration_schema_v6; mod migration_schema_v6;
mod migration_schema_v7; mod migration_schema_v7;
mod migration_schema_v8; mod migration_schema_v8;
@ -6,7 +7,9 @@ mod migration_schema_v9;
mod types; mod types;
use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY};
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; use crate::persisted_fork_choice::{
PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV7, PersistedForkChoiceV8,
};
use crate::types::ChainSpec; use crate::types::ChainSpec;
use slog::{warn, Logger}; use slog::{warn, Logger};
use std::path::Path; use std::path::Path;
@ -130,6 +133,32 @@ pub fn migrate_schema<T: BeaconChainTypes>(
migration_schema_v9::downgrade_from_v9::<T>(db.clone(), log)?; migration_schema_v9::downgrade_from_v9::<T>(db.clone(), log)?;
db.store_schema_version(to) db.store_schema_version(to)
} }
(SchemaVersion(9), SchemaVersion(10)) => {
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV8>(&FORK_CHOICE_DB_KEY)?;
if let Some(fork_choice) = fork_choice_opt {
let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?;
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
(SchemaVersion(10), SchemaVersion(9)) => {
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV10>(&FORK_CHOICE_DB_KEY)?;
if let Some(fork_choice) = fork_choice_opt {
let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?;
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// Anything else is an error. // Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to, target_version: to,

View File

@ -0,0 +1,97 @@
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8};
use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8};
use crate::schema_change::{
types::{SszContainerV10, SszContainerV7},
StoreError,
};
use proto_array::core::SszContainer;
use ssz::{Decode, Encode};
/// Upgrade a persisted fork choice record from the V8 schema to the V10 schema.
///
/// Decodes the stored proto-array bytes as a `SszContainerV7`, walks it up the
/// version chain to the current container, and re-encodes it in place. The new
/// per-node unrealized checkpoint fields are initialised to `None` by the
/// container conversions.
pub fn update_fork_choice(
    mut fork_choice: PersistedForkChoiceV8,
) -> Result<PersistedForkChoiceV10, StoreError> {
    let proto_array_bytes = &fork_choice.fork_choice.proto_array_bytes;
    let ssz_container_v7 = SszContainerV7::from_ssz_bytes(proto_array_bytes).map_err(|e| {
        StoreError::SchemaMigrationError(format!(
            "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
            e
        ))
    })?;

    // These transformations instantiate `node.unrealized_justified_checkpoint` and
    // `node.unrealized_finalized_checkpoint` to `None`.
    let ssz_container_v10: SszContainerV10 = ssz_container_v7.into();
    let ssz_container: SszContainer = ssz_container_v10.into();
    fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes();

    Ok(fork_choice.into())
}
/// Downgrade a persisted fork choice record from the V10 schema back to V8.
///
/// Decodes the stored proto-array bytes as a `SszContainerV10` and re-encodes
/// them as V7 bytes, dropping the unrealized checkpoint fields in the process.
pub fn downgrade_fork_choice(
    mut fork_choice: PersistedForkChoiceV10,
) -> Result<PersistedForkChoiceV8, StoreError> {
    let proto_array_bytes = &fork_choice.fork_choice.proto_array_bytes;
    let ssz_container_v10 = SszContainerV10::from_ssz_bytes(proto_array_bytes).map_err(|e| {
        StoreError::SchemaMigrationError(format!(
            "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
            e
        ))
    })?;

    // Dropping down to V7 discards the per-node unrealized checkpoints.
    let ssz_container_v7: SszContainerV7 = ssz_container_v10.into();
    fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes();

    Ok(fork_choice.into())
}
/// Upgrade a V8 fork choice store to V10 by copying every existing field and
/// seeding the two new unrealized checkpoint fields.
impl From<PersistedForkChoiceStoreV8> for PersistedForkChoiceStoreV10 {
    fn from(other: PersistedForkChoiceStoreV8) -> Self {
        Self {
            balances_cache: other.balances_cache,
            time: other.time,
            finalized_checkpoint: other.finalized_checkpoint,
            justified_checkpoint: other.justified_checkpoint,
            justified_balances: other.justified_balances,
            best_justified_checkpoint: other.best_justified_checkpoint,
            // V8 has no unrealized checkpoints, so seed them from the closest
            // existing values: best-justified and finalized respectively.
            unrealized_justified_checkpoint: other.best_justified_checkpoint,
            unrealized_finalized_checkpoint: other.finalized_checkpoint,
            proposer_boost_root: other.proposer_boost_root,
        }
    }
}

/// Upgrade a whole persisted record; only the store changes shape here.
/// The proto-array bytes are migrated separately (see `update_fork_choice`).
impl From<PersistedForkChoiceV8> for PersistedForkChoiceV10 {
    fn from(other: PersistedForkChoiceV8) -> Self {
        Self {
            fork_choice: other.fork_choice,
            fork_choice_store: other.fork_choice_store.into(),
        }
    }
}
/// Downgrade a V10 fork choice store to V8 by copying the shared fields; the
/// unrealized checkpoint fields are simply dropped.
impl From<PersistedForkChoiceStoreV10> for PersistedForkChoiceStoreV8 {
    fn from(other: PersistedForkChoiceStoreV10) -> Self {
        Self {
            balances_cache: other.balances_cache,
            time: other.time,
            finalized_checkpoint: other.finalized_checkpoint,
            justified_checkpoint: other.justified_checkpoint,
            justified_balances: other.justified_balances,
            best_justified_checkpoint: other.best_justified_checkpoint,
            proposer_boost_root: other.proposer_boost_root,
        }
    }
}

/// Downgrade a whole persisted record; only the store changes shape here.
/// The proto-array bytes are migrated separately (see `downgrade_fork_choice`).
impl From<PersistedForkChoiceV10> for PersistedForkChoiceV8 {
    fn from(other: PersistedForkChoiceV10) -> Self {
        Self {
            fork_choice: other.fork_choice,
            fork_choice_store: other.fork_choice_store.into(),
        }
    }
}

View File

@ -2,7 +2,7 @@
use crate::beacon_chain::BeaconChainTypes; use crate::beacon_chain::BeaconChainTypes;
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7}; use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7};
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7}; use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
use crate::schema_change::types::{ProtoNodeV6, SszContainerV6, SszContainerV7}; use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7};
use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot}; use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot};
use crate::{BeaconForkChoiceStore, BeaconSnapshot}; use crate::{BeaconForkChoiceStore, BeaconSnapshot};
use fork_choice::ForkChoice; use fork_choice::ForkChoice;
@ -86,7 +86,8 @@ pub(crate) fn update_fork_choice<T: BeaconChainTypes>(
// to `None`. // to `None`.
let ssz_container_v7: SszContainerV7 = let ssz_container_v7: SszContainerV7 =
ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint); ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint);
let ssz_container: SszContainer = ssz_container_v7.into(); let ssz_container_v10: SszContainerV10 = ssz_container_v7.into();
let ssz_container: SszContainer = ssz_container_v10.into();
let mut fork_choice: ProtoArrayForkChoice = ssz_container.into(); let mut fork_choice: ProtoArrayForkChoice = ssz_container.into();
update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db) update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db)
@ -97,6 +98,13 @@ pub(crate) fn update_fork_choice<T: BeaconChainTypes>(
update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice) update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice)
.map_err(StoreError::SchemaMigrationError)?; .map_err(StoreError::SchemaMigrationError)?;
// Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence.
let ssz_container = SszContainer::from(&fork_choice);
let ssz_container_v7 = SszContainerV7::from(ssz_container);
persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes();
persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint;
Ok(()) Ok(())
} }
@ -301,8 +309,6 @@ fn update_store_justified_checkpoint(
.ok_or("Proto node with current finalized checkpoint not found")?; .ok_or("Proto node with current finalized checkpoint not found")?;
fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint; fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint;
persisted_fork_choice.fork_choice.proto_array_bytes = fork_choice.as_bytes();
persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint;
Ok(()) Ok(())
} }

View File

@ -12,7 +12,7 @@ four_byte_option_impl!(four_byte_option_usize, usize);
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint); four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);
#[superstruct( #[superstruct(
variants(V1, V6, V7), variants(V1, V6, V7, V10),
variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)), variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)),
no_enum no_enum
)] )]
@ -30,18 +30,24 @@ pub struct ProtoNode {
#[superstruct(only(V1, V6))] #[superstruct(only(V1, V6))]
pub finalized_epoch: Epoch, pub finalized_epoch: Epoch,
#[ssz(with = "four_byte_option_checkpoint")] #[ssz(with = "four_byte_option_checkpoint")]
#[superstruct(only(V7))] #[superstruct(only(V7, V10))]
pub justified_checkpoint: Option<Checkpoint>, pub justified_checkpoint: Option<Checkpoint>,
#[ssz(with = "four_byte_option_checkpoint")] #[ssz(with = "four_byte_option_checkpoint")]
#[superstruct(only(V7))] #[superstruct(only(V7, V10))]
pub finalized_checkpoint: Option<Checkpoint>, pub finalized_checkpoint: Option<Checkpoint>,
pub weight: u64, pub weight: u64,
#[ssz(with = "four_byte_option_usize")] #[ssz(with = "four_byte_option_usize")]
pub best_child: Option<usize>, pub best_child: Option<usize>,
#[ssz(with = "four_byte_option_usize")] #[ssz(with = "four_byte_option_usize")]
pub best_descendant: Option<usize>, pub best_descendant: Option<usize>,
#[superstruct(only(V6, V7))] #[superstruct(only(V6, V7, V10))]
pub execution_status: ExecutionStatus, pub execution_status: ExecutionStatus,
#[ssz(with = "four_byte_option_checkpoint")]
#[superstruct(only(V10))]
pub unrealized_justified_checkpoint: Option<Checkpoint>,
#[ssz(with = "four_byte_option_checkpoint")]
#[superstruct(only(V10))]
pub unrealized_finalized_checkpoint: Option<Checkpoint>,
} }
impl Into<ProtoNodeV6> for ProtoNodeV1 { impl Into<ProtoNodeV6> for ProtoNodeV1 {
@ -88,9 +94,31 @@ impl Into<ProtoNodeV7> for ProtoNodeV6 {
} }
} }
impl Into<ProtoNode> for ProtoNodeV7 { impl Into<ProtoNodeV10> for ProtoNodeV7 {
fn into(self) -> ProtoNode { fn into(self) -> ProtoNodeV10 {
ProtoNode { ProtoNodeV10 {
slot: self.slot,
state_root: self.state_root,
target_root: self.target_root,
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
root: self.root,
parent: self.parent,
justified_checkpoint: self.justified_checkpoint,
finalized_checkpoint: self.finalized_checkpoint,
weight: self.weight,
best_child: self.best_child,
best_descendant: self.best_descendant,
execution_status: self.execution_status,
unrealized_justified_checkpoint: None,
unrealized_finalized_checkpoint: None,
}
}
}
impl Into<ProtoNodeV7> for ProtoNodeV10 {
fn into(self) -> ProtoNodeV7 {
ProtoNodeV7 {
slot: self.slot, slot: self.slot,
state_root: self.state_root, state_root: self.state_root,
target_root: self.target_root, target_root: self.target_root,
@ -108,8 +136,50 @@ impl Into<ProtoNode> for ProtoNodeV7 {
} }
} }
/// V10 is the newest on-disk node layout, so converting to the in-memory
/// `ProtoNode` is a field-for-field copy, including both unrealized
/// checkpoint fields.
impl Into<ProtoNode> for ProtoNodeV10 {
    fn into(self) -> ProtoNode {
        ProtoNode {
            slot: self.slot,
            state_root: self.state_root,
            target_root: self.target_root,
            current_epoch_shuffling_id: self.current_epoch_shuffling_id,
            next_epoch_shuffling_id: self.next_epoch_shuffling_id,
            root: self.root,
            parent: self.parent,
            justified_checkpoint: self.justified_checkpoint,
            finalized_checkpoint: self.finalized_checkpoint,
            weight: self.weight,
            best_child: self.best_child,
            best_descendant: self.best_descendant,
            execution_status: self.execution_status,
            unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
            unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
        }
    }
}

/// Downgrade an in-memory node to the V7 layout. The V7 struct has no
/// unrealized checkpoint fields, so those are dropped.
impl From<ProtoNode> for ProtoNodeV7 {
    fn from(container: ProtoNode) -> Self {
        Self {
            slot: container.slot,
            state_root: container.state_root,
            target_root: container.target_root,
            current_epoch_shuffling_id: container.current_epoch_shuffling_id,
            next_epoch_shuffling_id: container.next_epoch_shuffling_id,
            root: container.root,
            parent: container.parent,
            justified_checkpoint: container.justified_checkpoint,
            finalized_checkpoint: container.finalized_checkpoint,
            weight: container.weight,
            best_child: container.best_child,
            best_descendant: container.best_descendant,
            execution_status: container.execution_status,
        }
    }
}
#[superstruct( #[superstruct(
variants(V1, V6, V7), variants(V1, V6, V7, V10),
variant_attributes(derive(Encode, Decode)), variant_attributes(derive(Encode, Decode)),
no_enum no_enum
)] )]
@ -122,9 +192,9 @@ pub struct SszContainer {
pub justified_epoch: Epoch, pub justified_epoch: Epoch,
#[superstruct(only(V1, V6))] #[superstruct(only(V1, V6))]
pub finalized_epoch: Epoch, pub finalized_epoch: Epoch,
#[superstruct(only(V7))] #[superstruct(only(V7, V10))]
pub justified_checkpoint: Checkpoint, pub justified_checkpoint: Checkpoint,
#[superstruct(only(V7))] #[superstruct(only(V7, V10))]
pub finalized_checkpoint: Checkpoint, pub finalized_checkpoint: Checkpoint,
#[superstruct(only(V1))] #[superstruct(only(V1))]
pub nodes: Vec<ProtoNodeV1>, pub nodes: Vec<ProtoNodeV1>,
@ -132,8 +202,10 @@ pub struct SszContainer {
pub nodes: Vec<ProtoNodeV6>, pub nodes: Vec<ProtoNodeV6>,
#[superstruct(only(V7))] #[superstruct(only(V7))]
pub nodes: Vec<ProtoNodeV7>, pub nodes: Vec<ProtoNodeV7>,
#[superstruct(only(V10))]
pub nodes: Vec<ProtoNodeV10>,
pub indices: Vec<(Hash256, usize)>, pub indices: Vec<(Hash256, usize)>,
#[superstruct(only(V7))] #[superstruct(only(V7, V10))]
pub previous_proposer_boost: ProposerBoost, pub previous_proposer_boost: ProposerBoost,
} }
@ -174,7 +246,41 @@ impl SszContainerV6 {
} }
} }
impl Into<SszContainer> for SszContainerV7 { impl Into<SszContainerV10> for SszContainerV7 {
fn into(self) -> SszContainerV10 {
let nodes = self.nodes.into_iter().map(Into::into).collect();
SszContainerV10 {
votes: self.votes,
balances: self.balances,
prune_threshold: self.prune_threshold,
justified_checkpoint: self.justified_checkpoint,
finalized_checkpoint: self.finalized_checkpoint,
nodes,
indices: self.indices,
previous_proposer_boost: self.previous_proposer_boost,
}
}
}
impl Into<SszContainerV7> for SszContainerV10 {
    /// Downgrade a V10 container to the V7 schema. The container-level fields
    /// are shared between the two versions; only the node list changes
    /// element type.
    fn into(self) -> SszContainerV7 {
        let Self {
            votes,
            balances,
            prune_threshold,
            justified_checkpoint,
            finalized_checkpoint,
            nodes,
            indices,
            previous_proposer_boost,
            ..
        } = self;
        SszContainerV7 {
            votes,
            balances,
            prune_threshold,
            justified_checkpoint,
            finalized_checkpoint,
            nodes: nodes.into_iter().map(Into::into).collect(),
            indices,
            previous_proposer_boost,
        }
    }
}
impl Into<SszContainer> for SszContainerV10 {
fn into(self) -> SszContainer { fn into(self) -> SszContainer {
let nodes = self.nodes.into_iter().map(Into::into).collect(); let nodes = self.nodes.into_iter().map(Into::into).collect();
@ -190,3 +296,20 @@ impl Into<SszContainer> for SszContainerV7 {
} }
} }
} }
impl From<SszContainer> for SszContainerV7 {
fn from(container: SszContainer) -> Self {
let nodes = container.nodes.into_iter().map(Into::into).collect();
Self {
votes: container.votes,
balances: container.balances,
prune_threshold: container.prune_threshold,
justified_checkpoint: container.justified_checkpoint,
finalized_checkpoint: container.finalized_checkpoint,
nodes,
indices: container.indices,
previous_proposer_boost: container.previous_proposer_boost,
}
}
}

View File

@ -17,6 +17,7 @@ use execution_layer::{
test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK}, test_utils::{ExecutionBlockGenerator, MockExecutionLayer, DEFAULT_TERMINAL_BLOCK},
ExecutionLayer, ExecutionLayer,
}; };
use fork_choice::CountUnrealized;
use futures::channel::mpsc::Receiver; use futures::channel::mpsc::Receiver;
pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
use int_to_bytes::int_to_bytes32; use int_to_bytes::int_to_bytes32;
@ -1370,8 +1371,11 @@ where
block: SignedBeaconBlock<E>, block: SignedBeaconBlock<E>,
) -> Result<SignedBeaconBlockHash, BlockError<E>> { ) -> Result<SignedBeaconBlockHash, BlockError<E>> {
self.set_current_slot(slot); self.set_current_slot(slot);
let block_hash: SignedBeaconBlockHash = let block_hash: SignedBeaconBlockHash = self
self.chain.process_block(Arc::new(block)).await?.into(); .chain
.process_block(Arc::new(block), CountUnrealized::True)
.await?
.into();
self.chain.recompute_head_at_current_slot().await?; self.chain.recompute_head_at_current_slot().await?;
Ok(block_hash) Ok(block_hash)
} }
@ -1380,8 +1384,11 @@ where
&self, &self,
block: SignedBeaconBlock<E>, block: SignedBeaconBlock<E>,
) -> Result<SignedBeaconBlockHash, BlockError<E>> { ) -> Result<SignedBeaconBlockHash, BlockError<E>> {
let block_hash: SignedBeaconBlockHash = let block_hash: SignedBeaconBlockHash = self
self.chain.process_block(Arc::new(block)).await?.into(); .chain
.process_block(Arc::new(block), CountUnrealized::True)
.await?
.into();
self.chain.recompute_head_at_current_slot().await?; self.chain.recompute_head_at_current_slot().await?;
Ok(block_hash) Ok(block_hash)
} }

View File

@ -4,6 +4,7 @@ use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
}; };
use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult}; use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult};
use fork_choice::CountUnrealized;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use logging::test_logger; use logging::test_logger;
use slasher::{Config as SlasherConfig, Slasher}; use slasher::{Config as SlasherConfig, Slasher};
@ -147,14 +148,14 @@ async fn chain_segment_full_segment() {
// Sneak in a little check to ensure we can process empty chain segments. // Sneak in a little check to ensure we can process empty chain segments.
harness harness
.chain .chain
.process_chain_segment(vec![]) .process_chain_segment(vec![], CountUnrealized::True)
.await .await
.into_block_error() .into_block_error()
.expect("should import empty chain segment"); .expect("should import empty chain segment");
harness harness
.chain .chain
.process_chain_segment(blocks.clone()) .process_chain_segment(blocks.clone(), CountUnrealized::True)
.await .await
.into_block_error() .into_block_error()
.expect("should import chain segment"); .expect("should import chain segment");
@ -187,7 +188,7 @@ async fn chain_segment_varying_chunk_size() {
for chunk in blocks.chunks(*chunk_size) { for chunk in blocks.chunks(*chunk_size) {
harness harness
.chain .chain
.process_chain_segment(chunk.to_vec()) .process_chain_segment(chunk.to_vec(), CountUnrealized::True)
.await .await
.into_block_error() .into_block_error()
.unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size)); .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
@ -227,7 +228,7 @@ async fn chain_segment_non_linear_parent_roots() {
matches!( matches!(
harness harness
.chain .chain
.process_chain_segment(blocks) .process_chain_segment(blocks, CountUnrealized::True)
.await .await
.into_block_error(), .into_block_error(),
Err(BlockError::NonLinearParentRoots) Err(BlockError::NonLinearParentRoots)
@ -247,7 +248,7 @@ async fn chain_segment_non_linear_parent_roots() {
matches!( matches!(
harness harness
.chain .chain
.process_chain_segment(blocks) .process_chain_segment(blocks, CountUnrealized::True)
.await .await
.into_block_error(), .into_block_error(),
Err(BlockError::NonLinearParentRoots) Err(BlockError::NonLinearParentRoots)
@ -278,7 +279,7 @@ async fn chain_segment_non_linear_slots() {
matches!( matches!(
harness harness
.chain .chain
.process_chain_segment(blocks) .process_chain_segment(blocks, CountUnrealized::True)
.await .await
.into_block_error(), .into_block_error(),
Err(BlockError::NonLinearSlots) Err(BlockError::NonLinearSlots)
@ -299,7 +300,7 @@ async fn chain_segment_non_linear_slots() {
matches!( matches!(
harness harness
.chain .chain
.process_chain_segment(blocks) .process_chain_segment(blocks, CountUnrealized::True)
.await .await
.into_block_error(), .into_block_error(),
Err(BlockError::NonLinearSlots) Err(BlockError::NonLinearSlots)
@ -325,7 +326,7 @@ async fn assert_invalid_signature(
matches!( matches!(
harness harness
.chain .chain
.process_chain_segment(blocks) .process_chain_segment(blocks, CountUnrealized::True)
.await .await
.into_block_error(), .into_block_error(),
Err(BlockError::InvalidSignature) Err(BlockError::InvalidSignature)
@ -342,12 +343,18 @@ async fn assert_invalid_signature(
.collect(); .collect();
// We don't care if this fails, we just call this to ensure that all prior blocks have been // We don't care if this fails, we just call this to ensure that all prior blocks have been
// imported prior to this test. // imported prior to this test.
let _ = harness.chain.process_chain_segment(ancestor_blocks).await; let _ = harness
.chain
.process_chain_segment(ancestor_blocks, CountUnrealized::True)
.await;
assert!( assert!(
matches!( matches!(
harness harness
.chain .chain
.process_block(snapshots[block_index].beacon_block.clone()) .process_block(
snapshots[block_index].beacon_block.clone(),
CountUnrealized::True
)
.await, .await,
Err(BlockError::InvalidSignature) Err(BlockError::InvalidSignature)
), ),
@ -397,7 +404,7 @@ async fn invalid_signature_gossip_block() {
.collect(); .collect();
harness harness
.chain .chain
.process_chain_segment(ancestor_blocks) .process_chain_segment(ancestor_blocks, CountUnrealized::True)
.await .await
.into_block_error() .into_block_error()
.expect("should import all blocks prior to the one being tested"); .expect("should import all blocks prior to the one being tested");
@ -405,10 +412,10 @@ async fn invalid_signature_gossip_block() {
matches!( matches!(
harness harness
.chain .chain
.process_block(Arc::new(SignedBeaconBlock::from_block( .process_block(
block, Arc::new(SignedBeaconBlock::from_block(block, junk_signature())),
junk_signature() CountUnrealized::True
))) )
.await, .await,
Err(BlockError::InvalidSignature) Err(BlockError::InvalidSignature)
), ),
@ -441,7 +448,7 @@ async fn invalid_signature_block_proposal() {
matches!( matches!(
harness harness
.chain .chain
.process_chain_segment(blocks) .process_chain_segment(blocks, CountUnrealized::True)
.await .await
.into_block_error(), .into_block_error(),
Err(BlockError::InvalidSignature) Err(BlockError::InvalidSignature)
@ -639,7 +646,7 @@ async fn invalid_signature_deposit() {
!matches!( !matches!(
harness harness
.chain .chain
.process_chain_segment(blocks) .process_chain_segment(blocks, CountUnrealized::True)
.await .await
.into_block_error(), .into_block_error(),
Err(BlockError::InvalidSignature) Err(BlockError::InvalidSignature)
@ -716,11 +723,18 @@ async fn block_gossip_verification() {
harness harness
.chain .chain
.process_block(gossip_verified) .process_block(gossip_verified, CountUnrealized::True)
.await .await
.expect("should import valid gossip verified block"); .expect("should import valid gossip verified block");
} }
// Recompute the head to ensure we cache the latest view of fork choice.
harness
.chain
.recompute_head_at_current_slot()
.await
.unwrap();
/* /*
* This test ensures that: * This test ensures that:
* *
@ -978,7 +992,11 @@ async fn verify_block_for_gossip_slashing_detection() {
.verify_block_for_gossip(Arc::new(block1)) .verify_block_for_gossip(Arc::new(block1))
.await .await
.unwrap(); .unwrap();
harness.chain.process_block(verified_block).await.unwrap(); harness
.chain
.process_block(verified_block, CountUnrealized::True)
.await
.unwrap();
unwrap_err( unwrap_err(
harness harness
.chain .chain
@ -1009,7 +1027,11 @@ async fn verify_block_for_gossip_doppelganger_detection() {
.await .await
.unwrap(); .unwrap();
let attestations = verified_block.block.message().body().attestations().clone(); let attestations = verified_block.block.message().body().attestations().clone();
harness.chain.process_block(verified_block).await.unwrap(); harness
.chain
.process_block(verified_block, CountUnrealized::True)
.await
.unwrap();
for att in attestations.iter() { for att in attestations.iter() {
let epoch = att.data.target.epoch; let epoch = att.data.target.epoch;
@ -1148,7 +1170,7 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!( assert!(matches!(
harness harness
.chain .chain
.process_block(Arc::new(base_block.clone())) .process_block(Arc::new(base_block.clone()), CountUnrealized::True)
.await .await
.err() .err()
.expect("should error when processing base block"), .expect("should error when processing base block"),
@ -1162,7 +1184,7 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!( assert!(matches!(
harness harness
.chain .chain
.process_chain_segment(vec![Arc::new(base_block)]) .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True)
.await, .await,
ChainSegmentResult::Failed { ChainSegmentResult::Failed {
imported_blocks: 0, imported_blocks: 0,
@ -1276,7 +1298,7 @@ async fn add_altair_block_to_base_chain() {
assert!(matches!( assert!(matches!(
harness harness
.chain .chain
.process_block(Arc::new(altair_block.clone())) .process_block(Arc::new(altair_block.clone()), CountUnrealized::True)
.await .await
.err() .err()
.expect("should error when processing altair block"), .expect("should error when processing altair block"),
@ -1290,7 +1312,7 @@ async fn add_altair_block_to_base_chain() {
assert!(matches!( assert!(matches!(
harness harness
.chain .chain
.process_chain_segment(vec![Arc::new(altair_block)]) .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True)
.await, .await,
ChainSegmentResult::Failed { ChainSegmentResult::Failed {
imported_blocks: 0, imported_blocks: 0,

View File

@ -9,7 +9,9 @@ use execution_layer::{
json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1}, json_structures::{JsonForkChoiceStateV1, JsonPayloadAttributesV1},
ExecutionLayer, ForkChoiceState, PayloadAttributes, ExecutionLayer, ForkChoiceState, PayloadAttributes,
}; };
use fork_choice::{Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus}; use fork_choice::{
CountUnrealized, Error as ForkChoiceError, InvalidationOperation, PayloadVerificationStatus,
};
use proto_array::{Error as ProtoArrayError, ExecutionStatus}; use proto_array::{Error as ProtoArrayError, ExecutionStatus};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::sync::Arc; use std::sync::Arc;
@ -648,7 +650,7 @@ async fn invalidates_all_descendants() {
let fork_block_root = rig let fork_block_root = rig
.harness .harness
.chain .chain
.process_block(Arc::new(fork_block)) .process_block(Arc::new(fork_block), CountUnrealized::True)
.await .await
.unwrap(); .unwrap();
rig.recompute_head().await; rig.recompute_head().await;
@ -740,7 +742,7 @@ async fn switches_heads() {
let fork_block_root = rig let fork_block_root = rig
.harness .harness
.chain .chain
.process_block(Arc::new(fork_block)) .process_block(Arc::new(fork_block), CountUnrealized::True)
.await .await
.unwrap(); .unwrap();
rig.recompute_head().await; rig.recompute_head().await;
@ -984,7 +986,7 @@ async fn invalid_parent() {
// Ensure the block built atop an invalid payload is invalid for import. // Ensure the block built atop an invalid payload is invalid for import.
assert!(matches!( assert!(matches!(
rig.harness.chain.process_block(block.clone()).await, rig.harness.chain.process_block(block.clone(), CountUnrealized::True).await,
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root }) Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
if invalid_root == parent_root if invalid_root == parent_root
)); ));
@ -998,7 +1000,8 @@ async fn invalid_parent() {
Duration::from_secs(0), Duration::from_secs(0),
&state, &state,
PayloadVerificationStatus::Optimistic, PayloadVerificationStatus::Optimistic,
&rig.harness.chain.spec &rig.harness.chain.spec,
CountUnrealized::True,
), ),
Err(ForkChoiceError::ProtoArrayError(message)) Err(ForkChoiceError::ProtoArrayError(message))
if message.contains(&format!( if message.contains(&format!(

View File

@ -10,6 +10,7 @@ use beacon_chain::{
BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler, BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler,
WhenSlotSkipped, WhenSlotSkipped,
}; };
use fork_choice::CountUnrealized;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use logging::test_logger; use logging::test_logger;
use maplit::hashset; use maplit::hashset;
@ -2124,7 +2125,7 @@ async fn weak_subjectivity_sync() {
beacon_chain.slot_clock.set_slot(block.slot().as_u64()); beacon_chain.slot_clock.set_slot(block.slot().as_u64());
beacon_chain beacon_chain
.process_block(Arc::new(full_block)) .process_block(Arc::new(full_block), CountUnrealized::True)
.await .await
.unwrap(); .unwrap();
beacon_chain.recompute_head_at_current_slot().await.unwrap(); beacon_chain.recompute_head_at_current_slot().await.unwrap();

View File

@ -8,6 +8,7 @@ use beacon_chain::{
}, },
BeaconChain, StateSkipConfig, WhenSlotSkipped, BeaconChain, StateSkipConfig, WhenSlotSkipped,
}; };
use fork_choice::CountUnrealized;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use operation_pool::PersistedOperationPool; use operation_pool::PersistedOperationPool;
use state_processing::{ use state_processing::{
@ -499,7 +500,7 @@ async fn unaggregated_attestations_added_to_fork_choice_some_none() {
// Move forward a slot so all queued attestations can be processed. // Move forward a slot so all queued attestations can be processed.
harness.advance_slot(); harness.advance_slot();
fork_choice fork_choice
.update_time(harness.chain.slot().unwrap()) .update_time(harness.chain.slot().unwrap(), &harness.chain.spec)
.unwrap(); .unwrap();
let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT)
@ -613,7 +614,7 @@ async fn unaggregated_attestations_added_to_fork_choice_all_updated() {
// Move forward a slot so all queued attestations can be processed. // Move forward a slot so all queued attestations can be processed.
harness.advance_slot(); harness.advance_slot();
fork_choice fork_choice
.update_time(harness.chain.slot().unwrap()) .update_time(harness.chain.slot().unwrap(), &harness.chain.spec)
.unwrap(); .unwrap();
let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect(); let validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
@ -683,7 +684,10 @@ async fn run_skip_slot_test(skip_slots: u64) {
assert_eq!( assert_eq!(
harness_b harness_b
.chain .chain
.process_block(harness_a.chain.head_snapshot().beacon_block.clone()) .process_block(
harness_a.chain.head_snapshot().beacon_block.clone(),
CountUnrealized::True
)
.await .await
.unwrap(), .unwrap(),
harness_a.chain.head_snapshot().beacon_block_root harness_a.chain.head_snapshot().beacon_block_root

View File

@ -23,7 +23,7 @@ use beacon_chain::{
observed_operations::ObservationOutcome, observed_operations::ObservationOutcome,
validator_monitor::{get_block_delay_ms, timestamp_now}, validator_monitor::{get_block_delay_ms, timestamp_now},
AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes,
ProduceBlockVerification, WhenSlotSkipped, CountUnrealized, ProduceBlockVerification, WhenSlotSkipped,
}; };
pub use block_id::BlockId; pub use block_id::BlockId;
use eth2::types::{self as api_types, EndpointVersion, ValidatorId}; use eth2::types::{self as api_types, EndpointVersion, ValidatorId};
@ -1035,7 +1035,10 @@ pub fn serve<T: BeaconChainTypes>(
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay);
match chain.process_block(block.clone()).await { match chain
.process_block(block.clone(), CountUnrealized::True)
.await
{
Ok(root) => { Ok(root) => {
info!( info!(
log, log,
@ -1179,7 +1182,7 @@ pub fn serve<T: BeaconChainTypes>(
PubsubMessage::BeaconBlock(new_block.clone()), PubsubMessage::BeaconBlock(new_block.clone()),
)?; )?;
match chain.process_block(new_block).await { match chain.process_block(new_block, CountUnrealized::True).await {
Ok(_) => { Ok(_) => {
// Update the head since it's likely this block will become the new // Update the head since it's likely this block will become the new
// head. // head.

View File

@ -6,7 +6,8 @@ use beacon_chain::{
observed_operations::ObservationOutcome, observed_operations::ObservationOutcome,
sync_committee_verification::{self, Error as SyncCommitteeError}, sync_committee_verification::{self, Error as SyncCommitteeError},
validator_monitor::get_block_delay_ms, validator_monitor::get_block_delay_ms,
BeaconChainError, BeaconChainTypes, BlockError, ForkChoiceError, GossipVerifiedBlock, BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError,
GossipVerifiedBlock,
}; };
use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource}; use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
use slog::{crit, debug, error, info, trace, warn}; use slog::{crit, debug, error, info, trace, warn};
@ -899,7 +900,11 @@ impl<T: BeaconChainTypes> Worker<T> {
) { ) {
let block: Arc<_> = verified_block.block.clone(); let block: Arc<_> = verified_block.block.clone();
match self.chain.process_block(verified_block).await { match self
.chain
.process_block(verified_block, CountUnrealized::True)
.await
{
Ok(block_root) => { Ok(block_root) => {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL);

View File

@ -7,10 +7,10 @@ use crate::beacon_processor::DuplicateCache;
use crate::metrics; use crate::metrics;
use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::manager::{BlockProcessType, SyncMessage};
use crate::sync::{BatchProcessResult, ChainId}; use crate::sync::{BatchProcessResult, ChainId};
use beacon_chain::ExecutionPayloadError;
use beacon_chain::{ use beacon_chain::{
BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
}; };
use beacon_chain::{CountUnrealized, ExecutionPayloadError};
use lighthouse_network::PeerAction; use lighthouse_network::PeerAction;
use slog::{debug, error, info, warn}; use slog::{debug, error, info, warn};
use std::sync::Arc; use std::sync::Arc;
@ -21,7 +21,7 @@ use types::{Epoch, Hash256, SignedBeaconBlock};
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub enum ChainSegmentProcessId { pub enum ChainSegmentProcessId {
/// Processing Id of a range syncing batch. /// Processing Id of a range syncing batch.
RangeBatchId(ChainId, Epoch), RangeBatchId(ChainId, Epoch, CountUnrealized),
/// Processing ID for a backfill syncing batch. /// Processing ID for a backfill syncing batch.
BackSyncBatchId(Epoch), BackSyncBatchId(Epoch),
/// Processing Id of the parent lookup of a block. /// Processing Id of the parent lookup of a block.
@ -89,7 +89,7 @@ impl<T: BeaconChainTypes> Worker<T> {
} }
}; };
let slot = block.slot(); let slot = block.slot();
let result = self.chain.process_block(block).await; let result = self.chain.process_block(block, CountUnrealized::True).await;
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@ -133,12 +133,15 @@ impl<T: BeaconChainTypes> Worker<T> {
) { ) {
let result = match sync_type { let result = match sync_type {
// this a request from the range sync // this a request from the range sync
ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { ChainSegmentProcessId::RangeBatchId(chain_id, epoch, count_unrealized) => {
let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64()); let start_slot = downloaded_blocks.first().map(|b| b.slot().as_u64());
let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64()); let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
let sent_blocks = downloaded_blocks.len(); let sent_blocks = downloaded_blocks.len();
match self.process_blocks(downloaded_blocks.iter()).await { match self
.process_blocks(downloaded_blocks.iter(), count_unrealized)
.await
{
(_, Ok(_)) => { (_, Ok(_)) => {
debug!(self.log, "Batch processed"; debug!(self.log, "Batch processed";
"batch_epoch" => epoch, "batch_epoch" => epoch,
@ -207,7 +210,10 @@ impl<T: BeaconChainTypes> Worker<T> {
); );
// parent blocks are ordered from highest slot to lowest, so we need to process in // parent blocks are ordered from highest slot to lowest, so we need to process in
// reverse // reverse
match self.process_blocks(downloaded_blocks.iter().rev()).await { match self
.process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True)
.await
{
(imported_blocks, Err(e)) => { (imported_blocks, Err(e)) => {
debug!(self.log, "Parent lookup failed"; "error" => %e.message); debug!(self.log, "Parent lookup failed"; "error" => %e.message);
BatchProcessResult::Failed { BatchProcessResult::Failed {
@ -231,9 +237,14 @@ impl<T: BeaconChainTypes> Worker<T> {
async fn process_blocks<'a>( async fn process_blocks<'a>(
&self, &self,
downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>, downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>,
count_unrealized: CountUnrealized,
) -> (usize, Result<(), ChainSegmentFailed>) { ) -> (usize, Result<(), ChainSegmentFailed>) {
let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect(); let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect();
match self.chain.process_chain_segment(blocks).await { match self
.chain
.process_chain_segment(blocks, count_unrealized)
.await
{
ChainSegmentResult::Successful { imported_blocks } => { ChainSegmentResult::Successful { imported_blocks } => {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_SUCCESS_TOTAL);
if imported_blocks > 0 { if imported_blocks > 0 {

View File

@ -532,7 +532,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
.parent_block_processed(chain_hash, result, &mut self.network), .parent_block_processed(chain_hash, result, &mut self.network),
}, },
SyncMessage::BatchProcessed { sync_type, result } => match sync_type { SyncMessage::BatchProcessed { sync_type, result } => match sync_type {
ChainSegmentProcessId::RangeBatchId(chain_id, epoch) => { ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => {
self.range_sync.handle_block_process_result( self.range_sync.handle_block_process_result(
&mut self.network, &mut self.network,
chain_id, chain_id,

View File

@ -2,7 +2,7 @@ use super::batch::{BatchInfo, BatchProcessingResult, BatchState};
use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
use crate::beacon_processor::{ChainSegmentProcessId, FailureMode}; use crate::beacon_processor::{ChainSegmentProcessId, FailureMode};
use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult}; use crate::sync::{manager::Id, network_context::SyncNetworkContext, BatchProcessResult};
use beacon_chain::BeaconChainTypes; use beacon_chain::{BeaconChainTypes, CountUnrealized};
use fnv::FnvHashMap; use fnv::FnvHashMap;
use lighthouse_network::{PeerAction, PeerId}; use lighthouse_network::{PeerAction, PeerId};
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
@ -100,6 +100,8 @@ pub struct SyncingChain<T: BeaconChainTypes> {
/// A multi-threaded, non-blocking processor for applying messages to the beacon chain. /// A multi-threaded, non-blocking processor for applying messages to the beacon chain.
beacon_processor_send: Sender<BeaconWorkEvent<T>>, beacon_processor_send: Sender<BeaconWorkEvent<T>>,
is_finalized_segment: bool,
/// The chain's log. /// The chain's log.
log: slog::Logger, log: slog::Logger,
} }
@ -126,6 +128,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
target_head_root: Hash256, target_head_root: Hash256,
peer_id: PeerId, peer_id: PeerId,
beacon_processor_send: Sender<BeaconWorkEvent<T>>, beacon_processor_send: Sender<BeaconWorkEvent<T>>,
is_finalized_segment: bool,
log: &slog::Logger, log: &slog::Logger,
) -> Self { ) -> Self {
let mut peers = FnvHashMap::default(); let mut peers = FnvHashMap::default();
@ -148,6 +151,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
current_processing_batch: None, current_processing_batch: None,
validated_batches: 0, validated_batches: 0,
beacon_processor_send, beacon_processor_send,
is_finalized_segment,
log: log.new(o!("chain" => id)), log: log.new(o!("chain" => id)),
} }
} }
@ -302,7 +306,12 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
// for removing chains and checking completion is in the callback. // for removing chains and checking completion is in the callback.
let blocks = batch.start_processing()?; let blocks = batch.start_processing()?;
let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id); let count_unrealized = if self.is_finalized_segment {
CountUnrealized::False
} else {
CountUnrealized::True
};
let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized);
self.current_processing_batch = Some(batch_id); self.current_processing_batch = Some(batch_id);
if let Err(e) = self if let Err(e) = self

View File

@ -472,10 +472,10 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
network: &mut SyncNetworkContext<T::EthSpec>, network: &mut SyncNetworkContext<T::EthSpec>,
) { ) {
let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot); let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot);
let collection = if let RangeSyncType::Finalized = sync_type { let (collection, is_finalized) = if let RangeSyncType::Finalized = sync_type {
&mut self.finalized_chains (&mut self.finalized_chains, true)
} else { } else {
&mut self.head_chains (&mut self.head_chains, false)
}; };
match collection.entry(id) { match collection.entry(id) {
Entry::Occupied(mut entry) => { Entry::Occupied(mut entry) => {
@ -501,6 +501,7 @@ impl<T: BeaconChainTypes, C: BlockStorage> ChainCollection<T, C> {
target_head_root, target_head_root,
peer, peer,
beacon_processor_send.clone(), beacon_processor_send.clone(),
is_finalized,
&self.log, &self.log,
); );
debug_assert_eq!(new_chain.get_id(), id); debug_assert_eq!(new_chain.get_id(), id);

View File

@ -708,4 +708,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
.default_value("250") .default_value("250")
.takes_value(true) .takes_value(true)
) )
.arg(
Arg::with_name("count-unrealized")
.long("count-unrealized")
.hidden(true)
.help("**EXPERIMENTAL** Enables an alternative, potentially more performant FFG \
vote tracking method.")
.takes_value(false)
)
} }

View File

@ -630,6 +630,10 @@ pub fn get_config<E: EthSpec>(
client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; client_config.chain.fork_choice_before_proposal_timeout_ms = timeout;
} }
if cli_args.is_present("count-unrealized") {
client_config.chain.count_unrealized = true;
}
Ok(client_config) Ok(client_config)
} }

View File

@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use types::{Checkpoint, Hash256, Slot}; use types::{Checkpoint, Hash256, Slot};
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(9); pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(10);
// All the keys that get stored under the `BeaconMeta` column. // All the keys that get stored under the `BeaconMeta` column.
// //

View File

@ -8,6 +8,7 @@ edition = "2021"
[dependencies] [dependencies]
types = { path = "../types" } types = { path = "../types" }
state_processing = { path = "../state_processing" }
proto_array = { path = "../proto_array" } proto_array = { path = "../proto_array" }
eth2_ssz = "0.4.1" eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.0" eth2_ssz_derive = "0.3.0"

View File

@ -1,6 +1,7 @@
use crate::{ForkChoiceStore, InvalidationOperation}; use crate::{ForkChoiceStore, InvalidationOperation};
use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice}; use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode}; use ssz_derive::{Decode, Encode};
use state_processing::per_epoch_processing;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::time::Duration; use std::time::Duration;
@ -51,6 +52,9 @@ pub enum Error<T> {
MissingFinalizedBlock { MissingFinalizedBlock {
finalized_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint,
}, },
UnrealizedVoteProcessing(state_processing::EpochProcessingError),
ParticipationCacheBuild(BeaconStateError),
ValidatorStatuses(BeaconStateError),
} }
impl<T> From<InvalidAttestation> for Error<T> { impl<T> From<InvalidAttestation> for Error<T> {
@ -59,6 +63,12 @@ impl<T> From<InvalidAttestation> for Error<T> {
} }
} }
impl<T> From<state_processing::EpochProcessingError> for Error<T> {
    /// Wrap an epoch-processing failure (raised while computing unrealized
    /// justification) in the fork-choice error type, enabling `?` at call
    /// sites.
    fn from(e: state_processing::EpochProcessingError) -> Self {
        Self::UnrealizedVoteProcessing(e)
    }
}
#[derive(Debug)] #[derive(Debug)]
pub enum InvalidBlock { pub enum InvalidBlock {
UnknownParent(Hash256), UnknownParent(Hash256),
@ -114,6 +124,66 @@ impl<T> From<String> for Error<T> {
} }
} }
/// Indicates whether the unrealized justification of a block should be calculated and tracked.
/// If a block has been finalized, this can be set to false. This is useful when syncing finalized
/// portions of the chain. Otherwise this should always be set to true.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CountUnrealized {
True,
False,
}
impl CountUnrealized {
pub fn is_true(&self) -> bool {
matches!(self, CountUnrealized::True)
}
pub fn and(&self, other: CountUnrealized) -> CountUnrealized {
if self.is_true() && other.is_true() {
CountUnrealized::True
} else {
CountUnrealized::False
}
}
}
impl From<bool> for CountUnrealized {
fn from(count_unrealized: bool) -> Self {
if count_unrealized {
CountUnrealized::True
} else {
CountUnrealized::False
}
}
}
#[derive(Copy, Clone)]
enum UpdateJustifiedCheckpointSlots {
OnTick {
current_slot: Slot,
},
OnBlock {
state_slot: Slot,
current_slot: Slot,
},
}
impl UpdateJustifiedCheckpointSlots {
fn current_slot(&self) -> Slot {
match self {
UpdateJustifiedCheckpointSlots::OnTick { current_slot } => *current_slot,
UpdateJustifiedCheckpointSlots::OnBlock { current_slot, .. } => *current_slot,
}
}
fn state_slot(&self) -> Option<Slot> {
match self {
UpdateJustifiedCheckpointSlots::OnTick { .. } => None,
UpdateJustifiedCheckpointSlots::OnBlock { state_slot, .. } => Some(*state_slot),
}
}
}
/// Indicates if a block has been verified by an execution payload. /// Indicates if a block has been verified by an execution payload.
/// ///
/// There is no variant for "invalid", since such a block should never be added to fork choice. /// There is no variant for "invalid", since such a block should never be added to fork choice.
@ -162,51 +232,6 @@ fn compute_start_slot_at_epoch<E: EthSpec>(epoch: Epoch) -> Slot {
epoch.start_slot(E::slots_per_epoch()) epoch.start_slot(E::slots_per_epoch())
} }
/// Called whenever the current time increases.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick
fn on_tick<T, E>(store: &mut T, time: Slot) -> Result<(), Error<T::Error>>
where
T: ForkChoiceStore<E>,
E: EthSpec,
{
let previous_slot = store.get_current_slot();
if time > previous_slot + 1 {
return Err(Error::InconsistentOnTick {
previous_slot,
time,
});
}
// Update store time.
store.set_current_slot(time);
let current_slot = store.get_current_slot();
// Reset proposer boost if this is a new slot.
if current_slot > previous_slot {
store.set_proposer_boost_root(Hash256::zero());
}
// Not a new epoch, return.
if !(current_slot > previous_slot && compute_slots_since_epoch_start::<E>(current_slot) == 0) {
return Ok(());
}
if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch {
store
.set_justified_checkpoint(*store.best_justified_checkpoint())
.map_err(Error::ForkChoiceStoreError)?;
}
Ok(())
}
/// Used for queuing attestations from the current slot. Only contains the minimum necessary /// Used for queuing attestations from the current slot. Only contains the minimum necessary
/// information about the attestation. /// information about the attestation.
#[derive(Clone, PartialEq, Encode, Decode)] #[derive(Clone, PartialEq, Encode, Decode)]
@ -356,7 +381,7 @@ where
// If the current slot is not provided, use the value that was last provided to the store. // If the current slot is not provided, use the value that was last provided to the store.
let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot()); let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot());
let proto_array = ProtoArrayForkChoice::new( let proto_array = ProtoArrayForkChoice::new::<E>(
finalized_block_slot, finalized_block_slot,
finalized_block_state_root, finalized_block_state_root,
*fc_store.justified_checkpoint(), *fc_store.justified_checkpoint(),
@ -473,7 +498,7 @@ where
current_slot: Slot, current_slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Hash256, Error<T::Error>> { ) -> Result<Hash256, Error<T::Error>> {
self.update_time(current_slot)?; self.update_time(current_slot, spec)?;
let store = &mut self.fc_store; let store = &mut self.fc_store;
@ -482,6 +507,7 @@ where
*store.finalized_checkpoint(), *store.finalized_checkpoint(),
store.justified_balances(), store.justified_balances(),
store.proposer_boost_root(), store.proposer_boost_root(),
current_slot,
spec, spec,
)?; )?;
@ -539,13 +565,11 @@ where
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#should_update_justified_checkpoint
fn should_update_justified_checkpoint( fn should_update_justified_checkpoint(
&mut self, &mut self,
current_slot: Slot, new_justified_checkpoint: Checkpoint,
state: &BeaconState<E>, slots: UpdateJustifiedCheckpointSlots,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<bool, Error<T::Error>> { ) -> Result<bool, Error<T::Error>> {
self.update_time(current_slot)?; self.update_time(slots.current_slot(), spec)?;
let new_justified_checkpoint = &state.current_justified_checkpoint();
if compute_slots_since_epoch_start::<E>(self.fc_store.get_current_slot()) if compute_slots_since_epoch_start::<E>(self.fc_store.get_current_slot())
< spec.safe_slots_to_update_justified < spec.safe_slots_to_update_justified
@ -557,11 +581,13 @@ where
compute_start_slot_at_epoch::<E>(self.fc_store.justified_checkpoint().epoch); compute_start_slot_at_epoch::<E>(self.fc_store.justified_checkpoint().epoch);
// This sanity check is not in the spec, but the invariant is implied. // This sanity check is not in the spec, but the invariant is implied.
if justified_slot >= state.slot() { if let Some(state_slot) = slots.state_slot() {
return Err(Error::AttemptToRevertJustification { if justified_slot >= state_slot {
store: justified_slot, return Err(Error::AttemptToRevertJustification {
state: state.slot(), store: justified_slot,
}); state: state_slot,
});
}
} }
// We know that the slot for `new_justified_checkpoint.root` is not greater than // We know that the slot for `new_justified_checkpoint.root` is not greater than
@ -629,15 +655,15 @@ where
state: &BeaconState<E>, state: &BeaconState<E>,
payload_verification_status: PayloadVerificationStatus, payload_verification_status: PayloadVerificationStatus,
spec: &ChainSpec, spec: &ChainSpec,
count_unrealized: CountUnrealized,
) -> Result<(), Error<T::Error>> { ) -> Result<(), Error<T::Error>> {
let current_slot = self.update_time(current_slot)?; let current_slot = self.update_time(current_slot, spec)?;
// Parent block must be known. // Parent block must be known.
if !self.proto_array.contains_block(&block.parent_root()) { let parent_block = self
return Err(Error::InvalidBlock(InvalidBlock::UnknownParent( .proto_array
block.parent_root(), .get_block(&block.parent_root())
))); .ok_or_else(|| Error::InvalidBlock(InvalidBlock::UnknownParent(block.parent_root())))?;
}
// Blocks cannot be in the future. If they are, their consideration must be delayed until // Blocks cannot be in the future. If they are, their consideration must be delayed until
// the are in the past. // the are in the past.
@ -686,29 +712,110 @@ where
self.fc_store.set_proposer_boost_root(block_root); self.fc_store.set_proposer_boost_root(block_root);
} }
// Update justified checkpoint. let update_justified_checkpoint_slots = UpdateJustifiedCheckpointSlots::OnBlock {
if state.current_justified_checkpoint().epoch > self.fc_store.justified_checkpoint().epoch { state_slot: state.slot(),
if state.current_justified_checkpoint().epoch current_slot,
> self.fc_store.best_justified_checkpoint().epoch };
// Update store with checkpoints if necessary
self.update_checkpoints(
state.current_justified_checkpoint(),
state.finalized_checkpoint(),
update_justified_checkpoint_slots,
spec,
)?;
// Update unrealized justified/finalized checkpoints.
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if count_unrealized
.is_true()
{
let block_epoch = block.slot().epoch(E::slots_per_epoch());
// If the parent checkpoints are already at the same epoch as the block being imported,
// it's impossible for the unrealized checkpoints to differ from the parent's. This
// holds true because:
//
// 1. A child block cannot have lower FFG checkpoints than its parent.
// 2. A block in epoch `N` cannot contain attestations which would justify an epoch higher than `N`.
// 3. A block in epoch `N` cannot contain attestations which would finalize an epoch higher than `N - 1`.
//
// This is an optimization. It should reduce the amount of times we run
// `process_justification_and_finalization` by approximately 1/3rd when the chain is
// performing optimally.
let parent_checkpoints = parent_block
.unrealized_justified_checkpoint
.zip(parent_block.unrealized_finalized_checkpoint)
.filter(|(parent_justified, parent_finalized)| {
parent_justified.epoch == block_epoch
&& parent_finalized.epoch + 1 >= block_epoch
});
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) =
if let Some((parent_justified, parent_finalized)) = parent_checkpoints {
(parent_justified, parent_finalized)
} else {
let justification_and_finalization_state = match block {
BeaconBlockRef::Merge(_) | BeaconBlockRef::Altair(_) => {
let participation_cache =
per_epoch_processing::altair::ParticipationCache::new(state, spec)
.map_err(Error::ParticipationCacheBuild)?;
per_epoch_processing::altair::process_justification_and_finalization(
state,
&participation_cache,
)?
}
BeaconBlockRef::Base(_) => {
let mut validator_statuses =
per_epoch_processing::base::ValidatorStatuses::new(state, spec)
.map_err(Error::ValidatorStatuses)?;
validator_statuses
.process_attestations(state)
.map_err(Error::ValidatorStatuses)?;
per_epoch_processing::base::process_justification_and_finalization(
state,
&validator_statuses.total_balances,
spec,
)?
}
};
(
justification_and_finalization_state.current_justified_checkpoint(),
justification_and_finalization_state.finalized_checkpoint(),
)
};
// Update best known unrealized justified & finalized checkpoints
if unrealized_justified_checkpoint.epoch
> self.fc_store.unrealized_justified_checkpoint().epoch
{ {
self.fc_store self.fc_store
.set_best_justified_checkpoint(state.current_justified_checkpoint()); .set_unrealized_justified_checkpoint(unrealized_justified_checkpoint);
} }
if self.should_update_justified_checkpoint(current_slot, state, spec)? { if unrealized_finalized_checkpoint.epoch
> self.fc_store.unrealized_finalized_checkpoint().epoch
{
self.fc_store self.fc_store
.set_justified_checkpoint(state.current_justified_checkpoint()) .set_unrealized_finalized_checkpoint(unrealized_finalized_checkpoint);
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
} }
}
// Update finalized checkpoint. // If block is from past epochs, try to update store's justified & finalized checkpoints right away
if state.finalized_checkpoint().epoch > self.fc_store.finalized_checkpoint().epoch { if block.slot().epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) {
self.fc_store self.update_checkpoints(
.set_finalized_checkpoint(state.finalized_checkpoint()); unrealized_justified_checkpoint,
self.fc_store unrealized_finalized_checkpoint,
.set_justified_checkpoint(state.current_justified_checkpoint()) update_justified_checkpoint_slots,
.map_err(Error::UnableToSetJustifiedCheckpoint)?; spec,
} )?;
}
(
Some(unrealized_justified_checkpoint),
Some(unrealized_finalized_checkpoint),
)
} else {
(None, None)
};
let target_slot = block let target_slot = block
.slot() .slot()
@ -757,32 +864,68 @@ where
// This does not apply a vote to the block, it just makes fork choice aware of the block so // This does not apply a vote to the block, it just makes fork choice aware of the block so
// it can still be identified as the head even if it doesn't have any votes. // it can still be identified as the head even if it doesn't have any votes.
self.proto_array.process_block(ProtoBlock { self.proto_array.process_block::<E>(
slot: block.slot(), ProtoBlock {
root: block_root, slot: block.slot(),
parent_root: Some(block.parent_root()), root: block_root,
target_root, parent_root: Some(block.parent_root()),
current_epoch_shuffling_id: AttestationShufflingId::new( target_root,
block_root, current_epoch_shuffling_id: AttestationShufflingId::new(
state, block_root,
RelativeEpoch::Current, state,
) RelativeEpoch::Current,
.map_err(Error::BeaconStateError)?, )
next_epoch_shuffling_id: AttestationShufflingId::new( .map_err(Error::BeaconStateError)?,
block_root, next_epoch_shuffling_id: AttestationShufflingId::new(
state, block_root,
RelativeEpoch::Next, state,
) RelativeEpoch::Next,
.map_err(Error::BeaconStateError)?, )
state_root: block.state_root(), .map_err(Error::BeaconStateError)?,
justified_checkpoint: state.current_justified_checkpoint(), state_root: block.state_root(),
finalized_checkpoint: state.finalized_checkpoint(), justified_checkpoint: state.current_justified_checkpoint(),
execution_status, finalized_checkpoint: state.finalized_checkpoint(),
})?; execution_status,
unrealized_justified_checkpoint,
unrealized_finalized_checkpoint,
},
current_slot,
)?;
Ok(()) Ok(())
} }
/// Update checkpoints in store if necessary
fn update_checkpoints(
&mut self,
justified_checkpoint: Checkpoint,
finalized_checkpoint: Checkpoint,
slots: UpdateJustifiedCheckpointSlots,
spec: &ChainSpec,
) -> Result<(), Error<T::Error>> {
// Update justified checkpoint.
if justified_checkpoint.epoch > self.fc_store.justified_checkpoint().epoch {
if justified_checkpoint.epoch > self.fc_store.best_justified_checkpoint().epoch {
self.fc_store
.set_best_justified_checkpoint(justified_checkpoint);
}
if self.should_update_justified_checkpoint(justified_checkpoint, slots, spec)? {
self.fc_store
.set_justified_checkpoint(justified_checkpoint)
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
}
// Update finalized checkpoint.
if finalized_checkpoint.epoch > self.fc_store.finalized_checkpoint().epoch {
self.fc_store.set_finalized_checkpoint(finalized_checkpoint);
self.fc_store
.set_justified_checkpoint(justified_checkpoint)
.map_err(Error::UnableToSetJustifiedCheckpoint)?;
}
Ok(())
}
/// Validates the `epoch` against the current time according to the fork choice store. /// Validates the `epoch` against the current time according to the fork choice store.
/// ///
/// ## Specification /// ## Specification
@ -920,9 +1063,10 @@ where
current_slot: Slot, current_slot: Slot,
attestation: &IndexedAttestation<E>, attestation: &IndexedAttestation<E>,
is_from_block: AttestationFromBlock, is_from_block: AttestationFromBlock,
spec: &ChainSpec,
) -> Result<(), Error<T::Error>> { ) -> Result<(), Error<T::Error>> {
// Ensure the store is up-to-date. // Ensure the store is up-to-date.
self.update_time(current_slot)?; self.update_time(current_slot, spec)?;
// Ignore any attestations to the zero hash. // Ignore any attestations to the zero hash.
// //
@ -967,12 +1111,16 @@ where
/// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided /// Call `on_tick` for all slots between `fc_store.get_current_slot()` and the provided
/// `current_slot`. Returns the value of `self.fc_store.get_current_slot`. /// `current_slot`. Returns the value of `self.fc_store.get_current_slot`.
pub fn update_time(&mut self, current_slot: Slot) -> Result<Slot, Error<T::Error>> { pub fn update_time(
&mut self,
current_slot: Slot,
spec: &ChainSpec,
) -> Result<Slot, Error<T::Error>> {
while self.fc_store.get_current_slot() < current_slot { while self.fc_store.get_current_slot() < current_slot {
let previous_slot = self.fc_store.get_current_slot(); let previous_slot = self.fc_store.get_current_slot();
// Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't // Note: we are relying upon `on_tick` to update `fc_store.time` to ensure we don't
// get stuck in a loop. // get stuck in a loop.
on_tick(&mut self.fc_store, previous_slot + 1)? self.on_tick(previous_slot + 1, spec)?
} }
// Process any attestations that might now be eligible. // Process any attestations that might now be eligible.
@ -981,6 +1129,63 @@ where
Ok(self.fc_store.get_current_slot()) Ok(self.fc_store.get_current_slot())
} }
/// Called whenever the current time increases.
///
/// ## Specification
///
/// Equivalent to:
///
/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_tick
fn on_tick(&mut self, time: Slot, spec: &ChainSpec) -> Result<(), Error<T::Error>> {
let store = &mut self.fc_store;
let previous_slot = store.get_current_slot();
if time > previous_slot + 1 {
return Err(Error::InconsistentOnTick {
previous_slot,
time,
});
}
// Update store time.
store.set_current_slot(time);
let current_slot = store.get_current_slot();
// Reset proposer boost if this is a new slot.
if current_slot > previous_slot {
store.set_proposer_boost_root(Hash256::zero());
}
// Not a new epoch, return.
if !(current_slot > previous_slot
&& compute_slots_since_epoch_start::<E>(current_slot) == 0)
{
return Ok(());
}
if store.best_justified_checkpoint().epoch > store.justified_checkpoint().epoch {
let store = &self.fc_store;
if self.is_descendant_of_finalized(store.best_justified_checkpoint().root) {
let store = &mut self.fc_store;
store
.set_justified_checkpoint(*store.best_justified_checkpoint())
.map_err(Error::ForkChoiceStoreError)?;
}
}
// Update store.justified_checkpoint if a better unrealized justified checkpoint is known
let unrealized_justified_checkpoint = *self.fc_store.unrealized_justified_checkpoint();
let unrealized_finalized_checkpoint = *self.fc_store.unrealized_finalized_checkpoint();
self.update_checkpoints(
unrealized_justified_checkpoint,
unrealized_finalized_checkpoint,
UpdateJustifiedCheckpointSlots::OnTick { current_slot },
spec,
)?;
Ok(())
}
/// Processes and removes from the queue any queued attestations which may now be eligible for /// Processes and removes from the queue any queued attestations which may now be eligible for
/// processing due to the slot clock incrementing. /// processing due to the slot clock incrementing.
fn process_attestation_queue(&mut self) -> Result<(), Error<T::Error>> { fn process_attestation_queue(&mut self) -> Result<(), Error<T::Error>> {
@ -1158,6 +1363,14 @@ where
*self.fc_store.best_justified_checkpoint() *self.fc_store.best_justified_checkpoint()
} }
pub fn unrealized_justified_checkpoint(&self) -> Checkpoint {
*self.fc_store.unrealized_justified_checkpoint()
}
pub fn unrealized_finalized_checkpoint(&self) -> Checkpoint {
*self.fc_store.unrealized_finalized_checkpoint()
}
/// Returns the latest message for a given validator, if any. /// Returns the latest message for a given validator, if any.
/// ///
/// Returns `(block_root, block_slot)`. /// Returns `(block_root, block_slot)`.

View File

@ -50,6 +50,12 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized {
/// Returns the `finalized_checkpoint`. /// Returns the `finalized_checkpoint`.
fn finalized_checkpoint(&self) -> &Checkpoint; fn finalized_checkpoint(&self) -> &Checkpoint;
/// Returns the `unrealized_justified_checkpoint`.
fn unrealized_justified_checkpoint(&self) -> &Checkpoint;
/// Returns the `unrealized_finalized_checkpoint`.
fn unrealized_finalized_checkpoint(&self) -> &Checkpoint;
/// Returns the `proposer_boost_root`. /// Returns the `proposer_boost_root`.
fn proposer_boost_root(&self) -> Hash256; fn proposer_boost_root(&self) -> Hash256;
@ -62,6 +68,12 @@ pub trait ForkChoiceStore<T: EthSpec>: Sized {
/// Sets the `best_justified_checkpoint`. /// Sets the `best_justified_checkpoint`.
fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint); fn set_best_justified_checkpoint(&mut self, checkpoint: Checkpoint);
/// Sets the `unrealized_justified_checkpoint`.
fn set_unrealized_justified_checkpoint(&mut self, checkpoint: Checkpoint);
/// Sets the `unrealized_finalized_checkpoint`.
fn set_unrealized_finalized_checkpoint(&mut self, checkpoint: Checkpoint);
/// Sets the proposer boost root. /// Sets the proposer boost root.
fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256); fn set_proposer_boost_root(&mut self, proposer_boost_root: Hash256);
} }

View File

@ -2,9 +2,9 @@ mod fork_choice;
mod fork_choice_store; mod fork_choice_store;
pub use crate::fork_choice::{ pub use crate::fork_choice::{
AttestationFromBlock, Error, ForkChoice, ForkChoiceView, ForkchoiceUpdateParameters, AttestationFromBlock, CountUnrealized, Error, ForkChoice, ForkChoiceView,
InvalidAttestation, InvalidBlock, PayloadVerificationStatus, PersistedForkChoice, ForkchoiceUpdateParameters, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
QueuedAttestation, PersistedForkChoice, QueuedAttestation,
}; };
pub use fork_choice_store::ForkChoiceStore; pub use fork_choice_store::ForkChoiceStore;
pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation};

View File

@ -12,7 +12,8 @@ use beacon_chain::{
StateSkipConfig, WhenSlotSkipped, StateSkipConfig, WhenSlotSkipped,
}; };
use fork_choice::{ use fork_choice::{
ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, CountUnrealized, ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
QueuedAttestation,
}; };
use store::MemoryStore; use store::MemoryStore;
use types::{ use types::{
@ -150,7 +151,7 @@ impl ForkChoiceTest {
.chain .chain
.canonical_head .canonical_head
.fork_choice_write_lock() .fork_choice_write_lock()
.update_time(self.harness.chain.slot().unwrap()) .update_time(self.harness.chain.slot().unwrap(), &self.harness.spec)
.unwrap(); .unwrap();
func( func(
self.harness self.harness
@ -292,6 +293,7 @@ impl ForkChoiceTest {
&state, &state,
PayloadVerificationStatus::Verified, PayloadVerificationStatus::Verified,
&self.harness.chain.spec, &self.harness.chain.spec,
CountUnrealized::True,
) )
.unwrap(); .unwrap();
self self
@ -334,6 +336,7 @@ impl ForkChoiceTest {
&state, &state,
PayloadVerificationStatus::Verified, PayloadVerificationStatus::Verified,
&self.harness.chain.spec, &self.harness.chain.spec,
CountUnrealized::True,
) )
.err() .err()
.expect("on_block did not return an error"); .expect("on_block did not return an error");

View File

@ -78,7 +78,7 @@ impl ForkChoiceTestDefinition {
let junk_shuffling_id = let junk_shuffling_id =
AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero()); AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
let mut fork_choice = ProtoArrayForkChoice::new( let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>(
self.finalized_block_slot, self.finalized_block_slot,
Hash256::zero(), Hash256::zero(),
self.justified_checkpoint, self.justified_checkpoint,
@ -103,6 +103,7 @@ impl ForkChoiceTestDefinition {
finalized_checkpoint, finalized_checkpoint,
&justified_state_balances, &justified_state_balances,
Hash256::zero(), Hash256::zero(),
Slot::new(0),
&spec, &spec,
) )
.unwrap_or_else(|e| { .unwrap_or_else(|e| {
@ -129,6 +130,7 @@ impl ForkChoiceTestDefinition {
finalized_checkpoint, finalized_checkpoint,
&justified_state_balances, &justified_state_balances,
proposer_boost_root, proposer_boost_root,
Slot::new(0),
&spec, &spec,
) )
.unwrap_or_else(|e| { .unwrap_or_else(|e| {
@ -152,6 +154,7 @@ impl ForkChoiceTestDefinition {
finalized_checkpoint, finalized_checkpoint,
&justified_state_balances, &justified_state_balances,
Hash256::zero(), Hash256::zero(),
Slot::new(0),
&spec, &spec,
); );
@ -190,13 +193,17 @@ impl ForkChoiceTestDefinition {
execution_status: ExecutionStatus::Optimistic( execution_status: ExecutionStatus::Optimistic(
ExecutionBlockHash::from_root(root), ExecutionBlockHash::from_root(root),
), ),
unrealized_justified_checkpoint: None,
unrealized_finalized_checkpoint: None,
}; };
fork_choice.process_block(block).unwrap_or_else(|e| { fork_choice
panic!( .process_block::<MainnetEthSpec>(block, slot)
"process_block op at index {} returned error: {:?}", .unwrap_or_else(|e| {
op_index, e panic!(
) "process_block op at index {} returned error: {:?}",
}); op_index, e
)
});
check_bytes_round_trip(&fork_choice); check_bytes_round_trip(&fork_choice);
} }
Operation::ProcessAttestation { Operation::ProcessAttestation {

View File

@ -97,6 +97,10 @@ pub struct ProtoNode {
/// Indicates if an execution node has marked this block as valid. Also contains the execution /// Indicates if an execution node has marked this block as valid. Also contains the execution
/// block hash. /// block hash.
pub execution_status: ExecutionStatus, pub execution_status: ExecutionStatus,
#[ssz(with = "four_byte_option_checkpoint")]
pub unrealized_justified_checkpoint: Option<Checkpoint>,
#[ssz(with = "four_byte_option_checkpoint")]
pub unrealized_finalized_checkpoint: Option<Checkpoint>,
} }
#[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)] #[derive(PartialEq, Debug, Encode, Decode, Serialize, Deserialize, Copy, Clone)]
@ -140,6 +144,7 @@ impl ProtoArray {
/// - Compare the current node with the parents best-child, updating it if the current node /// - Compare the current node with the parents best-child, updating it if the current node
/// should become the best child. /// should become the best child.
/// - If required, update the parents best-descendant with the current node or its best-descendant. /// - If required, update the parents best-descendant with the current node or its best-descendant.
#[allow(clippy::too_many_arguments)]
pub fn apply_score_changes<E: EthSpec>( pub fn apply_score_changes<E: EthSpec>(
&mut self, &mut self,
mut deltas: Vec<i64>, mut deltas: Vec<i64>,
@ -147,6 +152,7 @@ impl ProtoArray {
finalized_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint,
new_balances: &[u64], new_balances: &[u64],
proposer_boost_root: Hash256, proposer_boost_root: Hash256,
current_slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
if deltas.len() != self.indices.len() { if deltas.len() != self.indices.len() {
@ -280,7 +286,11 @@ impl ProtoArray {
// If the node has a parent, try to update its best-child and best-descendant. // If the node has a parent, try to update its best-child and best-descendant.
if let Some(parent_index) = node.parent { if let Some(parent_index) = node.parent {
self.maybe_update_best_child_and_descendant(parent_index, node_index)?; self.maybe_update_best_child_and_descendant::<E>(
parent_index,
node_index,
current_slot,
)?;
} }
} }
@ -290,7 +300,7 @@ impl ProtoArray {
/// Register a block with the fork choice. /// Register a block with the fork choice.
/// ///
/// It is only sane to supply a `None` parent for the genesis block. /// It is only sane to supply a `None` parent for the genesis block.
pub fn on_block(&mut self, block: Block) -> Result<(), Error> { pub fn on_block<E: EthSpec>(&mut self, block: Block, current_slot: Slot) -> Result<(), Error> {
// If the block is already known, simply ignore it. // If the block is already known, simply ignore it.
if self.indices.contains_key(&block.root) { if self.indices.contains_key(&block.root) {
return Ok(()); return Ok(());
@ -314,6 +324,8 @@ impl ProtoArray {
best_child: None, best_child: None,
best_descendant: None, best_descendant: None,
execution_status: block.execution_status, execution_status: block.execution_status,
unrealized_justified_checkpoint: block.unrealized_justified_checkpoint,
unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint,
}; };
// If the parent has an invalid execution status, return an error before adding the block to // If the parent has an invalid execution status, return an error before adding the block to
@ -335,7 +347,11 @@ impl ProtoArray {
self.nodes.push(node.clone()); self.nodes.push(node.clone());
if let Some(parent_index) = node.parent { if let Some(parent_index) = node.parent {
self.maybe_update_best_child_and_descendant(parent_index, node_index)?; self.maybe_update_best_child_and_descendant::<E>(
parent_index,
node_index,
current_slot,
)?;
if matches!(block.execution_status, ExecutionStatus::Valid(_)) { if matches!(block.execution_status, ExecutionStatus::Valid(_)) {
self.propagate_execution_payload_validation_by_index(parent_index)?; self.propagate_execution_payload_validation_by_index(parent_index)?;
@ -604,7 +620,11 @@ impl ProtoArray {
/// been called without a subsequent `Self::apply_score_changes` call. This is because /// been called without a subsequent `Self::apply_score_changes` call. This is because
/// `on_new_block` does not attempt to walk backwards through the tree and update the /// `on_new_block` does not attempt to walk backwards through the tree and update the
/// best-child/best-descendant links. /// best-child/best-descendant links.
pub fn find_head(&self, justified_root: &Hash256) -> Result<Hash256, Error> { pub fn find_head<E: EthSpec>(
&self,
justified_root: &Hash256,
current_slot: Slot,
) -> Result<Hash256, Error> {
let justified_index = self let justified_index = self
.indices .indices
.get(justified_root) .get(justified_root)
@ -637,7 +657,7 @@ impl ProtoArray {
.ok_or(Error::InvalidBestDescendant(best_descendant_index))?; .ok_or(Error::InvalidBestDescendant(best_descendant_index))?;
// Perform a sanity check that the node is indeed valid to be the head. // Perform a sanity check that the node is indeed valid to be the head.
if !self.node_is_viable_for_head(best_node) { if !self.node_is_viable_for_head::<E>(best_node, current_slot) {
return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo {
start_root: *justified_root, start_root: *justified_root,
justified_checkpoint: self.justified_checkpoint, justified_checkpoint: self.justified_checkpoint,
@ -733,10 +753,11 @@ impl ProtoArray {
/// best-descendant. /// best-descendant.
/// - The child is not the best child but becomes the best child. /// - The child is not the best child but becomes the best child.
/// - The child is not the best child and does not become the best child. /// - The child is not the best child and does not become the best child.
fn maybe_update_best_child_and_descendant( fn maybe_update_best_child_and_descendant<E: EthSpec>(
&mut self, &mut self,
parent_index: usize, parent_index: usize,
child_index: usize, child_index: usize,
current_slot: Slot,
) -> Result<(), Error> { ) -> Result<(), Error> {
let child = self let child = self
.nodes .nodes
@ -748,7 +769,8 @@ impl ProtoArray {
.get(parent_index) .get(parent_index)
.ok_or(Error::InvalidNodeIndex(parent_index))?; .ok_or(Error::InvalidNodeIndex(parent_index))?;
let child_leads_to_viable_head = self.node_leads_to_viable_head(child)?; let child_leads_to_viable_head =
self.node_leads_to_viable_head::<E>(child, current_slot)?;
// These three variables are aliases to the three options that we may set the // These three variables are aliases to the three options that we may set the
// `parent.best_child` and `parent.best_descendant` to. // `parent.best_child` and `parent.best_descendant` to.
@ -761,54 +783,54 @@ impl ProtoArray {
); );
let no_change = (parent.best_child, parent.best_descendant); let no_change = (parent.best_child, parent.best_descendant);
let (new_best_child, new_best_descendant) = if let Some(best_child_index) = let (new_best_child, new_best_descendant) =
parent.best_child if let Some(best_child_index) = parent.best_child {
{ if best_child_index == child_index && !child_leads_to_viable_head {
if best_child_index == child_index && !child_leads_to_viable_head { // If the child is already the best-child of the parent but it's not viable for
// If the child is already the best-child of the parent but it's not viable for // the head, remove it.
// the head, remove it. change_to_none
change_to_none } else if best_child_index == child_index {
} else if best_child_index == child_index { // If the child is the best-child already, set it again to ensure that the
// If the child is the best-child already, set it again to ensure that the // best-descendant of the parent is updated.
// best-descendant of the parent is updated.
change_to_child
} else {
let best_child = self
.nodes
.get(best_child_index)
.ok_or(Error::InvalidBestDescendant(best_child_index))?;
let best_child_leads_to_viable_head = self.node_leads_to_viable_head(best_child)?;
if child_leads_to_viable_head && !best_child_leads_to_viable_head {
// The child leads to a viable head, but the current best-child doesn't.
change_to_child change_to_child
} else if !child_leads_to_viable_head && best_child_leads_to_viable_head {
// The best child leads to a viable head, but the child doesn't.
no_change
} else if child.weight == best_child.weight {
// Tie-breaker of equal weights by root.
if child.root >= best_child.root {
change_to_child
} else {
no_change
}
} else { } else {
// Choose the winner by weight. let best_child = self
if child.weight >= best_child.weight { .nodes
.get(best_child_index)
.ok_or(Error::InvalidBestDescendant(best_child_index))?;
let best_child_leads_to_viable_head =
self.node_leads_to_viable_head::<E>(best_child, current_slot)?;
if child_leads_to_viable_head && !best_child_leads_to_viable_head {
// The child leads to a viable head, but the current best-child doesn't.
change_to_child change_to_child
} else { } else if !child_leads_to_viable_head && best_child_leads_to_viable_head {
// The best child leads to a viable head, but the child doesn't.
no_change no_change
} else if child.weight == best_child.weight {
// Tie-breaker of equal weights by root.
if child.root >= best_child.root {
change_to_child
} else {
no_change
}
} else {
// Choose the winner by weight.
if child.weight >= best_child.weight {
change_to_child
} else {
no_change
}
} }
} }
} } else if child_leads_to_viable_head {
} else if child_leads_to_viable_head { // There is no current best-child and the child is viable.
// There is no current best-child and the child is viable. change_to_child
change_to_child } else {
} else { // There is no current best-child but the child is not viable.
// There is no current best-child but the child is not viable. no_change
no_change };
};
let parent = self let parent = self
.nodes .nodes
@ -823,7 +845,11 @@ impl ProtoArray {
/// Indicates if the node itself is viable for the head, or if it's best descendant is viable /// Indicates if the node itself is viable for the head, or if it's best descendant is viable
/// for the head. /// for the head.
fn node_leads_to_viable_head(&self, node: &ProtoNode) -> Result<bool, Error> { fn node_leads_to_viable_head<E: EthSpec>(
&self,
node: &ProtoNode,
current_slot: Slot,
) -> Result<bool, Error> {
let best_descendant_is_viable_for_head = let best_descendant_is_viable_for_head =
if let Some(best_descendant_index) = node.best_descendant { if let Some(best_descendant_index) = node.best_descendant {
let best_descendant = self let best_descendant = self
@ -831,12 +857,13 @@ impl ProtoArray {
.get(best_descendant_index) .get(best_descendant_index)
.ok_or(Error::InvalidBestDescendant(best_descendant_index))?; .ok_or(Error::InvalidBestDescendant(best_descendant_index))?;
self.node_is_viable_for_head(best_descendant) self.node_is_viable_for_head::<E>(best_descendant, current_slot)
} else { } else {
false false
}; };
Ok(best_descendant_is_viable_for_head || self.node_is_viable_for_head(node)) Ok(best_descendant_is_viable_for_head
|| self.node_is_viable_for_head::<E>(node, current_slot))
} }
/// This is the equivalent to the `filter_block_tree` function in the eth2 spec: /// This is the equivalent to the `filter_block_tree` function in the eth2 spec:
@ -845,18 +872,43 @@ impl ProtoArray {
/// ///
/// Any node that has a different finalized or justified epoch should not be viable for the /// Any node that has a different finalized or justified epoch should not be viable for the
/// head. /// head.
fn node_is_viable_for_head(&self, node: &ProtoNode) -> bool { fn node_is_viable_for_head<E: EthSpec>(&self, node: &ProtoNode, current_slot: Slot) -> bool {
if node.execution_status.is_invalid() { if node.execution_status.is_invalid() {
return false; return false;
} }
if let (Some(node_justified_checkpoint), Some(node_finalized_checkpoint)) = let checkpoint_match_predicate =
|node_justified_checkpoint: Checkpoint, node_finalized_checkpoint: Checkpoint| {
let correct_justified = node_justified_checkpoint == self.justified_checkpoint
|| self.justified_checkpoint.epoch == Epoch::new(0);
let correct_finalized = node_finalized_checkpoint == self.finalized_checkpoint
|| self.finalized_checkpoint.epoch == Epoch::new(0);
correct_justified && correct_finalized
};
if let (
Some(unrealized_justified_checkpoint),
Some(unrealized_finalized_checkpoint),
Some(justified_checkpoint),
Some(finalized_checkpoint),
) = (
node.unrealized_justified_checkpoint,
node.unrealized_finalized_checkpoint,
node.justified_checkpoint,
node.finalized_checkpoint,
) {
if node.slot.epoch(E::slots_per_epoch()) < current_slot.epoch(E::slots_per_epoch()) {
checkpoint_match_predicate(
unrealized_justified_checkpoint,
unrealized_finalized_checkpoint,
)
} else {
checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint)
}
} else if let (Some(justified_checkpoint), Some(finalized_checkpoint)) =
(node.justified_checkpoint, node.finalized_checkpoint) (node.justified_checkpoint, node.finalized_checkpoint)
{ {
(node_justified_checkpoint == self.justified_checkpoint checkpoint_match_predicate(justified_checkpoint, finalized_checkpoint)
|| self.justified_checkpoint.epoch == Epoch::new(0))
&& (node_finalized_checkpoint == self.finalized_checkpoint
|| self.finalized_checkpoint.epoch == Epoch::new(0))
} else { } else {
false false
} }

View File

@ -124,6 +124,8 @@ pub struct Block {
/// Indicates if an execution node has marked this block as valid. Also contains the execution /// Indicates if an execution node has marked this block as valid. Also contains the execution
/// block hash. /// block hash.
pub execution_status: ExecutionStatus, pub execution_status: ExecutionStatus,
pub unrealized_justified_checkpoint: Option<Checkpoint>,
pub unrealized_finalized_checkpoint: Option<Checkpoint>,
} }
/// A Vec-wrapper which will grow to match any request. /// A Vec-wrapper which will grow to match any request.
@ -162,7 +164,7 @@ pub struct ProtoArrayForkChoice {
impl ProtoArrayForkChoice { impl ProtoArrayForkChoice {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn new( pub fn new<E: EthSpec>(
finalized_block_slot: Slot, finalized_block_slot: Slot,
finalized_block_state_root: Hash256, finalized_block_state_root: Hash256,
justified_checkpoint: Checkpoint, justified_checkpoint: Checkpoint,
@ -193,10 +195,12 @@ impl ProtoArrayForkChoice {
justified_checkpoint, justified_checkpoint,
finalized_checkpoint, finalized_checkpoint,
execution_status, execution_status,
unrealized_justified_checkpoint: Some(justified_checkpoint),
unrealized_finalized_checkpoint: Some(finalized_checkpoint),
}; };
proto_array proto_array
.on_block(block) .on_block::<E>(block, finalized_block_slot)
.map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?;
Ok(Self { Ok(Self {
@ -242,13 +246,17 @@ impl ProtoArrayForkChoice {
Ok(()) Ok(())
} }
pub fn process_block(&mut self, block: Block) -> Result<(), String> { pub fn process_block<E: EthSpec>(
&mut self,
block: Block,
current_slot: Slot,
) -> Result<(), String> {
if block.parent_root.is_none() { if block.parent_root.is_none() {
return Err("Missing parent root".to_string()); return Err("Missing parent root".to_string());
} }
self.proto_array self.proto_array
.on_block(block) .on_block::<E>(block, current_slot)
.map_err(|e| format!("process_block_error: {:?}", e)) .map_err(|e| format!("process_block_error: {:?}", e))
} }
@ -258,6 +266,7 @@ impl ProtoArrayForkChoice {
finalized_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint,
justified_state_balances: &[u64], justified_state_balances: &[u64],
proposer_boost_root: Hash256, proposer_boost_root: Hash256,
current_slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Hash256, String> { ) -> Result<Hash256, String> {
let old_balances = &mut self.balances; let old_balances = &mut self.balances;
@ -279,6 +288,7 @@ impl ProtoArrayForkChoice {
finalized_checkpoint, finalized_checkpoint,
new_balances, new_balances,
proposer_boost_root, proposer_boost_root,
current_slot,
spec, spec,
) )
.map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?; .map_err(|e| format!("find_head apply_score_changes failed: {:?}", e))?;
@ -286,7 +296,7 @@ impl ProtoArrayForkChoice {
*old_balances = new_balances.to_vec(); *old_balances = new_balances.to_vec();
self.proto_array self.proto_array
.find_head(&justified_checkpoint.root) .find_head::<E>(&justified_checkpoint.root, current_slot)
.map_err(|e| format!("find_head failed: {:?}", e)) .map_err(|e| format!("find_head failed: {:?}", e))
} }
@ -341,6 +351,8 @@ impl ProtoArrayForkChoice {
justified_checkpoint, justified_checkpoint,
finalized_checkpoint, finalized_checkpoint,
execution_status: block.execution_status, execution_status: block.execution_status,
unrealized_justified_checkpoint: block.unrealized_justified_checkpoint,
unrealized_finalized_checkpoint: block.unrealized_finalized_checkpoint,
}) })
} else { } else {
None None
@ -485,6 +497,7 @@ fn compute_deltas(
#[cfg(test)] #[cfg(test)]
mod test_compute_deltas { mod test_compute_deltas {
use super::*; use super::*;
use types::MainnetEthSpec;
/// Gives a hash that is not the zero hash (unless i is `usize::max_value)`. /// Gives a hash that is not the zero hash (unless i is `usize::max_value)`.
fn hash_from_index(i: usize) -> Hash256 { fn hash_from_index(i: usize) -> Hash256 {
@ -510,7 +523,7 @@ mod test_compute_deltas {
root: finalized_root, root: finalized_root,
}; };
let mut fc = ProtoArrayForkChoice::new( let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
genesis_slot, genesis_slot,
state_root, state_root,
genesis_checkpoint, genesis_checkpoint,
@ -523,34 +536,44 @@ mod test_compute_deltas {
// Add block that is a finalized descendant. // Add block that is a finalized descendant.
fc.proto_array fc.proto_array
.on_block(Block { .on_block::<MainnetEthSpec>(
slot: genesis_slot + 1, Block {
root: finalized_desc, slot: genesis_slot + 1,
parent_root: Some(finalized_root), root: finalized_desc,
state_root, parent_root: Some(finalized_root),
target_root: finalized_root, state_root,
current_epoch_shuffling_id: junk_shuffling_id.clone(), target_root: finalized_root,
next_epoch_shuffling_id: junk_shuffling_id.clone(), current_epoch_shuffling_id: junk_shuffling_id.clone(),
justified_checkpoint: genesis_checkpoint, next_epoch_shuffling_id: junk_shuffling_id.clone(),
finalized_checkpoint: genesis_checkpoint, justified_checkpoint: genesis_checkpoint,
execution_status, finalized_checkpoint: genesis_checkpoint,
}) execution_status,
unrealized_justified_checkpoint: Some(genesis_checkpoint),
unrealized_finalized_checkpoint: Some(genesis_checkpoint),
},
genesis_slot + 1,
)
.unwrap(); .unwrap();
// Add block that is *not* a finalized descendant. // Add block that is *not* a finalized descendant.
fc.proto_array fc.proto_array
.on_block(Block { .on_block::<MainnetEthSpec>(
slot: genesis_slot + 1, Block {
root: not_finalized_desc, slot: genesis_slot + 1,
parent_root: None, root: not_finalized_desc,
state_root, parent_root: None,
target_root: finalized_root, state_root,
current_epoch_shuffling_id: junk_shuffling_id.clone(), target_root: finalized_root,
next_epoch_shuffling_id: junk_shuffling_id, current_epoch_shuffling_id: junk_shuffling_id.clone(),
justified_checkpoint: genesis_checkpoint, next_epoch_shuffling_id: junk_shuffling_id,
finalized_checkpoint: genesis_checkpoint, justified_checkpoint: genesis_checkpoint,
execution_status, finalized_checkpoint: genesis_checkpoint,
}) execution_status,
unrealized_justified_checkpoint: None,
unrealized_finalized_checkpoint: None,
},
genesis_slot + 1,
)
.unwrap(); .unwrap();
assert!(!fc.is_descendant(unknown, unknown)); assert!(!fc.is_descendant(unknown, unknown));

View File

@ -2,6 +2,7 @@
pub use epoch_processing_summary::EpochProcessingSummary; pub use epoch_processing_summary::EpochProcessingSummary;
use errors::EpochProcessingError as Error; use errors::EpochProcessingError as Error;
pub use justification_and_finalization_state::JustificationAndFinalizationState;
pub use registry_updates::process_registry_updates; pub use registry_updates::process_registry_updates;
use safe_arith::SafeArith; use safe_arith::SafeArith;
pub use slashings::process_slashings; pub use slashings::process_slashings;
@ -14,6 +15,7 @@ pub mod effective_balance_updates;
pub mod epoch_processing_summary; pub mod epoch_processing_summary;
pub mod errors; pub mod errors;
pub mod historical_roots_update; pub mod historical_roots_update;
pub mod justification_and_finalization_state;
pub mod registry_updates; pub mod registry_updates;
pub mod resets; pub mod resets;
pub mod slashings; pub mod slashings;

View File

@ -33,7 +33,9 @@ pub fn process_epoch<T: EthSpec>(
let sync_committee = state.current_sync_committee()?.clone(); let sync_committee = state.current_sync_committee()?.clone();
// Justification and finalization. // Justification and finalization.
process_justification_and_finalization(state, &participation_cache)?; let justification_and_finalization_state =
process_justification_and_finalization(state, &participation_cache)?;
justification_and_finalization_state.apply_changes_to_state(state);
process_inactivity_updates(state, &participation_cache, spec)?; process_inactivity_updates(state, &participation_cache, spec)?;

View File

@ -1,17 +1,21 @@
use super::ParticipationCache; use super::ParticipationCache;
use crate::per_epoch_processing::weigh_justification_and_finalization;
use crate::per_epoch_processing::Error; use crate::per_epoch_processing::Error;
use crate::per_epoch_processing::{
weigh_justification_and_finalization, JustificationAndFinalizationState,
};
use safe_arith::SafeArith; use safe_arith::SafeArith;
use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; use types::consts::altair::TIMELY_TARGET_FLAG_INDEX;
use types::{BeaconState, EthSpec}; use types::{BeaconState, EthSpec};
/// Update the justified and finalized checkpoints for matching target attestations. /// Update the justified and finalized checkpoints for matching target attestations.
pub fn process_justification_and_finalization<T: EthSpec>( pub fn process_justification_and_finalization<T: EthSpec>(
state: &mut BeaconState<T>, state: &BeaconState<T>,
participation_cache: &ParticipationCache, participation_cache: &ParticipationCache,
) -> Result<(), Error> { ) -> Result<JustificationAndFinalizationState<T>, Error> {
let justification_and_finalization_state = JustificationAndFinalizationState::new(state);
if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { if state.current_epoch() <= T::genesis_epoch().safe_add(1)? {
return Ok(()); return Ok(justification_and_finalization_state);
} }
let previous_epoch = state.previous_epoch(); let previous_epoch = state.previous_epoch();
@ -24,7 +28,7 @@ pub fn process_justification_and_finalization<T: EthSpec>(
let previous_target_balance = previous_indices.total_balance()?; let previous_target_balance = previous_indices.total_balance()?;
let current_target_balance = current_indices.total_balance()?; let current_target_balance = current_indices.total_balance()?;
weigh_justification_and_finalization( weigh_justification_and_finalization(
state, justification_and_finalization_state,
total_active_balance, total_active_balance,
previous_target_balance, previous_target_balance,
current_target_balance, current_target_balance,

View File

@ -31,7 +31,9 @@ pub fn process_epoch<T: EthSpec>(
validator_statuses.process_attestations(state)?; validator_statuses.process_attestations(state)?;
// Justification and finalization. // Justification and finalization.
process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?; let justification_and_finalization_state =
process_justification_and_finalization(state, &validator_statuses.total_balances, spec)?;
justification_and_finalization_state.apply_changes_to_state(state);
// Rewards and Penalties. // Rewards and Penalties.
process_rewards_and_penalties(state, &mut validator_statuses, spec)?; process_rewards_and_penalties(state, &mut validator_statuses, spec)?;

View File

@ -1,21 +1,25 @@
use crate::per_epoch_processing::base::TotalBalances; use crate::per_epoch_processing::base::TotalBalances;
use crate::per_epoch_processing::weigh_justification_and_finalization;
use crate::per_epoch_processing::Error; use crate::per_epoch_processing::Error;
use crate::per_epoch_processing::{
weigh_justification_and_finalization, JustificationAndFinalizationState,
};
use safe_arith::SafeArith; use safe_arith::SafeArith;
use types::{BeaconState, ChainSpec, EthSpec}; use types::{BeaconState, ChainSpec, EthSpec};
/// Update the justified and finalized checkpoints for matching target attestations. /// Update the justified and finalized checkpoints for matching target attestations.
pub fn process_justification_and_finalization<T: EthSpec>( pub fn process_justification_and_finalization<T: EthSpec>(
state: &mut BeaconState<T>, state: &BeaconState<T>,
total_balances: &TotalBalances, total_balances: &TotalBalances,
_spec: &ChainSpec, _spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<JustificationAndFinalizationState<T>, Error> {
let justification_and_finalization_state = JustificationAndFinalizationState::new(state);
if state.current_epoch() <= T::genesis_epoch().safe_add(1)? { if state.current_epoch() <= T::genesis_epoch().safe_add(1)? {
return Ok(()); return Ok(justification_and_finalization_state);
} }
weigh_justification_and_finalization( weigh_justification_and_finalization(
state, justification_and_finalization_state,
total_balances.current_epoch(), total_balances.current_epoch(),
total_balances.previous_epoch_target_attesters(), total_balances.previous_epoch_target_attesters(),
total_balances.current_epoch_target_attesters(), total_balances.current_epoch_target_attesters(),

View File

@ -0,0 +1,115 @@
use types::{BeaconState, BeaconStateError, BitVector, Checkpoint, Epoch, EthSpec, Hash256};
/// This is a subset of the `BeaconState` which is used to compute justification and finality
/// without modifying the `BeaconState`.
///
/// A `JustificationAndFinalizationState` can be created from a `BeaconState` to compute
/// justification/finality changes and then applied to a `BeaconState` to enshrine those changes.
#[must_use = "this value must be applied to a state or explicitly dropped"]
pub struct JustificationAndFinalizationState<T: EthSpec> {
/*
* Immutable fields.
*/
previous_epoch: Epoch,
previous_epoch_target_root: Result<Hash256, BeaconStateError>,
current_epoch: Epoch,
current_epoch_target_root: Result<Hash256, BeaconStateError>,
/*
* Mutable fields.
*/
previous_justified_checkpoint: Checkpoint,
current_justified_checkpoint: Checkpoint,
finalized_checkpoint: Checkpoint,
justification_bits: BitVector<T::JustificationBitsLength>,
}
impl<T: EthSpec> JustificationAndFinalizationState<T> {
pub fn new(state: &BeaconState<T>) -> Self {
let previous_epoch = state.previous_epoch();
let current_epoch = state.current_epoch();
Self {
previous_epoch,
previous_epoch_target_root: state.get_block_root_at_epoch(previous_epoch).copied(),
current_epoch,
current_epoch_target_root: state.get_block_root_at_epoch(current_epoch).copied(),
previous_justified_checkpoint: state.previous_justified_checkpoint(),
current_justified_checkpoint: state.current_justified_checkpoint(),
finalized_checkpoint: state.finalized_checkpoint(),
justification_bits: state.justification_bits().clone(),
}
}
pub fn apply_changes_to_state(self, state: &mut BeaconState<T>) {
let Self {
/*
* Immutable fields do not need to be used.
*/
previous_epoch: _,
previous_epoch_target_root: _,
current_epoch: _,
current_epoch_target_root: _,
/*
* Mutable fields *must* be used.
*/
previous_justified_checkpoint,
current_justified_checkpoint,
finalized_checkpoint,
justification_bits,
} = self;
*state.previous_justified_checkpoint_mut() = previous_justified_checkpoint;
*state.current_justified_checkpoint_mut() = current_justified_checkpoint;
*state.finalized_checkpoint_mut() = finalized_checkpoint;
*state.justification_bits_mut() = justification_bits;
}
pub fn previous_epoch(&self) -> Epoch {
self.previous_epoch
}
pub fn current_epoch(&self) -> Epoch {
self.current_epoch
}
pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<Hash256, BeaconStateError> {
if epoch == self.previous_epoch {
self.previous_epoch_target_root.clone()
} else if epoch == self.current_epoch {
self.current_epoch_target_root.clone()
} else {
Err(BeaconStateError::SlotOutOfBounds)
}
}
pub fn previous_justified_checkpoint(&self) -> Checkpoint {
self.previous_justified_checkpoint
}
pub fn previous_justified_checkpoint_mut(&mut self) -> &mut Checkpoint {
&mut self.previous_justified_checkpoint
}
pub fn current_justified_checkpoint_mut(&mut self) -> &mut Checkpoint {
&mut self.current_justified_checkpoint
}
pub fn current_justified_checkpoint(&self) -> Checkpoint {
self.current_justified_checkpoint
}
pub fn finalized_checkpoint(&self) -> Checkpoint {
self.finalized_checkpoint
}
pub fn finalized_checkpoint_mut(&mut self) -> &mut Checkpoint {
&mut self.finalized_checkpoint
}
pub fn justification_bits(&self) -> &BitVector<T::JustificationBitsLength> {
&self.justification_bits
}
pub fn justification_bits_mut(&mut self) -> &mut BitVector<T::JustificationBitsLength> {
&mut self.justification_bits
}
}

View File

@ -1,16 +1,16 @@
use crate::per_epoch_processing::Error; use crate::per_epoch_processing::{Error, JustificationAndFinalizationState};
use safe_arith::SafeArith; use safe_arith::SafeArith;
use std::ops::Range; use std::ops::Range;
use types::{BeaconState, Checkpoint, EthSpec}; use types::{Checkpoint, EthSpec};
/// Update the justified and finalized checkpoints for matching target attestations. /// Update the justified and finalized checkpoints for matching target attestations.
#[allow(clippy::if_same_then_else)] // For readability and consistency with spec. #[allow(clippy::if_same_then_else)] // For readability and consistency with spec.
pub fn weigh_justification_and_finalization<T: EthSpec>( pub fn weigh_justification_and_finalization<T: EthSpec>(
state: &mut BeaconState<T>, mut state: JustificationAndFinalizationState<T>,
total_active_balance: u64, total_active_balance: u64,
previous_target_balance: u64, previous_target_balance: u64,
current_target_balance: u64, current_target_balance: u64,
) -> Result<(), Error> { ) -> Result<JustificationAndFinalizationState<T>, Error> {
let previous_epoch = state.previous_epoch(); let previous_epoch = state.previous_epoch();
let current_epoch = state.current_epoch(); let current_epoch = state.current_epoch();
@ -24,7 +24,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>(
if previous_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? { if previous_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? {
*state.current_justified_checkpoint_mut() = Checkpoint { *state.current_justified_checkpoint_mut() = Checkpoint {
epoch: previous_epoch, epoch: previous_epoch,
root: *state.get_block_root_at_epoch(previous_epoch)?, root: state.get_block_root_at_epoch(previous_epoch)?,
}; };
state.justification_bits_mut().set(1, true)?; state.justification_bits_mut().set(1, true)?;
} }
@ -32,7 +32,7 @@ pub fn weigh_justification_and_finalization<T: EthSpec>(
if current_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? { if current_target_balance.safe_mul(3)? >= total_active_balance.safe_mul(2)? {
*state.current_justified_checkpoint_mut() = Checkpoint { *state.current_justified_checkpoint_mut() = Checkpoint {
epoch: current_epoch, epoch: current_epoch,
root: *state.get_block_root_at_epoch(current_epoch)?, root: state.get_block_root_at_epoch(current_epoch)?,
}; };
state.justification_bits_mut().set(0, true)?; state.justification_bits_mut().set(0, true)?;
} }
@ -66,5 +66,5 @@ pub fn weigh_justification_and_finalization<T: EthSpec>(
*state.finalized_checkpoint_mut() = old_current_justified_checkpoint; *state.finalized_checkpoint_mut() = old_current_justified_checkpoint;
} }
Ok(()) Ok(state)
} }

View File

@ -129,6 +129,7 @@ macro_rules! impl_test_random_for_u8_array {
}; };
} }
impl_test_random_for_u8_array!(3);
impl_test_random_for_u8_array!(4); impl_test_random_for_u8_array!(4);
impl_test_random_for_u8_array!(32); impl_test_random_for_u8_array!(32);
impl_test_random_for_u8_array!(48); impl_test_random_for_u8_array!(48);

View File

@ -37,3 +37,4 @@ web3 = { version = "0.18.0", default-features = false, features = ["http-tls", "
eth1_test_rig = { path = "../testing/eth1_test_rig" } eth1_test_rig = { path = "../testing/eth1_test_rig" }
sensitive_url = { path = "../common/sensitive_url" } sensitive_url = { path = "../common/sensitive_url" }
eth2 = { path = "../common/eth2" } eth2 = { path = "../common/eth2" }
snap = "1.0.1"

View File

@ -1,7 +1,9 @@
use clap::ArgMatches; use clap::ArgMatches;
use clap_utils::parse_required; use clap_utils::parse_required;
use serde::Serialize; use serde::Serialize;
use snap::raw::Decoder;
use ssz::Decode; use ssz::Decode;
use std::fs;
use std::fs::File; use std::fs::File;
use std::io::Read; use std::io::Read;
use std::str::FromStr; use std::str::FromStr;
@ -29,11 +31,18 @@ pub fn run_parse_ssz<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
let filename = matches.value_of("ssz-file").ok_or("No file supplied")?; let filename = matches.value_of("ssz-file").ok_or("No file supplied")?;
let format = parse_required(matches, "format")?; let format = parse_required(matches, "format")?;
let mut bytes = vec![]; let bytes = if filename.ends_with("ssz_snappy") {
let mut file = let bytes = fs::read(filename).unwrap();
File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?; let mut decoder = Decoder::new();
file.read_to_end(&mut bytes) decoder.decompress_vec(&bytes).unwrap()
.map_err(|e| format!("Unable to read {}: {}", filename, e))?; } else {
let mut bytes = vec![];
let mut file =
File::open(filename).map_err(|e| format!("Unable to open {}: {}", filename, e))?;
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {}: {}", filename, e))?;
bytes
};
info!("Using {} spec", T::spec_name()); info!("Using {} spec", T::spec_name());
info!("Type: {:?}", type_str); info!("Type: {:?}", type_str);

View File

@ -88,17 +88,23 @@ impl<E: EthSpec> EpochTransition<E> for JustificationAndFinalization {
BeaconState::Base(_) => { BeaconState::Base(_) => {
let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?;
validator_statuses.process_attestations(state)?; validator_statuses.process_attestations(state)?;
base::process_justification_and_finalization( let justification_and_finalization_state =
state, base::process_justification_and_finalization(
&validator_statuses.total_balances, state,
spec, &validator_statuses.total_balances,
) spec,
)?;
justification_and_finalization_state.apply_changes_to_state(state);
Ok(())
} }
BeaconState::Altair(_) | BeaconState::Merge(_) => { BeaconState::Altair(_) | BeaconState::Merge(_) => {
altair::process_justification_and_finalization( let justification_and_finalization_state =
state, altair::process_justification_and_finalization(
&altair::ParticipationCache::new(state, spec).unwrap(), state,
) &altair::ParticipationCache::new(state, spec).unwrap(),
)?;
justification_and_finalization_state.apply_changes_to_state(state);
Ok(())
} }
} }
} }

View File

@ -7,7 +7,7 @@ use beacon_chain::{
obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation, obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation,
}, },
test_utils::{BeaconChainHarness, EphemeralHarnessType}, test_utils::{BeaconChainHarness, EphemeralHarnessType},
BeaconChainTypes, CachedHead, BeaconChainTypes, CachedHead, CountUnrealized,
}; };
use serde_derive::Deserialize; use serde_derive::Deserialize;
use ssz_derive::Decode; use ssz_derive::Decode;
@ -16,8 +16,8 @@ use std::future::Future;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use types::{ use types::{
Attestation, BeaconBlock, BeaconState, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, Attestation, AttesterSlashing, BeaconBlock, BeaconState, Checkpoint, EthSpec,
Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, Uint256,
}; };
#[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)]
@ -45,17 +45,20 @@ pub struct Checks {
justified_checkpoint_root: Option<Hash256>, justified_checkpoint_root: Option<Hash256>,
finalized_checkpoint: Option<Checkpoint>, finalized_checkpoint: Option<Checkpoint>,
best_justified_checkpoint: Option<Checkpoint>, best_justified_checkpoint: Option<Checkpoint>,
u_justified_checkpoint: Option<Checkpoint>,
u_finalized_checkpoint: Option<Checkpoint>,
proposer_boost_root: Option<Hash256>, proposer_boost_root: Option<Hash256>,
} }
#[derive(Debug, Clone, Deserialize)] #[derive(Debug, Clone, Deserialize)]
#[serde(untagged, deny_unknown_fields)] #[serde(untagged, deny_unknown_fields)]
pub enum Step<B, A, P> { pub enum Step<B, A, P, S> {
Tick { tick: u64 }, Tick { tick: u64 },
ValidBlock { block: B }, ValidBlock { block: B },
MaybeValidBlock { block: B, valid: bool }, MaybeValidBlock { block: B, valid: bool },
Attestation { attestation: A }, Attestation { attestation: A },
PowBlock { pow_block: P }, PowBlock { pow_block: P },
AttesterSlashing { attester_slashing: S },
Checks { checks: Box<Checks> }, Checks { checks: Box<Checks> },
} }
@ -71,16 +74,13 @@ pub struct ForkChoiceTest<E: EthSpec> {
pub description: String, pub description: String,
pub anchor_state: BeaconState<E>, pub anchor_state: BeaconState<E>,
pub anchor_block: BeaconBlock<E>, pub anchor_block: BeaconBlock<E>,
pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, PowBlock>>, #[allow(clippy::type_complexity)]
pub steps: Vec<Step<SignedBeaconBlock<E>, Attestation<E>, PowBlock, AttesterSlashing<E>>>,
} }
/// Spec for fork choice tests, with proposer boosting enabled. /// Spec to be used for fork choice tests.
///
/// This function can be deleted once `ChainSpec::mainnet` enables proposer boosting by default.
pub fn fork_choice_spec<E: EthSpec>(fork_name: ForkName) -> ChainSpec { pub fn fork_choice_spec<E: EthSpec>(fork_name: ForkName) -> ChainSpec {
let mut spec = testing_spec::<E>(fork_name); testing_spec::<E>(fork_name)
spec.proposer_score_boost = Some(70);
spec
} }
impl<E: EthSpec> LoadCase for ForkChoiceTest<E> { impl<E: EthSpec> LoadCase for ForkChoiceTest<E> {
@ -93,7 +93,8 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> {
.expect("path must be valid OsStr") .expect("path must be valid OsStr")
.to_string(); .to_string();
let spec = &fork_choice_spec::<E>(fork_name); let spec = &fork_choice_spec::<E>(fork_name);
let steps: Vec<Step<String, String, String>> = yaml_decode_file(&path.join("steps.yaml"))?; let steps: Vec<Step<String, String, String, String>> =
yaml_decode_file(&path.join("steps.yaml"))?;
// Resolve the object names in `steps.yaml` into actual decoded block/attestation objects. // Resolve the object names in `steps.yaml` into actual decoded block/attestation objects.
let steps = steps let steps = steps
.into_iter() .into_iter()
@ -119,6 +120,10 @@ impl<E: EthSpec> LoadCase for ForkChoiceTest<E> {
ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block))) ssz_decode_file(&path.join(format!("{}.ssz_snappy", pow_block)))
.map(|pow_block| Step::PowBlock { pow_block }) .map(|pow_block| Step::PowBlock { pow_block })
} }
Step::AttesterSlashing { attester_slashing } => {
ssz_decode_file(&path.join(format!("{}.ssz_snappy", attester_slashing)))
.map(|attester_slashing| Step::AttesterSlashing { attester_slashing })
}
Step::Checks { checks } => Ok(Step::Checks { checks }), Step::Checks { checks } => Ok(Step::Checks { checks }),
}) })
.collect::<Result<_, _>>()?; .collect::<Result<_, _>>()?;
@ -159,7 +164,10 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
// TODO(merge): re-enable this test before production. // TODO(merge): re-enable this test before production.
// This test is skipped until we can do retrospective confirmations of the terminal // This test is skipped until we can do retrospective confirmations of the terminal
// block after an optimistic sync. // block after an optimistic sync.
if self.description == "block_lookup_failed" { if self.description == "block_lookup_failed"
//TODO(sean): enable once we implement equivocation logic (https://github.com/sigp/lighthouse/issues/3241)
|| self.description == "discard_equivocations"
{
return Err(Error::SkippedKnownFailure); return Err(Error::SkippedKnownFailure);
}; };
@ -172,6 +180,10 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
} }
Step::Attestation { attestation } => tester.process_attestation(attestation)?, Step::Attestation { attestation } => tester.process_attestation(attestation)?,
Step::PowBlock { pow_block } => tester.process_pow_block(pow_block), Step::PowBlock { pow_block } => tester.process_pow_block(pow_block),
//TODO(sean): enable once we implement equivocation logic (https://github.com/sigp/lighthouse/issues/3241)
Step::AttesterSlashing {
attester_slashing: _,
} => (),
Step::Checks { checks } => { Step::Checks { checks } => {
let Checks { let Checks {
head, head,
@ -181,6 +193,8 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
justified_checkpoint_root, justified_checkpoint_root,
finalized_checkpoint, finalized_checkpoint,
best_justified_checkpoint, best_justified_checkpoint,
u_justified_checkpoint,
u_finalized_checkpoint,
proposer_boost_root, proposer_boost_root,
} = checks.as_ref(); } = checks.as_ref();
@ -214,6 +228,14 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
.check_best_justified_checkpoint(*expected_best_justified_checkpoint)?; .check_best_justified_checkpoint(*expected_best_justified_checkpoint)?;
} }
if let Some(expected_u_justified_checkpoint) = u_justified_checkpoint {
tester.check_u_justified_checkpoint(*expected_u_justified_checkpoint)?;
}
if let Some(expected_u_finalized_checkpoint) = u_finalized_checkpoint {
tester.check_u_finalized_checkpoint(*expected_u_finalized_checkpoint)?;
}
if let Some(expected_proposer_boost_root) = proposer_boost_root { if let Some(expected_proposer_boost_root) = proposer_boost_root {
tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?;
} }
@ -319,14 +341,18 @@ impl<E: EthSpec> Tester<E> {
.chain .chain
.canonical_head .canonical_head
.fork_choice_write_lock() .fork_choice_write_lock()
.update_time(slot) .update_time(slot, &self.spec)
.unwrap(); .unwrap();
} }
pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> { pub fn process_block(&self, block: SignedBeaconBlock<E>, valid: bool) -> Result<(), Error> {
let block_root = block.canonical_root(); let block_root = block.canonical_root();
let block = Arc::new(block); let block = Arc::new(block);
let result = self.block_on_dangerous(self.harness.chain.process_block(block.clone()))?; let result = self.block_on_dangerous(
self.harness
.chain
.process_block(block.clone(), CountUnrealized::True),
)?;
if result.is_ok() != valid { if result.is_ok() != valid {
return Err(Error::DidntFail(format!( return Err(Error::DidntFail(format!(
"block with root {} was valid={} whilst test expects valid={}. result: {:?}", "block with root {} was valid={} whilst test expects valid={}. result: {:?}",
@ -384,6 +410,7 @@ impl<E: EthSpec> Tester<E> {
&state, &state,
PayloadVerificationStatus::Irrelevant, PayloadVerificationStatus::Irrelevant,
&self.harness.chain.spec, &self.harness.chain.spec,
self.harness.chain.config.count_unrealized.into(),
); );
if result.is_ok() { if result.is_ok() {
@ -520,6 +547,40 @@ impl<E: EthSpec> Tester<E> {
) )
} }
pub fn check_u_justified_checkpoint(
&self,
expected_checkpoint: Checkpoint,
) -> Result<(), Error> {
let u_justified_checkpoint = self
.harness
.chain
.canonical_head
.fork_choice_read_lock()
.unrealized_justified_checkpoint();
check_equal(
"u_justified_checkpoint",
u_justified_checkpoint,
expected_checkpoint,
)
}
pub fn check_u_finalized_checkpoint(
&self,
expected_checkpoint: Checkpoint,
) -> Result<(), Error> {
let u_finalized_checkpoint = self
.harness
.chain
.canonical_head
.fork_choice_read_lock()
.unrealized_finalized_checkpoint();
check_equal(
"u_finalized_checkpoint",
u_finalized_checkpoint,
expected_checkpoint,
)
}
pub fn check_expected_proposer_boost_root( pub fn check_expected_proposer_boost_root(
&self, &self,
expected_proposer_boost_root: Hash256, expected_proposer_boost_root: Hash256,