[Merge] Optimistic Sync: Stage 1 (#2686)

* Add payload verification status to fork choice

* Pass payload verification status to import_block

* Add valid back-propagation

* Add head safety status latch to API

* Remove ExecutionLayerStatus

* Add execution info to client notifier

* Update notifier logs

* Change use of "hash" to refer to beacon block

* Shutdown on invalid finalized block

* Tidy, add comments

* Fix failing FC tests

* Allow blocks with unsafe head

* Fix forkchoiceUpdate call on startup
Author: Paul Hauner
Date:   2021-10-07 22:24:57 +11:00
parent aa1d57aa55
commit 6dde12f311
GPG Key ID: 5E2CFF9B75FA63DF (no known key found for this signature in database)
17 changed files with 395 additions and 156 deletions
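Editor's note: the diffs below thread a payload verification result from block import, through fork choice, and out to the HTTP API and notifier. The following self-contained sketch of that flow is illustrative only: `Hash256` is stubbed, the `on_block_status` helper is hypothetical, and the real code in `ForkChoice::on_block` also handles zero block hashes and carries a `bool` on `Irrelevant`.

// Stub so the sketch compiles on its own; the real type lives in the `types` crate.
type Hash256 = [u8; 32];

// What block import learns from the execution layer (see `PayloadVerificationStatus` below).
#[derive(Clone, Copy, Debug)]
enum PayloadVerificationStatus {
    Verified,    // an EL declared the payload valid
    NotVerified, // the EL was syncing or errored; the block is imported optimistically
    Irrelevant,  // pre-merge-fork or pre-terminal-block; there is nothing to verify
}

// What proto-array stores per block (see `ExecutionStatus` below).
#[derive(Clone, Copy, Debug)]
enum ExecutionStatus {
    Valid(Hash256),
    Invalid(Hash256),
    Unknown(Hash256),
    Irrelevant,
}

// Hypothetical helper: roughly the mapping `ForkChoice::on_block` applies in this commit.
fn on_block_status(status: PayloadVerificationStatus, block_hash: Hash256) -> ExecutionStatus {
    match status {
        PayloadVerificationStatus::Verified => ExecutionStatus::Valid(block_hash),
        PayloadVerificationStatus::NotVerified => ExecutionStatus::Unknown(block_hash),
        PayloadVerificationStatus::Irrelevant => ExecutionStatus::Irrelevant,
    }
}

fn main() {
    // A block accepted while the EL is syncing stays `Unknown` until an EL verifies it
    // (or a descendant of it, via the back-propagation added in proto-array).
    let status = on_block_status(PayloadVerificationStatus::NotVerified, [0xab; 32]);
    println!("{:?}", status);
    // `Invalid` is only ever set from an explicit EL rejection, never from this mapping.
    let _ = ExecutionStatus::Invalid([0u8; 32]);
}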

View File

@ -56,6 +56,7 @@ use itertools::process_results;
use itertools::Itertools;
use operation_pool::{OperationPool, PersistedOperationPool};
use parking_lot::{Mutex, RwLock};
+use proto_array::ExecutionStatus;
use safe_arith::SafeArith;
use slasher::Slasher;
use slog::{crit, debug, error, info, trace, warn, Logger};
@ -195,6 +196,7 @@ pub struct HeadInfo {
    pub genesis_validators_root: Hash256,
    pub proposer_shuffling_decision_root: Hash256,
    pub is_merge_complete: bool,
+   pub execution_payload_block_hash: Option<Hash256>,
}
pub trait BeaconChainTypes: Send + Sync + 'static {
@ -205,17 +207,23 @@ pub trait BeaconChainTypes: Send + Sync + 'static {
    type EthSpec: types::EthSpec;
}
-/// Indicates the status of the `ExecutionLayer`.
+/// Indicates the EL payload verification status of the head beacon block.
#[derive(Debug, PartialEq)]
-pub enum ExecutionLayerStatus {
-   /// The execution layer is synced and reachable.
-   Ready,
-   /// The execution layer is either syncing or unreachable.
-   NotReady,
-   /// The execution layer is required, but has not been enabled. This is a configuration error.
-   Missing,
-   /// The execution layer is not yet required, therefore the status is irrelevant.
-   NotRequired,
+pub enum HeadSafetyStatus {
+   /// The head block has either been verified by an EL or does not require EL verification
+   /// (e.g., it is pre-merge or pre-terminal-block).
+   ///
+   /// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with
+   /// the variant.
+   Safe(Option<Hash256>),
+   /// The head block execution payload has not yet been verified by an EL.
+   ///
+   /// The `execution_payload.block_hash` of the head block is returned.
+   Unsafe(Hash256),
+   /// The head block execution payload was deemed to be invalid by an EL.
+   ///
+   /// The `execution_payload.block_hash` of the head block is returned.
+   Invalid(Hash256),
}
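Editor's sketch of how a caller might branch on `HeadSafetyStatus`; the `only_with_safe_head` warp filter and the notifier changes later in this diff do essentially this. The enum is re-declared locally (doc comments omitted) so the example stands alone, and `check_head_for_duties` is a hypothetical helper, not an API introduced by this commit.

type Hash256 = [u8; 32];

#[derive(Debug)]
enum HeadSafetyStatus {
    Safe(Option<Hash256>),
    Unsafe(Hash256),
    Invalid(Hash256),
}

// Hypothetical helper: refuse to serve validator duties unless the head payload is
// verified (or needs no verification), mirroring the HTTP API filter below.
fn check_head_for_duties(status: &HeadSafetyStatus) -> Result<(), String> {
    match status {
        HeadSafetyStatus::Safe(_) => Ok(()),
        HeadSafetyStatus::Unsafe(hash) => Err(format!(
            "optimistic head {:02x?} has not been verified by the execution layer",
            &hash[..4]
        )),
        HeadSafetyStatus::Invalid(hash) => Err(format!(
            "head {:02x?} has an invalid execution payload; this may be unrecoverable",
            &hash[..4]
        )),
    }
}

fn main() {
    assert!(check_head_for_duties(&HeadSafetyStatus::Safe(None)).is_ok());
    assert!(check_head_for_duties(&HeadSafetyStatus::Unsafe([0u8; 32])).is_err());
}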
pub type BeaconForkChoice<T> = ForkChoice<
@ -1016,6 +1024,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                genesis_validators_root: head.beacon_state.genesis_validators_root(),
                proposer_shuffling_decision_root,
                is_merge_complete: is_merge_complete(&head.beacon_state),
+               execution_payload_block_hash: head
+                   .beacon_block
+                   .message()
+                   .body()
+                   .execution_payload()
+                   .map(|ep| ep.block_hash),
            })
        })
    }
@ -2308,6 +2322,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        let current_slot = self.slot()?;
        let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
        let mut ops = fully_verified_block.confirmation_db_batch;
+       let payload_verification_status = fully_verified_block.payload_verification_status;
        let attestation_observation_timer =
            metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
@ -2427,7 +2442,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            let _fork_choice_block_timer =
                metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
            fork_choice
-               .on_block(current_slot, &block, block_root, &state)
+               .on_block(
+                   current_slot,
+                   &block,
+                   block_root,
+                   &state,
+                   payload_verification_status,
+               )
                .map_err(|e| BlockError::BeaconChainError(e.into()))?;
        }
@ -3260,6 +3281,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        }
        if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch {
+           // Check to ensure that this finalized block hasn't been marked as invalid.
+           let finalized_block = self
+               .fork_choice
+               .read()
+               .get_block(&new_finalized_checkpoint.root)
+               .ok_or(BeaconChainError::FinalizedBlockMissingFromForkChoice(
+                   new_finalized_checkpoint.root,
+               ))?;
+           if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status {
+               crit!(
+                   self.log,
+                   "Finalized block has an invalid payload";
+                   "msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \
+                             You may be on a hostile network.",
+                   "block_hash" => ?block_hash
+               );
+               let mut shutdown_sender = self.shutdown_sender();
+               shutdown_sender
+                   .try_send(ShutdownReason::Failure(
+                       "Finalized block has an invalid execution payload.",
+                   ))
+                   .map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?;
+           }
            // Due to race conditions, it's technically possible that the head we load here is
            // different to the one earlier in this function.
            //
@ -3420,37 +3465,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            .map_err(Error::ExecutionForkChoiceUpdateFailed)
    }
-   /// Indicates the status of the execution layer.
-   pub async fn execution_layer_status(&self) -> Result<ExecutionLayerStatus, BeaconChainError> {
-       let epoch = self.epoch()?;
-       if self.spec.merge_fork_epoch.map_or(true, |fork| epoch < fork) {
-           return Ok(ExecutionLayerStatus::NotRequired);
-       }
-       if let Some(execution_layer) = &self.execution_layer {
-           if execution_layer.is_synced().await {
-               Ok(ExecutionLayerStatus::Ready)
-           } else {
-               Ok(ExecutionLayerStatus::NotReady)
-           }
-       } else {
-           // This branch is slightly more restrictive than what is minimally required.
-           //
-           // It is possible for a node without an execution layer (EL) to follow the chain
-           // *after* the merge fork and *before* the terminal execution block, as long as
-           // that node is not required to produce blocks.
-           //
-           // However, here we say that all nodes *must* have an EL as soon as the merge fork
-           // happens. We do this because it's very difficult to determine that the terminal
-           // block has been met if we don't already have an EL. As far as we know, the
-           // terminal execution block might already exist and we've been rejecting it since
-           // we don't have an EL to verify it.
-           //
-           // I think it is very reasonable to say that the beacon chain expects all BNs to
-           // be paired with an EL node by the time the merge fork epoch is reached. So, we
-           // enforce that here.
-           Ok(ExecutionLayerStatus::Missing)
-       }
+   /// Returns the status of the current head block, regarding the validity of the execution
+   /// payload.
+   pub fn head_safety_status(&self) -> Result<HeadSafetyStatus, BeaconChainError> {
+       let head = self.head_info()?;
+       let head_block = self
+           .fork_choice
+           .read()
+           .get_block(&head.block_root)
+           .ok_or(BeaconChainError::HeadMissingFromForkChoice(head.block_root))?;
+       let status = match head_block.execution_status {
+           ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)),
+           ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash),
+           ExecutionStatus::Unknown(block_hash) => HeadSafetyStatus::Unsafe(block_hash),
+           ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None),
+       };
+       Ok(status)
    }
    /// This function takes a configured weak subjectivity `Checkpoint` and the latest finalized `Checkpoint`.

View File

@ -51,9 +51,9 @@ use crate::{
    metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use execution_layer::ExecutePayloadResponse;
-use fork_choice::{ForkChoice, ForkChoiceStore};
+use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
-use proto_array::Block as ProtoBlock;
+use proto_array::{Block as ProtoBlock, ExecutionStatus};
use safe_arith::ArithError;
use slog::{debug, error, info, Logger};
use slot_clock::SlotClock;
@ -232,6 +232,16 @@ pub enum BlockError<T: EthSpec> {
    ///
    /// See `ExecutionPayloadError` for scoring information
    ExecutionPayloadError(ExecutionPayloadError),
+   /// The block references a parent block whose execution payload was found to be
+   /// invalid.
+   ///
+   /// ## Peer scoring
+   ///
+   /// TODO(merge): reconsider how we score peers for this.
+   ///
+   /// The peer sent us an invalid block, but it is not yet clear how to score this in an
+   /// "optimistic" sync world.
+   ParentExecutionPayloadInvalid { parent_root: Hash256 },
}
/// Returned when block validation failed due to some issue verifying
@ -529,6 +539,7 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> {
    pub state: BeaconState<T::EthSpec>,
    pub parent_block: SignedBeaconBlock<T::EthSpec>,
    pub confirmation_db_batch: Vec<StoreOp<'a, T::EthSpec>>,
+   pub payload_verification_status: PayloadVerificationStatus,
}
/// Implemented on types that can be converted into a `FullyVerifiedBlock`.
@ -1140,7 +1151,8 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
        }
        // This is the soonest we can run these checks as they must be called AFTER per_slot_processing
-       let execute_payload_handle = if is_execution_enabled(&state, block.message().body()) {
+       let (execute_payload_handle, payload_verification_status) =
+           if is_execution_enabled(&state, block.message().body()) {
                let execution_layer = chain
                    .execution_layer
                    .as_ref()
@ -1160,31 +1172,20 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
                match execute_payload_response {
                    Ok((status, handle)) => match status {
-                       ExecutePayloadResponse::Valid => handle,
+                       ExecutePayloadResponse::Valid => {
+                           (handle, PayloadVerificationStatus::Verified)
+                       }
                        ExecutePayloadResponse::Invalid => {
                            return Err(ExecutionPayloadError::RejectedByExecutionEngine.into());
                        }
                        ExecutePayloadResponse::Syncing => {
-                           debug!(
-                               chain.log,
-                               "Optimistically accepting payload";
-                               "msg" => "execution engine is syncing"
-                           );
-                           handle
+                           (handle, PayloadVerificationStatus::NotVerified)
                        }
                    },
-                   Err(e) => {
-                       error!(
-                           chain.log,
-                           "Optimistically accepting payload";
-                           "error" => ?e,
-                           "msg" => "execution engine returned an error"
-                       );
-                       None
-                   }
+                   Err(_) => (None, PayloadVerificationStatus::NotVerified),
                }
            } else {
-               None
+               (None, PayloadVerificationStatus::Irrelevant)
            };
        // If the block is sufficiently recent, notify the validator monitor.
@ -1300,6 +1301,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
            state,
            parent_block: parent.beacon_block,
            confirmation_db_batch,
+           payload_verification_status,
        })
    }
}
@ -1315,7 +1317,21 @@ fn validate_execution_payload<T: BeaconChainTypes>(
    if let Some(execution_payload) = block.body().execution_payload() {
        // This logic should match `is_execution_enabled`. We use only the execution block hash of
        // the parent here in order to avoid loading the parent state during gossip verification.
-       let is_merge_complete = parent_block.execution_block_hash != Hash256::zero();
+       let is_merge_complete = match parent_block.execution_status {
+           // Optimistically declare that an "unknown" status block has completed the merge.
+           ExecutionStatus::Valid(_) | ExecutionStatus::Unknown(_) => true,
+           // It's impossible for an irrelevant block to have completed the merge. It is pre-merge
+           // by definition.
+           ExecutionStatus::Irrelevant(_) => false,
+           // If the parent has an invalid payload then it's impossible to build a valid block upon
+           // it. Reject the block.
+           ExecutionStatus::Invalid(_) => {
+               return Err(BlockError::ParentExecutionPayloadInvalid {
+                   parent_root: parent_block.root,
+               })
+           }
+       };
        let is_merge_block =
            !is_merge_complete && *execution_payload != <ExecutionPayload<T::EthSpec>>::default();
        if !is_merge_block && !is_merge_complete {

View File

@ -136,6 +136,9 @@ pub enum BeaconChainError {
    AltairForkDisabled,
    ExecutionLayerMissing,
    ExecutionForkChoiceUpdateFailed(execution_layer::Error),
+   HeadMissingFromForkChoice(Hash256),
+   FinalizedBlockMissingFromForkChoice(Hash256),
+   InvalidFinalizedPayloadShutdownError(TrySendError<ShutdownReason>),
}
easy_from_to!(SlotProcessingError, BeaconChainError);

View File

@ -1,5 +1,5 @@
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
-use fork_choice::ForkChoice;
+use fork_choice::{ForkChoice, PayloadVerificationStatus};
use itertools::process_results;
use slog::{info, warn, Logger};
use state_processing::state_advance::complete_state_advance;
@ -164,9 +164,21 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
        )
        .map_err(|e| format!("Error replaying block: {:?}", e))?;
+       // Setting this to unverified is the safest solution, since we don't have a way to
+       // retro-actively determine if they were valid or not.
+       //
+       // This scenario is so rare that it seems OK to double-verify some blocks.
+       let payload_verification_status = PayloadVerificationStatus::NotVerified;
        let (block, _) = block.deconstruct();
        fork_choice
-           .on_block(block.slot(), &block, block.canonical_root(), &state)
+           .on_block(
+               block.slot(),
+               &block,
+               block.canonical_root(),
+               &state,
+               payload_verification_status,
+           )
            .map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
    }

View File

@ -36,7 +36,7 @@ mod validator_pubkey_cache;
pub use self::beacon_chain::{
    AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
-   ExecutionLayerStatus, ForkChoiceError, HeadInfo, StateSkipConfig, WhenSlotSkipped,
+   ForkChoiceError, HeadInfo, HeadSafetyStatus, StateSkipConfig, WhenSlotSkipped,
    MAXIMUM_GOSSIP_CLOCK_DISPARITY,
};
pub use self::beacon_snapshot::BeaconSnapshot;

View File

@ -664,7 +664,7 @@ where
            // Issue the head to the execution engine on startup. This ensures it can start
            // syncing.
-           if head.is_merge_complete {
+           if let Some(block_hash) = head.execution_payload_block_hash {
                runtime_context.executor.spawn(
                    async move {
                        let result = BeaconChain::<
@ -673,7 +673,7 @@
                            inner_execution_layer,
                            store,
                            head.finalized_checkpoint.root,
-                           head.block_root,
+                           block_hash,
                        )
                        .await;
View File

@ -1,8 +1,8 @@
use crate::metrics;
-use beacon_chain::{BeaconChain, BeaconChainTypes};
+use beacon_chain::{BeaconChain, BeaconChainTypes, HeadSafetyStatus};
use lighthouse_network::{types::SyncState, NetworkGlobals};
use parking_lot::Mutex;
-use slog::{debug, error, info, warn, Logger};
+use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::{Duration, Instant};
@ -263,10 +263,43 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
            } else {
                head_root.to_string()
            };
+           let block_hash = match beacon_chain.head_safety_status() {
+               Ok(HeadSafetyStatus::Safe(hash_opt)) => hash_opt
+                   .map(|hash| format!("{} (verified)", hash))
+                   .unwrap_or_else(|| "n/a".to_string()),
+               Ok(HeadSafetyStatus::Unsafe(block_hash)) => {
+                   warn!(
+                       log,
+                       "Head execution payload is unverified";
+                       "execution_block_hash" => ?block_hash,
+                   );
+                   format!("{} (unverified)", block_hash)
+               }
+               Ok(HeadSafetyStatus::Invalid(block_hash)) => {
+                   crit!(
+                       log,
+                       "Head execution payload is invalid";
+                       "msg" => "this scenario may be unrecoverable",
+                       "execution_block_hash" => ?block_hash,
+                   );
+                   format!("{} (invalid)", block_hash)
+               }
+               Err(e) => {
+                   error!(
+                       log,
+                       "Failed to read head safety status";
+                       "error" => ?e
+                   );
+                   "n/a".to_string()
+               }
+           };
            info!(
                log,
                "Synced";
                "peers" => peer_count_pretty(connected_peer_count),
+               "exec_hash" => block_hash,
                "finalized_root" => format!("{}", finalized_root),
                "finalized_epoch" => finalized_epoch,
                "epoch" => current_epoch,

View File

@ -20,7 +20,7 @@ use beacon_chain::{
    observed_operations::ObservationOutcome,
    validator_monitor::{get_block_delay_ms, timestamp_now},
    AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes,
-   ExecutionLayerStatus, WhenSlotSkipped,
+   HeadSafetyStatus, WhenSlotSkipped,
};
use block_id::BlockId;
use eth2::types::{self as api_types, EndpointVersion, ValidatorId};
@ -385,24 +385,31 @@ pub fn serve<T: BeaconChainTypes>(
        )
        .untuple_one();
-   // Create a `warp` filter that rejects requests unless the execution layer (EL) is ready.
-   let only_while_el_is_ready = warp::any()
+   // Create a `warp` filter that rejects requests unless the head has been verified by the
+   // execution layer.
+   let only_with_safe_head = warp::any()
        .and(chain_filter.clone())
        .and_then(move |chain: Arc<BeaconChain<T>>| async move {
-           let status = chain.execution_layer_status().await.map_err(|e| {
+           let status = chain.head_safety_status().map_err(|e| {
                warp_utils::reject::custom_server_error(format!(
-                   "failed to read execution engine status: {:?}",
+                   "failed to read head safety status: {:?}",
                    e
                ))
            })?;
            match status {
-               ExecutionLayerStatus::Ready | ExecutionLayerStatus::NotRequired => Ok(()),
-               ExecutionLayerStatus::NotReady => Err(warp_utils::reject::custom_server_error(
-                   "execution engine(s) not ready".to_string(),
-               )),
-               ExecutionLayerStatus::Missing => Err(warp_utils::reject::custom_server_error(
-                   "no execution engines configured".to_string(),
-               )),
+               HeadSafetyStatus::Safe(_) => Ok(()),
+               HeadSafetyStatus::Unsafe(hash) => {
+                   Err(warp_utils::reject::custom_server_error(format!(
+                       "optimistic head hash {:?} has not been verified by the execution layer",
+                       hash
+                   )))
+               }
+               HeadSafetyStatus::Invalid(hash) => {
+                   Err(warp_utils::reject::custom_server_error(format!(
+                       "the head block has an invalid payload {:?}, this may be unrecoverable",
+                       hash
+                   )))
+               }
            }
        })
        .untuple_one();
@ -1103,7 +1110,6 @@ pub fn serve<T: BeaconChainTypes>(
        .and(warp::body::json())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
-       .and(only_while_el_is_ready.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             attestations: Vec<Attestation<T::EthSpec>>,
@ -1401,7 +1407,6 @@
        .and(warp::body::json())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
-       .and(only_while_el_is_ready.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             signatures: Vec<SyncCommitteeMessage>,
@ -1831,7 +1836,6 @@
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
        .and(chain_filter.clone())
        .and(log_filter.clone())
        .and_then(|epoch: Epoch, chain: Arc<BeaconChain<T>>, log: Logger| {
@ -1849,7 +1853,6 @@
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
        .and(warp::query::<api_types::ValidatorBlocksQuery>())
        .and(chain_filter.clone())
        .and_then(
@ -1884,7 +1887,7 @@
        .and(warp::path::end())
        .and(warp::query::<api_types::ValidatorAttestationDataQuery>())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
+       .and(only_with_safe_head.clone())
        .and(chain_filter.clone())
        .and_then(
            |query: api_types::ValidatorAttestationDataQuery, chain: Arc<BeaconChain<T>>| {
@ -1917,7 +1920,7 @@
        .and(warp::path::end())
        .and(warp::query::<api_types::ValidatorAggregateAttestationQuery>())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
+       .and(only_with_safe_head.clone())
        .and(chain_filter.clone())
        .and_then(
            |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc<BeaconChain<T>>| {
@ -1949,7 +1952,6 @@
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and_then(
@ -1972,7 +1974,6 @@
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and_then(
@ -1990,7 +1991,7 @@
        .and(warp::path::end())
        .and(warp::query::<SyncContributionData>())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
+       .and(only_with_safe_head)
        .and(chain_filter.clone())
        .and_then(
            |sync_committee_data: SyncContributionData, chain: Arc<BeaconChain<T>>| {
@ -2013,7 +2014,6 @@
        .and(warp::path("aggregate_and_proofs"))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready.clone())
        .and(chain_filter.clone())
        .and(warp::body::json())
        .and(network_tx_filter.clone())
@ -2114,7 +2114,6 @@
        .and(warp::path("contribution_and_proofs"))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
-       .and(only_while_el_is_ready)
        .and(chain_filter.clone())
        .and(warp::body::json())
        .and(network_tx_filter.clone())

View File

@ -707,7 +707,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                self.log,
                "New block received";
                "slot" => verified_block.block.slot(),
-               "hash" => ?verified_block.block_root
+               "root" => ?verified_block.block_root
            );
            self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
@ -770,8 +770,10 @@ impl<T: BeaconChainTypes> Worker<T> {
            | Err(e @ BlockError::TooManySkippedSlots { .. })
            | Err(e @ BlockError::WeakSubjectivityConflict)
            | Err(e @ BlockError::InconsistentFork(_))
-           // TODO: is this what we should be doing when block verification fails?
+           // TODO(merge): reconsider peer scoring for this event.
            | Err(e @ BlockError::ExecutionPayloadError(_))
+           // TODO(merge): reconsider peer scoring for this event.
+           | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. })
            | Err(e @ BlockError::GenesisBlock) => {
                warn!(self.log, "Could not verify block for gossip, rejecting the block";
                    "error" => %e);

View File

@ -1,6 +1,6 @@
use std::marker::PhantomData;
-use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice};
+use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use types::{
    AttestationShufflingId, BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec,
@ -38,6 +38,11 @@ pub enum Error<T> {
        block_slot: Slot,
        state_slot: Slot,
    },
+   InvalidPayloadStatus {
+       block_slot: Slot,
+       block_root: Hash256,
+       payload_verification_status: PayloadVerificationStatus,
+   },
}
impl<T> From<InvalidAttestation> for Error<T> {
@ -101,6 +106,19 @@ impl<T> From<String> for Error<T> {
    }
}
+/// Indicates whether a block's execution payload has been verified by an EL.
+///
+/// There is no variant for "invalid", since such a block should never be added to fork choice.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum PayloadVerificationStatus {
+   /// An EL has declared the execution payload to be valid.
+   Verified,
+   /// An EL has not yet made a determination about the execution payload.
+   NotVerified,
+   /// The block is either pre-merge-fork, or prior to the terminal PoW block.
+   Irrelevant,
+}
/// Calculate how far `slot` lies from the start of its epoch.
///
/// ## Specification
@ -262,9 +280,13 @@ where
            .map_err(Error::BeaconStateError)?;
        // Default any non-merge execution block hashes to 0x000..000.
-       let execution_block_hash = anchor_block.message_merge().map_or_else(
-           |()| Hash256::zero(),
-           |message| message.body.execution_payload.block_hash,
+       let execution_status = anchor_block.message_merge().map_or_else(
+           |()| ExecutionStatus::irrelevant(),
+           |message| {
+               // Assume that this payload is valid, since the anchor should be a trusted block and
+               // state.
+               ExecutionStatus::Valid(message.body.execution_payload.block_hash)
+           },
        );
        let proto_array = ProtoArrayForkChoice::new(
@ -275,7 +297,7 @@ where
            fc_store.finalized_checkpoint().root,
            current_epoch_shuffling_id,
            next_epoch_shuffling_id,
-           execution_block_hash,
+           execution_status,
        )?;
        Ok(Self {
@ -446,6 +468,7 @@ where
        block: &BeaconBlock<E>,
        block_root: Hash256,
        state: &BeaconState<E>,
+       payload_verification_status: PayloadVerificationStatus,
    ) -> Result<(), Error<T::Error>> {
        let current_slot = self.update_time(current_slot)?;
@ -552,11 +575,32 @@ where
            .on_verified_block(block, block_root, state)
            .map_err(Error::AfterBlockFailed)?;
-       // Default any non-merge execution block hashes to 0x000..000.
-       let execution_block_hash = block.body_merge().map_or_else(
-           |()| Hash256::zero(),
-           |body| body.execution_payload.block_hash,
-       );
+       let execution_status = if let Some(execution_payload) = block.body().execution_payload() {
+           let block_hash = execution_payload.block_hash;
+           if block_hash == Hash256::zero() {
+               // The block is post-merge-fork, but pre-terminal-PoW block. We don't need to verify
+               // the payload.
+               ExecutionStatus::irrelevant()
+           } else {
+               match payload_verification_status {
+                   PayloadVerificationStatus::Verified => ExecutionStatus::Valid(block_hash),
+                   PayloadVerificationStatus::NotVerified => ExecutionStatus::Unknown(block_hash),
+                   // It would be a logic error to declare a block irrelevant if it has an
+                   // execution payload with a non-zero block hash.
+                   PayloadVerificationStatus::Irrelevant => {
+                       return Err(Error::InvalidPayloadStatus {
+                           block_slot: block.slot(),
+                           block_root,
+                           payload_verification_status,
+                       })
+                   }
+               }
+           }
+       } else {
+           // There is no payload to verify.
+           ExecutionStatus::irrelevant()
+       };
        // This does not apply a vote to the block, it just makes fork choice aware of the block so
        // it can still be identified as the head even if it doesn't have any votes.
@ -580,7 +624,7 @@ where
            state_root: block.state_root(),
            justified_epoch: state.current_justified_checkpoint().epoch,
            finalized_epoch: state.finalized_checkpoint().epoch,
-           execution_block_hash,
+           execution_status,
        })?;
        Ok(())

View File

@ -2,7 +2,8 @@ mod fork_choice;
mod fork_choice_store;
pub use crate::fork_choice::{
-   Error, ForkChoice, InvalidAttestation, InvalidBlock, PersistedForkChoice, QueuedAttestation,
+   Error, ForkChoice, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
+   PersistedForkChoice, QueuedAttestation, SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
};
pub use fork_choice_store::ForkChoiceStore;
pub use proto_array::Block as ProtoBlock;

View File

@ -10,7 +10,10 @@ use beacon_chain::{
    BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError,
    StateSkipConfig, WhenSlotSkipped,
};
-use fork_choice::{ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation};
+use fork_choice::{
+   ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
+   QueuedAttestation, SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
+};
use store::MemoryStore;
use types::{
    test_utils::generate_deterministic_keypair, BeaconBlock, BeaconBlockRef, BeaconState,
@ -268,7 +271,13 @@ impl ForkChoiceTest {
            .chain
            .fork_choice
            .write()
-           .on_block(current_slot, &block, block.canonical_root(), &state)
+           .on_block(
+               current_slot,
+               &block,
+               block.canonical_root(),
+               &state,
+               PayloadVerificationStatus::Verified,
+           )
            .unwrap();
        self
    }
@ -303,7 +312,13 @@ impl ForkChoiceTest {
            .chain
            .fork_choice
            .write()
-           .on_block(current_slot, &block, block.canonical_root(), &state)
+           .on_block(
+               current_slot,
+               &block,
+               block.canonical_root(),
+               &state,
+               PayloadVerificationStatus::Verified,
+           )
            .err()
            .expect("on_block did not return an error");
        comparison_func(err);

View File

@ -30,4 +30,8 @@ pub enum Error {
        head_justified_epoch: Epoch,
        head_finalized_epoch: Epoch,
    },
+   InvalidAncestorOfValidPayload {
+       ancestor_block_root: Hash256,
+       ancestor_payload_block_hash: Hash256,
+   },
}

View File

@ -2,7 +2,7 @@ mod ffg_updates;
mod no_votes;
mod votes;
-use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice};
+use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
use serde_derive::{Deserialize, Serialize};
use types::{AttestationShufflingId, Epoch, Hash256, Slot};
@ -57,7 +57,7 @@ impl ForkChoiceTestDefinition {
    pub fn run(self) {
        let junk_shuffling_id =
            AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
-       let execution_block_hash = Hash256::zero();
+       let execution_status = ExecutionStatus::irrelevant();
        let mut fork_choice = ProtoArrayForkChoice::new(
            self.finalized_block_slot,
            Hash256::zero(),
@ -66,7 +66,7 @@
            self.finalized_root,
            junk_shuffling_id.clone(),
            junk_shuffling_id,
-           execution_block_hash,
+           execution_status,
        )
        .expect("should create fork choice struct");
@ -141,7 +141,7 @@
            ),
            justified_epoch,
            finalized_epoch,
-           execution_block_hash,
+           execution_status,
        };
        fork_choice.process_block(block).unwrap_or_else(|e| {
            panic!(

View File

@ -4,7 +4,7 @@ mod proto_array;
mod proto_array_fork_choice;
mod ssz_container;
-pub use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice};
+pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
pub use error::Error;
pub mod core {

View File

@ -1,4 +1,4 @@
-use crate::{error::Error, Block};
+use crate::{error::Error, Block, ExecutionStatus};
use serde_derive::{Deserialize, Serialize};
use ssz::four_byte_option_impl;
use ssz_derive::{Decode, Encode};
@ -35,11 +35,9 @@ pub struct ProtoNode {
    best_child: Option<usize>,
    #[ssz(with = "four_byte_option_usize")]
    best_descendant: Option<usize>,
-   /// It's necessary to track this so that we can refuse to propagate post-merge blocks without
-   /// execution payloads, without confusing these with pre-merge blocks.
-   ///
-   /// Relevant spec issue: https://github.com/ethereum/consensus-specs/issues/2618
-   pub execution_block_hash: Hash256,
+   /// Indicates if an execution node has marked this block as valid. Also contains the execution
+   /// block hash.
+   pub execution_status: ExecutionStatus,
}
/// Only used for SSZ deserialization of the persisted fork choice during the database migration
@ -78,7 +76,11 @@ impl Into<ProtoNode> for LegacyProtoNode {
            weight: self.weight,
            best_child: self.best_child,
            best_descendant: self.best_descendant,
-           execution_block_hash: Hash256::zero(),
+           // We set the following execution value as if the block is a pre-merge-fork block. This
+           // is safe as long as we never import a merge block with the old version of proto-array.
+           // This will be safe since we can't actually process merge blocks until we've made this
+           // change to fork choice.
+           execution_status: ExecutionStatus::irrelevant(),
        }
    }
}
@ -224,7 +226,7 @@ impl ProtoArray {
            weight: 0,
            best_child: None,
            best_descendant: None,
-           execution_block_hash: block.execution_block_hash,
+           execution_status: block.execution_status,
        };
        self.indices.insert(node.root, node_index);
@ -232,11 +234,58 @@ impl ProtoArray {
        if let Some(parent_index) = node.parent {
            self.maybe_update_best_child_and_descendant(parent_index, node_index)?;
+           if matches!(block.execution_status, ExecutionStatus::Valid(_)) {
+               self.propagate_execution_payload_verification(parent_index)?;
+           }
        }
        Ok(())
    }
+   pub fn propagate_execution_payload_verification(
+       &mut self,
+       verified_node_index: usize,
+   ) -> Result<(), Error> {
+       let mut index = verified_node_index;
+       loop {
+           let node = self
+               .nodes
+               .get_mut(index)
+               .ok_or(Error::InvalidNodeIndex(index))?;
+           let parent_index = match node.execution_status {
+               // We have reached a node that we already know is valid. No need to iterate further
+               // since we assume all ancestors have already been set to valid.
+               ExecutionStatus::Valid(_) => return Ok(()),
+               // We have reached an irrelevant node; this node is prior to a terminal execution
+               // block. There's no need to iterate further since it's impossible for this block to
+               // have any relevant ancestors.
+               ExecutionStatus::Irrelevant(_) => return Ok(()),
+               // The block has an unknown status, set it to valid since any ancestor of a valid
+               // payload can be considered valid.
+               ExecutionStatus::Unknown(payload_block_hash) => {
+                   node.execution_status = ExecutionStatus::Valid(payload_block_hash);
+                   if let Some(parent_index) = node.parent {
+                       parent_index
+                   } else {
+                       // We have reached the root block, iteration complete.
+                       return Ok(());
+                   }
+               }
+               // An ancestor of the valid payload was invalid. This is a serious error which
+               // indicates a consensus failure in the execution node. This is unrecoverable.
+               ExecutionStatus::Invalid(ancestor_payload_block_hash) => {
+                   return Err(Error::InvalidAncestorOfValidPayload {
+                       ancestor_block_root: node.root,
+                       ancestor_payload_block_hash,
+                   })
+               }
+           };
+           index = parent_index;
+       }
+   }
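Editor's sketch of the back-propagation above, reduced to a flat parent-index array so it runs standalone. Names and the error type are illustrative; the real function also guards against missing node indices and starts the walk at the parent of a newly inserted `Valid` block.

type Hash256 = [u8; 32];

#[derive(Clone, Copy, Debug, PartialEq)]
enum ExecutionStatus {
    Valid(Hash256),
    Invalid(Hash256),
    Unknown(Hash256),
    Irrelevant,
}

struct Node {
    parent: Option<usize>,
    execution_status: ExecutionStatus,
}

// Promote every `Unknown` ancestor of a newly verified block to `Valid`, stopping at the
// first ancestor that is already settled. An `Invalid` ancestor is a fatal inconsistency.
fn propagate_valid(nodes: &mut [Node], verified_index: usize) -> Result<(), String> {
    let mut index = verified_index;
    loop {
        let node = &mut nodes[index];
        index = match node.execution_status {
            // Already valid or pre-merge: all ancestors are settled, nothing left to do.
            ExecutionStatus::Valid(_) | ExecutionStatus::Irrelevant => return Ok(()),
            ExecutionStatus::Unknown(hash) => {
                node.execution_status = ExecutionStatus::Valid(hash);
                match node.parent {
                    Some(parent) => parent,
                    None => return Ok(()), // reached the root
                }
            }
            // A valid payload cannot descend from an invalid one.
            ExecutionStatus::Invalid(_) => {
                return Err("invalid ancestor of a valid payload".to_string())
            }
        };
    }
}

fn main() {
    // Chain 0 <- 1 <- 2, all imported optimistically; then the EL verifies block 2's payload.
    let mut nodes = vec![
        Node { parent: None, execution_status: ExecutionStatus::Unknown([0; 32]) },
        Node { parent: Some(0), execution_status: ExecutionStatus::Unknown([1; 32]) },
        Node { parent: Some(1), execution_status: ExecutionStatus::Unknown([2; 32]) },
    ];
    propagate_valid(&mut nodes, 2).unwrap();
    assert!(nodes.iter().all(|n| matches!(n.execution_status, ExecutionStatus::Valid(_))));
}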
    /// Follows the best-descendant links to find the best-block (i.e., head-block).
    ///
    /// ## Notes

View File

@ -1,6 +1,7 @@
use crate::error::Error;
use crate::proto_array::ProtoArray;
use crate::ssz_container::{LegacySszContainer, SszContainer};
+use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
@ -15,6 +16,32 @@ pub struct VoteTracker {
    next_epoch: Epoch,
}
+/// Represents the verification status of an execution payload.
+#[derive(Clone, Copy, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)]
+#[ssz(enum_behaviour = "union")]
+pub enum ExecutionStatus {
+   /// An EL has determined that the payload is valid.
+   Valid(Hash256),
+   /// An EL has determined that the payload is invalid.
+   Invalid(Hash256),
+   /// An EL has not yet verified the execution payload.
+   Unknown(Hash256),
+   /// The block is either prior to the merge fork, or after the merge fork but before the terminal
+   /// PoW block has been found.
+   ///
+   /// # Note:
+   ///
+   /// This `bool` only exists to satisfy our SSZ implementation which requires all variants
+   /// to have a value. It can be set to anything.
+   Irrelevant(bool), // TODO(merge): fix bool.
+}
+impl ExecutionStatus {
+   pub fn irrelevant() -> Self {
+       ExecutionStatus::Irrelevant(false)
+   }
+}
/// A block that is to be applied to the fork choice.
///
/// A simplified version of `types::BeaconBlock`.
@ -29,7 +56,9 @@ pub struct Block {
    pub next_epoch_shuffling_id: AttestationShufflingId,
    pub justified_epoch: Epoch,
    pub finalized_epoch: Epoch,
-   pub execution_block_hash: Hash256,
+   /// Indicates if an execution node has marked this block as valid. Also contains the execution
+   /// block hash.
+   pub execution_status: ExecutionStatus,
}
/// A Vec-wrapper which will grow to match any request.
@ -76,7 +105,7 @@ impl ProtoArrayForkChoice {
        finalized_root: Hash256,
        current_epoch_shuffling_id: AttestationShufflingId,
        next_epoch_shuffling_id: AttestationShufflingId,
-       execution_block_hash: Hash256,
+       execution_status: ExecutionStatus,
    ) -> Result<Self, String> {
        let mut proto_array = ProtoArray {
            prune_threshold: DEFAULT_PRUNE_THRESHOLD,
@ -98,7 +127,7 @@
            next_epoch_shuffling_id,
            justified_epoch,
            finalized_epoch,
-           execution_block_hash,
+           execution_status,
        };
        proto_array
@ -208,7 +237,7 @@
            next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(),
            justified_epoch: block.justified_epoch,
            finalized_epoch: block.finalized_epoch,
-           execution_block_hash: block.execution_block_hash,
+           execution_status: block.execution_status,
        })
    }
@ -372,7 +401,7 @@ mod test_compute_deltas {
        let unknown = Hash256::from_low_u64_be(4);
        let junk_shuffling_id =
            AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
-       let execution_block_hash = Hash256::zero();
+       let execution_status = ExecutionStatus::irrelevant();
        let mut fc = ProtoArrayForkChoice::new(
            genesis_slot,
@ -382,7 +411,7 @@
            finalized_root,
            junk_shuffling_id.clone(),
            junk_shuffling_id.clone(),
-           execution_block_hash,
+           execution_status,
        )
        .unwrap();
@ -398,7 +427,7 @@
            next_epoch_shuffling_id: junk_shuffling_id.clone(),
            justified_epoch: genesis_epoch,
            finalized_epoch: genesis_epoch,
-           execution_block_hash,
+           execution_status,
        })
        .unwrap();
@ -414,7 +443,7 @@
            next_epoch_shuffling_id: junk_shuffling_id,
            justified_epoch: genesis_epoch,
            finalized_epoch: genesis_epoch,
-           execution_block_hash,
+           execution_status,
        })
        .unwrap();