[Merge] Optimistic Sync: Stage 1 (#2686)

* Add payload verification status to fork choice

* Pass payload verification status to import_block

* Add valid back-propagation

* Add head safety status latch to API

* Remove ExecutionLayerStatus

* Add execution info to client notifier

* Update notifier logs

* Change use of "hash" to refer to beacon block

* Shutdown on invalid finalized block

* Tidy, add comments

* Fix failing FC tests

* Allow blocks with unsafe head

* Fix forkchoiceUpdate call on startup
Paul Hauner 2021-10-07 22:24:57 +11:00
parent aa1d57aa55
commit 6dde12f311
17 changed files with 395 additions and 156 deletions

View File

@ -56,6 +56,7 @@ use itertools::process_results;
use itertools::Itertools;
use operation_pool::{OperationPool, PersistedOperationPool};
use parking_lot::{Mutex, RwLock};
use proto_array::ExecutionStatus;
use safe_arith::SafeArith;
use slasher::Slasher;
use slog::{crit, debug, error, info, trace, warn, Logger};
@ -195,6 +196,7 @@ pub struct HeadInfo {
pub genesis_validators_root: Hash256,
pub proposer_shuffling_decision_root: Hash256,
pub is_merge_complete: bool,
pub execution_payload_block_hash: Option<Hash256>,
}
pub trait BeaconChainTypes: Send + Sync + 'static {
@ -205,17 +207,23 @@ pub trait BeaconChainTypes: Send + Sync + 'static {
type EthSpec: types::EthSpec;
}
/// Indicates the status of the `ExecutionLayer`.
/// Indicates the EL payload verification status of the head beacon block.
#[derive(Debug, PartialEq)]
pub enum ExecutionLayerStatus {
/// The execution layer is synced and reachable.
Ready,
/// The execution layer either syncing or unreachable.
NotReady,
/// The execution layer is required, but has not been enabled. This is a configuration error.
Missing,
/// The execution layer is not yet required, therefore the status is irrelevant.
NotRequired,
pub enum HeadSafetyStatus {
/// The head block has either been verified by an EL or does not require EL verification
/// (e.g., it is pre-merge or pre-terminal-block).
///
/// If the block is post-terminal-block, `Some(execution_payload.block_hash)` is included with
/// the variant.
Safe(Option<Hash256>),
/// The head block execution payload has not yet been verified by an EL.
///
/// The `execution_payload.block_hash` of the head block is returned.
Unsafe(Hash256),
/// The head block execution payload was deemed to be invalid by an EL.
///
/// The `execution_payload.block_hash` of the head block is returned.
Invalid(Hash256),
}
pub type BeaconForkChoice<T> = ForkChoice<
@ -1016,6 +1024,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
genesis_validators_root: head.beacon_state.genesis_validators_root(),
proposer_shuffling_decision_root,
is_merge_complete: is_merge_complete(&head.beacon_state),
execution_payload_block_hash: head
.beacon_block
.message()
.body()
.execution_payload()
.map(|ep| ep.block_hash),
})
})
}
@ -2308,6 +2322,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let current_slot = self.slot()?;
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
let mut ops = fully_verified_block.confirmation_db_batch;
let payload_verification_status = fully_verified_block.payload_verification_status;
let attestation_observation_timer =
metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
@ -2427,7 +2442,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let _fork_choice_block_timer =
metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
fork_choice
.on_block(current_slot, &block, block_root, &state)
.on_block(
current_slot,
&block,
block_root,
&state,
payload_verification_status,
)
.map_err(|e| BlockError::BeaconChainError(e.into()))?;
}
@ -3260,6 +3281,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
if new_finalized_checkpoint.epoch != old_finalized_checkpoint.epoch {
// Check to ensure that this finalized block hasn't been marked as invalid.
let finalized_block = self
.fork_choice
.read()
.get_block(&new_finalized_checkpoint.root)
.ok_or(BeaconChainError::FinalizedBlockMissingFromForkChoice(
new_finalized_checkpoint.root,
))?;
if let ExecutionStatus::Invalid(block_hash) = finalized_block.execution_status {
crit!(
self.log,
"Finalized block has an invalid payload";
"msg" => "You must use the `--purge-db` flag to clear the database and restart sync. \
You may be on a hostile network.",
"block_hash" => ?block_hash
);
let mut shutdown_sender = self.shutdown_sender();
shutdown_sender
.try_send(ShutdownReason::Failure(
"Finalized block has an invalid execution payload.",
))
.map_err(BeaconChainError::InvalidFinalizedPayloadShutdownError)?;
}
// Due to race conditions, it's technically possible that the head we load here is
// different to the one earlier in this function.
//
@ -3420,37 +3465,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map_err(Error::ExecutionForkChoiceUpdateFailed)
}
/// Indicates the status of the execution layer.
pub async fn execution_layer_status(&self) -> Result<ExecutionLayerStatus, BeaconChainError> {
let epoch = self.epoch()?;
if self.spec.merge_fork_epoch.map_or(true, |fork| epoch < fork) {
return Ok(ExecutionLayerStatus::NotRequired);
}
/// Returns the status of the current head block with regard to the validity of its
/// execution payload.
pub fn head_safety_status(&self) -> Result<HeadSafetyStatus, BeaconChainError> {
let head = self.head_info()?;
let head_block = self
.fork_choice
.read()
.get_block(&head.block_root)
.ok_or(BeaconChainError::HeadMissingFromForkChoice(head.block_root))?;
if let Some(execution_layer) = &self.execution_layer {
if execution_layer.is_synced().await {
Ok(ExecutionLayerStatus::Ready)
} else {
Ok(ExecutionLayerStatus::NotReady)
}
} else {
// This branch is slightly more restrictive than what is minimally required.
//
// It is possible for a node without an execution layer (EL) to follow the chain
// *after* the merge fork and *before* the terminal execution block, as long as
// that node is not required to produce blocks.
//
// However, here we say that all nodes *must* have an EL as soon as the merge fork
// happens. We do this because it's very difficult to determine that the terminal
// block has been met if we don't already have an EL. As far as we know, the
// terminal execution block might already exist and we've been rejecting it since
// we don't have an EL to verify it.
//
// I think it is very reasonable to say that the beacon chain expects all BNs to
// be paired with an EL node by the time the merge fork epoch is reached. So, we
// enforce that here.
Ok(ExecutionLayerStatus::Missing)
}
let status = match head_block.execution_status {
ExecutionStatus::Valid(block_hash) => HeadSafetyStatus::Safe(Some(block_hash)),
ExecutionStatus::Invalid(block_hash) => HeadSafetyStatus::Invalid(block_hash),
ExecutionStatus::Unknown(block_hash) => HeadSafetyStatus::Unsafe(block_hash),
ExecutionStatus::Irrelevant(_) => HeadSafetyStatus::Safe(None),
};
Ok(status)
}
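For illustration, here is a minimal standalone sketch of how a consumer might gate behaviour on the new latch. `HeadSafety` and `should_serve_validator_duties` are hypothetical stand-ins for `HeadSafetyStatus` and the HTTP API filter that appears later in this diff; this is not Lighthouse code.

// Standalone sketch: gate an action on the head safety latch.
#[derive(Debug)]
enum HeadSafety {
    Safe(Option<[u8; 32]>),
    Unsafe([u8; 32]),
    Invalid([u8; 32]),
}

fn should_serve_validator_duties(status: &HeadSafety) -> Result<(), String> {
    match status {
        // Verified (or pre-merge) head: proceed as normal.
        HeadSafety::Safe(_) => Ok(()),
        // Optimistically imported head: refuse duties until an EL verifies the payload.
        HeadSafety::Unsafe(hash) => Err(format!(
            "optimistic head payload (hash starting 0x{:02x}) is not yet verified by an EL",
            hash[0]
        )),
        // Invalid head: refuse duties; this may be unrecoverable.
        HeadSafety::Invalid(hash) => Err(format!(
            "head payload (hash starting 0x{:02x}) is invalid",
            hash[0]
        )),
    }
}

fn main() {
    assert!(should_serve_validator_duties(&HeadSafety::Safe(None)).is_ok());
    assert!(should_serve_validator_duties(&HeadSafety::Unsafe([0xab; 32])).is_err());
}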
/// This function takes a configured weak subjectivity `Checkpoint` and the latest finalized `Checkpoint`.

View File

@ -51,9 +51,9 @@ use crate::{
metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use execution_layer::ExecutePayloadResponse;
use fork_choice::{ForkChoice, ForkChoiceStore};
use fork_choice::{ForkChoice, ForkChoiceStore, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
use proto_array::Block as ProtoBlock;
use proto_array::{Block as ProtoBlock, ExecutionStatus};
use safe_arith::ArithError;
use slog::{debug, error, info, Logger};
use slot_clock::SlotClock;
@ -232,6 +232,16 @@ pub enum BlockError<T: EthSpec> {
///
/// See `ExecutionPayloadError` for scoring information
ExecutionPayloadError(ExecutionPayloadError),
/// The block references a parent block whose execution payload was found to be
/// invalid.
///
/// ## Peer scoring
///
/// TODO(merge): reconsider how we score peers for this.
///
/// The peer sent us an invalid block, but I'm not really sure how to score this in an
/// "optimistic" sync world.
ParentExecutionPayloadInvalid { parent_root: Hash256 },
}
/// Returned when block validation failed due to some issue verifying
@ -529,6 +539,7 @@ pub struct FullyVerifiedBlock<'a, T: BeaconChainTypes> {
pub state: BeaconState<T::EthSpec>,
pub parent_block: SignedBeaconBlock<T::EthSpec>,
pub confirmation_db_batch: Vec<StoreOp<'a, T::EthSpec>>,
pub payload_verification_status: PayloadVerificationStatus,
}
/// Implemented on types that can be converted into a `FullyVerifiedBlock`.
@ -1140,52 +1151,42 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
}
// This is the soonest we can run these checks as they must be called AFTER per_slot_processing
let execute_payload_handle = if is_execution_enabled(&state, block.message().body()) {
let execution_layer = chain
.execution_layer
.as_ref()
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
let execution_payload =
block
.message()
.body()
.execution_payload()
.ok_or_else(|| InconsistentFork {
fork_at_slot: eth2::types::ForkName::Merge,
object_fork: block.message().body().fork_name(),
})?;
let (execute_payload_handle, payload_verification_status) =
if is_execution_enabled(&state, block.message().body()) {
let execution_layer = chain
.execution_layer
.as_ref()
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
let execution_payload =
block
.message()
.body()
.execution_payload()
.ok_or_else(|| InconsistentFork {
fork_at_slot: eth2::types::ForkName::Merge,
object_fork: block.message().body().fork_name(),
})?;
let execute_payload_response = execution_layer
.block_on(|execution_layer| execution_layer.execute_payload(execution_payload));
let execute_payload_response = execution_layer
.block_on(|execution_layer| execution_layer.execute_payload(execution_payload));
match execute_payload_response {
Ok((status, handle)) => match status {
ExecutePayloadResponse::Valid => handle,
ExecutePayloadResponse::Invalid => {
return Err(ExecutionPayloadError::RejectedByExecutionEngine.into());
}
ExecutePayloadResponse::Syncing => {
debug!(
chain.log,
"Optimistically accepting payload";
"msg" => "execution engine is syncing"
);
handle
}
},
Err(e) => {
error!(
chain.log,
"Optimistically accepting payload";
"error" => ?e,
"msg" => "execution engine returned an error"
);
None
match execute_payload_response {
Ok((status, handle)) => match status {
ExecutePayloadResponse::Valid => {
(handle, PayloadVerificationStatus::Verified)
}
ExecutePayloadResponse::Invalid => {
return Err(ExecutionPayloadError::RejectedByExecutionEngine.into());
}
ExecutePayloadResponse::Syncing => {
(handle, PayloadVerificationStatus::NotVerified)
}
},
Err(_) => (None, PayloadVerificationStatus::NotVerified),
}
}
} else {
None
};
} else {
(None, PayloadVerificationStatus::Irrelevant)
};
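The branch above is the core of the optimistic import policy. As a rough, self-contained distillation (simplified stand-in types, not the real `execution_layer`/`fork_choice` items): an explicit `Invalid` response rejects the block, while `Syncing` or an engine error imports it optimistically as not-yet-verified.

#[derive(Debug, PartialEq)]
enum EngineResponse {
    Valid,
    Invalid,
    Syncing,
}

#[derive(Debug, PartialEq)]
enum VerificationStatus {
    Verified,
    NotVerified,
}

// Map an engine response (or an engine error) to a verification status, failing only
// on a definitive rejection.
fn classify(response: Result<EngineResponse, ()>) -> Result<VerificationStatus, &'static str> {
    match response {
        Ok(EngineResponse::Valid) => Ok(VerificationStatus::Verified),
        Ok(EngineResponse::Invalid) => Err("payload rejected by execution engine"),
        // Optimistic import: accept now, verify once the engine catches up.
        Ok(EngineResponse::Syncing) | Err(()) => Ok(VerificationStatus::NotVerified),
    }
}

fn main() {
    assert_eq!(classify(Ok(EngineResponse::Syncing)), Ok(VerificationStatus::NotVerified));
    assert!(classify(Ok(EngineResponse::Invalid)).is_err());
}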
// If the block is sufficiently recent, notify the validator monitor.
if let Some(slot) = chain.slot_clock.now() {
@ -1300,6 +1301,7 @@ impl<'a, T: BeaconChainTypes> FullyVerifiedBlock<'a, T> {
state,
parent_block: parent.beacon_block,
confirmation_db_batch,
payload_verification_status,
})
}
}
@ -1315,7 +1317,21 @@ fn validate_execution_payload<T: BeaconChainTypes>(
if let Some(execution_payload) = block.body().execution_payload() {
// This logic should match `is_execution_enabled`. We use only the execution block hash of
// the parent here in order to avoid loading the parent state during gossip verification.
let is_merge_complete = parent_block.execution_block_hash != Hash256::zero();
let is_merge_complete = match parent_block.execution_status {
// Optimistically declare that an "unknown" status block has completed the merge.
ExecutionStatus::Valid(_) | ExecutionStatus::Unknown(_) => true,
// It's impossible for an irrelevant block to have completed the merge. It is pre-merge
// by definition.
ExecutionStatus::Irrelevant(_) => false,
// If the parent has an invalid payload then it's impossible to build a valid block upon
// it. Reject the block.
ExecutionStatus::Invalid(_) => {
return Err(BlockError::ParentExecutionPayloadInvalid {
parent_root: parent_block.root,
})
}
};
let is_merge_block =
!is_merge_complete && *execution_payload != <ExecutionPayload<T::EthSpec>>::default();
if !is_merge_block && !is_merge_complete {

View File

@ -136,6 +136,9 @@ pub enum BeaconChainError {
AltairForkDisabled,
ExecutionLayerMissing,
ExecutionForkChoiceUpdateFailed(execution_layer::Error),
HeadMissingFromForkChoice(Hash256),
FinalizedBlockMissingFromForkChoice(Hash256),
InvalidFinalizedPayloadShutdownError(TrySendError<ShutdownReason>),
}
easy_from_to!(SlotProcessingError, BeaconChainError);

View File

@ -1,5 +1,5 @@
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
use fork_choice::ForkChoice;
use fork_choice::{ForkChoice, PayloadVerificationStatus};
use itertools::process_results;
use slog::{info, warn, Logger};
use state_processing::state_advance::complete_state_advance;
@ -164,9 +164,21 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
)
.map_err(|e| format!("Error replaying block: {:?}", e))?;
// Setting this to unverified is the safest solution, since we don't have a way to
// retroactively determine whether the replayed payloads were valid.
//
// This scenario is so rare that it seems OK to double-verify some blocks.
let payload_verification_status = PayloadVerificationStatus::NotVerified;
let (block, _) = block.deconstruct();
fork_choice
.on_block(block.slot(), &block, block.canonical_root(), &state)
.on_block(
block.slot(),
&block,
block.canonical_root(),
&state,
payload_verification_status,
)
.map_err(|e| format!("Error applying replayed block to fork choice: {:?}", e))?;
}

View File

@ -36,7 +36,7 @@ mod validator_pubkey_cache;
pub use self::beacon_chain::{
AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
ExecutionLayerStatus, ForkChoiceError, HeadInfo, StateSkipConfig, WhenSlotSkipped,
ForkChoiceError, HeadInfo, HeadSafetyStatus, StateSkipConfig, WhenSlotSkipped,
MAXIMUM_GOSSIP_CLOCK_DISPARITY,
};
pub use self::beacon_snapshot::BeaconSnapshot;

View File

@ -664,7 +664,7 @@ where
// Issue the head to the execution engine on startup. This ensures it can start
// syncing.
if head.is_merge_complete {
if let Some(block_hash) = head.execution_payload_block_hash {
runtime_context.executor.spawn(
async move {
let result = BeaconChain::<
@ -673,7 +673,7 @@ where
inner_execution_layer,
store,
head.finalized_checkpoint.root,
head.block_root,
block_hash,
)
.await;

View File

@ -1,8 +1,8 @@
use crate::metrics;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use beacon_chain::{BeaconChain, BeaconChainTypes, HeadSafetyStatus};
use lighthouse_network::{types::SyncState, NetworkGlobals};
use parking_lot::Mutex;
use slog::{debug, error, info, warn, Logger};
use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::{Duration, Instant};
@ -263,10 +263,43 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
} else {
head_root.to_string()
};
let block_hash = match beacon_chain.head_safety_status() {
Ok(HeadSafetyStatus::Safe(hash_opt)) => hash_opt
.map(|hash| format!("{} (verified)", hash))
.unwrap_or_else(|| "n/a".to_string()),
Ok(HeadSafetyStatus::Unsafe(block_hash)) => {
warn!(
log,
"Head execution payload is unverified";
"execution_block_hash" => ?block_hash,
);
format!("{} (unverified)", block_hash)
}
Ok(HeadSafetyStatus::Invalid(block_hash)) => {
crit!(
log,
"Head execution payload is invalid";
"msg" => "this scenario may be unrecoverable",
"execution_block_hash" => ?block_hash,
);
format!("{} (invalid)", block_hash)
}
Err(e) => {
error!(
log,
"Failed to read head safety status";
"error" => ?e
);
"n/a".to_string()
}
};
info!(
log,
"Synced";
"peers" => peer_count_pretty(connected_peer_count),
"exec_hash" => block_hash,
"finalized_root" => format!("{}", finalized_root),
"finalized_epoch" => finalized_epoch,
"epoch" => current_epoch,

View File

@ -20,7 +20,7 @@ use beacon_chain::{
observed_operations::ObservationOutcome,
validator_monitor::{get_block_delay_ms, timestamp_now},
AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes,
ExecutionLayerStatus, WhenSlotSkipped,
HeadSafetyStatus, WhenSlotSkipped,
};
use block_id::BlockId;
use eth2::types::{self as api_types, EndpointVersion, ValidatorId};
@ -385,24 +385,31 @@ pub fn serve<T: BeaconChainTypes>(
)
.untuple_one();
// Create a `warp` filter that rejects requests unless the execution layer (EL) is ready.
let only_while_el_is_ready = warp::any()
// Create a `warp` filter that rejects requests unless the head has been verified by the
// execution layer.
let only_with_safe_head = warp::any()
.and(chain_filter.clone())
.and_then(move |chain: Arc<BeaconChain<T>>| async move {
let status = chain.execution_layer_status().await.map_err(|e| {
let status = chain.head_safety_status().map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"failed to read execution engine status: {:?}",
"failed to read head safety status: {:?}",
e
))
})?;
match status {
ExecutionLayerStatus::Ready | ExecutionLayerStatus::NotRequired => Ok(()),
ExecutionLayerStatus::NotReady => Err(warp_utils::reject::custom_server_error(
"execution engine(s) not ready".to_string(),
)),
ExecutionLayerStatus::Missing => Err(warp_utils::reject::custom_server_error(
"no execution engines configured".to_string(),
)),
HeadSafetyStatus::Safe(_) => Ok(()),
HeadSafetyStatus::Unsafe(hash) => {
Err(warp_utils::reject::custom_server_error(format!(
"optimistic head hash {:?} has not been verified by the execution layer",
hash
)))
}
HeadSafetyStatus::Invalid(hash) => {
Err(warp_utils::reject::custom_server_error(format!(
"the head block has an invalid payload {:?}, this may be unrecoverable",
hash
)))
}
}
})
.untuple_one();
@ -1103,7 +1110,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::body::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.and(only_while_el_is_ready.clone())
.and_then(
|chain: Arc<BeaconChain<T>>,
attestations: Vec<Attestation<T::EthSpec>>,
@ -1401,7 +1407,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::body::json())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.and(only_while_el_is_ready.clone())
.and_then(
|chain: Arc<BeaconChain<T>>,
signatures: Vec<SyncCommitteeMessage>,
@ -1831,7 +1836,6 @@ pub fn serve<T: BeaconChainTypes>(
}))
.and(warp::path::end())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(chain_filter.clone())
.and(log_filter.clone())
.and_then(|epoch: Epoch, chain: Arc<BeaconChain<T>>, log: Logger| {
@ -1849,7 +1853,6 @@ pub fn serve<T: BeaconChainTypes>(
}))
.and(warp::path::end())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(warp::query::<api_types::ValidatorBlocksQuery>())
.and(chain_filter.clone())
.and_then(
@ -1884,7 +1887,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp::query::<api_types::ValidatorAttestationDataQuery>())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(only_with_safe_head.clone())
.and(chain_filter.clone())
.and_then(
|query: api_types::ValidatorAttestationDataQuery, chain: Arc<BeaconChain<T>>| {
@ -1917,7 +1920,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp::query::<api_types::ValidatorAggregateAttestationQuery>())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(only_with_safe_head.clone())
.and(chain_filter.clone())
.and_then(
|query: api_types::ValidatorAggregateAttestationQuery, chain: Arc<BeaconChain<T>>| {
@ -1949,7 +1952,6 @@ pub fn serve<T: BeaconChainTypes>(
}))
.and(warp::path::end())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(warp::body::json())
.and(chain_filter.clone())
.and_then(
@ -1972,7 +1974,6 @@ pub fn serve<T: BeaconChainTypes>(
}))
.and(warp::path::end())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(warp::body::json())
.and(chain_filter.clone())
.and_then(
@ -1990,7 +1991,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path::end())
.and(warp::query::<SyncContributionData>())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(only_with_safe_head)
.and(chain_filter.clone())
.and_then(
|sync_committee_data: SyncContributionData, chain: Arc<BeaconChain<T>>| {
@ -2013,7 +2014,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path("aggregate_and_proofs"))
.and(warp::path::end())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready.clone())
.and(chain_filter.clone())
.and(warp::body::json())
.and(network_tx_filter.clone())
@ -2114,7 +2114,6 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path("contribution_and_proofs"))
.and(warp::path::end())
.and(not_while_syncing_filter.clone())
.and(only_while_el_is_ready)
.and(chain_filter.clone())
.and(warp::body::json())
.and(network_tx_filter.clone())

View File

@ -707,7 +707,7 @@ impl<T: BeaconChainTypes> Worker<T> {
self.log,
"New block received";
"slot" => verified_block.block.slot(),
"hash" => ?verified_block.block_root
"root" => ?verified_block.block_root
);
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
@ -770,8 +770,10 @@ impl<T: BeaconChainTypes> Worker<T> {
| Err(e @ BlockError::TooManySkippedSlots { .. })
| Err(e @ BlockError::WeakSubjectivityConflict)
| Err(e @ BlockError::InconsistentFork(_))
// TODO: is this what we should be doing when block verification fails?
| Err(e @ BlockError::ExecutionPayloadError(_))
// TODO(merge): reconsider peer scoring for this event.
| Err(e @ BlockError::ExecutionPayloadError(_))
// TODO(merge): reconsider peer scoring for this event.
| Err(e @ BlockError::ParentExecutionPayloadInvalid { .. })
| Err(e @ BlockError::GenesisBlock) => {
warn!(self.log, "Could not verify block for gossip, rejecting the block";
"error" => %e);

View File

@ -1,6 +1,6 @@
use std::marker::PhantomData;
use proto_array::{Block as ProtoBlock, ProtoArrayForkChoice};
use proto_array::{Block as ProtoBlock, ExecutionStatus, ProtoArrayForkChoice};
use ssz_derive::{Decode, Encode};
use types::{
AttestationShufflingId, BeaconBlock, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec,
@ -38,6 +38,11 @@ pub enum Error<T> {
block_slot: Slot,
state_slot: Slot,
},
InvalidPayloadStatus {
block_slot: Slot,
block_root: Hash256,
payload_verification_status: PayloadVerificationStatus,
},
}
impl<T> From<InvalidAttestation> for Error<T> {
@ -101,6 +106,19 @@ impl<T> From<String> for Error<T> {
}
}
/// Indicates if a block has been verified by an execution payload.
///
/// There is no variant for "invalid", since such a block should never be added to fork choice.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum PayloadVerificationStatus {
/// An EL has declared the execution payload to be valid.
Verified,
/// An EL has not yet made a determination about the execution payload.
NotVerified,
/// The block is either pre-merge-fork, or prior to the terminal PoW block.
Irrelevant,
}
/// Calculate how far `slot` lies from the start of its epoch.
///
/// ## Specification
@ -262,9 +280,13 @@ where
.map_err(Error::BeaconStateError)?;
// Default any non-merge execution block hashes to 0x000..000.
let execution_block_hash = anchor_block.message_merge().map_or_else(
|()| Hash256::zero(),
|message| message.body.execution_payload.block_hash,
let execution_status = anchor_block.message_merge().map_or_else(
|()| ExecutionStatus::irrelevant(),
|message| {
// Assume that this payload is valid, since the anchor should be a trusted block and
// state.
ExecutionStatus::Valid(message.body.execution_payload.block_hash)
},
);
let proto_array = ProtoArrayForkChoice::new(
@ -275,7 +297,7 @@ where
fc_store.finalized_checkpoint().root,
current_epoch_shuffling_id,
next_epoch_shuffling_id,
execution_block_hash,
execution_status,
)?;
Ok(Self {
@ -446,6 +468,7 @@ where
block: &BeaconBlock<E>,
block_root: Hash256,
state: &BeaconState<E>,
payload_verification_status: PayloadVerificationStatus,
) -> Result<(), Error<T::Error>> {
let current_slot = self.update_time(current_slot)?;
@ -552,11 +575,32 @@ where
.on_verified_block(block, block_root, state)
.map_err(Error::AfterBlockFailed)?;
// Default any non-merge execution block hashes to 0x000..000.
let execution_block_hash = block.body_merge().map_or_else(
|()| Hash256::zero(),
|body| body.execution_payload.block_hash,
);
let execution_status = if let Some(execution_payload) = block.body().execution_payload() {
let block_hash = execution_payload.block_hash;
if block_hash == Hash256::zero() {
// The block is post-merge-fork, but pre-terminal-PoW block. We don't need to verify
// the payload.
ExecutionStatus::irrelevant()
} else {
match payload_verification_status {
PayloadVerificationStatus::Verified => ExecutionStatus::Valid(block_hash),
PayloadVerificationStatus::NotVerified => ExecutionStatus::Unknown(block_hash),
// It would be a logic error to declare a block irrelevant if it has an
// execution payload with a non-zero block hash.
PayloadVerificationStatus::Irrelevant => {
return Err(Error::InvalidPayloadStatus {
block_slot: block.slot(),
block_root,
payload_verification_status,
})
}
}
}
} else {
// There is no payload to verify.
ExecutionStatus::irrelevant()
};
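A self-contained sketch of the classification above, with simplified stand-ins for `PayloadVerificationStatus` and `ExecutionStatus` (the payload hash shortened to a `u64`); the zero hash models the pre-terminal-block case.

#[derive(Debug, PartialEq, Clone, Copy)]
enum Verification {
    Verified,
    NotVerified,
    Irrelevant,
}

#[derive(Debug, PartialEq, Clone, Copy)]
enum Status {
    Valid(u64),
    Unknown(u64),
    Irrelevant,
}

fn classify(payload_block_hash: Option<u64>, verification: Verification) -> Result<Status, &'static str> {
    match payload_block_hash {
        // No payload, or a zero hash: pre-merge or pre-terminal-block, nothing to verify.
        None | Some(0) => Ok(Status::Irrelevant),
        Some(hash) => match verification {
            Verification::Verified => Ok(Status::Valid(hash)),
            Verification::NotVerified => Ok(Status::Unknown(hash)),
            // A non-zero payload hash can never be "irrelevant"; treat this as a logic error.
            Verification::Irrelevant => Err("irrelevant status for a non-zero payload hash"),
        },
    }
}

fn main() {
    assert_eq!(classify(Some(0), Verification::Verified), Ok(Status::Irrelevant));
    assert_eq!(classify(Some(7), Verification::NotVerified), Ok(Status::Unknown(7)));
    assert!(classify(Some(7), Verification::Irrelevant).is_err());
}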
// This does not apply a vote to the block, it just makes fork choice aware of the block so
// it can still be identified as the head even if it doesn't have any votes.
@ -580,7 +624,7 @@ where
state_root: block.state_root(),
justified_epoch: state.current_justified_checkpoint().epoch,
finalized_epoch: state.finalized_checkpoint().epoch,
execution_block_hash,
execution_status,
})?;
Ok(())

View File

@ -2,7 +2,8 @@ mod fork_choice;
mod fork_choice_store;
pub use crate::fork_choice::{
Error, ForkChoice, InvalidAttestation, InvalidBlock, PersistedForkChoice, QueuedAttestation,
Error, ForkChoice, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
PersistedForkChoice, QueuedAttestation, SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
};
pub use fork_choice_store::ForkChoiceStore;
pub use proto_array::Block as ProtoBlock;

View File

@ -10,7 +10,10 @@ use beacon_chain::{
BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError,
StateSkipConfig, WhenSlotSkipped,
};
use fork_choice::{ForkChoiceStore, InvalidAttestation, InvalidBlock, QueuedAttestation};
use fork_choice::{
ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus,
QueuedAttestation, SAFE_SLOTS_TO_UPDATE_JUSTIFIED,
};
use store::MemoryStore;
use types::{
test_utils::generate_deterministic_keypair, BeaconBlock, BeaconBlockRef, BeaconState,
@ -268,7 +271,13 @@ impl ForkChoiceTest {
.chain
.fork_choice
.write()
.on_block(current_slot, &block, block.canonical_root(), &state)
.on_block(
current_slot,
&block,
block.canonical_root(),
&state,
PayloadVerificationStatus::Verified,
)
.unwrap();
self
}
@ -303,7 +312,13 @@ impl ForkChoiceTest {
.chain
.fork_choice
.write()
.on_block(current_slot, &block, block.canonical_root(), &state)
.on_block(
current_slot,
&block,
block.canonical_root(),
&state,
PayloadVerificationStatus::Verified,
)
.err()
.expect("on_block did not return an error");
comparison_func(err);

View File

@ -30,4 +30,8 @@ pub enum Error {
head_justified_epoch: Epoch,
head_finalized_epoch: Epoch,
},
InvalidAncestorOfValidPayload {
ancestor_block_root: Hash256,
ancestor_payload_block_hash: Hash256,
},
}

View File

@ -2,7 +2,7 @@ mod ffg_updates;
mod no_votes;
mod votes;
use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice};
use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
use serde_derive::{Deserialize, Serialize};
use types::{AttestationShufflingId, Epoch, Hash256, Slot};
@ -57,7 +57,7 @@ impl ForkChoiceTestDefinition {
pub fn run(self) {
let junk_shuffling_id =
AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
let execution_block_hash = Hash256::zero();
let execution_status = ExecutionStatus::irrelevant();
let mut fork_choice = ProtoArrayForkChoice::new(
self.finalized_block_slot,
Hash256::zero(),
@ -66,7 +66,7 @@ impl ForkChoiceTestDefinition {
self.finalized_root,
junk_shuffling_id.clone(),
junk_shuffling_id,
execution_block_hash,
execution_status,
)
.expect("should create fork choice struct");
@ -141,7 +141,7 @@ impl ForkChoiceTestDefinition {
),
justified_epoch,
finalized_epoch,
execution_block_hash,
execution_status,
};
fork_choice.process_block(block).unwrap_or_else(|e| {
panic!(

View File

@ -4,7 +4,7 @@ mod proto_array;
mod proto_array_fork_choice;
mod ssz_container;
pub use crate::proto_array_fork_choice::{Block, ProtoArrayForkChoice};
pub use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice};
pub use error::Error;
pub mod core {

View File

@ -1,4 +1,4 @@
use crate::{error::Error, Block};
use crate::{error::Error, Block, ExecutionStatus};
use serde_derive::{Deserialize, Serialize};
use ssz::four_byte_option_impl;
use ssz_derive::{Decode, Encode};
@ -35,11 +35,9 @@ pub struct ProtoNode {
best_child: Option<usize>,
#[ssz(with = "four_byte_option_usize")]
best_descendant: Option<usize>,
/// It's necessary to track this so that we can refuse to propagate post-merge blocks without
/// execution payloads, without confusing these with pre-merge blocks.
///
/// Relevant spec issue: https://github.com/ethereum/consensus-specs/issues/2618
pub execution_block_hash: Hash256,
/// Indicates if an execution node has marked this block as valid. Also contains the execution
/// block hash.
pub execution_status: ExecutionStatus,
}
/// Only used for SSZ deserialization of the persisted fork choice during the database migration
@ -78,7 +76,11 @@ impl Into<ProtoNode> for LegacyProtoNode {
weight: self.weight,
best_child: self.best_child,
best_descendant: self.best_descendant,
execution_block_hash: Hash256::zero(),
// We set the following execution value as if the block is a pre-merge-fork block. This
// is safe as long as we never import a merge block with the old version of proto-array,
// which holds because merge blocks cannot actually be processed until this fork choice
// change has landed.
execution_status: ExecutionStatus::irrelevant(),
}
}
}
@ -224,7 +226,7 @@ impl ProtoArray {
weight: 0,
best_child: None,
best_descendant: None,
execution_block_hash: block.execution_block_hash,
execution_status: block.execution_status,
};
self.indices.insert(node.root, node_index);
@ -232,11 +234,58 @@ impl ProtoArray {
if let Some(parent_index) = node.parent {
self.maybe_update_best_child_and_descendant(parent_index, node_index)?;
if matches!(block.execution_status, ExecutionStatus::Valid(_)) {
self.propagate_execution_payload_verification(parent_index)?;
}
}
Ok(())
}
pub fn propagate_execution_payload_verification(
&mut self,
verified_node_index: usize,
) -> Result<(), Error> {
let mut index = verified_node_index;
loop {
let node = self
.nodes
.get_mut(index)
.ok_or(Error::InvalidNodeIndex(index))?;
let parent_index = match node.execution_status {
// We have reached a node that we already know is valid. No need to iterate further
// since we assume its ancestors have already been set to valid.
ExecutionStatus::Valid(_) => return Ok(()),
// We have reached an irrelevant node; this node is prior to a terminal execution
// block. There's no need to iterate further, since it's impossible for this block to
// have any relevant ancestors.
ExecutionStatus::Irrelevant(_) => return Ok(()),
// The block has an unknown status; set it to valid, since any ancestor of a valid
// payload can be considered valid.
ExecutionStatus::Unknown(payload_block_hash) => {
node.execution_status = ExecutionStatus::Valid(payload_block_hash);
if let Some(parent_index) = node.parent {
parent_index
} else {
// We have reached the root block, iteration complete.
return Ok(());
}
}
// An ancestor of the valid payload was invalid. This is a serious error which
// indicates a consensus failure in the execution node. This is unrecoverable.
ExecutionStatus::Invalid(ancestor_payload_block_hash) => {
return Err(Error::InvalidAncestorOfValidPayload {
ancestor_block_root: node.root,
ancestor_payload_block_hash,
})
}
};
index = parent_index;
}
}
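The loop above is the "valid back-propagation" from the commit message. Below is a minimal, self-contained model under simplified stand-in types (a flat `Vec` of nodes with parent indices, like proto-array): marking a node valid flips every `Unknown` ancestor to `Valid`, stops at `Valid` or `Irrelevant` nodes, and treats an `Invalid` ancestor as an unrecoverable consensus failure.

#[derive(Debug, PartialEq, Clone, Copy)]
enum Status {
    Valid,
    Invalid,
    Unknown,
    Irrelevant,
}

struct Node {
    parent: Option<usize>,
    status: Status,
}

fn propagate_valid(nodes: &mut [Node], start: usize) -> Result<(), &'static str> {
    let mut index = start;
    loop {
        let node = nodes.get_mut(index).ok_or("invalid node index")?;
        match node.status {
            // Already valid or pre-merge: all further ancestors are settled.
            Status::Valid | Status::Irrelevant => return Ok(()),
            // An invalid ancestor of a valid payload indicates an EL consensus failure.
            Status::Invalid => return Err("invalid ancestor of a valid payload"),
            Status::Unknown => {
                node.status = Status::Valid;
                match node.parent {
                    Some(parent) => index = parent,
                    // We have reached the root; iteration complete.
                    None => return Ok(()),
                }
            }
        }
    }
}

fn main() {
    // Chain: 0 (irrelevant) <- 1 (unknown) <- 2 (unknown).
    let mut nodes = vec![
        Node { parent: None, status: Status::Irrelevant },
        Node { parent: Some(0), status: Status::Unknown },
        Node { parent: Some(1), status: Status::Unknown },
    ];
    propagate_valid(&mut nodes, 2).unwrap();
    assert!(nodes[1..].iter().all(|n| n.status == Status::Valid));
}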
/// Follows the best-descendant links to find the best-block (i.e., head-block).
///
/// ## Notes

View File

@ -1,6 +1,7 @@
use crate::error::Error;
use crate::proto_array::ProtoArray;
use crate::ssz_container::{LegacySszContainer, SszContainer};
use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
@ -15,6 +16,32 @@ pub struct VoteTracker {
next_epoch: Epoch,
}
/// Represents the verification status of an execution payload.
#[derive(Clone, Copy, Debug, PartialEq, Encode, Decode, Serialize, Deserialize)]
#[ssz(enum_behaviour = "union")]
pub enum ExecutionStatus {
/// An EL has determined that the payload is valid.
Valid(Hash256),
/// An EL has determined that the payload is invalid.
Invalid(Hash256),
/// An EL has not yet verified the execution payload.
Unknown(Hash256),
/// The block is either prior to the merge fork, or after the merge fork but before the terminal
/// PoW block has been found.
///
/// # Note:
///
/// This `bool` only exists to satisfy our SSZ implementation which requires all variants
/// to have a value. It can be set to anything.
Irrelevant(bool), // TODO(merge): fix bool.
}
impl ExecutionStatus {
pub fn irrelevant() -> Self {
ExecutionStatus::Irrelevant(false)
}
}
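The `Irrelevant(bool)` dummy value exists only so that every variant has a payload under the SSZ union encoding, which (as assumed here) serializes a one-byte selector followed by the variant's serialized value. A hand-rolled illustration of that layout, not the real `ssz` derive output:

#[derive(Debug, PartialEq, Clone, Copy)]
enum Status {
    Valid([u8; 32]),
    Invalid([u8; 32]),
    Unknown([u8; 32]),
    // The dummy bool gives this variant a payload to serialize, like `Irrelevant(bool)`.
    Irrelevant(bool),
}

// Encode as: one selector byte, then the variant's payload bytes.
fn encode(status: &Status) -> Vec<u8> {
    let mut out = Vec::new();
    match status {
        Status::Valid(hash) => { out.push(0); out.extend_from_slice(hash); }
        Status::Invalid(hash) => { out.push(1); out.extend_from_slice(hash); }
        Status::Unknown(hash) => { out.push(2); out.extend_from_slice(hash); }
        Status::Irrelevant(flag) => { out.push(3); out.push(*flag as u8); }
    }
    out
}

fn main() {
    assert_eq!(encode(&Status::Irrelevant(false)), vec![3, 0]);
    assert_eq!(encode(&Status::Valid([0xaa; 32])).len(), 33);
}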
/// A block that is to be applied to the fork choice.
///
/// A simplified version of `types::BeaconBlock`.
@ -29,7 +56,9 @@ pub struct Block {
pub next_epoch_shuffling_id: AttestationShufflingId,
pub justified_epoch: Epoch,
pub finalized_epoch: Epoch,
pub execution_block_hash: Hash256,
/// Indicates if an execution node has marked this block as valid. Also contains the execution
/// block hash.
pub execution_status: ExecutionStatus,
}
/// A Vec-wrapper which will grow to match any request.
@ -76,7 +105,7 @@ impl ProtoArrayForkChoice {
finalized_root: Hash256,
current_epoch_shuffling_id: AttestationShufflingId,
next_epoch_shuffling_id: AttestationShufflingId,
execution_block_hash: Hash256,
execution_status: ExecutionStatus,
) -> Result<Self, String> {
let mut proto_array = ProtoArray {
prune_threshold: DEFAULT_PRUNE_THRESHOLD,
@ -98,7 +127,7 @@ impl ProtoArrayForkChoice {
next_epoch_shuffling_id,
justified_epoch,
finalized_epoch,
execution_block_hash,
execution_status,
};
proto_array
@ -208,7 +237,7 @@ impl ProtoArrayForkChoice {
next_epoch_shuffling_id: block.next_epoch_shuffling_id.clone(),
justified_epoch: block.justified_epoch,
finalized_epoch: block.finalized_epoch,
execution_block_hash: block.execution_block_hash,
execution_status: block.execution_status,
})
}
@ -372,7 +401,7 @@ mod test_compute_deltas {
let unknown = Hash256::from_low_u64_be(4);
let junk_shuffling_id =
AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
let execution_block_hash = Hash256::zero();
let execution_status = ExecutionStatus::irrelevant();
let mut fc = ProtoArrayForkChoice::new(
genesis_slot,
@ -382,7 +411,7 @@ mod test_compute_deltas {
finalized_root,
junk_shuffling_id.clone(),
junk_shuffling_id.clone(),
execution_block_hash,
execution_status,
)
.unwrap();
@ -398,7 +427,7 @@ mod test_compute_deltas {
next_epoch_shuffling_id: junk_shuffling_id.clone(),
justified_epoch: genesis_epoch,
finalized_epoch: genesis_epoch,
execution_block_hash,
execution_status,
})
.unwrap();
@ -414,7 +443,7 @@ mod test_compute_deltas {
next_epoch_shuffling_id: junk_shuffling_id,
justified_epoch: genesis_epoch,
finalized_epoch: genesis_epoch,
execution_block_hash,
execution_status,
})
.unwrap();