diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index caa73401e..c39a2b26c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7,6 +7,7 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::blob_cache::BlobCache; +use crate::blob_verification::{AsBlock, AvailableBlock, BlockWrapper}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::{ check_block_is_finalized_descendant, check_block_relevancy, get_block_root, @@ -107,7 +108,6 @@ use tree_hash::TreeHash; use types::beacon_state::CloneConfig; use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS; use types::consts::merge::INTERVALS_PER_SLOT; -use types::signed_block_and_blobs::BlockWrapper; use types::*; pub type ForkChoiceError = fork_choice::Error; @@ -2367,19 +2367,19 @@ impl BeaconChain { let children = chain_segment .iter() .skip(1) - .map(|block| (block.block().parent_root(), block.slot())) + .map(|block| (block.parent_root(), block.slot())) .collect::>(); for (i, block) in chain_segment.into_iter().enumerate() { // Ensure the block is the correct structure for the fork at `block.slot()`. - if let Err(e) = block.block().fork_name(&self.spec) { + if let Err(e) = block.as_block().fork_name(&self.spec) { return Err(ChainSegmentResult::Failed { imported_blocks, error: BlockError::InconsistentFork(e), }); } - let block_root = get_block_root(block.block()); + let block_root = get_block_root(block.as_block()); if let Some((child_parent_root, child_slot)) = children.get(i) { // If this block has a child in this chain segment, ensure that its parent root matches @@ -2403,7 +2403,7 @@ impl BeaconChain { } } - match check_block_relevancy(block.block(), block_root, self) { + match check_block_relevancy(block.as_block(), block_root, self) { // If the block is relevant, add it to the filtered chain segment. Ok(_) => filtered_chain_segment.push((block_root, block)), // If the block is already known, simply ignore this block. @@ -2780,7 +2780,7 @@ impl BeaconChain { #[allow(clippy::too_many_arguments)] fn import_block( &self, - signed_block: BlockWrapper, + signed_block: AvailableBlock, block_root: Hash256, mut state: BeaconState, confirmed_state_roots: Vec, @@ -2940,7 +2940,7 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - let (signed_block, blobs) = signed_block.deconstruct(Some(block_root)); + let (signed_block, blobs) = signed_block.deconstruct(); let block = signed_block.message(); let mut ops: Vec<_> = confirmed_state_roots .into_iter() @@ -2949,7 +2949,7 @@ impl BeaconChain { ops.push(StoreOp::PutBlock(block_root, signed_block.clone())); ops.push(StoreOp::PutState(block.state_root(), &state)); - if let Some(blobs) = blobs? { + if let Some(blobs) = blobs { if blobs.blobs.len() > 0 { //FIXME(sean) using this for debugging for now info!(self.log, "Writing blobs to store"; "block_root" => ?block_root); @@ -4569,10 +4569,7 @@ impl BeaconChain { }; // Use a context without block root or proposer index so that both are checked. 
- let mut ctxt = ConsensusContext::new(block.slot()) - //FIXME(sean) This is a hack beacuse `valdiate blobs sidecar requires the block root` - // which we won't have until after the state root is calculated. - .set_blobs_sidecar_validated(true); + let mut ctxt = ConsensusContext::new(block.slot()); per_block_processing( &mut state, @@ -4594,11 +4591,10 @@ impl BeaconChain { //FIXME(sean) // - add a new timer for processing here if let Some(blobs) = blobs_opt { - let kzg = if let Some(kzg) = &self.kzg { - kzg - } else { - return Err(BlockProductionError::TrustedSetupNotInitialized); - }; + let kzg = self + .kzg + .as_ref() + .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; let kzg_aggregated_proof = kzg_utils::compute_aggregate_kzg_proof::(&kzg, &blobs) .map_err(|e| BlockProductionError::KzgError(e))?; diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 0725bd865..dc909b89a 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -1,10 +1,17 @@ +use derivative::Derivative; use slot_clock::SlotClock; +use std::sync::Arc; use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use crate::{kzg_utils, BeaconChainError}; use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions; use types::signed_beacon_block::BlobReconstructionError; -use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions}; +use types::ExecPayload; +use types::{ + BeaconBlockRef, BeaconStateError, BlobsSidecar, EthSpec, Hash256, KzgCommitment, + SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockHeader, Slot, + Transactions, +}; #[derive(Debug)] pub enum BlobError { @@ -29,30 +36,6 @@ pub enum BlobError { block_slot: Slot, }, - /// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`. - /// - /// - /// ## Peer scoring - /// - /// The peer has sent an invalid message. - BlobOutOfRange { - blob_index: usize, - }, - - /// The blob sidecar contains a KZGCommitment that is not a valid G1 point on - /// the bls curve. - /// - /// ## Peer scoring - /// - /// The peer has sent an invalid message. - InvalidKZGCommitment, - /// The proposal signature in invalid. - /// - /// ## Peer scoring - /// - /// The signature on the blob sidecar invalid and the peer is faulty. - ProposalSignatureInvalid, - /// No kzg ccommitment associated with blob sidecar. KzgCommitmentMissing, @@ -68,17 +51,6 @@ pub enum BlobError { KzgError(kzg::Error), - /// A blob sidecar for this proposer and slot has already been observed. - /// - /// ## Peer scoring - /// - /// The `proposer` has already proposed a sidecar at this slot. The existing sidecar may or may not - /// be equal to the given sidecar. - RepeatSidecar { - proposer: u64, - slot: Slot, - }, - /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid. /// /// ## Peer scoring @@ -87,12 +59,17 @@ pub enum BlobError { /// sync committee message is valid. BeaconChainError(BeaconChainError), /// No blobs for the specified block where we would expect blobs. - MissingBlobs, + UnavailableBlobs, + /// Blobs provided for a pre-Eip4844 fork. 
+ InconsistentFork, } impl From for BlobError { - fn from(_: BlobReconstructionError) -> Self { - BlobError::MissingBlobs + fn from(e: BlobReconstructionError) -> Self { + match e { + BlobReconstructionError::UnavailableBlobs => BlobError::UnavailableBlobs, + BlobReconstructionError::InconsistentFork => BlobError::InconsistentFork, + } } } @@ -109,6 +86,36 @@ impl From for BlobError { } pub fn validate_blob_for_gossip( + block_wrapper: BlockWrapper, + block_root: Hash256, + chain: &BeaconChain, +) -> Result, BlobError> { + if let BlockWrapper::BlockAndBlob(ref block, ref blobs_sidecar) = block_wrapper { + let blob_slot = blobs_sidecar.beacon_block_slot; + // Do not gossip or process blobs from future or past slots. + let latest_permissible_slot = chain + .slot_clock + .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) + .ok_or(BeaconChainError::UnableToReadSlot)?; + if blob_slot > latest_permissible_slot { + return Err(BlobError::FutureSlot { + message_slot: latest_permissible_slot, + latest_permissible_slot: blob_slot, + }); + } + + if blob_slot != block.slot() { + return Err(BlobError::SlotMismatch { + blob_slot, + block_slot: block.slot(), + }); + } + } + + block_wrapper.into_available_block(block_root, chain) +} + +fn verify_data_availability( blob_sidecar: &BlobsSidecar, kzg_commitments: &[KzgCommitment], transactions: &Transactions, @@ -116,27 +123,6 @@ pub fn validate_blob_for_gossip( block_root: Hash256, chain: &BeaconChain, ) -> Result<(), BlobError> { - let blob_slot = blob_sidecar.beacon_block_slot; - // Do not gossip or process blobs from future or past slots. - let latest_permissible_slot = chain - .slot_clock - .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) - .ok_or(BeaconChainError::UnableToReadSlot)?; - if blob_slot > latest_permissible_slot { - return Err(BlobError::FutureSlot { - message_slot: latest_permissible_slot, - latest_permissible_slot: blob_slot, - }); - } - - if blob_slot != block_slot { - return Err(BlobError::SlotMismatch { - blob_slot, - block_slot, - }); - } - - // Validate commitments agains transactions in the block. if verify_kzg_commitments_against_transactions::(transactions, kzg_commitments) .is_err() { @@ -162,3 +148,395 @@ pub fn validate_blob_for_gossip( } Ok(()) } + +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. This makes no +/// claims about data availability and should not be used in consensus. This struct is useful in +/// networking when we want to send blocks around without consensus checks. 
+#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +pub enum BlockWrapper { + Block(Arc>), + BlockAndBlob(Arc>, Arc>), +} + +impl BlockWrapper { + pub fn new( + block: Arc>, + blobs_sidecar: Option>>, + ) -> Self { + if let Some(blobs_sidecar) = blobs_sidecar { + BlockWrapper::BlockAndBlob(block, blobs_sidecar) + } else { + BlockWrapper::Block(block) + } + } +} + +impl From> for BlockWrapper { + fn from(block: SignedBeaconBlock) -> Self { + BlockWrapper::Block(Arc::new(block)) + } +} + +impl From> for BlockWrapper { + fn from(block: SignedBeaconBlockAndBlobsSidecar) -> Self { + let SignedBeaconBlockAndBlobsSidecar { + beacon_block, + blobs_sidecar, + } = block; + BlockWrapper::BlockAndBlob(beacon_block, blobs_sidecar) + } +} + +impl From>> for BlockWrapper { + fn from(block: Arc>) -> Self { + BlockWrapper::Block(block) + } +} + +#[derive(Copy, Clone)] +pub enum DataAvailabilityCheckRequired { + Yes, + No, +} + +pub trait IntoAvailableBlock { + fn into_available_block( + self, + block_root: Hash256, + chain: &BeaconChain, + ) -> Result, BlobError>; +} + +impl IntoAvailableBlock for BlockWrapper { + fn into_available_block( + self, + block_root: Hash256, + chain: &BeaconChain, + ) -> Result, BlobError> { + let data_availability_boundary = chain.data_availability_boundary(); + let da_check_required = + data_availability_boundary.map_or(DataAvailabilityCheckRequired::No, |boundary| { + if self.slot().epoch(T::EthSpec::slots_per_epoch()) >= boundary { + DataAvailabilityCheckRequired::Yes + } else { + DataAvailabilityCheckRequired::No + } + }); + match self { + BlockWrapper::Block(block) => AvailableBlock::new(block, block_root, da_check_required), + BlockWrapper::BlockAndBlob(block, blobs_sidecar) => { + if matches!(da_check_required, DataAvailabilityCheckRequired::Yes) { + let kzg_commitments = block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_| BlobError::KzgCommitmentMissing)?; + let transactions = block + .message() + .body() + .execution_payload_eip4844() + .map(|payload| payload.transactions()) + .map_err(|_| BlobError::TransactionsMissing)? + .ok_or(BlobError::TransactionsMissing)?; + verify_data_availability( + &blobs_sidecar, + kzg_commitments, + transactions, + block.slot(), + block_root, + chain, + )?; + } + + AvailableBlock::new_with_blobs(block, blobs_sidecar, da_check_required) + } + } + } +} + +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. An +/// `AvailableBlock` has passed any required data availability checks and should be used in +/// consensus. This newtype wraps `AvailableBlockInner` to ensure data availability checks +/// cannot be circumvented on construction. +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +pub struct AvailableBlock(AvailableBlockInner); + +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. +#[derive(Clone, Debug, Derivative)] +#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +enum AvailableBlockInner { + Block(Arc>), + BlockAndBlob(SignedBeaconBlockAndBlobsSidecar), +} + +impl AvailableBlock { + pub fn new( + beacon_block: Arc>, + block_root: Hash256, + da_check_required: DataAvailabilityCheckRequired, + ) -> Result { + match beacon_block.as_ref() { + // No data availability check required prior to Eip4844. 
+ SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Capella(_) + | SignedBeaconBlock::Merge(_) => { + Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block))) + } + SignedBeaconBlock::Eip4844(_) => { + match da_check_required { + DataAvailabilityCheckRequired::Yes => { + // Attempt to reconstruct empty blobs here. + let blobs_sidecar = beacon_block + .reconstruct_empty_blobs(Some(block_root)) + .map(Arc::new)?; + return Ok(AvailableBlock(AvailableBlockInner::BlockAndBlob( + SignedBeaconBlockAndBlobsSidecar { + beacon_block, + blobs_sidecar, + }, + ))); + } + DataAvailabilityCheckRequired::No => { + Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block))) + } + } + } + } + } + + /// This function is private because an `AvailableBlock` should be + /// constructed via the `into_available_block` method. + fn new_with_blobs( + beacon_block: Arc>, + blobs_sidecar: Arc>, + da_check_required: DataAvailabilityCheckRequired, + ) -> Result { + match beacon_block.as_ref() { + // This method shouldn't be called with a pre-Eip4844 block. + SignedBeaconBlock::Base(_) + | SignedBeaconBlock::Altair(_) + | SignedBeaconBlock::Capella(_) + | SignedBeaconBlock::Merge(_) => Err(BlobError::InconsistentFork), + SignedBeaconBlock::Eip4844(_) => { + match da_check_required { + DataAvailabilityCheckRequired::Yes => Ok(AvailableBlock( + AvailableBlockInner::BlockAndBlob(SignedBeaconBlockAndBlobsSidecar { + beacon_block, + blobs_sidecar, + }), + )), + DataAvailabilityCheckRequired::No => { + // Blobs were not verified so we drop them, we'll instead just pass around + // an available `Eip4844` block without blobs. + Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block))) + } + } + } + } + } + + pub fn blobs(&self) -> Option>> { + match &self.0 { + AvailableBlockInner::Block(_) => None, + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + Some(block_sidecar_pair.blobs_sidecar.clone()) + } + } + } + + pub fn deconstruct(self) -> (Arc>, Option>>) { + match self.0 { + AvailableBlockInner::Block(block) => (block, None), + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + let SignedBeaconBlockAndBlobsSidecar { + beacon_block, + blobs_sidecar, + } = block_sidecar_pair; + (beacon_block, Some(blobs_sidecar)) + } + } + } +} + +pub trait IntoBlockWrapper: AsBlock { + fn into_block_wrapper(self) -> BlockWrapper; +} + +impl IntoBlockWrapper for BlockWrapper { + fn into_block_wrapper(self) -> BlockWrapper { + self + } +} + +impl IntoBlockWrapper for AvailableBlock { + fn into_block_wrapper(self) -> BlockWrapper { + let (block, blobs) = self.deconstruct(); + if let Some(blobs) = blobs { + BlockWrapper::BlockAndBlob(block, blobs) + } else { + BlockWrapper::Block(block) + } + } +} + +pub trait AsBlock { + fn slot(&self) -> Slot; + fn parent_root(&self) -> Hash256; + fn state_root(&self) -> Hash256; + fn signed_block_header(&self) -> SignedBeaconBlockHeader; + fn message(&self) -> BeaconBlockRef; + fn as_block(&self) -> &SignedBeaconBlock; + fn block_cloned(&self) -> Arc>; +} + +impl AsBlock for BlockWrapper { + fn slot(&self) -> Slot { + match self { + BlockWrapper::Block(block) => block.slot(), + BlockWrapper::BlockAndBlob(block, _) => block.slot(), + } + } + fn parent_root(&self) -> Hash256 { + match self { + BlockWrapper::Block(block) => block.parent_root(), + BlockWrapper::BlockAndBlob(block, _) => block.parent_root(), + } + } + fn state_root(&self) -> Hash256 { + match self { + BlockWrapper::Block(block) => block.state_root(), + BlockWrapper::BlockAndBlob(block, _) 
=> block.state_root(), + } + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + match &self { + BlockWrapper::Block(block) => block.signed_block_header(), + BlockWrapper::BlockAndBlob(block, _) => block.signed_block_header(), + } + } + fn message(&self) -> BeaconBlockRef { + match &self { + BlockWrapper::Block(block) => block.message(), + BlockWrapper::BlockAndBlob(block, _) => block.message(), + } + } + fn as_block(&self) -> &SignedBeaconBlock { + match &self { + BlockWrapper::Block(block) => &block, + BlockWrapper::BlockAndBlob(block, _) => &block, + } + } + fn block_cloned(&self) -> Arc> { + match &self { + BlockWrapper::Block(block) => block.clone(), + BlockWrapper::BlockAndBlob(block, _) => block.clone(), + } + } +} + +impl AsBlock for &BlockWrapper { + fn slot(&self) -> Slot { + match self { + BlockWrapper::Block(block) => block.slot(), + BlockWrapper::BlockAndBlob(block, _) => block.slot(), + } + } + fn parent_root(&self) -> Hash256 { + match self { + BlockWrapper::Block(block) => block.parent_root(), + BlockWrapper::BlockAndBlob(block, _) => block.parent_root(), + } + } + fn state_root(&self) -> Hash256 { + match self { + BlockWrapper::Block(block) => block.state_root(), + BlockWrapper::BlockAndBlob(block, _) => block.state_root(), + } + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + match &self { + BlockWrapper::Block(block) => block.signed_block_header(), + BlockWrapper::BlockAndBlob(block, _) => block.signed_block_header(), + } + } + fn message(&self) -> BeaconBlockRef { + match &self { + BlockWrapper::Block(block) => block.message(), + BlockWrapper::BlockAndBlob(block, _) => block.message(), + } + } + fn as_block(&self) -> &SignedBeaconBlock { + match &self { + BlockWrapper::Block(block) => &block, + BlockWrapper::BlockAndBlob(block, _) => &block, + } + } + fn block_cloned(&self) -> Arc> { + match &self { + BlockWrapper::Block(block) => block.clone(), + BlockWrapper::BlockAndBlob(block, _) => block.clone(), + } + } +} + +impl AsBlock for AvailableBlock { + fn slot(&self) -> Slot { + match &self.0 { + AvailableBlockInner::Block(block) => block.slot(), + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + block_sidecar_pair.beacon_block.slot() + } + } + } + fn parent_root(&self) -> Hash256 { + match &self.0 { + AvailableBlockInner::Block(block) => block.parent_root(), + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + block_sidecar_pair.beacon_block.parent_root() + } + } + } + fn state_root(&self) -> Hash256 { + match &self.0 { + AvailableBlockInner::Block(block) => block.state_root(), + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + block_sidecar_pair.beacon_block.state_root() + } + } + } + fn signed_block_header(&self) -> SignedBeaconBlockHeader { + match &self.0 { + AvailableBlockInner::Block(block) => block.signed_block_header(), + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + block_sidecar_pair.beacon_block.signed_block_header() + } + } + } + fn message(&self) -> BeaconBlockRef { + match &self.0 { + AvailableBlockInner::Block(block) => block.message(), + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + block_sidecar_pair.beacon_block.message() + } + } + } + fn as_block(&self) -> &SignedBeaconBlock { + match &self.0 { + AvailableBlockInner::Block(block) => &block, + AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + &block_sidecar_pair.beacon_block + } + } + } + fn block_cloned(&self) -> Arc> { + match &self.0 { + AvailableBlockInner::Block(block) => block.clone(), + 
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => { + block_sidecar_pair.beacon_block.clone() + } + } + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 2b759e4ad..4cb53a639 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -23,6 +23,10 @@ //! | //! ▼ //! SignedBeaconBlock +//! | +//! ▼ +//! AvailableBlock +//! | //! |--------------- //! | | //! | ▼ @@ -42,17 +46,18 @@ //! END //! //! ``` -use crate::blob_verification::{validate_blob_for_gossip, BlobError}; +use crate::blob_verification::{ + validate_blob_for_gossip, AsBlock, AvailableBlock, BlobError, BlockWrapper, IntoAvailableBlock, + IntoBlockWrapper, +}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; -use crate::kzg_utils; use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; -use crate::BlockError::BlobValidation; use crate::{ beacon_chain::{ BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, @@ -70,7 +75,6 @@ use safe_arith::ArithError; use slog::{debug, error, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; -use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions; use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block}; use state_processing::{ block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError}, @@ -88,7 +92,6 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; use types::signed_beacon_block::BlobReconstructionError; -use types::signed_block_and_blobs::BlockWrapper; use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, @@ -308,6 +311,12 @@ pub enum BlockError { BlobValidation(BlobError), } +impl From for BlockError { + fn from(e: BlobError) -> Self { + Self::BlobValidation(e) + } +} + /// Returned when block validation failed due to some issue verifying /// the execution payload. #[derive(Debug)] @@ -589,12 +598,16 @@ pub fn signature_verify_chain_segment( let mut consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(*block_root); - signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?; + signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?; + + //FIXME(sean) batch kzg verification + let available_block = block.clone().into_available_block(*block_root, chain)?; + consensus_context = consensus_context.set_kzg_commitments_consistent(true); // Save the block and its consensus context. The context will have had its proposer index // and attesting indices filled in, which can be used to accelerate later block processing. 
signature_verified_blocks.push(SignatureVerifiedBlock { - block: block.clone(), + block: available_block, block_root: *block_root, parent: None, consensus_context, @@ -619,7 +632,7 @@ pub fn signature_verify_chain_segment( #[derive(Derivative)] #[derivative(Debug(bound = "T: BeaconChainTypes"))] pub struct GossipVerifiedBlock { - pub block: BlockWrapper, + pub block: AvailableBlock, pub block_root: Hash256, parent: Option>, consensus_context: ConsensusContext, @@ -628,7 +641,7 @@ pub struct GossipVerifiedBlock { /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit /// signatures) have been verified. pub struct SignatureVerifiedBlock { - block: BlockWrapper, + block: AvailableBlock, block_root: Hash256, parent: Option>, consensus_context: ConsensusContext, @@ -651,7 +664,7 @@ type PayloadVerificationHandle = /// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the /// `BeaconChain` immediately after it is instantiated. pub struct ExecutionPendingBlock { - pub block: BlockWrapper, + pub block: AvailableBlock, pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, @@ -675,8 +688,7 @@ pub trait IntoExecutionPendingBlock: Sized { .map(|execution_pending| { // Supply valid block to slasher. if let Some(slasher) = chain.slasher.as_ref() { - slasher - .accept_block_header(execution_pending.block.block().signed_block_header()); + slasher.accept_block_header(execution_pending.block.signed_block_header()); } execution_pending }) @@ -707,7 +719,7 @@ impl GossipVerifiedBlock { // we assume it will be transformed into a fully verified block. We *do* need to supply // it to the slasher if an error occurs, because that's the end of this block's journey, // and it could be a repeat proposal (a likely cause for slashing!). - let header = block.block().signed_block_header(); + let header = block.signed_block_header(); Self::new_without_slasher_checks(block, chain).map_err(|e| { process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e)) }) @@ -720,7 +732,7 @@ impl GossipVerifiedBlock { ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. block - .block() + .as_block() .fork_name(&chain.spec) .map_err(BlockError::InconsistentFork)?; @@ -736,7 +748,7 @@ impl GossipVerifiedBlock { }); } - let block_root = get_block_root(block.block()); + let block_root = get_block_root(block.as_block()); // Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any. check_block_against_anchor_slot(block.message(), chain)?; @@ -872,7 +884,7 @@ impl GossipVerifiedBlock { let pubkey = pubkey_cache .get(block.message().proposer_index() as usize) .ok_or_else(|| BlockError::UnknownValidator(block.message().proposer_index()))?; - block.block().verify_signature( + block.as_block().verify_signature( Some(block_root), pubkey, &fork, @@ -912,39 +924,16 @@ impl GossipVerifiedBlock { // Validate the block's execution_payload (if any). validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?; - if let Some(blobs_sidecar) = block.blobs(Some(block_root))? { - let kzg_commitments = block - .message() - .body() - .blob_kzg_commitments() - .map_err(|_| BlockError::BlobValidation(BlobError::KzgCommitmentMissing))?; - let transactions = block - .message() - .body() - .execution_payload_eip4844() - .map(|payload| payload.transactions()) - .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))? 
- .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?; - validate_blob_for_gossip( - &blobs_sidecar, - kzg_commitments, - transactions, - block.slot(), - block_root, - chain, - ) - .map_err(BlobValidation)?; - } + let available_block = validate_blob_for_gossip(block, block_root, chain)?; // Having checked the proposer index and the block root we can cache them. - let consensus_context = ConsensusContext::new(block.slot()) + let consensus_context = ConsensusContext::new(available_block.slot()) .set_current_block_root(block_root) - .set_proposer_index(block.message().proposer_index()) - .set_blobs_sidecar_validated(true) // Validated in `validate_blob_for_gossip` - .set_blobs_verified_vs_txs(true); + .set_proposer_index(available_block.as_block().message().proposer_index()) + .set_kzg_commitments_consistent(true); Ok(Self { - block, + block: available_block, block_root, parent, consensus_context, @@ -974,7 +963,7 @@ impl IntoExecutionPendingBlock for GossipVerifiedBlock &SignedBeaconBlock { - self.block.block() + self.block.as_block() } } @@ -984,13 +973,13 @@ impl SignatureVerifiedBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn new( - block: BlockWrapper, + block: AvailableBlock, block_root: Hash256, chain: &BeaconChain, ) -> Result> { // Ensure the block is the correct structure for the fork at `block.slot()`. block - .block() + .as_block() .fork_name(&chain.spec) .map_err(BlockError::InconsistentFork)?; @@ -1013,10 +1002,12 @@ impl SignatureVerifiedBlock { let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - let mut consensus_context = - ConsensusContext::new(block.slot()).set_current_block_root(block_root); + let mut consensus_context = ConsensusContext::new(block.slot()) + .set_current_block_root(block_root) + // An `AvailabileBlock is passed in here, so we know this check has been run.` + .set_kzg_commitments_consistent(true); - signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?; + signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { @@ -1032,11 +1023,11 @@ impl SignatureVerifiedBlock { /// As for `new` above but producing `BlockSlashInfo`. pub fn check_slashable( - block: BlockWrapper, + block: AvailableBlock, block_root: Hash256, chain: &BeaconChain, ) -> Result>> { - let header = block.block().signed_block_header(); + let header = block.signed_block_header(); Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e)) } @@ -1067,7 +1058,7 @@ impl SignatureVerifiedBlock { // signature. 
let mut consensus_context = from.consensus_context; signature_verifier - .include_all_signatures_except_proposal(block.block(), &mut consensus_context)?; + .include_all_signatures_except_proposal(block.as_block(), &mut consensus_context)?; if signature_verifier.verify().is_ok() { Ok(Self { @@ -1086,7 +1077,7 @@ impl SignatureVerifiedBlock { from: GossipVerifiedBlock, chain: &BeaconChain, ) -> Result>> { - let header = from.block.block().signed_block_header(); + let header = from.block.signed_block_header(); Self::from_gossip_verified_block(from, chain) .map_err(|e| BlockSlashInfo::from_early_error(header, e)) } @@ -1104,7 +1095,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo>> { - let header = self.block.block().signed_block_header(); + let header = self.block.signed_block_header(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { @@ -1124,7 +1115,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc } fn block(&self) -> &SignedBeaconBlock { - &self.block.block() + &self.block.as_block() } } @@ -1141,7 +1132,12 @@ impl IntoExecutionPendingBlock for Arc IntoExecutionPendingBlock for Arc IntoExecutionPendingBlock for BlockWrapper { +impl IntoExecutionPendingBlock for AvailableBlock { /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. fn into_execution_pending_block_slashable( @@ -1160,16 +1156,15 @@ impl IntoExecutionPendingBlock for BlockWrapper Result, BlockSlashInfo>> { // Perform an early check to prevent wasting time on irrelevant blocks. - let block_root = check_block_relevancy(self.block(), block_root, chain).map_err(|e| { - BlockSlashInfo::SignatureNotChecked(self.block().signed_block_header(), e) - })?; + let block_root = check_block_relevancy(self.as_block(), block_root, chain) + .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; SignatureVerifiedBlock::check_slashable(self, block_root, chain)? .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer) } fn block(&self) -> &SignedBeaconBlock { - self.block() + self.as_block() } } @@ -1182,7 +1177,7 @@ impl ExecutionPendingBlock { /// /// Returns an error if the block is invalid, or if the block was unable to be verified. pub fn from_signature_verified_components( - block: BlockWrapper, + block: AvailableBlock, block_root: Hash256, parent: PreProcessingSnapshot, mut consensus_context: ConsensusContext, @@ -1212,7 +1207,7 @@ impl ExecutionPendingBlock { // because it will revert finalization. Note that the finalized block is stored in fork // choice, so we will not reject any child of the finalized block (this is relevant during // genesis). - return Err(BlockError::ParentUnknown(block)); + return Err(BlockError::ParentUnknown(block.into_block_wrapper())); } // Reject any block that exceeds our limit on skipped slots. @@ -1222,7 +1217,7 @@ impl ExecutionPendingBlock { * Perform cursory checks to see if the block is even worth processing. */ - check_block_relevancy(block.block(), block_root, chain)?; + check_block_relevancy(block.as_block(), block_root, chain)?; // Define a future that will verify the execution payload with an execution engine. 
// @@ -1471,13 +1466,13 @@ impl ExecutionPendingBlock { &state, &chain.log, ); - write_block(block.block(), block_root, &chain.log); + write_block(block.as_block(), block_root, &chain.log); let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); if let Err(err) = per_block_processing( &mut state, - block.block(), + block.as_block(), // Signatures were verified earlier in this function. BlockSignatureStrategy::NoVerification, VerifyBlockRoot::True, @@ -1514,9 +1509,9 @@ impl ExecutionPendingBlock { * Check to ensure the state root on the block matches the one we have calculated. */ - if block.block().state_root() != state_root { + if block.state_root() != state_root { return Err(BlockError::StateRootMismatch { - block: block.block().state_root(), + block: block.state_root(), local: state_root, }); } @@ -1559,58 +1554,6 @@ impl ExecutionPendingBlock { } drop(fork_choice); - /* - * Verify kzg proofs and kzg commitments against transactions if required - */ - //FIXME(sean) should this be prior to applying attestions to fork choice above? done in parallel? - if let Some(data_availability_boundary) = chain.data_availability_boundary() { - if block_slot.epoch(T::EthSpec::slots_per_epoch()) >= data_availability_boundary { - let sidecar = block - .blobs(Some(block_root))? - .ok_or(BlockError::BlobValidation(BlobError::MissingBlobs))?; - let kzg = chain.kzg.as_ref().ok_or(BlockError::BlobValidation( - BlobError::TrustedSetupNotInitialized, - ))?; - let transactions = block - .message() - .body() - .execution_payload_eip4844() - .map(|payload| payload.transactions()) - .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))? - .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?; - let kzg_commitments = block - .message() - .body() - .blob_kzg_commitments() - .map_err(|_| BlockError::BlobValidation(BlobError::KzgCommitmentMissing))?; - if !consensus_context.blobs_sidecar_validated() { - if !kzg_utils::validate_blobs_sidecar( - &kzg, - block.slot(), - block_root, - kzg_commitments, - &sidecar, - ) - .map_err(|e| BlockError::BlobValidation(BlobError::KzgError(e)))? - { - return Err(BlockError::BlobValidation(BlobError::InvalidKzgProof)); - } - } - if !consensus_context.blobs_verified_vs_txs() - && verify_kzg_commitments_against_transactions::( - transactions, - kzg_commitments, - ) - //FIXME(sean) we should maybe just map this error so we have more info about the mismatch - .is_err() - { - return Err(BlockError::BlobValidation( - BlobError::TransactionCommitmentMismatch, - )); - } - } - } - Ok(Self { block, block_root, @@ -1697,11 +1640,11 @@ fn check_block_against_finalized_slot( /// ## Warning /// /// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here. 
-pub fn check_block_is_finalized_descendant( +pub fn check_block_is_finalized_descendant>( chain: &BeaconChain, fork_choice: &BeaconForkChoice, - block: BlockWrapper, -) -> Result, BlockError> { + block: B, +) -> Result> { if fork_choice.is_descendant_of_finalized(block.parent_root()) { Ok(block) } else { @@ -1722,7 +1665,7 @@ pub fn check_block_is_finalized_descendant( block_parent_root: block.parent_root(), }) } else { - Err(BlockError::ParentUnknown(block)) + Err(BlockError::ParentUnknown(block.into_block_wrapper())) } } } @@ -1799,7 +1742,7 @@ fn verify_parent_block_is_known( if let Some(proto_block) = chain .canonical_head .fork_choice_read_lock() - .get_block(&block.message().parent_root()) + .get_block(&block.parent_root()) { Ok((proto_block, block)) } else { @@ -1812,11 +1755,11 @@ fn verify_parent_block_is_known( /// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs /// whilst attempting the operation. #[allow(clippy::type_complexity)] -fn load_parent( +fn load_parent>( block_root: Hash256, - block: BlockWrapper, + block: B, chain: &BeaconChain, -) -> Result<(PreProcessingSnapshot, BlockWrapper), BlockError> { +) -> Result<(PreProcessingSnapshot, B), BlockError> { let spec = &chain.spec; // Reject any block if its parent is not known to fork choice. @@ -1834,7 +1777,7 @@ fn load_parent( .fork_choice_read_lock() .contains_block(&block.parent_root()) { - return Err(BlockError::ParentUnknown(block)); + return Err(BlockError::ParentUnknown(block.into_block_wrapper())); } let block_delay = chain diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 1216d5d4d..189935ba4 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -1,3 +1,4 @@ +use crate::blob_verification::AvailableBlock; use crate::{ attester_cache::{CommitteeLengths, Error}, metrics, @@ -5,7 +6,6 @@ use crate::{ use parking_lot::RwLock; use proto_array::Block as ProtoBlock; use std::sync::Arc; -use store::signed_block_and_blobs::BlockWrapper; use types::*; pub struct CacheItem { @@ -51,7 +51,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: BlockWrapper, + block: AvailableBlock, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -69,7 +69,7 @@ impl EarlyAttesterCache { }, }; - let (block, blobs) = block.deconstruct(Some(beacon_block_root)); + let (block, blobs) = block.deconstruct(); let item = CacheItem { epoch, committee_lengths, @@ -77,7 +77,7 @@ impl EarlyAttesterCache { source, target, block, - blobs: blobs.map_err(|_| Error::MissingBlobs)?, + blobs, proto_block, }; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index db9da1ede..a9a0bc9c6 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -1,4 +1,5 @@ use crate::metrics; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper, IntoAvailableBlock}; use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now}; use beacon_chain::NotifyExecutionLayer; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized}; @@ -9,7 +10,6 @@ use slot_clock::SlotClock; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; -use types::signed_block_and_blobs::BlockWrapper; use types::{ AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, 
FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, @@ -56,14 +56,27 @@ pub async fn publish_block( }; // Determine the delay after the start of the slot, register it with metrics. - let block = wrapped_block.block(); + let block = wrapped_block.as_block(); let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock); metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay); + let available_block = match wrapped_block.into_available_block(block_root, &chain) { + Ok(available_block) => available_block, + Err(e) => { + let msg = format!("{:?}", e); + error!( + log, + "Invalid block provided to HTTP API"; + "reason" => &msg + ); + return Err(warp_utils::reject::broadcast_without_import(msg)); + } + }; + match chain .process_block( block_root, - wrapped_block.clone(), + available_block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes, ) @@ -75,14 +88,14 @@ pub async fn publish_block( "Valid block from HTTP API"; "block_delay" => ?delay, "root" => format!("{}", root), - "proposer_index" => block.message().proposer_index(), - "slot" => block.slot(), + "proposer_index" => available_block.message().proposer_index(), + "slot" => available_block.slot(), ); // Notify the validator monitor. chain.validator_monitor.read().register_api_block( seen_timestamp, - block.message(), + available_block.message(), root, &chain.slot_clock, ); @@ -104,7 +117,7 @@ pub async fn publish_block( "Block was broadcast too late"; "msg" => "system may be overloaded, block likely to be orphaned", "delay_ms" => delay.as_millis(), - "slot" => block.slot(), + "slot" => available_block.slot(), "root" => ?root, ) } else if delay >= delayed_threshold { @@ -113,7 +126,7 @@ pub async fn publish_block( "Block broadcast was delayed"; "msg" => "system may be overloaded, block may be orphaned", "delay_ms" => delay.as_millis(), - "slot" => block.slot(), + "slot" => available_block.slot(), "root" => ?root, ) } @@ -124,8 +137,8 @@ pub async fn publish_block( info!( log, "Block from HTTP API already known"; - "block" => ?block.canonical_root(), - "slot" => block.slot(), + "block" => ?block_root, + "slot" => available_block.slot(), ); Ok(()) } diff --git a/beacon_node/network/src/beacon_processor/mod.rs b/beacon_node/network/src/beacon_processor/mod.rs index 4bf42fd9c..3f3b6986d 100644 --- a/beacon_node/network/src/beacon_processor/mod.rs +++ b/beacon_node/network/src/beacon_processor/mod.rs @@ -40,6 +40,7 @@ use crate::sync::manager::BlockProcessType; use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; +use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::parking_lot::Mutex; use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer}; use derivative::Derivative; @@ -62,7 +63,6 @@ use std::time::Duration; use std::{cmp, collections::HashSet}; use task_executor::TaskExecutor; use tokio::sync::mpsc; -use types::signed_block_and_blobs::BlockWrapper; use types::{ Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, diff --git a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs index 6f4330055..bf9dfd904 100644 --- a/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs +++ b/beacon_node/network/src/beacon_processor/work_reprocessing_queue.rs @@ -13,6 +13,7 @@ use 
super::MAX_SCHEDULED_WORK_QUEUE_LEN; use crate::metrics; use crate::sync::manager::BlockProcessType; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY}; use fnv::FnvHashMap; use futures::task::Poll; @@ -29,7 +30,6 @@ use task_executor::TaskExecutor; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::error::Error as TimeError; use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey}; -use types::signed_block_and_blobs::BlockWrapper; use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SubnetId}; const TASK_NAME: &str = "beacon_processor_reprocess_queue"; diff --git a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs index 5a085159f..19d82c449 100644 --- a/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/gossip_methods.rs @@ -1,5 +1,6 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage}; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::store::Error; use beacon_chain::{ attestation_verification::{self, Error as AttnError, VerifiedAttestation}, @@ -18,7 +19,6 @@ use ssz::Encode; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; -use types::signed_block_and_blobs::BlockWrapper; use types::{ Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, @@ -726,7 +726,7 @@ impl Worker { let block_root = if let Ok(verified_block) = &verification_result { verified_block.block_root } else { - block.block().canonical_root() + block.as_block().canonical_root() }; // Write the time the block was observed into delay cache. diff --git a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs index 284f96da7..88fcef6b9 100644 --- a/beacon_node/network/src/beacon_processor/worker/sync_methods.rs +++ b/beacon_node/network/src/beacon_processor/worker/sync_methods.rs @@ -7,6 +7,7 @@ use crate::beacon_processor::DuplicateCache; use crate::metrics; use crate::sync::manager::{BlockProcessType, SyncMessage}; use crate::sync::{BatchProcessResult, ChainId}; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper, IntoAvailableBlock}; use beacon_chain::CountUnrealized; use beacon_chain::{ BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, @@ -16,7 +17,6 @@ use lighthouse_network::PeerAction; use slog::{debug, error, info, warn}; use std::sync::Arc; use tokio::sync::mpsc; -use types::signed_block_and_blobs::BlockWrapper; use types::{Epoch, Hash256, SignedBeaconBlock}; /// Id associated to a batch processing request, either a sync batch or a parent lookup. 
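
// Illustrative sketch (not part of the diff): the sync-worker hunk that follows converts the
// network-level block wrapper into an availability-checked block before handing it to
// `process_block`, and funnels conversion failures into the same error path as processing
// failures. The types here (`Wrapper`, `Available`, `BlobError`, `BlockError`) are simplified
// stand-ins, not Lighthouse's real definitions.
enum BlobError { UnavailableBlobs }
enum BlockError { BlobValidation(BlobError) }

struct Wrapper { has_blobs: bool, da_required: bool }
struct Available;

impl Wrapper {
    fn into_available_block(self) -> Result<Available, BlobError> {
        if self.da_required && !self.has_blobs {
            Err(BlobError::UnavailableBlobs)
        } else {
            Ok(Available)
        }
    }
}

fn process_block(_block: Available) -> Result<(), BlockError> {
    Ok(())
}

fn handle_rpc_block(wrapper: Wrapper) -> Result<(), BlockError> {
    // Availability conversion runs first; its error is mapped into the block error,
    // mirroring the `map_err(BlockError::BlobValidation)` call in the hunk below.
    let available = wrapper
        .into_available_block()
        .map_err(BlockError::BlobValidation)?;
    process_block(available)
}

fn main() {
    assert!(handle_rpc_block(Wrapper { has_blobs: false, da_required: false }).is_ok());
    assert!(handle_rpc_block(Wrapper { has_blobs: false, da_required: true }).is_err());
}
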
@@ -85,15 +85,23 @@ impl Worker { } }; let slot = block.slot(); - let result = self - .chain - .process_block( - block_root, - block, - CountUnrealized::True, - NotifyExecutionLayer::Yes, - ) - .await; + let available_block = block + .into_available_block(block_root, &self.chain) + .map_err(BlockError::BlobValidation); + + let result = match available_block { + Ok(block) => { + self.chain + .process_block( + block_root, + block, + CountUnrealized::True, + NotifyExecutionLayer::Yes, + ) + .await + } + Err(e) => Err(e), + }; metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index ad1bfb1d4..c2dc31cc6 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -14,6 +14,7 @@ use crate::sync::network_context::SyncNetworkContext; use crate::sync::range_sync::{ BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, }; +use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::types::{BackFillState, NetworkGlobals}; use lighthouse_network::{PeerAction, PeerId}; @@ -24,7 +25,6 @@ use std::collections::{ HashMap, HashSet, }; use std::sync::Arc; -use types::signed_block_and_blobs::BlockWrapper; use types::{Epoch, EthSpec}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index 84b49e25f..690d56644 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -2,6 +2,7 @@ use std::collections::hash_map::Entry; use std::collections::HashMap; use std::time::Duration; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::{BeaconChainTypes, BlockError}; use fnv::FnvHashMap; use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode}; @@ -10,7 +11,6 @@ use lru_cache::LRUTimeCache; use slog::{debug, error, trace, warn, Logger}; use smallvec::SmallVec; use store::Hash256; -use types::signed_block_and_blobs::BlockWrapper; use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent}; use crate::metrics; diff --git a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs index 2aabbb563..b6de52d70 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_lookup.rs @@ -1,9 +1,9 @@ use super::RootBlockTuple; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::BeaconChainTypes; use lighthouse_network::PeerId; use store::Hash256; use strum::IntoStaticStr; -use types::signed_block_and_blobs::BlockWrapper; use crate::sync::block_lookups::ForceBlockRequest; use crate::sync::{ diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 05df18a0d..0ba08571f 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -1,4 +1,5 @@ use super::RootBlockTuple; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::get_block_root; use lighthouse_network::{rpc::BlocksByRootRequest, PeerId}; use 
rand::seq::IteratorRandom; @@ -6,7 +7,6 @@ use ssz_types::VariableList; use std::collections::HashSet; use store::{EthSpec, Hash256}; use strum::IntoStaticStr; -use types::signed_block_and_blobs::BlockWrapper; /// Object representing a single block lookup request. #[derive(PartialEq, Eq)] @@ -115,7 +115,7 @@ impl SingleBlockRequest { Some(block) => { // Compute the block root using this specific function so that we can get timing // metrics. - let block_root = get_block_root(block.block()); + let block_root = get_block_root(block.as_block()); if block_root != self.hash { // return an error and drop the block // NOTE: we take this is as a download failure to prevent counting the @@ -205,7 +205,7 @@ impl slog::Value for SingleBlockRequest { mod tests { use super::*; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::MinimalEthSpec as E; + use types::{MinimalEthSpec as E, SignedBeaconBlock}; fn rand_block() -> SignedBeaconBlock { let mut rng = XorShiftRng::from_seed([42; 16]); diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 46ac5bd0f..886c90397 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -1,6 +1,7 @@ +use beacon_chain::blob_verification::BlockWrapper; use std::{collections::VecDeque, sync::Arc}; -use types::{signed_block_and_blobs::BlockWrapper, BlobsSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobsSidecar, EthSpec, SignedBeaconBlock}; #[derive(Debug, Default)] pub struct BlocksAndBlobsRequestInfo { @@ -46,21 +47,20 @@ impl BlocksAndBlobsRequestInfo { .map(|sidecar| sidecar.beacon_block_slot == beacon_block.slot()) .unwrap_or(false) { - let blobs_sidecar = - accumulated_sidecars.pop_front().ok_or("missing sidecar")?; - Ok(BlockWrapper::new_with_blobs(beacon_block, blobs_sidecar)) + let blobs_sidecar = accumulated_sidecars.pop_front(); + BlockWrapper::new(beacon_block, blobs_sidecar) } else { - Ok(beacon_block.into()) + BlockWrapper::new(beacon_block, None) } }) - .collect::, _>>(); + .collect::>(); // if accumulated sidecars is not empty, throw an error. 
if !accumulated_sidecars.is_empty() { return Err("Received more sidecars than blocks"); } - pairs + Ok(pairs) } pub fn is_finished(&self) -> bool { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 5da203e0e..6b3a7b5de 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -42,6 +42,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEven use crate::service::NetworkMessage; use crate::status::ToStatusMessage; use crate::sync::range_sync::ByRangeRequestType; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState}; use futures::StreamExt; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; @@ -55,7 +56,6 @@ use std::ops::Sub; use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; -use types::signed_block_and_blobs::BlockWrapper; use types::{ BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, Slot, }; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index c54b3b1a9..2a0f2ea95 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -8,6 +8,7 @@ use crate::beacon_processor::WorkEvent; use crate::service::{NetworkMessage, RequestId}; use crate::status::ToStatusMessage; use crate::sync::block_lookups::ForceBlockRequest; +use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState}; use fnv::FnvHashMap; use lighthouse_network::rpc::methods::BlobsByRangeRequest; @@ -17,7 +18,6 @@ use slog::{debug, trace, warn}; use std::collections::hash_map::Entry; use std::sync::Arc; use tokio::sync::mpsc; -use types::signed_block_and_blobs::BlockWrapper; use types::{BlobsSidecar, EthSpec, SignedBeaconBlock}; /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id. diff --git a/beacon_node/network/src/sync/range_sync/batch.rs b/beacon_node/network/src/sync/range_sync/batch.rs index 184dcffc4..dda22dcfa 100644 --- a/beacon_node/network/src/sync/range_sync/batch.rs +++ b/beacon_node/network/src/sync/range_sync/batch.rs @@ -1,11 +1,11 @@ use crate::sync::manager::Id; +use beacon_chain::blob_verification::{AsBlock, BlockWrapper}; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::PeerId; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::ops::Sub; use strum::Display; -use types::signed_block_and_blobs::BlockWrapper; use types::{Epoch, EthSpec, Slot}; /// The number of times to retry a batch before it is considered failed. 
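
// Illustrative sketch (not part of the diff): the `block_sidecar_coupling.rs` hunk above now
// pairs each downloaded block with a sidecar purely by slot, falls back to a blob-less wrapper
// when no sidecar with a matching slot is queued, and rejects the response set if any sidecars
// are left over. `Block`, `Sidecar` and `Wrapper` are simplified stand-ins for the real types.
use std::collections::VecDeque;

struct Block { slot: u64 }
struct Sidecar { beacon_block_slot: u64 }
enum Wrapper {
    Block(Block),
    BlockAndBlob(Block, Sidecar),
}

fn couple(blocks: Vec<Block>, sidecars: Vec<Sidecar>) -> Result<Vec<Wrapper>, &'static str> {
    let mut sidecars: VecDeque<Sidecar> = sidecars.into();
    let pairs = blocks
        .into_iter()
        .map(|block| {
            // Pair only when the next sidecar explicitly references this block's slot.
            if sidecars
                .front()
                .map(|sidecar| sidecar.beacon_block_slot == block.slot)
                .unwrap_or(false)
            {
                let sidecar = sidecars.pop_front().expect("front checked above");
                Wrapper::BlockAndBlob(block, sidecar)
            } else {
                Wrapper::Block(block)
            }
        })
        .collect::<Vec<_>>();
    // Any unconsumed sidecar means the peer sent more sidecars than blocks.
    if !sidecars.is_empty() {
        return Err("Received more sidecars than blocks");
    }
    Ok(pairs)
}

fn main() {
    let blocks = vec![Block { slot: 1 }, Block { slot: 2 }];
    let sidecars = vec![Sidecar { beacon_block_slot: 1 }];
    let coupled = couple(blocks, sidecars).unwrap();
    assert!(matches!(coupled[0], Wrapper::BlockAndBlob(..)));
    assert!(matches!(coupled[1], Wrapper::Block(..)));
}
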
diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d60de3224..ea78cd3c5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -3,6 +3,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEven use crate::sync::{ manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult, }; +use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::{BeaconChainTypes, CountUnrealized}; use fnv::FnvHashMap; use lighthouse_network::{PeerAction, PeerId}; @@ -10,7 +11,6 @@ use rand::seq::SliceRandom; use slog::{crit, debug, o, warn}; use std::collections::{btree_map::Entry, BTreeMap, HashSet}; use std::hash::{Hash, Hasher}; -use types::signed_block_and_blobs::BlockWrapper; use types::{Epoch, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs index 09d93b0e8..e3fceef66 100644 --- a/beacon_node/network/src/sync/range_sync/range.rs +++ b/beacon_node/network/src/sync/range_sync/range.rs @@ -47,6 +47,7 @@ use crate::status::ToStatusMessage; use crate::sync::manager::Id; use crate::sync::network_context::SyncNetworkContext; use crate::sync::BatchProcessResult; +use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::{BeaconChain, BeaconChainTypes}; use lighthouse_network::rpc::GoodbyeReason; use lighthouse_network::PeerId; @@ -55,7 +56,6 @@ use lru_cache::LRUTimeCache; use slog::{crit, debug, trace, warn}; use std::collections::HashMap; use std::sync::Arc; -use types::signed_block_and_blobs::BlockWrapper; use types::{Epoch, EthSpec, Hash256, Slot}; /// For how long we store failed finalized chains to prevent retries. diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index 23dd989f9..4c8966f92 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -18,10 +18,8 @@ pub struct ConsensusContext { /// Cache of indexed attestations constructed during block processing. indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, - /// Whether `validate_blobs_sidecar` has successfully passed. - blobs_sidecar_validated: bool, /// Whether `verify_kzg_commitments_against_transactions` has successfully passed. 
- blobs_verified_vs_txs: bool, + kzg_commitments_consistent: bool, } #[derive(Debug, PartialEq, Clone)] @@ -44,8 +42,7 @@ impl ConsensusContext { proposer_index: None, current_block_root: None, indexed_attestations: HashMap::new(), - blobs_sidecar_validated: false, - blobs_verified_vs_txs: false, + kzg_commitments_consistent: false, } } @@ -162,21 +159,12 @@ impl ConsensusContext { self.indexed_attestations.len() } - pub fn set_blobs_sidecar_validated(mut self, blobs_sidecar_validated: bool) -> Self { - self.blobs_sidecar_validated = blobs_sidecar_validated; + pub fn set_kzg_commitments_consistent(mut self, kzg_commitments_consistent: bool) -> Self { + self.kzg_commitments_consistent = kzg_commitments_consistent; self } - pub fn set_blobs_verified_vs_txs(mut self, blobs_verified_vs_txs: bool) -> Self { - self.blobs_verified_vs_txs = blobs_verified_vs_txs; - self - } - - pub fn blobs_sidecar_validated(&self) -> bool { - self.blobs_sidecar_validated - } - - pub fn blobs_verified_vs_txs(&self) -> bool { - self.blobs_verified_vs_txs + pub fn kzg_commitments_consistent(&self) -> bool { + self.kzg_commitments_consistent } } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 01e7614ac..c61662090 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -180,10 +180,7 @@ pub fn per_block_processing>( )?; } - process_blob_kzg_commitments(block.body())?; - - //FIXME(sean) add `validate_blobs_sidecar` (is_data_available) and only run it if the consensus - // context tells us it wasnt already run + process_blob_kzg_commitments(block.body(), ctxt)?; Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs index b1336eb11..be4afc57d 100644 --- a/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs +++ b/consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs @@ -1,4 +1,4 @@ -use crate::BlockProcessingError; +use crate::{BlockProcessingError, ConsensusContext}; use eth2_hashing::hash_fixed; use itertools::{EitherOrBoth, Itertools}; use safe_arith::SafeArith; @@ -11,13 +11,17 @@ use types::{ pub fn process_blob_kzg_commitments>( block_body: BeaconBlockBodyRef, + ctxt: &mut ConsensusContext, ) -> Result<(), BlockProcessingError> { + // Return early if this check has already been run. + if ctxt.kzg_commitments_consistent() { + return Ok(()); + } if let (Ok(payload), Ok(kzg_commitments)) = ( block_body.execution_payload(), block_body.blob_kzg_commitments(), ) { if let Some(transactions) = payload.transactions() { - //FIXME(sean) only run if this wasn't run in gossip (use consensus context) if !verify_kzg_commitments_against_transactions::(transactions, kzg_commitments)? { return Err(BlockProcessingError::BlobVersionHashMismatch); } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index f57169c72..0df386b94 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -35,8 +35,12 @@ impl From for Hash256 { } } +#[derive(Debug)] pub enum BlobReconstructionError { - BlobsMissing, + /// No blobs for the specified block where we would expect blobs. + UnavailableBlobs, + /// Blobs provided for a pre-Eip4844 fork. + InconsistentFork, } /// A `BeaconBlock` and a signature from its proposer. 
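
// Illustrative sketch (not part of the diff): the consensus-context flag pattern introduced
// above. Gossip or signature verification sets `kzg_commitments_consistent` once the
// commitments have been checked against the payload transactions, so
// `process_blob_kzg_commitments` can return early instead of repeating the check during state
// transition. The types below are simplified stand-ins for the real ones.
#[derive(Default)]
struct ConsensusContext {
    kzg_commitments_consistent: bool,
}

impl ConsensusContext {
    fn set_kzg_commitments_consistent(mut self, value: bool) -> Self {
        self.kzg_commitments_consistent = value;
        self
    }
    fn kzg_commitments_consistent(&self) -> bool {
        self.kzg_commitments_consistent
    }
}

struct Body { commitments_match_transactions: bool }

fn process_blob_kzg_commitments(
    body: &Body,
    ctxt: &mut ConsensusContext,
) -> Result<(), &'static str> {
    // Skip the consistency check if an earlier verification stage already ran it.
    if ctxt.kzg_commitments_consistent() {
        return Ok(());
    }
    if body.commitments_match_transactions {
        Ok(())
    } else {
        Err("BlobVersionHashMismatch")
    }
}

fn main() {
    // Already checked at gossip time: the flag short-circuits the check.
    let mut ctxt = ConsensusContext::default().set_kzg_commitments_consistent(true);
    let body = Body { commitments_match_transactions: false };
    assert!(process_blob_kzg_commitments(&body, &mut ctxt).is_ok());

    // Not checked yet: the mismatch is caught here.
    let mut ctxt = ConsensusContext::default();
    assert!(process_blob_kzg_commitments(&body, &mut ctxt).is_err());
}
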
@@ -244,25 +248,24 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBeaconBlock<T, Payload>
 
     /// Reconstructs an empty `BlobsSidecar`, using the given block root if provided, else calculates it.
     /// If this block has kzg commitments, an error will be returned. If this block is from prior to the
-    /// Eip4844 fork, `None` will be returned.
+    /// Eip4844 fork, this will error.
     pub fn reconstruct_empty_blobs(
         &self,
         block_root_opt: Option<Hash256>,
-    ) -> Result<Option<BlobsSidecar<T>>, BlobReconstructionError> {
-        self.message()
+    ) -> Result<BlobsSidecar<T>, BlobReconstructionError> {
+        let kzg_commitments = self
+            .message()
             .body()
             .blob_kzg_commitments()
-            .map(|kzg_commitments| {
-                if kzg_commitments.len() > 0 {
-                    Err(BlobReconstructionError::BlobsMissing)
-                } else {
-                    Ok(Some(BlobsSidecar::empty_from_parts(
-                        block_root_opt.unwrap_or(self.canonical_root()),
-                        self.slot(),
-                    )))
-                }
-            })
-            .unwrap_or(Ok(None))
+            .map_err(|_| BlobReconstructionError::InconsistentFork)?;
+        if kzg_commitments.is_empty() {
+            Ok(BlobsSidecar::empty_from_parts(
+                block_root_opt.unwrap_or(self.canonical_root()),
+                self.slot(),
+            ))
+        } else {
+            Err(BlobReconstructionError::UnavailableBlobs)
+        }
     }
 }
 
diff --git a/consensus/types/src/signed_block_and_blobs.rs b/consensus/types/src/signed_block_and_blobs.rs
index c589fbcfe..4ea0d6616 100644
--- a/consensus/types/src/signed_block_and_blobs.rs
+++ b/consensus/types/src/signed_block_and_blobs.rs
@@ -1,5 +1,4 @@
-use crate::signed_beacon_block::BlobReconstructionError;
-use crate::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockEip4844, Slot};
+use crate::{BlobsSidecar, EthSpec, SignedBeaconBlock, SignedBeaconBlockEip4844};
 use derivative::Derivative;
 use serde_derive::{Deserialize, Serialize};
 use ssz::{Decode, DecodeError};
@@ -33,128 +32,3 @@ impl<T: EthSpec> SignedBeaconBlockAndBlobsSidecar<T> {
         })
     }
 }
-
-/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. This newtype
-/// wraps the `BlockWrapperInner` to ensure blobs cannot be accessed via an enum match. This would
-/// circumvent empty blob reconstruction when accessing blobs.
-#[derive(Clone, Debug, Derivative)]
-#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
-pub struct BlockWrapper<T: EthSpec>(BlockWrapperInner<T>);
-
-/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`].
-#[derive(Clone, Debug, Derivative)]
-#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
-pub enum BlockWrapperInner<T: EthSpec> {
-    Block(Arc<SignedBeaconBlock<T>>),
-    BlockAndBlob(SignedBeaconBlockAndBlobsSidecar<T>),
-}
-
-impl<T: EthSpec> BlockWrapper<T> {
-    pub fn new(block: Arc<SignedBeaconBlock<T>>) -> Self {
-        Self(BlockWrapperInner::Block(block))
-    }
-
-    pub fn new_with_blobs(
-        beacon_block: Arc<SignedBeaconBlock<T>>,
-        blobs_sidecar: Arc<BlobsSidecar<T>>,
-    ) -> Self {
-        Self(BlockWrapperInner::BlockAndBlob(
-            SignedBeaconBlockAndBlobsSidecar {
-                beacon_block,
-                blobs_sidecar,
-            },
-        ))
-    }
-
-    pub fn slot(&self) -> Slot {
-        match &self.0 {
-            BlockWrapperInner::Block(block) => block.slot(),
-            BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
-                block_sidecar_pair.beacon_block.slot()
-            }
-        }
-    }
-    pub fn block(&self) -> &SignedBeaconBlock<T> {
-        match &self.0 {
-            BlockWrapperInner::Block(block) => &block,
-            BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => &block_sidecar_pair.beacon_block,
-        }
-    }
-    pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<T>> {
-        match &self.0 {
-            BlockWrapperInner::Block(block) => block.clone(),
-            BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
-                block_sidecar_pair.beacon_block.clone()
-            }
-        }
-    }
-
-    pub fn blobs(
-        &self,
-        block_root: Option<Hash256>,
-    ) -> Result<Option<Arc<BlobsSidecar<T>>>, BlobReconstructionError> {
-        match &self.0 {
-            BlockWrapperInner::Block(block) => block
-                .reconstruct_empty_blobs(block_root)
-                .map(|blob_opt| blob_opt.map(Arc::new)),
-            BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
-                Ok(Some(block_sidecar_pair.blobs_sidecar.clone()))
-            }
-        }
-    }
-
-    pub fn message(&self) -> crate::BeaconBlockRef<T> {
-        match &self.0 {
-            BlockWrapperInner::Block(block) => block.message(),
-            BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
-                block_sidecar_pair.beacon_block.message()
-            }
-        }
-    }
-
-    pub fn parent_root(&self) -> Hash256 {
-        self.block().parent_root()
-    }
-
-    pub fn deconstruct(
-        self,
-        block_root: Option<Hash256>,
-    ) -> (
-        Arc<SignedBeaconBlock<T>>,
-        Result<Option<Arc<BlobsSidecar<T>>>, BlobReconstructionError>,
-    ) {
-        match self.0 {
-            BlockWrapperInner::Block(block) => {
-                let blobs = block
-                    .reconstruct_empty_blobs(block_root)
-                    .map(|blob_opt| blob_opt.map(Arc::new));
-                (block, blobs)
-            }
-            BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
-                let SignedBeaconBlockAndBlobsSidecar {
-                    beacon_block,
-                    blobs_sidecar,
-                } = block_sidecar_pair;
-                (beacon_block, Ok(Some(blobs_sidecar)))
-            }
-        }
-    }
-}
-
-impl<T: EthSpec> From<SignedBeaconBlock<T>> for BlockWrapper<T> {
-    fn from(block: SignedBeaconBlock<T>) -> Self {
-        BlockWrapper(BlockWrapperInner::Block(Arc::new(block)))
-    }
-}
-
-impl<T: EthSpec> From<Arc<SignedBeaconBlock<T>>> for BlockWrapper<T> {
-    fn from(block: Arc<SignedBeaconBlock<T>>) -> Self {
-        BlockWrapper(BlockWrapperInner::Block(block))
-    }
-}
-
-impl<T: EthSpec> From<SignedBeaconBlockAndBlobsSidecar<T>> for BlockWrapper<T> {
-    fn from(block: SignedBeaconBlockAndBlobsSidecar<T>) -> Self {
-        BlockWrapper(BlockWrapperInner::BlockAndBlob(block))
-    }
-}
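The wrapper removed here moves out of `types` and, per the new imports above, downstream code now pulls `BlockWrapper` from `beacon_chain::blob_verification`. Its doc comment explains the design worth preserving: a public newtype over a private enum, so callers cannot match on the variants and bypass the accessor that reconstructs empty blobs. The sketch below shows that pattern in miniature; `wrapper`, `Wrapper`, `Inner`, `Block`, and `Blobs` are hypothetical names for illustration, not Lighthouse types.

// Minimal sketch of the newtype-over-private-enum encapsulation pattern.
mod wrapper {
    #[derive(Clone, Debug)]
    pub struct Block(pub String);

    #[derive(Clone, Debug)]
    pub struct Blobs(pub Vec<u8>);

    /// Public newtype; the inner enum stays private to this module.
    pub struct Wrapper(Inner);

    enum Inner {
        BlockOnly(Block),
        BlockAndBlobs(Block, Blobs),
    }

    impl Wrapper {
        pub fn new(block: Block) -> Self {
            Wrapper(Inner::BlockOnly(block))
        }

        pub fn new_with_blobs(block: Block, blobs: Blobs) -> Self {
            Wrapper(Inner::BlockAndBlobs(block, blobs))
        }

        /// The only way to read blobs: a block without them yields a reconstructed
        /// empty value instead of exposing the raw variant.
        pub fn blobs(&self) -> Blobs {
            match &self.0 {
                Inner::BlockOnly(_) => Blobs(Vec::new()),
                Inner::BlockAndBlobs(_, blobs) => blobs.clone(),
            }
        }
    }
}

fn main() {
    use wrapper::{Block, Blobs, Wrapper};

    let with_blobs = Wrapper::new_with_blobs(Block("a".into()), Blobs(vec![1, 2, 3]));
    let without_blobs = Wrapper::new(Block("b".into()));

    // Outside the `wrapper` module, `Inner` is invisible, so `blobs()` is the only path.
    assert_eq!(with_blobs.blobs().0, vec![1, 2, 3]);
    assert!(without_blobs.blobs().0.is_empty());
}

The same goal presumably carries over to the relocated wrapper and the new `AvailableBlock` type imported in `beacon_chain.rs`: blob availability is decided where the wrapper is deconstructed, not wherever an enum happens to be matched.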