merge conflict resolution

This commit is contained in:
realbigsean 2023-01-25 11:10:44 +01:00
commit 5e8d79891b
25 changed files with 652 additions and 431 deletions

View File

@ -7,6 +7,7 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey};
use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
use crate::beacon_proposer_cache::BeaconProposerCache;
use crate::blob_cache::BlobCache;
use crate::blob_verification::{AsBlock, AvailableBlock, BlockWrapper};
use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::{
check_block_is_finalized_descendant, check_block_relevancy, get_block_root,
@ -107,7 +108,6 @@ use tree_hash::TreeHash;
use types::beacon_state::CloneConfig;
use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
use types::consts::merge::INTERVALS_PER_SLOT;
use types::signed_block_and_blobs::BlockWrapper;
use types::*;
pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
@ -2392,19 +2392,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let children = chain_segment
.iter()
.skip(1)
.map(|block| (block.block().parent_root(), block.slot()))
.map(|block| (block.parent_root(), block.slot()))
.collect::<Vec<_>>();
for (i, block) in chain_segment.into_iter().enumerate() {
// Ensure the block is the correct structure for the fork at `block.slot()`.
if let Err(e) = block.block().fork_name(&self.spec) {
if let Err(e) = block.as_block().fork_name(&self.spec) {
return Err(ChainSegmentResult::Failed {
imported_blocks,
error: BlockError::InconsistentFork(e),
});
}
let block_root = get_block_root(block.block());
let block_root = get_block_root(block.as_block());
if let Some((child_parent_root, child_slot)) = children.get(i) {
// If this block has a child in this chain segment, ensure that its parent root matches
@ -2428,7 +2428,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
}
match check_block_relevancy(block.block(), block_root, self) {
match check_block_relevancy(block.as_block(), block_root, self) {
// If the block is relevant, add it to the filtered chain segment.
Ok(_) => filtered_chain_segment.push((block_root, block)),
// If the block is already known, simply ignore this block.
@ -2803,7 +2803,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
#[allow(clippy::too_many_arguments)]
fn import_block(
&self,
signed_block: BlockWrapper<T::EthSpec>,
signed_block: AvailableBlock<T::EthSpec>,
block_root: Hash256,
mut state: BeaconState<T::EthSpec>,
confirmed_state_roots: Vec<Hash256>,
@ -2963,7 +2963,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If the write fails, revert fork choice to the version from disk, else we can
// end up with blocks in fork choice that are missing from disk.
// See https://github.com/sigp/lighthouse/issues/2028
let (signed_block, blobs) = signed_block.deconstruct(Some(block_root));
let (signed_block, blobs) = signed_block.deconstruct();
let block = signed_block.message();
let mut ops: Vec<_> = confirmed_state_roots
.into_iter()
@ -2972,7 +2972,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
ops.push(StoreOp::PutBlock(block_root, signed_block.clone()));
ops.push(StoreOp::PutState(block.state_root(), &state));
if let Some(blobs) = blobs? {
if let Some(blobs) = blobs {
if blobs.blobs.len() > 0 {
//FIXME(sean) using this for debugging for now
info!(self.log, "Writing blobs to store"; "block_root" => ?block_root);
@ -4592,10 +4592,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
};
// Use a context without block root or proposer index so that both are checked.
let mut ctxt = ConsensusContext::new(block.slot())
//FIXME(sean) This is a hack because `validate_blobs_sidecar` requires the block root
// which we won't have until after the state root is calculated.
.set_blobs_sidecar_validated(true);
let mut ctxt = ConsensusContext::new(block.slot());
per_block_processing(
&mut state,
@ -4617,11 +4614,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
//FIXME(sean)
// - add a new timer for processing here
if let Some(blobs) = blobs_opt {
let kzg = if let Some(kzg) = &self.kzg {
kzg
} else {
return Err(BlockProductionError::TrustedSetupNotInitialized);
};
let kzg = self
.kzg
.as_ref()
.ok_or(BlockProductionError::TrustedSetupNotInitialized)?;
let kzg_aggregated_proof =
kzg_utils::compute_aggregate_kzg_proof::<T::EthSpec>(&kzg, &blobs)
.map_err(|e| BlockProductionError::KzgError(e))?;

View File

@ -1,10 +1,17 @@
use derivative::Derivative;
use slot_clock::SlotClock;
use std::sync::Arc;
use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use crate::{kzg_utils, BeaconChainError};
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
use types::signed_beacon_block::BlobReconstructionError;
use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions};
use types::{
BeaconBlockRef, BeaconStateError, BlobsSidecar, EthSpec, Hash256, KzgCommitment,
SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockHeader, Slot,
Transactions,
};
use types::{Epoch, ExecPayload};
#[derive(Debug)]
pub enum BlobError {
@ -29,30 +36,6 @@ pub enum BlobError {
block_slot: Slot,
},
/// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`.
///
///
/// ## Peer scoring
///
/// The peer has sent an invalid message.
BlobOutOfRange {
blob_index: usize,
},
/// The blob sidecar contains a KZGCommitment that is not a valid G1 point on
/// the bls curve.
///
/// ## Peer scoring
///
/// The peer has sent an invalid message.
InvalidKZGCommitment,
/// The proposal signature is invalid.
///
/// ## Peer scoring
///
/// The signature on the blob sidecar is invalid and the peer is faulty.
ProposalSignatureInvalid,
/// No kzg commitment associated with blob sidecar.
KzgCommitmentMissing,
@ -68,17 +51,6 @@ pub enum BlobError {
KzgError(kzg::Error),
/// A blob sidecar for this proposer and slot has already been observed.
///
/// ## Peer scoring
///
/// The `proposer` has already proposed a sidecar at this slot. The existing sidecar may or may not
/// be equal to the given sidecar.
RepeatSidecar {
proposer: u64,
slot: Slot,
},
/// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
///
/// ## Peer scoring
@ -87,12 +59,17 @@ pub enum BlobError {
/// sync committee message is valid.
BeaconChainError(BeaconChainError),
/// No blobs for the specified block where we would expect blobs.
MissingBlobs,
UnavailableBlobs,
/// Blobs provided for a pre-Eip4844 fork.
InconsistentFork,
}
impl From<BlobReconstructionError> for BlobError {
fn from(_: BlobReconstructionError) -> Self {
BlobError::MissingBlobs
fn from(e: BlobReconstructionError) -> Self {
match e {
BlobReconstructionError::UnavailableBlobs => BlobError::UnavailableBlobs,
BlobReconstructionError::InconsistentFork => BlobError::InconsistentFork,
}
}
}
@ -109,6 +86,36 @@ impl From<BeaconStateError> for BlobError {
}
pub fn validate_blob_for_gossip<T: BeaconChainTypes>(
block_wrapper: BlockWrapper<T::EthSpec>,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<AvailableBlock<T::EthSpec>, BlobError> {
if let BlockWrapper::BlockAndBlob(ref block, ref blobs_sidecar) = block_wrapper {
let blob_slot = blobs_sidecar.beacon_block_slot;
// Do not gossip or process blobs from future or past slots.
let latest_permissible_slot = chain
.slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot {
return Err(BlobError::FutureSlot {
message_slot: latest_permissible_slot,
latest_permissible_slot: blob_slot,
});
}
if blob_slot != block.slot() {
return Err(BlobError::SlotMismatch {
blob_slot,
block_slot: block.slot(),
});
}
}
block_wrapper.into_available_block(block_root, chain)
}
fn verify_data_availability<T: BeaconChainTypes>(
blob_sidecar: &BlobsSidecar<T::EthSpec>,
kzg_commitments: &[KzgCommitment],
transactions: &Transactions<T::EthSpec>,
@ -116,27 +123,6 @@ pub fn validate_blob_for_gossip<T: BeaconChainTypes>(
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<(), BlobError> {
let blob_slot = blob_sidecar.beacon_block_slot;
// Do not gossip or process blobs from future or past slots.
let latest_permissible_slot = chain
.slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot {
return Err(BlobError::FutureSlot {
message_slot: latest_permissible_slot,
latest_permissible_slot: blob_slot,
});
}
if blob_slot != block_slot {
return Err(BlobError::SlotMismatch {
blob_slot,
block_slot,
});
}
// Validate commitments against transactions in the block.
if verify_kzg_commitments_against_transactions::<T::EthSpec>(transactions, kzg_commitments)
.is_err()
{
@ -162,3 +148,416 @@ pub fn validate_blob_for_gossip<T: BeaconChainTypes>(
}
Ok(())
}
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. This makes no
/// claims about data availability and should not be used in consensus. This struct is useful in
/// networking when we want to send blocks around without consensus checks.
#[derive(Clone, Debug, Derivative)]
#[derivative(PartialEq, Hash(bound = "E: EthSpec"))]
pub enum BlockWrapper<E: EthSpec> {
Block(Arc<SignedBeaconBlock<E>>),
BlockAndBlob(Arc<SignedBeaconBlock<E>>, Arc<BlobsSidecar<E>>),
}
impl<E: EthSpec> BlockWrapper<E> {
pub fn new(
block: Arc<SignedBeaconBlock<E>>,
blobs_sidecar: Option<Arc<BlobsSidecar<E>>>,
) -> Self {
if let Some(blobs_sidecar) = blobs_sidecar {
BlockWrapper::BlockAndBlob(block, blobs_sidecar)
} else {
BlockWrapper::Block(block)
}
}
}
impl<E: EthSpec> From<SignedBeaconBlock<E>> for BlockWrapper<E> {
fn from(block: SignedBeaconBlock<E>) -> Self {
BlockWrapper::Block(Arc::new(block))
}
}
impl<E: EthSpec> From<SignedBeaconBlockAndBlobsSidecar<E>> for BlockWrapper<E> {
fn from(block: SignedBeaconBlockAndBlobsSidecar<E>) -> Self {
let SignedBeaconBlockAndBlobsSidecar {
beacon_block,
blobs_sidecar,
} = block;
BlockWrapper::BlockAndBlob(beacon_block, blobs_sidecar)
}
}
impl<E: EthSpec> From<Arc<SignedBeaconBlock<E>>> for BlockWrapper<E> {
fn from(block: Arc<SignedBeaconBlock<E>>) -> Self {
BlockWrapper::Block(block)
}
}
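As a usage note for the wrapper above: networking code pairs a block with an optional sidecar and lets `BlockWrapper::new` pick the variant. A minimal sketch, assuming the Lighthouse `types` and `beacon_chain` crates are in scope (the helper function below is illustrative, not part of this commit):

    use std::sync::Arc;

    use beacon_chain::blob_verification::BlockWrapper;
    use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};

    /// Illustrative helper: wrap a block received over the network, with or without its sidecar.
    fn wrap_for_processing<E: EthSpec>(
        block: Arc<SignedBeaconBlock<E>>,
        sidecar: Option<Arc<BlobsSidecar<E>>>,
    ) -> BlockWrapper<E> {
        // `BlockWrapper::new` chooses `Block` or `BlockAndBlob` depending on the sidecar.
        BlockWrapper::new(block, sidecar)
    }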
#[derive(Copy, Clone)]
pub enum DataAvailabilityCheckRequired {
Yes,
No,
}
pub trait IntoAvailableBlock<T: BeaconChainTypes> {
fn into_available_block(
self,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<AvailableBlock<T::EthSpec>, BlobError>;
}
impl<T: BeaconChainTypes> IntoAvailableBlock<T> for BlockWrapper<T::EthSpec> {
fn into_available_block(
self,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<AvailableBlock<T::EthSpec>, BlobError> {
let data_availability_boundary = chain.data_availability_boundary();
let da_check_required =
data_availability_boundary.map_or(DataAvailabilityCheckRequired::No, |boundary| {
if self.slot().epoch(T::EthSpec::slots_per_epoch()) >= boundary {
DataAvailabilityCheckRequired::Yes
} else {
DataAvailabilityCheckRequired::No
}
});
match self {
BlockWrapper::Block(block) => AvailableBlock::new(block, block_root, da_check_required),
BlockWrapper::BlockAndBlob(block, blobs_sidecar) => {
if matches!(da_check_required, DataAvailabilityCheckRequired::Yes) {
let kzg_commitments = block
.message()
.body()
.blob_kzg_commitments()
.map_err(|_| BlobError::KzgCommitmentMissing)?;
let transactions = block
.message()
.body()
.execution_payload_eip4844()
.map(|payload| payload.transactions())
.map_err(|_| BlobError::TransactionsMissing)?
.ok_or(BlobError::TransactionsMissing)?;
verify_data_availability(
&blobs_sidecar,
kzg_commitments,
transactions,
block.slot(),
block_root,
chain,
)?;
}
AvailableBlock::new_with_blobs(block, blobs_sidecar, da_check_required)
}
}
}
}
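The conversion above only enforces blob checks once the block's epoch has reached the chain's data availability boundary. A minimal sketch of that predicate, factored out for clarity (the helper name is ours; in this commit the logic lives inline in `into_available_block`):

    use beacon_chain::blob_verification::DataAvailabilityCheckRequired;
    use types::{Epoch, EthSpec, Slot};

    /// Illustrative helper: mirrors the `map_or` above, where `boundary` is
    /// `chain.data_availability_boundary()`.
    fn da_check_for_slot<E: EthSpec>(
        slot: Slot,
        boundary: Option<Epoch>,
    ) -> DataAvailabilityCheckRequired {
        match boundary {
            Some(boundary) if slot.epoch(E::slots_per_epoch()) >= boundary => {
                DataAvailabilityCheckRequired::Yes
            }
            _ => DataAvailabilityCheckRequired::No,
        }
    }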
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. An
/// `AvailableBlock` has passed any required data availability checks and should be used in
/// consensus. This newtype wraps `AvailableBlockInner` to ensure data availability checks
/// cannot be circumvented on construction.
#[derive(Clone, Debug, Derivative)]
#[derivative(PartialEq, Hash(bound = "E: EthSpec"))]
pub struct AvailableBlock<E: EthSpec>(AvailableBlockInner<E>);
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`].
#[derive(Clone, Debug, Derivative)]
#[derivative(PartialEq, Hash(bound = "E: EthSpec"))]
enum AvailableBlockInner<E: EthSpec> {
Block(Arc<SignedBeaconBlock<E>>),
BlockAndBlob(SignedBeaconBlockAndBlobsSidecar<E>),
}
impl<E: EthSpec> AvailableBlock<E> {
pub fn new(
beacon_block: Arc<SignedBeaconBlock<E>>,
block_root: Hash256,
da_check_required: DataAvailabilityCheckRequired,
) -> Result<Self, BlobError> {
match beacon_block.as_ref() {
// No data availability check required prior to Eip4844.
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Capella(_)
| SignedBeaconBlock::Merge(_) => {
Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block)))
}
SignedBeaconBlock::Eip4844(_) => {
match da_check_required {
DataAvailabilityCheckRequired::Yes => {
// Attempt to reconstruct empty blobs here.
let blobs_sidecar = beacon_block
.reconstruct_empty_blobs(Some(block_root))
.map(Arc::new)?;
return Ok(AvailableBlock(AvailableBlockInner::BlockAndBlob(
SignedBeaconBlockAndBlobsSidecar {
beacon_block,
blobs_sidecar,
},
)));
}
DataAvailabilityCheckRequired::No => {
Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block)))
}
}
}
}
}
/// This function is private because an `AvailableBlock` should be
/// constructed via the `into_available_block` method.
fn new_with_blobs(
beacon_block: Arc<SignedBeaconBlock<E>>,
blobs_sidecar: Arc<BlobsSidecar<E>>,
da_check_required: DataAvailabilityCheckRequired,
) -> Result<Self, BlobError> {
match beacon_block.as_ref() {
// This method shouldn't be called with a pre-Eip4844 block.
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Capella(_)
| SignedBeaconBlock::Merge(_) => Err(BlobError::InconsistentFork),
SignedBeaconBlock::Eip4844(_) => {
match da_check_required {
DataAvailabilityCheckRequired::Yes => Ok(AvailableBlock(
AvailableBlockInner::BlockAndBlob(SignedBeaconBlockAndBlobsSidecar {
beacon_block,
blobs_sidecar,
}),
)),
DataAvailabilityCheckRequired::No => {
// Blobs were not verified, so we drop them; we'll instead just pass around
// an available `Eip4844` block without blobs.
Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block)))
}
}
}
}
}
pub fn blobs(&self) -> Option<Arc<BlobsSidecar<E>>> {
match &self.0 {
AvailableBlockInner::Block(_) => None,
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
Some(block_sidecar_pair.blobs_sidecar.clone())
}
}
}
pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<E>>, Option<Arc<BlobsSidecar<E>>>) {
match self.0 {
AvailableBlockInner::Block(block) => (block, None),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
let SignedBeaconBlockAndBlobsSidecar {
beacon_block,
blobs_sidecar,
} = block_sidecar_pair;
(beacon_block, Some(blobs_sidecar))
}
}
}
}
pub trait IntoBlockWrapper<E: EthSpec>: AsBlock<E> {
fn into_block_wrapper(self) -> BlockWrapper<E>;
}
impl<E: EthSpec> IntoBlockWrapper<E> for BlockWrapper<E> {
fn into_block_wrapper(self) -> BlockWrapper<E> {
self
}
}
impl<E: EthSpec> IntoBlockWrapper<E> for AvailableBlock<E> {
fn into_block_wrapper(self) -> BlockWrapper<E> {
let (block, blobs) = self.deconstruct();
if let Some(blobs) = blobs {
BlockWrapper::BlockAndBlob(block, blobs)
} else {
BlockWrapper::Block(block)
}
}
}
pub trait AsBlock<E: EthSpec> {
fn slot(&self) -> Slot;
fn epoch(&self) -> Epoch;
fn parent_root(&self) -> Hash256;
fn state_root(&self) -> Hash256;
fn signed_block_header(&self) -> SignedBeaconBlockHeader;
fn message(&self) -> BeaconBlockRef<E>;
fn as_block(&self) -> &SignedBeaconBlock<E>;
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>>;
}
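Because `BlockWrapper`, `&BlockWrapper` and `AvailableBlock` all implement `AsBlock`, downstream code can stay generic over whether blobs are attached or verified. A small sketch (the function is illustrative, not part of this commit):

    use beacon_chain::blob_verification::AsBlock;
    use types::{EthSpec, Hash256, Slot};

    /// Illustrative helper: read common block fields without caring which wrapper we were handed.
    fn slot_and_parent<E: EthSpec, B: AsBlock<E>>(block: &B) -> (Slot, Hash256) {
        (block.slot(), block.parent_root())
    }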
impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
fn slot(&self) -> Slot {
match self {
BlockWrapper::Block(block) => block.slot(),
BlockWrapper::BlockAndBlob(block, _) => block.slot(),
}
}
fn epoch(&self) -> Epoch {
match self {
BlockWrapper::Block(block) => block.epoch(),
BlockWrapper::BlockAndBlob(block, _) => block.epoch(),
}
}
fn parent_root(&self) -> Hash256 {
match self {
BlockWrapper::Block(block) => block.parent_root(),
BlockWrapper::BlockAndBlob(block, _) => block.parent_root(),
}
}
fn state_root(&self) -> Hash256 {
match self {
BlockWrapper::Block(block) => block.state_root(),
BlockWrapper::BlockAndBlob(block, _) => block.state_root(),
}
}
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
match &self {
BlockWrapper::Block(block) => block.signed_block_header(),
BlockWrapper::BlockAndBlob(block, _) => block.signed_block_header(),
}
}
fn message(&self) -> BeaconBlockRef<E> {
match &self {
BlockWrapper::Block(block) => block.message(),
BlockWrapper::BlockAndBlob(block, _) => block.message(),
}
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self {
BlockWrapper::Block(block) => &block,
BlockWrapper::BlockAndBlob(block, _) => &block,
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
match &self {
BlockWrapper::Block(block) => block.clone(),
BlockWrapper::BlockAndBlob(block, _) => block.clone(),
}
}
}
impl<E: EthSpec> AsBlock<E> for &BlockWrapper<E> {
fn slot(&self) -> Slot {
match self {
BlockWrapper::Block(block) => block.slot(),
BlockWrapper::BlockAndBlob(block, _) => block.slot(),
}
}
fn epoch(&self) -> Epoch {
match self {
BlockWrapper::Block(block) => block.epoch(),
BlockWrapper::BlockAndBlob(block, _) => block.epoch(),
}
}
fn parent_root(&self) -> Hash256 {
match self {
BlockWrapper::Block(block) => block.parent_root(),
BlockWrapper::BlockAndBlob(block, _) => block.parent_root(),
}
}
fn state_root(&self) -> Hash256 {
match self {
BlockWrapper::Block(block) => block.state_root(),
BlockWrapper::BlockAndBlob(block, _) => block.state_root(),
}
}
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
match &self {
BlockWrapper::Block(block) => block.signed_block_header(),
BlockWrapper::BlockAndBlob(block, _) => block.signed_block_header(),
}
}
fn message(&self) -> BeaconBlockRef<E> {
match &self {
BlockWrapper::Block(block) => block.message(),
BlockWrapper::BlockAndBlob(block, _) => block.message(),
}
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self {
BlockWrapper::Block(block) => &block,
BlockWrapper::BlockAndBlob(block, _) => &block,
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
match &self {
BlockWrapper::Block(block) => block.clone(),
BlockWrapper::BlockAndBlob(block, _) => block.clone(),
}
}
}
impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
fn slot(&self) -> Slot {
match &self.0 {
AvailableBlockInner::Block(block) => block.slot(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.slot()
}
}
}
fn epoch(&self) -> Epoch {
match &self.0 {
AvailableBlockInner::Block(block) => block.epoch(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.epoch()
}
}
}
fn parent_root(&self) -> Hash256 {
match &self.0 {
AvailableBlockInner::Block(block) => block.parent_root(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.parent_root()
}
}
}
fn state_root(&self) -> Hash256 {
match &self.0 {
AvailableBlockInner::Block(block) => block.state_root(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.state_root()
}
}
}
fn signed_block_header(&self) -> SignedBeaconBlockHeader {
match &self.0 {
AvailableBlockInner::Block(block) => block.signed_block_header(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.signed_block_header()
}
}
}
fn message(&self) -> BeaconBlockRef<E> {
match &self.0 {
AvailableBlockInner::Block(block) => block.message(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.message()
}
}
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self.0 {
AvailableBlockInner::Block(block) => &block,
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
&block_sidecar_pair.beacon_block
}
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
match &self.0 {
AvailableBlockInner::Block(block) => block.clone(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.clone()
}
}
}
}

View File

@ -23,6 +23,10 @@
//! |
//! ▼
//! SignedBeaconBlock
//! |
//! ▼
//! AvailableBlock
//! |
//! |---------------
//! | |
//! | ▼
@ -42,17 +46,18 @@
//! END
//!
//! ```
use crate::blob_verification::{validate_blob_for_gossip, BlobError};
use crate::blob_verification::{
validate_blob_for_gossip, AsBlock, AvailableBlock, BlobError, BlockWrapper, IntoAvailableBlock,
IntoBlockWrapper,
};
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::execution_payload::{
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
};
use crate::kzg_utils;
use crate::snapshot_cache::PreProcessingSnapshot;
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
use crate::BlockError::BlobValidation;
use crate::{
beacon_chain::{
BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
@ -70,7 +75,6 @@ use safe_arith::ArithError;
use slog::{debug, error, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
use state_processing::{
block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
@ -88,7 +92,6 @@ use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
use task_executor::JoinHandle;
use tree_hash::TreeHash;
use types::signed_beacon_block::BlobReconstructionError;
use types::signed_block_and_blobs::BlockWrapper;
use types::ExecPayload;
use types::{
BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
@ -308,6 +311,12 @@ pub enum BlockError<T: EthSpec> {
BlobValidation(BlobError),
}
impl<T: EthSpec> From<BlobError> for BlockError<T> {
fn from(e: BlobError) -> Self {
Self::BlobValidation(e)
}
}
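This `From` impl is what lets the gossip path below write `validate_blob_for_gossip(block, block_root, chain)?` and have a `BlobError` land in `BlockError::BlobValidation` automatically. A minimal sketch of the same conversion (illustrative function; assumes the public re-exports used elsewhere in this commit):

    use beacon_chain::blob_verification::BlobError;
    use beacon_chain::BlockError;
    use types::EthSpec;

    /// Illustrative helper: propagate a blob failure into the block-processing error type via `?`.
    fn propagate_blob_error<E: EthSpec>(res: Result<(), BlobError>) -> Result<(), BlockError<E>> {
        res?;
        Ok(())
    }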
/// Returned when block validation failed due to some issue verifying
/// the execution payload.
#[derive(Debug)]
@ -589,12 +598,16 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
let mut consensus_context =
ConsensusContext::new(block.slot()).set_current_block_root(*block_root);
signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?;
signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?;
//FIXME(sean) batch kzg verification
let available_block = block.clone().into_available_block(*block_root, chain)?;
consensus_context = consensus_context.set_kzg_commitments_consistent(true);
// Save the block and its consensus context. The context will have had its proposer index
// and attesting indices filled in, which can be used to accelerate later block processing.
signature_verified_blocks.push(SignatureVerifiedBlock {
block: block.clone(),
block: available_block,
block_root: *block_root,
parent: None,
consensus_context,
@ -619,7 +632,7 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
#[derive(Derivative)]
#[derivative(Debug(bound = "T: BeaconChainTypes"))]
pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
pub block: BlockWrapper<T::EthSpec>,
pub block: AvailableBlock<T::EthSpec>,
pub block_root: Hash256,
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
consensus_context: ConsensusContext<T::EthSpec>,
@ -628,7 +641,7 @@ pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
/// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit
/// signatures) have been verified.
pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
block: BlockWrapper<T::EthSpec>,
block: AvailableBlock<T::EthSpec>,
block_root: Hash256,
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
consensus_context: ConsensusContext<T::EthSpec>,
@ -651,7 +664,7 @@ type PayloadVerificationHandle<E> =
/// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the
/// `BeaconChain` immediately after it is instantiated.
pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
pub block: BlockWrapper<T::EthSpec>,
pub block: AvailableBlock<T::EthSpec>,
pub block_root: Hash256,
pub state: BeaconState<T::EthSpec>,
pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
@ -675,8 +688,7 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
.map(|execution_pending| {
// Supply valid block to slasher.
if let Some(slasher) = chain.slasher.as_ref() {
slasher
.accept_block_header(execution_pending.block.block().signed_block_header());
slasher.accept_block_header(execution_pending.block.signed_block_header());
}
execution_pending
})
@ -707,7 +719,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
// we assume it will be transformed into a fully verified block. We *do* need to supply
// it to the slasher if an error occurs, because that's the end of this block's journey,
// and it could be a repeat proposal (a likely cause for slashing!).
let header = block.block().signed_block_header();
let header = block.signed_block_header();
Self::new_without_slasher_checks(block, chain).map_err(|e| {
process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e))
})
@ -720,7 +732,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
) -> Result<Self, BlockError<T::EthSpec>> {
// Ensure the block is the correct structure for the fork at `block.slot()`.
block
.block()
.as_block()
.fork_name(&chain.spec)
.map_err(BlockError::InconsistentFork)?;
@ -736,7 +748,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
});
}
let block_root = get_block_root(block.block());
let block_root = get_block_root(block.as_block());
// Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any.
check_block_against_anchor_slot(block.message(), chain)?;
@ -872,7 +884,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
let pubkey = pubkey_cache
.get(block.message().proposer_index() as usize)
.ok_or_else(|| BlockError::UnknownValidator(block.message().proposer_index()))?;
block.block().verify_signature(
block.as_block().verify_signature(
Some(block_root),
pubkey,
&fork,
@ -912,39 +924,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
// Validate the block's execution_payload (if any).
validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?;
if let Some(blobs_sidecar) = block.blobs(Some(block_root))? {
let kzg_commitments = block
.message()
.body()
.blob_kzg_commitments()
.map_err(|_| BlockError::BlobValidation(BlobError::KzgCommitmentMissing))?;
let transactions = block
.message()
.body()
.execution_payload_eip4844()
.map(|payload| payload.transactions())
.map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))?
.ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?;
validate_blob_for_gossip(
&blobs_sidecar,
kzg_commitments,
transactions,
block.slot(),
block_root,
chain,
)
.map_err(BlobValidation)?;
}
let available_block = validate_blob_for_gossip(block, block_root, chain)?;
// Having checked the proposer index and the block root we can cache them.
let consensus_context = ConsensusContext::new(block.slot())
let consensus_context = ConsensusContext::new(available_block.slot())
.set_current_block_root(block_root)
.set_proposer_index(block.message().proposer_index())
.set_blobs_sidecar_validated(true) // Validated in `validate_blob_for_gossip`
.set_blobs_verified_vs_txs(true);
.set_proposer_index(available_block.as_block().message().proposer_index())
.set_kzg_commitments_consistent(true);
Ok(Self {
block,
block: available_block,
block_root,
parent,
consensus_context,
@ -974,7 +963,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
}
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.block.block()
self.block.as_block()
}
}
@ -984,13 +973,13 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
///
/// Returns an error if the block is invalid, or if the block was unable to be verified.
pub fn new(
block: BlockWrapper<T::EthSpec>,
block: AvailableBlock<T::EthSpec>,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<Self, BlockError<T::EthSpec>> {
// Ensure the block is the correct structure for the fork at `block.slot()`.
block
.block()
.as_block()
.fork_name(&chain.spec)
.map_err(BlockError::InconsistentFork)?;
@ -1013,10 +1002,12 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);
let mut consensus_context =
ConsensusContext::new(block.slot()).set_current_block_root(block_root);
let mut consensus_context = ConsensusContext::new(block.slot())
.set_current_block_root(block_root)
// An `AvailableBlock` is passed in here, so we know this check has been run.
.set_kzg_commitments_consistent(true);
signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?;
signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?;
if signature_verifier.verify().is_ok() {
Ok(Self {
@ -1032,11 +1023,11 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
/// As for `new` above but producing `BlockSlashInfo`.
pub fn check_slashable(
block: BlockWrapper<T::EthSpec>,
block: AvailableBlock<T::EthSpec>,
block_root: Hash256,
chain: &BeaconChain<T>,
) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = block.block().signed_block_header();
let header = block.signed_block_header();
Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e))
}
@ -1067,7 +1058,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
// signature.
let mut consensus_context = from.consensus_context;
signature_verifier
.include_all_signatures_except_proposal(block.block(), &mut consensus_context)?;
.include_all_signatures_except_proposal(block.as_block(), &mut consensus_context)?;
if signature_verifier.verify().is_ok() {
Ok(Self {
@ -1086,7 +1077,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
from: GossipVerifiedBlock<T>,
chain: &BeaconChain<T>,
) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = from.block.block().signed_block_header();
let header = from.block.signed_block_header();
Self::from_gossip_verified_block(from, chain)
.map_err(|e| BlockSlashInfo::from_early_error(header, e))
}
@ -1104,7 +1095,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
chain: &Arc<BeaconChain<T>>,
notify_execution_layer: NotifyExecutionLayer,
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = self.block.block().signed_block_header();
let header = self.block.signed_block_header();
let (parent, block) = if let Some(parent) = self.parent {
(parent, self.block)
} else {
@ -1124,7 +1115,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
}
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
&self.block.block()
&self.block.as_block()
}
}
@ -1141,7 +1132,12 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
let block_root = check_block_relevancy(&self, block_root, chain)
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
SignatureVerifiedBlock::check_slashable(self.into(), block_root, chain)?
let header = self.signed_block_header();
let available_block = BlockWrapper::from(self)
.into_available_block(block_root, chain)
.map_err(|e| BlockSlashInfo::from_early_error(header, BlockError::BlobValidation(e)))?;
SignatureVerifiedBlock::check_slashable(available_block, block_root, chain)?
.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
}
@ -1150,7 +1146,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
}
}
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for BlockWrapper<T::EthSpec> {
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for AvailableBlock<T::EthSpec> {
/// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock`
/// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.
fn into_execution_pending_block_slashable(
@ -1160,16 +1156,15 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for BlockWrapper<T::EthSp
notify_execution_layer: NotifyExecutionLayer,
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
// Perform an early check to prevent wasting time on irrelevant blocks.
let block_root = check_block_relevancy(self.block(), block_root, chain).map_err(|e| {
BlockSlashInfo::SignatureNotChecked(self.block().signed_block_header(), e)
})?;
let block_root = check_block_relevancy(self.as_block(), block_root, chain)
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
}
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.block()
self.as_block()
}
}
@ -1182,7 +1177,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
///
/// Returns an error if the block is invalid, or if the block was unable to be verified.
pub fn from_signature_verified_components(
block: BlockWrapper<T::EthSpec>,
block: AvailableBlock<T::EthSpec>,
block_root: Hash256,
parent: PreProcessingSnapshot<T::EthSpec>,
mut consensus_context: ConsensusContext<T::EthSpec>,
@ -1212,7 +1207,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
// because it will revert finalization. Note that the finalized block is stored in fork
// choice, so we will not reject any child of the finalized block (this is relevant during
// genesis).
return Err(BlockError::ParentUnknown(block));
return Err(BlockError::ParentUnknown(block.into_block_wrapper()));
}
// Reject any block that exceeds our limit on skipped slots.
@ -1222,7 +1217,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
* Perform cursory checks to see if the block is even worth processing.
*/
check_block_relevancy(block.block(), block_root, chain)?;
check_block_relevancy(block.as_block(), block_root, chain)?;
// Define a future that will verify the execution payload with an execution engine.
//
@ -1471,13 +1466,13 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
&state,
&chain.log,
);
write_block(block.block(), block_root, &chain.log);
write_block(block.as_block(), block_root, &chain.log);
let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE);
if let Err(err) = per_block_processing(
&mut state,
block.block(),
block.as_block(),
// Signatures were verified earlier in this function.
BlockSignatureStrategy::NoVerification,
VerifyBlockRoot::True,
@ -1514,9 +1509,9 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
* Check to ensure the state root on the block matches the one we have calculated.
*/
if block.block().state_root() != state_root {
if block.state_root() != state_root {
return Err(BlockError::StateRootMismatch {
block: block.block().state_root(),
block: block.state_root(),
local: state_root,
});
}
@ -1559,58 +1554,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
}
drop(fork_choice);
/*
* Verify kzg proofs and kzg commitments against transactions if required
*/
//FIXME(sean) should this be prior to applying attestations to fork choice above? done in parallel?
if let Some(data_availability_boundary) = chain.data_availability_boundary() {
if block_slot.epoch(T::EthSpec::slots_per_epoch()) >= data_availability_boundary {
let sidecar = block
.blobs(Some(block_root))?
.ok_or(BlockError::BlobValidation(BlobError::MissingBlobs))?;
let kzg = chain.kzg.as_ref().ok_or(BlockError::BlobValidation(
BlobError::TrustedSetupNotInitialized,
))?;
let transactions = block
.message()
.body()
.execution_payload_eip4844()
.map(|payload| payload.transactions())
.map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))?
.ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?;
let kzg_commitments = block
.message()
.body()
.blob_kzg_commitments()
.map_err(|_| BlockError::BlobValidation(BlobError::KzgCommitmentMissing))?;
if !consensus_context.blobs_sidecar_validated() {
if !kzg_utils::validate_blobs_sidecar(
&kzg,
block.slot(),
block_root,
kzg_commitments,
&sidecar,
)
.map_err(|e| BlockError::BlobValidation(BlobError::KzgError(e)))?
{
return Err(BlockError::BlobValidation(BlobError::InvalidKzgProof));
}
}
if !consensus_context.blobs_verified_vs_txs()
&& verify_kzg_commitments_against_transactions::<T::EthSpec>(
transactions,
kzg_commitments,
)
//FIXME(sean) we should maybe just map this error so we have more info about the mismatch
.is_err()
{
return Err(BlockError::BlobValidation(
BlobError::TransactionCommitmentMismatch,
));
}
}
}
Ok(Self {
block,
block_root,
@ -1697,11 +1640,11 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>(
/// ## Warning
///
/// Taking a lock on the `chain.canonical_head.fork_choice` might cause a deadlock here.
pub fn check_block_is_finalized_descendant<T: BeaconChainTypes>(
pub fn check_block_is_finalized_descendant<T: BeaconChainTypes, B: IntoBlockWrapper<T::EthSpec>>(
chain: &BeaconChain<T>,
fork_choice: &BeaconForkChoice<T>,
block: BlockWrapper<T::EthSpec>,
) -> Result<BlockWrapper<T::EthSpec>, BlockError<T::EthSpec>> {
block: B,
) -> Result<B, BlockError<T::EthSpec>> {
if fork_choice.is_descendant_of_finalized(block.parent_root()) {
Ok(block)
} else {
@ -1722,7 +1665,7 @@ pub fn check_block_is_finalized_descendant<T: BeaconChainTypes>(
block_parent_root: block.parent_root(),
})
} else {
Err(BlockError::ParentUnknown(block))
Err(BlockError::ParentUnknown(block.into_block_wrapper()))
}
}
}
@ -1799,7 +1742,7 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
if let Some(proto_block) = chain
.canonical_head
.fork_choice_read_lock()
.get_block(&block.message().parent_root())
.get_block(&block.parent_root())
{
Ok((proto_block, block))
} else {
@ -1812,11 +1755,11 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
/// Returns `Err(BlockError::ParentUnknown)` if the parent is not found, or if an error occurs
/// whilst attempting the operation.
#[allow(clippy::type_complexity)]
fn load_parent<T: BeaconChainTypes>(
fn load_parent<T: BeaconChainTypes, B: IntoBlockWrapper<T::EthSpec>>(
block_root: Hash256,
block: BlockWrapper<T::EthSpec>,
block: B,
chain: &BeaconChain<T>,
) -> Result<(PreProcessingSnapshot<T::EthSpec>, BlockWrapper<T::EthSpec>), BlockError<T::EthSpec>> {
) -> Result<(PreProcessingSnapshot<T::EthSpec>, B), BlockError<T::EthSpec>> {
let spec = &chain.spec;
// Reject any block if its parent is not known to fork choice.
@ -1834,7 +1777,7 @@ fn load_parent<T: BeaconChainTypes>(
.fork_choice_read_lock()
.contains_block(&block.parent_root())
{
return Err(BlockError::ParentUnknown(block));
return Err(BlockError::ParentUnknown(block.into_block_wrapper()));
}
let block_delay = chain

View File

@ -1,3 +1,4 @@
use crate::blob_verification::AvailableBlock;
use crate::{
attester_cache::{CommitteeLengths, Error},
metrics,
@ -5,7 +6,6 @@ use crate::{
use parking_lot::RwLock;
use proto_array::Block as ProtoBlock;
use std::sync::Arc;
use store::signed_block_and_blobs::BlockWrapper;
use types::*;
pub struct CacheItem<E: EthSpec> {
@ -51,7 +51,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
pub fn add_head_block(
&self,
beacon_block_root: Hash256,
block: BlockWrapper<E>,
block: AvailableBlock<E>,
proto_block: ProtoBlock,
state: &BeaconState<E>,
spec: &ChainSpec,
@ -69,7 +69,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
},
};
let (block, blobs) = block.deconstruct(Some(beacon_block_root));
let (block, blobs) = block.deconstruct();
let item = CacheItem {
epoch,
committee_lengths,
@ -77,7 +77,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
source,
target,
block,
blobs: blobs.map_err(|_| Error::MissingBlobs)?,
blobs,
proto_block,
};

View File

@ -1,4 +1,5 @@
use crate::metrics;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper, IntoAvailableBlock};
use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
use beacon_chain::NotifyExecutionLayer;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
@ -9,7 +10,6 @@ use slot_clock::SlotClock;
use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;
use tree_hash::TreeHash;
use types::signed_block_and_blobs::BlockWrapper;
use types::{
AbstractExecPayload, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, FullPayload,
Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
@ -56,14 +56,27 @@ pub async fn publish_block<T: BeaconChainTypes>(
};
// Determine the delay after the start of the slot, register it with metrics.
let block = wrapped_block.block();
let block = wrapped_block.as_block();
let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay);
let available_block = match wrapped_block.into_available_block(block_root, &chain) {
Ok(available_block) => available_block,
Err(e) => {
let msg = format!("{:?}", e);
error!(
log,
"Invalid block provided to HTTP API";
"reason" => &msg
);
return Err(warp_utils::reject::broadcast_without_import(msg));
}
};
match chain
.process_block(
block_root,
wrapped_block.clone(),
available_block.clone(),
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
@ -75,14 +88,14 @@ pub async fn publish_block<T: BeaconChainTypes>(
"Valid block from HTTP API";
"block_delay" => ?delay,
"root" => format!("{}", root),
"proposer_index" => block.message().proposer_index(),
"slot" => block.slot(),
"proposer_index" => available_block.message().proposer_index(),
"slot" => available_block.slot(),
);
// Notify the validator monitor.
chain.validator_monitor.read().register_api_block(
seen_timestamp,
block.message(),
available_block.message(),
root,
&chain.slot_clock,
);
@ -104,7 +117,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
"Block was broadcast too late";
"msg" => "system may be overloaded, block likely to be orphaned",
"delay_ms" => delay.as_millis(),
"slot" => block.slot(),
"slot" => available_block.slot(),
"root" => ?root,
)
} else if delay >= delayed_threshold {
@ -113,7 +126,7 @@ pub async fn publish_block<T: BeaconChainTypes>(
"Block broadcast was delayed";
"msg" => "system may be overloaded, block may be orphaned",
"delay_ms" => delay.as_millis(),
"slot" => block.slot(),
"slot" => available_block.slot(),
"root" => ?root,
)
}
@ -124,8 +137,8 @@ pub async fn publish_block<T: BeaconChainTypes>(
info!(
log,
"Block from HTTP API already known";
"block" => ?block.canonical_root(),
"slot" => block.slot(),
"block" => ?block_root,
"slot" => available_block.slot(),
);
Ok(())
}
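Condensing the new HTTP API flow above (and the equivalent change in the sync worker), the import path now runs wrapper → `AvailableBlock` → `process_block`. A hedged sketch, assuming `process_block` resolves to the imported block root as it does at these call sites and that the listed items are publicly re-exported (the helper function is illustrative):

    use std::sync::Arc;

    use beacon_chain::blob_verification::{BlockWrapper, IntoAvailableBlock};
    use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer};
    use types::Hash256;

    /// Illustrative helper: run blob/data-availability checks, then import the block.
    async fn import_wrapped_block<T: BeaconChainTypes>(
        chain: Arc<BeaconChain<T>>,
        wrapper: BlockWrapper<T::EthSpec>,
        block_root: Hash256,
    ) -> Result<Hash256, BlockError<T::EthSpec>> {
        let available = wrapper
            .into_available_block(block_root, &chain)
            .map_err(BlockError::BlobValidation)?;
        chain
            .process_block(block_root, available, CountUnrealized::True, NotifyExecutionLayer::Yes)
            .await
    }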

View File

@ -267,9 +267,15 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlobsByRoot, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlobsByRange, Version::V1, Encoding::SSZSnappy),
];
if let ForkName::Eip4844 = self.fork_context.current_fork() {
supported_protocols.extend_from_slice(&[
ProtocolId::new(Protocol::BlobsByRoot, Version::V1, Encoding::SSZSnappy),
ProtocolId::new(Protocol::BlobsByRange, Version::V1, Encoding::SSZSnappy),
]);
}
if self.enable_light_client_server {
supported_protocols.push(ProtocolId::new(
Protocol::LightClientBootstrap,

View File

@ -40,6 +40,7 @@
use crate::sync::manager::BlockProcessType;
use crate::{metrics, service::NetworkMessage, sync::SyncMessage};
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::parking_lot::Mutex;
use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer};
use derivative::Derivative;
@ -62,7 +63,6 @@ use std::time::Duration;
use std::{cmp, collections::HashSet};
use task_executor::TaskExecutor;
use tokio::sync::mpsc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{
Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate,
ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,

View File

@ -13,6 +13,7 @@
use super::MAX_SCHEDULED_WORK_QUEUE_LEN;
use crate::metrics;
use crate::sync::manager::BlockProcessType;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use fnv::FnvHashMap;
use futures::task::Poll;
@ -29,7 +30,6 @@ use task_executor::TaskExecutor;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::time::error::Error as TimeError;
use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey};
use types::signed_block_and_blobs::BlockWrapper;
use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SubnetId};
const TASK_NAME: &str = "beacon_processor_reprocess_queue";

View File

@ -1,5 +1,6 @@
use crate::{metrics, service::NetworkMessage, sync::SyncMessage};
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::store::Error;
use beacon_chain::{
attestation_verification::{self, Error as AttnError, VerifiedAttestation},
@ -18,7 +19,6 @@ use ssz::Encode;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::hot_cold_store::HotColdDBError;
use tokio::sync::mpsc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{
Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof,
@ -726,7 +726,7 @@ impl<T: BeaconChainTypes> Worker<T> {
let block_root = if let Ok(verified_block) = &verification_result {
verified_block.block_root
} else {
block.block().canonical_root()
block.as_block().canonical_root()
};
// Write the time the block was observed into delay cache.

View File

@ -7,6 +7,7 @@ use crate::beacon_processor::DuplicateCache;
use crate::metrics;
use crate::sync::manager::{BlockProcessType, SyncMessage};
use crate::sync::{BatchProcessResult, ChainId};
use beacon_chain::blob_verification::{AsBlock, BlockWrapper, IntoAvailableBlock};
use beacon_chain::CountUnrealized;
use beacon_chain::{
BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
@ -16,7 +17,6 @@ use lighthouse_network::PeerAction;
use slog::{debug, error, info, warn};
use std::sync::Arc;
use tokio::sync::mpsc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{Epoch, Hash256, SignedBeaconBlock};
/// Id associated to a batch processing request, either a sync batch or a parent lookup.
@ -85,15 +85,23 @@ impl<T: BeaconChainTypes> Worker<T> {
}
};
let slot = block.slot();
let result = self
.chain
.process_block(
block_root,
block,
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
.await;
let available_block = block
.into_available_block(block_root, &self.chain)
.map_err(BlockError::BlobValidation);
let result = match available_block {
Ok(block) => {
self.chain
.process_block(
block_root,
block,
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
.await
}
Err(e) => Err(e),
};
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);

View File

@ -14,6 +14,7 @@ use crate::sync::network_context::SyncNetworkContext;
use crate::sync::range_sync::{
BatchConfig, BatchId, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState,
};
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use lighthouse_network::types::{BackFillState, NetworkGlobals};
use lighthouse_network::{PeerAction, PeerId};
@ -24,7 +25,6 @@ use std::collections::{
HashMap, HashSet,
};
use std::sync::Arc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{Epoch, EthSpec};
/// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of

View File

@ -2,6 +2,7 @@ use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::time::Duration;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::{BeaconChainTypes, BlockError};
use fnv::FnvHashMap;
use lighthouse_network::rpc::{RPCError, RPCResponseErrorCode};
@ -10,7 +11,6 @@ use lru_cache::LRUTimeCache;
use slog::{debug, error, trace, warn, Logger};
use smallvec::SmallVec;
use store::Hash256;
use types::signed_block_and_blobs::BlockWrapper;
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
use crate::metrics;

View File

@ -1,9 +1,9 @@
use super::RootBlockTuple;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::BeaconChainTypes;
use lighthouse_network::PeerId;
use store::Hash256;
use strum::IntoStaticStr;
use types::signed_block_and_blobs::BlockWrapper;
use crate::sync::block_lookups::ForceBlockRequest;
use crate::sync::{

View File

@ -1,4 +1,5 @@
use super::RootBlockTuple;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::get_block_root;
use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
use rand::seq::IteratorRandom;
@ -6,7 +7,6 @@ use ssz_types::VariableList;
use std::collections::HashSet;
use store::{EthSpec, Hash256};
use strum::IntoStaticStr;
use types::signed_block_and_blobs::BlockWrapper;
/// Object representing a single block lookup request.
#[derive(PartialEq, Eq)]
@ -115,7 +115,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
Some(block) => {
// Compute the block root using this specific function so that we can get timing
// metrics.
let block_root = get_block_root(block.block());
let block_root = get_block_root(block.as_block());
if block_root != self.hash {
// return an error and drop the block
// NOTE: we take this is as a download failure to prevent counting the
@ -205,7 +205,7 @@ impl<const MAX_ATTEMPTS: u8> slog::Value for SingleBlockRequest<MAX_ATTEMPTS> {
mod tests {
use super::*;
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use types::MinimalEthSpec as E;
use types::{MinimalEthSpec as E, SignedBeaconBlock};
fn rand_block() -> SignedBeaconBlock<E> {
let mut rng = XorShiftRng::from_seed([42; 16]);

View File

@ -1,6 +1,7 @@
use beacon_chain::blob_verification::BlockWrapper;
use std::{collections::VecDeque, sync::Arc};
use types::{signed_block_and_blobs::BlockWrapper, BlobsSidecar, EthSpec, SignedBeaconBlock};
use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};
#[derive(Debug, Default)]
pub struct BlocksAndBlobsRequestInfo<T: EthSpec> {
@ -46,21 +47,20 @@ impl<T: EthSpec> BlocksAndBlobsRequestInfo<T> {
.map(|sidecar| sidecar.beacon_block_slot == beacon_block.slot())
.unwrap_or(false)
{
let blobs_sidecar =
accumulated_sidecars.pop_front().ok_or("missing sidecar")?;
Ok(BlockWrapper::new_with_blobs(beacon_block, blobs_sidecar))
let blobs_sidecar = accumulated_sidecars.pop_front();
BlockWrapper::new(beacon_block, blobs_sidecar)
} else {
Ok(beacon_block.into())
BlockWrapper::new(beacon_block, None)
}
})
.collect::<Result<Vec<_>, _>>();
.collect::<Vec<_>>();
// If any accumulated sidecars were left unpaired, return an error.
if !accumulated_sidecars.is_empty() {
return Err("Received more sidecars than blocks");
}
pairs
Ok(pairs)
}
pub fn is_finished(&self) -> bool {

View File

@ -42,6 +42,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEven
use crate::service::NetworkMessage;
use crate::status::ToStatusMessage;
use crate::sync::range_sync::ByRangeRequestType;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState};
use futures::StreamExt;
use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
@ -55,7 +56,6 @@ use std::ops::Sub;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{
BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, Slot,
};

View File

@ -8,6 +8,7 @@ use crate::beacon_processor::WorkEvent;
use crate::service::{NetworkMessage, RequestId};
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::ForceBlockRequest;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState};
use fnv::FnvHashMap;
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
@ -17,7 +18,6 @@ use slog::{debug, trace, warn};
use std::collections::hash_map::Entry;
use std::sync::Arc;
use tokio::sync::mpsc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};
/// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id.

View File

@ -1,11 +1,11 @@
use crate::sync::manager::Id;
use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
use lighthouse_network::rpc::methods::BlocksByRangeRequest;
use lighthouse_network::PeerId;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::Sub;
use strum::Display;
use types::signed_block_and_blobs::BlockWrapper;
use types::{Epoch, EthSpec, Slot};
/// The number of times to retry a batch before it is considered failed.

View File

@ -3,6 +3,7 @@ use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEven
use crate::sync::{
manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
};
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{BeaconChainTypes, CountUnrealized};
use fnv::FnvHashMap;
use lighthouse_network::{PeerAction, PeerId};
@ -10,7 +11,6 @@ use rand::seq::SliceRandom;
use slog::{crit, debug, o, warn};
use std::collections::{btree_map::Entry, BTreeMap, HashSet};
use std::hash::{Hash, Hasher};
use types::signed_block_and_blobs::BlockWrapper;
use types::{Epoch, EthSpec, Hash256, Slot};
/// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of

View File

@ -47,6 +47,7 @@ use crate::status::ToStatusMessage;
use crate::sync::manager::Id;
use crate::sync::network_context::SyncNetworkContext;
use crate::sync::BatchProcessResult;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use lighthouse_network::rpc::GoodbyeReason;
use lighthouse_network::PeerId;
@ -55,7 +56,6 @@ use lru_cache::LRUTimeCache;
use slog::{crit, debug, trace, warn};
use std::collections::HashMap;
use std::sync::Arc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{Epoch, EthSpec, Hash256, Slot};
/// For how long we store failed finalized chains to prevent retries.

View File

@ -18,10 +18,8 @@ pub struct ConsensusContext<T: EthSpec> {
/// Cache of indexed attestations constructed during block processing.
indexed_attestations:
HashMap<(AttestationData, BitList<T::MaxValidatorsPerCommittee>), IndexedAttestation<T>>,
/// Whether `validate_blobs_sidecar` has successfully passed.
blobs_sidecar_validated: bool,
/// Whether `verify_kzg_commitments_against_transactions` has successfully passed.
blobs_verified_vs_txs: bool,
kzg_commitments_consistent: bool,
}
#[derive(Debug, PartialEq, Clone)]
@ -44,8 +42,7 @@ impl<T: EthSpec> ConsensusContext<T> {
proposer_index: None,
current_block_root: None,
indexed_attestations: HashMap::new(),
blobs_sidecar_validated: false,
blobs_verified_vs_txs: false,
kzg_commitments_consistent: false,
}
}
@ -162,21 +159,12 @@ impl<T: EthSpec> ConsensusContext<T> {
self.indexed_attestations.len()
}
pub fn set_blobs_sidecar_validated(mut self, blobs_sidecar_validated: bool) -> Self {
self.blobs_sidecar_validated = blobs_sidecar_validated;
pub fn set_kzg_commitments_consistent(mut self, kzg_commitments_consistent: bool) -> Self {
self.kzg_commitments_consistent = kzg_commitments_consistent;
self
}
pub fn set_blobs_verified_vs_txs(mut self, blobs_verified_vs_txs: bool) -> Self {
self.blobs_verified_vs_txs = blobs_verified_vs_txs;
self
}
pub fn blobs_sidecar_validated(&self) -> bool {
self.blobs_sidecar_validated
}
pub fn blobs_verified_vs_txs(&self) -> bool {
self.blobs_verified_vs_txs
pub fn kzg_commitments_consistent(&self) -> bool {
self.kzg_commitments_consistent
}
}
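The renamed flag is set builder-style by the callers shown earlier in `block_verification.rs`, so per-block processing can skip re-checking commitments already verified on gossip. A small sketch, assuming `ConsensusContext` is re-exported at the `state_processing` crate root and using `MainnetEthSpec` as a stand-in spec:

    use state_processing::ConsensusContext;
    use types::{MainnetEthSpec, Slot};

    fn main() {
        // Record that `verify_kzg_commitments_against_transactions` already passed on gossip.
        let ctxt = ConsensusContext::<MainnetEthSpec>::new(Slot::new(0))
            .set_kzg_commitments_consistent(true);
        // `process_blob_kzg_commitments` (below) consults this getter and returns early when true.
        assert!(ctxt.kzg_commitments_consistent());
    }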

View File

@ -180,10 +180,7 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
)?;
}
process_blob_kzg_commitments(block.body())?;
//FIXME(sean) add `validate_blobs_sidecar` (is_data_available) and only run it if the consensus
// context tells us it wasn't already run
process_blob_kzg_commitments(block.body(), ctxt)?;
Ok(())
}

View File

@ -1,4 +1,4 @@
use crate::BlockProcessingError;
use crate::{BlockProcessingError, ConsensusContext};
use eth2_hashing::hash_fixed;
use itertools::{EitherOrBoth, Itertools};
use safe_arith::SafeArith;
@ -11,13 +11,17 @@ use types::{
pub fn process_blob_kzg_commitments<T: EthSpec, Payload: AbstractExecPayload<T>>(
block_body: BeaconBlockBodyRef<T, Payload>,
ctxt: &mut ConsensusContext<T>,
) -> Result<(), BlockProcessingError> {
// Return early if this check has already been run.
if ctxt.kzg_commitments_consistent() {
return Ok(());
}
if let (Ok(payload), Ok(kzg_commitments)) = (
block_body.execution_payload(),
block_body.blob_kzg_commitments(),
) {
if let Some(transactions) = payload.transactions() {
//FIXME(sean) only run if this wasn't run in gossip (use consensus context)
if !verify_kzg_commitments_against_transactions::<T>(transactions, kzg_commitments)? {
return Err(BlockProcessingError::BlobVersionHashMismatch);
}

View File

@ -35,8 +35,12 @@ impl From<SignedBeaconBlockHash> for Hash256 {
}
}
#[derive(Debug)]
pub enum BlobReconstructionError {
BlobsMissing,
/// No blobs for the specified block where we would expect blobs.
UnavailableBlobs,
/// Blobs provided for a pre-Eip4844 fork.
InconsistentFork,
}
/// A `BeaconBlock` and a signature from its proposer.
@ -249,25 +253,24 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
/// Reconstructs an empty `BlobsSidecar`, using the given block root if provided, else calculates it.
/// If this block has kzg commitments, an error will be returned. If this block is from prior to the
/// Eip4844 fork, `None` will be returned.
/// Eip4844 fork, this will error.
pub fn reconstruct_empty_blobs(
&self,
block_root_opt: Option<Hash256>,
) -> Result<Option<BlobsSidecar<E>>, BlobReconstructionError> {
self.message()
) -> Result<BlobsSidecar<E>, BlobReconstructionError> {
let kzg_commitments = self
.message()
.body()
.blob_kzg_commitments()
.map(|kzg_commitments| {
if kzg_commitments.len() > 0 {
Err(BlobReconstructionError::BlobsMissing)
} else {
Ok(Some(BlobsSidecar::empty_from_parts(
block_root_opt.unwrap_or(self.canonical_root()),
self.slot(),
)))
}
})
.unwrap_or(Ok(None))
.map_err(|_| BlobReconstructionError::InconsistentFork)?;
if kzg_commitments.is_empty() {
Ok(BlobsSidecar::empty_from_parts(
block_root_opt.unwrap_or(self.canonical_root()),
self.slot(),
))
} else {
Err(BlobReconstructionError::UnavailableBlobs)
}
}
}
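With this change the method is fallible rather than `Option`-returning, so callers such as `AvailableBlock::new` surface `UnavailableBlobs` / `InconsistentFork` directly. A brief calling sketch (illustrative wrapper; assumes the `types` crate paths used elsewhere in this commit):

    use types::signed_beacon_block::BlobReconstructionError;
    use types::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock};

    /// Illustrative helper: ask a (blob-less) block for its canonical empty sidecar.
    fn empty_sidecar<E: EthSpec>(
        block: &SignedBeaconBlock<E>,
        block_root: Hash256,
    ) -> Result<BlobsSidecar<E>, BlobReconstructionError> {
        // Errors with `UnavailableBlobs` if the block commits to blobs we don't have,
        // or `InconsistentFork` for pre-Eip4844 blocks.
        block.reconstruct_empty_blobs(Some(block_root))
    }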

View File

@ -1,7 +1,4 @@
use crate::signed_beacon_block::BlobReconstructionError;
use crate::{
BlobsSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockEip4844, Slot,
};
use crate::{BlobsSidecar, EthSpec, SignedBeaconBlock, SignedBeaconBlockEip4844};
use derivative::Derivative;
use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, DecodeError};
@ -35,136 +32,3 @@ impl<T: EthSpec> SignedBeaconBlockAndBlobsSidecar<T> {
})
}
}
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`]. This newtype
/// wraps the `BlockWrapperInner` to ensure blobs cannot be accessed via an enum match. This would
/// circumvent empty blob reconstruction when accessing blobs.
#[derive(Clone, Debug, Derivative)]
#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
pub struct BlockWrapper<T: EthSpec>(BlockWrapperInner<T>);
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`].
#[derive(Clone, Debug, Derivative)]
#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
pub enum BlockWrapperInner<T: EthSpec> {
Block(Arc<SignedBeaconBlock<T>>),
BlockAndBlob(SignedBeaconBlockAndBlobsSidecar<T>),
}
impl<T: EthSpec> BlockWrapper<T> {
pub fn new(block: Arc<SignedBeaconBlock<T>>) -> Self {
Self(BlockWrapperInner::Block(block))
}
pub fn new_with_blobs(
beacon_block: Arc<SignedBeaconBlock<T>>,
blobs_sidecar: Arc<BlobsSidecar<T>>,
) -> Self {
Self(BlockWrapperInner::BlockAndBlob(
SignedBeaconBlockAndBlobsSidecar {
beacon_block,
blobs_sidecar,
},
))
}
pub fn slot(&self) -> Slot {
match &self.0 {
BlockWrapperInner::Block(block) => block.slot(),
BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.slot()
}
}
}
pub fn epoch(&self) -> Epoch {
match &self.0 {
BlockWrapperInner::Block(block) => block.epoch(),
BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.epoch()
}
}
}
pub fn block(&self) -> &SignedBeaconBlock<T> {
match &self.0 {
BlockWrapperInner::Block(block) => &block,
BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => &block_sidecar_pair.beacon_block,
}
}
pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<T>> {
match &self.0 {
BlockWrapperInner::Block(block) => block.clone(),
BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.clone()
}
}
}
pub fn blobs(
&self,
block_root: Option<Hash256>,
) -> Result<Option<Arc<BlobsSidecar<T>>>, BlobReconstructionError> {
match &self.0 {
BlockWrapperInner::Block(block) => block
.reconstruct_empty_blobs(block_root)
.map(|blob_opt| blob_opt.map(Arc::new)),
BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
Ok(Some(block_sidecar_pair.blobs_sidecar.clone()))
}
}
}
pub fn message(&self) -> crate::BeaconBlockRef<T> {
match &self.0 {
BlockWrapperInner::Block(block) => block.message(),
BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.message()
}
}
}
pub fn parent_root(&self) -> Hash256 {
self.block().parent_root()
}
pub fn deconstruct(
self,
block_root: Option<Hash256>,
) -> (
Arc<SignedBeaconBlock<T>>,
Result<Option<Arc<BlobsSidecar<T>>>, BlobReconstructionError>,
) {
match self.0 {
BlockWrapperInner::Block(block) => {
let blobs = block
.reconstruct_empty_blobs(block_root)
.map(|blob_opt| blob_opt.map(Arc::new));
(block, blobs)
}
BlockWrapperInner::BlockAndBlob(block_sidecar_pair) => {
let SignedBeaconBlockAndBlobsSidecar {
beacon_block,
blobs_sidecar,
} = block_sidecar_pair;
(beacon_block, Ok(Some(blobs_sidecar)))
}
}
}
}
impl<T: EthSpec> From<SignedBeaconBlock<T>> for BlockWrapper<T> {
fn from(block: SignedBeaconBlock<T>) -> Self {
BlockWrapper(BlockWrapperInner::Block(Arc::new(block)))
}
}
impl<T: EthSpec> From<Arc<SignedBeaconBlock<T>>> for BlockWrapper<T> {
fn from(block: Arc<SignedBeaconBlock<T>>) -> Self {
BlockWrapper(BlockWrapperInner::Block(block))
}
}
impl<T: EthSpec> From<SignedBeaconBlockAndBlobsSidecar<T>> for BlockWrapper<T> {
fn from(block: SignedBeaconBlockAndBlobsSidecar<T>) -> Self {
BlockWrapper(BlockWrapperInner::BlockAndBlob(block))
}
}