Fix todos in deneb code (#4547)

* Low-hanging fruit

* Remove unnecessary todo

I think it's fine not to handle this here, since the calling functions already handle the error.
There's no specific reason, imo, to handle it in this function as well.

* Rename BlobError to GossipBlobError

I feel this better signifies what the error is for: `BlobError` was only for failures when
gossip-verifying a blob. We cannot get this error when doing RPC validation.

* Remove the BlockError::BlobValidation variant

This error was only there to appease gossip verification before publish.
It's unclear how to peer-score this error since it cannot actually occur during any
block verification flow.
This commit introduces an additional error type, `BlockContentsError`, to better represent the
error (a standalone sketch follows at the end of this message).

* Add docs for peer scoring (or lack thereof) of AvailabilityCheck errors

* I do not see a non-convoluted way of doing this; it's okay to have some redundant code here

* Removing this to catch the failure red-handed

* Fix compilation

* Cannot be deleted because some tests assume the trait impl

It's also useful to have around for future testing, imo.

* Add some metrics and logs

* Only process the `Imported` variant in sync_methods

The only additional thing that might be useful for the other variants is logging. We can add that
later if required.

* Convert to `TryFrom`

Not really sure where this would be used, but this does what the comment says.
We could consider just returning the `Block` variant for a Deneb block in a `From` version
(see the `TryFrom` usage sketch at the end of this message).

* Unlikely to change now

* This is fine, as this is the max_rpc_size per RPC chunk (for blobs, it would be 128 KiB max; see the size check at the end of this message)

* Log the blob count instead of individual blobs; we can delete the log later if it becomes too noisy.

* Add block production blob verification timer

* Extend block_streamer test to Deneb

* Remove dbg statement

* Fix tests
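
A standalone sketch of the `BlockContentsError` shape referenced above (simplified stand-in error types, not the real Lighthouse definitions): block and blob gossip failures stay separate, and `.into()`/`?` pick the right wrapper via the `From` impls, so no catch-all `BlockError::BlobValidation` variant is needed.

```rust
// Simplified stand-ins; the real BlockError and GossipBlobError are generic over
// EthSpec and carry many more variants.
#[derive(Debug)]
struct BlockError(&'static str);
#[derive(Debug)]
struct GossipBlobError(&'static str);

#[derive(Debug)]
enum BlockContentsError {
    BlockError(BlockError),
    BlobError(GossipBlobError),
}

impl From<BlockError> for BlockContentsError {
    fn from(e: BlockError) -> Self {
        Self::BlockError(e)
    }
}

impl From<GossipBlobError> for BlockContentsError {
    fn from(e: GossipBlobError) -> Self {
        Self::BlobError(e)
    }
}

// Hypothetical gossip-verification step: either failure is surfaced through the
// combined error type without a BlockError::BlobValidation catch-all.
fn gossip_verify_contents(block_ok: bool, blobs_ok: bool) -> Result<(), BlockContentsError> {
    if !block_ok {
        return Err(BlockError("block failed gossip checks").into());
    }
    if !blobs_ok {
        return Err(GossipBlobError("blob failed gossip checks").into());
    }
    Ok(())
}

fn main() {
    // Prints: Err(BlobError(GossipBlobError("blob failed gossip checks")))
    println!("{:?}", gossip_verify_contents(true, false));
}
```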
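
A minimal usage sketch of the `From` → `TryFrom` change (again with simplified stand-in types rather than the real `SignedBeaconBlock`/`SignedBlockContents`): pre-Deneb blocks convert infallibly, while a Deneb block cannot, because its blob sidecars are not recoverable from the block alone.

```rust
// Simplified stand-in types for illustration only.
#[derive(Debug)]
enum SignedBeaconBlock {
    Capella,
    Deneb,
}

#[derive(Debug)]
enum SignedBlockContents {
    Block(SignedBeaconBlock),
    // A Deneb variant would also carry the signed blob sidecars.
}

impl TryFrom<SignedBeaconBlock> for SignedBlockContents {
    type Error = &'static str;

    fn try_from(block: SignedBeaconBlock) -> Result<Self, Self::Error> {
        match block {
            SignedBeaconBlock::Capella => Ok(SignedBlockContents::Block(block)),
            SignedBeaconBlock::Deneb => {
                Err("deneb block contents cannot be fully constructed from just the signed block")
            }
        }
    }
}

fn main() {
    assert!(SignedBlockContents::try_from(SignedBeaconBlock::Capella).is_ok());
    assert!(SignedBlockContents::try_from(SignedBeaconBlock::Deneb).is_err());
}
```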
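
A back-of-the-envelope check for the "128 KiB per blob" claim, assuming the mainnet preset values (4096 field elements of 32 bytes each; these numbers are not taken from this diff):

```rust
// Assumed mainnet preset values.
const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
const BYTES_PER_FIELD_ELEMENT: usize = 32;
const BYTES_PER_BLOB: usize = FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT; // 131_072 bytes = 128 KiB

// Mirrored from MAX_RPC_SIZE_POST_DENEB in the diff below.
const MAX_RPC_SIZE_POST_DENEB: usize = 10 * 1_048_576; // 10 MiB

fn main() {
    // Each RPC chunk carries at most one blob sidecar, so the 10 MiB per-chunk
    // limit leaves ample headroom even with sidecar metadata on top of the blob.
    assert!(BYTES_PER_BLOB < MAX_RPC_SIZE_POST_DENEB);
    println!("bytes per blob: {BYTES_PER_BLOB}");
}
```
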
Pawan Dhananjay 2023-08-03 17:27:03 -07:00 committed by GitHub
parent 9c75d8088d
commit a36e34eec4
20 changed files with 212 additions and 124 deletions


@ -715,21 +715,21 @@ mod tests {
} }
#[tokio::test] #[tokio::test]
async fn check_all_blocks_from_altair_to_capella() { async fn check_all_blocks_from_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8; let num_epochs = 8;
let bellatrix_fork_epoch = 2usize; let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize; let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch; let num_blocks_produced = num_epochs * slots_per_epoch;
let mut spec = test_spec::<MinimalEthSpec>(); let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0)); spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
//FIXME(sean) extend this to test deneb? spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
spec.deneb_fork_epoch = None;
let harness = get_harness(VALIDATOR_COUNT, spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone());
// go to bellatrix fork // go to bellatrix fork
harness harness
.extend_slots(bellatrix_fork_epoch * slots_per_epoch) .extend_slots(bellatrix_fork_epoch * slots_per_epoch)
@ -836,19 +836,19 @@ mod tests {
} }
#[tokio::test] #[tokio::test]
async fn check_fallback_altair_to_capella() { async fn check_fallback_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8; let num_epochs = 8;
let bellatrix_fork_epoch = 2usize; let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize; let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch; let num_blocks_produced = num_epochs * slots_per_epoch;
let mut spec = test_spec::<MinimalEthSpec>(); let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0)); spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64)); spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
//FIXME(sean) extend this to test deneb? spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
spec.deneb_fork_epoch = None;
let harness = get_harness(VALIDATOR_COUNT, spec); let harness = get_harness(VALIDATOR_COUNT, spec);


@ -8,7 +8,7 @@ use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}
use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
use crate::beacon_proposer_cache::BeaconProposerCache; use crate::beacon_proposer_cache::BeaconProposerCache;
use crate::blob_cache::BlobCache; use crate::blob_cache::BlobCache;
use crate::blob_verification::{self, BlobError, GossipVerifiedBlob}; use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob};
use crate::block_times_cache::BlockTimesCache; use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::POS_PANDA_BANNER; use crate::block_verification::POS_PANDA_BANNER;
use crate::block_verification::{ use crate::block_verification::{
@ -2015,7 +2015,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self: &Arc<Self>, self: &Arc<Self>,
blob_sidecar: SignedBlobSidecar<T::EthSpec>, blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet_id: u64, subnet_id: u64,
) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> { ) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self) blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self)
} }
@ -2834,7 +2834,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
notify_execution_layer, notify_execution_layer,
)?; )?;
//TODO(sean) error handling?
publish_fn()?; publish_fn()?;
let executed_block = self let executed_block = self
@ -3216,10 +3215,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if let Some(blobs) = blobs { if let Some(blobs) = blobs {
if !blobs.is_empty() { if !blobs.is_empty() {
//FIXME(sean) using this for debugging for now
info!( info!(
self.log, "Writing blobs to store"; self.log, "Writing blobs to store";
"block_root" => ?block_root "block_root" => %block_root,
"count" => blobs.len(),
); );
ops.push(StoreOp::PutBlobs(block_root, blobs)); ops.push(StoreOp::PutBlobs(block_root, blobs));
} }
@ -4948,8 +4947,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let (mut block, _) = block.deconstruct(); let (mut block, _) = block.deconstruct();
*block.state_root_mut() = state_root; *block.state_root_mut() = state_root;
//FIXME(sean) let blobs_verification_timer =
// - add a new timer for processing here metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES);
if let (Some(blobs), Some(proofs)) = (blobs_opt, proofs_opt) { if let (Some(blobs), Some(proofs)) = (blobs_opt, proofs_opt) {
let kzg = self let kzg = self
.kzg .kzg
@ -5012,6 +5011,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.put(beacon_block_root, blob_sidecars); .put(beacon_block_root, blob_sidecars);
} }
drop(blobs_verification_timer);
metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES);
trace!( trace!(


@ -21,8 +21,9 @@ use types::{
Hash256, KzgCommitment, RelativeEpoch, SignedBlobSidecar, Slot, Hash256, KzgCommitment, RelativeEpoch, SignedBlobSidecar, Slot,
}; };
/// An error occurred while validating a gossip blob.
#[derive(Debug)] #[derive(Debug)]
pub enum BlobError<T: EthSpec> { pub enum GossipBlobError<T: EthSpec> {
/// The blob sidecar is from a slot that is later than the current slot (with respect to the /// The blob sidecar is from a slot that is later than the current slot (with respect to the
/// gossip clock disparity). /// gossip clock disparity).
/// ///
@ -109,15 +110,30 @@ pub enum BlobError<T: EthSpec> {
}, },
} }
impl<T: EthSpec> From<BeaconChainError> for BlobError<T> { impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
fn from(e: BeaconChainError) -> Self { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
BlobError::BeaconChainError(e) match self {
GossipBlobError::BlobParentUnknown(blob_sidecar) => {
write!(
f,
"BlobParentUnknown(parent_root:{})",
blob_sidecar.block_parent_root
)
}
other => write!(f, "{:?}", other),
}
} }
} }
impl<T: EthSpec> From<BeaconStateError> for BlobError<T> { impl<T: EthSpec> From<BeaconChainError> for GossipBlobError<T> {
fn from(e: BeaconChainError) -> Self {
GossipBlobError::BeaconChainError(e)
}
}
impl<T: EthSpec> From<BeaconStateError> for GossipBlobError<T> {
fn from(e: BeaconStateError) -> Self { fn from(e: BeaconStateError) -> Self {
BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e)) GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
} }
} }
@ -137,7 +153,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
pub fn new( pub fn new(
blob: SignedBlobSidecar<T::EthSpec>, blob: SignedBlobSidecar<T::EthSpec>,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<Self, BlobError<T::EthSpec>> { ) -> Result<Self, GossipBlobError<T::EthSpec>> {
let blob_index = blob.message.index; let blob_index = blob.message.index;
validate_blob_sidecar_for_gossip(blob, blob_index, chain) validate_blob_sidecar_for_gossip(blob, blob_index, chain)
} }
@ -162,7 +178,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>, signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet: u64, subnet: u64,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> { ) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
let blob_slot = signed_blob_sidecar.message.slot; let blob_slot = signed_blob_sidecar.message.slot;
let blob_index = signed_blob_sidecar.message.index; let blob_index = signed_blob_sidecar.message.index;
let block_parent_root = signed_blob_sidecar.message.block_parent_root; let block_parent_root = signed_blob_sidecar.message.block_parent_root;
@ -171,7 +187,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
// Verify that the blob_sidecar was received on the correct subnet. // Verify that the blob_sidecar was received on the correct subnet.
if blob_index != subnet { if blob_index != subnet {
return Err(BlobError::InvalidSubnet { return Err(GossipBlobError::InvalidSubnet {
expected: blob_index, expected: blob_index,
received: subnet, received: subnet,
}); });
@ -183,7 +199,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?; .ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot { if blob_slot > latest_permissible_slot {
return Err(BlobError::FutureSlot { return Err(GossipBlobError::FutureSlot {
message_slot: blob_slot, message_slot: blob_slot,
latest_permissible_slot, latest_permissible_slot,
}); });
@ -196,7 +212,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.epoch .epoch
.start_slot(T::EthSpec::slots_per_epoch()); .start_slot(T::EthSpec::slots_per_epoch());
if blob_slot <= latest_finalized_slot { if blob_slot <= latest_finalized_slot {
return Err(BlobError::PastFinalizedSlot { return Err(GossipBlobError::PastFinalizedSlot {
blob_slot, blob_slot,
finalized_slot: latest_finalized_slot, finalized_slot: latest_finalized_slot,
}); });
@ -207,9 +223,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.observed_blob_sidecars .observed_blob_sidecars
.read() .read()
.is_known(&signed_blob_sidecar.message) .is_known(&signed_blob_sidecar.message)
.map_err(|e| BlobError::BeaconChainError(e.into()))? .map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{ {
return Err(BlobError::RepeatBlob { return Err(GossipBlobError::RepeatBlob {
proposer: blob_proposer_index, proposer: blob_proposer_index,
slot: blob_slot, slot: blob_slot,
index: blob_index, index: blob_index,
@ -224,13 +240,15 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.get_block(&block_parent_root) .get_block(&block_parent_root)
{ {
if parent_block.slot >= blob_slot { if parent_block.slot >= blob_slot {
return Err(BlobError::BlobIsNotLaterThanParent { return Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot, blob_slot,
parent_slot: parent_block.slot, parent_slot: parent_block.slot,
}); });
} }
} else { } else {
return Err(BlobError::BlobParentUnknown(signed_blob_sidecar.message)); return Err(GossipBlobError::BlobParentUnknown(
signed_blob_sidecar.message,
));
} }
// Note: We check that the proposer_index matches against the shuffling first to avoid // Note: We check that the proposer_index matches against the shuffling first to avoid
@ -301,9 +319,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
let parent_block = chain let parent_block = chain
.get_blinded_block(&block_parent_root) .get_blinded_block(&block_parent_root)
.map_err(BlobError::BeaconChainError)? .map_err(GossipBlobError::BeaconChainError)?
.ok_or_else(|| { .ok_or_else(|| {
BlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root)) GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root))
})?; })?;
let mut parent_state = chain let mut parent_state = chain
@ -338,7 +356,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
}; };
if proposer_index != blob_proposer_index as usize { if proposer_index != blob_proposer_index as usize {
return Err(BlobError::ProposerIndexMismatch { return Err(GossipBlobError::ProposerIndexMismatch {
sidecar: blob_proposer_index as usize, sidecar: blob_proposer_index as usize,
local: proposer_index, local: proposer_index,
}); });
@ -350,11 +368,11 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.validator_pubkey_cache .validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.map_err(BlobError::BeaconChainError)?; .map_err(GossipBlobError::BeaconChainError)?;
let pubkey = pubkey_cache let pubkey = pubkey_cache
.get(proposer_index) .get(proposer_index)
.ok_or_else(|| BlobError::UnknownValidator(proposer_index as u64))?; .ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?;
signed_blob_sidecar.verify_signature( signed_blob_sidecar.verify_signature(
None, None,
@ -366,7 +384,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
}; };
if !signature_is_valid { if !signature_is_valid {
return Err(BlobError::ProposerSignatureInvalid); return Err(GossipBlobError::ProposerSignatureInvalid);
} }
// Now the signature is valid, store the proposal so we don't accept another blob sidecar // Now the signature is valid, store the proposal so we don't accept another blob sidecar
@ -384,9 +402,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.observed_blob_sidecars .observed_blob_sidecars
.write() .write()
.observe_sidecar(&signed_blob_sidecar.message) .observe_sidecar(&signed_blob_sidecar.message)
.map_err(|e| BlobError::BeaconChainError(e.into()))? .map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{ {
return Err(BlobError::RepeatBlob { return Err(GossipBlobError::RepeatBlob {
proposer: proposer_index as u64, proposer: proposer_index as u64,
slot: blob_slot, slot: blob_slot,
index: blob_index, index: blob_index,
@ -412,13 +430,12 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
/// ///
/// Note: This is a copy of the `block_verification::cheap_state_advance_to_obtain_committees` to return /// Note: This is a copy of the `block_verification::cheap_state_advance_to_obtain_committees` to return
/// a BlobError error type instead. /// a BlobError error type instead.
/// TODO(pawan): try to unify the 2 functions.
fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
state: &'a mut BeaconState<E>, state: &'a mut BeaconState<E>,
state_root_opt: Option<Hash256>, state_root_opt: Option<Hash256>,
blob_slot: Slot, blob_slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Cow<'a, BeaconState<E>>, BlobError<E>> { ) -> Result<Cow<'a, BeaconState<E>>, GossipBlobError<E>> {
let block_epoch = blob_slot.epoch(E::slots_per_epoch()); let block_epoch = blob_slot.epoch(E::slots_per_epoch());
if state.current_epoch() == block_epoch { if state.current_epoch() == block_epoch {
@ -429,7 +446,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
Ok(Cow::Borrowed(state)) Ok(Cow::Borrowed(state))
} else if state.slot() > blob_slot { } else if state.slot() > blob_slot {
Err(BlobError::BlobIsNotLaterThanParent { Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot, blob_slot,
parent_slot: state.slot(), parent_slot: state.slot(),
}) })
@ -440,7 +457,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
// Advance the state into the same epoch as the block. Use the "partial" method since state // Advance the state into the same epoch as the block. Use the "partial" method since state
// roots are not important for proposer/attester shuffling. // roots are not important for proposer/attester shuffling.
partial_state_advance(&mut state, state_root_opt, target_slot, spec) partial_state_advance(&mut state, state_root_opt, target_slot, spec)
.map_err(|e| BlobError::BeaconChainError(BeaconChainError::from(e)))?; .map_err(|e| GossipBlobError::BeaconChainError(BeaconChainError::from(e)))?;
state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Previous, spec)?;
state.build_committee_cache(RelativeEpoch::Current, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?;


@ -48,9 +48,9 @@
// returned alongside. // returned alongside.
#![allow(clippy::result_large_err)] #![allow(clippy::result_large_err)]
use crate::blob_verification::{BlobError, GossipVerifiedBlob}; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use crate::block_verification_types::{ use crate::block_verification_types::{
AsBlock, BlockImportData, GossipVerifiedBlockContents, RpcBlock, AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock,
}; };
use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::eth1_finalization_cache::Eth1FinalizationData;
@ -292,19 +292,23 @@ pub enum BlockError<T: EthSpec> {
/// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so /// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
/// we penalise them with a mid-tolerance error. /// we penalise them with a mid-tolerance error.
Slashable, Slashable,
//TODO(sean) peer scoring docs
/// A blob alone failed validation.
BlobValidation(BlobError<T>),
/// The block and blob together failed validation. /// The block and blob together failed validation.
///
/// ## Peer scoring
///
/// This error implies that the block satisfied all block validity conditions except consistency
/// with the corresponding blob that we received over gossip/rpc. This is because availability
/// checks are always done after all other checks are completed.
/// This implies that either:
/// 1. The block proposer is faulty
/// 2. We received the blob over rpc and it is invalid (inconsistent w.r.t the block).
/// 3. It is an internal error
/// For all these cases, we cannot penalize the peer that gave us the block.
/// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob.
/// https://github.com/sigp/lighthouse/issues/4546
AvailabilityCheck(AvailabilityCheckError), AvailabilityCheck(AvailabilityCheckError),
} }
impl<T: EthSpec> From<BlobError<T>> for BlockError<T> {
fn from(e: BlobError<T>) -> Self {
Self::BlobValidation(e)
}
}
impl<T: EthSpec> From<AvailabilityCheckError> for BlockError<T> { impl<T: EthSpec> From<AvailabilityCheckError> for BlockError<T> {
fn from(e: AvailabilityCheckError) -> Self { fn from(e: AvailabilityCheckError) -> Self {
Self::AvailabilityCheck(e) Self::AvailabilityCheck(e)
@ -662,7 +666,7 @@ pub trait IntoGossipVerifiedBlockContents<T: BeaconChainTypes>: Sized {
fn into_gossip_verified_block( fn into_gossip_verified_block(
self, self,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>>; ) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>>;
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec>; fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec>;
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>>; fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>>;
} }
@ -671,7 +675,7 @@ impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for GossipVerifiedB
fn into_gossip_verified_block( fn into_gossip_verified_block(
self, self,
_chain: &BeaconChain<T>, _chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>> { ) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
Ok(self) Ok(self)
} }
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> { fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
@ -693,16 +697,16 @@ impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for SignedBlockCont
fn into_gossip_verified_block( fn into_gossip_verified_block(
self, self,
chain: &BeaconChain<T>, chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>> { ) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
let (block, blobs) = self.deconstruct(); let (block, blobs) = self.deconstruct();
let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?; let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?;
let gossip_verified_blobs = blobs let gossip_verified_blobs = blobs
.map(|blobs| { .map(|blobs| {
Ok::<_, BlobError<T::EthSpec>>(VariableList::from( Ok::<_, GossipBlobError<T::EthSpec>>(VariableList::from(
blobs blobs
.into_iter() .into_iter()
.map(|blob| GossipVerifiedBlob::new(blob, chain)) .map(|blob| GossipVerifiedBlob::new(blob, chain))
.collect::<Result<Vec<_>, BlobError<T::EthSpec>>>()?, .collect::<Result<Vec<_>, GossipBlobError<T::EthSpec>>>()?,
)) ))
}) })
.transpose()?; .transpose()?;
@ -1139,7 +1143,6 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
} }
} }
//TODO(sean) can this be deleted
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> { impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> {
/// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock` /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock`
/// and then using that implementation of `IntoExecutionPendingBlock` to complete verification. /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.


@ -1,4 +1,5 @@
use crate::blob_verification::GossipVerifiedBlobList; use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList};
use crate::block_verification::BlockError;
use crate::data_availability_checker::AvailabilityCheckError; use crate::data_availability_checker::AvailabilityCheckError;
pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::eth1_finalization_cache::Eth1FinalizationData;
@ -249,6 +250,37 @@ pub struct BlockImportData<E: EthSpec> {
pub type GossipVerifiedBlockContents<T> = pub type GossipVerifiedBlockContents<T> =
(GossipVerifiedBlock<T>, Option<GossipVerifiedBlobList<T>>); (GossipVerifiedBlock<T>, Option<GossipVerifiedBlobList<T>>);
#[derive(Debug)]
pub enum BlockContentsError<T: EthSpec> {
BlockError(BlockError<T>),
BlobError(GossipBlobError<T>),
}
impl<T: EthSpec> From<BlockError<T>> for BlockContentsError<T> {
fn from(value: BlockError<T>) -> Self {
Self::BlockError(value)
}
}
impl<T: EthSpec> From<GossipBlobError<T>> for BlockContentsError<T> {
fn from(value: GossipBlobError<T>) -> Self {
Self::BlobError(value)
}
}
impl<T: EthSpec> std::fmt::Display for BlockContentsError<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BlockContentsError::BlockError(err) => {
write!(f, "BlockError({})", err)
}
BlockContentsError::BlobError(err) => {
write!(f, "BlobError({})", err)
}
}
}
}
/// Trait for common block operations. /// Trait for common block operations.
pub trait AsBlock<E: EthSpec> { pub trait AsBlock<E: EthSpec> {
fn slot(&self) -> Slot; fn slot(&self) -> Slot;


@ -45,7 +45,6 @@ pub enum AvailabilityCheckError {
KzgCommitmentMismatch { KzgCommitmentMismatch {
blob_index: u64, blob_index: u64,
}, },
IncorrectFork,
BlobIndexInvalid(u64), BlobIndexInvalid(u64),
UnorderedBlobs { UnorderedBlobs {
blob_index: u64, blob_index: u64,


@ -30,19 +30,6 @@ pub fn validate_blobs<T: EthSpec>(
blobs: &[Blob<T>], blobs: &[Blob<T>],
kzg_proofs: &[KzgProof], kzg_proofs: &[KzgProof],
) -> Result<bool, KzgError> { ) -> Result<bool, KzgError> {
// TODO(sean) batch verification fails with a single element, it's unclear to me why
if blobs.len() == 1 && kzg_proofs.len() == 1 && expected_kzg_commitments.len() == 1 {
if let (Some(blob), Some(kzg_proof), Some(kzg_commitment)) = (
blobs.get(0),
kzg_proofs.get(0),
expected_kzg_commitments.get(0),
) {
return validate_blob::<T>(kzg, blob.clone(), *kzg_commitment, *kzg_proof);
} else {
return Ok(false);
}
}
let blobs = blobs let blobs = blobs
.iter() .iter()
.map(|blob| ssz_blob_to_crypto_blob::<T>(blob.clone())) // Avoid this clone .map(|blob| ssz_blob_to_crypto_blob::<T>(blob.clone())) // Avoid this clone


@ -1036,6 +1036,10 @@ lazy_static! {
pub static ref KZG_VERIFICATION_BATCH_TIMES: Result<Histogram> = pub static ref KZG_VERIFICATION_BATCH_TIMES: Result<Histogram> =
try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification"); try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification");
pub static ref BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
"beacon_block_production_blobs_verification_seconds",
"Time taken to verify blobs against commitments and creating BlobSidecar objects in block production"
);
/* /*
* Availability related metrics * Availability related metrics
*/ */


@ -86,7 +86,7 @@ pub async fn gossip_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
); );
} }
@ -229,7 +229,7 @@ pub async fn consensus_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
); );
} }
@ -443,7 +443,7 @@ pub async fn equivocation_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
); );
} }
@ -515,7 +515,7 @@ pub async fn equivocation_consensus_early_equivocation() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string())
); );
} }
@ -741,7 +741,7 @@ pub async fn blinded_gossip_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
); );
} }
@ -886,7 +886,7 @@ pub async fn blinded_consensus_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
); );
} }
@ -1035,7 +1035,7 @@ pub async fn blinded_equivocation_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
); );
} }
@ -1103,7 +1103,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!( assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string()) matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string())
); );
} }


@ -2393,7 +2393,8 @@ impl ApiTester {
.0; .0;
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
let signed_block_contents = SignedBlockContents::from(signed_block.clone()); let signed_block_contents =
SignedBlockContents::try_from(signed_block.clone()).unwrap();
self.client self.client
.post_beacon_blocks(&signed_block_contents) .post_beacon_blocks(&signed_block_contents)


@ -134,7 +134,6 @@ pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
/// The maximum bytes that can be sent across the RPC post-merge. /// The maximum bytes that can be sent across the RPC post-merge.
pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M
// FIXME(sean) should this be increased to account for blobs?
pub(crate) const MAX_RPC_SIZE_POST_DENEB: usize = 10 * 1_048_576; // 10M pub(crate) const MAX_RPC_SIZE_POST_DENEB: usize = 10 * 1_048_576; // 10M
/// The protocol prefix the RPC protocol id. /// The protocol prefix the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";


@ -5,11 +5,12 @@ use crate::{
sync::SyncMessage, sync::SyncMessage,
}; };
use beacon_chain::blob_verification::{BlobError, GossipVerifiedBlob}; use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use beacon_chain::block_verification_types::AsBlock; use beacon_chain::block_verification_types::AsBlock;
use beacon_chain::store::Error; use beacon_chain::store::Error;
use beacon_chain::{ use beacon_chain::{
attestation_verification::{self, Error as AttnError, VerifiedAttestation}, attestation_verification::{self, Error as AttnError, VerifiedAttestation},
data_availability_checker::AvailabilityCheckError,
light_client_finality_update_verification::Error as LightClientFinalityUpdateError, light_client_finality_update_verification::Error as LightClientFinalityUpdateError,
light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError, light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError,
observed_operations::ObservationOutcome, observed_operations::ObservationOutcome,
@ -598,7 +599,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
} }
} }
// TODO: docs
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub async fn process_gossip_blob( pub async fn process_gossip_blob(
self: &Arc<Self>, self: &Arc<Self>,
@ -635,7 +635,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
} }
Err(err) => { Err(err) => {
match err { match err {
BlobError::BlobParentUnknown(blob) => { GossipBlobError::BlobParentUnknown(blob) => {
debug!( debug!(
self.log, self.log,
"Unknown parent hash for blob"; "Unknown parent hash for blob";
@ -645,11 +645,11 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
); );
self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob));
} }
BlobError::ProposerSignatureInvalid GossipBlobError::ProposerSignatureInvalid
| BlobError::UnknownValidator(_) | GossipBlobError::UnknownValidator(_)
| BlobError::ProposerIndexMismatch { .. } | GossipBlobError::ProposerIndexMismatch { .. }
| BlobError::BlobIsNotLaterThanParent { .. } | GossipBlobError::BlobIsNotLaterThanParent { .. }
| BlobError::InvalidSubnet { .. } => { | GossipBlobError::InvalidSubnet { .. } => {
warn!( warn!(
self.log, self.log,
"Could not verify blob sidecar for gossip. Rejecting the blob sidecar"; "Could not verify blob sidecar for gossip. Rejecting the blob sidecar";
@ -670,10 +670,10 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
MessageAcceptance::Reject, MessageAcceptance::Reject,
); );
} }
BlobError::FutureSlot { .. } GossipBlobError::FutureSlot { .. }
| BlobError::BeaconChainError(_) | GossipBlobError::BeaconChainError(_)
| BlobError::RepeatBlob { .. } | GossipBlobError::RepeatBlob { .. }
| BlobError::PastFinalizedSlot { .. } => { | GossipBlobError::PastFinalizedSlot { .. } => {
warn!( warn!(
self.log, self.log,
"Could not verify blob sidecar for gossip. Ignoring the blob sidecar"; "Could not verify blob sidecar for gossip. Ignoring the blob sidecar";
@ -710,11 +710,24 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
let blob_slot = verified_blob.slot(); let blob_slot = verified_blob.slot();
let blob_index = verified_blob.id().index; let blob_index = verified_blob.id().index;
match self.chain.process_blob(verified_blob).await { match self.chain.process_blob(verified_blob).await {
Ok(AvailabilityProcessingStatus::Imported(_hash)) => { Ok(AvailabilityProcessingStatus::Imported(hash)) => {
//TODO(sean) add metrics and logging // Note: Reusing block imported metric here
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL);
info!(
self.log,
"Gossipsub blob processed, imported fully available block";
"hash" => %hash
);
self.chain.recompute_head_at_current_slot().await; self.chain.recompute_head_at_current_slot().await;
} }
Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_hash)) => { Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_hash)) => {
debug!(
self.log,
"Missing block components for gossip verified blob";
"slot" => %blob_slot,
"blob_index" => %blob_index,
"blob_root" => %blob_root,
);
self.send_sync_message(SyncMessage::MissingGossipBlockComponents( self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
slot, peer_id, block_hash, slot, peer_id, block_hash,
)); ));
@ -954,14 +967,12 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
); );
return None; return None;
} }
Err(e @ BlockError::BlobValidation(_)) | Err(e @ BlockError::AvailabilityCheck(_)) => { // Note: This error variant cannot be reached when doing gossip validation
warn!(self.log, "Could not verify block against known blobs in gossip. Rejecting the block"; // as we do not do availability checks here.
"error" => %e); Err(e @ BlockError::AvailabilityCheck(_)) => {
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); crit!(self.log, "Internal block gossip validation error. Availability check during
self.gossip_penalize_peer( gossip validation";
peer_id, "error" => %e
PeerAction::LowToleranceError,
"gossip_blob_low",
); );
return None; return None;
} }
@ -1142,6 +1153,43 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
"error" => %e "error" => %e
); );
} }
Err(BlockError::AvailabilityCheck(err)) => {
match err {
AvailabilityCheckError::KzgNotInitialized
| AvailabilityCheckError::SszTypes(_)
| AvailabilityCheckError::MissingBlobs
| AvailabilityCheckError::StoreError(_)
| AvailabilityCheckError::DecodeError(_) => {
crit!(
self.log,
"Internal availability check error";
"error" => ?err,
);
}
AvailabilityCheckError::Kzg(_)
| AvailabilityCheckError::KzgVerificationFailed
| AvailabilityCheckError::NumBlobsMismatch { .. }
| AvailabilityCheckError::TxKzgCommitmentMismatch(_)
| AvailabilityCheckError::BlobIndexInvalid(_)
| AvailabilityCheckError::UnorderedBlobs { .. }
| AvailabilityCheckError::BlockBlobRootMismatch { .. }
| AvailabilityCheckError::BlockBlobSlotMismatch { .. }
| AvailabilityCheckError::KzgCommitmentMismatch { .. } => {
// Note: we cannot penalize the peer that sent us the block
// over gossip here because these errors imply either an issue
// with:
// 1. Blobs we have received over non-gossip sources
// (from potentially other peers)
// 2. The proposer being malicious and sending inconsistent
// blocks and blobs.
warn!(
self.log,
"Received invalid blob or malicious proposer";
"error" => ?err
);
}
}
}
other => { other => {
debug!( debug!(
self.log, self.log,


@ -222,8 +222,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL); metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
// RPC block imported, regardless of process type // RPC block imported, regardless of process type
//TODO(sean) do we need to do anything here for missing blobs? or is passing the result
// along to sync enough?
if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result { if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result {
info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash); info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash);


@ -928,8 +928,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
BlockError::AvailabilityCheck( BlockError::AvailabilityCheck(
AvailabilityCheckError::KzgVerificationFailed, AvailabilityCheckError::KzgVerificationFailed,
) )
| BlockError::AvailabilityCheck(AvailabilityCheckError::Kzg(_)) | BlockError::AvailabilityCheck(AvailabilityCheckError::Kzg(_)) => {
| BlockError::BlobValidation(_) => {
warn!(self.log, "Blob validation failure"; "root" => %root, "peer_id" => %peer_id); warn!(self.log, "Blob validation failure"; "root" => %root, "peer_id" => %peer_id);
if let Ok(blob_peer) = request_ref.processing_peer(ResponseType::Blob) { if let Ok(blob_peer) = request_ref.processing_peer(ResponseType::Blob) {
cx.report_peer( cx.report_peer(


@ -1143,7 +1143,7 @@ fn test_same_chain_race_condition() {
mod deneb_only { mod deneb_only {
use super::*; use super::*;
use beacon_chain::blob_verification::BlobError; use beacon_chain::data_availability_checker::AvailabilityCheckError;
use std::ops::IndexMut; use std::ops::IndexMut;
use std::str::FromStr; use std::str::FromStr;
@ -1509,8 +1509,8 @@ mod deneb_only {
fn invalid_blob_processed(mut self) -> Self { fn invalid_blob_processed(mut self) -> Self {
self.bl.single_block_component_processed( self.bl.single_block_component_processed(
self.blob_req_id.expect("blob request id"), self.blob_req_id.expect("blob request id"),
BlockProcessingResult::Err(BlockError::BlobValidation( BlockProcessingResult::Err(BlockError::AvailabilityCheck(
BlobError::ProposerSignatureInvalid, AvailabilityCheckError::KzgVerificationFailed,
)), )),
ResponseType::Blob, ResponseType::Blob,
&mut self.cx, &mut self.cx,


@ -1469,17 +1469,19 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload
} }
} }
impl<T: EthSpec, Payload: AbstractExecPayload<T>> From<SignedBeaconBlock<T, Payload>> impl<T: EthSpec, Payload: AbstractExecPayload<T>> TryFrom<SignedBeaconBlock<T, Payload>>
for SignedBlockContents<T, Payload> for SignedBlockContents<T, Payload>
{ {
fn from(block: SignedBeaconBlock<T, Payload>) -> Self { type Error = &'static str;
fn try_from(block: SignedBeaconBlock<T, Payload>) -> Result<Self, Self::Error> {
match block { match block {
SignedBeaconBlock::Base(_) SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_) | SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => SignedBlockContents::Block(block), | SignedBeaconBlock::Capella(_) => Ok(SignedBlockContents::Block(block)),
//TODO: error handling, this should be try from SignedBeaconBlock::Deneb(_) => {
SignedBeaconBlock::Deneb(_block) => todo!(), Err("deneb block contents cannot be fully constructed from just the signed block")
}
} }
} }
} }


@ -34,7 +34,6 @@ CAPELLA_FORK_VERSION: 0x40484404
CAPELLA_FORK_EPOCH: 1 CAPELLA_FORK_EPOCH: 1
# DENEB/Deneb # DENEB/Deneb
# TODO: Rename to Deneb once specs/clients support it
DENEB_FORK_VERSION: 0x50484404 DENEB_FORK_VERSION: 0x50484404
DENEB_FORK_EPOCH: 5 DENEB_FORK_EPOCH: 5


@ -346,8 +346,8 @@ impl EthSpec for MinimalEthSpec {
type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch
type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch
type MaxWithdrawalsPerPayload = U4; type MaxWithdrawalsPerPayload = U4;
type FieldElementsPerBlob = U4; //FIXME(sean) this is spec'd out currently but will likely change type FieldElementsPerBlob = U4;
type BytesPerBlob = U128; //FIXME(sean) this is spec'd out currently but will likely change type BytesPerBlob = U128;
type MaxBlobCommitmentsPerBlock = U16; type MaxBlobCommitmentsPerBlock = U16;
params_from_eth_spec!(MainnetEthSpec { params_from_eth_spec!(MainnetEthSpec {


@ -1,10 +1,9 @@
use super::*; use super::*;
use kzg::KzgProof; use kzg::{KzgProof, BYTES_PER_COMMITMENT};
impl TestRandom for KzgProof { impl TestRandom for KzgProof {
fn random_for_test(rng: &mut impl RngCore) -> Self { fn random_for_test(rng: &mut impl RngCore) -> Self {
// TODO(pawan): use the length constant here let mut bytes = [0; BYTES_PER_COMMITMENT];
let mut bytes = [0; 48];
rng.fill_bytes(&mut bytes); rng.fill_bytes(&mut bytes);
Self(bytes) Self(bytes)
} }


@ -8,7 +8,7 @@ use std::ops::Deref;
use std::str::FromStr; use std::str::FromStr;
pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup}; pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup};
pub use c_kzg::{Bytes32, Bytes48}; pub use c_kzg::{Bytes32, Bytes48, BYTES_PER_COMMITMENT, BYTES_PER_PROOF};
#[derive(Debug)] #[derive(Debug)]
pub enum Error { pub enum Error {