Fix todos in deneb code (#4547)

* Low-hanging fruit

* Remove unnecessary todo

I think it's fine not to handle this here, since the calling functions already handle the error.
There's no specific reason to handle it in this function as well.

* Rename BlobError to GossipBlobError

This name better signifies what the error is for: the BlobError was only for failures when
gossip-verifying a blob. We cannot get this error when doing RPC validation.

* Remove the BlockError::BlobValidation variant

This error was only there to appease gossip verification before publish.
It's unclear how to peer-score this error, since it cannot actually occur during any
block verification flow.
This commit introduces an additional error type, BlockContentsError, to better represent the
error.

* Add docs for peer scoring (or lack thereof) of AvailabilityCheck errors

* I do not see a non-convoluted way of doing this. Okay to have some redundant code here

* Removing this to catch the failure red-handed

* Fix compilation

* Cannot be deleted because some tests assume the trait impl

Also useful to have around for testing in the future imo

* Add some metrics and logs

* Only process `Imported` variant in sync_methods

The only additional thing that might be useful for the other variants is logging; we can add that
later if required (see the sketch below).
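
A minimal, self-contained sketch of the idea (the enum and names here are stand-ins, not the real `AvailabilityProcessingStatus` handling in `sync_methods`): only the `Imported` variant triggers extra metrics/logging, while everything else is forwarded to sync unchanged.

```rust
// Stand-in for the real enum; only `Imported` gets extra handling in this path.
#[derive(Debug)]
enum AvailabilityProcessingStatus {
    Imported(&'static str),
    MissingComponents(u64, &'static str),
}

fn handle(status: AvailabilityProcessingStatus) {
    match status {
        AvailabilityProcessingStatus::Imported(root) => {
            // The only variant that warrants metrics/logging here.
            println!("imported fully available block, root = {root}");
        }
        other => {
            // Forwarded to sync as-is; logging for these could be added later if useful.
            println!("forwarding to sync: {other:?}");
        }
    }
}

fn main() {
    handle(AvailabilityProcessingStatus::Imported("0xabc"));
    handle(AvailabilityProcessingStatus::MissingComponents(42, "0xdef"));
}
```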

* Convert to TryFrom

Not really sure where this would be used, but it does what the comment asked for.
We could consider just returning the Block variant for a Deneb block in a From impl instead
(see the sketch below).
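
A self-contained sketch of the trade-off (with stand-in types, not the real Lighthouse `SignedBlockContents`/`SignedBeaconBlock`): `TryFrom` is used because a Deneb block cannot be turned into full block contents without its blob sidecars, whereas the `From` alternative above would have to return the plain `Block` variant unconditionally.

```rust
// Stand-in types to illustrate the conversion, not the real Lighthouse ones.
#[derive(Debug)]
enum Block {
    Capella,
    Deneb,
}

#[derive(Debug)]
enum BlockContents {
    // Pre-Deneb: the block alone is the full contents.
    Block(Block),
    // Deneb: blob sidecars must accompany the block.
    #[allow(dead_code)]
    BlockAndBlobSidecars(Block, Vec<Vec<u8>>),
}

// The approach taken in this diff: fallible conversion that rejects Deneb blocks,
// since their blob sidecars cannot be reconstructed from the signed block alone.
impl TryFrom<Block> for BlockContents {
    type Error = &'static str;
    fn try_from(block: Block) -> Result<Self, Self::Error> {
        match block {
            Block::Deneb => {
                Err("deneb block contents cannot be constructed from just the signed block")
            }
            other => Ok(BlockContents::Block(other)),
        }
    }
}

fn main() {
    assert!(BlockContents::try_from(Block::Capella).is_ok());
    assert!(BlockContents::try_from(Block::Deneb).is_err());
}
```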

* Unlikely to change now

* This is fine, as this is max_rpc_size per RPC chunk (for blobs, it would be 128 KiB max; see the sketch below)
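
A rough sanity check of that 128 KiB figure (a sketch; the blob-size constants are the mainnet consensus-spec values, which are assumed here and not part of this diff):

```rust
// Assumed mainnet constants: a blob is FIELD_ELEMENTS_PER_BLOB field elements
// of 32 bytes each.
const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
const BYTES_PER_FIELD_ELEMENT: usize = 32;
const BYTES_PER_BLOB: usize = FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT; // 131_072
const MAX_RPC_SIZE_POST_DENEB: usize = 10 * 1_048_576; // 10 MiB, as in the diff below

fn main() {
    // 131_072 bytes == 128 KiB per blob, comfortably within one RPC chunk.
    assert_eq!(BYTES_PER_BLOB, 128 * 1024);
    assert!(BYTES_PER_BLOB < MAX_RPC_SIZE_POST_DENEB);
}
```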

* Log count instead of individual blobs, can delete log later if it becomes too annoying.

* Add block production blob verification timer

* Extend block_streamer test to deneb

* Remove dbg statement

* Fix tests
Pawan Dhananjay 2023-08-03 17:27:03 -07:00 committed by GitHub
parent 9c75d8088d
commit a36e34eec4
20 changed files with 212 additions and 124 deletions


@ -715,21 +715,21 @@ mod tests {
}
#[tokio::test]
async fn check_all_blocks_from_altair_to_capella() {
async fn check_all_blocks_from_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8;
let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch;
let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
//FIXME(sean) extend this to test deneb?
spec.deneb_fork_epoch = None;
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
let harness = get_harness(VALIDATOR_COUNT, spec);
let harness = get_harness(VALIDATOR_COUNT, spec.clone());
// go to bellatrix fork
harness
.extend_slots(bellatrix_fork_epoch * slots_per_epoch)
@ -836,19 +836,19 @@ mod tests {
}
#[tokio::test]
async fn check_fallback_altair_to_capella() {
async fn check_fallback_altair_to_deneb() {
let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize;
let num_epochs = 8;
let bellatrix_fork_epoch = 2usize;
let capella_fork_epoch = 4usize;
let deneb_fork_epoch = 6usize;
let num_blocks_produced = num_epochs * slots_per_epoch;
let mut spec = test_spec::<MinimalEthSpec>();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(bellatrix_fork_epoch as u64));
spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64));
//FIXME(sean) extend this to test deneb?
spec.deneb_fork_epoch = None;
spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64));
let harness = get_harness(VALIDATOR_COUNT, spec);


@ -8,7 +8,7 @@ use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}
use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
use crate::beacon_proposer_cache::BeaconProposerCache;
use crate::blob_cache::BlobCache;
use crate::blob_verification::{self, BlobError, GossipVerifiedBlob};
use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob};
use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::POS_PANDA_BANNER;
use crate::block_verification::{
@ -2015,7 +2015,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self: &Arc<Self>,
blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet_id: u64,
) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> {
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self)
}
@ -2834,7 +2834,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
notify_execution_layer,
)?;
//TODO(sean) error handling?
publish_fn()?;
let executed_block = self
@ -3216,10 +3215,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
if let Some(blobs) = blobs {
if !blobs.is_empty() {
//FIXME(sean) using this for debugging for now
info!(
self.log, "Writing blobs to store";
"block_root" => ?block_root
"block_root" => %block_root,
"count" => blobs.len(),
);
ops.push(StoreOp::PutBlobs(block_root, blobs));
}
@ -4948,8 +4947,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let (mut block, _) = block.deconstruct();
*block.state_root_mut() = state_root;
//FIXME(sean)
// - add a new timer for processing here
let blobs_verification_timer =
metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES);
if let (Some(blobs), Some(proofs)) = (blobs_opt, proofs_opt) {
let kzg = self
.kzg
@ -5012,6 +5011,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.put(beacon_block_root, blob_sidecars);
}
drop(blobs_verification_timer);
metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES);
trace!(


@ -21,8 +21,9 @@ use types::{
Hash256, KzgCommitment, RelativeEpoch, SignedBlobSidecar, Slot,
};
/// An error occurred while validating a gossip blob.
#[derive(Debug)]
pub enum BlobError<T: EthSpec> {
pub enum GossipBlobError<T: EthSpec> {
/// The blob sidecar is from a slot that is later than the current slot (with respect to the
/// gossip clock disparity).
///
@ -109,15 +110,30 @@ pub enum BlobError<T: EthSpec> {
},
}
impl<T: EthSpec> From<BeaconChainError> for BlobError<T> {
fn from(e: BeaconChainError) -> Self {
BlobError::BeaconChainError(e)
impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
GossipBlobError::BlobParentUnknown(blob_sidecar) => {
write!(
f,
"BlobParentUnknown(parent_root:{})",
blob_sidecar.block_parent_root
)
}
other => write!(f, "{:?}", other),
}
}
}
impl<T: EthSpec> From<BeaconStateError> for BlobError<T> {
impl<T: EthSpec> From<BeaconChainError> for GossipBlobError<T> {
fn from(e: BeaconChainError) -> Self {
GossipBlobError::BeaconChainError(e)
}
}
impl<T: EthSpec> From<BeaconStateError> for GossipBlobError<T> {
fn from(e: BeaconStateError) -> Self {
BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
GossipBlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
}
}
@ -137,7 +153,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
pub fn new(
blob: SignedBlobSidecar<T::EthSpec>,
chain: &BeaconChain<T>,
) -> Result<Self, BlobError<T::EthSpec>> {
) -> Result<Self, GossipBlobError<T::EthSpec>> {
let blob_index = blob.message.index;
validate_blob_sidecar_for_gossip(blob, blob_index, chain)
}
@ -162,7 +178,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet: u64,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlob<T>, BlobError<T::EthSpec>> {
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
let blob_slot = signed_blob_sidecar.message.slot;
let blob_index = signed_blob_sidecar.message.index;
let block_parent_root = signed_blob_sidecar.message.block_parent_root;
@ -171,7 +187,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
// Verify that the blob_sidecar was received on the correct subnet.
if blob_index != subnet {
return Err(BlobError::InvalidSubnet {
return Err(GossipBlobError::InvalidSubnet {
expected: blob_index,
received: subnet,
});
@ -183,7 +199,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
.ok_or(BeaconChainError::UnableToReadSlot)?;
if blob_slot > latest_permissible_slot {
return Err(BlobError::FutureSlot {
return Err(GossipBlobError::FutureSlot {
message_slot: blob_slot,
latest_permissible_slot,
});
@ -196,7 +212,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.epoch
.start_slot(T::EthSpec::slots_per_epoch());
if blob_slot <= latest_finalized_slot {
return Err(BlobError::PastFinalizedSlot {
return Err(GossipBlobError::PastFinalizedSlot {
blob_slot,
finalized_slot: latest_finalized_slot,
});
@ -207,9 +223,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.observed_blob_sidecars
.read()
.is_known(&signed_blob_sidecar.message)
.map_err(|e| BlobError::BeaconChainError(e.into()))?
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(BlobError::RepeatBlob {
return Err(GossipBlobError::RepeatBlob {
proposer: blob_proposer_index,
slot: blob_slot,
index: blob_index,
@ -224,13 +240,15 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.get_block(&block_parent_root)
{
if parent_block.slot >= blob_slot {
return Err(BlobError::BlobIsNotLaterThanParent {
return Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
parent_slot: parent_block.slot,
});
}
} else {
return Err(BlobError::BlobParentUnknown(signed_blob_sidecar.message));
return Err(GossipBlobError::BlobParentUnknown(
signed_blob_sidecar.message,
));
}
// Note: We check that the proposer_index matches against the shuffling first to avoid
@ -301,9 +319,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
let parent_block = chain
.get_blinded_block(&block_parent_root)
.map_err(BlobError::BeaconChainError)?
.map_err(GossipBlobError::BeaconChainError)?
.ok_or_else(|| {
BlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root))
GossipBlobError::from(BeaconChainError::MissingBeaconBlock(block_parent_root))
})?;
let mut parent_state = chain
@ -338,7 +356,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
};
if proposer_index != blob_proposer_index as usize {
return Err(BlobError::ProposerIndexMismatch {
return Err(GossipBlobError::ProposerIndexMismatch {
sidecar: blob_proposer_index as usize,
local: proposer_index,
});
@ -350,11 +368,11 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.map_err(BlobError::BeaconChainError)?;
.map_err(GossipBlobError::BeaconChainError)?;
let pubkey = pubkey_cache
.get(proposer_index)
.ok_or_else(|| BlobError::UnknownValidator(proposer_index as u64))?;
.ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?;
signed_blob_sidecar.verify_signature(
None,
@ -366,7 +384,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
};
if !signature_is_valid {
return Err(BlobError::ProposerSignatureInvalid);
return Err(GossipBlobError::ProposerSignatureInvalid);
}
// Now the signature is valid, store the proposal so we don't accept another blob sidecar
@ -384,9 +402,9 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.observed_blob_sidecars
.write()
.observe_sidecar(&signed_blob_sidecar.message)
.map_err(|e| BlobError::BeaconChainError(e.into()))?
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(BlobError::RepeatBlob {
return Err(GossipBlobError::RepeatBlob {
proposer: proposer_index as u64,
slot: blob_slot,
index: blob_index,
@ -412,13 +430,12 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
///
/// Note: This is a copy of the `block_verification::cheap_state_advance_to_obtain_committees` to return
/// a BlobError error type instead.
/// TODO(pawan): try to unify the 2 functions.
fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
state: &'a mut BeaconState<E>,
state_root_opt: Option<Hash256>,
blob_slot: Slot,
spec: &ChainSpec,
) -> Result<Cow<'a, BeaconState<E>>, BlobError<E>> {
) -> Result<Cow<'a, BeaconState<E>>, GossipBlobError<E>> {
let block_epoch = blob_slot.epoch(E::slots_per_epoch());
if state.current_epoch() == block_epoch {
@ -429,7 +446,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
Ok(Cow::Borrowed(state))
} else if state.slot() > blob_slot {
Err(BlobError::BlobIsNotLaterThanParent {
Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
parent_slot: state.slot(),
})
@ -440,7 +457,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
// Advance the state into the same epoch as the block. Use the "partial" method since state
// roots are not important for proposer/attester shuffling.
partial_state_advance(&mut state, state_root_opt, target_slot, spec)
.map_err(|e| BlobError::BeaconChainError(BeaconChainError::from(e)))?;
.map_err(|e| GossipBlobError::BeaconChainError(BeaconChainError::from(e)))?;
state.build_committee_cache(RelativeEpoch::Previous, spec)?;
state.build_committee_cache(RelativeEpoch::Current, spec)?;


@ -48,9 +48,9 @@
// returned alongside.
#![allow(clippy::result_large_err)]
use crate::blob_verification::{BlobError, GossipVerifiedBlob};
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use crate::block_verification_types::{
AsBlock, BlockImportData, GossipVerifiedBlockContents, RpcBlock,
AsBlock, BlockContentsError, BlockImportData, GossipVerifiedBlockContents, RpcBlock,
};
use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData;
@ -292,19 +292,23 @@ pub enum BlockError<T: EthSpec> {
/// Honest peers shouldn't forward more than 1 equivocating block from the same proposer, so
/// we penalise them with a mid-tolerance error.
Slashable,
//TODO(sean) peer scoring docs
/// A blob alone failed validation.
BlobValidation(BlobError<T>),
/// The block and blob together failed validation.
///
/// ## Peer scoring
///
/// This error implies that the block satisfied all block validity conditions except consistency
/// with the corresponding blob that we received over gossip/rpc. This is because availability
/// checks are always done after all other checks are completed.
/// This implies that either:
/// 1. The block proposer is faulty
/// 2. We received the blob over rpc and it is invalid (inconsistent w.r.t the block).
/// 3. It is an internal error
/// For all these cases, we cannot penalize the peer that gave us the block.
/// TODO: We may need to penalize the peer that gave us a potentially invalid rpc blob.
/// https://github.com/sigp/lighthouse/issues/4546
AvailabilityCheck(AvailabilityCheckError),
}
impl<T: EthSpec> From<BlobError<T>> for BlockError<T> {
fn from(e: BlobError<T>) -> Self {
Self::BlobValidation(e)
}
}
impl<T: EthSpec> From<AvailabilityCheckError> for BlockError<T> {
fn from(e: AvailabilityCheckError) -> Self {
Self::AvailabilityCheck(e)
@ -662,7 +666,7 @@ pub trait IntoGossipVerifiedBlockContents<T: BeaconChainTypes>: Sized {
fn into_gossip_verified_block(
self,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>>;
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>>;
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec>;
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>>;
}
@ -671,7 +675,7 @@ impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for GossipVerifiedB
fn into_gossip_verified_block(
self,
_chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>> {
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
Ok(self)
}
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
@ -693,16 +697,16 @@ impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for SignedBlockCont
fn into_gossip_verified_block(
self,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockError<T::EthSpec>> {
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
let (block, blobs) = self.deconstruct();
let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?;
let gossip_verified_blobs = blobs
.map(|blobs| {
Ok::<_, BlobError<T::EthSpec>>(VariableList::from(
Ok::<_, GossipBlobError<T::EthSpec>>(VariableList::from(
blobs
.into_iter()
.map(|blob| GossipVerifiedBlob::new(blob, chain))
.collect::<Result<Vec<_>, BlobError<T::EthSpec>>>()?,
.collect::<Result<Vec<_>, GossipBlobError<T::EthSpec>>>()?,
))
})
.transpose()?;
@ -1139,7 +1143,6 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
}
}
//TODO(sean) can this be deleted
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> {
/// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock`
/// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.


@ -1,4 +1,5 @@
use crate::blob_verification::GossipVerifiedBlobList;
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlobList};
use crate::block_verification::BlockError;
use crate::data_availability_checker::AvailabilityCheckError;
pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData;
@ -249,6 +250,37 @@ pub struct BlockImportData<E: EthSpec> {
pub type GossipVerifiedBlockContents<T> =
(GossipVerifiedBlock<T>, Option<GossipVerifiedBlobList<T>>);
#[derive(Debug)]
pub enum BlockContentsError<T: EthSpec> {
BlockError(BlockError<T>),
BlobError(GossipBlobError<T>),
}
impl<T: EthSpec> From<BlockError<T>> for BlockContentsError<T> {
fn from(value: BlockError<T>) -> Self {
Self::BlockError(value)
}
}
impl<T: EthSpec> From<GossipBlobError<T>> for BlockContentsError<T> {
fn from(value: GossipBlobError<T>) -> Self {
Self::BlobError(value)
}
}
impl<T: EthSpec> std::fmt::Display for BlockContentsError<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BlockContentsError::BlockError(err) => {
write!(f, "BlockError({})", err)
}
BlockContentsError::BlobError(err) => {
write!(f, "BlobError({})", err)
}
}
}
}
/// Trait for common block operations.
pub trait AsBlock<E: EthSpec> {
fn slot(&self) -> Slot;


@ -45,7 +45,6 @@ pub enum AvailabilityCheckError {
KzgCommitmentMismatch {
blob_index: u64,
},
IncorrectFork,
BlobIndexInvalid(u64),
UnorderedBlobs {
blob_index: u64,


@ -30,19 +30,6 @@ pub fn validate_blobs<T: EthSpec>(
blobs: &[Blob<T>],
kzg_proofs: &[KzgProof],
) -> Result<bool, KzgError> {
// TODO(sean) batch verification fails with a single element, it's unclear to me why
if blobs.len() == 1 && kzg_proofs.len() == 1 && expected_kzg_commitments.len() == 1 {
if let (Some(blob), Some(kzg_proof), Some(kzg_commitment)) = (
blobs.get(0),
kzg_proofs.get(0),
expected_kzg_commitments.get(0),
) {
return validate_blob::<T>(kzg, blob.clone(), *kzg_commitment, *kzg_proof);
} else {
return Ok(false);
}
}
let blobs = blobs
.iter()
.map(|blob| ssz_blob_to_crypto_blob::<T>(blob.clone())) // Avoid this clone


@ -1036,6 +1036,10 @@ lazy_static! {
pub static ref KZG_VERIFICATION_BATCH_TIMES: Result<Histogram> =
try_create_histogram("kzg_verification_batch_seconds", "Runtime of batched kzg verification");
pub static ref BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: Result<Histogram> = try_create_histogram(
"beacon_block_production_blobs_verification_seconds",
"Time taken to verify blobs against commitments and creating BlobSidecar objects in block production"
);
/*
* Availability related metrics
*/


@ -86,7 +86,7 @@ pub async fn gossip_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
);
}
@ -229,7 +229,7 @@ pub async fn consensus_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
);
}
@ -443,7 +443,7 @@ pub async fn equivocation_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
);
}
@ -515,7 +515,7 @@ pub async fn equivocation_consensus_early_equivocation() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string())
);
}
@ -741,7 +741,7 @@ pub async fn blinded_gossip_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
);
}
@ -886,7 +886,7 @@ pub async fn blinded_consensus_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
);
}
@ -1035,7 +1035,7 @@ pub async fn blinded_equivocation_invalid() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 }".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(NotFinalizedDescendant { block_parent_root: 0x0000000000000000000000000000000000000000000000000000000000000000 })".to_string())
);
}
@ -1103,7 +1103,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST));
assert!(
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: Slashable".to_string())
matches!(error_response, eth2::Error::ServerMessage(err) if err.message == "BAD_REQUEST: BlockError(Slashable)".to_string())
);
}


@ -2393,7 +2393,8 @@ impl ApiTester {
.0;
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
let signed_block_contents = SignedBlockContents::from(signed_block.clone());
let signed_block_contents =
SignedBlockContents::try_from(signed_block.clone()).unwrap();
self.client
.post_beacon_blocks(&signed_block_contents)


@ -134,7 +134,6 @@ pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
/// The maximum bytes that can be sent across the RPC post-merge.
pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M
// FIXME(sean) should this be increased to account for blobs?
pub(crate) const MAX_RPC_SIZE_POST_DENEB: usize = 10 * 1_048_576; // 10M
/// The protocol prefix the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";


@ -5,11 +5,12 @@ use crate::{
sync::SyncMessage,
};
use beacon_chain::blob_verification::{BlobError, GossipVerifiedBlob};
use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use beacon_chain::block_verification_types::AsBlock;
use beacon_chain::store::Error;
use beacon_chain::{
attestation_verification::{self, Error as AttnError, VerifiedAttestation},
data_availability_checker::AvailabilityCheckError,
light_client_finality_update_verification::Error as LightClientFinalityUpdateError,
light_client_optimistic_update_verification::Error as LightClientOptimisticUpdateError,
observed_operations::ObservationOutcome,
@ -598,7 +599,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
}
}
// TODO: docs
#[allow(clippy::too_many_arguments)]
pub async fn process_gossip_blob(
self: &Arc<Self>,
@ -635,7 +635,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
}
Err(err) => {
match err {
BlobError::BlobParentUnknown(blob) => {
GossipBlobError::BlobParentUnknown(blob) => {
debug!(
self.log,
"Unknown parent hash for blob";
@ -645,11 +645,11 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
);
self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob));
}
BlobError::ProposerSignatureInvalid
| BlobError::UnknownValidator(_)
| BlobError::ProposerIndexMismatch { .. }
| BlobError::BlobIsNotLaterThanParent { .. }
| BlobError::InvalidSubnet { .. } => {
GossipBlobError::ProposerSignatureInvalid
| GossipBlobError::UnknownValidator(_)
| GossipBlobError::ProposerIndexMismatch { .. }
| GossipBlobError::BlobIsNotLaterThanParent { .. }
| GossipBlobError::InvalidSubnet { .. } => {
warn!(
self.log,
"Could not verify blob sidecar for gossip. Rejecting the blob sidecar";
@ -670,10 +670,10 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
MessageAcceptance::Reject,
);
}
BlobError::FutureSlot { .. }
| BlobError::BeaconChainError(_)
| BlobError::RepeatBlob { .. }
| BlobError::PastFinalizedSlot { .. } => {
GossipBlobError::FutureSlot { .. }
| GossipBlobError::BeaconChainError(_)
| GossipBlobError::RepeatBlob { .. }
| GossipBlobError::PastFinalizedSlot { .. } => {
warn!(
self.log,
"Could not verify blob sidecar for gossip. Ignoring the blob sidecar";
@ -710,11 +710,24 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
let blob_slot = verified_blob.slot();
let blob_index = verified_blob.id().index;
match self.chain.process_blob(verified_blob).await {
Ok(AvailabilityProcessingStatus::Imported(_hash)) => {
//TODO(sean) add metrics and logging
Ok(AvailabilityProcessingStatus::Imported(hash)) => {
// Note: Reusing block imported metric here
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL);
info!(
self.log,
"Gossipsub blob processed, imported fully available block";
"hash" => %hash
);
self.chain.recompute_head_at_current_slot().await;
}
Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_hash)) => {
debug!(
self.log,
"Missing block components for gossip verified blob";
"slot" => %blob_slot,
"blob_index" => %blob_index,
"blob_root" => %blob_root,
);
self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
slot, peer_id, block_hash,
));
@ -954,14 +967,12 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
);
return None;
}
Err(e @ BlockError::BlobValidation(_)) | Err(e @ BlockError::AvailabilityCheck(_)) => {
warn!(self.log, "Could not verify block against known blobs in gossip. Rejecting the block";
"error" => %e);
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
self.gossip_penalize_peer(
peer_id,
PeerAction::LowToleranceError,
"gossip_blob_low",
// Note: This error variant cannot be reached when doing gossip validation
// as we do not do availability checks here.
Err(e @ BlockError::AvailabilityCheck(_)) => {
crit!(self.log, "Internal block gossip validation error. Availability check during
gossip validation";
"error" => %e
);
return None;
}
@ -1142,6 +1153,43 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
"error" => %e
);
}
Err(BlockError::AvailabilityCheck(err)) => {
match err {
AvailabilityCheckError::KzgNotInitialized
| AvailabilityCheckError::SszTypes(_)
| AvailabilityCheckError::MissingBlobs
| AvailabilityCheckError::StoreError(_)
| AvailabilityCheckError::DecodeError(_) => {
crit!(
self.log,
"Internal availability check error";
"error" => ?err,
);
}
AvailabilityCheckError::Kzg(_)
| AvailabilityCheckError::KzgVerificationFailed
| AvailabilityCheckError::NumBlobsMismatch { .. }
| AvailabilityCheckError::TxKzgCommitmentMismatch(_)
| AvailabilityCheckError::BlobIndexInvalid(_)
| AvailabilityCheckError::UnorderedBlobs { .. }
| AvailabilityCheckError::BlockBlobRootMismatch { .. }
| AvailabilityCheckError::BlockBlobSlotMismatch { .. }
| AvailabilityCheckError::KzgCommitmentMismatch { .. } => {
// Note: we cannot penalize the peer that sent us the block
// over gossip here because these errors imply either an issue
// with:
// 1. Blobs we have received over non-gossip sources
// (from potentially other peers)
// 2. The proposer being malicious and sending inconsistent
// blocks and blobs.
warn!(
self.log,
"Received invalid blob or malicious proposer";
"error" => ?err
);
}
}
}
other => {
debug!(
self.log,


@ -222,8 +222,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
// RPC block imported, regardless of process type
//TODO(sean) do we need to do anything here for missing blobs? or is passing the result
// along to sync enough?
if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result {
info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash);


@ -928,8 +928,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
BlockError::AvailabilityCheck(
AvailabilityCheckError::KzgVerificationFailed,
)
| BlockError::AvailabilityCheck(AvailabilityCheckError::Kzg(_))
| BlockError::BlobValidation(_) => {
| BlockError::AvailabilityCheck(AvailabilityCheckError::Kzg(_)) => {
warn!(self.log, "Blob validation failure"; "root" => %root, "peer_id" => %peer_id);
if let Ok(blob_peer) = request_ref.processing_peer(ResponseType::Blob) {
cx.report_peer(


@ -1143,7 +1143,7 @@ fn test_same_chain_race_condition() {
mod deneb_only {
use super::*;
use beacon_chain::blob_verification::BlobError;
use beacon_chain::data_availability_checker::AvailabilityCheckError;
use std::ops::IndexMut;
use std::str::FromStr;
@ -1509,8 +1509,8 @@ mod deneb_only {
fn invalid_blob_processed(mut self) -> Self {
self.bl.single_block_component_processed(
self.blob_req_id.expect("blob request id"),
BlockProcessingResult::Err(BlockError::BlobValidation(
BlobError::ProposerSignatureInvalid,
BlockProcessingResult::Err(BlockError::AvailabilityCheck(
AvailabilityCheckError::KzgVerificationFailed,
)),
ResponseType::Blob,
&mut self.cx,


@ -1469,17 +1469,19 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> From<SignedBeaconBlock<T, Payload>>
impl<T: EthSpec, Payload: AbstractExecPayload<T>> TryFrom<SignedBeaconBlock<T, Payload>>
for SignedBlockContents<T, Payload>
{
fn from(block: SignedBeaconBlock<T, Payload>) -> Self {
type Error = &'static str;
fn try_from(block: SignedBeaconBlock<T, Payload>) -> Result<Self, Self::Error> {
match block {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => SignedBlockContents::Block(block),
//TODO: error handling, this should be try from
SignedBeaconBlock::Deneb(_block) => todo!(),
| SignedBeaconBlock::Capella(_) => Ok(SignedBlockContents::Block(block)),
SignedBeaconBlock::Deneb(_) => {
Err("deneb block contents cannot be fully constructed from just the signed block")
}
}
}
}


@ -34,7 +34,6 @@ CAPELLA_FORK_VERSION: 0x40484404
CAPELLA_FORK_EPOCH: 1
# DENEB/Deneb
# TODO: Rename to Deneb once specs/clients support it
DENEB_FORK_VERSION: 0x50484404
DENEB_FORK_EPOCH: 5


@ -346,8 +346,8 @@ impl EthSpec for MinimalEthSpec {
type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch
type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch
type MaxWithdrawalsPerPayload = U4;
type FieldElementsPerBlob = U4; //FIXME(sean) this is spec'd out currently but will likely change
type BytesPerBlob = U128; //FIXME(sean) this is spec'd out currently but will likely change
type FieldElementsPerBlob = U4;
type BytesPerBlob = U128;
type MaxBlobCommitmentsPerBlock = U16;
params_from_eth_spec!(MainnetEthSpec {


@ -1,10 +1,9 @@
use super::*;
use kzg::KzgProof;
use kzg::{KzgProof, BYTES_PER_COMMITMENT};
impl TestRandom for KzgProof {
fn random_for_test(rng: &mut impl RngCore) -> Self {
// TODO(pawan): use the length constant here
let mut bytes = [0; 48];
let mut bytes = [0; BYTES_PER_COMMITMENT];
rng.fill_bytes(&mut bytes);
Self(bytes)
}


@ -8,7 +8,7 @@ use std::ops::Deref;
use std::str::FromStr;
pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup};
pub use c_kzg::{Bytes32, Bytes48};
pub use c_kzg::{Bytes32, Bytes48, BYTES_PER_COMMITMENT, BYTES_PER_PROOF};
#[derive(Debug)]
pub enum Error {