From 31044402ee180ff937027ec842513bef90d7eec8 Mon Sep 17 00:00:00 2001
From: Pawan Dhananjay
Date: Tue, 5 Dec 2023 08:19:59 -0800
Subject: [PATCH] Sidecar inclusion proof (#4900)

* Refactor BlobSidecar to new type
* Fix some compile errors
* Gossip verification compiles
* Fix http api types take 1
* Fix another round of compile errors
* Beacon node crate compiles
* EF tests compile
* Remove all blob signing from VC
* fmt
* Tests compile
* Fix some tests
* Fix more http tests
* get compiling
* Fix gossip conditions and tests
* Add basic proof generation and verification
* remove unnecessary ssz decode
* add back build_sidecar
* remove default at fork for blobs
* fix beacon chain tests
* get release tests compiling
* fix lints
* fix existing spec tests
* add new ef tests
* fix gossip duplicate rule
* lints
* add back sidecar signature check in gossip
* add finalized descendant check to blob sidecar gossip
* fix error conversion
* fix release tests
* sidecar inclusion self review cleanup
* Add proof verification and computation metrics
* Remove accidentally committed file
* Unify some block and blob errors; add slashing conditions for sidecars
* Address review comment
* Clean up re-org tests (#4957)
* Address more review comments
* Add Comments & Eliminate Unnecessary Clones
* update names
* Update beacon_node/beacon_chain/src/metrics.rs Co-authored-by: Jimmy Chen
* Update beacon_node/network/src/network_beacon_processor/tests.rs Co-authored-by: Jimmy Chen
* pr feedback
* fix test compile
* Sidecar Inclusion proof small refactor and updates (#4967)
* Update some comments, variables and small cosmetic fixes.
* Couple blobs and proofs into a tuple in `PayloadAndBlobs` for simplicity and safety.
* Update function comment.
* Update testing/ef_tests/src/cases/merkle_proof_validity.rs Co-authored-by: Jimmy Chen
* Rename the block and blob wrapper types used in the beacon API interfaces.
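The bullets above summarize the central change in this patch: a `BlobSidecar` no longer carries its own proposer signature. Instead it embeds the block's `SignedBeaconBlockHeader` together with a Merkle proof that its KZG commitment is included in the block body, and gossip verification checks that inclusion proof, the header signature and the KZG proof. A rough sketch of the resulting flow, using helpers that appear later in this diff (`BlobSidecar::build_sidecars`, `verify_blob_sidecar_for_gossip`) and assuming `blob_items`, `signed_block` and `chain` bindings from the surrounding code; error handling is simplified:

    // Producer side: `BeaconBlockResponse.blob_items` now carries the proofs and
    // blobs; sidecars (including their commitment inclusion proofs) are built
    // against the signed block before publishing.
    let (kzg_proofs, blobs) = blob_items.expect("post-Deneb block with blobs");
    let sidecars = BlobSidecar::build_sidecars(blobs, &signed_block, kzg_proofs)?;

    // Receiver side: gossip verification checks the sidecar's inclusion proof and
    // the embedded signed block header, then the KZG proof, instead of a
    // per-sidecar proposer signature.
    for sidecar in Vec::from(sidecars) {
        let subnet_id = sidecar.index; // blob index and subnet id coincide
        let verified = chain.verify_blob_sidecar_for_gossip(sidecar, subnet_id)?;
        // `verified` wraps a `KzgVerifiedBlob` that can be handed to the data
        // availability checker via `put_gossip_blob`.
    }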
* make sure gossip invalid blobs are passed to the slasher (#4970) * Add blob headers to slasher before adding to DA checker * Replace Vec with HashSet in BlockQueue * fmt * Rename gindex -> index * Simplify gossip condition --------- Co-authored-by: realbigsean Co-authored-by: realbigsean Co-authored-by: Michael Sproul Co-authored-by: Mark Mackey Co-authored-by: Jimmy Chen --- Cargo.lock | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 149 ++++-- .../beacon_chain/src/blob_verification.rs | 399 ++++++++------ .../beacon_chain/src/block_times_cache.rs | 92 +++- .../beacon_chain/src/block_verification.rs | 123 +++-- .../src/block_verification_types.rs | 13 +- .../src/data_availability_checker.rs | 21 +- .../src/data_availability_checker/error.rs | 13 +- .../overflow_lru_cache.rs | 81 +-- beacon_node/beacon_chain/src/kzg_utils.rs | 8 +- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/beacon_chain/src/metrics.rs | 8 + .../src/observed_blob_sidecars.rs | 72 +-- beacon_node/beacon_chain/src/test_utils.rs | 180 ++----- .../beacon_chain/tests/block_verification.rs | 185 +++---- beacon_node/beacon_chain/tests/events.rs | 22 +- beacon_node/beacon_processor/src/lib.rs | 13 +- beacon_node/builder_client/src/lib.rs | 8 +- beacon_node/execution_layer/src/lib.rs | 61 +-- .../test_utils/execution_block_generator.rs | 6 +- .../src/test_utils/mock_builder.rs | 8 +- .../http_api/src/build_block_contents.rs | 72 ++- beacon_node/http_api/src/lib.rs | 52 +- beacon_node/http_api/src/produce_block.rs | 69 +-- beacon_node/http_api/src/publish_blocks.rs | 59 ++- .../tests/broadcast_validation_tests.rs | 138 ++--- .../http_api/tests/interactive_tests.rs | 12 +- beacon_node/http_api/tests/tests.rs | 238 ++++----- .../lighthouse_network/src/rpc/methods.rs | 4 +- .../lighthouse_network/src/types/pubsub.rs | 24 +- .../gossip_methods.rs | 45 +- .../src/network_beacon_processor/mod.rs | 6 +- .../network_beacon_processor/sync_methods.rs | 2 +- .../src/network_beacon_processor/tests.rs | 38 +- beacon_node/network/src/router.rs | 4 +- .../network/src/sync/block_lookups/common.rs | 2 +- .../network/src/sync/block_lookups/tests.rs | 6 +- .../src/sync/block_sidecar_coupling.rs | 2 +- beacon_node/network/src/sync/manager.rs | 6 +- common/eth2/src/lib.rs | 98 ++-- common/eth2/src/types.rs | 487 ++++++------------ consensus/fork_choice/src/fork_choice.rs | 2 +- consensus/fork_choice/src/lib.rs | 4 +- consensus/merkle_proof/src/lib.rs | 2 +- .../src/proto_array_fork_choice.rs | 6 +- consensus/types/presets/gnosis/deneb.yaml | 2 + consensus/types/presets/mainnet/deneb.yaml | 2 + consensus/types/presets/minimal/deneb.yaml | 2 + consensus/types/src/beacon_block_body.rs | 78 +++ consensus/types/src/beacon_block_header.rs | 10 + consensus/types/src/blob_sidecar.rs | 216 ++++---- consensus/types/src/builder_bid.rs | 15 +- consensus/types/src/chain_spec.rs | 8 - consensus/types/src/config_and_preset.rs | 1 - consensus/types/src/eth_spec.rs | 9 + consensus/types/src/lib.rs | 9 +- consensus/types/src/payload.rs | 53 +- consensus/types/src/sidecar.rs | 221 -------- consensus/types/src/signed_blob.rs | 114 ---- crypto/kzg/src/lib.rs | 40 +- slasher/src/block_queue.rs | 7 +- testing/ef_tests/Cargo.toml | 1 + testing/ef_tests/Makefile | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 154 +++++- .../src/cases/kzg_verify_blob_kzg_proof.rs | 12 +- .../cases/kzg_verify_blob_kzg_proof_batch.rs | 22 +- .../src/cases/merkle_proof_validity.rs | 73 ++- testing/ef_tests/src/handler.rs | 35 ++ testing/ef_tests/src/type_name.rs | 1 - 
testing/ef_tests/tests/tests.rs | 28 +- validator_client/src/block_service.rs | 207 ++++---- validator_client/src/http_metrics/metrics.rs | 5 - validator_client/src/signing_method.rs | 6 - validator_client/src/validator_store.rs | 44 +- 74 files changed, 1950 insertions(+), 2270 deletions(-) delete mode 100644 consensus/types/src/sidecar.rs delete mode 100644 consensus/types/src/signed_blob.rs diff --git a/Cargo.lock b/Cargo.lock index 9c1b59134..0a1af70bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1857,6 +1857,7 @@ dependencies = [ "fs2", "hex", "kzg", + "logging", "rayon", "serde", "serde_json", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 53583390f..71270c197 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7,7 +7,7 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey}; use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache}; use crate::beacon_proposer_cache::compute_proposer_duties_from_head; use crate::beacon_proposer_cache::BeaconProposerCache; -use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob}; +use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob}; use crate::block_times_cache::BlockTimesCache; use crate::block_verification::POS_PANDA_BANNER; use crate::block_verification::{ @@ -121,7 +121,6 @@ use tree_hash::TreeHash; use types::beacon_state::CloneConfig; use types::blob_sidecar::{BlobSidecarList, FixedBlobSidecarList}; use types::payload::BlockProductionVersion; -use types::sidecar::BlobItems; use types::*; pub type ForkChoiceError = fork_choice::Error; @@ -489,16 +488,49 @@ pub struct BeaconChain { pub block_production_state: Arc)>>>, } -pub enum BeaconBlockResponseType { +pub enum BeaconBlockResponseWrapper { Full(BeaconBlockResponse>), Blinded(BeaconBlockResponse>), } +impl BeaconBlockResponseWrapper { + pub fn fork_name(&self, spec: &ChainSpec) -> Result { + Ok(match self { + BeaconBlockResponseWrapper::Full(resp) => resp.block.to_ref().fork_name(spec)?, + BeaconBlockResponseWrapper::Blinded(resp) => resp.block.to_ref().fork_name(spec)?, + }) + } + + pub fn execution_payload_value(&self) -> Option { + match self { + BeaconBlockResponseWrapper::Full(resp) => resp.execution_payload_value, + BeaconBlockResponseWrapper::Blinded(resp) => resp.execution_payload_value, + } + } + + pub fn consensus_block_value(&self) -> Option { + match self { + BeaconBlockResponseWrapper::Full(resp) => resp.consensus_block_value, + BeaconBlockResponseWrapper::Blinded(resp) => resp.consensus_block_value, + } + } + + pub fn is_blinded(&self) -> bool { + matches!(self, BeaconBlockResponseWrapper::Blinded(_)) + } +} + +/// The components produced when the local beacon node creates a new block to extend the chain pub struct BeaconBlockResponse> { + /// The newly produced beacon block pub block: BeaconBlock, + /// The post-state after applying the new block pub state: BeaconState, - pub maybe_side_car: Option>::Sidecar>>, + /// The Blobs / Proofs associated with the new block + pub blob_items: Option<(KzgProofs, BlobsList)>, + /// The execution layer reward for the block pub execution_payload_value: Option, + /// The consensus layer reward to the proposer pub consensus_block_value: Option, } @@ -2022,17 +2054,15 @@ impl BeaconChain { pub fn verify_blob_sidecar_for_gossip( self: &Arc, - blob_sidecar: SignedBlobSidecar, + blob_sidecar: Arc>, subnet_id: u64, ) -> Result, GossipBlobError> { 
metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS); let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES); - blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self).map( - |v| { - metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); - v - }, - ) + GossipVerifiedBlob::new(blob_sidecar, subnet_id, self).map(|v| { + metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES); + v + }) } /// Accepts some 'LightClientOptimisticUpdate' from the network and attempts to verify it @@ -2832,7 +2862,7 @@ impl BeaconChain { } self.data_availability_checker - .notify_gossip_blob(blob.as_blob().slot, block_root, &blob); + .notify_gossip_blob(blob.slot(), block_root, &blob); let r = self.check_gossip_blob_availability_and_import(blob).await; self.remove_notified(&block_root, r) } @@ -2942,6 +2972,20 @@ impl BeaconChain { // Increment the Prometheus counter for block processing requests. metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); + // Set observed time if not already set. Usually this should be set by gossip or RPC, + // but just in case we set it again here (useful for tests). + if let (Some(seen_timestamp), Some(current_slot)) = + (self.slot_clock.now_duration(), self.slot_clock.now()) + { + self.block_times_cache.write().set_time_observed( + block_root, + current_slot, + seen_timestamp, + None, + None, + ); + } + let block_slot = unverified_block.block().slot(); // A small closure to group the verification and import errors. @@ -3097,6 +3141,9 @@ impl BeaconChain { blob: GossipVerifiedBlob, ) -> Result> { let slot = blob.slot(); + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(blob.signed_block_header()); + } let availability = self.data_availability_checker.put_gossip_blob(blob)?; self.process_availability(slot, availability).await @@ -3110,6 +3157,11 @@ impl BeaconChain { block_root: Hash256, blobs: FixedBlobSidecarList, ) -> Result> { + if let Some(slasher) = self.slasher.as_ref() { + for blob_sidecar in blobs.iter().filter_map(|blob| blob.clone()) { + slasher.accept_block_header(blob_sidecar.signed_block_header.clone()); + } + } let availability = self .data_availability_checker .put_rpc_blobs(block_root, blobs)?; @@ -3968,7 +4020,7 @@ impl BeaconChain { validator_graffiti: Option, verification: ProduceBlockVerification, block_production_version: BlockProductionVersion, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); // Part 1/2 (blocking) @@ -4414,7 +4466,7 @@ impl BeaconChain { /// This function uses heuristics that align quite closely but not exactly with the re-org /// conditions set out in `get_state_for_re_org` and `get_proposer_head`. The differences are /// documented below. 
- fn overridden_forkchoice_update_params( + pub fn overridden_forkchoice_update_params( &self, canonical_forkchoice_params: ForkchoiceUpdateParameters, ) -> Result { @@ -4432,7 +4484,7 @@ impl BeaconChain { }) } - fn overridden_forkchoice_update_params_or_failure_reason( + pub fn overridden_forkchoice_update_params_or_failure_reason( &self, canonical_forkchoice_params: &ForkchoiceUpdateParameters, ) -> Result> { @@ -4573,7 +4625,7 @@ impl BeaconChain { .unwrap_or_else(|| Duration::from_secs(0)), ); block_delays.observed.map_or(false, |delay| { - delay > self.slot_clock.unagg_attestation_production_delay() + delay >= self.slot_clock.unagg_attestation_production_delay() }) } @@ -4599,7 +4651,7 @@ impl BeaconChain { validator_graffiti: Option, verification: ProduceBlockVerification, block_production_version: BlockProductionVersion, - ) -> Result, BlockProductionError> { + ) -> Result, BlockProductionError> { // Part 1/3 (blocking) // // Perform the state advance and block-packing functions. @@ -4658,7 +4710,7 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; - Ok(BeaconBlockResponseType::Full(beacon_block_response)) + Ok(BeaconBlockResponseWrapper::Full(beacon_block_response)) } BlockProposalContentsType::Blinded(block_contents) => { let chain = self.clone(); @@ -4678,7 +4730,7 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; - Ok(BeaconBlockResponseType::Blinded(beacon_block_response)) + Ok(BeaconBlockResponseWrapper::Blinded(beacon_block_response)) } } } else { @@ -4699,7 +4751,7 @@ impl BeaconChain { .await .map_err(BlockProductionError::TokioJoin)??; - Ok(BeaconBlockResponseType::Full(beacon_block_response)) + Ok(BeaconBlockResponseWrapper::Full(beacon_block_response)) } } @@ -4977,7 +5029,7 @@ impl BeaconChain { bls_to_execution_changes, } = partial_beacon_block; - let (inner_block, blobs_opt, proofs_opt, execution_payload_value) = match &state { + let (inner_block, maybe_blobs_and_proofs, execution_payload_value) = match &state { BeaconState::Base(_) => ( BeaconBlock::Base(BeaconBlockBase { slot, @@ -4997,7 +5049,6 @@ impl BeaconChain { }, }), None, - None, Uint256::zero(), ), BeaconState::Altair(_) => ( @@ -5021,7 +5072,6 @@ impl BeaconChain { }, }), None, - None, Uint256::zero(), ), BeaconState::Merge(_) => { @@ -5052,7 +5102,6 @@ impl BeaconChain { }, }), None, - None, execution_payload_value, ) } @@ -5086,12 +5135,11 @@ impl BeaconChain { }, }), None, - None, execution_payload_value, ) } BeaconState::Deneb(_) => { - let (payload, kzg_commitments, blobs, proofs, execution_payload_value) = + let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) = block_contents .ok_or(BlockProductionError::MissingExecutionPayload)? 
.deconstruct(); @@ -5121,8 +5169,7 @@ impl BeaconChain { .ok_or(BlockProductionError::InvalidPayloadFork)?, }, }), - blobs, - proofs, + maybe_blobs_and_proofs, execution_payload_value, ) } @@ -5181,8 +5228,8 @@ impl BeaconChain { let blobs_verification_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES); - let maybe_sidecar_list = match (blobs_opt, proofs_opt) { - (Some(blobs_or_blobs_roots), Some(proofs)) => { + let blob_items = match maybe_blobs_and_proofs { + Some((blobs, proofs)) => { let expected_kzg_commitments = block.body().blob_kzg_commitments().map_err(|_| { BlockProductionError::InvalidBlockVariant( @@ -5190,42 +5237,32 @@ impl BeaconChain { ) })?; - if expected_kzg_commitments.len() != blobs_or_blobs_roots.len() { + if expected_kzg_commitments.len() != blobs.len() { return Err(BlockProductionError::MissingKzgCommitment(format!( "Missing KZG commitment for slot {}. Expected {}, got: {}", block.slot(), - blobs_or_blobs_roots.len(), + blobs.len(), expected_kzg_commitments.len() ))); } let kzg_proofs = Vec::from(proofs); - if let Some(blobs) = blobs_or_blobs_roots.blobs() { - let kzg = self - .kzg - .as_ref() - .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; - kzg_utils::validate_blobs::( - kzg, - expected_kzg_commitments, - blobs.iter().collect(), - &kzg_proofs, - ) - .map_err(BlockProductionError::KzgError)?; - } - - Some( - Sidecar::build_sidecar( - blobs_or_blobs_roots, - &block, - expected_kzg_commitments, - kzg_proofs, - ) - .map_err(BlockProductionError::FailedToBuildBlobSidecars)?, + let kzg = self + .kzg + .as_ref() + .ok_or(BlockProductionError::TrustedSetupNotInitialized)?; + kzg_utils::validate_blobs::( + kzg, + expected_kzg_commitments, + blobs.iter().collect(), + &kzg_proofs, ) + .map_err(BlockProductionError::KzgError)?; + + Some((kzg_proofs.into(), blobs)) } - _ => None, + None => None, }; drop(blobs_verification_timer); @@ -5243,7 +5280,7 @@ impl BeaconChain { Ok(BeaconBlockResponse { block, state, - maybe_side_car: maybe_sidecar_list, + blob_items, execution_payload_value: Some(execution_payload_value), consensus_block_value: Some(consensus_block_value), }) diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index ca69d2ab6..e2a1f0928 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -2,15 +2,15 @@ use derivative::Derivative; use slot_clock::SlotClock; use std::sync::Arc; -use crate::beacon_chain::{ - BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, - VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, +use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}; +use crate::block_verification::{ + cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, + BlockSlashInfo, }; -use crate::block_verification::cheap_state_advance_to_obtain_committees; -use crate::data_availability_checker::AvailabilityCheckError; use crate::kzg_utils::{validate_blob, validate_blobs}; use crate::{metrics, BeaconChainError}; -use kzg::{Kzg, KzgCommitment}; +use kzg::{Error as KzgError, Kzg, KzgCommitment}; +use merkle_proof::MerkleTreeError; use slog::{debug, warn}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; @@ -18,7 +18,7 @@ use tree_hash::TreeHash; use types::blob_sidecar::BlobIdentifier; use types::{ BeaconStateError, BlobSidecar, BlobSidecarList, CloneConfig, EthSpec, Hash256, - SignedBlobSidecar, Slot, + 
SignedBeaconBlockHeader, Slot, }; /// An error occurred while validating a gossip blob. @@ -75,7 +75,7 @@ pub enum GossipBlobError { /// ## Peer scoring /// /// The blob is invalid and the peer is faulty. - ProposerSignatureInvalid, + ProposalSignatureInvalid, /// The proposal_index corresponding to blob.beacon_block_root is not known. /// @@ -98,6 +98,12 @@ pub enum GossipBlobError { /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty. BlobParentUnknown(Arc>), + /// Invalid kzg commitment inclusion proof + /// ## Peer scoring + /// + /// The blob sidecar is invalid and the peer is faulty + InvalidInclusionProof, + /// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple /// over gossip or no gossip sources. /// @@ -109,6 +115,42 @@ pub enum GossipBlobError { slot: Slot, index: u64, }, + + /// `Kzg` struct hasn't been initialized. This is an internal error. + /// + /// ## Peer scoring + /// + /// The peer isn't faulty, This is an internal error. + KzgNotInitialized, + + /// The kzg verification failed. + /// + /// ## Peer scoring + /// + /// The blob sidecar is invalid and the peer is faulty. + KzgError(kzg::Error), + + /// The kzg commitment inclusion proof failed. + /// + /// ## Peer scoring + /// + /// The blob sidecar is invalid + InclusionProof(MerkleTreeError), + + /// The pubkey cache timed out. + /// + /// ## Peer scoring + /// + /// The blob sidecar may be valid, this is an internal error. + PubkeyCacheTimeout, + + /// The block conflicts with finalization, no need to propagate. + /// + /// ## Peer scoring + /// + /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be + /// imported. + NotFinalizedDescendant { block_parent_root: Hash256 }, } impl std::fmt::Display for GossipBlobError { @@ -118,7 +160,7 @@ impl std::fmt::Display for GossipBlobError { write!( f, "BlobParentUnknown(parent_root:{})", - blob_sidecar.block_parent_root + blob_sidecar.block_parent_root() ) } other => write!(f, "{:?}", other), @@ -147,63 +189,168 @@ pub type GossipVerifiedBlobList = VariableList< /// the p2p network. #[derive(Debug)] pub struct GossipVerifiedBlob { - blob: SignedBlobSidecar, + block_root: Hash256, + blob: KzgVerifiedBlob, } impl GossipVerifiedBlob { pub fn new( - blob: SignedBlobSidecar, + blob: Arc>, + subnet_id: u64, chain: &BeaconChain, ) -> Result> { - let blob_index = blob.message.index; - validate_blob_sidecar_for_gossip(blob, blob_index, chain) + let header = blob.signed_block_header.clone(); + // We only process slashing info if the gossip verification failed + // since we do not process the blob any further in that case. + validate_blob_sidecar_for_gossip(blob, subnet_id, chain).map_err(|e| { + process_block_slash_info::<_, GossipBlobError>( + chain, + BlockSlashInfo::from_early_error_blob(header, e), + ) + }) } /// Construct a `GossipVerifiedBlob` that is assumed to be valid. /// /// This should ONLY be used for testing. 
- pub fn __assumed_valid(blob: SignedBlobSidecar) -> Self { - Self { blob } + pub fn __assumed_valid(blob: Arc>) -> Self { + Self { + block_root: blob.block_root(), + blob: KzgVerifiedBlob { blob }, + } } pub fn id(&self) -> BlobIdentifier { - self.blob.message.id() + BlobIdentifier { + block_root: self.block_root, + index: self.blob.blob_index(), + } } pub fn block_root(&self) -> Hash256 { - self.blob.message.block_root - } - pub fn to_blob(self) -> Arc> { - self.blob.message - } - pub fn as_blob(&self) -> &BlobSidecar { - &self.blob.message - } - pub fn signed_blob(&self) -> SignedBlobSidecar { - self.blob.clone() + self.block_root } pub fn slot(&self) -> Slot { - self.blob.message.slot + self.blob.blob.slot() } pub fn index(&self) -> u64 { - self.blob.message.index + self.blob.blob.index } pub fn kzg_commitment(&self) -> KzgCommitment { - self.blob.message.kzg_commitment + self.blob.blob.kzg_commitment } - pub fn proposer_index(&self) -> u64 { - self.blob.message.proposer_index + pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { + self.blob.blob.signed_block_header.clone() + } + pub fn block_proposer_index(&self) -> u64 { + self.blob.blob.block_proposer_index() + } + pub fn into_inner(self) -> KzgVerifiedBlob { + self.blob + } + pub fn as_blob(&self) -> &BlobSidecar { + self.blob.as_blob() + } + /// This is cheap as we're calling clone on an Arc + pub fn clone_blob(&self) -> Arc> { + self.blob.clone_blob() } } +/// Wrapper over a `BlobSidecar` for which we have completed kzg verification. +/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`. +#[derive(Debug, Derivative, Clone, Encode, Decode)] +#[derivative(PartialEq, Eq)] +#[ssz(struct_behaviour = "transparent")] +pub struct KzgVerifiedBlob { + blob: Arc>, +} + +impl PartialOrd for KzgVerifiedBlob { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for KzgVerifiedBlob { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.blob.cmp(&other.blob) + } +} + +impl KzgVerifiedBlob { + pub fn to_blob(self) -> Arc> { + self.blob + } + pub fn as_blob(&self) -> &BlobSidecar { + &self.blob + } + /// This is cheap as we're calling clone on an Arc + pub fn clone_blob(&self) -> Arc> { + self.blob.clone() + } + pub fn blob_index(&self) -> u64 { + self.blob.index + } +} + +#[cfg(test)] +impl KzgVerifiedBlob { + pub fn new(blob: BlobSidecar) -> Self { + Self { + blob: Arc::new(blob), + } + } +} + +/// Complete kzg verification for a `BlobSidecar`. +/// +/// Returns an error if the kzg verification check fails. +pub fn verify_kzg_for_blob( + blob: Arc>, + kzg: &Kzg, +) -> Result, KzgError> { + validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?; + + Ok(KzgVerifiedBlob { blob }) +} + +/// Complete kzg verification for a list of `BlobSidecar`s. +/// Returns an error if any of the `BlobSidecar`s fails kzg verification. +/// +/// Note: This function should be preferred over calling `verify_kzg_for_blob` +/// in a loop since this function kzg verifies a list of blobs more efficiently. 
+pub fn verify_kzg_for_blob_list( + blob_list: &BlobSidecarList, + kzg: &Kzg, +) -> Result<(), KzgError> { + let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list + .iter() + .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof))) + .unzip(); + validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) +} + pub fn validate_blob_sidecar_for_gossip( - signed_blob_sidecar: SignedBlobSidecar, + blob_sidecar: Arc>, subnet: u64, chain: &BeaconChain, ) -> Result, GossipBlobError> { - let blob_slot = signed_blob_sidecar.message.slot; - let blob_index = signed_blob_sidecar.message.index; - let block_parent_root = signed_blob_sidecar.message.block_parent_root; - let blob_proposer_index = signed_blob_sidecar.message.proposer_index; - let block_root = signed_blob_sidecar.message.block_root; + let blob_slot = blob_sidecar.slot(); + let blob_index = blob_sidecar.index; + let block_parent_root = blob_sidecar.block_parent_root(); + let blob_proposer_index = blob_sidecar.block_proposer_index(); + let block_root = blob_sidecar.block_root(); let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch()); + let signed_block_header = &blob_sidecar.signed_block_header; + + // This condition is not possible if we have received the blob from the network + // since we only subscribe to `MaxBlobsPerBlock` subnets over gossip network. + // We include this check only for completeness. + // Getting this error would imply something very wrong with our networking decoding logic. + if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { + return Err(GossipBlobError::InvalidSubnet { + expected: subnet, + received: blob_index, + }); + } // Verify that the blob_sidecar was received on the correct subnet. if blob_index != subnet { @@ -213,8 +360,6 @@ pub fn validate_blob_sidecar_for_gossip( }); } - let blob_root = get_blob_root(&signed_blob_sidecar); - // Verify that the sidecar is not from a future slot. let latest_permissible_slot = chain .slot_clock @@ -240,11 +385,12 @@ pub fn validate_blob_sidecar_for_gossip( }); } - // Verify that this is the first blob sidecar received for the (sidecar.block_root, sidecar.index) tuple + // Verify that this is the first blob sidecar received for the tuple: + // (block_header.slot, block_header.proposer_index, blob_sidecar.index) if chain .observed_blob_sidecars .read() - .is_known(&signed_blob_sidecar.message) + .is_known(&blob_sidecar) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? { return Err(GossipBlobError::RepeatBlob { @@ -254,18 +400,31 @@ pub fn validate_blob_sidecar_for_gossip( }); } + // Verify the inclusion proof in the sidecar + let _timer = metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION); + if !blob_sidecar + .verify_blob_sidecar_inclusion_proof() + .map_err(GossipBlobError::InclusionProof)? + { + return Err(GossipBlobError::InvalidInclusionProof); + } + drop(_timer); + + let fork_choice = chain.canonical_head.fork_choice_read_lock(); + // We have already verified that the blob is past finalization, so we can // just check fork choice for the block's parent. - let Some(parent_block) = chain - .canonical_head - .fork_choice_read_lock() - .get_block(&block_parent_root) - else { - return Err(GossipBlobError::BlobParentUnknown( - signed_blob_sidecar.message, - )); + let Some(parent_block) = fork_choice.get_block(&block_parent_root) else { + return Err(GossipBlobError::BlobParentUnknown(blob_sidecar)); }; + // Do not process a blob that does not descend from the finalized root. 
+ // We just loaded the parent_block, so we can be sure that it exists in fork choice. + if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) { + return Err(GossipBlobError::NotFinalizedDescendant { block_parent_root }); + } + drop(fork_choice); + if parent_block.slot >= blob_slot { return Err(GossipBlobError::BlobIsNotLaterThanParent { blob_slot, @@ -273,8 +432,6 @@ pub fn validate_blob_sidecar_for_gossip( }); } - // Note: We check that the proposer_index matches against the shuffling first to avoid - // signature verification against an invalid proposer_index. let proposer_shuffling_root = if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == blob_epoch { parent_block @@ -374,38 +531,26 @@ pub fn validate_blob_sidecar_for_gossip( .get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize) .ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?; + let fork = state.fork(); // Prime the proposer shuffling cache with the newly-learned value. chain.beacon_proposer_cache.lock().insert( blob_epoch, proposer_shuffling_root, proposers, - state.fork(), + fork, )?; - (proposer_index, state.fork()) + (proposer_index, fork) } }; - if proposer_index != blob_proposer_index as usize { - return Err(GossipBlobError::ProposerIndexMismatch { - sidecar: blob_proposer_index as usize, - local: proposer_index, - }); - } - - // Signature verification + // Signature verify the signed block header. let signature_is_valid = { - let pubkey_cache = chain - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) - .map_err(GossipBlobError::BeaconChainError)?; - + let pubkey_cache = + get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?; let pubkey = pubkey_cache .get(proposer_index) .ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?; - - signed_blob_sidecar.verify_signature( - Some(blob_root), + signed_block_header.verify_signature::( pubkey, &fork, chain.genesis_validators_root, @@ -414,7 +559,14 @@ pub fn validate_blob_sidecar_for_gossip( }; if !signature_is_valid { - return Err(GossipBlobError::ProposerSignatureInvalid); + return Err(GossipBlobError::ProposalSignatureInvalid); + } + + if proposer_index != blob_proposer_index as usize { + return Err(GossipBlobError::ProposerIndexMismatch { + sidecar: blob_proposer_index as usize, + local: proposer_index, + }); } // Now the signature is valid, store the proposal so we don't accept another blob sidecar @@ -431,7 +583,7 @@ pub fn validate_blob_sidecar_for_gossip( if chain .observed_blob_sidecars .write() - .observe_sidecar(&signed_blob_sidecar.message) + .observe_sidecar(&blob_sidecar) .map_err(|e| GossipBlobError::BeaconChainError(e.into()))? { return Err(GossipBlobError::RepeatBlob { @@ -441,106 +593,27 @@ pub fn validate_blob_sidecar_for_gossip( }); } + // Kzg verification for gossip blob sidecar + let kzg = chain + .kzg + .as_ref() + .ok_or(GossipBlobError::KzgNotInitialized)?; + let kzg_verified_blob = + verify_kzg_for_blob(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?; + Ok(GossipVerifiedBlob { - blob: signed_blob_sidecar, + block_root, + blob: kzg_verified_blob, }) } -/// Wrapper over a `BlobSidecar` for which we have completed kzg verification. -/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`. 
-#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] -#[ssz(struct_behaviour = "transparent")] -pub struct KzgVerifiedBlob { - blob: Arc>, -} - -impl PartialOrd for KzgVerifiedBlob { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for KzgVerifiedBlob { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.blob.cmp(&other.blob) - } -} - -impl KzgVerifiedBlob { - pub fn to_blob(self) -> Arc> { - self.blob - } - pub fn as_blob(&self) -> &BlobSidecar { - &self.blob - } - pub fn clone_blob(&self) -> Arc> { - self.blob.clone() - } - pub fn block_root(&self) -> Hash256 { - self.blob.block_root - } - pub fn blob_index(&self) -> u64 { - self.blob.index - } -} - -#[cfg(test)] -impl KzgVerifiedBlob { - pub fn new(blob: BlobSidecar) -> Self { - Self { - blob: Arc::new(blob), - } - } -} - -/// Complete kzg verification for a `GossipVerifiedBlob`. -/// -/// Returns an error if the kzg verification check fails. -pub fn verify_kzg_for_blob( - blob: Arc>, - kzg: &Kzg, -) -> Result, AvailabilityCheckError> { - let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); - if validate_blob::(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof) - .map_err(AvailabilityCheckError::Kzg)? - { - Ok(KzgVerifiedBlob { blob }) - } else { - Err(AvailabilityCheckError::KzgVerificationFailed) - } -} - -/// Complete kzg verification for a list of `BlobSidecar`s. -/// Returns an error if any of the `BlobSidecar`s fails kzg verification. -/// -/// Note: This function should be preferred over calling `verify_kzg_for_blob` -/// in a loop since this function kzg verifies a list of blobs more efficiently. -pub fn verify_kzg_for_blob_list( - blob_list: &BlobSidecarList, - kzg: &Kzg, -) -> Result<(), AvailabilityCheckError> { - let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); - let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list - .iter() - .map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof))) - .unzip(); - if validate_blobs::(kzg, commitments.as_slice(), blobs, proofs.as_slice()) - .map_err(AvailabilityCheckError::Kzg)? - { - Ok(()) - } else { - Err(AvailabilityCheckError::KzgVerificationFailed) - } -} - /// Returns the canonical root of the given `blob`. /// /// Use this function to ensure that we report the blob hashing time Prometheus metric. -pub fn get_blob_root(blob: &SignedBlobSidecar) -> Hash256 { +pub fn get_blob_root(blob: &BlobSidecar) -> Hash256 { let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT); - let blob_root = blob.message.tree_hash_root(); + let blob_root = blob.tree_hash_root(); metrics::stop_timer(blob_root_timer); diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index 484de841d..c5293bcb0 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -23,7 +23,7 @@ pub struct Timestamps { } // Helps arrange delay data so it is more relevant to metrics. -#[derive(Default)] +#[derive(Debug, Default)] pub struct BlockDelays { pub observed: Option, pub imported: Option, @@ -51,7 +51,7 @@ impl BlockDelays { // If the block was received via gossip, we can record the client type of the peer which sent us // the block. 
-#[derive(Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct BlockPeerInfo { pub id: Option, pub client: Option, @@ -80,6 +80,8 @@ pub struct BlockTimesCache { /// Helper methods to read from and write to the cache. impl BlockTimesCache { + /// Set the observation time for `block_root` to `timestamp` if `timestamp` is less than + /// any previous timestamp at which this block was observed. pub fn set_time_observed( &mut self, block_root: BlockRoot, @@ -92,11 +94,19 @@ impl BlockTimesCache { .cache .entry(block_root) .or_insert_with(|| BlockTimesCacheValue::new(slot)); - block_times.timestamps.observed = Some(timestamp); - block_times.peer_info = BlockPeerInfo { - id: peer_id, - client: peer_client, - }; + match block_times.timestamps.observed { + Some(existing_observation_time) if existing_observation_time <= timestamp => { + // Existing timestamp is earlier, do nothing. + } + _ => { + // No existing timestamp, or new timestamp is earlier. + block_times.timestamps.observed = Some(timestamp); + block_times.peer_info = BlockPeerInfo { + id: peer_id, + client: peer_client, + }; + } + } } pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { @@ -141,3 +151,71 @@ impl BlockTimesCache { .retain(|_, cache| cache.slot > current_slot.saturating_sub(64_u64)); } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn observed_time_uses_minimum() { + let mut cache = BlockTimesCache::default(); + + let block_root = Hash256::zero(); + let slot = Slot::new(100); + + let slot_start_time = Duration::from_secs(0); + + let ts1 = Duration::from_secs(5); + let ts2 = Duration::from_secs(6); + let ts3 = Duration::from_secs(4); + + let peer_info2 = BlockPeerInfo { + id: Some("peer2".to_string()), + client: Some("lighthouse".to_string()), + }; + + let peer_info3 = BlockPeerInfo { + id: Some("peer3".to_string()), + client: Some("prysm".to_string()), + }; + + cache.set_time_observed(block_root, slot, ts1, None, None); + + assert_eq!( + cache.get_block_delays(block_root, slot_start_time).observed, + Some(ts1) + ); + assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default()); + + // Second observation with higher timestamp should not override anything, even though it has + // superior peer info. + cache.set_time_observed( + block_root, + slot, + ts2, + peer_info2.id.clone(), + peer_info2.client.clone(), + ); + + assert_eq!( + cache.get_block_delays(block_root, slot_start_time).observed, + Some(ts1) + ); + assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default()); + + // Third observation with lower timestamp should override everything. 
+ cache.set_time_observed( + block_root, + slot, + ts3, + peer_info3.id.clone(), + peer_info3.client.clone(), + ); + + assert_eq!( + cache.get_block_delays(block_root, slot_start_time).observed, + Some(ts3) + ); + assert_eq!(cache.get_peer_info(block_root), peer_info3); + } +} diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 65cf7a728..e86ca85bb 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -70,7 +70,7 @@ use crate::{ metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; use derivative::Derivative; -use eth2::types::{EventKind, SignedBlockContents}; +use eth2::types::{EventKind, PublishBlockRequest}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; use parking_lot::RwLockReadGuard; @@ -95,15 +95,15 @@ use std::fs; use std::io::Write; use std::sync::Arc; use std::time::Duration; -use store::{Error as DBError, HotStateSummary, KeyValueStore, SignedBlobSidecarList, StoreOp}; +use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; -use types::ExecPayload; use types::{ BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; +use types::{BlobSidecar, ExecPayload}; pub const POS_PANDA_BANNER: &str = r#" ,,, ,,, ,,, ,,, @@ -507,7 +507,7 @@ pub enum BlockSlashInfo { } impl BlockSlashInfo> { - pub fn from_early_error(header: SignedBeaconBlockHeader, e: BlockError) -> Self { + pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError) -> Self { match e { BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), // `InvalidSignature` could indicate any signature in the block, so we want @@ -517,17 +517,28 @@ impl BlockSlashInfo> { } } +impl BlockSlashInfo> { + pub fn from_early_error_blob(header: SignedBeaconBlockHeader, e: GossipBlobError) -> Self { + match e { + GossipBlobError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e), + // `InvalidSignature` could indicate any signature in the block, so we want + // to recheck the proposer signature alone. + _ => BlockSlashInfo::SignatureNotChecked(header, e), + } + } +} + /// Process invalid blocks to see if they are suitable for the slasher. /// /// If no slasher is configured, this is a no-op. 
-fn process_block_slash_info( +pub(crate) fn process_block_slash_info( chain: &BeaconChain, - slash_info: BlockSlashInfo>, -) -> BlockError { + slash_info: BlockSlashInfo, +) -> TErr { if let Some(slasher) = chain.slasher.as_ref() { let (verified_header, error) = match slash_info { BlockSlashInfo::SignatureNotChecked(header, e) => { - if verify_header_signature(chain, &header).is_ok() { + if verify_header_signature::<_, TErr>(chain, &header).is_ok() { (header, e) } else { return e; @@ -673,7 +684,6 @@ pub trait IntoGossipVerifiedBlockContents: Sized { chain: &BeaconChain, ) -> Result, BlockContentsError>; fn inner_block(&self) -> &SignedBeaconBlock; - fn inner_blobs(&self) -> Option>; } impl IntoGossipVerifiedBlockContents for GossipVerifiedBlockContents { @@ -686,45 +696,40 @@ impl IntoGossipVerifiedBlockContents for GossipVerifiedB fn inner_block(&self) -> &SignedBeaconBlock { self.0.block.as_block() } - fn inner_blobs(&self) -> Option> { - self.1.as_ref().map(|blobs| { - VariableList::from( - blobs - .into_iter() - .map(GossipVerifiedBlob::signed_blob) - .collect::>(), - ) - }) - } } -impl IntoGossipVerifiedBlockContents for SignedBlockContents { +impl IntoGossipVerifiedBlockContents for PublishBlockRequest { fn into_gossip_verified_block( self, chain: &BeaconChain, ) -> Result, BlockContentsError> { let (block, blobs) = self.deconstruct(); - let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?; + let gossip_verified_blobs = blobs - .map(|blobs| { - Ok::<_, GossipBlobError>(VariableList::from( - blobs - .into_iter() - .map(|blob| GossipVerifiedBlob::new(blob, chain)) - .collect::, GossipBlobError>>()?, - )) + .map(|(kzg_proofs, blobs)| { + let mut gossip_verified_blobs = vec![]; + for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { + let _timer = + metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION); + let blob = BlobSidecar::new(i, blob, &block, *kzg_proof) + .map_err(BlockContentsError::SidecarError)?; + drop(_timer); + let gossip_verified_blob = + GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?; + gossip_verified_blobs.push(gossip_verified_blob); + } + let gossip_verified_blobs = VariableList::from(gossip_verified_blobs); + Ok::<_, BlockContentsError>(gossip_verified_blobs) }) .transpose()?; + let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?; + Ok((gossip_verified_block, gossip_verified_blobs)) } fn inner_block(&self) -> &SignedBeaconBlock { self.signed_block() } - - fn inner_blobs(&self) -> Option> { - self.blobs_cloned() - } } /// Implemented on types that can be converted into a `ExecutionPendingBlock`. @@ -745,7 +750,9 @@ pub trait IntoExecutionPendingBlock: Sized { } execution_pending }) - .map_err(|slash_info| process_block_slash_info(chain, slash_info)) + .map_err(|slash_info| { + process_block_slash_info::<_, BlockError>(chain, slash_info) + }) } /// Convert the block to fully-verified form while producing data to aid checking slashability. @@ -774,7 +781,10 @@ impl GossipVerifiedBlock { // and it could be a repeat proposal (a likely cause for slashing!). 
let header = block.signed_block_header(); Self::new_without_slasher_checks(block, chain).map_err(|e| { - process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e)) + process_block_slash_info::<_, BlockError>( + chain, + BlockSlashInfo::from_early_error_block(header, e), + ) }) } @@ -1055,7 +1065,8 @@ impl SignatureVerifiedBlock { chain: &BeaconChain, ) -> Result>> { let header = block.signed_block_header(); - Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e)) + Self::new(block, block_root, chain) + .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) } /// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify @@ -1109,7 +1120,7 @@ impl SignatureVerifiedBlock { ) -> Result>> { let header = from.block.signed_block_header(); Self::from_gossip_verified_block(from, chain) - .map_err(|e| BlockSlashInfo::from_early_error(header, e)) + .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) } pub fn block_root(&self) -> Hash256 { @@ -1908,28 +1919,45 @@ fn load_parent>( result } -/// This trait is used to unify `BlockError` and `BlobError` so -/// `cheap_state_advance_to_obtain_committees` can be re-used in gossip blob validation. -pub trait CheapStateAdvanceError: From + From + Debug { +/// This trait is used to unify `BlockError` and `GossipBlobError`. +pub trait BlockBlobError: From + From + Debug { fn not_later_than_parent_error(block_slot: Slot, state_slot: Slot) -> Self; + fn unknown_validator_error(validator_index: u64) -> Self; + fn proposer_signature_invalid() -> Self; } -impl CheapStateAdvanceError for BlockError { +impl BlockBlobError for BlockError { fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self { BlockError::BlockIsNotLaterThanParent { block_slot, parent_slot, } } + + fn unknown_validator_error(validator_index: u64) -> Self { + BlockError::UnknownValidator(validator_index) + } + + fn proposer_signature_invalid() -> Self { + BlockError::ProposalSignatureInvalid + } } -impl CheapStateAdvanceError for GossipBlobError { +impl BlockBlobError for GossipBlobError { fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self { GossipBlobError::BlobIsNotLaterThanParent { blob_slot, parent_slot, } } + + fn unknown_validator_error(validator_index: u64) -> Self { + GossipBlobError::UnknownValidator(validator_index) + } + + fn proposer_signature_invalid() -> Self { + GossipBlobError::ProposalSignatureInvalid + } } /// Performs a cheap (time-efficient) state advancement so the committees and proposer shuffling for @@ -1943,7 +1971,7 @@ impl CheapStateAdvanceError for GossipBlobError { /// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply /// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never /// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build). -pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateAdvanceError>( +pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobError>( state: &'a mut BeaconState, state_root_opt: Option, block_slot: Slot, @@ -1979,12 +2007,11 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateA /// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`. 
pub fn get_validator_pubkey_cache( chain: &BeaconChain, -) -> Result>, BlockError> { +) -> Result>, BeaconChainError> { chain .validator_pubkey_cache .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) .ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout) - .map_err(BlockError::BeaconChainError) } /// Produces an _empty_ `BlockSignatureVerifier`. @@ -2025,14 +2052,14 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>( /// Verify that `header` was signed with a valid signature from its proposer. /// /// Return `Ok(())` if the signature is valid, and an `Err` otherwise. -fn verify_header_signature( +fn verify_header_signature( chain: &BeaconChain, header: &SignedBeaconBlockHeader, -) -> Result<(), BlockError> { +) -> Result<(), Err> { let proposer_pubkey = get_validator_pubkey_cache(chain)? .get(header.message.proposer_index as usize) .cloned() - .ok_or(BlockError::UnknownValidator(header.message.proposer_index))?; + .ok_or(Err::unknown_validator_error(header.message.proposer_index))?; let head_fork = chain.canonical_head.cached_head().head_fork(); if header.verify_signature::( @@ -2043,7 +2070,7 @@ fn verify_header_signature( ) { Ok(()) } else { - Err(BlockError::ProposalSignatureInvalid) + Err(Err::proposer_signature_invalid()) } } diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 9cd853ba8..a6840ed76 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -8,7 +8,7 @@ use derivative::Derivative; use ssz_types::VariableList; use state_processing::ConsensusContext; use std::sync::Arc; -use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::blob_sidecar::{BlobIdentifier, BlobSidecarError, FixedBlobSidecarList}; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -98,13 +98,6 @@ impl RpcBlock { return Err(AvailabilityCheckError::MissingBlobs); } for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) { - let blob_block_root = blob.block_root; - if blob_block_root != block_root { - return Err(AvailabilityCheckError::InconsistentBlobBlockRoots { - block_root, - blob_block_root, - }); - } let blob_commitment = blob.kzg_commitment; if blob_commitment != block_commitment { return Err(AvailabilityCheckError::KzgCommitmentMismatch { @@ -309,6 +302,7 @@ pub type GossipVerifiedBlockContents = pub enum BlockContentsError { BlockError(BlockError), BlobError(GossipBlobError), + SidecarError(BlobSidecarError), } impl From> for BlockContentsError { @@ -332,6 +326,9 @@ impl std::fmt::Display for BlockContentsError { BlockContentsError::BlobError(err) => { write!(f, "BlobError({})", err) } + BlockContentsError::SidecarError(err) => { + write!(f, "SidecarError({:?})", err) + } } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index ad328077d..2fcb3b7a9 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -200,7 +200,9 @@ impl DataAvailabilityChecker { let mut verified_blobs = vec![]; if let Some(kzg) = self.kzg.as_ref() { for blob in blobs.iter().flatten() { - verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?) 
+ verified_blobs.push( + verify_kzg_for_blob(blob.clone(), kzg).map_err(AvailabilityCheckError::Kzg)?, + ); } } else { return Err(AvailabilityCheckError::KzgNotInitialized); @@ -209,7 +211,6 @@ impl DataAvailabilityChecker { .put_kzg_verified_blobs(block_root, verified_blobs) } - /// This first validates the KZG commitments included in the blob sidecar. /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. /// Otherwise cache the blob sidecar. @@ -219,15 +220,8 @@ impl DataAvailabilityChecker { &self, gossip_blob: GossipVerifiedBlob, ) -> Result, AvailabilityCheckError> { - // Verify the KZG commitments. - let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() { - verify_kzg_for_blob(gossip_blob.to_blob(), kzg)? - } else { - return Err(AvailabilityCheckError::KzgNotInitialized); - }; - self.availability_cache - .put_kzg_verified_blobs(kzg_verified_blob.block_root(), vec![kzg_verified_blob]) + .put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()]) } /// Check if we have all the blobs for a block. Returns `Availability` which has information @@ -268,7 +262,8 @@ impl DataAvailabilityChecker { .kzg .as_ref() .ok_or(AvailabilityCheckError::KzgNotInitialized)?; - verify_kzg_for_blob_list(&blob_list, kzg)?; + verify_kzg_for_blob_list(&blob_list, kzg) + .map_err(AvailabilityCheckError::Kzg)?; Some(blob_list) } else { None @@ -375,8 +370,8 @@ impl DataAvailabilityChecker { block_root: Hash256, blob: &GossipVerifiedBlob, ) { - let index = blob.as_blob().index; - let commitment = blob.as_blob().kzg_commitment; + let index = blob.index(); + let commitment = blob.kzg_commitment(); self.processing_cache .write() .entry(block_root) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index b2979f2bf..0804fe3b9 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -16,10 +16,6 @@ pub enum Error { BlobIndexInvalid(u64), StoreError(store::Error), DecodeError(ssz::DecodeError), - InconsistentBlobBlockRoots { - block_root: Hash256, - blob_block_root: Hash256, - }, ParentStateMissing(Hash256), BlockReplayError(state_processing::BlockReplayError), RebuildingStateCaches(BeaconStateError), @@ -47,8 +43,7 @@ impl Error { Error::Kzg(_) | Error::BlobIndexInvalid(_) | Error::KzgCommitmentMismatch { .. } - | Error::KzgVerificationFailed - | Error::InconsistentBlobBlockRoots { .. 
} => ErrorCategory::Malicious, + | Error::KzgVerificationFailed => ErrorCategory::Malicious, } } } @@ -76,3 +71,9 @@ impl From for Error { Self::BlockReplayError(value) } } + +impl From for Error { + fn from(value: KzgError) -> Self { + Self::Kzg(value) + } +} diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 6033293b8..36d7c2aca 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -125,7 +125,10 @@ impl PendingComponents { for maybe_blob in self.verified_blobs.iter() { if maybe_blob.is_some() { return maybe_blob.as_ref().map(|kzg_verified_blob| { - kzg_verified_blob.as_blob().slot.epoch(T::slots_per_epoch()) + kzg_verified_blob + .as_blob() + .slot() + .epoch(T::slots_per_epoch()) }); } } @@ -418,15 +421,7 @@ impl OverflowLRUCache { ) -> Result, AvailabilityCheckError> { let mut fixed_blobs = FixedVector::default(); - // Initial check to ensure all provided blobs have a consistent block root. for blob in kzg_verified_blobs { - let blob_block_root = blob.block_root(); - if blob_block_root != block_root { - return Err(AvailabilityCheckError::InconsistentBlobBlockRoots { - block_root, - blob_block_root, - }); - } if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) { *blob_opt = Some(blob); } @@ -651,7 +646,7 @@ impl OverflowLRUCache { OverflowKey::Blob(_, _) => { KzgVerifiedBlob::::from_ssz_bytes(value_bytes.as_slice())? .as_blob() - .slot + .slot() .epoch(T::EthSpec::slots_per_epoch()) } }; @@ -743,9 +738,7 @@ impl ssz::Decode for OverflowKey { mod test { use super::*; use crate::{ - blob_verification::{ - validate_blob_sidecar_for_gossip, verify_kzg_for_blob, GossipVerifiedBlob, - }, + blob_verification::{validate_blob_sidecar_for_gossip, GossipVerifiedBlob}, block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, data_availability_checker::STATE_LRU_CAPACITY, @@ -926,12 +919,13 @@ mod test { } info!(log, "done printing kzg commitments"); - let gossip_verified_blobs = if let Some(blobs) = maybe_blobs { - Vec::from(blobs) + let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs { + let sidecars = BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap(); + Vec::from(sidecars) .into_iter() - .map(|signed_blob| { - let subnet = signed_blob.message.index; - validate_blob_sidecar_for_gossip(signed_blob, subnet, &harness.chain) + .map(|sidecar| { + let subnet = sidecar.index; + validate_blob_sidecar_for_gossip(sidecar, subnet, &harness.chain) .expect("should validate blob") }) .collect() @@ -1036,17 +1030,9 @@ mod test { ); } - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); let mut kzg_verified_blobs = Vec::new(); for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); @@ -1072,9 +1058,7 @@ mod test { let root = pending_block.import_data.block_root; let mut kzg_verified_blobs = vec![]; for gossip_blob in blobs { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) 
- .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); @@ -1198,20 +1182,11 @@ mod test { assert!(cache.critical.read().store_keys.contains(&roots[0])); assert!(cache.critical.read().store_keys.contains(&roots[1])); - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); - let blobs_0 = pending_blobs.pop_front().expect("should have blobs"); let expected_blobs = blobs_0.len(); let mut kzg_verified_blobs = vec![]; for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache .put_kzg_verified_blobs(roots[0], kzg_verified_blobs.clone()) .expect("should put blob"); @@ -1278,13 +1253,6 @@ mod test { pending_blobs.push_back(blobs); } - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); - for _ in 0..(n_epochs * capacity) { let pending_block = pending_blocks.pop_front().expect("should have block"); let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs"); @@ -1295,9 +1263,7 @@ mod test { let one_blob = pending_block_blobs .pop() .expect("should have at least one blob"); - let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - let kzg_verified_blobs = vec![kzg_verified_blob]; + let kzg_verified_blobs = vec![one_blob.into_inner()]; // generate random boolean let block_first = (rand::random::() % 2) == 0; if block_first { @@ -1418,13 +1384,6 @@ mod test { pending_blobs.push_back(blobs); } - let kzg = harness - .chain - .kzg - .as_ref() - .cloned() - .expect("kzg should exist"); - let mut remaining_blobs = HashMap::new(); for _ in 0..(n_epochs * capacity) { let pending_block = pending_blocks.pop_front().expect("should have block"); @@ -1436,9 +1395,7 @@ mod test { let one_blob = pending_block_blobs .pop() .expect("should have at least one blob"); - let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - let kzg_verified_blobs = vec![kzg_verified_blob]; + let kzg_verified_blobs = vec![one_blob.into_inner()]; // generate random boolean let block_first = (rand::random::() % 2) == 0; if block_first { @@ -1551,9 +1508,7 @@ mod test { let additional_blobs = blobs.len(); let mut kzg_verified_blobs = vec![]; for (i, gossip_blob) in blobs.into_iter().enumerate() { - let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref()) - .expect("kzg should verify"); - kzg_verified_blobs.push(kzg_verified_blob); + kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = recovered_cache .put_kzg_verified_blobs(root, kzg_verified_blobs.clone()) .expect("should put blob"); diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 9f5186f31..924cc2652 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -4,7 +4,7 @@ use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof}; /// Converts a blob ssz List object to an array to be used with the kzg /// crypto library. 
fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result { - KzgBlob::from_bytes(blob.as_ref()) + KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into) } /// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. @@ -13,7 +13,8 @@ pub fn validate_blob( blob: &Blob, kzg_commitment: KzgCommitment, kzg_proof: KzgProof, -) -> Result { +) -> Result<(), KzgError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES); let kzg_blob = ssz_blob_to_crypto_blob::(blob)?; kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } @@ -24,7 +25,8 @@ pub fn validate_blobs( expected_kzg_commitments: &[KzgCommitment], blobs: Vec<&Blob>, kzg_proofs: &[KzgProof], -) -> Result { +) -> Result<(), KzgError> { + let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES); let blobs = blobs .into_iter() .map(|blob| ssz_blob_to_crypto_blob::(blob)) diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e2d37078a..8edb7b4fc 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -57,7 +57,7 @@ pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse, - BeaconBlockResponseType, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON, INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 0fe68ba19..ca04366b0 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1004,6 +1004,14 @@ lazy_static! { "beacon_blobs_sidecar_gossip_verification_seconds", "Full runtime of blob sidecars gossip verification" ); + pub static ref BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION: Result = try_create_histogram( + "blob_sidecar_inclusion_proof_verification_seconds", + "Time taken to verify blob sidecar inclusion proof" + ); + pub static ref BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: Result = try_create_histogram( + "blob_sidecar_inclusion_proof_computation_seconds", + "Time taken to compute blob sidecar inclusion proof" + ); } // Fifth lazy-static block is used to account for macro recursion limit. diff --git a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs index f16f38bad..4f8496144 100644 --- a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs @@ -5,8 +5,7 @@ use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use std::sync::Arc; -use types::{BlobSidecar, EthSpec, Hash256, Slot}; +use types::{BlobSidecar, EthSpec, Slot}; #[derive(Debug, PartialEq)] pub enum Error { @@ -29,8 +28,8 @@ pub enum Error { /// like checking the proposer signature. pub struct ObservedBlobSidecars { finalized_slot: Slot, - /// Stores all received blob indices for a given `(Root, Slot)` tuple. - items: HashMap<(Hash256, Slot), HashSet>, + /// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple. 
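Re-keying this cache from `(block_root, slot)` to `(proposer_index, slot)` is what lets gossip treat two sidecars published by the same proposer for the same slot and blob index as duplicates, even if they commit to different blocks. A standalone sketch of that dedup rule with deliberately simplified types (plain `u64` slots, no finalization pruning):

    use std::collections::{HashMap, HashSet};

    #[derive(Default)]
    struct SeenSidecars {
        /// (proposer_index, slot) -> blob indices already observed.
        items: HashMap<(u64, u64), HashSet<u64>>,
    }

    impl SeenSidecars {
        /// Returns `true` only the first time a (proposer, slot, blob index) triple is seen.
        fn observe(&mut self, proposer_index: u64, slot: u64, blob_index: u64) -> bool {
            self.items
                .entry((proposer_index, slot))
                .or_default()
                .insert(blob_index)
        }
    }
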
+ items: HashMap<(u64, Slot), HashSet>, _phantom: PhantomData, } @@ -46,16 +45,16 @@ impl Default for ObservedBlobSidecars { } impl ObservedBlobSidecars { - /// Observe the `blob_sidecar` at (`blob_sidecar.block_root, blob_sidecar.slot`). + /// Observe the `blob_sidecar` at (`blob_sidecar.block_proposer_index, blob_sidecar.slot`). /// This will update `self` so future calls to it indicate that this `blob_sidecar` is known. /// /// The supplied `blob_sidecar` **MUST** have completed proposer signature verification. - pub fn observe_sidecar(&mut self, blob_sidecar: &Arc>) -> Result { + pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar) -> Result { self.sanitize_blob_sidecar(blob_sidecar)?; let did_not_exist = self .items - .entry((blob_sidecar.block_root, blob_sidecar.slot)) + .entry((blob_sidecar.block_proposer_index(), blob_sidecar.slot())) .or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block())) .insert(blob_sidecar.index); @@ -63,23 +62,23 @@ impl ObservedBlobSidecars { } /// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window. - pub fn is_known(&self, blob_sidecar: &Arc>) -> Result { + pub fn is_known(&self, blob_sidecar: &BlobSidecar) -> Result { self.sanitize_blob_sidecar(blob_sidecar)?; let is_known = self .items - .get(&(blob_sidecar.block_root, blob_sidecar.slot)) + .get(&(blob_sidecar.block_proposer_index(), blob_sidecar.slot())) .map_or(false, |set| set.contains(&blob_sidecar.index)); Ok(is_known) } - fn sanitize_blob_sidecar(&self, blob_sidecar: &Arc>) -> Result<(), Error> { + fn sanitize_blob_sidecar(&self, blob_sidecar: &BlobSidecar) -> Result<(), Error> { if blob_sidecar.index >= T::max_blobs_per_block() as u64 { return Err(Error::InvalidBlobIndex(blob_sidecar.index)); } let finalized_slot = self.finalized_slot; - if finalized_slot > 0 && blob_sidecar.slot <= finalized_slot { + if finalized_slot > 0 && blob_sidecar.slot() <= finalized_slot { return Err(Error::FinalizedBlob { - slot: blob_sidecar.slot, + slot: blob_sidecar.slot(), finalized_slot, }); } @@ -101,14 +100,15 @@ impl ObservedBlobSidecars { #[cfg(test)] mod tests { use super::*; - use types::{BlobSidecar, Hash256, MainnetEthSpec}; + use std::sync::Arc; + use types::{BlobSidecar, MainnetEthSpec}; type E = MainnetEthSpec; - fn get_blob_sidecar(slot: u64, block_root: Hash256, index: u64) -> Arc> { + fn get_blob_sidecar(slot: u64, proposer_index: u64, index: u64) -> Arc> { let mut blob_sidecar = BlobSidecar::empty(); - blob_sidecar.block_root = block_root; - blob_sidecar.slot = slot.into(); + blob_sidecar.signed_block_header.message.slot = slot.into(); + blob_sidecar.signed_block_header.message.proposer_index = proposer_index; blob_sidecar.index = index; Arc::new(blob_sidecar) } @@ -121,8 +121,8 @@ mod tests { assert_eq!(cache.items.len(), 0, "no slots should be present"); // Slot 0, index 0 - let block_root_a = Hash256::random(); - let sidecar_a = get_blob_sidecar(0, block_root_a, 0); + let proposer_index_a = 420; + let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); assert_eq!( cache.observe_sidecar(&sidecar_a), @@ -138,12 +138,12 @@ mod tests { assert_eq!( cache.items.len(), 1, - "only one (slot, root) tuple should be present" + "only one (validator_index, slot) tuple should be present" ); assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -161,7 +161,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + 
.get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -185,7 +185,7 @@ mod tests { */ // First slot of finalized epoch - let block_b = get_blob_sidecar(E::slots_per_epoch(), Hash256::random(), 0); + let block_b = get_blob_sidecar(E::slots_per_epoch(), 419, 0); assert_eq!( cache.observe_sidecar(&block_b), @@ -205,8 +205,8 @@ mod tests { let three_epochs = E::slots_per_epoch() * 3; // First slot of finalized epoch - let block_root_b = Hash256::random(); - let block_b = get_blob_sidecar(three_epochs, block_root_b, 0); + let proposer_index_b = 421; + let block_b = get_blob_sidecar(three_epochs, proposer_index_b, 0); assert_eq!( cache.observe_sidecar(&block_b), @@ -218,7 +218,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_b, Slot::new(three_epochs))) + .get(&(proposer_index_b, Slot::new(three_epochs))) .expect("the three epochs slot should be present") .len(), 1, @@ -242,7 +242,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_b, Slot::new(three_epochs))) + .get(&(proposer_index_b, Slot::new(three_epochs))) .expect("the three epochs slot should be present") .len(), 1, @@ -255,8 +255,8 @@ mod tests { let mut cache = ObservedBlobSidecars::default(); // Slot 0, index 0 - let block_root_a = Hash256::random(); - let sidecar_a = get_blob_sidecar(0, block_root_a, 0); + let proposer_index_a = 420; + let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0); assert_eq!( cache.is_known(&sidecar_a), @@ -287,7 +287,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -296,8 +296,8 @@ mod tests { // Slot 1, proposer 0 - let block_root_b = Hash256::random(); - let sidecar_b = get_blob_sidecar(1, block_root_b, 0); + let proposer_index_b = 421; + let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0); assert_eq!( cache.is_known(&sidecar_b), @@ -325,7 +325,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 1, @@ -334,7 +334,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_b, Slot::new(1))) + .get(&(proposer_index_b, Slot::new(1))) .expect("slot zero should be present") .len(), 1, @@ -342,7 +342,7 @@ mod tests { ); // Slot 0, index 1 - let sidecar_c = get_blob_sidecar(0, block_root_a, 1); + let sidecar_c = get_blob_sidecar(0, proposer_index_a, 1); assert_eq!( cache.is_known(&sidecar_c), @@ -370,7 +370,7 @@ mod tests { assert_eq!( cache .items - .get(&(block_root_a, Slot::new(0))) + .get(&(proposer_index_a, Slot::new(0))) .expect("slot zero should be present") .len(), 2, @@ -379,7 +379,7 @@ mod tests { // Try adding an out of bounds index let invalid_index = E::max_blobs_per_block() as u64; - let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index); + let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index); assert_eq!( cache.observe_sidecar(&sidecar_d), Err(Error::InvalidBlobIndex(invalid_index)), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 23af0c812..eb73478de 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,7 +1,7 @@ use crate::block_verification_types::{AsBlock, RpcBlock}; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; -use crate::BeaconBlockResponseType; +use 
crate::BeaconBlockResponseWrapper; pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, @@ -33,8 +33,8 @@ use int_to_bytes::int_to_bytes32; use kzg::{Kzg, TrustedSetup}; use merkle_proof::MerkleTree; use operation_pool::ReceivedPreCapella; +use parking_lot::Mutex; use parking_lot::RwLockWriteGuard; -use parking_lot::{Mutex, RwLock}; use rand::rngs::StdRng; use rand::Rng; use rand::SeedableRng; @@ -52,7 +52,6 @@ use state_processing::{ use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; -use std::marker::PhantomData; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -567,7 +566,6 @@ where runtime: self.runtime, mock_execution_layer: self.mock_execution_layer, mock_builder: None, - blob_signature_cache: <_>::default(), rng: make_rng(), } } @@ -623,29 +621,9 @@ pub struct BeaconChainHarness { pub mock_execution_layer: Option>, pub mock_builder: Option>>, - /// Cache for blob signature because we don't need them for import, but we do need them - /// to test gossip validation. We always make them during block production but drop them - /// before storing them in the db. - pub blob_signature_cache: Arc>>, - pub rng: Mutex, } -#[derive(Clone, Debug, Hash, Eq, PartialEq)] -pub struct BlobSignatureKey { - block_root: Hash256, - blob_index: u64, -} - -impl BlobSignatureKey { - pub fn new(block_root: Hash256, blob_index: u64) -> Self { - Self { - block_root, - blob_index, - } - } -} - pub type CommitteeAttestations = Vec<(Attestation, SubnetId)>; pub type HarnessAttestations = Vec<(CommitteeAttestations, Option>)>; @@ -845,28 +823,9 @@ where &self, state: BeaconState, slot: Slot, - ) -> ( - SignedBlockContentsTuple>, - BeaconState, - ) { + ) -> (SignedBlindedBeaconBlock, BeaconState) { let (unblinded, new_state) = self.make_block(state, slot).await; - let maybe_blinded_blob_sidecars = unblinded.1.map(|blob_sidecar_list| { - VariableList::new( - blob_sidecar_list - .into_iter() - .map(|blob_sidecar| { - let blinded_sidecar: BlindedBlobSidecar = blob_sidecar.message.into(); - SignedSidecar { - message: Arc::new(blinded_sidecar), - signature: blob_sidecar.signature, - _phantom: PhantomData, - } - }) - .collect(), - ) - .unwrap() - }); - ((unblinded.0.into(), maybe_blinded_blob_sidecars), new_state) + (unblinded.0.into(), new_state) } /// Returns a newly created block, signed by the proposer for the given slot. 
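The harness hunks below repeatedly destructure the block-production result into a block plus an optional `(proofs, blobs)` pair, and only build full sidecars (signed header plus inclusion proof) when a test actually imports them. A hedged sketch of that conversion; the tuple shape is an assumption inferred from the destructuring, not something spelled out in this hunk:

    // Assumed shape: (SignedBeaconBlock<E>, Option<(KzgProofs<E>, BlobsList<E>)>)
    let (block, blob_items) = block_contents;
    let sidecars = blob_items
        .map(|(kzg_proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, kzg_proofs))
        .transpose()
        .expect("proofs, blobs and block commitments are consistent in tests");
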
@@ -874,7 +833,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBlockContentsTuple>, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -892,7 +851,7 @@ where let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); - let BeaconBlockResponseType::Full(block_response) = self + let BeaconBlockResponseWrapper::Full(block_response) = self .chain .produce_block_on_state( state, @@ -916,17 +875,12 @@ where &self.spec, ); - let block_contents: SignedBlockContentsTuple> = match &signed_block { + let block_contents: SignedBlockContentsTuple = match &signed_block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Merge(_) | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) => ( - signed_block, - block_response - .maybe_side_car - .map(|blobs| self.sign_blobs(blobs, &block_response.state, proposer_index)), - ), + SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items), }; (block_contents, block_response.state) @@ -938,7 +892,7 @@ where &self, mut state: BeaconState, slot: Slot, - ) -> (SignedBlockContentsTuple>, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -958,7 +912,7 @@ where let pre_state = state.clone(); - let BeaconBlockResponseType::Full(block_response) = self + let BeaconBlockResponseWrapper::Full(block_response) = self .chain .produce_block_on_state( state, @@ -982,37 +936,12 @@ where &self.spec, ); - let block_contents: SignedBlockContentsTuple> = match &signed_block { + let block_contents: SignedBlockContentsTuple = match &signed_block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Merge(_) | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) => { - if let Some(blobs) = block_response.maybe_side_car { - let signed_blobs: SignedSidecarList> = Vec::from(blobs) - .into_iter() - .map(|blob| { - blob.sign( - &self.validator_keypairs[proposer_index].sk, - &block_response.state.fork(), - block_response.state.genesis_validators_root(), - &self.spec, - ) - }) - .collect::>() - .into(); - let mut guard = self.blob_signature_cache.write(); - for blob in &signed_blobs { - guard.insert( - BlobSignatureKey::new(blob.message.block_root, blob.message.index), - blob.signature.clone(), - ); - } - (signed_block, Some(signed_blobs)) - } else { - (signed_block, None) - } - } + SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items), }; (block_contents, pre_state) } @@ -1051,35 +980,6 @@ where ) } - /// Sign blobs, and cache their signatures. - pub fn sign_blobs( - &self, - blobs: BlobSidecarList, - state: &BeaconState, - proposer_index: usize, - ) -> SignedSidecarList> { - let signed_blobs: SignedSidecarList> = Vec::from(blobs) - .into_iter() - .map(|blob| { - blob.sign( - &self.validator_keypairs[proposer_index].sk, - &state.fork(), - state.genesis_validators_root(), - &self.spec, - ) - }) - .collect::>() - .into(); - let mut guard = self.blob_signature_cache.write(); - for blob in &signed_blobs { - guard.insert( - BlobSignatureKey::new(blob.message.block_root, blob.message.index), - blob.signature.clone(), - ); - } - signed_blobs - } - /// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to /// `beacon_block_root`. 
The provided `state` should match the `block.state_root` for the /// `block` identified by `beacon_block_root`. @@ -1837,7 +1737,7 @@ where state: BeaconState, slot: Slot, block_modifier: impl FnOnce(&mut BeaconBlock), - ) -> (SignedBlockContentsTuple>, BeaconState) { + ) -> (SignedBlockContentsTuple, BeaconState) { assert_ne!(slot, 0, "can't produce a block at slot 0"); assert!(slot >= state.slot()); @@ -1935,24 +1835,20 @@ where &self, slot: Slot, block_root: Hash256, - block_contents: SignedBlockContentsTuple>, + block_contents: SignedBlockContentsTuple, ) -> Result> { self.set_current_slot(slot); - let (block, blobs) = block_contents; - // Note: we are just dropping signatures here and skipping signature verification. - let blobs_without_signatures = blobs.map(|blobs| { - VariableList::from( - blobs - .into_iter() - .map(|blob| blob.message) - .collect::>(), - ) - }); + let (block, blob_items) = block_contents; + + let sidecars = blob_items + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .transpose() + .unwrap(); let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(block), sidecars).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -1965,24 +1861,20 @@ where pub async fn process_block_result( &self, - block_contents: SignedBlockContentsTuple>, + block_contents: SignedBlockContentsTuple, ) -> Result> { - let (block, blobs) = block_contents; - // Note: we are just dropping signatures here and skipping signature verification. - let blobs_without_signatures = blobs.map(|blobs| { - VariableList::from( - blobs - .into_iter() - .map(|blob| blob.message) - .collect::>(), - ) - }); + let (block, blob_items) = block_contents; + + let sidecars = blob_items + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .transpose() + .unwrap(); let block_root = block.canonical_root(); let block_hash: SignedBeaconBlockHash = self .chain .process_block( block_root, - RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(block), sidecars).unwrap(), NotifyExecutionLayer::Yes, || Ok(()), ) @@ -2051,7 +1943,7 @@ where ) -> Result< ( SignedBeaconBlockHash, - SignedBlockContentsTuple>, + SignedBlockContentsTuple, BeaconState, ), BlockError, @@ -2603,8 +2495,6 @@ pub fn generate_rand_block_and_blobs( blobs, } = bundle; - let block_root = block.canonical_root(); - for (index, ((blob, kzg_commitment), kzg_proof)) in blobs .into_iter() .zip(commitments.into_iter()) @@ -2612,14 +2502,16 @@ pub fn generate_rand_block_and_blobs( .enumerate() { blob_sidecars.push(BlobSidecar { - block_root, index: index as u64, - slot: block.slot(), - block_parent_root: block.parent_root(), - proposer_index: block.message().proposer_index(), blob: blob.clone(), kzg_commitment, kzg_proof, + signed_block_header: block.signed_block_header(), + kzg_commitment_inclusion_proof: block + .message() + .body() + .kzg_commitment_merkle_proof(index) + .unwrap(), }); } } diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 3ac398071..4344013b3 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -1,7 +1,6 @@ -#![cfg(not(debug_assertions))] +// #![cfg(not(debug_assertions))] use 
beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; -use beacon_chain::test_utils::BlobSignatureKey; use beacon_chain::{ test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock, @@ -77,10 +76,8 @@ async fn get_chain_segment() -> (Vec>, Vec ( - Vec>, - Vec, ::MaxBlobsPerBlock>>>, -) { +async fn get_chain_segment_with_blob_sidecars( +) -> (Vec>, Vec>>) { let harness = get_harness(VALIDATOR_COUNT); harness @@ -111,27 +108,11 @@ async fn get_chain_segment_with_signed_blobs() -> ( beacon_block: Arc::new(full_block), beacon_state: snapshot.beacon_state, }); - let signed_blobs = harness + let blob_sidecars = harness .chain .get_blobs(&snapshot.beacon_block_root) - .unwrap() - .into_iter() - .map(|blob| { - let block_root = blob.block_root; - let blob_index = blob.index; - SignedBlobSidecar { - message: blob, - signature: harness - .blob_signature_cache - .read() - .get(&BlobSignatureKey::new(block_root, blob_index)) - .unwrap() - .clone(), - _phantom: PhantomData, - } - }) - .collect::>(); - segment_blobs.push(Some(VariableList::from(signed_blobs))) + .unwrap(); + segment_blobs.push(Some(blob_sidecars)) } (segment, segment_blobs) } @@ -159,7 +140,7 @@ fn chain_segment_blocks( ) -> Vec> { chain_segment .iter() - .zip(blobs.into_iter()) + .zip(blobs.iter()) .map(|(snapshot, blobs)| { RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap() }) @@ -214,34 +195,30 @@ fn update_parent_roots( let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct(); *block.parent_root_mut() = root; let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature)); - let new_child_root = new_child.canonical_root(); - child.beacon_block = new_child; if let Some(blobs) = child_blobs { - update_blob_roots(new_child_root, blobs); + update_blob_signed_header(&new_child, blobs); } + child.beacon_block = new_child; } } } -fn update_blob_roots(block_root: Hash256, blobs: &mut BlobSidecarList) { +fn update_blob_signed_header( + signed_block: &SignedBeaconBlock, + blobs: &mut BlobSidecarList, +) { for old_blob_sidecar in blobs.iter_mut() { - let index = old_blob_sidecar.index; - let slot = old_blob_sidecar.slot; - let block_parent_root = old_blob_sidecar.block_parent_root; - let proposer_index = old_blob_sidecar.proposer_index; - let blob = old_blob_sidecar.blob.clone(); - let kzg_commitment = old_blob_sidecar.kzg_commitment; - let kzg_proof = old_blob_sidecar.kzg_proof; - let new_blob = Arc::new(BlobSidecar:: { - block_root, - index, - slot, - block_parent_root, - proposer_index, - blob, - kzg_commitment, - kzg_proof, + index: old_blob_sidecar.index, + blob: old_blob_sidecar.blob.clone(), + kzg_commitment: old_blob_sidecar.kzg_commitment, + kzg_proof: old_blob_sidecar.kzg_proof, + signed_block_header: signed_block.signed_block_header(), + kzg_commitment_inclusion_proof: signed_block + .message() + .body() + .kzg_commitment_merkle_proof(old_blob_sidecar.index as usize) + .unwrap(), }); *old_blob_sidecar = new_blob; } @@ -253,7 +230,6 @@ async fn chain_segment_full_segment() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); harness @@ -292,7 +268,6 @@ async fn chain_segment_varying_chunk_size() { let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = 
chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); harness @@ -334,7 +309,6 @@ async fn chain_segment_non_linear_parent_roots() { */ let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); blocks.remove(2); @@ -355,7 +329,6 @@ async fn chain_segment_non_linear_parent_roots() { */ let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); @@ -393,7 +366,6 @@ async fn chain_segment_non_linear_slots() { let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = Slot::new(0); @@ -420,7 +392,6 @@ async fn chain_segment_non_linear_slots() { let mut blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() - .map(|block| block.into()) .collect(); let (mut block, signature) = blocks[3].as_block().clone().deconstruct(); *block.slot_mut() = blocks[2].slot(); @@ -879,7 +850,7 @@ fn unwrap_err(result: Result) -> E { #[tokio::test] async fn block_gossip_verification() { let harness = get_harness(VALIDATOR_COUNT); - let (chain_segment, chain_segment_blobs) = get_chain_segment_with_signed_blobs().await; + let (chain_segment, chain_segment_blobs) = get_chain_segment_with_blob_sidecars().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -909,12 +880,12 @@ async fn block_gossip_verification() { ) .await .expect("should import valid gossip verified block"); - if let Some(blobs) = blobs_opt { - for blob in blobs { - let blob_index = blob.message.index; + if let Some(blob_sidecars) = blobs_opt { + for blob_sidecar in blob_sidecars { + let blob_index = blob_sidecar.index; let gossip_verified = harness .chain - .verify_blob_sidecar_for_gossip(blob.clone(), blob_index) + .verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index) .expect("should obtain gossip verified blob"); harness @@ -948,7 +919,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_block_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::FutureSlot { present_slot, block_slot, @@ -982,7 +953,7 @@ async fn block_gossip_verification() { *block.slot_mut() = expected_finalized_slot; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::WouldRevertFinalizedSlot { block_slot, finalized_slot, @@ -1012,9 +983,10 @@ async fn block_gossip_verification() { unwrap_err( harness .chain - .verify_block_for_gossip( - Arc::new(SignedBeaconBlock::from_block(block, junk_signature())).into() - ) + .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block( + block, + junk_signature() + ))) .await ), BlockError::ProposalSignatureInvalid @@ -1039,7 +1011,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - 
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::ParentUnknown(block) if block.parent_root() == parent_root ), @@ -1065,7 +1037,7 @@ async fn block_gossip_verification() { *block.parent_root_mut() = parent_root; assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), BlockError::NotFinalizedDescendant { block_parent_root } if block_parent_root == parent_root ), @@ -1091,7 +1063,6 @@ async fn block_gossip_verification() { .0; let expected_proposer = block.proposer_index(); let other_proposer = (0..VALIDATOR_COUNT as u64) - .into_iter() .find(|i| *i != block.proposer_index()) .expect("there must be more than one validator in this test"); *block.proposer_index_mut() = other_proposer; @@ -1103,7 +1074,7 @@ async fn block_gossip_verification() { ); assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::IncorrectBlockProposer { block, local_shuffling, @@ -1115,7 +1086,7 @@ async fn block_gossip_verification() { // Check to ensure that we registered this is a valid block from this proposer. assert!( matches!( - unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await), + unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await), BlockError::BlockIsAlreadyKnown, ), "should register any valid signature against the proposer, even if the block failed later verification" @@ -1141,10 +1112,9 @@ async fn block_gossip_verification() { matches!( harness .chain - .verify_block_for_gossip(block.clone().into()) + .verify_block_for_gossip(block.clone()) .await - .err() - .expect("should error when processing known block"), + .expect_err("should error when processing known block"), BlockError::BlockIsAlreadyKnown ), "the second proposal by this validator should be rejected" @@ -1178,12 +1148,14 @@ async fn verify_block_for_gossip_slashing_detection() { .await .unwrap(); - if let Some(blobs) = blobs1 { - for blob in blobs { - let blob_index = blob.message.index; + if let Some((kzg_proofs, blobs)) = blobs1 { + let sidecars = + BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap(); + for sidecar in sidecars { + let blob_index = sidecar.index; let verified_blob = harness .chain - .verify_blob_sidecar_for_gossip(blob, blob_index) + .verify_blob_sidecar_for_gossip(sidecar, blob_index) .unwrap(); harness .chain @@ -1368,10 +1340,9 @@ async fn add_base_block_to_altair_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(base_block.clone()).into()) + .verify_block_for_gossip(Arc::new(base_block.clone())) .await - .err() - .expect("should error when processing base block"), + .expect_err("should error when processing base block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Altair, object_fork: ForkName::Base, @@ -1389,8 +1360,7 @@ async fn add_base_block_to_altair_chain() { || Ok(()), ) .await - .err() - .expect("should error when processing base block"), + .expect_err("should error when processing base block"), 
BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Altair, object_fork: ForkName::Base, @@ -1506,10 +1476,9 @@ async fn add_altair_block_to_base_chain() { assert!(matches!( harness .chain - .verify_block_for_gossip(Arc::new(altair_block.clone()).into()) + .verify_block_for_gossip(Arc::new(altair_block.clone())) .await - .err() - .expect("should error when processing altair block"), + .expect_err("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Base, object_fork: ForkName::Altair, @@ -1527,8 +1496,7 @@ async fn add_altair_block_to_base_chain() { || Ok(()), ) .await - .err() - .expect("should error when processing altair block"), + .expect_err("should error when processing altair block"), BlockError::InconsistentFork(InconsistentFork { fork_at_slot: ForkName::Base, object_fork: ForkName::Altair, @@ -1584,10 +1552,12 @@ async fn import_duplicate_block_unrealized_justification() { // The store's justified checkpoint must still be at epoch 0, while unrealized justification // must be at epoch 1. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); - assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); - drop(fc); + { + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1); + drop(fc); + } // Produce a block to justify epoch 2. let state = harness.get_current_state(); @@ -1602,10 +1572,10 @@ async fn import_duplicate_block_unrealized_justification() { let notify_execution_layer = NotifyExecutionLayer::Yes; let verified_block1 = block .clone() - .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); let verified_block2 = block - .into_execution_pending_block(block_root, &chain, notify_execution_layer) + .into_execution_pending_block(block_root, chain, notify_execution_layer) .unwrap(); // Import the first block, simulating a block processed via a finalized chain segment. @@ -1614,18 +1584,20 @@ async fn import_duplicate_block_unrealized_justification() { .unwrap(); // Unrealized justification should NOT have updated. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); - let unrealized_justification = fc.unrealized_justified_checkpoint(); - assert_eq!(unrealized_justification.epoch, 2); - - // The fork choice node for the block should have unrealized justification. - let fc_block = fc.get_block(&block_root).unwrap(); - assert_eq!( - fc_block.unrealized_justified_checkpoint, - Some(unrealized_justification) - ); - drop(fc); + let unrealized_justification = { + let fc = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc.justified_checkpoint().epoch, 0); + let unrealized_justification = fc.unrealized_justified_checkpoint(); + assert_eq!(unrealized_justification.epoch, 2); + // The fork choice node for the block should have unrealized justification. + let fc_block = fc.get_block(&block_root).unwrap(); + assert_eq!( + fc_block.unrealized_justified_checkpoint, + Some(unrealized_justification) + ); + drop(fc); + unrealized_justification + }; // Import the second verified block, simulating a block processed via RPC. 
import_execution_pending_block(chain.clone(), verified_block2) @@ -1633,15 +1605,16 @@ async fn import_duplicate_block_unrealized_justification() { .unwrap(); // Unrealized justification should still be updated. - let fc = chain.canonical_head.fork_choice_read_lock(); - assert_eq!(fc.justified_checkpoint().epoch, 0); + let fc3 = chain.canonical_head.fork_choice_read_lock(); + assert_eq!(fc3.justified_checkpoint().epoch, 0); assert_eq!( - fc.unrealized_justified_checkpoint(), + fc3.unrealized_justified_checkpoint(), unrealized_justification ); // The fork choice node for the block should still have the unrealized justified checkpoint. - let fc_block = fc.get_block(&block_root).unwrap(); + let fc_block = fc3.get_block(&block_root).unwrap(); + drop(fc3); assert_eq!( fc_block.unrealized_justified_checkpoint, Some(unrealized_justification) diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index c48cf310a..d54543e4f 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -1,17 +1,15 @@ use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::test_utils::BeaconChainHarness; -use bls::Signature; use eth2::types::{EventKind, SseBlobSidecar}; use rand::rngs::StdRng; use rand::SeedableRng; -use std::marker::PhantomData; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; -use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec, SignedBlobSidecar}; +use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec}; type E = MinimalEthSpec; -/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. +/// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. #[tokio::test] async fn blob_sidecar_event_on_process_gossip_blob() { let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); @@ -29,14 +27,10 @@ async fn blob_sidecar_event_on_process_gossip_blob() { // build and process a gossip verified blob let kzg = harness.chain.kzg.as_ref().unwrap(); let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); - let signed_sidecar = SignedBlobSidecar { - message: BlobSidecar::random_valid(&mut rng, kzg) - .map(Arc::new) - .unwrap(), - signature: Signature::empty(), - _phantom: PhantomData, - }; - let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(signed_sidecar); + let sidecar = BlobSidecar::random_valid(&mut rng, kzg) + .map(Arc::new) + .unwrap(); + let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(sidecar); let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob()); let _ = harness @@ -49,7 +43,7 @@ async fn blob_sidecar_event_on_process_gossip_blob() { assert_eq!(sidecar_event, EventKind::BlobSidecar(expected_sse_blobs)); } -/// Verifies that a blob event is emitted when blobs are received via RPC. +/// Verifies that a blob event is emitted when blobs are received via RPC. 
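Call sites throughout this patch switch from reading sidecar fields (`blob.slot`, `blob.block_root`) to calling methods (`blob.slot()`, `blob.block_root()`), because the sidecar now embeds a `SignedBeaconBlockHeader` instead of duplicating those fields. A sketch of what those accessors are assumed to do, as they would live next to the `BlobSidecar` definition in `consensus/types` (requires `tree_hash::TreeHash` in scope):

    impl<E: EthSpec> BlobSidecar<E> {
        pub fn slot(&self) -> Slot {
            self.signed_block_header.message.slot
        }

        pub fn block_proposer_index(&self) -> u64 {
            self.signed_block_header.message.proposer_index
        }

        pub fn block_root(&self) -> Hash256 {
            // Recomputed as the hash tree root of the embedded block header.
            self.signed_block_header.message.tree_hash_root()
        }
    }
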
#[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { let spec = ForkName::Deneb.make_genesis_spec(E::default_spec()); @@ -83,7 +77,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let _ = harness .chain - .process_rpc_blobs(blob_1.slot, blob_1.block_root, blobs) + .process_rpc_blobs(blob_1.slot(), blob_1.block_root(), blobs) .await .unwrap(); diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 31d4e4aac..1c675d280 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -591,7 +591,7 @@ pub enum Work { process_batch: Box>) + Send + Sync>, }, GossipBlock(AsyncFn), - GossipSignedBlobSidecar(AsyncFn), + GossipBlobSidecar(AsyncFn), DelayedImportBlock { beacon_block_slot: Slot, beacon_block_root: Hash256, @@ -641,7 +641,7 @@ impl Work { Work::GossipAggregate { .. } => GOSSIP_AGGREGATE, Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH, Work::GossipBlock(_) => GOSSIP_BLOCK, - Work::GossipSignedBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, + Work::GossipBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR, Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK, Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT, Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING, @@ -1205,7 +1205,7 @@ impl BeaconProcessor { Work::GossipBlock { .. } => { gossip_block_queue.push(work, work_id, &self.log) } - Work::GossipSignedBlobSidecar { .. } => { + Work::GossipBlobSidecar { .. } => { gossip_blob_queue.push(work, work_id, &self.log) } Work::DelayedImportBlock { .. } => { @@ -1457,10 +1457,11 @@ impl BeaconProcessor { task_spawner.spawn_async(process_fn) } Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), - Work::GossipBlock(work) | Work::GossipSignedBlobSidecar(work) => task_spawner - .spawn_async(async move { + Work::GossipBlock(work) | Work::GossipBlobSidecar(work) => { + task_spawner.spawn_async(async move { work.await; - }), + }) + } Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 28cd1fe48..934ef059d 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,9 +1,9 @@ use eth2::types::builder_bid::SignedBuilderBid; -use eth2::types::FullPayloadContents; use eth2::types::{ - BlindedPayload, EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes, - SignedBlockContents, SignedValidatorRegistrationData, Slot, + EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes, + SignedValidatorRegistrationData, Slot, }; +use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; pub use eth2::Error; use eth2::{ok_or_error, StatusCode}; use reqwest::{IntoUrl, Response}; @@ -140,7 +140,7 @@ impl BuilderHttpClient { /// `POST /eth/v1/builder/blinded_blocks` pub async fn post_builder_blinded_blocks( &self, - blinded_block: &SignedBlockContents>, + blinded_block: &SignedBlindedBeaconBlock, ) -> Result>, Error> { let mut path = self.server.full.clone(); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 07fdf6414..6b0277ff3 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -14,8 +14,8 @@ pub use engine_api::*; pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc}; use engines::{Engine, EngineError}; pub use 
engines::{EngineState, ForkchoiceState}; +use eth2::types::FullPayloadContents; use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse}; -use eth2::types::{FullPayloadContents, SignedBlockContents}; use ethers_core::types::Transaction as EthersTransaction; use fork_choice::ForkchoiceUpdateParameters; use lru::LruCache; @@ -43,8 +43,9 @@ use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; use types::builder_bid::BuilderBid; use types::payload::BlockProductionVersion; -use types::sidecar::{BlobItems, Sidecar}; -use types::{AbstractExecPayload, ExecutionPayloadDeneb, KzgProofs}; +use types::{ + AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock, +}; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot, @@ -103,12 +104,8 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { payload: ExecutionPayloadHeader::Deneb(builder_bid.header).into(), block_value: builder_bid.value, - kzg_commitments: builder_bid.blinded_blobs_bundle.commitments, - blobs: BlobItems::::try_from_blob_roots( - builder_bid.blinded_blobs_bundle.blob_roots, - ) - .map_err(Error::InvalidBlobConversion)?, - proofs: builder_bid.blinded_blobs_bundle.proofs, + kzg_commitments: builder_bid.blob_kzg_commitments, + blobs_and_proofs: None, }, }; Ok(ProvenancedPayload::Builder( @@ -170,8 +167,8 @@ pub enum BlockProposalContents> { payload: Payload, block_value: Uint256, kzg_commitments: KzgCommitments, - blobs: >::BlobItems, - proofs: KzgProofs, + /// `None` for blinded `PayloadAndBlobs`. + blobs_and_proofs: Option<(BlobsList, KzgProofs)>, }, } @@ -203,9 +200,7 @@ impl> TryFrom> payload: execution_payload.into(), block_value, kzg_commitments: bundle.commitments, - blobs: BlobItems::try_from_blobs(bundle.blobs) - .map_err(Error::InvalidBlobConversion)?, - proofs: bundle.proofs, + blobs_and_proofs: Some((bundle.blobs, bundle.proofs)), }), None => Ok(Self::Payload { payload: execution_payload.into(), @@ -233,26 +228,23 @@ impl> BlockProposalContents ( Payload, Option>, - Option<>::BlobItems>, - Option>, + Option<(BlobsList, KzgProofs)>, Uint256, ) { match self { Self::Payload { payload, block_value, - } => (payload, None, None, None, block_value), + } => (payload, None, None, block_value), Self::PayloadAndBlobs { payload, block_value, kzg_commitments, - blobs, - proofs, + blobs_and_proofs, } => ( payload, Some(kzg_commitments), - Some(blobs), - Some(proofs), + blobs_and_proofs, block_value, ), } @@ -276,23 +268,6 @@ impl> BlockProposalContents block_value, } } - pub fn default_at_fork(fork_name: ForkName) -> Result { - Ok(match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BlockProposalContents::Payload { - payload: Payload::default_at_fork(fork_name)?, - block_value: Uint256::zero(), - } - } - ForkName::Deneb => BlockProposalContents::PayloadAndBlobs { - payload: Payload::default_at_fork(fork_name)?, - block_value: Uint256::zero(), - blobs: Payload::default_blobs_at_fork(fork_name)?, - kzg_commitments: VariableList::default(), - proofs: VariableList::default(), - }, - }) - } } #[derive(Clone, PartialEq)] @@ -753,6 +728,13 @@ impl ExecutionLayer { } } + /// Delete proposer preparation data for `proposer_index`. This is only useful in tests. 
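With the reshaped `BlockProposalContents` above, `deconstruct()` yields the blobs and proofs as a single optional pair: locally built Deneb payloads carry `Some((blobs, proofs))`, blinded builder payloads carry `None` because only the commitments are known, and pre-Deneb payloads carry neither. A caller-side sketch with hypothetical bindings:

    let (payload, kzg_commitments_opt, blobs_and_proofs_opt, block_value) =
        block_proposal_contents.deconstruct();
    match (kzg_commitments_opt, blobs_and_proofs_opt) {
        // Locally built Deneb payload: commitments, blobs and proofs line up one-to-one.
        (Some(commitments), Some((blobs, proofs))) => {
            debug_assert_eq!(commitments.len(), blobs.len());
            debug_assert_eq!(blobs.len(), proofs.len());
        }
        // Blinded/builder payload: commitments only, the blobs stay with the builder.
        (Some(_commitments), None) => {}
        // Pre-Deneb payload: no blob data at all.
        (None, None) => {}
        (None, Some(_)) => unreachable!("blobs are never returned without commitments"),
    }
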
+ pub async fn clear_proposer_preparation(&self, proposer_index: u64) { + self.proposer_preparation_data() + .await + .remove(&proposer_index); + } + /// Removes expired entries from proposer_preparation_data and proposers caches async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> { let mut proposer_preparation_data = self.proposer_preparation_data().await; @@ -2003,7 +1985,7 @@ impl ExecutionLayer { pub async fn propose_blinded_beacon_block( &self, block_root: Hash256, - block: &SignedBlockContents>, + block: &SignedBlindedBeaconBlock, ) -> Result, Error> { debug!( self.log(), @@ -2052,7 +2034,6 @@ impl ExecutionLayer { "relay_response_ms" => duration.as_millis(), "block_root" => ?block_root, "parent_hash" => ?block - .signed_block() .message() .execution_payload() .map(|payload| format!("{}", payload.parent_hash())) diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 713ebb670..182cad50f 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -881,16 +881,16 @@ mod test { #[test] fn valid_test_blobs() { assert!( - validate_blob::().unwrap(), + validate_blob::().is_ok(), "Mainnet preset test blobs bundle should contain valid proofs" ); assert!( - validate_blob::().unwrap(), + validate_blob::().is_ok(), "Minimal preset test blobs bundle should contain valid proofs" ); } - fn validate_blob() -> Result { + fn validate_blob() -> Result<(), String> { let kzg = load_kzg()?; let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::()?; let kzg_blob = kzg::Blob::from_bytes(blob.as_ref()) diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 32b352b6a..7da2022d5 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -533,8 +533,8 @@ pub fn serve( .as_deneb() .map_err(|_| reject("incorrect payload variant"))? .into(), - blinded_blobs_bundle: maybe_blobs_bundle - .map(Into::into) + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) .unwrap_or_default(), value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), pubkey: builder.builder_sk.public_key().compress(), @@ -572,8 +572,8 @@ pub fn serve( .as_deneb() .map_err(|_| reject("incorrect payload variant"))? 
.into(), - blinded_blobs_bundle: maybe_blobs_bundle - .map(Into::into) + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) .unwrap_or_default(), value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), pubkey: builder.builder_sk.public_key().compress(), diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs index f59a4b521..37b4049c0 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -1,50 +1,42 @@ -use beacon_chain::BlockProductionError; -use eth2::types::{BeaconBlockAndBlobSidecars, BlindedBeaconBlockAndBlobSidecars, BlockContents}; -use types::{AbstractExecPayload, BeaconBlock, EthSpec, ForkName, SidecarList}; +use beacon_chain::{BeaconBlockResponse, BeaconBlockResponseWrapper, BlockProductionError}; +use eth2::types::{BlockContents, FullBlockContents, ProduceBlockV3Response}; +use types::{EthSpec, ForkName}; type Error = warp::reject::Rejection; -pub fn build_block_contents>( +pub fn build_block_contents( fork_name: ForkName, - block: BeaconBlock, - maybe_blobs: Option>::Sidecar>>, -) -> Result, Error> { - match Payload::block_type() { - types::BlockType::Blinded => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Ok(BlockContents::Block(block)) - } + block_response: BeaconBlockResponseWrapper, +) -> Result, Error> { + match block_response { + BeaconBlockResponseWrapper::Blinded(block) => { + Ok(ProduceBlockV3Response::Blinded(block.block)) + } + BeaconBlockResponseWrapper::Full(block) => match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Ok( + ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)), + ), ForkName::Deneb => { - if let Some(blinded_blob_sidecars) = maybe_blobs { - let block_and_blobs = BlindedBeaconBlockAndBlobSidecars { - blinded_block: block, - blinded_blob_sidecars, - }; + let BeaconBlockResponse { + block, + state: _, + blob_items, + execution_payload_value: _, + consensus_block_value: _, + } = block; - Ok(BlockContents::BlindedBlockAndBlobSidecars(block_and_blobs)) - } else { - Err(warp_utils::reject::block_production_error( + let Some((kzg_proofs, blobs)) = blob_items else { + return Err(warp_utils::reject::block_production_error( BlockProductionError::MissingBlobs, - )) - } - } - }, - types::BlockType::Full => match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Ok(BlockContents::Block(block)) - } - ForkName::Deneb => { - if let Some(blob_sidecars) = maybe_blobs { - let block_and_blobs = BeaconBlockAndBlobSidecars { + )); + }; + + Ok(ProduceBlockV3Response::Full( + FullBlockContents::BlockContents(BlockContents { block, - blob_sidecars, - }; - - Ok(BlockContents::BlockAndBlobSidecars(block_and_blobs)) - } else { - Err(warp_utils::reject::block_production_error( - BlockProductionError::MissingBlobs, - )) - } + kzg_proofs, + blobs, + }), + )) } }, } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 5b00a80bd..08c67a00b 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -41,7 +41,7 @@ use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, - SignedBlindedBlockContents, SignedBlockContents, ValidatorId, ValidatorStatus, + PublishBlockRequest, ValidatorId, ValidatorStatus, }; use 
lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; @@ -76,9 +76,9 @@ use tokio_stream::{ }; use types::{ Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, - ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch, - SignedAggregateAndProof, SignedBlsToExecutionChange, SignedContributionAndProof, + CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256, + ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, + SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage, SyncContributionData, }; @@ -1306,7 +1306,7 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - move |block_contents: SignedBlockContents, + move |block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1342,7 +1342,7 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = SignedBlockContents::::from_ssz_bytes( + let block_contents = PublishBlockRequest::::from_ssz_bytes( &block_bytes, &chain.spec, ) @@ -1375,7 +1375,7 @@ pub fn serve( .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, - block_contents: SignedBlockContents, + block_contents: PublishBlockRequest, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1413,7 +1413,7 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = SignedBlockContents::::from_ssz_bytes( + let block_contents = PublishBlockRequest::::from_ssz_bytes( &block_bytes, &chain.spec, ) @@ -1449,7 +1449,7 @@ pub fn serve( .and(network_tx_filter.clone()) .and(log_filter.clone()) .then( - move |block_contents: SignedBlindedBlockContents, + move |block_contents: SignedBlindedBeaconBlock, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, @@ -1485,14 +1485,13 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - SignedBlockContents::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_blinded_block( block, chain, @@ -1518,14 +1517,14 @@ pub fn serve( .and(log_filter.clone()) .then( move |validation_level: api_types::BroadcastValidationQuery, - block_contents: SignedBlindedBlockContents, + blinded_block: SignedBlindedBeaconBlock, task_spawner: TaskSpawner, chain: Arc>, network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { publish_blocks::publish_blinded_block( - block_contents, + blinded_block, chain, &network_tx, log, @@ -1555,14 +1554,13 @@ pub fn serve( network_tx: UnboundedSender>, log: Logger| { task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = - 
SignedBlockContents::>::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; publish_blocks::publish_blinded_block( block, chain, diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 73da4853e..09b95136b 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -3,8 +3,7 @@ use std::sync::Arc; use types::{payload::BlockProductionVersion, *}; use beacon_chain::{ - BeaconBlockResponse, BeaconBlockResponseType, BeaconChain, BeaconChainTypes, - ProduceBlockVerification, + BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; use eth2::types::{self as api_types, EndpointVersion, SkipRandaoVerification}; use ssz::Encode; @@ -69,35 +68,23 @@ pub async fn produce_block_v3( warp_utils::reject::custom_bad_request(format!("failed to fetch a block: {:?}", e)) })?; - match block_response_type { - BeaconBlockResponseType::Full(block_response) => { - build_response_v3(chain, block_response, endpoint_version, accept_header) - } - BeaconBlockResponseType::Blinded(block_response) => { - build_response_v3(chain, block_response, endpoint_version, accept_header) - } - } + build_response_v3(chain, block_response_type, endpoint_version, accept_header) } -pub fn build_response_v3>( +pub fn build_response_v3( chain: Arc>, - block_response: BeaconBlockResponse, + block_response: BeaconBlockResponseWrapper, endpoint_version: EndpointVersion, accept_header: Option, ) -> Result, warp::Rejection> { let fork_name = block_response - .block - .to_ref() .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; + let execution_payload_value = block_response.execution_payload_value(); + let consensus_block_value = block_response.consensus_block_value(); + let execution_payload_blinded = block_response.is_blinded(); - let block_contents = build_block_contents::build_block_contents( - fork_name, - block_response.block, - block_response.maybe_side_car, - )?; - - let execution_payload_blinded = Payload::block_type() == BlockType::Blinded; + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; match accept_header { Some(api_types::Accept::Ssz) => Response::builder() @@ -107,9 +94,9 @@ pub fn build_response_v3| add_consensus_version_header(res, fork_name)) .map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded)) .map(|res: Response| { - add_execution_payload_value_header(res, block_response.execution_payload_value) + add_execution_payload_value_header(res, execution_payload_value) }) - .map(|res| add_consensus_block_value_header(res, block_response.consensus_block_value)) + .map(|res| add_consensus_block_value_header(res, consensus_block_value)) .map_err(|e| -> warp::Rejection { warp_utils::reject::custom_server_error(format!("failed to create response: {}", e)) }), @@ -117,10 +104,8 @@ pub fn build_response_v3( .await .map_err(warp_utils::reject::block_production_error)?; - match block_response_type { - BeaconBlockResponseType::Full(block_response) => { - build_response_v2(chain, block_response, endpoint_version, accept_header) - } - BeaconBlockResponseType::Blinded(block_response) => { - build_response_v2(chain, block_response, endpoint_version, 
accept_header) - } - } + build_response_v2(chain, block_response_type, endpoint_version, accept_header) } pub async fn produce_block_v2( @@ -187,33 +165,20 @@ pub async fn produce_block_v2( .await .map_err(warp_utils::reject::block_production_error)?; - match block_response_type { - BeaconBlockResponseType::Full(block_response) => { - build_response_v2(chain, block_response, endpoint_version, accept_header) - } - BeaconBlockResponseType::Blinded(block_response) => { - build_response_v2(chain, block_response, endpoint_version, accept_header) - } - } + build_response_v2(chain, block_response_type, endpoint_version, accept_header) } -pub fn build_response_v2>( +pub fn build_response_v2( chain: Arc>, - block_response: BeaconBlockResponse, + block_response: BeaconBlockResponseWrapper, endpoint_version: EndpointVersion, accept_header: Option, ) -> Result, warp::Rejection> { let fork_name = block_response - .block - .to_ref() .fork_name(&chain.spec) .map_err(inconsistent_fork_rejection)?; - let block_contents = build_block_contents::build_block_contents( - fork_name, - block_response.block, - block_response.maybe_side_car, - )?; + let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?; match accept_header { Some(api_types::Accept::Ssz) => Response::builder() diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index e41cf51ec..432d91b72 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -6,8 +6,8 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlockContents, NotifyExecutionLayer, }; -use eth2::types::{BroadcastValidation, ErrorMessage}; -use eth2::types::{FullPayloadContents, SignedBlockContents}; +use eth2::types::{into_full_block_and_blobs, BroadcastValidation, ErrorMessage}; +use eth2::types::{FullPayloadContents, PublishBlockRequest}; use execution_layer::ProvenancedPayload; use lighthouse_network::PubsubMessage; use network::NetworkMessage; @@ -19,8 +19,9 @@ use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; use tree_hash::TreeHash; use types::{ - AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash, - ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlobSidecarList, + AbstractExecPayload, BeaconBlockRef, BlobSidecarList, EthSpec, ExecPayload, ExecutionBlockHash, + ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, + VariableList, }; use warp::http::StatusCode; use warp::{reply::Response, Rejection, Reply}; @@ -65,7 +66,7 @@ pub async fn publish_block>, - blobs_opt: Option>, + blobs_opt: Option>, sender, log, seen_timestamp| { @@ -86,8 +87,8 @@ pub async fn publish_block { let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block.clone())]; - if let Some(signed_blobs) = blobs_opt { - for (blob_index, blob) in signed_blobs.into_iter().enumerate() { + if let Some(blob_sidecars) = blobs_opt { + for (blob_index, blob) in blob_sidecars.into_iter().enumerate() { pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new(( blob_index as u64, blob, @@ -108,10 +109,6 @@ pub async fn publish_block>(); + VariableList::from(blobs) + }); let block_root = block_root.unwrap_or(gossip_verified_block.block_root); @@ -292,16 +296,16 @@ pub async fn publish_block( - block_contents: SignedBlockContents>, + blinded_block: SignedBlindedBeaconBlock, chain: Arc>, 
network_tx: &UnboundedSender>, log: Logger, validation_level: BroadcastValidation, duplicate_status_code: StatusCode, ) -> Result { - let block_root = block_contents.signed_block().canonical_root(); - let full_block: ProvenancedBlock> = - reconstruct_block(chain.clone(), block_root, block_contents, log.clone()).await?; + let block_root = blinded_block.canonical_root(); + let full_block: ProvenancedBlock> = + reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?; publish_block::( Some(block_root), full_block, @@ -320,10 +324,9 @@ pub async fn publish_blinded_block( pub async fn reconstruct_block( chain: Arc>, block_root: Hash256, - block_contents: SignedBlockContents>, + block: SignedBlindedBeaconBlock, log: Logger, -) -> Result>, Rejection> { - let block = block_contents.signed_block(); +) -> Result>, Rejection> { let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() { let el = chain.execution_layer.as_ref().ok_or_else(|| { warp_utils::reject::custom_server_error("Missing execution layer".to_string()) @@ -365,7 +368,7 @@ pub async fn reconstruct_block( ); let full_payload = el - .propose_blinded_beacon_block(block_root, &block_contents) + .propose_blinded_beacon_block(block_root, &block) .await .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -385,15 +388,15 @@ pub async fn reconstruct_block( match full_payload_opt { // A block without a payload is pre-merge and we consider it locally // built. - None => block_contents - .try_into_full_block_and_blobs(None) - .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Local(full_payload_contents)) => block_contents - .try_into_full_block_and_blobs(Some(full_payload_contents)) - .map(ProvenancedBlock::local), - Some(ProvenancedPayload::Builder(full_payload_contents)) => block_contents - .try_into_full_block_and_blobs(Some(full_payload_contents)) - .map(ProvenancedBlock::builder), + None => into_full_block_and_blobs(block, None).map(ProvenancedBlock::local), + Some(ProvenancedPayload::Local(full_payload_contents)) => { + into_full_block_and_blobs(block, Some(full_payload_contents)) + .map(ProvenancedBlock::local) + } + Some(ProvenancedPayload::Builder(full_payload_contents)) => { + into_full_block_and_blobs(block, Some(full_payload_contents)) + .map(ProvenancedBlock::builder) + } } .map_err(|e| { warp_utils::reject::custom_server_error(format!("Unable to add payload to block: {e:?}")) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index fe300ae5e..7961b32c5 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -2,18 +2,12 @@ use beacon_chain::{ test_utils::{AttestationStrategy, BlockStrategy}, GossipVerifiedBlock, IntoGossipVerifiedBlockContents, }; -use eth2::types::{ - BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlockContents, - SignedBlockContentsTuple, -}; +use eth2::types::{BroadcastValidation, PublishBlockRequest, SignedBeaconBlock}; use http_api::test_utils::InteractiveTester; use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock}; use std::sync::Arc; use tree_hash::TreeHash; -use types::{ - BlindedBlobSidecar, BlindedPayload, BlobSidecar, FullPayload, Hash256, MainnetEthSpec, - SignedSidecarList, Slot, -}; +use types::{Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -80,7 +74,7 @@ 
pub async fn gossip_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -131,7 +125,7 @@ pub async fn gossip_partial_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -174,7 +168,7 @@ pub async fn gossip_full_pass() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), + &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) .await; @@ -266,7 +260,7 @@ pub async fn consensus_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -315,7 +309,7 @@ pub async fn consensus_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -358,10 +352,8 @@ pub async fn consensus_partial_pass_only_consensus() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, _), state_after_a): ((SignedBeaconBlock, _), _) = - tester.harness.make_block(state_a.clone(), slot_b).await; - let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock, _), _) = - tester.harness.make_block(state_a, slot_b).await; + let ((block_a, _), state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await; + let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await; let block_b_root = block_b.canonical_root(); /* check for `make_block` curios */ @@ -369,7 +361,7 @@ pub async fn consensus_partial_pass_only_consensus() { assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b) + let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) .into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_contents_b.is_ok()); let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain); @@ -430,7 +422,7 @@ pub async fn consensus_full_pass() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), + &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) .await; @@ -481,7 +473,7 @@ pub async fn equivocation_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -538,7 +530,7 @@ pub async fn equivocation_consensus_early_equivocation() { assert!(tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block_a.clone(), blobs_a), + &PublishBlockRequest::new(block_a.clone(), blobs_a), validation_level ) .await @@ 
-552,7 +544,7 @@ pub async fn equivocation_consensus_early_equivocation() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block_b.clone(), blobs_b), + &PublishBlockRequest::new(block_b.clone(), blobs_b), validation_level, ) .await; @@ -603,7 +595,7 @@ pub async fn equivocation_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level) + .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -661,10 +653,10 @@ pub async fn equivocation_consensus_late_equivocation() { assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); - let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b) + let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b) .into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_contents_b.is_ok()); - let gossip_block_contents_a = SignedBlockContents::new(block_a, blobs_a) + let gossip_block_contents_a = PublishBlockRequest::new(block_a, blobs_a) .into_gossip_verified_block(&tester.harness.chain); assert!(gossip_block_contents_a.is_err()); @@ -728,7 +720,7 @@ pub async fn equivocation_full_pass() { let response: Result<(), eth2::Error> = tester .client .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), + &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) .await; @@ -776,11 +768,9 @@ pub async fn blinded_gossip_invalid() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -829,11 +819,9 @@ pub async fn blinded_gossip_partial_pass() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -870,18 +858,17 @@ pub async fn blinded_gossip_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; - let block_contents = block_contents_tuple.into(); + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } // This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`. 
@@ -912,19 +899,18 @@ pub async fn blinded_gossip_full_pass_ssz() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; - let block_contents = block_contents_tuple.into(); + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2_ssz(&block_contents, validation_level) + .post_beacon_blinded_blocks_v2_ssz(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. @@ -963,11 +949,9 @@ pub async fn blinded_consensus_invalid() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1014,11 +998,9 @@ pub async fn blinded_consensus_gossip() { .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1060,19 +1042,18 @@ pub async fn blinded_consensus_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await; + let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; - let block_contents = block_contents_tuple.into(); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&blinded_block, validation_level) .await; assert!(response.is_ok()); assert!(tester .harness .chain - .block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root())); + .block_is_known_to_fork_choice(&blinded_block.canonical_root())); } /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`. 
@@ -1112,11 +1093,9 @@ pub async fn blinded_equivocation_invalid() { }) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1159,18 +1138,13 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let (block_contents_tuple_a, state_after_a) = tester + let (block_a, state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let (block_contents_tuple_b, state_after_b) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ - let block_contents_a: SignedBlockContents> = block_contents_tuple_a.into(); - let block_contents_b: SignedBlockContents> = block_contents_tuple_b.into(); - let block_a = block_contents_a.signed_block(); - let block_b = block_contents_b.signed_block(); assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); assert_eq!(block_b.state_root(), state_after_b.tree_hash_root()); assert_ne!(block_a.state_root(), block_b.state_root()); @@ -1178,7 +1152,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { /* submit `block_a` as valid */ assert!(tester .client - .post_beacon_blinded_blocks_v2(&block_contents_a, validation_level) + .post_beacon_blinded_blocks_v2(&block_a, validation_level) .await .is_ok()); assert!(tester @@ -1189,7 +1163,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() { /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&block_contents_b, validation_level) + .post_beacon_blinded_blocks_v2(&block_b, validation_level) .await; assert!(response.is_err()); @@ -1236,11 +1210,9 @@ pub async fn blinded_equivocation_gossip() { .make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero()) .await; - let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple); - let response: Result<(), eth2::Error> = tester .client - .post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level) + .post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level) .await; assert!(response.is_err()); @@ -1286,12 +1258,11 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block_a, blobs_a), state_after_a): ((SignedBlindedBeaconBlock, _), _) = tester + let (block_a, state_after_a) = tester .harness .make_blinded_block(state_a.clone(), slot_b) .await; - let ((block_b, blobs_b), state_after_b): ((SignedBlindedBeaconBlock, _), _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await; /* check for `make_blinded_block` curios */ assert_eq!(block_a.state_root(), state_after_a.tree_hash_root()); @@ -1301,7 +1272,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let unblinded_block_a = reconstruct_block( tester.harness.chain.clone(), block_a.canonical_root(), - SignedBlockContents::new(block_a, 
blobs_a), + block_a, test_logger.clone(), ) .await @@ -1309,7 +1280,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let unblinded_block_b = reconstruct_block( tester.harness.chain.clone(), block_b.canonical_root(), - SignedBlockContents::new(block_b.clone(), blobs_b.clone()), + block_b.clone(), test_logger.clone(), ) .await @@ -1338,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() { let channel = tokio::sync::mpsc::unbounded_channel(); let publication_result = publish_blinded_block( - SignedBlockContents::new(block_b, blobs_b), + block_b, tester.harness.chain, &channel.0, test_logger, @@ -1383,15 +1354,11 @@ pub async fn blinded_equivocation_full_pass() { let slot_b = slot_a + 1; let state_a = tester.harness.get_current_state(); - let ((block, blobs), _): ((SignedBlindedBeaconBlock, _), _) = - tester.harness.make_blinded_block(state_a, slot_b).await; + let (block, _) = tester.harness.make_blinded_block(state_a, slot_b).await; let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( - &SignedBlockContents::new(block.clone(), blobs), - validation_level, - ) + .post_beacon_blinded_blocks_v2(&block, validation_level) .await; assert!(response.is_ok()); @@ -1400,20 +1367,3 @@ pub async fn blinded_equivocation_full_pass() { .chain .block_is_known_to_fork_choice(&block.canonical_root())); } - -fn into_signed_blinded_block_contents( - block_contents_tuple: SignedBlockContentsTuple>, -) -> SignedBlockContents> { - let (block, maybe_blobs) = block_contents_tuple; - SignedBlockContents::new(block.into(), maybe_blobs.map(into_blinded_blob_sidecars)) -} - -fn into_blinded_blob_sidecars( - blobs: SignedSidecarList>, -) -> SignedSidecarList { - blobs - .into_iter() - .map(|blob| blob.into()) - .collect::>() - .into() -} diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 327215209..48a2f450e 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -17,8 +17,8 @@ use std::sync::Arc; use std::time::Duration; use tree_hash::TreeHash; use types::{ - Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, - MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, MainnetEthSpec, + MinimalEthSpec, ProposerPreparationData, Slot, }; use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full}; @@ -641,13 +641,9 @@ pub async fn proposer_boost_re_org_test( assert_eq!(block_c.parent_root(), block_b_root); } - // Sign blobs. - let block_c_signed_blobs = - block_c_blobs.map(|blobs| harness.sign_blobs(blobs, &state_b, proposer_index)); - // Applying block C should cause it to become head regardless (re-org or continuation). 
let block_root_c = harness - .process_block_result((block_c.clone(), block_c_signed_blobs)) + .process_block_result((block_c.clone(), block_c_blobs)) .await .unwrap() .into(); @@ -828,7 +824,7 @@ pub async fn fork_choice_before_proposal() { .into(); let block_d = tester .client - .get_validator_blocks::>(slot_d, &randao_reveal, None) + .get_validator_blocks::(slot_d, &randao_reveal, None) .await .unwrap() .data diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d5fa50ba2..7b769009c 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -64,8 +64,8 @@ struct ApiTester { harness: Arc>>, chain: Arc>>, client: BeaconNodeHttpClient, - next_block: SignedBlockContents, - reorg_block: SignedBlockContents, + next_block: PublishBlockRequest, + reorg_block: PublishBlockRequest, attestations: Vec>, contribution_and_proofs: Vec>, attester_slashing: AttesterSlashing, @@ -173,13 +173,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; - let next_block = SignedBlockContents::from(next_block); + let next_block = PublishBlockRequest::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1) .await; - let reorg_block = SignedBlockContents::from(reorg_block); + let reorg_block = PublishBlockRequest::from(reorg_block); let head_state_root = head.beacon_state_root(); let attestations = harness @@ -314,13 +314,13 @@ impl ApiTester { let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; - let next_block = SignedBlockContents::from(next_block); + let next_block = PublishBlockRequest::from(next_block); // `make_block` adds random graffiti, so this will produce an alternate block let (reorg_block, _reorg_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; - let reorg_block = SignedBlockContents::from(reorg_block); + let reorg_block = PublishBlockRequest::from(reorg_block); let head_state_root = head.beacon_state_root(); let attestations = harness @@ -1301,7 +1301,7 @@ impl ApiTester { assert!(self .client - .post_beacon_blocks(&SignedBlockContents::from(block)) + .post_beacon_blocks(&PublishBlockRequest::from(block)) .await .is_err()); @@ -1328,7 +1328,7 @@ impl ApiTester { assert!(self .client - .post_beacon_blocks_ssz(&SignedBlockContents::from(block)) + .post_beacon_blocks_ssz(&PublishBlockRequest::from(block)) .await .is_err()); @@ -1357,7 +1357,8 @@ impl ApiTester { .await .is_ok()); - let blinded_block_contents = block_contents.clone_as_blinded(); + // Blinded deneb block contents is just the blinded block + let blinded_block_contents = block_contents.signed_block().clone_as_blinded(); // Test all the POST methods in sequence, they should all behave the same. 
let responses = vec![ @@ -2567,7 +2568,7 @@ impl ApiTester { let block = self .client - .get_validator_blocks::>(slot, &randao_reveal, None) + .get_validator_blocks::(slot, &randao_reveal, None) .await .unwrap() .data @@ -2576,7 +2577,7 @@ impl ApiTester { let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); let signed_block_contents = - SignedBlockContents::try_from(signed_block.clone()).unwrap(); + PublishBlockRequest::try_from(signed_block.clone()).unwrap(); self.client .post_beacon_blocks(&signed_block_contents) @@ -2631,13 +2632,13 @@ impl ApiTester { let block_bytes = self .client - .get_validator_blocks_ssz::>(slot, &randao_reveal, None) + .get_validator_blocks_ssz::(slot, &randao_reveal, None) .await .unwrap() .expect("block bytes"); let block_contents = - BlockContents::>::from_ssz_bytes(&block_bytes, &self.chain.spec) + FullBlockContents::::from_ssz_bytes(&block_bytes, &self.chain.spec) .expect("block contents bytes can be decoded"); let signed_block_contents = @@ -2704,28 +2705,26 @@ impl ApiTester { .unwrap(); if is_blinded_payload { - let block_contents = >>::from_ssz_bytes( + let blinded_block = >::from_ssz_bytes( &fork_version_response_bytes.unwrap(), &self.chain.spec, ) .expect("block contents bytes can be decoded"); - let signed_block_contents = - block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_blinded_block = + blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blocks_ssz(&signed_block_contents) + .post_beacon_blinded_blocks_ssz(&signed_blinded_block) .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. - let signed_block = signed_block_contents.deconstruct().0; - let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self.chain.head_beacon_block().clone_as_blinded(); + assert_eq!(head_block, signed_blinded_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } else { - let block_contents = >>::from_ssz_bytes( + let block_contents = >::from_ssz_bytes( &fork_version_response_bytes.unwrap(), &self.chain.spec, ) @@ -2757,7 +2756,7 @@ impl ApiTester { let block = self .client - .get_validator_blocks_modular::>( + .get_validator_blocks_modular::( slot, &Signature::infinity().unwrap().into(), None, @@ -2815,13 +2814,13 @@ impl ApiTester { // Check failure with no `skip_randao_verification` passed. self.client - .get_validator_blocks::>(slot, &bad_randao_reveal, None) + .get_validator_blocks::(slot, &bad_randao_reveal, None) .await .unwrap_err(); // Check failure with `skip_randao_verification` (requires infinity sig). 
self.client - .get_validator_blocks_modular::>( + .get_validator_blocks_modular::( slot, &bad_randao_reveal, None, @@ -2836,7 +2835,7 @@ impl ApiTester { self } - pub async fn test_blinded_block_production>(&self) { + pub async fn test_blinded_block_production(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2876,29 +2875,33 @@ impl ApiTester { let block = self .client - .get_validator_blinded_blocks::(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data; - let signed_block_contents = - block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blinded_blocks(&signed_block_contents) + .post_beacon_blinded_blocks(&signed_block) .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. - let signed_block = signed_block_contents.deconstruct().0; - let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self + .client + .get_beacon_blocks(CoreBlockId::Head) + .await + .unwrap() + .unwrap() + .data; + + assert_eq!(head_block.clone_as_blinded(), signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } } - pub async fn test_blinded_block_production_ssz>(&self) { + pub async fn test_blinded_block_production_ssz(&self) { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -2938,43 +2941,47 @@ impl ApiTester { let block_contents_bytes = self .client - .get_validator_blinded_blocks_ssz::(slot, &randao_reveal, None) + .get_validator_blinded_blocks_ssz::(slot, &randao_reveal, None) .await .unwrap() .expect("block bytes"); - let block_contents = BlockContents::::from_ssz_bytes( - &block_contents_bytes, - &self.chain.spec, - ) - .expect("block contents bytes can be decoded"); + let block_contents = + FullBlockContents::::from_ssz_bytes(&block_contents_bytes, &self.chain.spec) + .expect("block contents bytes can be decoded"); let signed_block_contents = block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); self.client - .post_beacon_blinded_blocks_ssz(&signed_block_contents) + .post_beacon_blinded_blocks_ssz( + &signed_block_contents.signed_block().clone_as_blinded(), + ) .await .unwrap(); - // This converts the generic `Payload` to a concrete type for comparison. 
- let signed_block = signed_block_contents.deconstruct().0; - let head_block = SignedBeaconBlock::from(signed_block.clone()); - assert_eq!(head_block, signed_block); + let head_block = self + .client + .get_beacon_blocks(CoreBlockId::Head) + .await + .unwrap() + .unwrap() + .data; + + let signed_block = signed_block_contents.signed_block(); + assert_eq!(&head_block, signed_block); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } } - pub async fn test_blinded_block_production_no_verify_randao>( - self, - ) -> Self { + pub async fn test_blinded_block_production_no_verify_randao(self) -> Self { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); - let block_contents = self + let blinded_block = self .client - .get_validator_blinded_blocks_modular::( + .get_validator_blinded_blocks_modular::( slot, &Signature::infinity().unwrap().into(), None, @@ -2983,18 +2990,14 @@ impl ApiTester { .await .unwrap() .data; - assert_eq!(block_contents.block().slot(), slot); + assert_eq!(blinded_block.slot(), slot); self.chain.slot_clock.set_slot(slot.as_u64() + 1); } self } - pub async fn test_blinded_block_production_verify_randao_invalid< - Payload: AbstractExecPayload, - >( - self, - ) -> Self { + pub async fn test_blinded_block_production_verify_randao_invalid(self) -> Self { let fork = self.chain.canonical_head.cached_head().head_fork(); let genesis_validators_root = self.chain.genesis_validators_root; @@ -3034,13 +3037,13 @@ impl ApiTester { // Check failure with full randao verification enabled. self.client - .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) + .get_validator_blinded_blocks::(slot, &bad_randao_reveal, None) .await .unwrap_err(); // Check failure with `skip_randao_verification` (requires infinity sig). self.client - .get_validator_blinded_blocks_modular::( + .get_validator_blinded_blocks_modular::( slot, &bad_randao_reveal, None, @@ -3520,13 +3523,7 @@ impl ApiTester { .unwrap(); let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), + Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), Full(_) => panic!("Expecting a blinded payload"), }; @@ -3545,11 +3542,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3586,11 +3582,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3630,13 +3625,7 @@ impl ApiTester { .unwrap(); let payload: BlindedPayload = match payload_type { - Blinded(payload) => payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), + Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), Full(_) => panic!("Expecting a blinded payload"), }; @@ -3665,11 +3654,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3711,13 +3699,7 @@ impl ApiTester { .unwrap(); let payload: BlindedPayload = match payload_type { - Blinded(payload) => 
payload - .data - .block() - .body() - .execution_payload() - .unwrap() - .into(), + Blinded(payload) => payload.data.body().execution_payload().unwrap().into(), Full(_) => panic!("Expecting a blinded payload"), }; @@ -3752,11 +3734,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3845,11 +3826,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -3936,11 +3916,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4026,11 +4005,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4102,11 +4080,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4162,11 +4139,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4235,11 +4211,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4265,11 +4240,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4370,11 +4344,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4410,11 +4383,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(next_slot, &randao_reveal, None) + .get_validator_blinded_blocks::(next_slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4524,11 +4496,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4608,11 +4579,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, 
&randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4673,11 +4643,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4738,11 +4707,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4803,11 +4771,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4867,11 +4834,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -4907,16 +4873,11 @@ impl ApiTester { .await .unwrap(); - let block_contents = match payload_type { + let _block_contents = match payload_type { Blinded(payload) => payload.data, Full(_) => panic!("Expecting a blinded payload"), }; - let (_, maybe_sidecars) = block_contents.deconstruct(); - - // Response should contain blob sidecars - assert!(maybe_sidecars.is_some()); - self } @@ -4940,11 +4901,10 @@ impl ApiTester { let payload: BlindedPayload = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blinded_blocks::(slot, &randao_reveal, None) .await .unwrap() .data - .block() .body() .execution_payload() .unwrap() @@ -5892,17 +5852,14 @@ async fn block_production_v3_ssz_with_skip_slots() { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_full_payload_premerge() { - ApiTester::new() - .await - .test_blinded_block_production::>() - .await; + ApiTester::new().await.test_blinded_block_production().await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_ssz_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_ssz::>() + .test_blinded_block_production_ssz() .await; } @@ -5911,7 +5868,7 @@ async fn blinded_block_production_with_skip_slots_full_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production::>() + .test_blinded_block_production() .await; } @@ -5920,7 +5877,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production_ssz::>() + .test_blinded_block_production_ssz() .await; } @@ -5928,7 +5885,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() { async fn blinded_block_production_no_verify_randao_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_no_verify_randao::>() + .test_blinded_block_production_no_verify_randao() .await; } @@ -5936,16 +5893,13 @@ async fn blinded_block_production_no_verify_randao_full_payload_premerge() { async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_verify_randao_invalid::>() + 
.test_blinded_block_production_verify_randao_invalid() .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_blinded_payload_premerge() { - ApiTester::new() - .await - .test_blinded_block_production::>() - .await; + ApiTester::new().await.test_blinded_block_production().await; } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -5953,7 +5907,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { ApiTester::new() .await .skip_slots(E::slots_per_epoch() * 2) - .test_blinded_block_production::>() + .test_blinded_block_production() .await; } @@ -5961,7 +5915,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() { async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_no_verify_randao::>() + .test_blinded_block_production_no_verify_randao() .await; } @@ -5969,7 +5923,7 @@ async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() { async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() { ApiTester::new() .await - .test_blinded_block_production_verify_randao_invalid::>() + .test_blinded_block_production_verify_randao_invalid() .await; } diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 627c871c4..9a6ad19ac 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -563,10 +563,10 @@ impl std::fmt::Display for RPCResponse { write!(f, "BlocksByRoot: Block slot: {}", block.slot()) } RPCResponse::BlobsByRange(blob) => { - write!(f, "BlobsByRange: Blob slot: {}", blob.slot) + write!(f, "BlobsByRange: Blob slot: {}", blob.slot()) } RPCResponse::BlobsByRoot(sidecar) => { - write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot) + write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 9efe44f75..60fe37482 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -9,19 +9,20 @@ use std::boxed::Box; use std::io::{Error, ErrorKind}; use std::sync::Arc; use types::{ - Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockMerge, SignedBlobSidecar, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobSidecar, EthSpec, ForkContext, ForkName, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockMerge, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. 
BeaconBlock(Arc>), - /// Gossipsub message providing notification of a [`SignedBlobSidecar`] along with the subnet id where it was received. - BlobSidecar(Box<(u64, SignedBlobSidecar)>), + /// Gossipsub message providing notification of a [`BlobSidecar`] along with the subnet id where it was received. + BlobSidecar(Box<(u64, Arc>)>), /// Gossipsub message providing notification of a Aggregate attestation and associated proof. AggregateAndProofAttestation(Box>), /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id. @@ -204,8 +205,10 @@ impl PubsubMessage { GossipKind::BlobSidecar(blob_index) => { match fork_context.from_context_bytes(gossip_topic.fork_digest) { Some(ForkName::Deneb) => { - let blob_sidecar = SignedBlobSidecar::from_ssz_bytes(data) - .map_err(|e| format!("{:?}", e))?; + let blob_sidecar = Arc::new( + BlobSidecar::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ); Ok(PubsubMessage::BlobSidecar(Box::new(( *blob_index, blob_sidecar, @@ -318,7 +321,8 @@ impl std::fmt::Display for PubsubMessage { PubsubMessage::BlobSidecar(data) => write!( f, "BlobSidecar: slot: {}, blob index: {}", - data.1.message.slot, data.1.message.index, + data.1.slot(), + data.1.index, ), PubsubMessage::AggregateAndProofAttestation(att) => write!( f, diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 82daf74ef..5d98039a8 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -33,10 +33,11 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::HotColdDBError; use tokio::sync::mpsc; use types::{ - Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate, - LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, - SignedBlobSidecar, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, - Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId, + Attestation, AttesterSlashing, BlobSidecar, EthSpec, Hash256, IndexedAttestation, + LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing, + SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage, + SyncSubnetId, }; use beacon_processor::{ @@ -607,20 +608,20 @@ impl NetworkBeaconProcessor { peer_id: PeerId, _peer_client: Client, blob_index: u64, - signed_blob: SignedBlobSidecar, + blob_sidecar: Arc>, seen_duration: Duration, ) { - let slot = signed_blob.message.slot; - let root = signed_blob.message.block_root; - let index = signed_blob.message.index; - let commitment = signed_blob.message.kzg_commitment; + let slot = blob_sidecar.slot(); + let root = blob_sidecar.block_root(); + let index = blob_sidecar.index; + let commitment = blob_sidecar.kzg_commitment; let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock); // Log metrics to track delay from other nodes on the network. 
metrics::observe_duration(&metrics::BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME, delay); metrics::set_gauge(&metrics::BEACON_BLOB_LAST_DELAY, delay.as_millis() as i64); match self .chain - .verify_blob_sidecar_for_gossip(signed_blob, blob_index) + .verify_blob_sidecar_for_gossip(blob_sidecar, blob_index) { Ok(gossip_verified_blob) => { metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL); @@ -631,7 +632,7 @@ impl NetworkBeaconProcessor { self.log, "Gossip blob arrived late"; "block_root" => ?gossip_verified_blob.block_root(), - "proposer_index" => gossip_verified_blob.proposer_index(), + "proposer_index" => gossip_verified_blob.block_proposer_index(), "slot" => gossip_verified_blob.slot(), "delay" => ?delay, "commitment" => %gossip_verified_blob.kzg_commitment(), @@ -670,17 +671,30 @@ impl NetworkBeaconProcessor { self.log, "Unknown parent hash for blob"; "action" => "requesting parent", - "block_root" => %blob.block_root, - "parent_root" => %blob.block_parent_root, + "block_root" => %blob.block_root(), + "parent_root" => %blob.block_parent_root(), "commitment" => %commitment, ); self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob)); } - GossipBlobError::ProposerSignatureInvalid + GossipBlobError::KzgNotInitialized + | GossipBlobError::PubkeyCacheTimeout + | GossipBlobError::BeaconChainError(_) => { + crit!( + self.log, + "Internal error when verifying blob sidecar"; + "error" => ?err, + ) + } + GossipBlobError::ProposalSignatureInvalid | GossipBlobError::UnknownValidator(_) | GossipBlobError::ProposerIndexMismatch { .. } | GossipBlobError::BlobIsNotLaterThanParent { .. } - | GossipBlobError::InvalidSubnet { .. } => { + | GossipBlobError::InvalidSubnet { .. } + | GossipBlobError::InvalidInclusionProof + | GossipBlobError::KzgError(_) + | GossipBlobError::InclusionProof(_) + | GossipBlobError::NotFinalizedDescendant { .. } => { warn!( self.log, "Could not verify blob sidecar for gossip. Rejecting the blob sidecar"; @@ -703,7 +717,6 @@ impl NetworkBeaconProcessor { ); } GossipBlobError::FutureSlot { .. } - | GossipBlobError::BeaconChainError(_) | GossipBlobError::RepeatBlob { .. } | GossipBlobError::PastFinalizedSlot { .. 
} => { warn!( diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 8094d4677..2356a197c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -212,7 +212,7 @@ impl NetworkBeaconProcessor { peer_id: PeerId, peer_client: Client, blob_index: u64, - blob: SignedBlobSidecar, + blob_sidecar: Arc>, seen_timestamp: Duration, ) -> Result<(), Error> { let processor = self.clone(); @@ -223,7 +223,7 @@ impl NetworkBeaconProcessor { peer_id, peer_client, blob_index, - blob, + blob_sidecar, seen_timestamp, ) .await @@ -231,7 +231,7 @@ impl NetworkBeaconProcessor { self.try_send(BeaconWorkEvent { drop_during_sync: false, - work: Work::GossipSignedBlobSidecar(Box::pin(process_fn)), + work: Work::GossipBlobSidecar(Box::pin(process_fn)), }) } diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index d76ce5aad..acfa069d3 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -292,7 +292,7 @@ impl NetworkBeaconProcessor { ) { let Some(slot) = blobs .iter() - .find_map(|blob| blob.as_ref().map(|blob| blob.slot)) + .find_map(|blob| blob.as_ref().map(|blob| blob.slot())) else { return; }; diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 0945aa743..503d2f126 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -33,8 +33,8 @@ use std::time::Duration; use tokio::sync::mpsc; use types::blob_sidecar::FixedBlobSidecarList; use types::{ - Attestation, AttesterSlashing, Epoch, Hash256, MainnetEthSpec, ProposerSlashing, - SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecarList, SignedVoluntaryExit, Slot, + Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec, + ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot, SubnetId, }; @@ -55,7 +55,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10); struct TestRig { chain: Arc>, next_block: Arc>, - next_blobs: Option>, + next_blobs: Option>, attestations: Vec<(Attestation, SubnetId)>, next_block_attestations: Vec<(Attestation, SubnetId)>, next_block_aggregate_attestations: Vec>, @@ -186,8 +186,10 @@ impl TestRig { let log = harness.logger().clone(); - let mut beacon_processor_config = BeaconProcessorConfig::default(); - beacon_processor_config.enable_backfill_rate_limiting = enable_backfill_rate_limiting; + let beacon_processor_config = BeaconProcessorConfig { + enable_backfill_rate_limiting, + ..Default::default() + }; let BeaconProcessorChannels { beacon_processor_tx, beacon_processor_rx, @@ -243,12 +245,17 @@ impl TestRig { chain.spec.maximum_gossip_clock_disparity(), ); - assert!(!beacon_processor.is_err()); - + assert!(beacon_processor.is_ok()); + let block = next_block_tuple.0; + let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { + Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap()) + } else { + None + }; Self { chain, - next_block: Arc::new(next_block_tuple.0), - next_blobs: next_block_tuple.1, + next_block: Arc::new(block), + next_blobs: blob_sidecars, attestations, next_block_attestations, 
next_block_aggregate_attestations, @@ -293,7 +300,7 @@ impl TestRig { junk_message_id(), junk_peer_id(), Client::default(), - blob.message.index, + blob.index, blob.clone(), Duration::from_secs(0), ) @@ -306,7 +313,7 @@ impl TestRig { self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()), + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::ParentLookup { chain_hash: Hash256::random(), @@ -320,7 +327,7 @@ impl TestRig { self.network_beacon_processor .send_rpc_beacon_block( block_root, - RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()), + RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()), std::time::Duration::default(), BlockProcessType::SingleBlock { id: 1 }, ) @@ -328,12 +335,7 @@ impl TestRig { } pub fn enqueue_single_lookup_rpc_blobs(&self) { if let Some(blobs) = self.next_blobs.clone() { - let blobs = FixedBlobSidecarList::from( - blobs - .into_iter() - .map(|b| Some(b.message)) - .collect::>(), - ); + let blobs = FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()); self.network_beacon_processor .send_rpc_blobs( self.next_block.canonical_root(), diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index 4df940a3b..5d3dde90c 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -302,14 +302,14 @@ impl Router { ), ), PubsubMessage::BlobSidecar(data) => { - let (blob_index, signed_blob) = *data; + let (blob_index, blob_sidecar) = *data; self.handle_beacon_processor_send_result( self.network_beacon_processor.send_gossip_blob_sidecar( message_id, peer_id, self.network_globals.client(&peer_id), blob_index, - signed_blob, + blob_sidecar, timestamp_now(), ), ) diff --git a/beacon_node/network/src/sync/block_lookups/common.rs b/beacon_node/network/src/sync/block_lookups/common.rs index 7f141edb5..e089ef4fe 100644 --- a/beacon_node/network/src/sync/block_lookups/common.rs +++ b/beacon_node/network/src/sync/block_lookups/common.rs @@ -428,7 +428,7 @@ impl RequestState for BlobRequestState (SignedBeaconBlock, Vec>) { let (mut block, mut blobs) = self.rand_block_and_blobs(fork_name, num_blobs); *block.message_mut().parent_root_mut() = parent_root; - let block_root = block.canonical_root(); blobs.iter_mut().for_each(|blob| { - blob.block_parent_root = parent_root; - blob.block_root = block_root; + blob.signed_block_header = block.signed_block_header(); }); (block, blobs) } @@ -1293,7 +1291,7 @@ mod deneb_only { let child_blob = blobs.first().cloned().unwrap(); let parent_root = block_root; - let child_root = child_blob.block_root; + let child_root = child_blob.block_root(); block_root = child_root; let mut blobs = FixedBlobSidecarList::default(); diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 819ea8e30..f9ed45fcd 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -46,7 +46,7 @@ impl BlocksAndBlobsRequestInfo { while { let pair_next_blob = blob_iter .peek() - .map(|sidecar| sidecar.slot == block.slot()) + .map(|sidecar| sidecar.slot() == block.slot()) .unwrap_or(false); pair_next_blob } { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c1e9cde3f..3bd32308a 100644 --- 
a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -637,9 +637,9 @@ impl SyncManager { ); } SyncMessage::UnknownParentBlob(peer_id, blob) => { - let blob_slot = blob.slot; - let block_root = blob.block_root; - let parent_root = blob.block_parent_root; + let blob_slot = blob.slot(); + let block_root = blob.block_root(); + let parent_root = blob.block_parent_root(); let blob_index = blob.index; if blob_index >= T::EthSpec::max_blobs_per_block() as u64 { warn!(self.log, "Peer sent blob with invalid index"; "index" => blob_index, "peer_id" => %peer_id); diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index da5c1a5a1..50f180744 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -769,9 +769,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks>( + pub async fn post_beacon_blocks( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -789,9 +789,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blocks_ssz>( + pub async fn post_beacon_blocks_ssz( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -813,9 +813,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. - pub async fn post_beacon_blinded_blocks>( + pub async fn post_beacon_blinded_blocks( &self, - block: &SignedBlockContents, + block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -833,9 +833,9 @@ impl BeaconNodeHttpClient { /// `POST beacon/blinded_blocks` /// /// Returns `Ok(None)` on a 404 error. 
- pub async fn post_beacon_blinded_blocks_ssz>( + pub async fn post_beacon_blinded_blocks_ssz( &self, - block: &SignedBlockContents, + block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { let mut path = self.eth_path(V1)?; @@ -887,9 +887,9 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blocks` - pub async fn post_beacon_blocks_v2>( + pub async fn post_beacon_blocks_v2( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( @@ -904,9 +904,9 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blocks` - pub async fn post_beacon_blocks_v2_ssz>( + pub async fn post_beacon_blocks_v2_ssz( &self, - block_contents: &SignedBlockContents, + block_contents: &PublishBlockRequest, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( @@ -921,16 +921,16 @@ impl BeaconNodeHttpClient { } /// `POST v2/beacon/blinded_blocks` - pub async fn post_beacon_blinded_blocks_v2>( + pub async fn post_beacon_blinded_blocks_v2( &self, - block_contents: &SignedBlockContents, + signed_block: &SignedBlindedBeaconBlock, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block_contents, + signed_block, Some(self.timeouts.proposal), - block_contents.signed_block().message().body().fork_name(), + signed_block.message().body().fork_name(), ) .await?; @@ -940,14 +940,14 @@ impl BeaconNodeHttpClient { /// `POST v2/beacon/blinded_blocks` pub async fn post_beacon_blinded_blocks_v2_ssz( &self, - block_contents: &SignedBlindedBlockContents, + signed_block: &SignedBlindedBeaconBlock, validation_level: Option, ) -> Result<(), Error> { self.post_generic_with_consensus_version_and_ssz_body( self.post_beacon_blinded_blocks_v2_path(validation_level)?, - block_contents.as_ssz_bytes(), + signed_block.as_ssz_bytes(), Some(self.timeouts.proposal), - block_contents.signed_block().message().body().fork_name(), + signed_block.message().body().fork_name(), ) .await?; @@ -1700,38 +1700,33 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks>( + pub async fn get_validator_blocks( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blocks_modular(slot, randao_reveal, graffiti, SkipRandaoVerification::No) .await } /// `GET v2/validator/blocks/{slot}` - pub async fn get_validator_blocks_modular>( + pub async fn get_validator_blocks_modular( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result>, Error> { let path = self - .get_validator_blocks_path::( - slot, - randao_reveal, - graffiti, - skip_randao_verification, - ) + .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; self.get(path).await } /// returns `GET v2/validator/blocks/{slot}` URL path - pub async fn get_validator_blocks_path>( + pub async fn get_validator_blocks_path( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1837,12 +1832,12 @@ impl BeaconNodeHttpClient { if is_blinded_payload { let blinded_payload = response - .json::>>>() + .json::>>() .await?; Ok(ForkVersionedBeaconBlockType::Blinded(blinded_payload)) } else { let full_payload = response - .json::>>>() + .json::>>() .await?; 
Ok(ForkVersionedBeaconBlockType::Full(full_payload)) } @@ -1901,13 +1896,13 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` in ssz format - pub async fn get_validator_blocks_ssz>( + pub async fn get_validator_blocks_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, ) -> Result>, Error> { - self.get_validator_blocks_modular_ssz::( + self.get_validator_blocks_modular_ssz::( slot, randao_reveal, graffiti, @@ -1917,7 +1912,7 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blocks/{slot}` in ssz format - pub async fn get_validator_blocks_modular_ssz>( + pub async fn get_validator_blocks_modular_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1925,12 +1920,7 @@ impl BeaconNodeHttpClient { skip_randao_verification: SkipRandaoVerification, ) -> Result>, Error> { let path = self - .get_validator_blocks_path::( - slot, - randao_reveal, - graffiti, - skip_randao_verification, - ) + .get_validator_blocks_path::(slot, randao_reveal, graffiti, skip_randao_verification) .await?; self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz) @@ -1938,12 +1928,12 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks>( + pub async fn get_validator_blinded_blocks( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, - ) -> Result>, Error> { + ) -> Result>, Error> { self.get_validator_blinded_blocks_modular( slot, randao_reveal, @@ -1954,7 +1944,7 @@ impl BeaconNodeHttpClient { } /// returns `GET v1/validator/blinded_blocks/{slot}` URL path - pub async fn get_validator_blinded_blocks_path>( + pub async fn get_validator_blinded_blocks_path( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -1986,18 +1976,15 @@ impl BeaconNodeHttpClient { } /// `GET v1/validator/blinded_blocks/{slot}` - pub async fn get_validator_blinded_blocks_modular< - T: EthSpec, - Payload: AbstractExecPayload, - >( + pub async fn get_validator_blinded_blocks_modular( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result>, Error> { + ) -> Result>, Error> { let path = self - .get_validator_blinded_blocks_path::( + .get_validator_blinded_blocks_path::( slot, randao_reveal, graffiti, @@ -2009,13 +1996,13 @@ impl BeaconNodeHttpClient { } /// `GET v2/validator/blinded_blocks/{slot}` in ssz format - pub async fn get_validator_blinded_blocks_ssz>( + pub async fn get_validator_blinded_blocks_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, ) -> Result>, Error> { - self.get_validator_blinded_blocks_modular_ssz::( + self.get_validator_blinded_blocks_modular_ssz::( slot, randao_reveal, graffiti, @@ -2024,10 +2011,7 @@ impl BeaconNodeHttpClient { .await } - pub async fn get_validator_blinded_blocks_modular_ssz< - T: EthSpec, - Payload: AbstractExecPayload, - >( + pub async fn get_validator_blinded_blocks_modular_ssz( &self, slot: Slot, randao_reveal: &SignatureBytes, @@ -2035,7 +2019,7 @@ impl BeaconNodeHttpClient { skip_randao_verification: SkipRandaoVerification, ) -> Result>, Error> { let path = self - .get_validator_blinded_blocks_path::( + .get_validator_blinded_blocks_path::( slot, randao_reveal, graffiti, diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index dea8b2bf5..7007138d8 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -12,9 +12,7 @@ use std::convert::TryFrom; use 
std::fmt::{self, Display}; use std::str::{from_utf8, FromStr}; use std::time::Duration; -use tree_hash::TreeHash; use types::beacon_block_body::KzgCommitments; -use types::builder_bid::BlindedBlobsBundle; pub use types::*; #[cfg(feature = "lighthouse")] @@ -901,9 +899,9 @@ pub struct SseBlobSidecar { impl SseBlobSidecar { pub fn from_blob_sidecar(blob_sidecar: &BlobSidecar) -> SseBlobSidecar { SseBlobSidecar { - block_root: blob_sidecar.block_root, + block_root: blob_sidecar.block_root(), index: blob_sidecar.index, - slot: blob_sidecar.slot, + slot: blob_sidecar.slot(), kzg_commitment: blob_sidecar.kzg_commitment, versioned_hash: blob_sidecar.kzg_commitment.calculate_versioned_hash(), } @@ -1411,15 +1409,14 @@ pub mod serde_status_code { } pub enum ForkVersionedBeaconBlockType { - Full(ForkVersionedResponse>>), - Blinded(ForkVersionedResponse>>), + Full(ForkVersionedResponse>), + Blinded(ForkVersionedResponse>), } #[cfg(test)] mod tests { use super::*; use ssz::Encode; - use std::sync::Arc; #[test] fn query_vec() { @@ -1460,17 +1457,17 @@ mod tests { type E = MainnetEthSpec; let spec = ForkName::Capella.make_genesis_spec(E::default_spec()); - let block: SignedBlockContents> = SignedBeaconBlock::from_block( + let block: PublishBlockRequest = SignedBeaconBlock::from_block( BeaconBlock::::Capella(BeaconBlockCapella::empty(&spec)), Signature::empty(), ) .try_into() .expect("should convert into signed block contents"); - let decoded: SignedBlockContents = - SignedBlockContents::from_ssz_bytes(&block.as_ssz_bytes(), &spec) + let decoded: PublishBlockRequest = + PublishBlockRequest::from_ssz_bytes(&block.as_ssz_bytes(), &spec) .expect("should decode Block"); - assert!(matches!(decoded, SignedBlockContents::Block(_))); + assert!(matches!(decoded, PublishBlockRequest::Block(_))); } #[test] @@ -1482,87 +1479,49 @@ mod tests { BeaconBlock::::Deneb(BeaconBlockDeneb::empty(&spec)), Signature::empty(), ); - let blobs = SignedSidecarList::from(vec![SignedSidecar { - message: Arc::new(BlobSidecar::empty()), - signature: Signature::empty(), - _phantom: Default::default(), - }]); - let signed_block_contents = SignedBlockContents::new(block, Some(blobs)); + let blobs = BlobsList::::from(vec![Blob::::default()]); + let kzg_proofs = KzgProofs::::from(vec![KzgProof::empty()]); + let signed_block_contents = PublishBlockRequest::new(block, Some((kzg_proofs, blobs))); - let decoded: SignedBlockContents> = - SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) + let decoded: PublishBlockRequest = + PublishBlockRequest::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) .expect("should decode BlockAndBlobSidecars"); - assert!(matches!( - decoded, - SignedBlockContents::BlockAndBlobSidecars(_) - )); - } - - #[test] - fn ssz_signed_blinded_block_contents_with_blobs() { - type E = MainnetEthSpec; - let mut spec = E::default_spec(); - spec.altair_fork_epoch = Some(Epoch::new(0)); - spec.bellatrix_fork_epoch = Some(Epoch::new(0)); - spec.capella_fork_epoch = Some(Epoch::new(0)); - spec.deneb_fork_epoch = Some(Epoch::new(0)); - - let blinded_block = SignedBeaconBlock::from_block( - BeaconBlock::>::Deneb(BeaconBlockDeneb::empty(&spec)), - Signature::empty(), - ); - let blinded_blobs = SignedSidecarList::from(vec![SignedSidecar { - message: Arc::new(BlindedBlobSidecar::empty()), - signature: Signature::empty(), - _phantom: Default::default(), - }]); - let signed_block_contents = SignedBlockContents::new(blinded_block, Some(blinded_blobs)); - - let decoded: SignedBlockContents> = - 
SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec) - .expect("should decode BlindedBlockAndBlobSidecars"); - assert!(matches!( - decoded, - SignedBlockContents::BlindedBlockAndBlobSidecars(_) - )); + assert!(matches!(decoded, PublishBlockRequest::BlockContents(_))); } } -/// A wrapper over a [`BeaconBlock`] or a [`BeaconBlockAndBlobSidecars`]. +#[derive(Debug, Encode, Serialize, Deserialize)] +#[serde(untagged)] +#[serde(bound = "E: EthSpec")] +#[ssz(enum_behaviour = "transparent")] +pub enum ProduceBlockV3Response { + Full(FullBlockContents), + Blinded(BlindedBeaconBlock), +} + +/// A wrapper over a [`BeaconBlock`] or a [`BlockContents`]. #[derive(Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[ssz(enum_behaviour = "transparent")] -pub enum BlockContents> { - BlockAndBlobSidecars(BeaconBlockAndBlobSidecars), - BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars), - Block(BeaconBlock), +pub enum FullBlockContents { + /// This is a full deneb variant with block and blobs. + BlockContents(BlockContents), + /// This variant is for all pre-deneb full blocks. + Block(BeaconBlock), } -pub type BlockContentsTuple = ( - BeaconBlock, - Option>::Sidecar>>, -); +pub type BlockContentsTuple = (BeaconBlock, Option<(KzgProofs, BlobsList)>); -impl> BlockContents { - pub fn new( - block: BeaconBlock, - blobs: Option>, - ) -> Self { - match (Payload::block_type(), blobs) { - (BlockType::Full, Some(blobs)) => { - Self::BlockAndBlobSidecars(BeaconBlockAndBlobSidecars { - block, - blob_sidecars: blobs, - }) - } - (BlockType::Blinded, Some(blobs)) => { - Self::BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars { - blinded_block: block, - blinded_blob_sidecars: blobs, - }) - } - (_, None) => Self::Block(block), +impl FullBlockContents { + pub fn new(block: BeaconBlock, blob_data: Option<(KzgProofs, BlobsList)>) -> Self { + match blob_data { + Some((kzg_proofs, blobs)) => Self::BlockContents(BlockContents { + block, + kzg_proofs, + blobs, + }), + None => Self::Block(block), } } @@ -1581,43 +1540,41 @@ impl> BlockContents { match fork_at_slot { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - BeaconBlock::from_ssz_bytes(bytes, spec).map(|block| BlockContents::Block(block)) + BeaconBlock::from_ssz_bytes(bytes, spec) + .map(|block| FullBlockContents::Block(block)) } ForkName::Deneb => { let mut builder = ssz::SszDecoderBuilder::new(bytes); + builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; + builder.register_type::>()?; + builder.register_type::>()?; let mut decoder = builder.build()?; let block = decoder.decode_next_with(|bytes| BeaconBlock::from_ssz_bytes(bytes, spec))?; + let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; - Ok(BlockContents::new(block, Some(blobs))) + + Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs)))) } } } - pub fn block(&self) -> &BeaconBlock { + pub fn block(&self) -> &BeaconBlock { match self { - BlockContents::BlockAndBlobSidecars(block_and_sidecars) => &block_and_sidecars.block, - BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - &block_and_sidecars.blinded_block - } - BlockContents::Block(block) => block, + FullBlockContents::BlockContents(block_and_sidecars) => &block_and_sidecars.block, + FullBlockContents::Block(block) => block, } } - pub fn deconstruct(self) -> BlockContentsTuple { + pub fn deconstruct(self) -> BlockContentsTuple { match self { - 
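For orientation, a hedged round-trip sketch of the new `FullBlockContents` wrapper (fragment only, not code from this patch; `E`, `spec`, `block`, `kzg_proofs` and `blobs` are assumed bindings, and `ssz::Encode` is assumed to be in scope). Post-Deneb the SSZ payload is simply the block followed by the proofs and the blobs, and decoding selects the block variant from the fork inferred via `spec`:

// Illustrative round-trip; `E: EthSpec` is an assumed type parameter.
let contents = FullBlockContents::<E>::new(block, Some((kzg_proofs, blobs)));
let bytes = contents.as_ssz_bytes();
let decoded = FullBlockContents::<E>::from_ssz_bytes(&bytes, &spec)
    .expect("encode/decode should round-trip for a Deneb block");
assert_eq!(decoded.block().slot(), contents.block().slot());
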
BlockContents::BlockAndBlobSidecars(block_and_sidecars) => ( + FullBlockContents::BlockContents(block_and_sidecars) => ( block_and_sidecars.block, - Some(block_and_sidecars.blob_sidecars), + Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)), ), - BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => ( - block_and_sidecars.blinded_block, - Some(block_and_sidecars.blinded_blob_sidecars), - ), - BlockContents::Block(block) => (block, None), + FullBlockContents::Block(block) => (block, None), } } @@ -1628,104 +1585,64 @@ impl> BlockContents { fork: &Fork, genesis_validators_root: Hash256, spec: &ChainSpec, - ) -> SignedBlockContents { + ) -> PublishBlockRequest { let (block, maybe_blobs) = self.deconstruct(); let signed_block = block.sign(secret_key, fork, genesis_validators_root, spec); - let signed_blobs = maybe_blobs.map(|blobs| { - blobs - .into_iter() - .map(|blob| blob.sign(secret_key, fork, genesis_validators_root, spec)) - .collect::>() - .into() - }); - SignedBlockContents::new(signed_block, signed_blobs) + PublishBlockRequest::new(signed_block, maybe_blobs) } } -impl> ForkVersionDeserialize - for BlockContents -{ +impl ForkVersionDeserialize for FullBlockContents { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, fork_name: ForkName, ) -> Result { match fork_name { ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Ok(BlockContents::Block(BeaconBlock::deserialize_by_fork::< - 'de, - D, - >(value, fork_name)?)) - } - ForkName::Deneb => { - let block_contents = match Payload::block_type() { - BlockType::Blinded => BlockContents::BlindedBlockAndBlobSidecars( - BlindedBeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>( - value, fork_name, - )?, - ), - BlockType::Full => BlockContents::BlockAndBlobSidecars( - BeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>( - value, fork_name, - )?, - ), - }; - Ok(block_contents) + Ok(FullBlockContents::Block( + BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?, + )) } + ForkName::Deneb => Ok(FullBlockContents::BlockContents( + BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?, + )), } } } -impl> Into> - for BlockContents -{ - fn into(self) -> BeaconBlock { +impl Into> for FullBlockContents { + fn into(self) -> BeaconBlock { match self { - Self::BlockAndBlobSidecars(block_and_sidecars) => block_and_sidecars.block, - Self::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - block_and_sidecars.blinded_block - } + Self::BlockContents(block_and_sidecars) => block_and_sidecars.block, Self::Block(block) => block, } } } -pub type SignedBlockContentsTuple = ( - SignedBeaconBlock, - Option>::Sidecar>>, -); +pub type SignedBlockContentsTuple = (SignedBeaconBlock, Option<(KzgProofs, BlobsList)>); -pub type SignedBlindedBlockContents = SignedBlockContents>; - -/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`]. +/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`]. 
#[derive(Clone, Debug, Encode, Serialize, Deserialize)] #[serde(untagged)] #[serde(bound = "T: EthSpec")] #[ssz(enum_behaviour = "transparent")] -pub enum SignedBlockContents = FullPayload> { - BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars), - BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars), - Block(SignedBeaconBlock), +pub enum PublishBlockRequest { + BlockContents(SignedBlockContents), + Block(SignedBeaconBlock), } -impl> SignedBlockContents { +impl PublishBlockRequest { pub fn new( - block: SignedBeaconBlock, - blobs: Option>, + block: SignedBeaconBlock, + blob_items: Option<(KzgProofs, BlobsList)>, ) -> Self { - match (Payload::block_type(), blobs) { - (BlockType::Full, Some(blobs)) => { - Self::BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars { - signed_block: block, - signed_blob_sidecars: blobs, - }) - } - (BlockType::Blinded, Some(blobs)) => { - Self::BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars { - signed_blinded_block: block, - signed_blinded_blob_sidecars: blobs, - }) - } - (_, None) => Self::Block(block), + match blob_items { + Some((kzg_proofs, blobs)) => Self::BlockContents(SignedBlockContents { + signed_block: block, + kzg_proofs, + blobs, + }), + None => Self::Block(block), } } @@ -1745,133 +1662,88 @@ impl> SignedBlockContents { SignedBeaconBlock::from_ssz_bytes(bytes, spec) - .map(|block| SignedBlockContents::Block(block)) + .map(|block| PublishBlockRequest::Block(block)) } ForkName::Deneb => { let mut builder = ssz::SszDecoderBuilder::new(bytes); builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; + builder.register_type::>()?; + builder.register_type::>()?; let mut decoder = builder.build()?; let block = decoder .decode_next_with(|bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec))?; + let kzg_proofs = decoder.decode_next()?; let blobs = decoder.decode_next()?; - Ok(SignedBlockContents::new(block, Some(blobs))) + Ok(PublishBlockRequest::new(block, Some((kzg_proofs, blobs)))) } } } - pub fn signed_block(&self) -> &SignedBeaconBlock { + pub fn signed_block(&self) -> &SignedBeaconBlock { match self { - SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => { + PublishBlockRequest::BlockContents(block_and_sidecars) => { &block_and_sidecars.signed_block } - SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - &block_and_sidecars.signed_blinded_block - } - SignedBlockContents::Block(block) => block, + PublishBlockRequest::Block(block) => block, } } - pub fn blobs_cloned(&self) -> Option> { + pub fn deconstruct(self) -> SignedBlockContentsTuple { match self { - SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => { - Some(block_and_sidecars.signed_blob_sidecars.clone()) - } - SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => { - Some(block_and_sidecars.signed_blinded_blob_sidecars.clone()) - } - SignedBlockContents::Block(_block) => None, - } - } - - pub fn deconstruct(self) -> SignedBlockContentsTuple { - match self { - SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => ( + PublishBlockRequest::BlockContents(block_and_sidecars) => ( block_and_sidecars.signed_block, - Some(block_and_sidecars.signed_blob_sidecars), + Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)), ), - SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => ( - block_and_sidecars.signed_blinded_block, - Some(block_and_sidecars.signed_blinded_blob_sidecars), - ), - SignedBlockContents::Block(block) => (block, 
None), + PublishBlockRequest::Block(block) => (block, None), } } } -impl SignedBlockContents> { - pub fn try_into_full_block_and_blobs( - self, - maybe_full_payload_contents: Option>, - ) -> Result>, String> { - match self { - SignedBlockContents::BlindedBlockAndBlobSidecars(blinded_block_and_blob_sidecars) => { - match maybe_full_payload_contents { - None | Some(FullPayloadContents::Payload(_)) => { - Err("Can't build full block contents without payload and blobs".to_string()) - } - Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { - let signed_block = blinded_block_and_blob_sidecars - .signed_blinded_block - .try_into_full_block(Some(payload_and_blobs.execution_payload)) - .ok_or("Failed to build full block with payload".to_string())?; - let signed_blob_sidecars: SignedBlobSidecarList = - blinded_block_and_blob_sidecars - .signed_blinded_blob_sidecars - .into_iter() - .zip(payload_and_blobs.blobs_bundle.blobs) - .map(|(blinded_blob_sidecar, blob)| { - blinded_blob_sidecar.into_full_blob_sidecars(blob) - }) - .collect::>() - .into(); +/// Converting from a `SignedBlindedBeaconBlock` into a full `SignedBlockContents`. +pub fn into_full_block_and_blobs( + blinded_block: SignedBlindedBeaconBlock, + maybe_full_payload_contents: Option>, +) -> Result, String> { + match maybe_full_payload_contents { + None => { + let signed_block = blinded_block + .try_into_full_block(None) + .ok_or("Failed to build full block with payload".to_string())?; + Ok(PublishBlockRequest::new(signed_block, None)) + } + // This variant implies a pre-deneb block + Some(FullPayloadContents::Payload(execution_payload)) => { + let signed_block = blinded_block + .try_into_full_block(Some(execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; + Ok(PublishBlockRequest::new(signed_block, None)) + } + // This variant implies a post-deneb block + Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => { + let signed_block = blinded_block + .try_into_full_block(Some(payload_and_blobs.execution_payload)) + .ok_or("Failed to build full block with payload".to_string())?; - Ok(SignedBlockContents::new( - signed_block, - Some(signed_blob_sidecars), - )) - } - } - } - SignedBlockContents::Block(blinded_block) => { - let full_payload_opt = maybe_full_payload_contents.map(|o| o.deconstruct().0); - blinded_block - .try_into_full_block(full_payload_opt) - .map(SignedBlockContents::Block) - .ok_or("Can't build full block without payload".to_string()) - } - SignedBlockContents::BlockAndBlobSidecars(_) => Err( - "BlockAndBlobSidecars variant not expected when constructing full block" - .to_string(), - ), + Ok(PublishBlockRequest::new( + signed_block, + Some(( + payload_and_blobs.blobs_bundle.proofs, + payload_and_blobs.blobs_bundle.blobs, + )), + )) } } } -impl SignedBlockContents { - pub fn clone_as_blinded(&self) -> SignedBlindedBlockContents { - let blinded_blobs = self.blobs_cloned().map(|blob_sidecars| { - blob_sidecars - .into_iter() - .map(|blob| blob.into()) - .collect::>() - .into() - }); - SignedBlockContents::new(self.signed_block().clone_as_blinded(), blinded_blobs) - } -} - -impl> TryFrom> - for SignedBlockContents -{ +impl TryFrom> for PublishBlockRequest { type Error = &'static str; - fn try_from(block: SignedBeaconBlock) -> Result { + fn try_from(block: SignedBeaconBlock) -> Result { match block { SignedBeaconBlock::Base(_) | SignedBeaconBlock::Altair(_) | SignedBeaconBlock::Merge(_) - | SignedBeaconBlock::Capella(_) => Ok(SignedBlockContents::Block(block)), + | 
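A hedged publishing sketch tying the new request type to the updated client methods above (fragment only; `client`, `signed_block`, `kzg_proofs`, `blobs` and `signed_blinded_block` are assumed bindings inside an async context):

// Full (locally built) flow: the block and its (proofs, blobs) travel together.
let request = PublishBlockRequest::new(signed_block, Some((kzg_proofs, blobs)));
client.post_beacon_blocks_v2(&request, None).await?;

// Blinded (builder) flow: only the signed blinded block is posted; the beacon node
// rebuilds a full PublishBlockRequest from the builder's FullPayloadContents using
// `into_full_block_and_blobs` before broadcasting.
client
    .post_beacon_blinded_blocks_v2(&signed_blinded_block, None)
    .await?;
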
SignedBeaconBlock::Capella(_) => Ok(PublishBlockRequest::Block(block)), SignedBeaconBlock::Deneb(_) => { Err("deneb block contents cannot be fully constructed from just the signed block") } @@ -1879,93 +1751,49 @@ impl> TryFrom> From> - for SignedBlockContents -{ - fn from(block_contents_tuple: SignedBlockContentsTuple) -> Self { - SignedBlockContents::new(block_contents_tuple.0, block_contents_tuple.1) +impl From> for PublishBlockRequest { + fn from(block_contents_tuple: SignedBlockContentsTuple) -> Self { + PublishBlockRequest::new(block_contents_tuple.0, block_contents_tuple.1) } } #[derive(Debug, Clone, Serialize, Deserialize, Encode)] #[serde(bound = "T: EthSpec")] -pub struct SignedBeaconBlockAndBlobSidecars> { - pub signed_block: SignedBeaconBlock, - pub signed_blob_sidecars: SignedSidecarList, +pub struct SignedBlockContents { + pub signed_block: SignedBeaconBlock, + pub kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, } #[derive(Debug, Clone, Serialize, Deserialize, Encode)] -#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] -pub struct BeaconBlockAndBlobSidecars> { - pub block: BeaconBlock, - pub blob_sidecars: SidecarList, +#[serde(bound = "T: EthSpec")] +pub struct BlockContents { + pub block: BeaconBlock, + pub kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + pub blobs: BlobsList, } -impl> ForkVersionDeserialize - for BeaconBlockAndBlobSidecars -{ +impl ForkVersionDeserialize for BlockContents { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, fork_name: ForkName, ) -> Result { #[derive(Deserialize)] - #[serde(bound = "T: EthSpec, S: Sidecar")] - struct Helper> { + #[serde(bound = "T: EthSpec")] + struct Helper { block: serde_json::Value, - blob_sidecars: SidecarList, + kzg_proofs: KzgProofs, + #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] + blobs: BlobsList, } - let helper: Helper = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; + let helper: Helper = serde_json::from_value(value).map_err(serde::de::Error::custom)?; Ok(Self { block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?, - blob_sidecars: helper.blob_sidecars, - }) - } -} - -#[derive(Debug, Clone, Serialize, Deserialize, Encode)] -#[serde(bound = "T: EthSpec")] -pub struct SignedBlindedBeaconBlockAndBlobSidecars< - T: EthSpec, - Payload: AbstractExecPayload = BlindedPayload, -> { - pub signed_blinded_block: SignedBeaconBlock, - pub signed_blinded_blob_sidecars: SignedSidecarList, -} - -#[derive(Debug, Clone, Serialize, Deserialize, Encode)] -#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload")] -pub struct BlindedBeaconBlockAndBlobSidecars< - T: EthSpec, - Payload: AbstractExecPayload = BlindedPayload, -> { - pub blinded_block: BeaconBlock, - pub blinded_blob_sidecars: SidecarList, -} - -impl> ForkVersionDeserialize - for BlindedBeaconBlockAndBlobSidecars -{ - fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( - value: serde_json::value::Value, - fork_name: ForkName, - ) -> Result { - #[derive(Deserialize)] - #[serde(bound = "T: EthSpec, S: Sidecar")] - struct Helper> { - blinded_block: serde_json::Value, - blinded_blob_sidecars: SidecarList, - } - let helper: Helper = - serde_json::from_value(value).map_err(serde::de::Error::custom)?; - - Ok(Self { - blinded_block: BeaconBlock::deserialize_by_fork::<'de, D>( - helper.blinded_block, - fork_name, - )?, - blinded_blob_sidecars: 
helper.blinded_blob_sidecars, + kzg_proofs: helper.kzg_proofs, + blobs: helper.blobs, }) } } @@ -2051,18 +1879,3 @@ pub struct BlobsBundle { #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] pub blobs: BlobsList, } - -impl Into> for BlobsBundle { - fn into(self) -> BlindedBlobsBundle { - BlindedBlobsBundle { - commitments: self.commitments, - proofs: self.proofs, - blob_roots: self - .blobs - .into_iter() - .map(|blob| blob.tree_hash_root()) - .collect::>() - .into(), - } - } -} diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index bdd74c1a2..865a5affb 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -291,7 +291,7 @@ pub enum AttestationFromBlock { } /// Parameters which are cached between calls to `ForkChoice::get_head`. -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ForkchoiceUpdateParameters { /// The most recent result of running `ForkChoice::get_head`. pub head_root: Hash256, diff --git a/consensus/fork_choice/src/lib.rs b/consensus/fork_choice/src/lib.rs index e7ca84efb..5e8cfb1ee 100644 --- a/consensus/fork_choice/src/lib.rs +++ b/consensus/fork_choice/src/lib.rs @@ -7,4 +7,6 @@ pub use crate::fork_choice::{ QueuedAttestation, ResetPayloadStatuses, }; pub use fork_choice_store::ForkChoiceStore; -pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation}; +pub use proto_array::{ + Block as ProtoBlock, ExecutionStatus, InvalidationOperation, ProposerHeadError, +}; diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index 595de86e8..2d2d2afdd 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -369,7 +369,7 @@ pub fn verify_merkle_proof( } /// Compute a root hash from a leaf and a Merkle proof. -fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 { +pub fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 { assert_eq!(branch.len(), depth, "proof length should equal depth"); let mut merkle_root = leaf.as_bytes().to_vec(); diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index 6fc677073..1c41b1855 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -188,7 +188,7 @@ where } /// Information about the proposer head used for opportunistic re-orgs. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ProposerHeadInfo { /// Information about the *current* head block, which may be re-orged. pub head_node: ProtoNode, @@ -206,7 +206,7 @@ pub struct ProposerHeadInfo { /// /// This type intentionally does not implement `Debug` so that callers are forced to handle the /// enum. -#[derive(Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum ProposerHeadError { DoNotReOrg(DoNotReOrg), Error(E), @@ -243,7 +243,7 @@ impl ProposerHeadError { /// Reasons why a re-org should not be attempted. /// /// This type intentionally does not implement `Debug` so that the `Display` impl must be used. 
-#[derive(Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub enum DoNotReOrg { MissingHeadOrParentNode, MissingHeadFinalizedCheckpoint, diff --git a/consensus/types/presets/gnosis/deneb.yaml b/consensus/types/presets/gnosis/deneb.yaml index b78a95027..d2d7d0abe 100644 --- a/consensus/types/presets/gnosis/deneb.yaml +++ b/consensus/types/presets/gnosis/deneb.yaml @@ -10,3 +10,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096 MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17 diff --git a/consensus/types/presets/mainnet/deneb.yaml b/consensus/types/presets/mainnet/deneb.yaml index 23889fd18..6d2fb4abd 100644 --- a/consensus/types/presets/mainnet/deneb.yaml +++ b/consensus/types/presets/mainnet/deneb.yaml @@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096 MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17 diff --git a/consensus/types/presets/minimal/deneb.yaml b/consensus/types/presets/minimal/deneb.yaml index 3da2f80a7..be2b9fadf 100644 --- a/consensus/types/presets/minimal/deneb.yaml +++ b/consensus/types/presets/minimal/deneb.yaml @@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096 MAX_BLOB_COMMITMENTS_PER_BLOCK: 16 # `uint64(6)` MAX_BLOBS_PER_BLOCK: 6 +# [customized] `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9 diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 2f7c6891e..146dff895 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -1,12 +1,14 @@ use crate::test_utils::TestRandom; use crate::*; use derivative::Derivative; +use merkle_proof::{MerkleTree, MerkleTreeError}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use ssz_types::VariableList; use std::marker::PhantomData; use superstruct::superstruct; use test_random_derive::TestRandom; +use tree_hash::{TreeHash, BYTES_PER_CHUNK}; use tree_hash_derive::TreeHash; pub type KzgCommitments = @@ -14,6 +16,9 @@ pub type KzgCommitments = pub type KzgCommitmentOpts = FixedVector, ::MaxBlobsPerBlock>; +/// Index of the `blob_kzg_commitments` leaf in the `BeaconBlockBody` tree post-deneb. +pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; + /// The body of a `BeaconChain` block, containing operations. /// /// This *superstruct* abstracts over the hard-fork. @@ -98,6 +103,79 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), } } + + /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` + /// at `index`. + pub fn kzg_commitment_merkle_proof( + &self, + index: usize, + ) -> Result, Error> { + match self { + Self::Base(_) | Self::Altair(_) | Self::Merge(_) | Self::Capella(_) => { + Err(Error::IncorrectStateVariant) + } + Self::Deneb(body) => { + // We compute the branches by generating 2 merkle trees: + // 1. Merkle tree for the `blob_kzg_commitments` List object + // 2. Merkle tree for the `BeaconBlockBody` container + // We then merge the branches for both the trees all the way up to the root. 
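The new `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset values above follow directly from the formula quoted in the YAML comments; a small sketch of the arithmetic (the function name is illustrative only, not part of the patch):

// depth = floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)
fn inclusion_proof_depth(max_blob_commitments_per_block: u64) -> u32 {
    // Post-Deneb the BeaconBlockBody has 12 fields, padded to 16 leaves, so 4 levels
    // are needed inside the body container.
    let body_depth = 4;
    // One extra level for the SSZ length mix-in of the commitments list.
    let length_mixin = 1;
    // Levels inside the `blob_kzg_commitments` list subtree.
    let list_depth = max_blob_commitments_per_block.next_power_of_two().ilog2();
    body_depth + length_mixin + list_depth
}
// inclusion_proof_depth(4096) == 17 (mainnet / gnosis); inclusion_proof_depth(16) == 9 (minimal).
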
+ + // Part1 (Branches for the subtree rooted at `blob_kzg_commitments`) + // + // Branches for `blob_kzg_commitments` without length mix-in + let depth = T::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2(); + let leaves: Vec<_> = body + .blob_kzg_commitments + .iter() + .map(|commitment| commitment.tree_hash_root()) + .collect(); + let tree = MerkleTree::create(&leaves, depth as usize); + let (_, mut proof) = tree + .generate_proof(index, depth as usize) + .map_err(Error::MerkleTreeError)?; + + // Add the branch corresponding to the length mix-in. + let length = body.blob_kzg_commitments.len(); + let usize_len = std::mem::size_of::(); + let mut length_bytes = [0; BYTES_PER_CHUNK]; + length_bytes + .get_mut(0..usize_len) + .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .copy_from_slice(&length.to_le_bytes()); + let length_root = Hash256::from_slice(length_bytes.as_slice()); + proof.push(length_root); + + // Part 2 + // Branches for `BeaconBlockBody` container + let leaves = [ + body.randao_reveal.tree_hash_root(), + body.eth1_data.tree_hash_root(), + body.graffiti.tree_hash_root(), + body.proposer_slashings.tree_hash_root(), + body.attester_slashings.tree_hash_root(), + body.attestations.tree_hash_root(), + body.deposits.tree_hash_root(), + body.voluntary_exits.tree_hash_root(), + body.sync_aggregate.tree_hash_root(), + body.execution_payload.tree_hash_root(), + body.bls_to_execution_changes.tree_hash_root(), + body.blob_kzg_commitments.tree_hash_root(), + ]; + let beacon_block_body_depth = leaves.len().next_power_of_two().ilog2() as usize; + let tree = MerkleTree::create(&leaves, beacon_block_body_depth); + let (_, mut proof_body) = tree + .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) + .map_err(Error::MerkleTreeError)?; + // Join the proofs for the subtree and the main tree + proof.append(&mut proof_body); + + debug_assert_eq!(proof.len(), T::kzg_proof_inclusion_proof_depth()); + Ok(proof.into()) + } + } + } } impl<'a, T: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, T, Payload> { diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/beacon_block_header.rs index 689f1a28b..b38235931 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/beacon_block_header.rs @@ -60,6 +60,16 @@ impl BeaconBlockHeader { signature, } } + + pub fn empty() -> Self { + Self { + body_root: Default::default(), + parent_root: Default::default(), + proposer_index: Default::default(), + slot: Default::default(), + state_root: Default::default(), + } + } } #[cfg(test)] diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 8637e538d..c249d8b4d 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,11 +1,18 @@ use crate::test_utils::TestRandom; -use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot}; +use crate::{ + beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, + EthSpec, Hash256, SignedBeaconBlockHeader, Slot, +}; +use crate::{KzgProofs, SignedBeaconBlock}; +use bls::Signature; use derivative::Derivative; use kzg::{ Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, FIELD_ELEMENTS_PER_BLOB, }; +use merkle_proof::{merkle_root_from_branch, verify_merkle_proof, MerkleTreeError}; use rand::Rng; +use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; @@ -67,47 
+74,14 @@ impl Ord for BlobIdentifier { #[arbitrary(bound = "T: EthSpec")] #[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))] pub struct BlobSidecar { - pub block_root: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub index: u64, - pub slot: Slot, - pub block_parent_root: Hash256, - #[serde(with = "serde_utils::quoted_u64")] - pub proposer_index: u64, #[serde(with = "ssz_types::serde_utils::hex_fixed_vec")] pub blob: Blob, pub kzg_commitment: KzgCommitment, pub kzg_proof: KzgProof, -} - -impl From>> for BlindedBlobSidecar { - fn from(blob_sidecar: Arc>) -> Self { - BlindedBlobSidecar { - block_root: blob_sidecar.block_root, - index: blob_sidecar.index, - slot: blob_sidecar.slot, - block_parent_root: blob_sidecar.block_parent_root, - proposer_index: blob_sidecar.proposer_index, - blob_root: blob_sidecar.blob.tree_hash_root(), - kzg_commitment: blob_sidecar.kzg_commitment, - kzg_proof: blob_sidecar.kzg_proof, - } - } -} - -impl From> for BlindedBlobSidecar { - fn from(blob_sidecar: BlobSidecar) -> Self { - BlindedBlobSidecar { - block_root: blob_sidecar.block_root, - index: blob_sidecar.index, - slot: blob_sidecar.slot, - block_parent_root: blob_sidecar.block_parent_root, - proposer_index: blob_sidecar.proposer_index, - blob_root: blob_sidecar.blob.tree_hash_root(), - kzg_commitment: blob_sidecar.kzg_commitment, - kzg_proof: blob_sidecar.kzg_proof, - } - } + pub signed_block_header: SignedBeaconBlockHeader, + pub kzg_commitment_inclusion_proof: FixedVector, } impl PartialOrd for BlobSidecar { @@ -122,29 +96,130 @@ impl Ord for BlobSidecar { } } -impl SignedRoot for BlobSidecar {} +#[derive(Debug)] +pub enum BlobSidecarError { + PreDeneb, + MissingKzgCommitment, + BeaconState(BeaconStateError), + MerkleTree(MerkleTreeError), + ArithError(ArithError), +} + +impl From for BlobSidecarError { + fn from(e: BeaconStateError) -> Self { + BlobSidecarError::BeaconState(e) + } +} + +impl From for BlobSidecarError { + fn from(e: MerkleTreeError) -> Self { + BlobSidecarError::MerkleTree(e) + } +} + +impl From for BlobSidecarError { + fn from(e: ArithError) -> Self { + BlobSidecarError::ArithError(e) + } +} impl BlobSidecar { + pub fn new( + index: usize, + blob: Blob, + signed_block: &SignedBeaconBlock, + kzg_proof: KzgProof, + ) -> Result { + let expected_kzg_commitments = signed_block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_e| BlobSidecarError::PreDeneb)?; + let kzg_commitment = *expected_kzg_commitments + .get(index) + .ok_or(BlobSidecarError::MissingKzgCommitment)?; + let kzg_commitment_inclusion_proof = signed_block + .message() + .body() + .kzg_commitment_merkle_proof(index)?; + + Ok(Self { + index: index as u64, + blob, + kzg_commitment, + kzg_proof, + signed_block_header: signed_block.signed_block_header(), + kzg_commitment_inclusion_proof, + }) + } + pub fn id(&self) -> BlobIdentifier { BlobIdentifier { - block_root: self.block_root, + block_root: self.block_root(), index: self.index, } } + pub fn slot(&self) -> Slot { + self.signed_block_header.message.slot + } + + pub fn block_root(&self) -> Hash256 { + self.signed_block_header.message.tree_hash_root() + } + + pub fn block_parent_root(&self) -> Hash256 { + self.signed_block_header.message.parent_root + } + + pub fn block_proposer_index(&self) -> u64 { + self.signed_block_header.message.proposer_index + } + pub fn empty() -> Self { Self { - block_root: Hash256::zero(), index: 0, - slot: Slot::new(0), - block_parent_root: Hash256::zero(), - proposer_index: 0, blob: Blob::::default(), kzg_commitment: 
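The fields removed from `BlobSidecar` above are now derived from the embedded `signed_block_header`; a brief hedged sketch of the accessor mapping (the helper name is illustrative, not part of the patch):

// Mirrors the call-site changes in sync/manager.rs earlier in this patch.
fn describe<E: EthSpec>(blob: &BlobSidecar<E>) -> (Slot, Hash256, Hash256, u64) {
    (
        blob.slot(),               // signed_block_header.message.slot
        blob.block_root(),         // tree hash root of the block header == beacon block root
        blob.block_parent_root(),  // signed_block_header.message.parent_root
        blob.block_proposer_index(),
    )
}
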
KzgCommitment::empty_for_testing(), kzg_proof: KzgProof::empty(), + signed_block_header: SignedBeaconBlockHeader { + message: BeaconBlockHeader::empty(), + signature: Signature::empty(), + }, + kzg_commitment_inclusion_proof: Default::default(), } } + /// Verifies the kzg commitment inclusion merkle proof. + pub fn verify_blob_sidecar_inclusion_proof(&self) -> Result { + // Depth of the subtree rooted at `blob_kzg_commitments` in the `BeaconBlockBody` + // is equal to depth of the ssz List max size + 1 for the length mixin + let kzg_commitments_tree_depth = (T::max_blob_commitments_per_block() + .next_power_of_two() + .ilog2() + .safe_add(1))? as usize; + // Compute the `tree_hash_root` of the `blob_kzg_commitments` subtree using the + // inclusion proof branches + let blob_kzg_commitments_root = merkle_root_from_branch( + self.kzg_commitment.tree_hash_root(), + self.kzg_commitment_inclusion_proof + .get(0..kzg_commitments_tree_depth) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + kzg_commitments_tree_depth, + self.index as usize, + ); + // The remaining inclusion proof branches are for the top level `BeaconBlockBody` tree + Ok(verify_merkle_proof( + blob_kzg_commitments_root, + self.kzg_commitment_inclusion_proof + .get(kzg_commitments_tree_depth..T::kzg_proof_inclusion_proof_depth()) + .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, + T::kzg_proof_inclusion_proof_depth().safe_sub(kzg_commitments_tree_depth)?, + BLOB_KZG_COMMITMENTS_INDEX, + self.signed_block_header.message.body_root, + )) + } + pub fn random_valid(rng: &mut R, kzg: &Kzg) -> Result { let mut blob_bytes = vec![0u8; BYTES_PER_BLOB]; rng.fill_bytes(&mut blob_bytes); @@ -185,57 +260,22 @@ impl BlobSidecar { // Fixed part Self::empty().as_ssz_bytes().len() } -} -#[derive( - Debug, - Clone, - Serialize, - Deserialize, - Encode, - Decode, - TreeHash, - TestRandom, - Derivative, - arbitrary::Arbitrary, -)] -#[derivative(PartialEq, Eq, Hash)] -pub struct BlindedBlobSidecar { - pub block_root: Hash256, - #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, - pub slot: Slot, - pub block_parent_root: Hash256, - #[serde(with = "serde_utils::quoted_u64")] - pub proposer_index: u64, - pub blob_root: Hash256, - pub kzg_commitment: KzgCommitment, - pub kzg_proof: KzgProof, -} - -impl BlindedBlobSidecar { - pub fn empty() -> Self { - Self { - block_root: Hash256::zero(), - index: 0, - slot: Slot::new(0), - block_parent_root: Hash256::zero(), - proposer_index: 0, - blob_root: Hash256::zero(), - kzg_commitment: KzgCommitment::empty_for_testing(), - kzg_proof: KzgProof::empty(), + pub fn build_sidecars( + blobs: BlobsList, + block: &SignedBeaconBlock, + kzg_proofs: KzgProofs, + ) -> Result, BlobSidecarError> { + let mut blob_sidecars = vec![]; + for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { + let blob_sidecar = BlobSidecar::new(i, blob, block, *kzg_proof)?; + blob_sidecars.push(Arc::new(blob_sidecar)); } + Ok(VariableList::from(blob_sidecars)) } } -impl SignedRoot for BlindedBlobSidecar {} - -pub type SidecarList = VariableList, ::MaxBlobsPerBlock>; -pub type BlobSidecarList = SidecarList>; -pub type BlindedBlobSidecarList = SidecarList; - +pub type BlobSidecarList = VariableList>, ::MaxBlobsPerBlock>; pub type FixedBlobSidecarList = FixedVector>>, ::MaxBlobsPerBlock>; - pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; -pub type BlobRootsList = VariableList::MaxBlobCommitmentsPerBlock>; diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 
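To make the producer/consumer flow concrete, a hedged fragment (not compilable on its own; `signed_block`, `blobs` and `kzg_proofs` are assumed bindings, and `E: EthSpec` is an assumed type parameter): sidecars are built by zipping the proofs with the blobs, and receivers check the commitment inclusion proof instead of a sidecar signature.

// Proposer / test side: one sidecar per (proof, blob) pair, indexed in order.
let sidecars: BlobSidecarList<E> =
    BlobSidecar::build_sidecars(blobs, &signed_block, kzg_proofs)
        .expect("blob count should match the block's KZG commitments");

// Receiver side: the inclusion proof binds each commitment to the block header that
// the sidecar carries, replacing the old per-sidecar BLS signature.
for sidecar in sidecars.iter() {
    assert!(sidecar
        .verify_blob_sidecar_inclusion_proof()
        .expect("proof should have the expected depth"));
}
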
910ef97c7..f43585000 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,24 +1,15 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ - BlobRootsList, ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, + ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, - ForkVersionDeserialize, KzgProofs, SignedRoot, Uint256, + ForkVersionDeserialize, SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; use serde::{Deserialize, Deserializer, Serialize}; -use ssz_derive::Encode; use superstruct::superstruct; use tree_hash_derive::TreeHash; -#[derive(PartialEq, Debug, Default, Serialize, Deserialize, TreeHash, Clone, Encode)] -#[serde(bound = "E: EthSpec")] -pub struct BlindedBlobsBundle { - pub commitments: KzgCommitments, - pub proofs: KzgProofs, - pub blob_roots: BlobRootsList, -} - #[superstruct( variants(Merge, Capella, Deneb), variant_attributes( @@ -39,7 +30,7 @@ pub struct BuilderBid { #[superstruct(only(Deneb), partial_getter(rename = "header_deneb"))] pub header: ExecutionPayloadHeaderDeneb, #[superstruct(only(Deneb))] - pub blinded_blobs_bundle: BlindedBlobsBundle, + pub blob_kzg_commitments: KzgCommitments, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, pub pubkey: PublicKeyBytes, diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ed0029785..784d98c13 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -15,7 +15,6 @@ pub enum Domain { BlsToExecutionChange, BeaconProposer, BeaconAttester, - BlobSidecar, Randao, Deposit, VoluntaryExit, @@ -102,7 +101,6 @@ pub struct ChainSpec { */ pub(crate) domain_beacon_proposer: u32, pub(crate) domain_beacon_attester: u32, - pub(crate) domain_blob_sidecar: u32, pub(crate) domain_randao: u32, pub(crate) domain_deposit: u32, pub(crate) domain_voluntary_exit: u32, @@ -374,7 +372,6 @@ impl ChainSpec { match domain { Domain::BeaconProposer => self.domain_beacon_proposer, Domain::BeaconAttester => self.domain_beacon_attester, - Domain::BlobSidecar => self.domain_blob_sidecar, Domain::Randao => self.domain_randao, Domain::Deposit => self.domain_deposit, Domain::VoluntaryExit => self.domain_voluntary_exit, @@ -579,7 +576,6 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_blob_sidecar: 11, // 0x0B000000 /* * Fork choice @@ -822,7 +818,6 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, - domain_blob_sidecar: 11, /* * Fork choice @@ -1416,7 +1411,6 @@ mod tests { test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec); test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec); - test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec); test_domain(Domain::Randao, spec.domain_randao, &spec); test_domain(Domain::Deposit, spec.domain_deposit, &spec); test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec); @@ -1441,8 +1435,6 @@ mod tests { spec.domain_bls_to_execution_change, &spec, ); - - test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec); } fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 { diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index bd2efd3d9..b651d34af 100644 --- 
a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -82,7 +82,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte), "domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer), "domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester), - "domain_blob_sidecar".to_uppercase() => u32_hex(spec.domain_blob_sidecar), "domain_randao".to_uppercase()=> u32_hex(spec.domain_randao), "domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit), "domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 70982e8d5..17baad9c4 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -6,6 +6,7 @@ use ssz_types::typenum::{ bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16, U16777216, U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192, }; +use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -109,6 +110,7 @@ pub trait EthSpec: type MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq; type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type KzgCommitmentInclusionProofDepth: Unsigned + Clone + Sync + Send + Debug + PartialEq; /* * Derived values (set these CAREFULLY) */ @@ -271,6 +273,10 @@ pub trait EthSpec: fn bytes_per_blob() -> usize { Self::BytesPerBlob::to_usize() } + /// Returns the `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset for this specification. + fn kzg_proof_inclusion_proof_depth() -> usize { + Self::KzgCommitmentInclusionProofDepth::to_usize() + } } /// Macro to inherit some type values from another EthSpec. 
@@ -315,6 +321,7 @@ impl EthSpec for MainnetEthSpec { type BytesPerFieldElement = U32; type FieldElementsPerBlob = U4096; type BytesPerBlob = U131072; + type KzgCommitmentInclusionProofDepth = U17; type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch @@ -348,6 +355,7 @@ impl EthSpec for MinimalEthSpec { type FieldElementsPerBlob = U4096; type BytesPerBlob = U131072; type MaxBlobCommitmentsPerBlock = U16; + type KzgCommitmentInclusionProofDepth = U9; params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -421,6 +429,7 @@ impl EthSpec for GnosisEthSpec { type FieldElementsPerBlob = U4096; type BytesPerFieldElement = U32; type BytesPerBlob = U131072; + type KzgCommitmentInclusionProofDepth = U17; fn default_spec() -> ChainSpec { ChainSpec::gnosis() diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 0f284bde9..2322a67a6 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -100,8 +100,6 @@ pub mod sqlite; pub mod blob_sidecar; pub mod light_client_header; -pub mod sidecar; -pub mod signed_blob; use ethereum_types::{H160, H256}; @@ -121,10 +119,7 @@ pub use crate::beacon_block_body::{ pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; -pub use crate::blob_sidecar::{ - BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList, BlobSidecar, BlobSidecarList, - BlobsList, SidecarList, -}; +pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; @@ -182,7 +177,6 @@ pub use crate::signed_beacon_block::{ SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; -pub use crate::signed_blob::*; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; pub use crate::signed_contribution_and_proof::SignedContributionAndProof; pub use crate::signed_voluntary_exit::SignedVoluntaryExit; @@ -223,6 +217,5 @@ pub use bls::{ pub use kzg::{KzgCommitment, KzgProof}; -pub use sidecar::Sidecar; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index 6d584fc1e..fa7745ad9 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -83,8 +83,6 @@ pub trait AbstractExecPayload: + TryInto + TryInto { - type Sidecar: Sidecar; - type Ref<'a>: ExecPayload + Copy + From<&'a Self::Merge> @@ -103,11 +101,6 @@ pub trait AbstractExecPayload: + Into + for<'a> From>> + TryFrom>; - - fn default_at_fork(fork_name: ForkName) -> Result; - fn default_blobs_at_fork( - fork_name: ForkName, - ) -> Result<>::BlobItems, Error>; } #[superstruct( @@ -280,6 +273,15 @@ impl FullPayload { cons(inner.execution_payload) }) } + + pub fn default_at_fork(fork_name: ForkName) -> Result { + match fork_name { + ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Merge => Ok(FullPayloadMerge::default().into()), + ForkName::Capella => Ok(FullPayloadCapella::default().into()), + ForkName::Deneb => 
Ok(FullPayloadDeneb::default().into()), + } + } } impl<'a, T: EthSpec> FullPayloadRef<'a, T> { @@ -384,28 +386,10 @@ impl<'b, T: EthSpec> ExecPayload for FullPayloadRef<'b, T> { } impl AbstractExecPayload for FullPayload { - type Sidecar = BlobSidecar; type Ref<'a> = FullPayloadRef<'a, T>; type Merge = FullPayloadMerge; type Capella = FullPayloadCapella; type Deneb = FullPayloadDeneb; - - fn default_at_fork(fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), - ForkName::Merge => Ok(FullPayloadMerge::default().into()), - ForkName::Capella => Ok(FullPayloadCapella::default().into()), - ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), - } - } - fn default_blobs_at_fork(fork_name: ForkName) -> Result, Error> { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Err(Error::IncorrectStateVariant) - } - ForkName::Deneb => Ok(VariableList::default()), - } - } } impl From> for FullPayload { @@ -910,25 +894,6 @@ impl AbstractExecPayload for BlindedPayload { type Merge = BlindedPayloadMerge; type Capella = BlindedPayloadCapella; type Deneb = BlindedPayloadDeneb; - - type Sidecar = BlindedBlobSidecar; - - fn default_at_fork(fork_name: ForkName) -> Result { - match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), - ForkName::Merge => Ok(BlindedPayloadMerge::default().into()), - ForkName::Capella => Ok(BlindedPayloadCapella::default().into()), - ForkName::Deneb => Ok(BlindedPayloadDeneb::default().into()), - } - } - fn default_blobs_at_fork(fork_name: ForkName) -> Result, Error> { - match fork_name { - ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { - Err(Error::IncorrectStateVariant) - } - ForkName::Deneb => Ok(VariableList::default()), - } - } } impl From> for BlindedPayload { diff --git a/consensus/types/src/sidecar.rs b/consensus/types/src/sidecar.rs deleted file mode 100644 index e784cc57f..000000000 --- a/consensus/types/src/sidecar.rs +++ /dev/null @@ -1,221 +0,0 @@ -use crate::beacon_block_body::KzgCommitments; -use crate::test_utils::TestRandom; -use crate::{ - AbstractExecPayload, BeaconBlock, BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList, - BlobSidecar, BlobSidecarList, BlobsList, ChainSpec, Domain, EthSpec, Fork, Hash256, - SidecarList, SignedRoot, SignedSidecar, Slot, -}; -use bls::SecretKey; -use kzg::KzgProof; -use serde::de::DeserializeOwned; -use ssz::{Decode, Encode}; -use ssz_types::VariableList; -use std::fmt::Debug; -use std::hash::Hash; -use std::marker::PhantomData; -use std::sync::Arc; -use tree_hash::TreeHash; - -pub trait Sidecar: - serde::Serialize - + Clone - + DeserializeOwned - + Encode - + Decode - + Hash - + TreeHash - + TestRandom - + Debug - + SignedRoot - + Sync - + Send - + for<'a> arbitrary::Arbitrary<'a> -{ - type BlobItems: BlobItems; - - fn slot(&self) -> Slot; - - fn build_sidecar>( - blob_items: Self::BlobItems, - block: &BeaconBlock, - expected_kzg_commitments: &KzgCommitments, - kzg_proofs: Vec, - ) -> Result, String>; - - // this is mostly not used except for in testing - fn sign( - self: Arc, - secret_key: &SecretKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> SignedSidecar { - let signing_epoch = self.slot().epoch(E::slots_per_epoch()); - let domain = spec.get_domain( - signing_epoch, - Domain::BlobSidecar, - fork, - genesis_validators_root, - ); - let message = self.signing_root(domain); - let signature = 
secret_key.sign(message); - - SignedSidecar { - message: self, - signature, - _phantom: PhantomData, - } - } -} - -pub trait BlobItems: Sync + Send + Sized { - fn try_from_blob_roots(roots: BlobRootsList) -> Result; - fn try_from_blobs(blobs: BlobsList) -> Result; - fn len(&self) -> usize; - fn is_empty(&self) -> bool; - fn blobs(&self) -> Option<&BlobsList>; -} - -impl BlobItems for BlobsList { - fn try_from_blob_roots(_roots: BlobRootsList) -> Result { - Err("Unexpected conversion from blob roots to blobs".to_string()) - } - - fn try_from_blobs(blobs: BlobsList) -> Result { - Ok(blobs) - } - - fn len(&self) -> usize { - VariableList::len(self) - } - - fn is_empty(&self) -> bool { - VariableList::is_empty(self) - } - - fn blobs(&self) -> Option<&BlobsList> { - Some(self) - } -} - -impl BlobItems for BlobRootsList { - fn try_from_blob_roots(roots: BlobRootsList) -> Result { - Ok(roots) - } - - fn try_from_blobs(blobs: BlobsList) -> Result { - VariableList::new( - blobs - .into_iter() - .map(|blob| blob.tree_hash_root()) - .collect(), - ) - .map_err(|e| format!("{e:?}")) - } - - fn len(&self) -> usize { - VariableList::len(self) - } - - fn is_empty(&self) -> bool { - VariableList::is_empty(self) - } - - fn blobs(&self) -> Option<&BlobsList> { - None - } -} - -impl Sidecar for BlobSidecar { - type BlobItems = BlobsList; - - fn slot(&self) -> Slot { - self.slot - } - - fn build_sidecar>( - blobs: BlobsList, - block: &BeaconBlock, - expected_kzg_commitments: &KzgCommitments, - kzg_proofs: Vec, - ) -> Result, String> { - let beacon_block_root = block.canonical_root(); - let slot = block.slot(); - let blob_sidecars = BlobSidecarList::from( - blobs - .into_iter() - .enumerate() - .map(|(blob_index, blob)| { - let kzg_commitment = expected_kzg_commitments - .get(blob_index) - .ok_or("KZG commitment should exist for blob")?; - - let kzg_proof = kzg_proofs - .get(blob_index) - .ok_or("KZG proof should exist for blob")?; - - Ok(Arc::new(BlobSidecar { - block_root: beacon_block_root, - index: blob_index as u64, - slot, - block_parent_root: block.parent_root(), - proposer_index: block.proposer_index(), - blob, - kzg_commitment: *kzg_commitment, - kzg_proof: *kzg_proof, - })) - }) - .collect::, String>>()?, - ); - - Ok(blob_sidecars) - } -} - -impl Sidecar for BlindedBlobSidecar { - type BlobItems = BlobRootsList; - - fn slot(&self) -> Slot { - self.slot - } - - fn build_sidecar>( - blob_roots: BlobRootsList, - block: &BeaconBlock, - expected_kzg_commitments: &KzgCommitments, - kzg_proofs: Vec, - ) -> Result, String> { - let beacon_block_root = block.canonical_root(); - let slot = block.slot(); - - let blob_sidecars = BlindedBlobSidecarList::::from( - blob_roots - .into_iter() - .enumerate() - .map(|(blob_index, blob_root)| { - let kzg_commitment = expected_kzg_commitments - .get(blob_index) - .ok_or("KZG commitment should exist for blob")?; - - let kzg_proof = kzg_proofs.get(blob_index).ok_or(format!( - "Missing KZG proof for slot {} blob index: {}", - slot, blob_index - ))?; - - Ok(Arc::new(BlindedBlobSidecar { - block_root: beacon_block_root, - index: blob_index as u64, - slot, - block_parent_root: block.parent_root(), - proposer_index: block.proposer_index(), - blob_root, - kzg_commitment: *kzg_commitment, - kzg_proof: *kzg_proof, - })) - }) - .collect::, String>>()?, - ); - - Ok(blob_sidecars) - } -} diff --git a/consensus/types/src/signed_blob.rs b/consensus/types/src/signed_blob.rs deleted file mode 100644 index 3c560823c..000000000 --- a/consensus/types/src/signed_blob.rs +++ /dev/null @@ -1,114 
+0,0 @@ -use crate::sidecar::Sidecar; -use crate::{ - test_utils::TestRandom, BlindedBlobSidecar, Blob, BlobSidecar, ChainSpec, Domain, EthSpec, - Fork, Hash256, Signature, SignedRoot, SigningData, -}; -use bls::PublicKey; -use derivative::Derivative; -use serde::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; -use std::marker::PhantomData; -use std::sync::Arc; -use test_random_derive::TestRandom; -use tree_hash::TreeHash; -use tree_hash_derive::TreeHash; - -#[derive( - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, - Derivative, - arbitrary::Arbitrary, -)] -#[serde(bound = "T: EthSpec, S: Sidecar")] -#[arbitrary(bound = "T: EthSpec, S: Sidecar")] -#[derivative(Hash(bound = "T: EthSpec, S: Sidecar"))] -pub struct SignedSidecar> { - pub message: Arc, - pub signature: Signature, - #[ssz(skip_serializing, skip_deserializing)] - #[tree_hash(skip_hashing)] - #[serde(skip)] - #[arbitrary(default)] - pub _phantom: PhantomData, -} - -impl SignedSidecar { - pub fn into_full_blob_sidecars(self, blob: Blob) -> SignedSidecar> { - let blinded_sidecar = self.message; - SignedSidecar { - message: Arc::new(BlobSidecar { - block_root: blinded_sidecar.block_root, - index: blinded_sidecar.index, - slot: blinded_sidecar.slot, - block_parent_root: blinded_sidecar.block_parent_root, - proposer_index: blinded_sidecar.proposer_index, - blob, - kzg_commitment: blinded_sidecar.kzg_commitment, - kzg_proof: blinded_sidecar.kzg_proof, - }), - signature: self.signature, - _phantom: PhantomData, - } - } -} - -impl SignedBlobSidecar { - /// Verify `self.signature`. - /// - /// If the root of `block.message` is already known it can be passed in via `object_root_opt`. - /// Otherwise, it will be computed locally. - pub fn verify_signature( - &self, - object_root_opt: Option, - pubkey: &PublicKey, - fork: &Fork, - genesis_validators_root: Hash256, - spec: &ChainSpec, - ) -> bool { - let domain = spec.get_domain( - self.message.slot.epoch(T::slots_per_epoch()), - Domain::BlobSidecar, - fork, - genesis_validators_root, - ); - - let message = if let Some(object_root) = object_root_opt { - SigningData { - object_root, - domain, - } - .tree_hash_root() - } else { - self.message.signing_root(domain) - }; - - self.signature.verify(pubkey, message) - } -} - -impl From> for SignedBlindedBlobSidecar { - fn from(signed: SignedBlobSidecar) -> Self { - SignedBlindedBlobSidecar { - message: Arc::new(signed.message.into()), - signature: signed.signature, - _phantom: PhantomData, - } - } -} - -pub type SignedBlobSidecar = SignedSidecar>; -pub type SignedBlindedBlobSidecar = SignedSidecar; - -/// List of Signed Sidecars that implements `Sidecar`. -pub type SignedSidecarList = - VariableList, ::MaxBlobsPerBlock>; -pub type SignedBlobSidecarList = SignedSidecarList>; -pub type SignedBlindedBlobSidecarList = SignedSidecarList; diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index fb2a6d394..cb5212aea 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -6,10 +6,24 @@ use std::fmt::Debug; pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup}; pub use c_kzg::{ - Blob, Bytes32, Bytes48, Error, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, + Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, }; +#[derive(Debug)] +pub enum Error { + /// An error from the underlying kzg library. 
+ Kzg(c_kzg::Error), + /// The kzg verification failed + KzgVerificationFailed, +} + +impl From for Error { + fn from(value: c_kzg::Error) -> Self { + Error::Kzg(value) + } +} + /// A wrapper over a kzg library that holds the trusted setup parameters. #[derive(Debug)] pub struct Kzg { @@ -35,6 +49,7 @@ impl Kzg { ) -> Result { c_kzg::KzgProof::compute_blob_kzg_proof(blob, &kzg_commitment.into(), &self.trusted_setup) .map(|proof| KzgProof(proof.to_bytes().into_inner())) + .map_err(Into::into) } /// Verify a kzg proof given the blob, kzg commitment and kzg proof. @@ -43,13 +58,17 @@ impl Kzg { blob: &Blob, kzg_commitment: KzgCommitment, kzg_proof: KzgProof, - ) -> Result { - c_kzg::KzgProof::verify_blob_kzg_proof( + ) -> Result<(), Error> { + if !c_kzg::KzgProof::verify_blob_kzg_proof( blob, &kzg_commitment.into(), &kzg_proof.into(), &self.trusted_setup, - ) + )? { + Err(Error::KzgVerificationFailed) + } else { + Ok(()) + } } /// Verify a batch of blob commitment proof triplets. @@ -61,7 +80,7 @@ impl Kzg { blobs: &[Blob], kzg_commitments: &[KzgCommitment], kzg_proofs: &[KzgProof], - ) -> Result { + ) -> Result<(), Error> { let commitments_bytes = kzg_commitments .iter() .map(|comm| Bytes48::from(*comm)) @@ -72,18 +91,23 @@ impl Kzg { .map(|proof| Bytes48::from(*proof)) .collect::>(); - c_kzg::KzgProof::verify_blob_kzg_proof_batch( + if !c_kzg::KzgProof::verify_blob_kzg_proof_batch( blobs, &commitments_bytes, &proofs_bytes, &self.trusted_setup, - ) + )? { + Err(Error::KzgVerificationFailed) + } else { + Ok(()) + } } /// Converts a blob to a kzg commitment. pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result { c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, &self.trusted_setup) .map(|commitment| KzgCommitment(commitment.to_bytes().into_inner())) + .map_err(Into::into) } /// Computes the kzg proof for a given `blob` and an evaluation point `z` @@ -94,6 +118,7 @@ impl Kzg { ) -> Result<(KzgProof, Bytes32), Error> { c_kzg::KzgProof::compute_kzg_proof(blob, z, &self.trusted_setup) .map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y)) + .map_err(Into::into) } /// Verifies a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y` @@ -111,5 +136,6 @@ impl Kzg { &kzg_proof.into(), &self.trusted_setup, ) + .map_err(Into::into) } } diff --git a/slasher/src/block_queue.rs b/slasher/src/block_queue.rs index 3d2472c18..b91ceba89 100644 --- a/slasher/src/block_queue.rs +++ b/slasher/src/block_queue.rs @@ -1,17 +1,18 @@ use parking_lot::Mutex; +use std::collections::HashSet; use types::SignedBeaconBlockHeader; #[derive(Debug, Default)] pub struct BlockQueue { - blocks: Mutex>, + blocks: Mutex>, } impl BlockQueue { pub fn queue(&self, block_header: SignedBeaconBlockHeader) { - self.blocks.lock().push(block_header) + self.blocks.lock().insert(block_header); } - pub fn dequeue(&self) -> Vec { + pub fn dequeue(&self) -> HashSet { let mut blocks = self.blocks.lock(); std::mem::take(&mut *blocks) } diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index dffed7647..8bc36d008 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -40,3 +40,4 @@ beacon_chain = { workspace = true } store = { workspace = true } fork_choice = { workspace = true } execution_layer = { workspace = true } +logging = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index 452d805ce..e42db1801 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.4.0-beta.3 
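// Illustrative caller-side sketch (not taken from the diff): the KZG helpers above now
// return `Result<(), Error>` instead of a bool, so a failed pairing check surfaces as
// `Error::KzgVerificationFailed` while internal failures stay in `Error::Kzg(_)`. Assumes
// an already-initialised `Kzg` instance; the `check_blob` helper name is ours.
use kzg::{Blob, Error as KzgError, Kzg, KzgCommitment, KzgProof};

fn check_blob(
    kzg: &Kzg,
    blob: &Blob,
    commitment: KzgCommitment,
    proof: KzgProof,
) -> Result<bool, String> {
    match kzg.verify_blob_kzg_proof(blob, commitment, proof) {
        // The proof verified against the commitment.
        Ok(()) => Ok(true),
        // The check ran to completion but the proof is invalid.
        Err(KzgError::KzgVerificationFailed) => Ok(false),
        // Anything else is an error bubbled up from the underlying c-kzg library.
        Err(e) => Err(format!("kzg internal error: {e:?}")),
    }
}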
+TESTS_TAG := v1.4.0-beta.4 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index db9410697..9884a709e 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -1,6 +1,11 @@ use super::*; use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file}; -use ::fork_choice::PayloadVerificationStatus; +use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError}; +use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head; +use beacon_chain::blob_verification::GossipBlobError; +use beacon_chain::chain_config::{ + DisallowedReOrgOffsets, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD, +}; use beacon_chain::slot_clock::SlotClock; use beacon_chain::{ attestation_verification::{ @@ -20,7 +25,7 @@ use std::time::Duration; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, Checkpoint, EthSpec, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, KzgProof, - ProgressiveBalancesMode, Signature, SignedBeaconBlock, SignedBlobSidecar, Slot, Uint256, + ProgressiveBalancesMode, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256, }; #[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)] @@ -38,6 +43,13 @@ pub struct Head { root: Hash256, } +#[derive(Debug, Clone, Copy, PartialEq, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ShouldOverrideFcu { + validator_is_connected: bool, + result: bool, +} + #[derive(Debug, Clone, Deserialize)] #[serde(deny_unknown_fields)] pub struct Checks { @@ -50,6 +62,8 @@ pub struct Checks { u_justified_checkpoint: Option, u_finalized_checkpoint: Option, proposer_boost_root: Option, + get_proposer_head: Option, + should_override_forkchoice_update: Option, } #[derive(Debug, Clone, Deserialize)] @@ -256,6 +270,8 @@ impl Case for ForkChoiceTest { u_justified_checkpoint, u_finalized_checkpoint, proposer_boost_root, + get_proposer_head, + should_override_forkchoice_update: should_override_fcu, } = checks.as_ref(); if let Some(expected_head) = head { @@ -294,6 +310,14 @@ impl Case for ForkChoiceTest { if let Some(expected_proposer_boost_root) = proposer_boost_root { tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?; } + + if let Some(should_override_fcu) = should_override_fcu { + tester.check_should_override_fcu(*should_override_fcu)?; + } + + if let Some(expected_proposer_head) = get_proposer_head { + tester.check_expected_proposer_head(*expected_proposer_head)?; + } } } } @@ -325,6 +349,7 @@ impl Tester { } let harness = BeaconChainHarness::>::builder(E::default()) + .logger(logging::test_logger()) .spec(spec.clone()) .keypairs(vec![]) .chain_config(ChainConfig { @@ -413,6 +438,8 @@ impl Tester { ) -> Result<(), Error> { let block_root = block.canonical_root(); + let mut blob_success = true; + // Convert blobs and kzg_proofs into sidecars, then plumb them into the availability tracker if let Some(blobs) = blobs.clone() { let proofs = kzg_proofs.unwrap(); @@ -432,25 +459,32 @@ impl Tester { .zip(commitments.into_iter()) .enumerate() { - let signed_sidecar = SignedBlobSidecar { - message: Arc::new(BlobSidecar { - block_root, - index: i as u64, - slot: block.slot(), - block_parent_root: block.parent_root(), - proposer_index: block.message().proposer_index(), - blob, - kzg_commitment, - kzg_proof, - }), - signature: 
Signature::empty(), - _phantom: Default::default(), - }; - let result = self.block_on_dangerous( - self.harness - .chain - .process_gossip_blob(GossipVerifiedBlob::__assumed_valid(signed_sidecar)), - )?; + let blob_sidecar = Arc::new(BlobSidecar { + index: i as u64, + blob, + kzg_commitment, + kzg_proof, + signed_block_header: block.signed_block_header(), + kzg_commitment_inclusion_proof: block + .message() + .body() + .kzg_commitment_merkle_proof(i) + .unwrap(), + }); + + let chain = self.harness.chain.clone(); + let blob = + match GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain) + { + Ok(gossip_verified_blob) => gossip_verified_blob, + Err(GossipBlobError::KzgError(_)) => { + blob_success = false; + GossipVerifiedBlob::__assumed_valid(blob_sidecar) + } + Err(_) => GossipVerifiedBlob::__assumed_valid(blob_sidecar), + }; + let result = + self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?; if valid { assert!(result.is_ok()); } @@ -466,7 +500,7 @@ impl Tester { || Ok(()), ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); - let success = result.as_ref().map_or(false, |inner| inner.is_ok()); + let success = blob_success && result.as_ref().map_or(false, |inner| inner.is_ok()); if success != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. result: {:?}", @@ -703,6 +737,82 @@ impl Tester { expected_proposer_boost_root, ) } + + pub fn check_expected_proposer_head( + &self, + expected_proposer_head: Hash256, + ) -> Result<(), Error> { + let mut fc = self.harness.chain.canonical_head.fork_choice_write_lock(); + let slot = self.harness.chain.slot().unwrap(); + let canonical_head = fc.get_head(slot, &self.harness.spec).unwrap(); + let proposer_head_result = fc.get_proposer_head( + slot, + canonical_head, + DEFAULT_RE_ORG_THRESHOLD, + &DisallowedReOrgOffsets::default(), + DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, + ); + let proposer_head = match proposer_head_result { + Ok(head) => head.parent_node.root, + Err(ProposerHeadError::DoNotReOrg(_)) => canonical_head, + _ => panic!("Unexpected error in get proposer head"), + }; + + check_equal("proposer_head", proposer_head, expected_proposer_head) + } + + pub fn check_should_override_fcu( + &self, + expected_should_override_fcu: ShouldOverrideFcu, + ) -> Result<(), Error> { + // Determine proposer. + let cached_head = self.harness.chain.canonical_head.cached_head(); + let next_slot = cached_head.snapshot.beacon_block.slot() + 1; + let next_slot_epoch = next_slot.epoch(E::slots_per_epoch()); + let (proposer_indices, decision_root, _, fork) = + compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap(); + let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize]; + + // Ensure the proposer index cache is primed. + self.harness + .chain + .beacon_proposer_cache + .lock() + .insert(next_slot_epoch, decision_root, proposer_indices, fork) + .unwrap(); + + // Update the execution layer proposer preparation to match the test config. 
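// Illustrative helper (not taken from the diff) summarising the sidecar shape used in the
// test above: a `BlobSidecar` no longer carries its own proposer signature; it embeds the
// block's `SignedBeaconBlockHeader` plus a Merkle proof that commitment `i` is included in
// that block body, via the new `kzg_commitment_merkle_proof` accessor. The helper name and
// error mapping are ours; the types are Lighthouse's.
use std::sync::Arc;
use types::{Blob, BlobSidecar, EthSpec, KzgCommitment, KzgProof, SignedBeaconBlock};

fn sidecar_from_block<E: EthSpec>(
    block: &SignedBeaconBlock<E>,
    i: usize,
    blob: Blob<E>,
    kzg_commitment: KzgCommitment,
    kzg_proof: KzgProof,
) -> Result<Arc<BlobSidecar<E>>, String> {
    Ok(Arc::new(BlobSidecar {
        index: i as u64,
        blob,
        kzg_commitment,
        kzg_proof,
        // Ties the sidecar to one specific signed block header ...
        signed_block_header: block.signed_block_header(),
        // ... and proves the commitment sits at position `i` of that body's commitment list.
        kzg_commitment_inclusion_proof: block
            .message()
            .body()
            .kzg_commitment_merkle_proof(i)
            .map_err(|e| format!("no inclusion proof: {e:?}"))?,
    }))
}
// Gossip verification (`GossipVerifiedBlob::new`) then checks the header signature and the
// inclusion proof rather than a separate sidecar signature.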
+ let el = self.harness.chain.execution_layer.clone().unwrap(); + self.block_on_dangerous(async { + if expected_should_override_fcu.validator_is_connected { + el.update_proposer_preparation( + next_slot_epoch, + &[ProposerPreparationData { + validator_index: dbg!(proposer_index) as u64, + fee_recipient: Default::default(), + }], + ) + .await; + } else { + el.clear_proposer_preparation(proposer_index as u64).await; + } + }) + .unwrap(); + + // Check forkchoice override. + let canonical_fcu_params = cached_head.forkchoice_update_parameters(); + let fcu_params = self + .harness + .chain + .overridden_forkchoice_update_params(canonical_fcu_params) + .unwrap(); + + check_equal( + "should_override_forkchoice_update", + fcu_params != canonical_fcu_params, + expected_should_override_fcu.result, + ) + } } /// Checks that the `head` checkpoint from the beacon chain head matches the `fc` checkpoint gleaned diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index 226d162b9..04d1b8d5d 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -2,7 +2,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blob; use eth2_network_config::TRUSTED_SETUP_BYTES; -use kzg::{Kzg, KzgCommitment, KzgProof, TrustedSetup}; +use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup}; use serde::Deserialize; use std::convert::TryInto; use std::marker::PhantomData; @@ -91,8 +91,14 @@ impl Case for KZGVerifyBlobKZGProof { let kzg = get_kzg()?; let result = parse_input(&self.input).and_then(|(blob, commitment, proof)| { - validate_blob::(&kzg, &blob, commitment, proof) - .map_err(|e| Error::InternalError(format!("Failed to validate blob: {:?}", e))) + match validate_blob::(&kzg, &blob, commitment, proof) { + Ok(_) => Ok(true), + Err(KzgError::KzgVerificationFailed) => Ok(false), + Err(e) => Err(Error::InternalError(format!( + "Failed to validate blob: {:?}", + e + ))), + } }); compare_result::(&result, &self.output) diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs index 24182b69f..ae5caedf0 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -1,6 +1,7 @@ use super::*; use crate::case_result::compare_result; use beacon_chain::kzg_utils::validate_blobs; +use kzg::Error as KzgError; use serde::Deserialize; use std::marker::PhantomData; @@ -53,10 +54,23 @@ impl Case for KZGVerifyBlobKZGProofBatch { }; let kzg = get_kzg()?; - let result = parse_input(&self.input).and_then(|(commitments, blobs, proofs)| { - validate_blobs::(&kzg, &commitments, blobs.iter().collect(), &proofs) - .map_err(|e| Error::InternalError(format!("Failed to validate blobs: {:?}", e))) - }); + + let result = + parse_input(&self.input).and_then( + |(commitments, blobs, proofs)| match validate_blobs::( + &kzg, + &commitments, + blobs.iter().collect(), + &proofs, + ) { + Ok(_) => Ok(true), + Err(KzgError::KzgVerificationFailed) => Ok(false), + Err(e) => Err(Error::InternalError(format!( + "Failed to validate blobs: {:?}", + e + ))), + }, + ); compare_result::(&result, &self.output) } diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 0ba2c9266..d9deda812 100644 --- 
a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,9 +1,9 @@ use super::*; -use crate::decode::{ssz_decode_state, yaml_decode_file}; +use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use std::path::Path; use tree_hash::Hash256; -use types::{BeaconState, EthSpec, ForkName}; +use types::{BeaconBlockBody, BeaconBlockBodyDeneb, BeaconState, EthSpec, ForkName}; #[derive(Debug, Clone, Deserialize)] pub struct Metadata { @@ -82,3 +82,72 @@ impl Case for MerkleProofValidity { Ok(()) } } + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct KzgInclusionMerkleProofValidity { + pub metadata: Option, + pub block: BeaconBlockBody, + pub merkle_proof: MerkleProof, +} + +impl LoadCase for KzgInclusionMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let block = match fork_name { + ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => { + return Err(Error::InternalError(format!( + "KZG inclusion merkle proof validity test skipped for {:?}", + fork_name + ))) + } + ForkName::Deneb => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))? + } + }; + let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; + // Metadata does not exist in these tests but it is left like this just in case. + let meta_path = path.join("meta.yaml"); + let metadata = if meta_path.exists() { + Some(yaml_decode_file(&meta_path)?) + } else { + None + }; + + Ok(Self { + metadata, + block: block.into(), + merkle_proof, + }) + } +} + +impl Case for KzgInclusionMerkleProofValidity { + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { + let Ok(proof) = self.block.to_ref().kzg_commitment_merkle_proof(0) else { + return Err(Error::FailedToParseTest( + "Could not retrieve merkle proof".to_string(), + )); + }; + let proof_len = proof.len(); + let branch_len = self.merkle_proof.branch.len(); + if proof_len != branch_len { + return Err(Error::NotEqual(format!( + "Branches not equal in length computed: {}, expected {}", + proof_len, branch_len + ))); + } + + for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) { + let expected_leaf = self.merkle_proof.branch[i]; + if *proof_leaf != expected_leaf { + return Err(Error::NotEqual(format!( + "Leaves not equal in merkle proof computed: {}, expected: {}", + hex::encode(proof_leaf), + hex::encode(expected_leaf) + ))); + } + } + + Ok(()) + } +} diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 6dec93462..0295ff1bd 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -560,6 +560,13 @@ impl Handler for ForkChoiceHandler { return false; } + // No FCU override tests prior to bellatrix. + if self.handler_name == "should_override_forkchoice_update" + && (fork_name == ForkName::Base || fork_name == ForkName::Altair) + { + return false; + } + // These tests check block validity (which may include signatures) and there is no need to // run them with fake crypto. 
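// Why the proof checked above has the fixed lengths wired into `EthSpec` earlier in this
// patch (U17 on mainnet/gnosis, U9 on minimal): per the Deneb spec, the depth is
//   floorlog2(gindex(BeaconBlockBody, "blob_kzg_commitments")) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK).
// The Deneb body has 12 fields (padded to 16 leaves) and `blob_kzg_commitments` is field 11,
// so its generalized index is 16 + 11 = 27 and floorlog2(27) = 4; the extra 1 covers the
// list-length mix-in. Illustrative arithmetic only:
const BODY_DEPTH: u32 = 4; // floorlog2(27): proof through the body's field tree
const LENGTH_MIXIN: u32 = 1; // the commitment list's length leaf
const MAINNET_COMMITMENTS_DEPTH: u32 = 12; // ceillog2(4096) = ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)
const MINIMAL_COMMITMENTS_DEPTH: u32 = 4; // ceillog2(16) on the minimal preset
const _: () = assert!(BODY_DEPTH + LENGTH_MIXIN + MAINNET_COMMITMENTS_DEPTH == 17);
const _: () = assert!(BODY_DEPTH + LENGTH_MIXIN + MINIMAL_COMMITMENTS_DEPTH == 9);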
cfg!(not(feature = "fake_crypto")) @@ -786,6 +793,34 @@ impl Handler for MerkleProofValidityHandler { } } +#[derive(Derivative)] +#[derivative(Default(bound = ""))] +pub struct KzgInclusionMerkleProofValidityHandler(PhantomData); + +impl Handler for KzgInclusionMerkleProofValidityHandler { + type Case = cases::KzgInclusionMerkleProofValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "merkle_proof" + } + + fn handler_name(&self) -> String { + "single_merkle_proof".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Enabled in Deneb + fork_name != ForkName::Base + && fork_name != ForkName::Altair + && fork_name != ForkName::Merge + && fork_name != ForkName::Capella + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); diff --git a/testing/ef_tests/src/type_name.rs b/testing/ef_tests/src/type_name.rs index ef1284403..13121854a 100644 --- a/testing/ef_tests/src/type_name.rs +++ b/testing/ef_tests/src/type_name.rs @@ -78,7 +78,6 @@ type_name!(ProposerSlashing); type_name_generic!(SignedAggregateAndProof); type_name_generic!(SignedBeaconBlock); type_name!(SignedBeaconBlockHeader); -type_name_generic!(SignedBlobSidecar); type_name_generic!(SignedContributionAndProof); type_name!(SignedVoluntaryExit); type_name!(SigningData); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index d2d30b596..dd25dba8b 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,7 +1,7 @@ #![cfg(feature = "ef_tests")] -use ef_tests::*; -use types::*; +use ef_tests::{KzgInclusionMerkleProofValidityHandler, *}; +use types::{MainnetEthSpec, MinimalEthSpec, *}; // Check that the hand-computed multiplications on EthSpec are correctly computed. // This test lives here because one is most likely to muck these up during a spec update. @@ -378,12 +378,6 @@ mod ssz_static { SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); } - #[test] - fn signed_blob_sidecar() { - SszStaticHandler::, MinimalEthSpec>::deneb_only().run(); - SszStaticHandler::, MainnetEthSpec>::deneb_only().run(); - } - #[test] fn blob_identifier() { SszStaticHandler::::deneb_only().run(); @@ -546,6 +540,18 @@ fn fork_choice_withholding() { // There is no mainnet variant for this test. 
} +#[test] +fn fork_choice_should_override_forkchoice_update() { + ForkChoiceHandler::::new("should_override_forkchoice_update").run(); + ForkChoiceHandler::::new("should_override_forkchoice_update").run(); +} + +#[test] +fn fork_choice_get_proposer_head() { + ForkChoiceHandler::::new("get_proposer_head").run(); + ForkChoiceHandler::::new("get_proposer_head").run(); +} + #[test] fn optimistic_sync() { OptimisticSyncHandler::::default().run(); @@ -598,6 +604,12 @@ fn merkle_proof_validity() { MerkleProofValidityHandler::::default().run(); } +#[test] +fn kzg_inclusion_merkle_proof_validity() { + KzgInclusionMerkleProofValidityHandler::::default().run(); + KzgInclusionMerkleProofValidityHandler::::default().run(); +} + #[test] fn rewards() { for handler in &["basic", "leak", "random"] { diff --git a/validator_client/src/block_service.rs b/validator_client/src/block_service.rs index 47f16fc41..00d9b2e86 100644 --- a/validator_client/src/block_service.rs +++ b/validator_client/src/block_service.rs @@ -11,7 +11,7 @@ use crate::{ }; use bls::SignatureBytes; use environment::RuntimeContext; -use eth2::types::{BlockContents, SignedBlockContents}; +use eth2::types::{FullBlockContents, PublishBlockRequest}; use eth2::{BeaconNodeHttpClient, StatusCode}; use slog::{crit, debug, error, info, trace, warn, Logger}; use slot_clock::SlotClock; @@ -22,7 +22,7 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::mpsc; use types::{ - AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes, + BlindedBeaconBlock, BlockType, EthSpec, Graffiti, PublicKeyBytes, SignedBlindedBeaconBlock, Slot, }; @@ -329,10 +329,7 @@ impl BlockService { self.inner.context.executor.spawn( async move { if builder_proposals { - let result = service - .clone() - .publish_block::>(slot, validator_pubkey) - .await; + let result = service.publish_block(slot, validator_pubkey, true).await; match result { Err(BlockError::Recoverable(e)) => { error!( @@ -342,9 +339,8 @@ impl BlockService { "block_slot" => ?slot, "info" => "blinded proposal failed, attempting full block" ); - if let Err(e) = service - .publish_block::>(slot, validator_pubkey) - .await + if let Err(e) = + service.publish_block(slot, validator_pubkey, false).await { // Log a `crit` since a full block // (non-builder) proposal failed. @@ -371,9 +367,8 @@ impl BlockService { } Ok(_) => {} }; - } else if let Err(e) = service - .publish_block::>(slot, validator_pubkey) - .await + } else if let Err(e) = + service.publish_block(slot, validator_pubkey, false).await { // Log a `crit` since a full block (non-builder) // proposal failed. @@ -394,10 +389,11 @@ impl BlockService { } /// Produce a block at the given slot for validator_pubkey - async fn publish_block>( - self, + async fn publish_block( + &self, slot: Slot, validator_pubkey: PublicKeyBytes, + builder_proposal: bool, ) -> Result<(), BlockError> { let log = self.context.log(); let _timer = @@ -460,7 +456,7 @@ impl BlockService { // // Try the proposer nodes last, since it's likely that they don't have a // great view of attestations on the network. 
- let block_contents = proposer_fallback + let unsigned_block = proposer_fallback .request_proposers_last( RequireSynced::No, OfflineOnFailure::Yes, @@ -471,20 +467,32 @@ impl BlockService { randao_reveal_ref, graffiti, proposer_index, + builder_proposal, log, ) }, ) .await?; - let (block, maybe_blob_sidecars) = block_contents.deconstruct(); let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES); - let signed_block = match self_ref - .validator_store - .sign_block::(*validator_pubkey_ref, block, current_slot) - .await - { + let res = match unsigned_block { + UnsignedBlock::Full(block_contents) => { + let (block, maybe_blobs) = block_contents.deconstruct(); + self_ref + .validator_store + .sign_block(*validator_pubkey_ref, block, current_slot) + .await + .map(|b| SignedBlock::Full(PublishBlockRequest::new(b, maybe_blobs))) + } + UnsignedBlock::Blinded(block) => self_ref + .validator_store + .sign_block(*validator_pubkey_ref, block, current_slot) + .await + .map(SignedBlock::Blinded), + }; + + let signed_block = match res { Ok(block) => block, Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { // A pubkey can be missing when a validator was recently removed @@ -506,36 +514,6 @@ impl BlockService { } }; - let maybe_signed_blobs = match maybe_blob_sidecars { - Some(blob_sidecars) => { - match self_ref - .validator_store - .sign_blobs::(*validator_pubkey_ref, blob_sidecars) - .await - { - Ok(signed_blobs) => Some(signed_blobs), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently removed - // via the API. - warn!( - log, - "Missing pubkey for blobs"; - "info" => "a validator may have recently been removed from this VC", - "pubkey" => ?pubkey, - "slot" => ?slot - ); - return Ok(()); - } - Err(e) => { - return Err(BlockError::Recoverable(format!( - "Unable to sign blobs: {:?}", - e - ))) - } - } - } - None => None, - }; let signing_time_ms = Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis(); @@ -546,8 +524,6 @@ impl BlockService { "signing_time_ms" => signing_time_ms, ); - let signed_block_contents = SignedBlockContents::from((signed_block, maybe_signed_blobs)); - // Publish block with first available beacon node. 
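// Illustrative sketch (not taken from the diff) of the full-block path above: with blob
// signing removed from the VC, the only artifact signed locally is the `BeaconBlock`; the
// proofs-and-blobs bundle returned by the beacon node is forwarded untouched inside the
// `PublishBlockRequest`. Bindings such as `pubkey`, `block_contents`, `validator_store`
// and `beacon_node` are assumed context; error mapping is ours.
let (block, maybe_blobs) = block_contents.deconstruct();
let signed_block = validator_store
    .sign_block(pubkey, block, current_slot)
    .await
    .map_err(|e| format!("signing failed: {e:?}"))?;
// The unsigned blobs and proofs ride along with the signed block to the beacon node.
let request = PublishBlockRequest::new(signed_block, maybe_blobs);
beacon_node.post_beacon_blocks(&request).await?;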
// // Try the proposer nodes first, since we've likely gone to efforts to @@ -558,11 +534,8 @@ impl BlockService { RequireSynced::No, OfflineOnFailure::Yes, |beacon_node| async { - self.publish_signed_block_contents::( - &signed_block_contents, - beacon_node, - ) - .await + self.publish_signed_block_contents(&signed_block, beacon_node) + .await }, ) .await?; @@ -570,41 +543,41 @@ impl BlockService { info!( log, "Successfully published block"; - "block_type" => ?Payload::block_type(), - "deposits" => signed_block_contents.signed_block().message().body().deposits().len(), - "attestations" => signed_block_contents.signed_block().message().body().attestations().len(), + "block_type" => ?signed_block.block_type(), + "deposits" => signed_block.num_deposits(), + "attestations" => signed_block.num_attestations(), "graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()), - "slot" => signed_block_contents.signed_block().slot().as_u64(), + "slot" => signed_block.slot().as_u64(), ); Ok(()) } - async fn publish_signed_block_contents>( + async fn publish_signed_block_contents( &self, - signed_block_contents: &SignedBlockContents, + signed_block: &SignedBlock, beacon_node: &BeaconNodeHttpClient, ) -> Result<(), BlockError> { let log = self.context.log(); - let slot = signed_block_contents.signed_block().slot(); - match Payload::block_type() { - BlockType::Full => { + let slot = signed_block.slot(); + match signed_block { + SignedBlock::Full(signed_block) => { let _post_timer = metrics::start_timer_vec( &metrics::BLOCK_SERVICE_TIMES, &[metrics::BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blocks(signed_block_contents) + .post_beacon_blocks(signed_block) .await .or_else(|e| handle_block_post_error(e, slot, log))? } - BlockType::Blinded => { + SignedBlock::Blinded(signed_block) => { let _post_timer = metrics::start_timer_vec( &metrics::BLOCK_SERVICE_TIMES, &[metrics::BLINDED_BEACON_BLOCK_HTTP_POST], ); beacon_node - .post_beacon_blinded_blocks(signed_block_contents) + .post_beacon_blinded_blocks(signed_block) .await .or_else(|e| handle_block_post_error(e, slot, log))? } @@ -612,22 +585,23 @@ impl BlockService { Ok::<_, BlockError>(()) } - async fn get_validator_block>( + async fn get_validator_block( beacon_node: &BeaconNodeHttpClient, slot: Slot, randao_reveal_ref: &SignatureBytes, graffiti: Option, proposer_index: Option, + builder_proposal: bool, log: &Logger, - ) -> Result, BlockError> { - let block_contents: BlockContents = match Payload::block_type() { - BlockType::Full => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BEACON_BLOCK_HTTP_GET], - ); + ) -> Result, BlockError> { + let unsigned_block = if !builder_proposal { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BEACON_BLOCK_HTTP_GET], + ); + UnsignedBlock::Full( beacon_node - .get_validator_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) + .get_validator_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) .await .map_err(|e| { BlockError::Recoverable(format!( @@ -635,19 +609,16 @@ impl BlockService { e )) })? 
- .data - } - BlockType::Blinded => { - let _get_timer = metrics::start_timer_vec( - &metrics::BLOCK_SERVICE_TIMES, - &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], - ); + .data, + ) + } else { + let _get_timer = metrics::start_timer_vec( + &metrics::BLOCK_SERVICE_TIMES, + &[metrics::BLINDED_BEACON_BLOCK_HTTP_GET], + ); + UnsignedBlock::Blinded( beacon_node - .get_validator_blinded_blocks::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - ) + .get_validator_blinded_blocks::(slot, randao_reveal_ref, graffiti.as_ref()) .await .map_err(|e| { BlockError::Recoverable(format!( @@ -655,8 +626,8 @@ impl BlockService { e )) })? - .data - } + .data, + ) }; info!( @@ -664,13 +635,59 @@ impl BlockService { "Received unsigned block"; "slot" => slot.as_u64(), ); - if proposer_index != Some(block_contents.block().proposer_index()) { + if proposer_index != Some(unsigned_block.proposer_index()) { return Err(BlockError::Recoverable( "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), )); } - Ok::<_, BlockError>(block_contents) + Ok::<_, BlockError>(unsigned_block) + } +} + +pub enum UnsignedBlock { + Full(FullBlockContents), + Blinded(BlindedBeaconBlock), +} + +impl UnsignedBlock { + pub fn proposer_index(&self) -> u64 { + match self { + UnsignedBlock::Full(block) => block.block().proposer_index(), + UnsignedBlock::Blinded(block) => block.proposer_index(), + } + } +} + +pub enum SignedBlock { + Full(PublishBlockRequest), + Blinded(SignedBlindedBeaconBlock), +} + +impl SignedBlock { + pub fn block_type(&self) -> BlockType { + match self { + SignedBlock::Full(_) => BlockType::Full, + SignedBlock::Blinded(_) => BlockType::Blinded, + } + } + pub fn slot(&self) -> Slot { + match self { + SignedBlock::Full(block) => block.signed_block().message().slot(), + SignedBlock::Blinded(block) => block.message().slot(), + } + } + pub fn num_deposits(&self) -> usize { + match self { + SignedBlock::Full(block) => block.signed_block().message().body().deposits().len(), + SignedBlock::Blinded(block) => block.message().body().deposits().len(), + } + } + pub fn num_attestations(&self) -> usize { + match self { + SignedBlock::Full(block) => block.signed_block().message().body().attestations().len(), + SignedBlock::Blinded(block) => block.message().body().attestations().len(), + } } } diff --git a/validator_client/src/http_metrics/metrics.rs b/validator_client/src/http_metrics/metrics.rs index ed16f52d2..52b52126b 100644 --- a/validator_client/src/http_metrics/metrics.rs +++ b/validator_client/src/http_metrics/metrics.rs @@ -59,11 +59,6 @@ lazy_static::lazy_static! 
{ "Total count of attempted block signings", &["status"] ); - pub static ref SIGNED_BLOBS_TOTAL: Result = try_create_int_counter_vec( - "vc_signed_beacon_blobs_total", - "Total count of attempted blob signings", - &["status"] - ); pub static ref SIGNED_ATTESTATIONS_TOTAL: Result = try_create_int_counter_vec( "vc_signed_attestations_total", "Total count of attempted Attestation signings", diff --git a/validator_client/src/signing_method.rs b/validator_client/src/signing_method.rs index 96bfd2511..0de2f2f54 100644 --- a/validator_client/src/signing_method.rs +++ b/validator_client/src/signing_method.rs @@ -37,7 +37,6 @@ pub enum Error { pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload = FullPayload> { RandaoReveal(Epoch), BeaconBlock(&'a BeaconBlock), - BlobSidecar(&'a Payload::Sidecar), AttestationData(&'a AttestationData), SignedAggregateAndProof(&'a AggregateAndProof), SelectionProof(Slot), @@ -60,7 +59,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload> SignableMessage<'a, T, Pay match self { SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain), SignableMessage::BeaconBlock(b) => b.signing_root(domain), - SignableMessage::BlobSidecar(b) => b.signing_root(domain), SignableMessage::AttestationData(a) => a.signing_root(domain), SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain), SignableMessage::SelectionProof(slot) => slot.signing_root(domain), @@ -184,10 +182,6 @@ impl SigningMethod { Web3SignerObject::RandaoReveal { epoch } } SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?, - SignableMessage::BlobSidecar(_) => { - // https://github.com/ConsenSys/web3signer/issues/726 - unimplemented!("Web3Signer blob signing not implemented.") - } SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a), SignableMessage::SignedAggregateAndProof(a) => { Web3SignerObject::AggregateAndProof(a) diff --git a/validator_client/src/validator_store.rs b/validator_client/src/validator_store.rs index 612dd96bc..60155d8ef 100644 --- a/validator_client/src/validator_store.rs +++ b/validator_client/src/validator_store.rs @@ -6,7 +6,6 @@ use crate::{ Config, }; use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; -use eth2::types::VariableList; use parking_lot::{Mutex, RwLock}; use slashing_protection::{ interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase, @@ -18,16 +17,14 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; -use types::sidecar::Sidecar; use types::{ attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, Keypair, PublicKeyBytes, - SelectionProof, SidecarList, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedContributionAndProof, SignedRoot, SignedSidecar, SignedSidecarList, - SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData, - SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, - ValidatorRegistrationData, VoluntaryExit, + SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, + SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, + Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, 
ValidatorRegistrationData, VoluntaryExit, }; use validator_dir::ValidatorDir; @@ -567,39 +564,6 @@ impl ValidatorStore { } } - pub async fn sign_blobs>( - &self, - validator_pubkey: PublicKeyBytes, - blob_sidecars: SidecarList, - ) -> Result, Error> { - let mut signed_blob_sidecars = Vec::new(); - for blob_sidecar in blob_sidecars.into_iter() { - let slot = blob_sidecar.slot(); - let signing_epoch = slot.epoch(E::slots_per_epoch()); - let signing_context = self.signing_context(Domain::BlobSidecar, signing_epoch); - let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?; - - let signature = signing_method - .get_signature::( - SignableMessage::BlobSidecar(blob_sidecar.as_ref()), - signing_context, - &self.spec, - &self.task_executor, - ) - .await?; - - metrics::inc_counter_vec(&metrics::SIGNED_BLOBS_TOTAL, &[metrics::SUCCESS]); - - signed_blob_sidecars.push(SignedSidecar { - message: blob_sidecar, - signature, - _phantom: PhantomData, - }); - } - - Ok(VariableList::from(signed_blob_sidecars)) - } - pub async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes,