Sidecar inclusion proof (#4900)

* Refactor BlobSidecar to new type

* Fix some compile errors

* Gossip verification compiles

* Fix http api types take 1

* Fix another round of compile errors

* Beacon node crate compiles

* EF tests compile

* Remove all blob signing from VC

* fmt

* Tests compile

* Fix some tests

* Fix more http tests

* get compiling

* Fix gossip conditions and tests

* Add basic proof generation and verification

* remove unnecessary ssz decode

* add back build_sidecar

* remove default at fork for blobs

* fix beacon chain tests

* get release tests compiling

* fix lints

* fix existing spec tests

* add new ef tests

* fix gossip duplicate rule

* lints

* add back sidecar signature check in gossip

* add finalized descendant check to blob sidecar gossip

* fix error conversion

* fix release tests

* sidecar inclusion self review cleanup

* Add proof verification and computation metrics

* Remove accidentally committed file

* Unify some block and blob errors; add slashing conditions for sidecars

* Address review comment

* Clean up re-org tests (#4957)

* Address more review comments

* Add Comments & Eliminate Unnecessary Clones

* update names

* Update beacon_node/beacon_chain/src/metrics.rs

Co-authored-by: Jimmy Chen <jchen.tc@gmail.com>

* Update beacon_node/network/src/network_beacon_processor/tests.rs

Co-authored-by: Jimmy Chen <jchen.tc@gmail.com>

* pr feedback

* fix test compile

* Sidecar Inclusion proof small refactor and updates (#4967)

* Update some comments, variables and small cosmetic fixes.

* Couple blobs and proofs into a tuple in `PayloadAndBlobs` for simplicity and safety.

* Update function comment.

* Update testing/ef_tests/src/cases/merkle_proof_validity.rs

Co-authored-by: Jimmy Chen <jchen.tc@gmail.com>

* Rename the block and blob wrapper types used in the beacon API interfaces.

* make sure gossip invalid blobs are passed to the slasher (#4970)

* Add blob headers to slasher before adding to DA checker

* Replace Vec with HashSet in BlockQueue

* fmt

* Rename gindex -> index

* Simplify gossip condition

---------

Co-authored-by: realbigsean <seananderson33@gmail.com>
Co-authored-by: realbigsean <sean@sigmaprime.io>
Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Co-authored-by: Mark Mackey <mark@sigmaprime.io>
Co-authored-by: Jimmy Chen <jchen.tc@gmail.com>
Pawan Dhananjay 2023-12-05 08:19:59 -08:00 committed by GitHub
parent ec8edfb89a
commit 31044402ee
74 changed files with 1950 additions and 2270 deletions
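
For orientation before the file-by-file diff: this commit removes the per-blob `SignedBlobSidecar` wrapper. Each `BlobSidecar` now carries the proposer's `SignedBeaconBlockHeader` together with a Merkle proof that its `kzg_commitment` is included in the body the header commits to, and gossip validation checks that proof plus the header signature. The self-contained Rust sketch below illustrates only the shape of the inclusion check; it is not Lighthouse's `merkle_proof` implementation, and the SHA-256 pairing, the `Hash256` alias, and the `index` convention are illustrative assumptions.

// Conceptual sketch of the check behind `verify_blob_sidecar_inclusion_proof` in the
// diff below: fold the sidecar's inclusion-proof branch up from the KZG commitment
// leaf and compare the result with the block body root committed to by the signed
// header. Hashing and index handling here are illustrative, not Lighthouse's own.
use sha2::{Digest, Sha256};

type Hash256 = [u8; 32];

fn hash_pair(left: &Hash256, right: &Hash256) -> Hash256 {
    let mut hasher = Sha256::new();
    hasher.update(left);
    hasher.update(right);
    hasher.finalize().into()
}

fn verify_inclusion(leaf: Hash256, branch: &[Hash256], mut index: u64, body_root: Hash256) -> bool {
    let mut node = leaf;
    for sibling in branch {
        // The low bit of `index` tells us whether the current node is a left or right child.
        node = if index % 2 == 0 {
            hash_pair(&node, sibling)
        } else {
            hash_pair(sibling, &node)
        };
        index /= 2;
    }
    node == body_root
}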

Cargo.lock

@ -1857,6 +1857,7 @@ dependencies = [
"fs2",
"hex",
"kzg",
"logging",
"rayon",
"serde",
"serde_json",


@ -7,7 +7,7 @@ use crate::attester_cache::{AttesterCache, AttesterCacheKey};
use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache};
use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
use crate::beacon_proposer_cache::BeaconProposerCache;
use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob};
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::POS_PANDA_BANNER;
use crate::block_verification::{
@ -121,7 +121,6 @@ use tree_hash::TreeHash;
use types::beacon_state::CloneConfig;
use types::blob_sidecar::{BlobSidecarList, FixedBlobSidecarList};
use types::payload::BlockProductionVersion;
use types::sidecar::BlobItems;
use types::*;
pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
@ -489,16 +488,49 @@ pub struct BeaconChain<T: BeaconChainTypes> {
pub block_production_state: Arc<Mutex<Option<(Hash256, BlockProductionPreState<T::EthSpec>)>>>,
}
pub enum BeaconBlockResponseType<T: EthSpec> {
pub enum BeaconBlockResponseWrapper<T: EthSpec> {
Full(BeaconBlockResponse<T, FullPayload<T>>),
Blinded(BeaconBlockResponse<T, BlindedPayload<T>>),
}
impl<E: EthSpec> BeaconBlockResponseWrapper<E> {
pub fn fork_name(&self, spec: &ChainSpec) -> Result<ForkName, InconsistentFork> {
Ok(match self {
BeaconBlockResponseWrapper::Full(resp) => resp.block.to_ref().fork_name(spec)?,
BeaconBlockResponseWrapper::Blinded(resp) => resp.block.to_ref().fork_name(spec)?,
})
}
pub fn execution_payload_value(&self) -> Option<Uint256> {
match self {
BeaconBlockResponseWrapper::Full(resp) => resp.execution_payload_value,
BeaconBlockResponseWrapper::Blinded(resp) => resp.execution_payload_value,
}
}
pub fn consensus_block_value(&self) -> Option<u64> {
match self {
BeaconBlockResponseWrapper::Full(resp) => resp.consensus_block_value,
BeaconBlockResponseWrapper::Blinded(resp) => resp.consensus_block_value,
}
}
pub fn is_blinded(&self) -> bool {
matches!(self, BeaconBlockResponseWrapper::Blinded(_))
}
}
/// The components produced when the local beacon node creates a new block to extend the chain
pub struct BeaconBlockResponse<T: EthSpec, Payload: AbstractExecPayload<T>> {
/// The newly produced beacon block
pub block: BeaconBlock<T, Payload>,
/// The post-state after applying the new block
pub state: BeaconState<T>,
pub maybe_side_car: Option<SidecarList<T, <Payload as AbstractExecPayload<T>>::Sidecar>>,
/// The Blobs / Proofs associated with the new block
pub blob_items: Option<(KzgProofs<T>, BlobsList<T>)>,
/// The execution layer reward for the block
pub execution_payload_value: Option<Uint256>,
/// The consensus layer reward to the proposer
pub consensus_block_value: Option<u64>,
}
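
As an aside (not part of the diff), a minimal sketch of how a caller might turn `blob_items` back into sidecars once the block is signed, using the `BlobSidecar::build_sidecars` helper exercised in the tests later in this commit. The function name, the imports, and the assumption that `build_sidecars` returns `Result<BlobSidecarList<E>, BlobSidecarError>` are illustrative.

// Sketch: consuming `BeaconBlockResponse::blob_items` after block production.
use types::blob_sidecar::BlobSidecarError;
use types::{BlobSidecar, BlobSidecarList, BlobsList, EthSpec, KzgProofs, SignedBeaconBlock};

fn sidecars_from_blob_items<E: EthSpec>(
    block: &SignedBeaconBlock<E>,
    blob_items: Option<(KzgProofs<E>, BlobsList<E>)>,
) -> Result<Option<BlobSidecarList<E>>, BlobSidecarError> {
    blob_items
        // Pair each blob with its proof and rebuild the full sidecar list against the block.
        .map(|(kzg_proofs, blobs)| BlobSidecar::build_sidecars(blobs, block, kzg_proofs))
        .transpose()
}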
@ -2022,17 +2054,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn verify_blob_sidecar_for_gossip(
self: &Arc<Self>,
blob_sidecar: SignedBlobSidecar<T::EthSpec>,
blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
subnet_id: u64,
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS);
let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES);
blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self).map(
|v| {
metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES);
v
},
)
GossipVerifiedBlob::new(blob_sidecar, subnet_id, self).map(|v| {
metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES);
v
})
}
/// Accepts some 'LightClientOptimisticUpdate' from the network and attempts to verify it
@ -2832,7 +2862,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
self.data_availability_checker
.notify_gossip_blob(blob.as_blob().slot, block_root, &blob);
.notify_gossip_blob(blob.slot(), block_root, &blob);
let r = self.check_gossip_blob_availability_and_import(blob).await;
self.remove_notified(&block_root, r)
}
@ -2942,6 +2972,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Increment the Prometheus counter for block processing requests.
metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS);
// Set observed time if not already set. Usually this should be set by gossip or RPC,
// but just in case we set it again here (useful for tests).
if let (Some(seen_timestamp), Some(current_slot)) =
(self.slot_clock.now_duration(), self.slot_clock.now())
{
self.block_times_cache.write().set_time_observed(
block_root,
current_slot,
seen_timestamp,
None,
None,
);
}
let block_slot = unverified_block.block().slot();
// A small closure to group the verification and import errors.
@ -3097,6 +3141,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
blob: GossipVerifiedBlob<T>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
let slot = blob.slot();
if let Some(slasher) = self.slasher.as_ref() {
slasher.accept_block_header(blob.signed_block_header());
}
let availability = self.data_availability_checker.put_gossip_blob(blob)?;
self.process_availability(slot, availability).await
@ -3110,6 +3157,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
if let Some(slasher) = self.slasher.as_ref() {
for blob_sidecar in blobs.iter().filter_map(|blob| blob.clone()) {
slasher.accept_block_header(blob_sidecar.signed_block_header.clone());
}
}
let availability = self
.data_availability_checker
.put_rpc_blobs(block_root, blobs)?;
@ -3968,7 +4020,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
validator_graffiti: Option<Graffiti>,
verification: ProduceBlockVerification,
block_production_version: BlockProductionVersion,
) -> Result<BeaconBlockResponseType<T::EthSpec>, BlockProductionError> {
) -> Result<BeaconBlockResponseWrapper<T::EthSpec>, BlockProductionError> {
metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS);
let _complete_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES);
// Part 1/2 (blocking)
@ -4414,7 +4466,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// This function uses heuristics that align quite closely but not exactly with the re-org
/// conditions set out in `get_state_for_re_org` and `get_proposer_head`. The differences are
/// documented below.
fn overridden_forkchoice_update_params(
pub fn overridden_forkchoice_update_params(
&self,
canonical_forkchoice_params: ForkchoiceUpdateParameters,
) -> Result<ForkchoiceUpdateParameters, Error> {
@ -4432,7 +4484,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
})
}
fn overridden_forkchoice_update_params_or_failure_reason(
pub fn overridden_forkchoice_update_params_or_failure_reason(
&self,
canonical_forkchoice_params: &ForkchoiceUpdateParameters,
) -> Result<ForkchoiceUpdateParameters, ProposerHeadError<Error>> {
@ -4573,7 +4625,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.unwrap_or_else(|| Duration::from_secs(0)),
);
block_delays.observed.map_or(false, |delay| {
delay > self.slot_clock.unagg_attestation_production_delay()
delay >= self.slot_clock.unagg_attestation_production_delay()
})
}
@ -4599,7 +4651,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
validator_graffiti: Option<Graffiti>,
verification: ProduceBlockVerification,
block_production_version: BlockProductionVersion,
) -> Result<BeaconBlockResponseType<T::EthSpec>, BlockProductionError> {
) -> Result<BeaconBlockResponseWrapper<T::EthSpec>, BlockProductionError> {
// Part 1/3 (blocking)
//
// Perform the state advance and block-packing functions.
@ -4658,7 +4710,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.await
.map_err(BlockProductionError::TokioJoin)??;
Ok(BeaconBlockResponseType::Full(beacon_block_response))
Ok(BeaconBlockResponseWrapper::Full(beacon_block_response))
}
BlockProposalContentsType::Blinded(block_contents) => {
let chain = self.clone();
@ -4678,7 +4730,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.await
.map_err(BlockProductionError::TokioJoin)??;
Ok(BeaconBlockResponseType::Blinded(beacon_block_response))
Ok(BeaconBlockResponseWrapper::Blinded(beacon_block_response))
}
}
} else {
@ -4699,7 +4751,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.await
.map_err(BlockProductionError::TokioJoin)??;
Ok(BeaconBlockResponseType::Full(beacon_block_response))
Ok(BeaconBlockResponseWrapper::Full(beacon_block_response))
}
}
@ -4977,7 +5029,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
bls_to_execution_changes,
} = partial_beacon_block;
let (inner_block, blobs_opt, proofs_opt, execution_payload_value) = match &state {
let (inner_block, maybe_blobs_and_proofs, execution_payload_value) = match &state {
BeaconState::Base(_) => (
BeaconBlock::Base(BeaconBlockBase {
slot,
@ -4997,7 +5049,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
},
}),
None,
None,
Uint256::zero(),
),
BeaconState::Altair(_) => (
@ -5021,7 +5072,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
},
}),
None,
None,
Uint256::zero(),
),
BeaconState::Merge(_) => {
@ -5052,7 +5102,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
},
}),
None,
None,
execution_payload_value,
)
}
@ -5086,12 +5135,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
},
}),
None,
None,
execution_payload_value,
)
}
BeaconState::Deneb(_) => {
let (payload, kzg_commitments, blobs, proofs, execution_payload_value) =
let (payload, kzg_commitments, maybe_blobs_and_proofs, execution_payload_value) =
block_contents
.ok_or(BlockProductionError::MissingExecutionPayload)?
.deconstruct();
@ -5121,8 +5169,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.ok_or(BlockProductionError::InvalidPayloadFork)?,
},
}),
blobs,
proofs,
maybe_blobs_and_proofs,
execution_payload_value,
)
}
@ -5181,8 +5228,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let blobs_verification_timer =
metrics::start_timer(&metrics::BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES);
let maybe_sidecar_list = match (blobs_opt, proofs_opt) {
(Some(blobs_or_blobs_roots), Some(proofs)) => {
let blob_items = match maybe_blobs_and_proofs {
Some((blobs, proofs)) => {
let expected_kzg_commitments =
block.body().blob_kzg_commitments().map_err(|_| {
BlockProductionError::InvalidBlockVariant(
@ -5190,42 +5237,32 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
)
})?;
if expected_kzg_commitments.len() != blobs_or_blobs_roots.len() {
if expected_kzg_commitments.len() != blobs.len() {
return Err(BlockProductionError::MissingKzgCommitment(format!(
"Missing KZG commitment for slot {}. Expected {}, got: {}",
block.slot(),
blobs_or_blobs_roots.len(),
blobs.len(),
expected_kzg_commitments.len()
)));
}
let kzg_proofs = Vec::from(proofs);
if let Some(blobs) = blobs_or_blobs_roots.blobs() {
let kzg = self
.kzg
.as_ref()
.ok_or(BlockProductionError::TrustedSetupNotInitialized)?;
kzg_utils::validate_blobs::<T::EthSpec>(
kzg,
expected_kzg_commitments,
blobs.iter().collect(),
&kzg_proofs,
)
.map_err(BlockProductionError::KzgError)?;
}
Some(
Sidecar::build_sidecar(
blobs_or_blobs_roots,
&block,
expected_kzg_commitments,
kzg_proofs,
)
.map_err(BlockProductionError::FailedToBuildBlobSidecars)?,
let kzg = self
.kzg
.as_ref()
.ok_or(BlockProductionError::TrustedSetupNotInitialized)?;
kzg_utils::validate_blobs::<T::EthSpec>(
kzg,
expected_kzg_commitments,
blobs.iter().collect(),
&kzg_proofs,
)
.map_err(BlockProductionError::KzgError)?;
Some((kzg_proofs.into(), blobs))
}
_ => None,
None => None,
};
drop(blobs_verification_timer);
@ -5243,7 +5280,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(BeaconBlockResponse {
block,
state,
maybe_side_car: maybe_sidecar_list,
blob_items,
execution_payload_value: Some(execution_payload_value),
consensus_block_value: Some(consensus_block_value),
})


@ -2,15 +2,15 @@ use derivative::Derivative;
use slot_clock::SlotClock;
use std::sync::Arc;
use crate::beacon_chain::{
BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
use crate::beacon_chain::{BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT};
use crate::block_verification::{
cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info,
BlockSlashInfo,
};
use crate::block_verification::cheap_state_advance_to_obtain_committees;
use crate::data_availability_checker::AvailabilityCheckError;
use crate::kzg_utils::{validate_blob, validate_blobs};
use crate::{metrics, BeaconChainError};
use kzg::{Kzg, KzgCommitment};
use kzg::{Error as KzgError, Kzg, KzgCommitment};
use merkle_proof::MerkleTreeError;
use slog::{debug, warn};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
@ -18,7 +18,7 @@ use tree_hash::TreeHash;
use types::blob_sidecar::BlobIdentifier;
use types::{
BeaconStateError, BlobSidecar, BlobSidecarList, CloneConfig, EthSpec, Hash256,
SignedBlobSidecar, Slot,
SignedBeaconBlockHeader, Slot,
};
/// An error occurred while validating a gossip blob.
@ -75,7 +75,7 @@ pub enum GossipBlobError<T: EthSpec> {
/// ## Peer scoring
///
/// The blob is invalid and the peer is faulty.
ProposerSignatureInvalid,
ProposalSignatureInvalid,
/// The proposal_index corresponding to blob.beacon_block_root is not known.
///
@ -98,6 +98,12 @@ pub enum GossipBlobError<T: EthSpec> {
/// We cannot process the blob without validating its parent, the peer isn't necessarily faulty.
BlobParentUnknown(Arc<BlobSidecar<T>>),
/// Invalid kzg commitment inclusion proof
///
/// ## Peer scoring
///
/// The blob sidecar is invalid and the peer is faulty
InvalidInclusionProof,
/// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple
/// over gossip or non-gossip sources.
///
@ -109,6 +115,42 @@ pub enum GossipBlobError<T: EthSpec> {
slot: Slot,
index: u64,
},
/// `Kzg` struct hasn't been initialized. This is an internal error.
///
/// ## Peer scoring
///
/// The peer isn't faulty; this is an internal error.
KzgNotInitialized,
/// The kzg verification failed.
///
/// ## Peer scoring
///
/// The blob sidecar is invalid and the peer is faulty.
KzgError(kzg::Error),
/// The kzg commitment inclusion proof failed.
///
/// ## Peer scoring
///
/// The blob sidecar is invalid
InclusionProof(MerkleTreeError),
/// The pubkey cache timed out.
///
/// ## Peer scoring
///
/// The blob sidecar may be valid, this is an internal error.
PubkeyCacheTimeout,
/// The block conflicts with finalization, no need to propagate.
///
/// ## Peer scoring
///
/// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
/// imported.
NotFinalizedDescendant { block_parent_root: Hash256 },
}
impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
@ -118,7 +160,7 @@ impl<T: EthSpec> std::fmt::Display for GossipBlobError<T> {
write!(
f,
"BlobParentUnknown(parent_root:{})",
blob_sidecar.block_parent_root
blob_sidecar.block_parent_root()
)
}
other => write!(f, "{:?}", other),
@ -147,63 +189,168 @@ pub type GossipVerifiedBlobList<T> = VariableList<
/// the p2p network.
#[derive(Debug)]
pub struct GossipVerifiedBlob<T: BeaconChainTypes> {
blob: SignedBlobSidecar<T::EthSpec>,
block_root: Hash256,
blob: KzgVerifiedBlob<T::EthSpec>,
}
impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
pub fn new(
blob: SignedBlobSidecar<T::EthSpec>,
blob: Arc<BlobSidecar<T::EthSpec>>,
subnet_id: u64,
chain: &BeaconChain<T>,
) -> Result<Self, GossipBlobError<T::EthSpec>> {
let blob_index = blob.message.index;
validate_blob_sidecar_for_gossip(blob, blob_index, chain)
let header = blob.signed_block_header.clone();
// We only process slashing info if the gossip verification failed
// since we do not process the blob any further in that case.
validate_blob_sidecar_for_gossip(blob, subnet_id, chain).map_err(|e| {
process_block_slash_info::<_, GossipBlobError<T::EthSpec>>(
chain,
BlockSlashInfo::from_early_error_blob(header, e),
)
})
}
/// Construct a `GossipVerifiedBlob` that is assumed to be valid.
///
/// This should ONLY be used for testing.
pub fn __assumed_valid(blob: SignedBlobSidecar<T::EthSpec>) -> Self {
Self { blob }
pub fn __assumed_valid(blob: Arc<BlobSidecar<T::EthSpec>>) -> Self {
Self {
block_root: blob.block_root(),
blob: KzgVerifiedBlob { blob },
}
}
pub fn id(&self) -> BlobIdentifier {
self.blob.message.id()
BlobIdentifier {
block_root: self.block_root,
index: self.blob.blob_index(),
}
}
pub fn block_root(&self) -> Hash256 {
self.blob.message.block_root
}
pub fn to_blob(self) -> Arc<BlobSidecar<T::EthSpec>> {
self.blob.message
}
pub fn as_blob(&self) -> &BlobSidecar<T::EthSpec> {
&self.blob.message
}
pub fn signed_blob(&self) -> SignedBlobSidecar<T::EthSpec> {
self.blob.clone()
self.block_root
}
pub fn slot(&self) -> Slot {
self.blob.message.slot
self.blob.blob.slot()
}
pub fn index(&self) -> u64 {
self.blob.message.index
self.blob.blob.index
}
pub fn kzg_commitment(&self) -> KzgCommitment {
self.blob.message.kzg_commitment
self.blob.blob.kzg_commitment
}
pub fn proposer_index(&self) -> u64 {
self.blob.message.proposer_index
pub fn signed_block_header(&self) -> SignedBeaconBlockHeader {
self.blob.blob.signed_block_header.clone()
}
pub fn block_proposer_index(&self) -> u64 {
self.blob.blob.block_proposer_index()
}
pub fn into_inner(self) -> KzgVerifiedBlob<T::EthSpec> {
self.blob
}
pub fn as_blob(&self) -> &BlobSidecar<T::EthSpec> {
self.blob.as_blob()
}
/// This is cheap as we're calling clone on an Arc
pub fn clone_blob(&self) -> Arc<BlobSidecar<T::EthSpec>> {
self.blob.clone_blob()
}
}
/// Wrapper over a `BlobSidecar` for which we have completed kzg verification.
/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`.
#[derive(Debug, Derivative, Clone, Encode, Decode)]
#[derivative(PartialEq, Eq)]
#[ssz(struct_behaviour = "transparent")]
pub struct KzgVerifiedBlob<T: EthSpec> {
blob: Arc<BlobSidecar<T>>,
}
impl<T: EthSpec> PartialOrd for KzgVerifiedBlob<T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<T: EthSpec> Ord for KzgVerifiedBlob<T> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.blob.cmp(&other.blob)
}
}
impl<T: EthSpec> KzgVerifiedBlob<T> {
pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
self.blob
}
pub fn as_blob(&self) -> &BlobSidecar<T> {
&self.blob
}
/// This is cheap as we're calling clone on an Arc
pub fn clone_blob(&self) -> Arc<BlobSidecar<T>> {
self.blob.clone()
}
pub fn blob_index(&self) -> u64 {
self.blob.index
}
}
#[cfg(test)]
impl<T: EthSpec> KzgVerifiedBlob<T> {
pub fn new(blob: BlobSidecar<T>) -> Self {
Self {
blob: Arc::new(blob),
}
}
}
/// Complete kzg verification for a `BlobSidecar`.
///
/// Returns an error if the kzg verification check fails.
pub fn verify_kzg_for_blob<T: EthSpec>(
blob: Arc<BlobSidecar<T>>,
kzg: &Kzg,
) -> Result<KzgVerifiedBlob<T>, KzgError> {
validate_blob::<T>(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)?;
Ok(KzgVerifiedBlob { blob })
}
/// Complete kzg verification for a list of `BlobSidecar`s.
/// Returns an error if any of the `BlobSidecar`s fails kzg verification.
///
/// Note: This function should be preferred over calling `verify_kzg_for_blob`
/// in a loop, since it KZG-verifies the list of blobs as a batch, which is more efficient.
pub fn verify_kzg_for_blob_list<T: EthSpec>(
blob_list: &BlobSidecarList<T>,
kzg: &Kzg,
) -> Result<(), KzgError> {
let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list
.iter()
.map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof)))
.unzip();
validate_blobs::<T>(kzg, commitments.as_slice(), blobs, proofs.as_slice())
}
pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
subnet: u64,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlob<T>, GossipBlobError<T::EthSpec>> {
let blob_slot = signed_blob_sidecar.message.slot;
let blob_index = signed_blob_sidecar.message.index;
let block_parent_root = signed_blob_sidecar.message.block_parent_root;
let blob_proposer_index = signed_blob_sidecar.message.proposer_index;
let block_root = signed_blob_sidecar.message.block_root;
let blob_slot = blob_sidecar.slot();
let blob_index = blob_sidecar.index;
let block_parent_root = blob_sidecar.block_parent_root();
let blob_proposer_index = blob_sidecar.block_proposer_index();
let block_root = blob_sidecar.block_root();
let blob_epoch = blob_slot.epoch(T::EthSpec::slots_per_epoch());
let signed_block_header = &blob_sidecar.signed_block_header;
// This condition is not possible if we have received the blob from the network
// since we only subscribe to `MaxBlobsPerBlock` subnets over the gossip network.
// We include this check only for completeness.
// Getting this error would imply something very wrong with our network decoding logic.
if blob_index >= T::EthSpec::max_blobs_per_block() as u64 {
return Err(GossipBlobError::InvalidSubnet {
expected: subnet,
received: blob_index,
});
}
// Verify that the blob_sidecar was received on the correct subnet.
if blob_index != subnet {
@ -213,8 +360,6 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
});
}
let blob_root = get_blob_root(&signed_blob_sidecar);
// Verify that the sidecar is not from a future slot.
let latest_permissible_slot = chain
.slot_clock
@ -240,11 +385,12 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
});
}
// Verify that this is the first blob sidecar received for the (sidecar.block_root, sidecar.index) tuple
// Verify that this is the first blob sidecar received for the tuple:
// (block_header.slot, block_header.proposer_index, blob_sidecar.index)
if chain
.observed_blob_sidecars
.read()
.is_known(&signed_blob_sidecar.message)
.is_known(&blob_sidecar)
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(GossipBlobError::RepeatBlob {
@ -254,18 +400,31 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
});
}
// Verify the inclusion proof in the sidecar
let _timer = metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION);
if !blob_sidecar
.verify_blob_sidecar_inclusion_proof()
.map_err(GossipBlobError::InclusionProof)?
{
return Err(GossipBlobError::InvalidInclusionProof);
}
drop(_timer);
let fork_choice = chain.canonical_head.fork_choice_read_lock();
// We have already verified that the blob is past finalization, so we can
// just check fork choice for the block's parent.
let Some(parent_block) = chain
.canonical_head
.fork_choice_read_lock()
.get_block(&block_parent_root)
else {
return Err(GossipBlobError::BlobParentUnknown(
signed_blob_sidecar.message,
));
let Some(parent_block) = fork_choice.get_block(&block_parent_root) else {
return Err(GossipBlobError::BlobParentUnknown(blob_sidecar));
};
// Do not process a blob that does not descend from the finalized root.
// We just loaded the parent_block, so we can be sure that it exists in fork choice.
if !fork_choice.is_finalized_checkpoint_or_descendant(block_parent_root) {
return Err(GossipBlobError::NotFinalizedDescendant { block_parent_root });
}
drop(fork_choice);
if parent_block.slot >= blob_slot {
return Err(GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
@ -273,8 +432,6 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
});
}
// Note: We check that the proposer_index matches against the shuffling first to avoid
// signature verification against an invalid proposer_index.
let proposer_shuffling_root =
if parent_block.slot.epoch(T::EthSpec::slots_per_epoch()) == blob_epoch {
parent_block
@ -374,38 +531,26 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.get(blob_slot.as_usize() % T::EthSpec::slots_per_epoch() as usize)
.ok_or_else(|| BeaconChainError::NoProposerForSlot(blob_slot))?;
let fork = state.fork();
// Prime the proposer shuffling cache with the newly-learned value.
chain.beacon_proposer_cache.lock().insert(
blob_epoch,
proposer_shuffling_root,
proposers,
state.fork(),
fork,
)?;
(proposer_index, state.fork())
(proposer_index, fork)
}
};
if proposer_index != blob_proposer_index as usize {
return Err(GossipBlobError::ProposerIndexMismatch {
sidecar: blob_proposer_index as usize,
local: proposer_index,
});
}
// Signature verification
// Signature verify the signed block header.
let signature_is_valid = {
let pubkey_cache = chain
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.map_err(GossipBlobError::BeaconChainError)?;
let pubkey_cache =
get_validator_pubkey_cache(chain).map_err(|_| GossipBlobError::PubkeyCacheTimeout)?;
let pubkey = pubkey_cache
.get(proposer_index)
.ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?;
signed_blob_sidecar.verify_signature(
Some(blob_root),
signed_block_header.verify_signature::<T::EthSpec>(
pubkey,
&fork,
chain.genesis_validators_root,
@ -414,7 +559,14 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
};
if !signature_is_valid {
return Err(GossipBlobError::ProposerSignatureInvalid);
return Err(GossipBlobError::ProposalSignatureInvalid);
}
if proposer_index != blob_proposer_index as usize {
return Err(GossipBlobError::ProposerIndexMismatch {
sidecar: blob_proposer_index as usize,
local: proposer_index,
});
}
// Now the signature is valid, store the proposal so we don't accept another blob sidecar
@ -431,7 +583,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
if chain
.observed_blob_sidecars
.write()
.observe_sidecar(&signed_blob_sidecar.message)
.observe_sidecar(&blob_sidecar)
.map_err(|e| GossipBlobError::BeaconChainError(e.into()))?
{
return Err(GossipBlobError::RepeatBlob {
@ -441,106 +593,27 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
});
}
// Kzg verification for gossip blob sidecar
let kzg = chain
.kzg
.as_ref()
.ok_or(GossipBlobError::KzgNotInitialized)?;
let kzg_verified_blob =
verify_kzg_for_blob(blob_sidecar, kzg).map_err(GossipBlobError::KzgError)?;
Ok(GossipVerifiedBlob {
blob: signed_blob_sidecar,
block_root,
blob: kzg_verified_blob,
})
}
/// Wrapper over a `BlobSidecar` for which we have completed kzg verification.
/// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`.
#[derive(Debug, Derivative, Clone, Encode, Decode)]
#[derivative(PartialEq, Eq)]
#[ssz(struct_behaviour = "transparent")]
pub struct KzgVerifiedBlob<T: EthSpec> {
blob: Arc<BlobSidecar<T>>,
}
impl<T: EthSpec> PartialOrd for KzgVerifiedBlob<T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<T: EthSpec> Ord for KzgVerifiedBlob<T> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.blob.cmp(&other.blob)
}
}
impl<T: EthSpec> KzgVerifiedBlob<T> {
pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
self.blob
}
pub fn as_blob(&self) -> &BlobSidecar<T> {
&self.blob
}
pub fn clone_blob(&self) -> Arc<BlobSidecar<T>> {
self.blob.clone()
}
pub fn block_root(&self) -> Hash256 {
self.blob.block_root
}
pub fn blob_index(&self) -> u64 {
self.blob.index
}
}
#[cfg(test)]
impl<T: EthSpec> KzgVerifiedBlob<T> {
pub fn new(blob: BlobSidecar<T>) -> Self {
Self {
blob: Arc::new(blob),
}
}
}
/// Complete kzg verification for a `GossipVerifiedBlob`.
///
/// Returns an error if the kzg verification check fails.
pub fn verify_kzg_for_blob<T: EthSpec>(
blob: Arc<BlobSidecar<T>>,
kzg: &Kzg,
) -> Result<KzgVerifiedBlob<T>, AvailabilityCheckError> {
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES);
if validate_blob::<T>(kzg, &blob.blob, blob.kzg_commitment, blob.kzg_proof)
.map_err(AvailabilityCheckError::Kzg)?
{
Ok(KzgVerifiedBlob { blob })
} else {
Err(AvailabilityCheckError::KzgVerificationFailed)
}
}
/// Complete kzg verification for a list of `BlobSidecar`s.
/// Returns an error if any of the `BlobSidecar`s fails kzg verification.
///
/// Note: This function should be preferred over calling `verify_kzg_for_blob`
/// in a loop since this function kzg verifies a list of blobs more efficiently.
pub fn verify_kzg_for_blob_list<T: EthSpec>(
blob_list: &BlobSidecarList<T>,
kzg: &Kzg,
) -> Result<(), AvailabilityCheckError> {
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES);
let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list
.iter()
.map(|blob| (&blob.blob, (blob.kzg_commitment, blob.kzg_proof)))
.unzip();
if validate_blobs::<T>(kzg, commitments.as_slice(), blobs, proofs.as_slice())
.map_err(AvailabilityCheckError::Kzg)?
{
Ok(())
} else {
Err(AvailabilityCheckError::KzgVerificationFailed)
}
}
/// Returns the canonical root of the given `blob`.
///
/// Use this function to ensure that we report the blob hashing time Prometheus metric.
pub fn get_blob_root<E: EthSpec>(blob: &SignedBlobSidecar<E>) -> Hash256 {
pub fn get_blob_root<E: EthSpec>(blob: &BlobSidecar<E>) -> Hash256 {
let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT);
let blob_root = blob.message.tree_hash_root();
let blob_root = blob.tree_hash_root();
metrics::stop_timer(blob_root_timer);
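
A short usage sketch for the two KZG helpers above (the caller and its name are illustrative, and it assumes this module's existing imports): when several sidecars arrive together, prefer the batch helper over looping the single-blob check.

// Sketch: batch verification is preferred to calling `verify_kzg_for_blob` per sidecar.
fn verify_all_blobs<T: EthSpec>(
    kzg: &Kzg,
    blob_list: &BlobSidecarList<T>,
) -> Result<(), KzgError> {
    // Equivalent in outcome to, but cheaper than:
    //
    //     for blob in blob_list.iter() {
    //         verify_kzg_for_blob(blob.clone(), kzg)?;
    //     }
    verify_kzg_for_blob_list(blob_list, kzg)
}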


@ -23,7 +23,7 @@ pub struct Timestamps {
}
// Helps arrange delay data so it is more relevant to metrics.
#[derive(Default)]
#[derive(Debug, Default)]
pub struct BlockDelays {
pub observed: Option<Duration>,
pub imported: Option<Duration>,
@ -51,7 +51,7 @@ impl BlockDelays {
// If the block was received via gossip, we can record the client type of the peer which sent us
// the block.
#[derive(Clone, Default)]
#[derive(Debug, Clone, Default, PartialEq)]
pub struct BlockPeerInfo {
pub id: Option<String>,
pub client: Option<String>,
@ -80,6 +80,8 @@ pub struct BlockTimesCache {
/// Helper methods to read from and write to the cache.
impl BlockTimesCache {
/// Set the observation time for `block_root` to `timestamp` if `timestamp` is less than
/// any previous timestamp at which this block was observed.
pub fn set_time_observed(
&mut self,
block_root: BlockRoot,
@ -92,11 +94,19 @@ impl BlockTimesCache {
.cache
.entry(block_root)
.or_insert_with(|| BlockTimesCacheValue::new(slot));
block_times.timestamps.observed = Some(timestamp);
block_times.peer_info = BlockPeerInfo {
id: peer_id,
client: peer_client,
};
match block_times.timestamps.observed {
Some(existing_observation_time) if existing_observation_time <= timestamp => {
// Existing timestamp is earlier, do nothing.
}
_ => {
// No existing timestamp, or new timestamp is earlier.
block_times.timestamps.observed = Some(timestamp);
block_times.peer_info = BlockPeerInfo {
id: peer_id,
client: peer_client,
};
}
}
}
pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) {
@ -141,3 +151,71 @@ impl BlockTimesCache {
.retain(|_, cache| cache.slot > current_slot.saturating_sub(64_u64));
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn observed_time_uses_minimum() {
let mut cache = BlockTimesCache::default();
let block_root = Hash256::zero();
let slot = Slot::new(100);
let slot_start_time = Duration::from_secs(0);
let ts1 = Duration::from_secs(5);
let ts2 = Duration::from_secs(6);
let ts3 = Duration::from_secs(4);
let peer_info2 = BlockPeerInfo {
id: Some("peer2".to_string()),
client: Some("lighthouse".to_string()),
};
let peer_info3 = BlockPeerInfo {
id: Some("peer3".to_string()),
client: Some("prysm".to_string()),
};
cache.set_time_observed(block_root, slot, ts1, None, None);
assert_eq!(
cache.get_block_delays(block_root, slot_start_time).observed,
Some(ts1)
);
assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default());
// Second observation with higher timestamp should not override anything, even though it has
// superior peer info.
cache.set_time_observed(
block_root,
slot,
ts2,
peer_info2.id.clone(),
peer_info2.client.clone(),
);
assert_eq!(
cache.get_block_delays(block_root, slot_start_time).observed,
Some(ts1)
);
assert_eq!(cache.get_peer_info(block_root), BlockPeerInfo::default());
// Third observation with lower timestamp should override everything.
cache.set_time_observed(
block_root,
slot,
ts3,
peer_info3.id.clone(),
peer_info3.client.clone(),
);
assert_eq!(
cache.get_block_delays(block_root, slot_start_time).observed,
Some(ts3)
);
assert_eq!(cache.get_peer_info(block_root), peer_info3);
}
}


@ -70,7 +70,7 @@ use crate::{
metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
};
use derivative::Derivative;
use eth2::types::{EventKind, SignedBlockContents};
use eth2::types::{EventKind, PublishBlockRequest};
use execution_layer::PayloadStatus;
pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
@ -95,15 +95,15 @@ use std::fs;
use std::io::Write;
use std::sync::Arc;
use std::time::Duration;
use store::{Error as DBError, HotStateSummary, KeyValueStore, SignedBlobSidecarList, StoreOp};
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
use task_executor::JoinHandle;
use tree_hash::TreeHash;
use types::ExecPayload;
use types::{
BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec,
ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch,
SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
};
use types::{BlobSidecar, ExecPayload};
pub const POS_PANDA_BANNER: &str = r#"
,,, ,,, ,,, ,,,
@ -507,7 +507,7 @@ pub enum BlockSlashInfo<TErr> {
}
impl<E: EthSpec> BlockSlashInfo<BlockError<E>> {
pub fn from_early_error(header: SignedBeaconBlockHeader, e: BlockError<E>) -> Self {
pub fn from_early_error_block(header: SignedBeaconBlockHeader, e: BlockError<E>) -> Self {
match e {
BlockError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e),
// `InvalidSignature` could indicate any signature in the block, so we want
@ -517,17 +517,28 @@ impl<E: EthSpec> BlockSlashInfo<BlockError<E>> {
}
}
impl<E: EthSpec> BlockSlashInfo<GossipBlobError<E>> {
pub fn from_early_error_blob(header: SignedBeaconBlockHeader, e: GossipBlobError<E>) -> Self {
match e {
GossipBlobError::ProposalSignatureInvalid => BlockSlashInfo::SignatureInvalid(e),
// `InvalidSignature` could indicate any signature in the block, so we want
// to recheck the proposer signature alone.
_ => BlockSlashInfo::SignatureNotChecked(header, e),
}
}
}
/// Process invalid blocks to see if they are suitable for the slasher.
///
/// If no slasher is configured, this is a no-op.
fn process_block_slash_info<T: BeaconChainTypes>(
pub(crate) fn process_block_slash_info<T: BeaconChainTypes, TErr: BlockBlobError>(
chain: &BeaconChain<T>,
slash_info: BlockSlashInfo<BlockError<T::EthSpec>>,
) -> BlockError<T::EthSpec> {
slash_info: BlockSlashInfo<TErr>,
) -> TErr {
if let Some(slasher) = chain.slasher.as_ref() {
let (verified_header, error) = match slash_info {
BlockSlashInfo::SignatureNotChecked(header, e) => {
if verify_header_signature(chain, &header).is_ok() {
if verify_header_signature::<_, TErr>(chain, &header).is_ok() {
(header, e)
} else {
return e;
@ -673,7 +684,6 @@ pub trait IntoGossipVerifiedBlockContents<T: BeaconChainTypes>: Sized {
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>>;
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec>;
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>>;
}
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for GossipVerifiedBlockContents<T> {
@ -686,45 +696,40 @@ impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for GossipVerifiedB
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.0.block.as_block()
}
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>> {
self.1.as_ref().map(|blobs| {
VariableList::from(
blobs
.into_iter()
.map(GossipVerifiedBlob::signed_blob)
.collect::<Vec<_>>(),
)
})
}
}
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for SignedBlockContents<T::EthSpec> {
impl<T: BeaconChainTypes> IntoGossipVerifiedBlockContents<T> for PublishBlockRequest<T::EthSpec> {
fn into_gossip_verified_block(
self,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlockContents<T>, BlockContentsError<T::EthSpec>> {
let (block, blobs) = self.deconstruct();
let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?;
let gossip_verified_blobs = blobs
.map(|blobs| {
Ok::<_, GossipBlobError<T::EthSpec>>(VariableList::from(
blobs
.into_iter()
.map(|blob| GossipVerifiedBlob::new(blob, chain))
.collect::<Result<Vec<_>, GossipBlobError<T::EthSpec>>>()?,
))
.map(|(kzg_proofs, blobs)| {
let mut gossip_verified_blobs = vec![];
for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() {
let _timer =
metrics::start_timer(&metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION);
let blob = BlobSidecar::new(i, blob, &block, *kzg_proof)
.map_err(BlockContentsError::SidecarError)?;
drop(_timer);
let gossip_verified_blob =
GossipVerifiedBlob::new(Arc::new(blob), i as u64, chain)?;
gossip_verified_blobs.push(gossip_verified_blob);
}
let gossip_verified_blobs = VariableList::from(gossip_verified_blobs);
Ok::<_, BlockContentsError<T::EthSpec>>(gossip_verified_blobs)
})
.transpose()?;
let gossip_verified_block = GossipVerifiedBlock::new(Arc::new(block), chain)?;
Ok((gossip_verified_block, gossip_verified_blobs))
}
fn inner_block(&self) -> &SignedBeaconBlock<T::EthSpec> {
self.signed_block()
}
fn inner_blobs(&self) -> Option<SignedBlobSidecarList<T::EthSpec>> {
self.blobs_cloned()
}
}
/// Implemented on types that can be converted into a `ExecutionPendingBlock`.
@ -745,7 +750,9 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
}
execution_pending
})
.map_err(|slash_info| process_block_slash_info(chain, slash_info))
.map_err(|slash_info| {
process_block_slash_info::<_, BlockError<T::EthSpec>>(chain, slash_info)
})
}
/// Convert the block to fully-verified form while producing data to aid checking slashability.
@ -774,7 +781,10 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
// and it could be a repeat proposal (a likely cause for slashing!).
let header = block.signed_block_header();
Self::new_without_slasher_checks(block, chain).map_err(|e| {
process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e))
process_block_slash_info::<_, BlockError<T::EthSpec>>(
chain,
BlockSlashInfo::from_early_error_block(header, e),
)
})
}
@ -1055,7 +1065,8 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
chain: &BeaconChain<T>,
) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = block.signed_block_header();
Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e))
Self::new(block, block_root, chain)
.map_err(|e| BlockSlashInfo::from_early_error_block(header, e))
}
/// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify
@ -1109,7 +1120,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
let header = from.block.signed_block_header();
Self::from_gossip_verified_block(from, chain)
.map_err(|e| BlockSlashInfo::from_early_error(header, e))
.map_err(|e| BlockSlashInfo::from_early_error_block(header, e))
}
pub fn block_root(&self) -> Hash256 {
@ -1908,28 +1919,45 @@ fn load_parent<T: BeaconChainTypes, B: AsBlock<T::EthSpec>>(
result
}
/// This trait is used to unify `BlockError` and `BlobError` so
/// `cheap_state_advance_to_obtain_committees` can be re-used in gossip blob validation.
pub trait CheapStateAdvanceError: From<BeaconStateError> + From<BeaconChainError> + Debug {
/// This trait is used to unify `BlockError` and `GossipBlobError`.
pub trait BlockBlobError: From<BeaconStateError> + From<BeaconChainError> + Debug {
fn not_later_than_parent_error(block_slot: Slot, state_slot: Slot) -> Self;
fn unknown_validator_error(validator_index: u64) -> Self;
fn proposer_signature_invalid() -> Self;
}
impl<E: EthSpec> CheapStateAdvanceError for BlockError<E> {
impl<E: EthSpec> BlockBlobError for BlockError<E> {
fn not_later_than_parent_error(block_slot: Slot, parent_slot: Slot) -> Self {
BlockError::BlockIsNotLaterThanParent {
block_slot,
parent_slot,
}
}
fn unknown_validator_error(validator_index: u64) -> Self {
BlockError::UnknownValidator(validator_index)
}
fn proposer_signature_invalid() -> Self {
BlockError::ProposalSignatureInvalid
}
}
impl<E: EthSpec> CheapStateAdvanceError for GossipBlobError<E> {
impl<E: EthSpec> BlockBlobError for GossipBlobError<E> {
fn not_later_than_parent_error(blob_slot: Slot, parent_slot: Slot) -> Self {
GossipBlobError::BlobIsNotLaterThanParent {
blob_slot,
parent_slot,
}
}
fn unknown_validator_error(validator_index: u64) -> Self {
GossipBlobError::UnknownValidator(validator_index)
}
fn proposer_signature_invalid() -> Self {
GossipBlobError::ProposalSignatureInvalid
}
}
/// Performs a cheap (time-efficient) state advancement so the committees and proposer shuffling for
@ -1943,7 +1971,7 @@ impl<E: EthSpec> CheapStateAdvanceError for GossipBlobError<E> {
/// and `Cow::Borrowed(state)` will be returned. Otherwise, the state will be cloned, cheaply
/// advanced and then returned as a `Cow::Owned`. The end result is that the given `state` is never
/// mutated to be invalid (in fact, it is never changed beyond a simple committee cache build).
pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateAdvanceError>(
pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: BlockBlobError>(
state: &'a mut BeaconState<E>,
state_root_opt: Option<Hash256>,
block_slot: Slot,
@ -1979,12 +2007,11 @@ pub fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec, Err: CheapStateA
/// Obtains a read-locked `ValidatorPubkeyCache` from the `chain`.
pub fn get_validator_pubkey_cache<T: BeaconChainTypes>(
chain: &BeaconChain<T>,
) -> Result<RwLockReadGuard<ValidatorPubkeyCache<T>>, BlockError<T::EthSpec>> {
) -> Result<RwLockReadGuard<ValidatorPubkeyCache<T>>, BeaconChainError> {
chain
.validator_pubkey_cache
.try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
.ok_or(BeaconChainError::ValidatorPubkeyCacheLockTimeout)
.map_err(BlockError::BeaconChainError)
}
/// Produces an _empty_ `BlockSignatureVerifier`.
@ -2025,14 +2052,14 @@ fn get_signature_verifier<'a, T: BeaconChainTypes>(
/// Verify that `header` was signed with a valid signature from its proposer.
///
/// Return `Ok(())` if the signature is valid, and an `Err` otherwise.
fn verify_header_signature<T: BeaconChainTypes>(
fn verify_header_signature<T: BeaconChainTypes, Err: BlockBlobError>(
chain: &BeaconChain<T>,
header: &SignedBeaconBlockHeader,
) -> Result<(), BlockError<T::EthSpec>> {
) -> Result<(), Err> {
let proposer_pubkey = get_validator_pubkey_cache(chain)?
.get(header.message.proposer_index as usize)
.cloned()
.ok_or(BlockError::UnknownValidator(header.message.proposer_index))?;
.ok_or(Err::unknown_validator_error(header.message.proposer_index))?;
let head_fork = chain.canonical_head.cached_head().head_fork();
if header.verify_signature::<T::EthSpec>(
@ -2043,7 +2070,7 @@ fn verify_header_signature<T: BeaconChainTypes>(
) {
Ok(())
} else {
Err(BlockError::ProposalSignatureInvalid)
Err(Err::proposer_signature_invalid())
}
}
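
To make the `BlockBlobError` unification concrete, here is an illustrative pair of call sites (a sketch written as if inside this module; the function names are hypothetical, the real callers being `process_block_slash_info` and the gossip paths above). The same header check surfaces either a block error or a blob error depending on the type parameter.

// Sketch: one header-signature check, two error types, via the `BlockBlobError` trait.
fn check_block_header<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    header: &SignedBeaconBlockHeader,
) -> Result<(), BlockError<T::EthSpec>> {
    // `Err::unknown_validator_error` / `Err::proposer_signature_invalid` resolve to
    // `BlockError::UnknownValidator` / `BlockError::ProposalSignatureInvalid` here...
    verify_header_signature::<T, BlockError<T::EthSpec>>(chain, header)
}

fn check_blob_header<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    header: &SignedBeaconBlockHeader,
) -> Result<(), GossipBlobError<T::EthSpec>> {
    // ...and to `GossipBlobError::UnknownValidator` / `ProposalSignatureInvalid` here.
    verify_header_signature::<T, GossipBlobError<T::EthSpec>>(chain, header)
}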


@ -8,7 +8,7 @@ use derivative::Derivative;
use ssz_types::VariableList;
use state_processing::ConsensusContext;
use std::sync::Arc;
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
use types::blob_sidecar::{BlobIdentifier, BlobSidecarError, FixedBlobSidecarList};
use types::{
BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, Epoch, EthSpec, Hash256,
SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
@ -98,13 +98,6 @@ impl<E: EthSpec> RpcBlock<E> {
return Err(AvailabilityCheckError::MissingBlobs);
}
for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) {
let blob_block_root = blob.block_root;
if blob_block_root != block_root {
return Err(AvailabilityCheckError::InconsistentBlobBlockRoots {
block_root,
blob_block_root,
});
}
let blob_commitment = blob.kzg_commitment;
if blob_commitment != block_commitment {
return Err(AvailabilityCheckError::KzgCommitmentMismatch {
@ -309,6 +302,7 @@ pub type GossipVerifiedBlockContents<T> =
pub enum BlockContentsError<T: EthSpec> {
BlockError(BlockError<T>),
BlobError(GossipBlobError<T>),
SidecarError(BlobSidecarError),
}
impl<T: EthSpec> From<BlockError<T>> for BlockContentsError<T> {
@ -332,6 +326,9 @@ impl<T: EthSpec> std::fmt::Display for BlockContentsError<T> {
BlockContentsError::BlobError(err) => {
write!(f, "BlobError({})", err)
}
BlockContentsError::SidecarError(err) => {
write!(f, "SidecarError({:?})", err)
}
}
}
}


@ -200,7 +200,9 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
let mut verified_blobs = vec![];
if let Some(kzg) = self.kzg.as_ref() {
for blob in blobs.iter().flatten() {
verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?)
verified_blobs.push(
verify_kzg_for_blob(blob.clone(), kzg).map_err(AvailabilityCheckError::Kzg)?,
);
}
} else {
return Err(AvailabilityCheckError::KzgNotInitialized);
@ -209,7 +211,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
.put_kzg_verified_blobs(block_root, verified_blobs)
}
/// This first validates the KZG commitments included in the blob sidecar.
/// Check if we've cached other blobs for this block. If it completes a set and we also
/// have a block cached, return the `Availability` variant triggering block import.
/// Otherwise cache the blob sidecar.
@ -219,15 +220,8 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
&self,
gossip_blob: GossipVerifiedBlob<T>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
// Verify the KZG commitments.
let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() {
verify_kzg_for_blob(gossip_blob.to_blob(), kzg)?
} else {
return Err(AvailabilityCheckError::KzgNotInitialized);
};
self.availability_cache
.put_kzg_verified_blobs(kzg_verified_blob.block_root(), vec![kzg_verified_blob])
.put_kzg_verified_blobs(gossip_blob.block_root(), vec![gossip_blob.into_inner()])
}
/// Check if we have all the blobs for a block. Returns `Availability` which has information
@ -268,7 +262,8 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
verify_kzg_for_blob_list(&blob_list, kzg)?;
verify_kzg_for_blob_list(&blob_list, kzg)
.map_err(AvailabilityCheckError::Kzg)?;
Some(blob_list)
} else {
None
@ -375,8 +370,8 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
block_root: Hash256,
blob: &GossipVerifiedBlob<T>,
) {
let index = blob.as_blob().index;
let commitment = blob.as_blob().kzg_commitment;
let index = blob.index();
let commitment = blob.kzg_commitment();
self.processing_cache
.write()
.entry(block_root)


@ -16,10 +16,6 @@ pub enum Error {
BlobIndexInvalid(u64),
StoreError(store::Error),
DecodeError(ssz::DecodeError),
InconsistentBlobBlockRoots {
block_root: Hash256,
blob_block_root: Hash256,
},
ParentStateMissing(Hash256),
BlockReplayError(state_processing::BlockReplayError),
RebuildingStateCaches(BeaconStateError),
@ -47,8 +43,7 @@ impl Error {
Error::Kzg(_)
| Error::BlobIndexInvalid(_)
| Error::KzgCommitmentMismatch { .. }
| Error::KzgVerificationFailed
| Error::InconsistentBlobBlockRoots { .. } => ErrorCategory::Malicious,
| Error::KzgVerificationFailed => ErrorCategory::Malicious,
}
}
}
@ -76,3 +71,9 @@ impl From<state_processing::BlockReplayError> for Error {
Self::BlockReplayError(value)
}
}
impl From<KzgError> for Error {
fn from(value: KzgError) -> Self {
Self::Kzg(value)
}
}


@ -125,7 +125,10 @@ impl<T: EthSpec> PendingComponents<T> {
for maybe_blob in self.verified_blobs.iter() {
if maybe_blob.is_some() {
return maybe_blob.as_ref().map(|kzg_verified_blob| {
kzg_verified_blob.as_blob().slot.epoch(T::slots_per_epoch())
kzg_verified_blob
.as_blob()
.slot()
.epoch(T::slots_per_epoch())
});
}
}
@ -418,15 +421,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
let mut fixed_blobs = FixedVector::default();
// Initial check to ensure all provided blobs have a consistent block root.
for blob in kzg_verified_blobs {
let blob_block_root = blob.block_root();
if blob_block_root != block_root {
return Err(AvailabilityCheckError::InconsistentBlobBlockRoots {
block_root,
blob_block_root,
});
}
if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) {
*blob_opt = Some(blob);
}
@ -651,7 +646,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
OverflowKey::Blob(_, _) => {
KzgVerifiedBlob::<T::EthSpec>::from_ssz_bytes(value_bytes.as_slice())?
.as_blob()
.slot
.slot()
.epoch(T::EthSpec::slots_per_epoch())
}
};
@ -743,9 +738,7 @@ impl ssz::Decode for OverflowKey {
mod test {
use super::*;
use crate::{
blob_verification::{
validate_blob_sidecar_for_gossip, verify_kzg_for_blob, GossipVerifiedBlob,
},
blob_verification::{validate_blob_sidecar_for_gossip, GossipVerifiedBlob},
block_verification::PayloadVerificationOutcome,
block_verification_types::{AsBlock, BlockImportData},
data_availability_checker::STATE_LRU_CAPACITY,
@ -926,12 +919,13 @@ mod test {
}
info!(log, "done printing kzg commitments");
let gossip_verified_blobs = if let Some(blobs) = maybe_blobs {
Vec::from(blobs)
let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs {
let sidecars = BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap();
Vec::from(sidecars)
.into_iter()
.map(|signed_blob| {
let subnet = signed_blob.message.index;
validate_blob_sidecar_for_gossip(signed_blob, subnet, &harness.chain)
.map(|sidecar| {
let subnet = sidecar.index;
validate_blob_sidecar_for_gossip(sidecar, subnet, &harness.chain)
.expect("should validate blob")
})
.collect()
@ -1036,17 +1030,9 @@ mod test {
);
}
let kzg = harness
.chain
.kzg
.as_ref()
.cloned()
.expect("kzg should exist");
let mut kzg_verified_blobs = Vec::new();
for (blob_index, gossip_blob) in blobs.into_iter().enumerate() {
let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
kzg_verified_blobs.push(gossip_blob.into_inner());
let availability = cache
.put_kzg_verified_blobs(root, kzg_verified_blobs.clone())
.expect("should put blob");
@ -1072,9 +1058,7 @@ mod test {
let root = pending_block.import_data.block_root;
let mut kzg_verified_blobs = vec![];
for gossip_blob in blobs {
let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
kzg_verified_blobs.push(gossip_blob.into_inner());
let availability = cache
.put_kzg_verified_blobs(root, kzg_verified_blobs.clone())
.expect("should put blob");
@ -1198,20 +1182,11 @@ mod test {
assert!(cache.critical.read().store_keys.contains(&roots[0]));
assert!(cache.critical.read().store_keys.contains(&roots[1]));
let kzg = harness
.chain
.kzg
.as_ref()
.cloned()
.expect("kzg should exist");
let blobs_0 = pending_blobs.pop_front().expect("should have blobs");
let expected_blobs = blobs_0.len();
let mut kzg_verified_blobs = vec![];
for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() {
let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
kzg_verified_blobs.push(gossip_blob.into_inner());
let availability = cache
.put_kzg_verified_blobs(roots[0], kzg_verified_blobs.clone())
.expect("should put blob");
@ -1278,13 +1253,6 @@ mod test {
pending_blobs.push_back(blobs);
}
let kzg = harness
.chain
.kzg
.as_ref()
.cloned()
.expect("kzg should exist");
for _ in 0..(n_epochs * capacity) {
let pending_block = pending_blocks.pop_front().expect("should have block");
let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs");
@ -1295,9 +1263,7 @@ mod test {
let one_blob = pending_block_blobs
.pop()
.expect("should have at least one blob");
let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
.expect("kzg should verify");
let kzg_verified_blobs = vec![kzg_verified_blob];
let kzg_verified_blobs = vec![one_blob.into_inner()];
// generate random boolean
let block_first = (rand::random::<usize>() % 2) == 0;
if block_first {
@ -1418,13 +1384,6 @@ mod test {
pending_blobs.push_back(blobs);
}
let kzg = harness
.chain
.kzg
.as_ref()
.cloned()
.expect("kzg should exist");
let mut remaining_blobs = HashMap::new();
for _ in 0..(n_epochs * capacity) {
let pending_block = pending_blocks.pop_front().expect("should have block");
@ -1436,9 +1395,7 @@ mod test {
let one_blob = pending_block_blobs
.pop()
.expect("should have at least one blob");
let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
.expect("kzg should verify");
let kzg_verified_blobs = vec![kzg_verified_blob];
let kzg_verified_blobs = vec![one_blob.into_inner()];
// generate random boolean
let block_first = (rand::random::<usize>() % 2) == 0;
if block_first {
@ -1551,9 +1508,7 @@ mod test {
let additional_blobs = blobs.len();
let mut kzg_verified_blobs = vec![];
for (i, gossip_blob) in blobs.into_iter().enumerate() {
let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
kzg_verified_blobs.push(gossip_blob.into_inner());
let availability = recovered_cache
.put_kzg_verified_blobs(root, kzg_verified_blobs.clone())
.expect("should put blob");


@ -4,7 +4,7 @@ use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof};
/// Converts a blob ssz List object to an array to be used with the kzg
/// crypto library.
fn ssz_blob_to_crypto_blob<T: EthSpec>(blob: &Blob<T>) -> Result<KzgBlob, KzgError> {
KzgBlob::from_bytes(blob.as_ref())
KzgBlob::from_bytes(blob.as_ref()).map_err(Into::into)
}
/// Validate a single blob-commitment-proof triplet from a `BlobSidecar`.
@ -13,7 +13,8 @@ pub fn validate_blob<T: EthSpec>(
blob: &Blob<T>,
kzg_commitment: KzgCommitment,
kzg_proof: KzgProof,
) -> Result<bool, KzgError> {
) -> Result<(), KzgError> {
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_SINGLE_TIMES);
let kzg_blob = ssz_blob_to_crypto_blob::<T>(blob)?;
kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof)
}
@ -24,7 +25,8 @@ pub fn validate_blobs<T: EthSpec>(
expected_kzg_commitments: &[KzgCommitment],
blobs: Vec<&Blob<T>>,
kzg_proofs: &[KzgProof],
) -> Result<bool, KzgError> {
) -> Result<(), KzgError> {
let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES);
let blobs = blobs
.into_iter()
.map(|blob| ssz_blob_to_crypto_blob::<T>(blob))
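
Editor's note: both validation helpers in this file now return Result<(), KzgError> instead of Result<bool, KzgError>, so a failed proof surfaces as an error rather than as Ok(false). A minimal sketch of the call-site change, assuming only the KzgError type (the import alias and the helpers' leading kzg parameter are cut from the hunks above and are assumptions):

use kzg::Error as KzgError; // path assumed to match this file's imports

// `verify` stands in for a call to validate_blob / validate_blobs with whatever
// arguments the caller already has in scope.
fn require_valid(verify: impl FnOnce() -> Result<(), KzgError>) -> Result<(), KzgError> {
    // Before: `if !verify()? { /* hand-roll an "invalid proof" error */ }`
    // After: an invalid proof is already an Err(KzgError), so `?` propagation suffices.
    verify()
}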

View File

@ -57,7 +57,7 @@ pub mod validator_pubkey_cache;
pub use self::beacon_chain::{
AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse,
BeaconBlockResponseType, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig,
WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,

View File

@ -1004,6 +1004,14 @@ lazy_static! {
"beacon_blobs_sidecar_gossip_verification_seconds",
"Full runtime of blob sidecars gossip verification"
);
pub static ref BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION: Result<Histogram> = try_create_histogram(
"blob_sidecar_inclusion_proof_verification_seconds",
"Time taken to verify blob sidecar inclusion proof"
);
pub static ref BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: Result<Histogram> = try_create_histogram(
"blob_sidecar_inclusion_proof_computation_seconds",
"Time taken to compute blob sidecar inclusion proof"
);
}
// Fifth lazy-static block is used to account for macro recursion limit.
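
Editor's note: the two new histograms are meant to be used with the same start_timer guard pattern shown for KZG_VERIFICATION_SINGLE_TIMES in the kzg hunk above. A hedged sketch; only the metric names and the timer pattern come from this commit, the closures are placeholders for the real proof routines:

// Illustrative wrappers from the perspective of code inside the beacon_chain crate.
fn time_inclusion_proof_computation<R>(compute: impl FnOnce() -> R) -> R {
    let _timer =
        crate::metrics::start_timer(&crate::metrics::BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION);
    compute()
}

fn time_inclusion_proof_verification<R>(verify: impl FnOnce() -> R) -> R {
    let _timer =
        crate::metrics::start_timer(&crate::metrics::BLOB_SIDECAR_INCLUSION_PROOF_VERIFICATION);
    verify()
}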

View File

@ -5,8 +5,7 @@
use std::collections::{HashMap, HashSet};
use std::marker::PhantomData;
use std::sync::Arc;
use types::{BlobSidecar, EthSpec, Hash256, Slot};
use types::{BlobSidecar, EthSpec, Slot};
#[derive(Debug, PartialEq)]
pub enum Error {
@ -29,8 +28,8 @@ pub enum Error {
/// like checking the proposer signature.
pub struct ObservedBlobSidecars<T: EthSpec> {
finalized_slot: Slot,
/// Stores all received blob indices for a given `(Root, Slot)` tuple.
items: HashMap<(Hash256, Slot), HashSet<u64>>,
/// Stores all received blob indices for a given `(ValidatorIndex, Slot)` tuple.
items: HashMap<(u64, Slot), HashSet<u64>>,
_phantom: PhantomData<T>,
}
@ -46,16 +45,16 @@ impl<E: EthSpec> Default for ObservedBlobSidecars<E> {
}
impl<T: EthSpec> ObservedBlobSidecars<T> {
/// Observe the `blob_sidecar` at (`blob_sidecar.block_root, blob_sidecar.slot`).
/// Observe the `blob_sidecar` at (`blob_sidecar.block_proposer_index, blob_sidecar.slot`).
/// This will update `self` so future calls to it indicate that this `blob_sidecar` is known.
///
/// The supplied `blob_sidecar` **MUST** have completed proposer signature verification.
pub fn observe_sidecar(&mut self, blob_sidecar: &Arc<BlobSidecar<T>>) -> Result<bool, Error> {
pub fn observe_sidecar(&mut self, blob_sidecar: &BlobSidecar<T>) -> Result<bool, Error> {
self.sanitize_blob_sidecar(blob_sidecar)?;
let did_not_exist = self
.items
.entry((blob_sidecar.block_root, blob_sidecar.slot))
.entry((blob_sidecar.block_proposer_index(), blob_sidecar.slot()))
.or_insert_with(|| HashSet::with_capacity(T::max_blobs_per_block()))
.insert(blob_sidecar.index);
@ -63,23 +62,23 @@ impl<T: EthSpec> ObservedBlobSidecars<T> {
}
/// Returns `true` if the `blob_sidecar` has already been observed in the cache within the prune window.
pub fn is_known(&self, blob_sidecar: &Arc<BlobSidecar<T>>) -> Result<bool, Error> {
pub fn is_known(&self, blob_sidecar: &BlobSidecar<T>) -> Result<bool, Error> {
self.sanitize_blob_sidecar(blob_sidecar)?;
let is_known = self
.items
.get(&(blob_sidecar.block_root, blob_sidecar.slot))
.get(&(blob_sidecar.block_proposer_index(), blob_sidecar.slot()))
.map_or(false, |set| set.contains(&blob_sidecar.index));
Ok(is_known)
}
fn sanitize_blob_sidecar(&self, blob_sidecar: &Arc<BlobSidecar<T>>) -> Result<(), Error> {
fn sanitize_blob_sidecar(&self, blob_sidecar: &BlobSidecar<T>) -> Result<(), Error> {
if blob_sidecar.index >= T::max_blobs_per_block() as u64 {
return Err(Error::InvalidBlobIndex(blob_sidecar.index));
}
let finalized_slot = self.finalized_slot;
if finalized_slot > 0 && blob_sidecar.slot <= finalized_slot {
if finalized_slot > 0 && blob_sidecar.slot() <= finalized_slot {
return Err(Error::FinalizedBlob {
slot: blob_sidecar.slot,
slot: blob_sidecar.slot(),
finalized_slot,
});
}
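
Editor's note: the cache is now keyed by (block_proposer_index, slot) instead of (block_root, slot), so a proposer publishing sidecars with the same blob index for the same slot under two different block roots is deduplicated at gossip instead of being treated as two distinct sidecars. A minimal standalone sketch of that keying, using plain std types rather than the real ObservedBlobSidecars (which additionally enforces max_blobs_per_block and the finalized-slot bound):

use std::collections::{HashMap, HashSet};

// (proposer_index, slot) -> blob indices already seen.
#[derive(Default)]
struct SeenSidecars {
    items: HashMap<(u64, u64), HashSet<u64>>,
}

impl SeenSidecars {
    /// Returns `true` only the first time a (proposer, slot, blob index) triple is observed.
    fn observe(&mut self, proposer_index: u64, slot: u64, blob_index: u64) -> bool {
        self.items
            .entry((proposer_index, slot))
            .or_default()
            .insert(blob_index)
    }
}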
@ -101,14 +100,15 @@ impl<T: EthSpec> ObservedBlobSidecars<T> {
#[cfg(test)]
mod tests {
use super::*;
use types::{BlobSidecar, Hash256, MainnetEthSpec};
use std::sync::Arc;
use types::{BlobSidecar, MainnetEthSpec};
type E = MainnetEthSpec;
fn get_blob_sidecar(slot: u64, block_root: Hash256, index: u64) -> Arc<BlobSidecar<E>> {
fn get_blob_sidecar(slot: u64, proposer_index: u64, index: u64) -> Arc<BlobSidecar<E>> {
let mut blob_sidecar = BlobSidecar::empty();
blob_sidecar.block_root = block_root;
blob_sidecar.slot = slot.into();
blob_sidecar.signed_block_header.message.slot = slot.into();
blob_sidecar.signed_block_header.message.proposer_index = proposer_index;
blob_sidecar.index = index;
Arc::new(blob_sidecar)
}
@ -121,8 +121,8 @@ mod tests {
assert_eq!(cache.items.len(), 0, "no slots should be present");
// Slot 0, index 0
let block_root_a = Hash256::random();
let sidecar_a = get_blob_sidecar(0, block_root_a, 0);
let proposer_index_a = 420;
let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0);
assert_eq!(
cache.observe_sidecar(&sidecar_a),
@ -138,12 +138,12 @@ mod tests {
assert_eq!(
cache.items.len(),
1,
"only one (slot, root) tuple should be present"
"only one (validator_index, slot) tuple should be present"
);
assert_eq!(
cache
.items
.get(&(block_root_a, Slot::new(0)))
.get(&(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present")
.len(),
1,
@ -161,7 +161,7 @@ mod tests {
assert_eq!(
cache
.items
.get(&(block_root_a, Slot::new(0)))
.get(&(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present")
.len(),
1,
@ -185,7 +185,7 @@ mod tests {
*/
// First slot of finalized epoch
let block_b = get_blob_sidecar(E::slots_per_epoch(), Hash256::random(), 0);
let block_b = get_blob_sidecar(E::slots_per_epoch(), 419, 0);
assert_eq!(
cache.observe_sidecar(&block_b),
@ -205,8 +205,8 @@ mod tests {
let three_epochs = E::slots_per_epoch() * 3;
// First slot of finalized epoch
let block_root_b = Hash256::random();
let block_b = get_blob_sidecar(three_epochs, block_root_b, 0);
let proposer_index_b = 421;
let block_b = get_blob_sidecar(three_epochs, proposer_index_b, 0);
assert_eq!(
cache.observe_sidecar(&block_b),
@ -218,7 +218,7 @@ mod tests {
assert_eq!(
cache
.items
.get(&(block_root_b, Slot::new(three_epochs)))
.get(&(proposer_index_b, Slot::new(three_epochs)))
.expect("the three epochs slot should be present")
.len(),
1,
@ -242,7 +242,7 @@ mod tests {
assert_eq!(
cache
.items
.get(&(block_root_b, Slot::new(three_epochs)))
.get(&(proposer_index_b, Slot::new(three_epochs)))
.expect("the three epochs slot should be present")
.len(),
1,
@ -255,8 +255,8 @@ mod tests {
let mut cache = ObservedBlobSidecars::default();
// Slot 0, index 0
let block_root_a = Hash256::random();
let sidecar_a = get_blob_sidecar(0, block_root_a, 0);
let proposer_index_a = 420;
let sidecar_a = get_blob_sidecar(0, proposer_index_a, 0);
assert_eq!(
cache.is_known(&sidecar_a),
@ -287,7 +287,7 @@ mod tests {
assert_eq!(
cache
.items
.get(&(block_root_a, Slot::new(0)))
.get(&(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present")
.len(),
1,
@ -296,8 +296,8 @@ mod tests {
// Slot 1, proposer 0
let block_root_b = Hash256::random();
let sidecar_b = get_blob_sidecar(1, block_root_b, 0);
let proposer_index_b = 421;
let sidecar_b = get_blob_sidecar(1, proposer_index_b, 0);
assert_eq!(
cache.is_known(&sidecar_b),
@ -325,7 +325,7 @@ mod tests {
assert_eq!(
cache
.items
.get(&(block_root_a, Slot::new(0)))
.get(&(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present")
.len(),
1,
@ -334,7 +334,7 @@ mod tests {
assert_eq!(
cache
.items
.get(&(block_root_b, Slot::new(1)))
.get(&(proposer_index_b, Slot::new(1)))
.expect("slot zero should be present")
.len(),
1,
@ -342,7 +342,7 @@ mod tests {
);
// Slot 0, index 1
let sidecar_c = get_blob_sidecar(0, block_root_a, 1);
let sidecar_c = get_blob_sidecar(0, proposer_index_a, 1);
assert_eq!(
cache.is_known(&sidecar_c),
@ -370,7 +370,7 @@ mod tests {
assert_eq!(
cache
.items
.get(&(block_root_a, Slot::new(0)))
.get(&(proposer_index_a, Slot::new(0)))
.expect("slot zero should be present")
.len(),
2,
@ -379,7 +379,7 @@ mod tests {
// Try adding an out of bounds index
let invalid_index = E::max_blobs_per_block() as u64;
let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index);
let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index);
assert_eq!(
cache.observe_sidecar(&sidecar_d),
Err(Error::InvalidBlobIndex(invalid_index)),

View File

@ -1,7 +1,7 @@
use crate::block_verification_types::{AsBlock, RpcBlock};
use crate::observed_operations::ObservationOutcome;
pub use crate::persisted_beacon_chain::PersistedBeaconChain;
use crate::BeaconBlockResponseType;
use crate::BeaconBlockResponseWrapper;
pub use crate::{
beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
migrate::MigratorConfig,
@ -33,8 +33,8 @@ use int_to_bytes::int_to_bytes32;
use kzg::{Kzg, TrustedSetup};
use merkle_proof::MerkleTree;
use operation_pool::ReceivedPreCapella;
use parking_lot::Mutex;
use parking_lot::RwLockWriteGuard;
use parking_lot::{Mutex, RwLock};
use rand::rngs::StdRng;
use rand::Rng;
use rand::SeedableRng;
@ -52,7 +52,6 @@ use state_processing::{
use std::borrow::Cow;
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::marker::PhantomData;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
@ -567,7 +566,6 @@ where
runtime: self.runtime,
mock_execution_layer: self.mock_execution_layer,
mock_builder: None,
blob_signature_cache: <_>::default(),
rng: make_rng(),
}
}
@ -623,29 +621,9 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> {
pub mock_execution_layer: Option<MockExecutionLayer<T::EthSpec>>,
pub mock_builder: Option<Arc<MockBuilder<T::EthSpec>>>,
/// Cache for blob signature because we don't need them for import, but we do need them
/// to test gossip validation. We always make them during block production but drop them
/// before storing them in the db.
pub blob_signature_cache: Arc<RwLock<HashMap<BlobSignatureKey, Signature>>>,
pub rng: Mutex<StdRng>,
}
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
pub struct BlobSignatureKey {
block_root: Hash256,
blob_index: u64,
}
impl BlobSignatureKey {
pub fn new(block_root: Hash256, blob_index: u64) -> Self {
Self {
block_root,
blob_index,
}
}
}
pub type CommitteeAttestations<E> = Vec<(Attestation<E>, SubnetId)>;
pub type HarnessAttestations<E> =
Vec<(CommitteeAttestations<E>, Option<SignedAggregateAndProof<E>>)>;
@ -845,28 +823,9 @@ where
&self,
state: BeaconState<E>,
slot: Slot,
) -> (
SignedBlockContentsTuple<E, BlindedPayload<E>>,
BeaconState<E>,
) {
) -> (SignedBlindedBeaconBlock<E>, BeaconState<E>) {
let (unblinded, new_state) = self.make_block(state, slot).await;
let maybe_blinded_blob_sidecars = unblinded.1.map(|blob_sidecar_list| {
VariableList::new(
blob_sidecar_list
.into_iter()
.map(|blob_sidecar| {
let blinded_sidecar: BlindedBlobSidecar = blob_sidecar.message.into();
SignedSidecar {
message: Arc::new(blinded_sidecar),
signature: blob_sidecar.signature,
_phantom: PhantomData,
}
})
.collect(),
)
.unwrap()
});
((unblinded.0.into(), maybe_blinded_blob_sidecars), new_state)
(unblinded.0.into(), new_state)
}
/// Returns a newly created block, signed by the proposer for the given slot.
@ -874,7 +833,7 @@ where
&self,
mut state: BeaconState<E>,
slot: Slot,
) -> (SignedBlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
) -> (SignedBlockContentsTuple<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
@ -892,7 +851,7 @@ where
let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot);
let BeaconBlockResponseType::Full(block_response) = self
let BeaconBlockResponseWrapper::Full(block_response) = self
.chain
.produce_block_on_state(
state,
@ -916,17 +875,12 @@ where
&self.spec,
);
let block_contents: SignedBlockContentsTuple<E, FullPayload<E>> = match &signed_block {
let block_contents: SignedBlockContentsTuple<E> = match &signed_block {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => (signed_block, None),
SignedBeaconBlock::Deneb(_) => (
signed_block,
block_response
.maybe_side_car
.map(|blobs| self.sign_blobs(blobs, &block_response.state, proposer_index)),
),
SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items),
};
(block_contents, block_response.state)
@ -938,7 +892,7 @@ where
&self,
mut state: BeaconState<E>,
slot: Slot,
) -> (SignedBlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
) -> (SignedBlockContentsTuple<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
@ -958,7 +912,7 @@ where
let pre_state = state.clone();
let BeaconBlockResponseType::Full(block_response) = self
let BeaconBlockResponseWrapper::Full(block_response) = self
.chain
.produce_block_on_state(
state,
@ -982,37 +936,12 @@ where
&self.spec,
);
let block_contents: SignedBlockContentsTuple<E, FullPayload<E>> = match &signed_block {
let block_contents: SignedBlockContentsTuple<E> = match &signed_block {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => (signed_block, None),
SignedBeaconBlock::Deneb(_) => {
if let Some(blobs) = block_response.maybe_side_car {
let signed_blobs: SignedSidecarList<E, BlobSidecar<E>> = Vec::from(blobs)
.into_iter()
.map(|blob| {
blob.sign(
&self.validator_keypairs[proposer_index].sk,
&block_response.state.fork(),
block_response.state.genesis_validators_root(),
&self.spec,
)
})
.collect::<Vec<_>>()
.into();
let mut guard = self.blob_signature_cache.write();
for blob in &signed_blobs {
guard.insert(
BlobSignatureKey::new(blob.message.block_root, blob.message.index),
blob.signature.clone(),
);
}
(signed_block, Some(signed_blobs))
} else {
(signed_block, None)
}
}
SignedBeaconBlock::Deneb(_) => (signed_block, block_response.blob_items),
};
(block_contents, pre_state)
}
@ -1051,35 +980,6 @@ where
)
}
/// Sign blobs, and cache their signatures.
pub fn sign_blobs(
&self,
blobs: BlobSidecarList<E>,
state: &BeaconState<E>,
proposer_index: usize,
) -> SignedSidecarList<E, BlobSidecar<E>> {
let signed_blobs: SignedSidecarList<E, BlobSidecar<E>> = Vec::from(blobs)
.into_iter()
.map(|blob| {
blob.sign(
&self.validator_keypairs[proposer_index].sk,
&state.fork(),
state.genesis_validators_root(),
&self.spec,
)
})
.collect::<Vec<_>>()
.into();
let mut guard = self.blob_signature_cache.write();
for blob in &signed_blobs {
guard.insert(
BlobSignatureKey::new(blob.message.block_root, blob.message.index),
blob.signature.clone(),
);
}
signed_blobs
}
/// Produces an "unaggregated" attestation for the given `slot` and `index` that attests to
/// `beacon_block_root`. The provided `state` should match the `block.state_root` for the
/// `block` identified by `beacon_block_root`.
@ -1837,7 +1737,7 @@ where
state: BeaconState<E>,
slot: Slot,
block_modifier: impl FnOnce(&mut BeaconBlock<E>),
) -> (SignedBlockContentsTuple<E, FullPayload<E>>, BeaconState<E>) {
) -> (SignedBlockContentsTuple<E>, BeaconState<E>) {
assert_ne!(slot, 0, "can't produce a block at slot 0");
assert!(slot >= state.slot());
@ -1935,24 +1835,20 @@ where
&self,
slot: Slot,
block_root: Hash256,
block_contents: SignedBlockContentsTuple<E, FullPayload<E>>,
block_contents: SignedBlockContentsTuple<E>,
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
self.set_current_slot(slot);
let (block, blobs) = block_contents;
// Note: we are just dropping signatures here and skipping signature verification.
let blobs_without_signatures = blobs.map(|blobs| {
VariableList::from(
blobs
.into_iter()
.map(|blob| blob.message)
.collect::<Vec<_>>(),
)
});
let (block, blob_items) = block_contents;
let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs))
.transpose()
.unwrap();
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block_root,
RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(),
RpcBlock::new(Some(block_root), Arc::new(block), sidecars).unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
@ -1965,24 +1861,20 @@ where
pub async fn process_block_result(
&self,
block_contents: SignedBlockContentsTuple<E, FullPayload<E>>,
block_contents: SignedBlockContentsTuple<E>,
) -> Result<SignedBeaconBlockHash, BlockError<E>> {
let (block, blobs) = block_contents;
// Note: we are just dropping signatures here and skipping signature verification.
let blobs_without_signatures = blobs.map(|blobs| {
VariableList::from(
blobs
.into_iter()
.map(|blob| blob.message)
.collect::<Vec<_>>(),
)
});
let (block, blob_items) = block_contents;
let sidecars = blob_items
.map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs))
.transpose()
.unwrap();
let block_root = block.canonical_root();
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block_root,
RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(),
RpcBlock::new(Some(block_root), Arc::new(block), sidecars).unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
@ -2051,7 +1943,7 @@ where
) -> Result<
(
SignedBeaconBlockHash,
SignedBlockContentsTuple<E, FullPayload<E>>,
SignedBlockContentsTuple<E>,
BeaconState<E>,
),
BlockError<E>,
@ -2603,8 +2495,6 @@ pub fn generate_rand_block_and_blobs<E: EthSpec>(
blobs,
} = bundle;
let block_root = block.canonical_root();
for (index, ((blob, kzg_commitment), kzg_proof)) in blobs
.into_iter()
.zip(commitments.into_iter())
@ -2612,14 +2502,16 @@ pub fn generate_rand_block_and_blobs<E: EthSpec>(
.enumerate()
{
blob_sidecars.push(BlobSidecar {
block_root,
index: index as u64,
slot: block.slot(),
block_parent_root: block.parent_root(),
proposer_index: block.message().proposer_index(),
blob: blob.clone(),
kzg_commitment,
kzg_proof,
signed_block_header: block.signed_block_header(),
kzg_commitment_inclusion_proof: block
.message()
.body()
.kzg_commitment_merkle_proof(index)
.unwrap(),
});
}
}
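
Editor's note: sidecars are now tied to their block by the signed block header plus a per-commitment Merkle inclusion proof, either field-by-field as in the loop above or in one shot via the batch helper used elsewhere in this harness. A hedged sketch of the batch path; the argument order follows the build_sidecars(blobs, &block, proofs) calls in this diff and error handling is simplified:

// Couple a block's blobs and proofs into BlobSidecars carrying the signed header
// and inclusion proofs. Panics on invalid input for brevity.
fn sidecars_for_block<E: EthSpec>(
    block: &SignedBeaconBlock<E>,
    blobs: BlobsList<E>,
    kzg_proofs: KzgProofs<E>,
) -> BlobSidecarList<E> {
    BlobSidecar::build_sidecars(blobs, block, kzg_proofs)
        .expect("blobs, proofs and commitments should line up")
}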

View File

@ -1,7 +1,6 @@
#![cfg(not(debug_assertions))]
// #![cfg(not(debug_assertions))]
use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
use beacon_chain::test_utils::BlobSignatureKey;
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock,
@ -77,10 +76,8 @@ async fn get_chain_segment() -> (Vec<BeaconSnapshot<E>>, Vec<Option<BlobSidecarL
(segment, segment_blobs)
}
async fn get_chain_segment_with_signed_blobs() -> (
Vec<BeaconSnapshot<E>>,
Vec<Option<VariableList<SignedBlobSidecar<E>, <E as EthSpec>::MaxBlobsPerBlock>>>,
) {
async fn get_chain_segment_with_blob_sidecars(
) -> (Vec<BeaconSnapshot<E>>, Vec<Option<BlobSidecarList<E>>>) {
let harness = get_harness(VALIDATOR_COUNT);
harness
@ -111,27 +108,11 @@ async fn get_chain_segment_with_signed_blobs() -> (
beacon_block: Arc::new(full_block),
beacon_state: snapshot.beacon_state,
});
let signed_blobs = harness
let blob_sidecars = harness
.chain
.get_blobs(&snapshot.beacon_block_root)
.unwrap()
.into_iter()
.map(|blob| {
let block_root = blob.block_root;
let blob_index = blob.index;
SignedBlobSidecar {
message: blob,
signature: harness
.blob_signature_cache
.read()
.get(&BlobSignatureKey::new(block_root, blob_index))
.unwrap()
.clone(),
_phantom: PhantomData,
}
})
.collect::<Vec<_>>();
segment_blobs.push(Some(VariableList::from(signed_blobs)))
.unwrap();
segment_blobs.push(Some(blob_sidecars))
}
(segment, segment_blobs)
}
@ -159,7 +140,7 @@ fn chain_segment_blocks(
) -> Vec<RpcBlock<E>> {
chain_segment
.iter()
.zip(blobs.into_iter())
.zip(blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
@ -214,34 +195,30 @@ fn update_parent_roots(
let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct();
*block.parent_root_mut() = root;
let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature));
let new_child_root = new_child.canonical_root();
child.beacon_block = new_child;
if let Some(blobs) = child_blobs {
update_blob_roots(new_child_root, blobs);
update_blob_signed_header(&new_child, blobs);
}
child.beacon_block = new_child;
}
}
}
fn update_blob_roots<E: EthSpec>(block_root: Hash256, blobs: &mut BlobSidecarList<E>) {
fn update_blob_signed_header<E: EthSpec>(
signed_block: &SignedBeaconBlock<E>,
blobs: &mut BlobSidecarList<E>,
) {
for old_blob_sidecar in blobs.iter_mut() {
let index = old_blob_sidecar.index;
let slot = old_blob_sidecar.slot;
let block_parent_root = old_blob_sidecar.block_parent_root;
let proposer_index = old_blob_sidecar.proposer_index;
let blob = old_blob_sidecar.blob.clone();
let kzg_commitment = old_blob_sidecar.kzg_commitment;
let kzg_proof = old_blob_sidecar.kzg_proof;
let new_blob = Arc::new(BlobSidecar::<E> {
block_root,
index,
slot,
block_parent_root,
proposer_index,
blob,
kzg_commitment,
kzg_proof,
index: old_blob_sidecar.index,
blob: old_blob_sidecar.blob.clone(),
kzg_commitment: old_blob_sidecar.kzg_commitment,
kzg_proof: old_blob_sidecar.kzg_proof,
signed_block_header: signed_block.signed_block_header(),
kzg_commitment_inclusion_proof: signed_block
.message()
.body()
.kzg_commitment_merkle_proof(old_blob_sidecar.index as usize)
.unwrap(),
});
*old_blob_sidecar = new_blob;
}
@ -253,7 +230,6 @@ async fn chain_segment_full_segment() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.map(|block| block.into())
.collect();
harness
@ -292,7 +268,6 @@ async fn chain_segment_varying_chunk_size() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.map(|block| block.into())
.collect();
harness
@ -334,7 +309,6 @@ async fn chain_segment_non_linear_parent_roots() {
*/
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.map(|block| block.into())
.collect();
blocks.remove(2);
@ -355,7 +329,6 @@ async fn chain_segment_non_linear_parent_roots() {
*/
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.map(|block| block.into())
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
@ -393,7 +366,6 @@ async fn chain_segment_non_linear_slots() {
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.map(|block| block.into())
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = Slot::new(0);
@ -420,7 +392,6 @@ async fn chain_segment_non_linear_slots() {
let mut blocks: Vec<RpcBlock<E>> = chain_segment_blocks(&chain_segment, &chain_segment_blobs)
.into_iter()
.map(|block| block.into())
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = blocks[2].slot();
@ -879,7 +850,7 @@ fn unwrap_err<T, E>(result: Result<T, E>) -> E {
#[tokio::test]
async fn block_gossip_verification() {
let harness = get_harness(VALIDATOR_COUNT);
let (chain_segment, chain_segment_blobs) = get_chain_segment_with_signed_blobs().await;
let (chain_segment, chain_segment_blobs) = get_chain_segment_with_blob_sidecars().await;
let block_index = CHAIN_SEGMENT_LENGTH - 2;
@ -909,12 +880,12 @@ async fn block_gossip_verification() {
)
.await
.expect("should import valid gossip verified block");
if let Some(blobs) = blobs_opt {
for blob in blobs {
let blob_index = blob.message.index;
if let Some(blob_sidecars) = blobs_opt {
for blob_sidecar in blob_sidecars {
let blob_index = blob_sidecar.index;
let gossip_verified = harness
.chain
.verify_blob_sidecar_for_gossip(blob.clone(), blob_index)
.verify_blob_sidecar_for_gossip(blob_sidecar.clone(), blob_index)
.expect("should obtain gossip verified blob");
harness
@ -948,7 +919,7 @@ async fn block_gossip_verification() {
*block.slot_mut() = expected_block_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
BlockError::FutureSlot {
present_slot,
block_slot,
@ -982,7 +953,7 @@ async fn block_gossip_verification() {
*block.slot_mut() = expected_finalized_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
BlockError::WouldRevertFinalizedSlot {
block_slot,
finalized_slot,
@ -1012,9 +983,10 @@ async fn block_gossip_verification() {
unwrap_err(
harness
.chain
.verify_block_for_gossip(
Arc::new(SignedBeaconBlock::from_block(block, junk_signature())).into()
)
.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(
block,
junk_signature()
)))
.await
),
BlockError::ProposalSignatureInvalid
@ -1039,7 +1011,7 @@ async fn block_gossip_verification() {
*block.parent_root_mut() = parent_root;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
BlockError::ParentUnknown(block)
if block.parent_root() == parent_root
),
@ -1065,7 +1037,7 @@ async fn block_gossip_verification() {
*block.parent_root_mut() = parent_root;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
BlockError::NotFinalizedDescendant { block_parent_root }
if block_parent_root == parent_root
),
@ -1091,7 +1063,6 @@ async fn block_gossip_verification() {
.0;
let expected_proposer = block.proposer_index();
let other_proposer = (0..VALIDATOR_COUNT as u64)
.into_iter()
.find(|i| *i != block.proposer_index())
.expect("there must be more than one validator in this test");
*block.proposer_index_mut() = other_proposer;
@ -1103,7 +1074,7 @@ async fn block_gossip_verification() {
);
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
BlockError::IncorrectBlockProposer {
block,
local_shuffling,
@ -1115,7 +1086,7 @@ async fn block_gossip_verification() {
// Check to ensure that we registered this is a valid block from this proposer.
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
BlockError::BlockIsAlreadyKnown,
),
"should register any valid signature against the proposer, even if the block failed later verification"
@ -1141,10 +1112,9 @@ async fn block_gossip_verification() {
matches!(
harness
.chain
.verify_block_for_gossip(block.clone().into())
.verify_block_for_gossip(block.clone())
.await
.err()
.expect("should error when processing known block"),
.expect_err("should error when processing known block"),
BlockError::BlockIsAlreadyKnown
),
"the second proposal by this validator should be rejected"
@ -1178,12 +1148,14 @@ async fn verify_block_for_gossip_slashing_detection() {
.await
.unwrap();
if let Some(blobs) = blobs1 {
for blob in blobs {
let blob_index = blob.message.index;
if let Some((kzg_proofs, blobs)) = blobs1 {
let sidecars =
BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap();
for sidecar in sidecars {
let blob_index = sidecar.index;
let verified_blob = harness
.chain
.verify_blob_sidecar_for_gossip(blob, blob_index)
.verify_blob_sidecar_for_gossip(sidecar, blob_index)
.unwrap();
harness
.chain
@ -1368,10 +1340,9 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!(
harness
.chain
.verify_block_for_gossip(Arc::new(base_block.clone()).into())
.verify_block_for_gossip(Arc::new(base_block.clone()))
.await
.err()
.expect("should error when processing base block"),
.expect_err("should error when processing base block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
@ -1389,8 +1360,7 @@ async fn add_base_block_to_altair_chain() {
|| Ok(()),
)
.await
.err()
.expect("should error when processing base block"),
.expect_err("should error when processing base block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Altair,
object_fork: ForkName::Base,
@ -1506,10 +1476,9 @@ async fn add_altair_block_to_base_chain() {
assert!(matches!(
harness
.chain
.verify_block_for_gossip(Arc::new(altair_block.clone()).into())
.verify_block_for_gossip(Arc::new(altair_block.clone()))
.await
.err()
.expect("should error when processing altair block"),
.expect_err("should error when processing altair block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
@ -1527,8 +1496,7 @@ async fn add_altair_block_to_base_chain() {
|| Ok(()),
)
.await
.err()
.expect("should error when processing altair block"),
.expect_err("should error when processing altair block"),
BlockError::InconsistentFork(InconsistentFork {
fork_at_slot: ForkName::Base,
object_fork: ForkName::Altair,
@ -1584,10 +1552,12 @@ async fn import_duplicate_block_unrealized_justification() {
// The store's justified checkpoint must still be at epoch 0, while unrealized justification
// must be at epoch 1.
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1);
drop(fc);
{
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
assert_eq!(fc.unrealized_justified_checkpoint().epoch, 1);
drop(fc);
}
// Produce a block to justify epoch 2.
let state = harness.get_current_state();
@ -1602,10 +1572,10 @@ async fn import_duplicate_block_unrealized_justification() {
let notify_execution_layer = NotifyExecutionLayer::Yes;
let verified_block1 = block
.clone()
.into_execution_pending_block(block_root, &chain, notify_execution_layer)
.into_execution_pending_block(block_root, chain, notify_execution_layer)
.unwrap();
let verified_block2 = block
.into_execution_pending_block(block_root, &chain, notify_execution_layer)
.into_execution_pending_block(block_root, chain, notify_execution_layer)
.unwrap();
// Import the first block, simulating a block processed via a finalized chain segment.
@ -1614,18 +1584,20 @@ async fn import_duplicate_block_unrealized_justification() {
.unwrap();
// Unrealized justification should NOT have updated.
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
let unrealized_justification = fc.unrealized_justified_checkpoint();
assert_eq!(unrealized_justification.epoch, 2);
// The fork choice node for the block should have unrealized justification.
let fc_block = fc.get_block(&block_root).unwrap();
assert_eq!(
fc_block.unrealized_justified_checkpoint,
Some(unrealized_justification)
);
drop(fc);
let unrealized_justification = {
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
let unrealized_justification = fc.unrealized_justified_checkpoint();
assert_eq!(unrealized_justification.epoch, 2);
// The fork choice node for the block should have unrealized justification.
let fc_block = fc.get_block(&block_root).unwrap();
assert_eq!(
fc_block.unrealized_justified_checkpoint,
Some(unrealized_justification)
);
drop(fc);
unrealized_justification
};
// Import the second verified block, simulating a block processed via RPC.
import_execution_pending_block(chain.clone(), verified_block2)
@ -1633,15 +1605,16 @@ async fn import_duplicate_block_unrealized_justification() {
.unwrap();
// Unrealized justification should still be updated.
let fc = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc.justified_checkpoint().epoch, 0);
let fc3 = chain.canonical_head.fork_choice_read_lock();
assert_eq!(fc3.justified_checkpoint().epoch, 0);
assert_eq!(
fc.unrealized_justified_checkpoint(),
fc3.unrealized_justified_checkpoint(),
unrealized_justification
);
// The fork choice node for the block should still have the unrealized justified checkpoint.
let fc_block = fc.get_block(&block_root).unwrap();
let fc_block = fc3.get_block(&block_root).unwrap();
drop(fc3);
assert_eq!(
fc_block.unrealized_justified_checkpoint,
Some(unrealized_justification)

View File

@ -1,13 +1,11 @@
use beacon_chain::blob_verification::GossipVerifiedBlob;
use beacon_chain::test_utils::BeaconChainHarness;
use bls::Signature;
use eth2::types::{EventKind, SseBlobSidecar};
use rand::rngs::StdRng;
use rand::SeedableRng;
use std::marker::PhantomData;
use std::sync::Arc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec, SignedBlobSidecar};
use types::{BlobSidecar, EthSpec, ForkName, MinimalEthSpec};
type E = MinimalEthSpec;
@ -29,14 +27,10 @@ async fn blob_sidecar_event_on_process_gossip_blob() {
// build and process a gossip verified blob
let kzg = harness.chain.kzg.as_ref().unwrap();
let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
let signed_sidecar = SignedBlobSidecar {
message: BlobSidecar::random_valid(&mut rng, kzg)
.map(Arc::new)
.unwrap(),
signature: Signature::empty(),
_phantom: PhantomData,
};
let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(signed_sidecar);
let sidecar = BlobSidecar::random_valid(&mut rng, kzg)
.map(Arc::new)
.unwrap();
let gossip_verified_blob = GossipVerifiedBlob::__assumed_valid(sidecar);
let expected_sse_blobs = SseBlobSidecar::from_blob_sidecar(gossip_verified_blob.as_blob());
let _ = harness
@ -83,7 +77,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() {
let _ = harness
.chain
.process_rpc_blobs(blob_1.slot, blob_1.block_root, blobs)
.process_rpc_blobs(blob_1.slot(), blob_1.block_root(), blobs)
.await
.unwrap();
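
Editor's note: with the block_root, slot and proposer fields removed from BlobSidecar, the accessors used here derive from the embedded signed block header. A hedged sketch of the mapping as exercised in this diff; the exact derivation of block_root() (a header-root computation) is an assumption, while the slot and proposer mappings follow the header fields set in the observed-sidecars tests above:

fn describe<E: EthSpec>(sidecar: &BlobSidecar<E>) -> (Slot, u64, Hash256) {
    (
        sidecar.slot(),                 // mirrors signed_block_header.message.slot
        sidecar.block_proposer_index(), // mirrors signed_block_header.message.proposer_index
        sidecar.block_root(),           // derived from the header, no longer a stored field
    )
}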

View File

@ -591,7 +591,7 @@ pub enum Work<E: EthSpec> {
process_batch: Box<dyn FnOnce(Vec<GossipAggregatePackage<E>>) + Send + Sync>,
},
GossipBlock(AsyncFn),
GossipSignedBlobSidecar(AsyncFn),
GossipBlobSidecar(AsyncFn),
DelayedImportBlock {
beacon_block_slot: Slot,
beacon_block_root: Hash256,
@ -641,7 +641,7 @@ impl<E: EthSpec> Work<E> {
Work::GossipAggregate { .. } => GOSSIP_AGGREGATE,
Work::GossipAggregateBatch { .. } => GOSSIP_AGGREGATE_BATCH,
Work::GossipBlock(_) => GOSSIP_BLOCK,
Work::GossipSignedBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR,
Work::GossipBlobSidecar(_) => GOSSIP_BLOBS_SIDECAR,
Work::DelayedImportBlock { .. } => DELAYED_IMPORT_BLOCK,
Work::GossipVoluntaryExit(_) => GOSSIP_VOLUNTARY_EXIT,
Work::GossipProposerSlashing(_) => GOSSIP_PROPOSER_SLASHING,
@ -1205,7 +1205,7 @@ impl<E: EthSpec> BeaconProcessor<E> {
Work::GossipBlock { .. } => {
gossip_block_queue.push(work, work_id, &self.log)
}
Work::GossipSignedBlobSidecar { .. } => {
Work::GossipBlobSidecar { .. } => {
gossip_blob_queue.push(work, work_id, &self.log)
}
Work::DelayedImportBlock { .. } => {
@ -1457,10 +1457,11 @@ impl<E: EthSpec> BeaconProcessor<E> {
task_spawner.spawn_async(process_fn)
}
Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn),
Work::GossipBlock(work) | Work::GossipSignedBlobSidecar(work) => task_spawner
.spawn_async(async move {
Work::GossipBlock(work) | Work::GossipBlobSidecar(work) => {
task_spawner.spawn_async(async move {
work.await;
}),
})
}
Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => {
task_spawner.spawn_blocking(process_fn)
}

View File

@ -1,9 +1,9 @@
use eth2::types::builder_bid::SignedBuilderBid;
use eth2::types::FullPayloadContents;
use eth2::types::{
BlindedPayload, EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes,
SignedBlockContents, SignedValidatorRegistrationData, Slot,
EthSpec, ExecutionBlockHash, ForkVersionedResponse, PublicKeyBytes,
SignedValidatorRegistrationData, Slot,
};
use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock};
pub use eth2::Error;
use eth2::{ok_or_error, StatusCode};
use reqwest::{IntoUrl, Response};
@ -140,7 +140,7 @@ impl BuilderHttpClient {
/// `POST /eth/v1/builder/blinded_blocks`
pub async fn post_builder_blinded_blocks<E: EthSpec>(
&self,
blinded_block: &SignedBlockContents<E, BlindedPayload<E>>,
blinded_block: &SignedBlindedBeaconBlock<E>,
) -> Result<ForkVersionedResponse<FullPayloadContents<E>>, Error> {
let mut path = self.server.full.clone();

View File

@ -14,8 +14,8 @@ pub use engine_api::*;
pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc};
use engines::{Engine, EngineError};
pub use engines::{EngineState, ForkchoiceState};
use eth2::types::FullPayloadContents;
use eth2::types::{builder_bid::SignedBuilderBid, BlobsBundle, ForkVersionedResponse};
use eth2::types::{FullPayloadContents, SignedBlockContents};
use ethers_core::types::Transaction as EthersTransaction;
use fork_choice::ForkchoiceUpdateParameters;
use lru::LruCache;
@ -43,8 +43,9 @@ use tree_hash::TreeHash;
use types::beacon_block_body::KzgCommitments;
use types::builder_bid::BuilderBid;
use types::payload::BlockProductionVersion;
use types::sidecar::{BlobItems, Sidecar};
use types::{AbstractExecPayload, ExecutionPayloadDeneb, KzgProofs};
use types::{
AbstractExecPayload, BlobsList, ExecutionPayloadDeneb, KzgProofs, SignedBlindedBeaconBlock,
};
use types::{
BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadCapella,
ExecutionPayloadMerge, FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot,
@ -103,12 +104,8 @@ impl<E: EthSpec> TryFrom<BuilderBid<E>> for ProvenancedPayload<BlockProposalCont
BuilderBid::Deneb(builder_bid) => BlockProposalContents::PayloadAndBlobs {
payload: ExecutionPayloadHeader::Deneb(builder_bid.header).into(),
block_value: builder_bid.value,
kzg_commitments: builder_bid.blinded_blobs_bundle.commitments,
blobs: BlobItems::<E>::try_from_blob_roots(
builder_bid.blinded_blobs_bundle.blob_roots,
)
.map_err(Error::InvalidBlobConversion)?,
proofs: builder_bid.blinded_blobs_bundle.proofs,
kzg_commitments: builder_bid.blob_kzg_commitments,
blobs_and_proofs: None,
},
};
Ok(ProvenancedPayload::Builder(
@ -170,8 +167,8 @@ pub enum BlockProposalContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
payload: Payload,
block_value: Uint256,
kzg_commitments: KzgCommitments<T>,
blobs: <Payload::Sidecar as Sidecar<T>>::BlobItems,
proofs: KzgProofs<T>,
/// `None` for blinded `PayloadAndBlobs`.
blobs_and_proofs: Option<(BlobsList<T>, KzgProofs<T>)>,
},
}
@ -203,9 +200,7 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> TryFrom<GetPayloadResponse<E>>
payload: execution_payload.into(),
block_value,
kzg_commitments: bundle.commitments,
blobs: BlobItems::try_from_blobs(bundle.blobs)
.map_err(Error::InvalidBlobConversion)?,
proofs: bundle.proofs,
blobs_and_proofs: Some((bundle.blobs, bundle.proofs)),
}),
None => Ok(Self::Payload {
payload: execution_payload.into(),
@ -233,26 +228,23 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
) -> (
Payload,
Option<KzgCommitments<T>>,
Option<<Payload::Sidecar as Sidecar<T>>::BlobItems>,
Option<KzgProofs<T>>,
Option<(BlobsList<T>, KzgProofs<T>)>,
Uint256,
) {
match self {
Self::Payload {
payload,
block_value,
} => (payload, None, None, None, block_value),
} => (payload, None, None, block_value),
Self::PayloadAndBlobs {
payload,
block_value,
kzg_commitments,
blobs,
proofs,
blobs_and_proofs,
} => (
payload,
Some(kzg_commitments),
Some(blobs),
Some(proofs),
blobs_and_proofs,
block_value,
),
}
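
Editor's note: coupling blobs and proofs into a single Option<(BlobsList, KzgProofs)> removes the previously representable state where one half was Some and the other None; a blinded builder bid now simply carries None alongside its commitments. A hedged sketch of consuming the reshaped variant, with field names mirroring the hunk above:

fn blob_count<T: EthSpec, P: AbstractExecPayload<T>>(
    contents: &BlockProposalContents<T, P>,
) -> usize {
    match contents {
        BlockProposalContents::Payload { .. } => 0,
        BlockProposalContents::PayloadAndBlobs {
            blobs_and_proofs, ..
        } => blobs_and_proofs
            // `None` here means a blinded payload: commitments only, blobs withheld.
            .as_ref()
            .map_or(0, |(blobs, _proofs)| blobs.len()),
    }
}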
@ -276,23 +268,6 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
Self::PayloadAndBlobs { block_value, .. } => block_value,
}
}
pub fn default_at_fork(fork_name: ForkName) -> Result<Self, BeaconStateError> {
Ok(match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
BlockProposalContents::Payload {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
}
}
ForkName::Deneb => BlockProposalContents::PayloadAndBlobs {
payload: Payload::default_at_fork(fork_name)?,
block_value: Uint256::zero(),
blobs: Payload::default_blobs_at_fork(fork_name)?,
kzg_commitments: VariableList::default(),
proofs: VariableList::default(),
},
})
}
}
#[derive(Clone, PartialEq)]
@ -753,6 +728,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
}
}
/// Delete proposer preparation data for `proposer_index`. This is only useful in tests.
pub async fn clear_proposer_preparation(&self, proposer_index: u64) {
self.proposer_preparation_data()
.await
.remove(&proposer_index);
}
/// Removes expired entries from proposer_preparation_data and proposers caches
async fn clean_proposer_caches(&self, current_epoch: Epoch) -> Result<(), Error> {
let mut proposer_preparation_data = self.proposer_preparation_data().await;
@ -2003,7 +1985,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
pub async fn propose_blinded_beacon_block(
&self,
block_root: Hash256,
block: &SignedBlockContents<T, BlindedPayload<T>>,
block: &SignedBlindedBeaconBlock<T>,
) -> Result<FullPayloadContents<T>, Error> {
debug!(
self.log(),
@ -2052,7 +2034,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
"relay_response_ms" => duration.as_millis(),
"block_root" => ?block_root,
"parent_hash" => ?block
.signed_block()
.message()
.execution_payload()
.map(|payload| format!("{}", payload.parent_hash()))

View File

@ -881,16 +881,16 @@ mod test {
#[test]
fn valid_test_blobs() {
assert!(
validate_blob::<MainnetEthSpec>().unwrap(),
validate_blob::<MainnetEthSpec>().is_ok(),
"Mainnet preset test blobs bundle should contain valid proofs"
);
assert!(
validate_blob::<MinimalEthSpec>().unwrap(),
validate_blob::<MinimalEthSpec>().is_ok(),
"Minimal preset test blobs bundle should contain valid proofs"
);
}
fn validate_blob<E: EthSpec>() -> Result<bool, String> {
fn validate_blob<E: EthSpec>() -> Result<(), String> {
let kzg = load_kzg()?;
let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle::<E>()?;
let kzg_blob = kzg::Blob::from_bytes(blob.as_ref())

View File

@ -533,8 +533,8 @@ pub fn serve<E: EthSpec>(
.as_deneb()
.map_err(|_| reject("incorrect payload variant"))?
.into(),
blinded_blobs_bundle: maybe_blobs_bundle
.map(Into::into)
blob_kzg_commitments: maybe_blobs_bundle
.map(|b| b.commitments)
.unwrap_or_default(),
value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI),
pubkey: builder.builder_sk.public_key().compress(),
@ -572,8 +572,8 @@ pub fn serve<E: EthSpec>(
.as_deneb()
.map_err(|_| reject("incorrect payload variant"))?
.into(),
blinded_blobs_bundle: maybe_blobs_bundle
.map(Into::into)
blob_kzg_commitments: maybe_blobs_bundle
.map(|b| b.commitments)
.unwrap_or_default(),
value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI),
pubkey: builder.builder_sk.public_key().compress(),

View File

@ -1,50 +1,42 @@
use beacon_chain::BlockProductionError;
use eth2::types::{BeaconBlockAndBlobSidecars, BlindedBeaconBlockAndBlobSidecars, BlockContents};
use types::{AbstractExecPayload, BeaconBlock, EthSpec, ForkName, SidecarList};
use beacon_chain::{BeaconBlockResponse, BeaconBlockResponseWrapper, BlockProductionError};
use eth2::types::{BlockContents, FullBlockContents, ProduceBlockV3Response};
use types::{EthSpec, ForkName};
type Error = warp::reject::Rejection;
pub fn build_block_contents<E: EthSpec, Payload: AbstractExecPayload<E>>(
pub fn build_block_contents<E: EthSpec>(
fork_name: ForkName,
block: BeaconBlock<E, Payload>,
maybe_blobs: Option<SidecarList<E, <Payload as AbstractExecPayload<E>>::Sidecar>>,
) -> Result<BlockContents<E, Payload>, Error> {
match Payload::block_type() {
types::BlockType::Blinded => match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
Ok(BlockContents::Block(block))
}
block_response: BeaconBlockResponseWrapper<E>,
) -> Result<ProduceBlockV3Response<E>, Error> {
match block_response {
BeaconBlockResponseWrapper::Blinded(block) => {
Ok(ProduceBlockV3Response::Blinded(block.block))
}
BeaconBlockResponseWrapper::Full(block) => match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => Ok(
ProduceBlockV3Response::Full(FullBlockContents::Block(block.block)),
),
ForkName::Deneb => {
if let Some(blinded_blob_sidecars) = maybe_blobs {
let block_and_blobs = BlindedBeaconBlockAndBlobSidecars {
blinded_block: block,
blinded_blob_sidecars,
};
let BeaconBlockResponse {
block,
state: _,
blob_items,
execution_payload_value: _,
consensus_block_value: _,
} = block;
Ok(BlockContents::BlindedBlockAndBlobSidecars(block_and_blobs))
} else {
Err(warp_utils::reject::block_production_error(
let Some((kzg_proofs, blobs)) = blob_items else {
return Err(warp_utils::reject::block_production_error(
BlockProductionError::MissingBlobs,
))
}
}
},
types::BlockType::Full => match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
Ok(BlockContents::Block(block))
}
ForkName::Deneb => {
if let Some(blob_sidecars) = maybe_blobs {
let block_and_blobs = BeaconBlockAndBlobSidecars {
));
};
Ok(ProduceBlockV3Response::Full(
FullBlockContents::BlockContents(BlockContents {
block,
blob_sidecars,
};
Ok(BlockContents::BlockAndBlobSidecars(block_and_blobs))
} else {
Err(warp_utils::reject::block_production_error(
BlockProductionError::MissingBlobs,
))
}
kzg_proofs,
blobs,
}),
))
}
},
}

View File

@ -41,7 +41,7 @@ use bytes::Bytes;
use directory::DEFAULT_ROOT_DIR;
use eth2::types::{
self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode,
SignedBlindedBlockContents, SignedBlockContents, ValidatorId, ValidatorStatus,
PublishBlockRequest, ValidatorId, ValidatorStatus,
};
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform;
@ -76,9 +76,9 @@ use tokio_stream::{
};
use types::{
Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError,
BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName,
ForkVersionedResponse, Hash256, ProposerPreparationData, ProposerSlashing, RelativeEpoch,
SignedAggregateAndProof, SignedBlsToExecutionChange, SignedContributionAndProof,
CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, ForkVersionedResponse, Hash256,
ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof,
SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof,
SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncCommitteeMessage,
SyncContributionData,
};
@ -1306,7 +1306,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
move |block_contents: SignedBlockContents<T::EthSpec>,
move |block_contents: PublishBlockRequest<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -1342,7 +1342,7 @@ pub fn serve<T: BeaconChainTypes>(
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
let block_contents = PublishBlockRequest::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
&chain.spec,
)
@ -1375,7 +1375,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlockContents<T::EthSpec>,
block_contents: PublishBlockRequest<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -1413,7 +1413,7 @@ pub fn serve<T: BeaconChainTypes>(
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block_contents = SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
let block_contents = PublishBlockRequest::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
&chain.spec,
)
@ -1449,7 +1449,7 @@ pub fn serve<T: BeaconChainTypes>(
.and(network_tx_filter.clone())
.and(log_filter.clone())
.then(
move |block_contents: SignedBlindedBlockContents<T::EthSpec>,
move |block_contents: SignedBlindedBeaconBlock<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -1485,14 +1485,13 @@ pub fn serve<T: BeaconChainTypes>(
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block =
SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
&block_bytes,
&chain.spec,
)
.map_err(|e| {
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
})?;
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
&chain.spec,
)
.map_err(|e| {
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
})?;
publish_blocks::publish_blinded_block(
block,
chain,
@ -1518,14 +1517,14 @@ pub fn serve<T: BeaconChainTypes>(
.and(log_filter.clone())
.then(
move |validation_level: api_types::BroadcastValidationQuery,
block_contents: SignedBlindedBlockContents<T::EthSpec>,
blinded_block: SignedBlindedBeaconBlock<T::EthSpec>,
task_spawner: TaskSpawner<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
publish_blocks::publish_blinded_block(
block_contents,
blinded_block,
chain,
&network_tx,
log,
@ -1555,14 +1554,13 @@ pub fn serve<T: BeaconChainTypes>(
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| {
task_spawner.spawn_async_with_rejection(Priority::P0, async move {
let block =
SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
&block_bytes,
&chain.spec,
)
.map_err(|e| {
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
})?;
let block = SignedBlindedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
&chain.spec,
)
.map_err(|e| {
warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}"))
})?;
publish_blocks::publish_blinded_block(
block,
chain,

View File

@ -3,8 +3,7 @@ use std::sync::Arc;
use types::{payload::BlockProductionVersion, *};
use beacon_chain::{
BeaconBlockResponse, BeaconBlockResponseType, BeaconChain, BeaconChainTypes,
ProduceBlockVerification,
BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification,
};
use eth2::types::{self as api_types, EndpointVersion, SkipRandaoVerification};
use ssz::Encode;
@ -69,35 +68,23 @@ pub async fn produce_block_v3<T: BeaconChainTypes>(
warp_utils::reject::custom_bad_request(format!("failed to fetch a block: {:?}", e))
})?;
match block_response_type {
BeaconBlockResponseType::Full(block_response) => {
build_response_v3(chain, block_response, endpoint_version, accept_header)
}
BeaconBlockResponseType::Blinded(block_response) => {
build_response_v3(chain, block_response, endpoint_version, accept_header)
}
}
build_response_v3(chain, block_response_type, endpoint_version, accept_header)
}
pub fn build_response_v3<T: BeaconChainTypes, E: EthSpec, Payload: AbstractExecPayload<E>>(
pub fn build_response_v3<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
block_response: BeaconBlockResponse<E, Payload>,
block_response: BeaconBlockResponseWrapper<T::EthSpec>,
endpoint_version: EndpointVersion,
accept_header: Option<api_types::Accept>,
) -> Result<Response<Body>, warp::Rejection> {
let fork_name = block_response
.block
.to_ref()
.fork_name(&chain.spec)
.map_err(inconsistent_fork_rejection)?;
let execution_payload_value = block_response.execution_payload_value();
let consensus_block_value = block_response.consensus_block_value();
let execution_payload_blinded = block_response.is_blinded();
let block_contents = build_block_contents::build_block_contents(
fork_name,
block_response.block,
block_response.maybe_side_car,
)?;
let execution_payload_blinded = Payload::block_type() == BlockType::Blinded;
let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?;
match accept_header {
Some(api_types::Accept::Ssz) => Response::builder()
@ -107,9 +94,9 @@ pub fn build_response_v3<T: BeaconChainTypes, E: EthSpec, Payload: AbstractExecP
.map(|res: Response<Body>| add_consensus_version_header(res, fork_name))
.map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded))
.map(|res: Response<Body>| {
add_execution_payload_value_header(res, block_response.execution_payload_value)
add_execution_payload_value_header(res, execution_payload_value)
})
.map(|res| add_consensus_block_value_header(res, block_response.consensus_block_value))
.map(|res| add_consensus_block_value_header(res, consensus_block_value))
.map_err(|e| -> warp::Rejection {
warp_utils::reject::custom_server_error(format!("failed to create response: {}", e))
}),
@ -117,10 +104,8 @@ pub fn build_response_v3<T: BeaconChainTypes, E: EthSpec, Payload: AbstractExecP
.map(|response| warp::reply::json(&response).into_response())
.map(|res| add_consensus_version_header(res, fork_name))
.map(|res| add_execution_payload_blinded_header(res, execution_payload_blinded))
.map(|res| {
add_execution_payload_value_header(res, block_response.execution_payload_value)
})
.map(|res| add_consensus_block_value_header(res, block_response.consensus_block_value)),
.map(|res| add_execution_payload_value_header(res, execution_payload_value))
.map(|res| add_consensus_block_value_header(res, consensus_block_value)),
}
}
@ -150,14 +135,7 @@ pub async fn produce_blinded_block_v2<T: BeaconChainTypes>(
.await
.map_err(warp_utils::reject::block_production_error)?;
match block_response_type {
BeaconBlockResponseType::Full(block_response) => {
build_response_v2(chain, block_response, endpoint_version, accept_header)
}
BeaconBlockResponseType::Blinded(block_response) => {
build_response_v2(chain, block_response, endpoint_version, accept_header)
}
}
build_response_v2(chain, block_response_type, endpoint_version, accept_header)
}
pub async fn produce_block_v2<T: BeaconChainTypes>(
@ -187,33 +165,20 @@ pub async fn produce_block_v2<T: BeaconChainTypes>(
.await
.map_err(warp_utils::reject::block_production_error)?;
match block_response_type {
BeaconBlockResponseType::Full(block_response) => {
build_response_v2(chain, block_response, endpoint_version, accept_header)
}
BeaconBlockResponseType::Blinded(block_response) => {
build_response_v2(chain, block_response, endpoint_version, accept_header)
}
}
build_response_v2(chain, block_response_type, endpoint_version, accept_header)
}
pub fn build_response_v2<T: BeaconChainTypes, E: EthSpec, Payload: AbstractExecPayload<E>>(
pub fn build_response_v2<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
block_response: BeaconBlockResponse<E, Payload>,
block_response: BeaconBlockResponseWrapper<T::EthSpec>,
endpoint_version: EndpointVersion,
accept_header: Option<api_types::Accept>,
) -> Result<Response<Body>, warp::Rejection> {
let fork_name = block_response
.block
.to_ref()
.fork_name(&chain.spec)
.map_err(inconsistent_fork_rejection)?;
let block_contents = build_block_contents::build_block_contents(
fork_name,
block_response.block,
block_response.maybe_side_car,
)?;
let block_contents = build_block_contents::build_block_contents(fork_name, block_response)?;
match accept_header {
Some(api_types::Accept::Ssz) => Response::builder()

View File

@ -6,8 +6,8 @@ use beacon_chain::{
AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError,
IntoGossipVerifiedBlockContents, NotifyExecutionLayer,
};
use eth2::types::{BroadcastValidation, ErrorMessage};
use eth2::types::{FullPayloadContents, SignedBlockContents};
use eth2::types::{into_full_block_and_blobs, BroadcastValidation, ErrorMessage};
use eth2::types::{FullPayloadContents, PublishBlockRequest};
use execution_layer::ProvenancedPayload;
use lighthouse_network::PubsubMessage;
use network::NetworkMessage;
@ -19,8 +19,9 @@ use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tree_hash::TreeHash;
use types::{
AbstractExecPayload, BeaconBlockRef, BlindedPayload, EthSpec, ExecPayload, ExecutionBlockHash,
ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlobSidecarList,
AbstractExecPayload, BeaconBlockRef, BlobSidecarList, EthSpec, ExecPayload, ExecutionBlockHash,
ForkName, FullPayload, FullPayloadMerge, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock,
VariableList,
};
use warp::http::StatusCode;
use warp::{reply::Response, Rejection, Reply};
@ -65,7 +66,7 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
/* actually publish a block */
let publish_block = move |block: Arc<SignedBeaconBlock<T::EthSpec>>,
blobs_opt: Option<SignedBlobSidecarList<T::EthSpec>>,
blobs_opt: Option<BlobSidecarList<T::EthSpec>>,
sender,
log,
seen_timestamp| {
@ -86,8 +87,8 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
}
SignedBeaconBlock::Deneb(_) => {
let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block.clone())];
if let Some(signed_blobs) = blobs_opt {
for (blob_index, blob) in signed_blobs.into_iter().enumerate() {
if let Some(blob_sidecars) = blobs_opt {
for (blob_index, blob) in blob_sidecars.into_iter().enumerate() {
pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new((
blob_index as u64,
blob,
@ -108,10 +109,6 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
let sender_clone = network_tx.clone();
let log_clone = log.clone();
// We can clone this because the blobs are `Arc`'d in `BlockContents`, but the block is not,
// so we avoid cloning the block at this point.
let blobs_opt = block_contents.inner_blobs();
/* if we can form a `GossipVerifiedBlock`, we've passed our basic gossip checks */
let (gossip_verified_block, gossip_verified_blobs) =
match block_contents.into_gossip_verified_block(&chain) {
@ -142,6 +139,13 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
// Clone here, so we can take advantage of the `Arc`. The block in `BlockContents` is not
// `Arc`'d, but the blobs are.
let block = gossip_verified_block.block.block_cloned();
let blobs_opt = gossip_verified_blobs.as_ref().map(|gossip_verified_blobs| {
let blobs = gossip_verified_blobs
.into_iter()
.map(|b| b.clone_blob())
.collect::<Vec<_>>();
VariableList::from(blobs)
});
let block_root = block_root.unwrap_or(gossip_verified_block.block_root);
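For context on the `clone_blob` collection above: the sidecars held by the gossip-verified blobs are reference-counted, so rebuilding the blob list costs a handful of `Arc` clones rather than deep copies of the blob data (on the order of 128 KiB per blob). A simplified sketch of that shape, with stand-in types rather than the real Lighthouse definitions:

```rust
// Illustrative only: why collecting blobs from gossip-verified wrappers is cheap.
use std::sync::Arc;

struct Sidecar {
    index: u64,
    // blob bytes, KZG commitment, inclusion proof, ... elided
}

struct GossipVerifiedBlob {
    sidecar: Arc<Sidecar>,
}

impl GossipVerifiedBlob {
    // Mirrors the spirit of `clone_blob`: hand out another handle to the same allocation.
    fn clone_blob(&self) -> Arc<Sidecar> {
        self.sidecar.clone()
    }
}

// Collecting these handles into a list is just a series of reference-count bumps.
fn collect_blobs(verified: &[GossipVerifiedBlob]) -> Vec<Arc<Sidecar>> {
    verified.iter().map(|v| v.clone_blob()).collect()
}
```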
@ -292,16 +296,16 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
/// Handles a request from the HTTP API for blinded blocks. This converts blinded blocks into full
/// blocks before publishing.
pub async fn publish_blinded_block<T: BeaconChainTypes>(
block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<T::EthSpec>>,
blinded_block: SignedBlindedBeaconBlock<T::EthSpec>,
chain: Arc<BeaconChain<T>>,
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger,
validation_level: BroadcastValidation,
duplicate_status_code: StatusCode,
) -> Result<Response, Rejection> {
let block_root = block_contents.signed_block().canonical_root();
let full_block: ProvenancedBlock<T, SignedBlockContents<T::EthSpec>> =
reconstruct_block(chain.clone(), block_root, block_contents, log.clone()).await?;
let block_root = blinded_block.canonical_root();
let full_block: ProvenancedBlock<T, PublishBlockRequest<T::EthSpec>> =
reconstruct_block(chain.clone(), block_root, blinded_block, log.clone()).await?;
publish_block::<T, _>(
Some(block_root),
full_block,
@ -320,10 +324,9 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
pub async fn reconstruct_block<T: BeaconChainTypes>(
chain: Arc<BeaconChain<T>>,
block_root: Hash256,
block_contents: SignedBlockContents<T::EthSpec, BlindedPayload<T::EthSpec>>,
block: SignedBlindedBeaconBlock<T::EthSpec>,
log: Logger,
) -> Result<ProvenancedBlock<T, SignedBlockContents<T::EthSpec>>, Rejection> {
let block = block_contents.signed_block();
) -> Result<ProvenancedBlock<T, PublishBlockRequest<T::EthSpec>>, Rejection> {
let full_payload_opt = if let Ok(payload_header) = block.message().body().execution_payload() {
let el = chain.execution_layer.as_ref().ok_or_else(|| {
warp_utils::reject::custom_server_error("Missing execution layer".to_string())
@ -365,7 +368,7 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
);
let full_payload = el
.propose_blinded_beacon_block(block_root, &block_contents)
.propose_blinded_beacon_block(block_root, &block)
.await
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
@ -385,15 +388,15 @@ pub async fn reconstruct_block<T: BeaconChainTypes>(
match full_payload_opt {
// A block without a payload is pre-merge and we consider it locally
// built.
None => block_contents
.try_into_full_block_and_blobs(None)
.map(ProvenancedBlock::local),
Some(ProvenancedPayload::Local(full_payload_contents)) => block_contents
.try_into_full_block_and_blobs(Some(full_payload_contents))
.map(ProvenancedBlock::local),
Some(ProvenancedPayload::Builder(full_payload_contents)) => block_contents
.try_into_full_block_and_blobs(Some(full_payload_contents))
.map(ProvenancedBlock::builder),
None => into_full_block_and_blobs(block, None).map(ProvenancedBlock::local),
Some(ProvenancedPayload::Local(full_payload_contents)) => {
into_full_block_and_blobs(block, Some(full_payload_contents))
.map(ProvenancedBlock::local)
}
Some(ProvenancedPayload::Builder(full_payload_contents)) => {
into_full_block_and_blobs(block, Some(full_payload_contents))
.map(ProvenancedBlock::builder)
}
}
.map_err(|e| {
warp_utils::reject::custom_server_error(format!("Unable to add payload to block: {e:?}"))

View File

@ -2,18 +2,12 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BlockStrategy},
GossipVerifiedBlock, IntoGossipVerifiedBlockContents,
};
use eth2::types::{
BroadcastValidation, SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlockContents,
SignedBlockContentsTuple,
};
use eth2::types::{BroadcastValidation, PublishBlockRequest, SignedBeaconBlock};
use http_api::test_utils::InteractiveTester;
use http_api::{publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock};
use std::sync::Arc;
use tree_hash::TreeHash;
use types::{
BlindedBlobSidecar, BlindedPayload, BlobSidecar, FullPayload, Hash256, MainnetEthSpec,
SignedSidecarList, Slot,
};
use types::{Hash256, MainnetEthSpec, Slot};
use warp::Rejection;
use warp_utils::reject::CustomBadRequest;
@ -80,7 +74,7 @@ pub async fn gossip_invalid() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
.post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level)
.await;
assert!(response.is_err());
@ -131,7 +125,7 @@ pub async fn gossip_partial_pass() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
.post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level)
.await;
assert!(response.is_err());
@ -174,7 +168,7 @@ pub async fn gossip_full_pass() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(
&SignedBlockContents::new(block.clone(), blobs),
&PublishBlockRequest::new(block.clone(), blobs),
validation_level,
)
.await;
@ -266,7 +260,7 @@ pub async fn consensus_invalid() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
.post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level)
.await;
assert!(response.is_err());
@ -315,7 +309,7 @@ pub async fn consensus_gossip() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
.post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level)
.await;
assert!(response.is_err());
@ -358,10 +352,8 @@ pub async fn consensus_partial_pass_only_consensus() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let ((block_a, _), state_after_a): ((SignedBeaconBlock<E>, _), _) =
tester.harness.make_block(state_a.clone(), slot_b).await;
let ((block_b, blobs_b), state_after_b): ((SignedBeaconBlock<E>, _), _) =
tester.harness.make_block(state_a, slot_b).await;
let ((block_a, _), state_after_a) = tester.harness.make_block(state_a.clone(), slot_b).await;
let ((block_b, blobs_b), state_after_b) = tester.harness.make_block(state_a, slot_b).await;
let block_b_root = block_b.canonical_root();
/* check for `make_block` curios */
@ -369,7 +361,7 @@ pub async fn consensus_partial_pass_only_consensus() {
assert_eq!(block_b.state_root(), state_after_b.tree_hash_root());
assert_ne!(block_a.state_root(), block_b.state_root());
let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b)
let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b)
.into_gossip_verified_block(&tester.harness.chain);
assert!(gossip_block_contents_b.is_ok());
let gossip_block_a = GossipVerifiedBlock::new(block_a.clone().into(), &tester.harness.chain);
@ -430,7 +422,7 @@ pub async fn consensus_full_pass() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(
&SignedBlockContents::new(block.clone(), blobs),
&PublishBlockRequest::new(block.clone(), blobs),
validation_level,
)
.await;
@ -481,7 +473,7 @@ pub async fn equivocation_invalid() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
.post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level)
.await;
assert!(response.is_err());
@ -538,7 +530,7 @@ pub async fn equivocation_consensus_early_equivocation() {
assert!(tester
.client
.post_beacon_blocks_v2(
&SignedBlockContents::new(block_a.clone(), blobs_a),
&PublishBlockRequest::new(block_a.clone(), blobs_a),
validation_level
)
.await
@ -552,7 +544,7 @@ pub async fn equivocation_consensus_early_equivocation() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(
&SignedBlockContents::new(block_b.clone(), blobs_b),
&PublishBlockRequest::new(block_b.clone(), blobs_b),
validation_level,
)
.await;
@ -603,7 +595,7 @@ pub async fn equivocation_gossip() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(&SignedBlockContents::new(block, blobs), validation_level)
.post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level)
.await;
assert!(response.is_err());
@ -661,10 +653,10 @@ pub async fn equivocation_consensus_late_equivocation() {
assert_eq!(block_b.state_root(), state_after_b.tree_hash_root());
assert_ne!(block_a.state_root(), block_b.state_root());
let gossip_block_contents_b = SignedBlockContents::new(block_b, blobs_b)
let gossip_block_contents_b = PublishBlockRequest::new(block_b, blobs_b)
.into_gossip_verified_block(&tester.harness.chain);
assert!(gossip_block_contents_b.is_ok());
let gossip_block_contents_a = SignedBlockContents::new(block_a, blobs_a)
let gossip_block_contents_a = PublishBlockRequest::new(block_a, blobs_a)
.into_gossip_verified_block(&tester.harness.chain);
assert!(gossip_block_contents_a.is_err());
@ -728,7 +720,7 @@ pub async fn equivocation_full_pass() {
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(
&SignedBlockContents::new(block.clone(), blobs),
&PublishBlockRequest::new(block.clone(), blobs),
validation_level,
)
.await;
@ -776,11 +768,9 @@ pub async fn blinded_gossip_invalid() {
})
.await;
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level)
.await;
assert!(response.is_err());
@ -829,11 +819,9 @@ pub async fn blinded_gossip_partial_pass() {
})
.await;
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level)
.await;
assert!(response.is_err());
@ -870,18 +858,17 @@ pub async fn blinded_gossip_full_pass() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let block_contents = block_contents_tuple.into();
let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
.await;
assert!(response.is_ok());
assert!(tester
.harness
.chain
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
.block_is_known_to_fork_choice(&blinded_block.canonical_root()));
}
// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`.
@ -912,19 +899,18 @@ pub async fn blinded_gossip_full_pass_ssz() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let block_contents = block_contents_tuple.into();
let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2_ssz(&block_contents, validation_level)
.post_beacon_blinded_blocks_v2_ssz(&blinded_block, validation_level)
.await;
assert!(response.is_ok());
assert!(tester
.harness
.chain
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
.block_is_known_to_fork_choice(&blinded_block.canonical_root()));
}
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`.
@ -963,11 +949,9 @@ pub async fn blinded_consensus_invalid() {
})
.await;
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level)
.await;
assert!(response.is_err());
@ -1014,11 +998,9 @@ pub async fn blinded_consensus_gossip() {
.make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero())
.await;
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level)
.await;
assert!(response.is_err());
@ -1060,19 +1042,18 @@ pub async fn blinded_consensus_full_pass() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let (block_contents_tuple, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let (blinded_block, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let block_contents = block_contents_tuple.into();
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&blinded_block, validation_level)
.await;
assert!(response.is_ok());
assert!(tester
.harness
.chain
.block_is_known_to_fork_choice(&block_contents.signed_block().canonical_root()));
.block_is_known_to_fork_choice(&blinded_block.canonical_root()));
}
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus_and_equivocation`.
@ -1112,11 +1093,9 @@ pub async fn blinded_equivocation_invalid() {
})
.await;
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level)
.await;
assert!(response.is_err());
@ -1159,18 +1138,13 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let (block_contents_tuple_a, state_after_a) = tester
let (block_a, state_after_a) = tester
.harness
.make_blinded_block(state_a.clone(), slot_b)
.await;
let (block_contents_tuple_b, state_after_b) =
tester.harness.make_blinded_block(state_a, slot_b).await;
let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await;
/* check for `make_blinded_block` curios */
let block_contents_a: SignedBlockContents<E, BlindedPayload<E>> = block_contents_tuple_a.into();
let block_contents_b: SignedBlockContents<E, BlindedPayload<E>> = block_contents_tuple_b.into();
let block_a = block_contents_a.signed_block();
let block_b = block_contents_b.signed_block();
assert_eq!(block_a.state_root(), state_after_a.tree_hash_root());
assert_eq!(block_b.state_root(), state_after_b.tree_hash_root());
assert_ne!(block_a.state_root(), block_b.state_root());
@ -1178,7 +1152,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
/* submit `block_a` as valid */
assert!(tester
.client
.post_beacon_blinded_blocks_v2(&block_contents_a, validation_level)
.post_beacon_blinded_blocks_v2(&block_a, validation_level)
.await
.is_ok());
assert!(tester
@ -1189,7 +1163,7 @@ pub async fn blinded_equivocation_consensus_early_equivocation() {
/* submit `block_b` which should induce equivocation */
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&block_contents_b, validation_level)
.post_beacon_blinded_blocks_v2(&block_b, validation_level)
.await;
assert!(response.is_err());
@ -1236,11 +1210,9 @@ pub async fn blinded_equivocation_gossip() {
.make_block_with_modifier(state_a, slot_b, |b| *b.state_root_mut() = Hash256::zero())
.await;
let blinded_block_contents = into_signed_blinded_block_contents(block_contents_tuple);
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2(&blinded_block_contents, validation_level)
.post_beacon_blinded_blocks_v2(&block_contents_tuple.0.clone_as_blinded(), validation_level)
.await;
assert!(response.is_err());
@ -1286,12 +1258,11 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let ((block_a, blobs_a), state_after_a): ((SignedBlindedBeaconBlock<E>, _), _) = tester
let (block_a, state_after_a) = tester
.harness
.make_blinded_block(state_a.clone(), slot_b)
.await;
let ((block_b, blobs_b), state_after_b): ((SignedBlindedBeaconBlock<E>, _), _) =
tester.harness.make_blinded_block(state_a, slot_b).await;
let (block_b, state_after_b) = tester.harness.make_blinded_block(state_a, slot_b).await;
/* check for `make_blinded_block` curios */
assert_eq!(block_a.state_root(), state_after_a.tree_hash_root());
@ -1301,7 +1272,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
let unblinded_block_a = reconstruct_block(
tester.harness.chain.clone(),
block_a.canonical_root(),
SignedBlockContents::new(block_a, blobs_a),
block_a,
test_logger.clone(),
)
.await
@ -1309,7 +1280,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
let unblinded_block_b = reconstruct_block(
tester.harness.chain.clone(),
block_b.canonical_root(),
SignedBlockContents::new(block_b.clone(), blobs_b.clone()),
block_b.clone(),
test_logger.clone(),
)
.await
@ -1338,7 +1309,7 @@ pub async fn blinded_equivocation_consensus_late_equivocation() {
let channel = tokio::sync::mpsc::unbounded_channel();
let publication_result = publish_blinded_block(
SignedBlockContents::new(block_b, blobs_b),
block_b,
tester.harness.chain,
&channel.0,
test_logger,
@ -1383,15 +1354,11 @@ pub async fn blinded_equivocation_full_pass() {
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let ((block, blobs), _): ((SignedBlindedBeaconBlock<E>, _), _) =
tester.harness.make_blinded_block(state_a, slot_b).await;
let (block, _) = tester.harness.make_blinded_block(state_a, slot_b).await;
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2(
&SignedBlockContents::new(block.clone(), blobs),
validation_level,
)
.post_beacon_blinded_blocks_v2(&block, validation_level)
.await;
assert!(response.is_ok());
@ -1400,20 +1367,3 @@ pub async fn blinded_equivocation_full_pass() {
.chain
.block_is_known_to_fork_choice(&block.canonical_root()));
}
fn into_signed_blinded_block_contents(
block_contents_tuple: SignedBlockContentsTuple<E, FullPayload<E>>,
) -> SignedBlockContents<E, BlindedPayload<E>> {
let (block, maybe_blobs) = block_contents_tuple;
SignedBlockContents::new(block.into(), maybe_blobs.map(into_blinded_blob_sidecars))
}
fn into_blinded_blob_sidecars(
blobs: SignedSidecarList<E, BlobSidecar<E>>,
) -> SignedSidecarList<E, BlindedBlobSidecar> {
blobs
.into_iter()
.map(|blob| blob.into())
.collect::<Vec<_>>()
.into()
}

View File

@ -17,8 +17,8 @@ use std::sync::Arc;
use std::time::Duration;
use tree_hash::TreeHash;
use types::{
Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload,
MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot,
Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, MainnetEthSpec,
MinimalEthSpec, ProposerPreparationData, Slot,
};
use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full};
@ -641,13 +641,9 @@ pub async fn proposer_boost_re_org_test(
assert_eq!(block_c.parent_root(), block_b_root);
}
// Sign blobs.
let block_c_signed_blobs =
block_c_blobs.map(|blobs| harness.sign_blobs(blobs, &state_b, proposer_index));
// Applying block C should cause it to become head regardless (re-org or continuation).
let block_root_c = harness
.process_block_result((block_c.clone(), block_c_signed_blobs))
.process_block_result((block_c.clone(), block_c_blobs))
.await
.unwrap()
.into();
@ -828,7 +824,7 @@ pub async fn fork_choice_before_proposal() {
.into();
let block_d = tester
.client
.get_validator_blocks::<E, FullPayload<E>>(slot_d, &randao_reveal, None)
.get_validator_blocks::<E>(slot_d, &randao_reveal, None)
.await
.unwrap()
.data

View File

@ -64,8 +64,8 @@ struct ApiTester {
harness: Arc<BeaconChainHarness<EphemeralHarnessType<E>>>,
chain: Arc<BeaconChain<EphemeralHarnessType<E>>>,
client: BeaconNodeHttpClient,
next_block: SignedBlockContents<E>,
reorg_block: SignedBlockContents<E>,
next_block: PublishBlockRequest<E>,
reorg_block: PublishBlockRequest<E>,
attestations: Vec<Attestation<E>>,
contribution_and_proofs: Vec<SignedContributionAndProof<E>>,
attester_slashing: AttesterSlashing<E>,
@ -173,13 +173,13 @@ impl ApiTester {
let (next_block, _next_state) = harness
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
.await;
let next_block = SignedBlockContents::from(next_block);
let next_block = PublishBlockRequest::from(next_block);
// `make_block` adds random graffiti, so this will produce an alternate block
let (reorg_block, _reorg_state) = harness
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap() + 1)
.await;
let reorg_block = SignedBlockContents::from(reorg_block);
let reorg_block = PublishBlockRequest::from(reorg_block);
let head_state_root = head.beacon_state_root();
let attestations = harness
@ -314,13 +314,13 @@ impl ApiTester {
let (next_block, _next_state) = harness
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
.await;
let next_block = SignedBlockContents::from(next_block);
let next_block = PublishBlockRequest::from(next_block);
// `make_block` adds random graffiti, so this will produce an alternate block
let (reorg_block, _reorg_state) = harness
.make_block(head.beacon_state.clone(), harness.chain.slot().unwrap())
.await;
let reorg_block = SignedBlockContents::from(reorg_block);
let reorg_block = PublishBlockRequest::from(reorg_block);
let head_state_root = head.beacon_state_root();
let attestations = harness
@ -1301,7 +1301,7 @@ impl ApiTester {
assert!(self
.client
.post_beacon_blocks(&SignedBlockContents::from(block))
.post_beacon_blocks(&PublishBlockRequest::from(block))
.await
.is_err());
@ -1328,7 +1328,7 @@ impl ApiTester {
assert!(self
.client
.post_beacon_blocks_ssz(&SignedBlockContents::from(block))
.post_beacon_blocks_ssz(&PublishBlockRequest::from(block))
.await
.is_err());
@ -1357,7 +1357,8 @@ impl ApiTester {
.await
.is_ok());
let blinded_block_contents = block_contents.clone_as_blinded();
// Blinded deneb block contents is just the blinded block
let blinded_block_contents = block_contents.signed_block().clone_as_blinded();
// Test all the POST methods in sequence, they should all behave the same.
let responses = vec![
@ -2567,7 +2568,7 @@ impl ApiTester {
let block = self
.client
.get_validator_blocks::<E, FullPayload<E>>(slot, &randao_reveal, None)
.get_validator_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
@ -2576,7 +2577,7 @@ impl ApiTester {
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
let signed_block_contents =
SignedBlockContents::try_from(signed_block.clone()).unwrap();
PublishBlockRequest::try_from(signed_block.clone()).unwrap();
self.client
.post_beacon_blocks(&signed_block_contents)
@ -2631,13 +2632,13 @@ impl ApiTester {
let block_bytes = self
.client
.get_validator_blocks_ssz::<E, FullPayload<E>>(slot, &randao_reveal, None)
.get_validator_blocks_ssz::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.expect("block bytes");
let block_contents =
BlockContents::<E, FullPayload<E>>::from_ssz_bytes(&block_bytes, &self.chain.spec)
FullBlockContents::<E>::from_ssz_bytes(&block_bytes, &self.chain.spec)
.expect("block contents bytes can be decoded");
let signed_block_contents =
@ -2704,28 +2705,26 @@ impl ApiTester {
.unwrap();
if is_blinded_payload {
let block_contents = <BlockContents<E, BlindedPayload<E>>>::from_ssz_bytes(
let blinded_block = <BlindedBeaconBlock<E>>::from_ssz_bytes(
&fork_version_response_bytes.unwrap(),
&self.chain.spec,
)
.expect("block contents bytes can be decoded");
let signed_block_contents =
block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
let signed_blinded_block =
blinded_block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
self.client
.post_beacon_blocks_ssz(&signed_block_contents)
.post_beacon_blinded_blocks_ssz(&signed_blinded_block)
.await
.unwrap();
// This converts the generic `Payload` to a concrete type for comparison.
let signed_block = signed_block_contents.deconstruct().0;
let head_block = SignedBeaconBlock::from(signed_block.clone());
assert_eq!(head_block, signed_block);
let head_block = self.chain.head_beacon_block().clone_as_blinded();
assert_eq!(head_block, signed_blinded_block);
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
} else {
let block_contents = <BlockContents<E, FullPayload<E>>>::from_ssz_bytes(
let block_contents = <FullBlockContents<E>>::from_ssz_bytes(
&fork_version_response_bytes.unwrap(),
&self.chain.spec,
)
@ -2757,7 +2756,7 @@ impl ApiTester {
let block = self
.client
.get_validator_blocks_modular::<E, FullPayload<E>>(
.get_validator_blocks_modular::<E>(
slot,
&Signature::infinity().unwrap().into(),
None,
@ -2815,13 +2814,13 @@ impl ApiTester {
// Check failure with no `skip_randao_verification` passed.
self.client
.get_validator_blocks::<E, FullPayload<E>>(slot, &bad_randao_reveal, None)
.get_validator_blocks::<E>(slot, &bad_randao_reveal, None)
.await
.unwrap_err();
// Check failure with `skip_randao_verification` (requires infinity sig).
self.client
.get_validator_blocks_modular::<E, FullPayload<E>>(
.get_validator_blocks_modular::<E>(
slot,
&bad_randao_reveal,
None,
@ -2836,7 +2835,7 @@ impl ApiTester {
self
}
pub async fn test_blinded_block_production<Payload: AbstractExecPayload<E>>(&self) {
pub async fn test_blinded_block_production(&self) {
let fork = self.chain.canonical_head.cached_head().head_fork();
let genesis_validators_root = self.chain.genesis_validators_root;
@ -2876,29 +2875,33 @@ impl ApiTester {
let block = self
.client
.get_validator_blinded_blocks::<E, Payload>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data;
let signed_block_contents =
block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
self.client
.post_beacon_blinded_blocks(&signed_block_contents)
.post_beacon_blinded_blocks(&signed_block)
.await
.unwrap();
// This converts the generic `Payload` to a concrete type for comparison.
let signed_block = signed_block_contents.deconstruct().0;
let head_block = SignedBeaconBlock::from(signed_block.clone());
assert_eq!(head_block, signed_block);
let head_block = self
.client
.get_beacon_blocks(CoreBlockId::Head)
.await
.unwrap()
.unwrap()
.data;
assert_eq!(head_block.clone_as_blinded(), signed_block);
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
}
}
pub async fn test_blinded_block_production_ssz<Payload: AbstractExecPayload<E>>(&self) {
pub async fn test_blinded_block_production_ssz(&self) {
let fork = self.chain.canonical_head.cached_head().head_fork();
let genesis_validators_root = self.chain.genesis_validators_root;
@ -2938,43 +2941,47 @@ impl ApiTester {
let block_contents_bytes = self
.client
.get_validator_blinded_blocks_ssz::<E, Payload>(slot, &randao_reveal, None)
.get_validator_blinded_blocks_ssz::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.expect("block bytes");
let block_contents = BlockContents::<E, Payload>::from_ssz_bytes(
&block_contents_bytes,
&self.chain.spec,
)
.expect("block contents bytes can be decoded");
let block_contents =
FullBlockContents::<E>::from_ssz_bytes(&block_contents_bytes, &self.chain.spec)
.expect("block contents bytes can be decoded");
let signed_block_contents =
block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
self.client
.post_beacon_blinded_blocks_ssz(&signed_block_contents)
.post_beacon_blinded_blocks_ssz(
&signed_block_contents.signed_block().clone_as_blinded(),
)
.await
.unwrap();
// This converts the generic `Payload` to a concrete type for comparison.
let signed_block = signed_block_contents.deconstruct().0;
let head_block = SignedBeaconBlock::from(signed_block.clone());
assert_eq!(head_block, signed_block);
let head_block = self
.client
.get_beacon_blocks(CoreBlockId::Head)
.await
.unwrap()
.unwrap()
.data;
let signed_block = signed_block_contents.signed_block();
assert_eq!(&head_block, signed_block);
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
}
}
pub async fn test_blinded_block_production_no_verify_randao<Payload: AbstractExecPayload<E>>(
self,
) -> Self {
pub async fn test_blinded_block_production_no_verify_randao(self) -> Self {
for _ in 0..E::slots_per_epoch() {
let slot = self.chain.slot().unwrap();
let block_contents = self
let blinded_block = self
.client
.get_validator_blinded_blocks_modular::<E, Payload>(
.get_validator_blinded_blocks_modular::<E>(
slot,
&Signature::infinity().unwrap().into(),
None,
@ -2983,18 +2990,14 @@ impl ApiTester {
.await
.unwrap()
.data;
assert_eq!(block_contents.block().slot(), slot);
assert_eq!(blinded_block.slot(), slot);
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
}
self
}
pub async fn test_blinded_block_production_verify_randao_invalid<
Payload: AbstractExecPayload<E>,
>(
self,
) -> Self {
pub async fn test_blinded_block_production_verify_randao_invalid(self) -> Self {
let fork = self.chain.canonical_head.cached_head().head_fork();
let genesis_validators_root = self.chain.genesis_validators_root;
@ -3034,13 +3037,13 @@ impl ApiTester {
// Check failure with full randao verification enabled.
self.client
.get_validator_blinded_blocks::<E, Payload>(slot, &bad_randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &bad_randao_reveal, None)
.await
.unwrap_err();
// Check failure with `skip_randao_verification` (requires infinity sig).
self.client
.get_validator_blinded_blocks_modular::<E, Payload>(
.get_validator_blinded_blocks_modular::<E>(
slot,
&bad_randao_reveal,
None,
@ -3520,13 +3523,7 @@ impl ApiTester {
.unwrap();
let payload: BlindedPayload<E> = match payload_type {
Blinded(payload) => payload
.data
.block()
.body()
.execution_payload()
.unwrap()
.into(),
Blinded(payload) => payload.data.body().execution_payload().unwrap().into(),
Full(_) => panic!("Expecting a blinded payload"),
};
@ -3545,11 +3542,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -3586,11 +3582,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -3630,13 +3625,7 @@ impl ApiTester {
.unwrap();
let payload: BlindedPayload<E> = match payload_type {
Blinded(payload) => payload
.data
.block()
.body()
.execution_payload()
.unwrap()
.into(),
Blinded(payload) => payload.data.body().execution_payload().unwrap().into(),
Full(_) => panic!("Expecting a blinded payload"),
};
@ -3665,11 +3654,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -3711,13 +3699,7 @@ impl ApiTester {
.unwrap();
let payload: BlindedPayload<E> = match payload_type {
Blinded(payload) => payload
.data
.block()
.body()
.execution_payload()
.unwrap()
.into(),
Blinded(payload) => payload.data.body().execution_payload().unwrap().into(),
Full(_) => panic!("Expecting a blinded payload"),
};
@ -3752,11 +3734,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -3845,11 +3826,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -3936,11 +3916,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4026,11 +4005,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4102,11 +4080,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4162,11 +4139,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4235,11 +4211,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(next_slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4265,11 +4240,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(next_slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4370,11 +4344,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(next_slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4410,11 +4383,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(next_slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4524,11 +4496,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4608,11 +4579,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4673,11 +4643,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4738,11 +4707,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4803,11 +4771,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4867,11 +4834,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -4907,16 +4873,11 @@ impl ApiTester {
.await
.unwrap();
let block_contents = match payload_type {
let _block_contents = match payload_type {
Blinded(payload) => payload.data,
Full(_) => panic!("Expecting a blinded payload"),
};
let (_, maybe_sidecars) = block_contents.deconstruct();
// Response should contain blob sidecars
assert!(maybe_sidecars.is_some());
self
}
@ -4940,11 +4901,10 @@ impl ApiTester {
let payload: BlindedPayload<E> = self
.client
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
.get_validator_blinded_blocks::<E>(slot, &randao_reveal, None)
.await
.unwrap()
.data
.block()
.body()
.execution_payload()
.unwrap()
@ -5892,17 +5852,14 @@ async fn block_production_v3_ssz_with_skip_slots() {
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn blinded_block_production_full_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production::<FullPayload<_>>()
.await;
ApiTester::new().await.test_blinded_block_production().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn blinded_block_production_ssz_full_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production_ssz::<FullPayload<_>>()
.test_blinded_block_production_ssz()
.await;
}
@ -5911,7 +5868,7 @@ async fn blinded_block_production_with_skip_slots_full_payload_premerge() {
ApiTester::new()
.await
.skip_slots(E::slots_per_epoch() * 2)
.test_blinded_block_production::<FullPayload<_>>()
.test_blinded_block_production()
.await;
}
@ -5920,7 +5877,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() {
ApiTester::new()
.await
.skip_slots(E::slots_per_epoch() * 2)
.test_blinded_block_production_ssz::<FullPayload<_>>()
.test_blinded_block_production_ssz()
.await;
}
@ -5928,7 +5885,7 @@ async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() {
async fn blinded_block_production_no_verify_randao_full_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production_no_verify_randao::<FullPayload<_>>()
.test_blinded_block_production_no_verify_randao()
.await;
}
@ -5936,16 +5893,13 @@ async fn blinded_block_production_no_verify_randao_full_payload_premerge() {
async fn blinded_block_production_verify_randao_invalid_full_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production_verify_randao_invalid::<FullPayload<_>>()
.test_blinded_block_production_verify_randao_invalid()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn blinded_block_production_blinded_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production::<BlindedPayload<_>>()
.await;
ApiTester::new().await.test_blinded_block_production().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -5953,7 +5907,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() {
ApiTester::new()
.await
.skip_slots(E::slots_per_epoch() * 2)
.test_blinded_block_production::<BlindedPayload<_>>()
.test_blinded_block_production()
.await;
}
@ -5961,7 +5915,7 @@ async fn blinded_block_production_with_skip_slots_blinded_payload_premerge() {
async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production_no_verify_randao::<BlindedPayload<_>>()
.test_blinded_block_production_no_verify_randao()
.await;
}
@ -5969,7 +5923,7 @@ async fn blinded_block_production_no_verify_randao_blinded_payload_premerge() {
async fn blinded_block_production_verify_randao_invalid_blinded_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production_verify_randao_invalid::<BlindedPayload<_>>()
.test_blinded_block_production_verify_randao_invalid()
.await;
}

View File

@ -563,10 +563,10 @@ impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
write!(f, "BlocksByRoot: Block slot: {}", block.slot())
}
RPCResponse::BlobsByRange(blob) => {
write!(f, "BlobsByRange: Blob slot: {}", blob.slot)
write!(f, "BlobsByRange: Blob slot: {}", blob.slot())
}
RPCResponse::BlobsByRoot(sidecar) => {
write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot)
write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot())
}
RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
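The switch from `blob.slot` to `blob.slot()` here (and from `blob.block_root` / `blob.block_parent_root` elsewhere in this diff) reflects the central change of this PR: the sidecar no longer duplicates block metadata as fields, but embeds the signed block header plus a commitment inclusion proof and derives those values through accessors. A simplified sketch of that shape, with stand-in types rather than the real Lighthouse definitions:

```rust
// Illustrative only; the real types carry the blob, KZG commitment/proof and the
// inclusion proof, and `block_root()` is the tree hash root of the header message.
type Hash256 = [u8; 32];

struct BeaconBlockHeader {
    slot: u64,
    proposer_index: u64,
    parent_root: Hash256,
    state_root: Hash256,
    body_root: Hash256,
}

struct SignedBeaconBlockHeader {
    message: BeaconBlockHeader,
    signature: [u8; 96],
}

struct BlobSidecar {
    index: u64,
    // blob, kzg_commitment, kzg_proof, kzg_commitment_inclusion_proof elided
    signed_block_header: SignedBeaconBlockHeader,
}

impl BlobSidecar {
    fn slot(&self) -> u64 {
        self.signed_block_header.message.slot
    }
    fn block_parent_root(&self) -> Hash256 {
        self.signed_block_header.message.parent_root
    }
    fn block_proposer_index(&self) -> u64 {
        self.signed_block_header.message.proposer_index
    }
}
```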

View File

@ -9,19 +9,20 @@ use std::boxed::Box;
use std::io::{Error, ErrorKind};
use std::sync::Arc;
use types::{
Attestation, AttesterSlashing, EthSpec, ForkContext, ForkName, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
SignedBeaconBlockDeneb, SignedBeaconBlockMerge, SignedBlobSidecar, SignedBlsToExecutionChange,
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
Attestation, AttesterSlashing, BlobSidecar, EthSpec, ForkContext, ForkName,
LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase,
SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockMerge,
SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
SyncCommitteeMessage, SyncSubnetId,
};
#[derive(Debug, Clone, PartialEq)]
pub enum PubsubMessage<T: EthSpec> {
/// Gossipsub message providing notification of a new block.
BeaconBlock(Arc<SignedBeaconBlock<T>>),
/// Gossipsub message providing notification of a [`SignedBlobSidecar`] along with the subnet id where it was received.
BlobSidecar(Box<(u64, SignedBlobSidecar<T>)>),
/// Gossipsub message providing notification of a [`BlobSidecar`] along with the subnet id where it was received.
BlobSidecar(Box<(u64, Arc<BlobSidecar<T>>)>),
/// Gossipsub message providing notification of an aggregate attestation and associated proof.
AggregateAndProofAttestation(Box<SignedAggregateAndProof<T>>),
/// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id.
@ -204,8 +205,10 @@ impl<T: EthSpec> PubsubMessage<T> {
GossipKind::BlobSidecar(blob_index) => {
match fork_context.from_context_bytes(gossip_topic.fork_digest) {
Some(ForkName::Deneb) => {
let blob_sidecar = SignedBlobSidecar::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?;
let blob_sidecar = Arc::new(
BlobSidecar::from_ssz_bytes(data)
.map_err(|e| format!("{:?}", e))?,
);
Ok(PubsubMessage::BlobSidecar(Box::new((
*blob_index,
blob_sidecar,
@ -318,7 +321,8 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
PubsubMessage::BlobSidecar(data) => write!(
f,
"BlobSidecar: slot: {}, blob index: {}",
data.1.message.slot, data.1.message.index,
data.1.slot(),
data.1.index,
),
PubsubMessage::AggregateAndProofAttestation(att) => write!(
f,

View File

@ -33,10 +33,11 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH};
use store::hot_cold_store::HotColdDBError;
use tokio::sync::mpsc;
use types::{
Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, LightClientFinalityUpdate,
LightClientOptimisticUpdate, ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock,
SignedBlobSidecar, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit,
Slot, SubnetId, SyncCommitteeMessage, SyncSubnetId,
Attestation, AttesterSlashing, BlobSidecar, EthSpec, Hash256, IndexedAttestation,
LightClientFinalityUpdate, LightClientOptimisticUpdate, ProposerSlashing,
SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange,
SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage,
SyncSubnetId,
};
use beacon_processor::{
@ -607,20 +608,20 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
peer_id: PeerId,
_peer_client: Client,
blob_index: u64,
signed_blob: SignedBlobSidecar<T::EthSpec>,
blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
seen_duration: Duration,
) {
let slot = signed_blob.message.slot;
let root = signed_blob.message.block_root;
let index = signed_blob.message.index;
let commitment = signed_blob.message.kzg_commitment;
let slot = blob_sidecar.slot();
let root = blob_sidecar.block_root();
let index = blob_sidecar.index;
let commitment = blob_sidecar.kzg_commitment;
let delay = get_slot_delay_ms(seen_duration, slot, &self.chain.slot_clock);
// Log metrics to track delay from other nodes on the network.
metrics::observe_duration(&metrics::BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME, delay);
metrics::set_gauge(&metrics::BEACON_BLOB_LAST_DELAY, delay.as_millis() as i64);
match self
.chain
.verify_blob_sidecar_for_gossip(signed_blob, blob_index)
.verify_blob_sidecar_for_gossip(blob_sidecar, blob_index)
{
Ok(gossip_verified_blob) => {
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOB_VERIFIED_TOTAL);
@ -631,7 +632,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
self.log,
"Gossip blob arrived late";
"block_root" => ?gossip_verified_blob.block_root(),
"proposer_index" => gossip_verified_blob.proposer_index(),
"proposer_index" => gossip_verified_blob.block_proposer_index(),
"slot" => gossip_verified_blob.slot(),
"delay" => ?delay,
"commitment" => %gossip_verified_blob.kzg_commitment(),
@ -670,17 +671,30 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
self.log,
"Unknown parent hash for blob";
"action" => "requesting parent",
"block_root" => %blob.block_root,
"parent_root" => %blob.block_parent_root,
"block_root" => %blob.block_root(),
"parent_root" => %blob.block_parent_root(),
"commitment" => %commitment,
);
self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob));
}
GossipBlobError::ProposerSignatureInvalid
GossipBlobError::KzgNotInitialized
| GossipBlobError::PubkeyCacheTimeout
| GossipBlobError::BeaconChainError(_) => {
crit!(
self.log,
"Internal error when verifying blob sidecar";
"error" => ?err,
)
}
GossipBlobError::ProposalSignatureInvalid
| GossipBlobError::UnknownValidator(_)
| GossipBlobError::ProposerIndexMismatch { .. }
| GossipBlobError::BlobIsNotLaterThanParent { .. }
| GossipBlobError::InvalidSubnet { .. } => {
| GossipBlobError::InvalidSubnet { .. }
| GossipBlobError::InvalidInclusionProof
| GossipBlobError::KzgError(_)
| GossipBlobError::InclusionProof(_)
| GossipBlobError::NotFinalizedDescendant { .. } => {
warn!(
self.log,
"Could not verify blob sidecar for gossip. Rejecting the blob sidecar";
@ -703,7 +717,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
);
}
GossipBlobError::FutureSlot { .. }
| GossipBlobError::BeaconChainError(_)
| GossipBlobError::RepeatBlob { .. }
| GossipBlobError::PastFinalizedSlot { .. } => {
warn!(
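The reshuffled match arms above sort the `GossipBlobError` variants into three broad outcomes: internal failures logged as critical, peer-attributable failures that reject the blob, and benign or timing-related cases that are merely warned about. A rough, illustrative sketch of that classification (the enum and buckets are simplified and do not capture the exact Lighthouse behaviour, peer scoring, or logging):

```rust
// Illustrative classification only.
#[derive(Debug)]
enum GossipBlobError {
    // Failures of the node itself.
    KzgNotInitialized,
    BeaconChainError(String),
    // Failures attributable to the sending peer.
    ProposalSignatureInvalid,
    InvalidInclusionProof,
    InvalidSubnet { expected: u64, received: u64 },
    // Benign or timing-related cases.
    FutureSlot { blob_slot: u64 },
    RepeatBlob { slot: u64, index: u64 },
}

enum Outcome {
    LogCritical, // bug or misconfiguration on our side
    Reject,      // penalise the peer and drop the blob
    Ignore,      // drop quietly (e.g. duplicates, clock skew)
}

fn classify(err: &GossipBlobError) -> Outcome {
    use GossipBlobError::*;
    match err {
        KzgNotInitialized | BeaconChainError(_) => Outcome::LogCritical,
        ProposalSignatureInvalid | InvalidInclusionProof | InvalidSubnet { .. } => Outcome::Reject,
        FutureSlot { .. } | RepeatBlob { .. } => Outcome::Ignore,
    }
}
```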

View File

@ -212,7 +212,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
peer_id: PeerId,
peer_client: Client,
blob_index: u64,
blob: SignedBlobSidecar<T::EthSpec>,
blob_sidecar: Arc<BlobSidecar<T::EthSpec>>,
seen_timestamp: Duration,
) -> Result<(), Error<T::EthSpec>> {
let processor = self.clone();
@ -223,7 +223,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
peer_id,
peer_client,
blob_index,
blob,
blob_sidecar,
seen_timestamp,
)
.await
@ -231,7 +231,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
self.try_send(BeaconWorkEvent {
drop_during_sync: false,
work: Work::GossipSignedBlobSidecar(Box::pin(process_fn)),
work: Work::GossipBlobSidecar(Box::pin(process_fn)),
})
}

View File

@ -292,7 +292,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
) {
let Some(slot) = blobs
.iter()
.find_map(|blob| blob.as_ref().map(|blob| blob.slot))
.find_map(|blob| blob.as_ref().map(|blob| blob.slot()))
else {
return;
};

View File

@ -33,8 +33,8 @@ use std::time::Duration;
use tokio::sync::mpsc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{
Attestation, AttesterSlashing, Epoch, Hash256, MainnetEthSpec, ProposerSlashing,
SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecarList, SignedVoluntaryExit, Slot,
Attestation, AttesterSlashing, BlobSidecar, BlobSidecarList, Epoch, Hash256, MainnetEthSpec,
ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, Slot,
SubnetId,
};
@ -55,7 +55,7 @@ const STANDARD_TIMEOUT: Duration = Duration::from_secs(10);
struct TestRig {
chain: Arc<BeaconChain<T>>,
next_block: Arc<SignedBeaconBlock<E>>,
next_blobs: Option<SignedBlobSidecarList<E>>,
next_blobs: Option<BlobSidecarList<E>>,
attestations: Vec<(Attestation<E>, SubnetId)>,
next_block_attestations: Vec<(Attestation<E>, SubnetId)>,
next_block_aggregate_attestations: Vec<SignedAggregateAndProof<E>>,
@ -186,8 +186,10 @@ impl TestRig {
let log = harness.logger().clone();
let mut beacon_processor_config = BeaconProcessorConfig::default();
beacon_processor_config.enable_backfill_rate_limiting = enable_backfill_rate_limiting;
let beacon_processor_config = BeaconProcessorConfig {
enable_backfill_rate_limiting,
..Default::default()
};
let BeaconProcessorChannels {
beacon_processor_tx,
beacon_processor_rx,
@ -243,12 +245,17 @@ impl TestRig {
chain.spec.maximum_gossip_clock_disparity(),
);
assert!(!beacon_processor.is_err());
assert!(beacon_processor.is_ok());
let block = next_block_tuple.0;
let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 {
Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap())
} else {
None
};
Self {
chain,
next_block: Arc::new(next_block_tuple.0),
next_blobs: next_block_tuple.1,
next_block: Arc::new(block),
next_blobs: blob_sidecars,
attestations,
next_block_attestations,
next_block_aggregate_attestations,
@ -293,7 +300,7 @@ impl TestRig {
junk_message_id(),
junk_peer_id(),
Client::default(),
blob.message.index,
blob.index,
blob.clone(),
Duration::from_secs(0),
)
@ -306,7 +313,7 @@ impl TestRig {
self.network_beacon_processor
.send_rpc_beacon_block(
block_root,
RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()),
RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()),
std::time::Duration::default(),
BlockProcessType::ParentLookup {
chain_hash: Hash256::random(),
@ -320,7 +327,7 @@ impl TestRig {
self.network_beacon_processor
.send_rpc_beacon_block(
block_root,
RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()),
RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone()),
std::time::Duration::default(),
BlockProcessType::SingleBlock { id: 1 },
)
@ -328,12 +335,7 @@ impl TestRig {
}
pub fn enqueue_single_lookup_rpc_blobs(&self) {
if let Some(blobs) = self.next_blobs.clone() {
let blobs = FixedBlobSidecarList::from(
blobs
.into_iter()
.map(|b| Some(b.message))
.collect::<Vec<_>>(),
);
let blobs = FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::<Vec<_>>());
self.network_beacon_processor
.send_rpc_blobs(
self.next_block.canonical_root(),
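The `FixedBlobSidecarList::from(...)` conversion above packs the test rig's (now unsigned) sidecars into a fixed-length list with one `Option` slot per possible blob index. A rough sketch of the idea with simplified types; the constant and the placement-by-index are illustrative, whereas the conversion in the diff simply fills the slots positionally:

```rust
// Illustrative sketch of a fixed-length, index-addressed sidecar list.
use std::sync::Arc;

const MAX_BLOBS_PER_BLOCK: usize = 6; // illustrative constant, not the canonical source of truth

#[derive(Debug)]
struct BlobSidecar {
    index: u64,
}

fn into_fixed(
    sidecars: Vec<Arc<BlobSidecar>>,
) -> [Option<Arc<BlobSidecar>>; MAX_BLOBS_PER_BLOCK] {
    let mut fixed: [Option<Arc<BlobSidecar>>; MAX_BLOBS_PER_BLOCK] =
        std::array::from_fn(|_| None);
    for sidecar in sidecars {
        // Place each sidecar in the slot matching its blob index, ignoring out-of-range indices.
        if let Some(slot) = fixed.get_mut(sidecar.index as usize) {
            *slot = Some(sidecar);
        }
    }
    fixed
}
```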

View File

@ -302,14 +302,14 @@ impl<T: BeaconChainTypes> Router<T> {
),
),
PubsubMessage::BlobSidecar(data) => {
let (blob_index, signed_blob) = *data;
let (blob_index, blob_sidecar) = *data;
self.handle_beacon_processor_send_result(
self.network_beacon_processor.send_gossip_blob_sidecar(
message_id,
peer_id,
self.network_globals.client(&peer_id),
blob_index,
signed_blob,
blob_sidecar,
timestamp_now(),
),
)

View File

@ -428,7 +428,7 @@ impl<L: Lookup, T: BeaconChainTypes> RequestState<L, T> for BlobRequestState<L,
verified_response
.into_iter()
.filter_map(|blob| blob.as_ref())
.map(|blob| blob.block_parent_root)
.map(|blob| blob.block_parent_root())
.next()
}

View File

@ -213,10 +213,8 @@ impl TestRig {
) -> (SignedBeaconBlock<E>, Vec<BlobSidecar<E>>) {
let (mut block, mut blobs) = self.rand_block_and_blobs(fork_name, num_blobs);
*block.message_mut().parent_root_mut() = parent_root;
let block_root = block.canonical_root();
blobs.iter_mut().for_each(|blob| {
blob.block_parent_root = parent_root;
blob.block_root = block_root;
blob.signed_block_header = block.signed_block_header();
});
(block, blobs)
}
@ -1293,7 +1291,7 @@ mod deneb_only {
let child_blob = blobs.first().cloned().unwrap();
let parent_root = block_root;
let child_root = child_blob.block_root;
let child_root = child_blob.block_root();
block_root = child_root;
let mut blobs = FixedBlobSidecarList::default();

View File

@ -46,7 +46,7 @@ impl<T: EthSpec> BlocksAndBlobsRequestInfo<T> {
while {
let pair_next_blob = blob_iter
.peek()
.map(|sidecar| sidecar.slot == block.slot())
.map(|sidecar| sidecar.slot() == block.slot())
.unwrap_or(false);
pair_next_blob
} {

View File

@ -637,9 +637,9 @@ impl<T: BeaconChainTypes> SyncManager<T> {
);
}
SyncMessage::UnknownParentBlob(peer_id, blob) => {
let blob_slot = blob.slot;
let block_root = blob.block_root;
let parent_root = blob.block_parent_root;
let blob_slot = blob.slot();
let block_root = blob.block_root();
let parent_root = blob.block_parent_root();
let blob_index = blob.index;
if blob_index >= T::EthSpec::max_blobs_per_block() as u64 {
warn!(self.log, "Peer sent blob with invalid index"; "index" => blob_index, "peer_id" => %peer_id);

View File

@ -769,9 +769,9 @@ impl BeaconNodeHttpClient {
/// `POST beacon/blocks`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blocks<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn post_beacon_blocks<T: EthSpec>(
&self,
block_contents: &SignedBlockContents<T, Payload>,
block_contents: &PublishBlockRequest<T>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
@ -789,9 +789,9 @@ impl BeaconNodeHttpClient {
/// `POST beacon/blocks`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn post_beacon_blocks_ssz<T: EthSpec>(
&self,
block_contents: &SignedBlockContents<T, Payload>,
block_contents: &PublishBlockRequest<T>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
@ -813,9 +813,9 @@ impl BeaconNodeHttpClient {
/// `POST beacon/blinded_blocks`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blinded_blocks<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn post_beacon_blinded_blocks<T: EthSpec>(
&self,
block: &SignedBlockContents<T, Payload>,
block: &SignedBlindedBeaconBlock<T>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
@ -833,9 +833,9 @@ impl BeaconNodeHttpClient {
/// `POST beacon/blinded_blocks`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blinded_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn post_beacon_blinded_blocks_ssz<T: EthSpec>(
&self,
block: &SignedBlockContents<T, Payload>,
block: &SignedBlindedBeaconBlock<T>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
@ -887,9 +887,9 @@ impl BeaconNodeHttpClient {
}
/// `POST v2/beacon/blocks`
pub async fn post_beacon_blocks_v2<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn post_beacon_blocks_v2<T: EthSpec>(
&self,
block_contents: &SignedBlockContents<T, Payload>,
block_contents: &PublishBlockRequest<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
@ -904,9 +904,9 @@ impl BeaconNodeHttpClient {
}
/// `POST v2/beacon/blocks`
pub async fn post_beacon_blocks_v2_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn post_beacon_blocks_v2_ssz<T: EthSpec>(
&self,
block_contents: &SignedBlockContents<T, Payload>,
block_contents: &PublishBlockRequest<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version_and_ssz_body(
@ -921,16 +921,16 @@ impl BeaconNodeHttpClient {
}
/// `POST v2/beacon/blinded_blocks`
pub async fn post_beacon_blinded_blocks_v2<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn post_beacon_blinded_blocks_v2<T: EthSpec>(
&self,
block_contents: &SignedBlockContents<T, Payload>,
signed_block: &SignedBlindedBeaconBlock<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version(
self.post_beacon_blinded_blocks_v2_path(validation_level)?,
block_contents,
signed_block,
Some(self.timeouts.proposal),
block_contents.signed_block().message().body().fork_name(),
signed_block.message().body().fork_name(),
)
.await?;
@ -940,14 +940,14 @@ impl BeaconNodeHttpClient {
/// `POST v2/beacon/blinded_blocks`
pub async fn post_beacon_blinded_blocks_v2_ssz<T: EthSpec>(
&self,
block_contents: &SignedBlindedBlockContents<T>,
signed_block: &SignedBlindedBeaconBlock<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version_and_ssz_body(
self.post_beacon_blinded_blocks_v2_path(validation_level)?,
block_contents.as_ssz_bytes(),
signed_block.as_ssz_bytes(),
Some(self.timeouts.proposal),
block_contents.signed_block().message().body().fork_name(),
signed_block.message().body().fork_name(),
)
.await?;
@ -1700,38 +1700,33 @@ impl BeaconNodeHttpClient {
}
/// `GET v2/validator/blocks/{slot}`
pub async fn get_validator_blocks<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blocks<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
graffiti: Option<&Graffiti>,
) -> Result<ForkVersionedResponse<BlockContents<T, Payload>>, Error> {
) -> Result<ForkVersionedResponse<FullBlockContents<T>>, Error> {
self.get_validator_blocks_modular(slot, randao_reveal, graffiti, SkipRandaoVerification::No)
.await
}
/// `GET v2/validator/blocks/{slot}`
pub async fn get_validator_blocks_modular<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blocks_modular<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
graffiti: Option<&Graffiti>,
skip_randao_verification: SkipRandaoVerification,
) -> Result<ForkVersionedResponse<BlockContents<T, Payload>>, Error> {
) -> Result<ForkVersionedResponse<FullBlockContents<T>>, Error> {
let path = self
.get_validator_blocks_path::<T, Payload>(
slot,
randao_reveal,
graffiti,
skip_randao_verification,
)
.get_validator_blocks_path::<T>(slot, randao_reveal, graffiti, skip_randao_verification)
.await?;
self.get(path).await
}
/// returns `GET v2/validator/blocks/{slot}` URL path
pub async fn get_validator_blocks_path<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blocks_path<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
@ -1837,12 +1832,12 @@ impl BeaconNodeHttpClient {
if is_blinded_payload {
let blinded_payload = response
.json::<ForkVersionedResponse<BlockContents<T, BlindedPayload<T>>>>()
.json::<ForkVersionedResponse<BlindedBeaconBlock<T>>>()
.await?;
Ok(ForkVersionedBeaconBlockType::Blinded(blinded_payload))
} else {
let full_payload = response
.json::<ForkVersionedResponse<BlockContents<T, FullPayload<T>>>>()
.json::<ForkVersionedResponse<FullBlockContents<T>>>()
.await?;
Ok(ForkVersionedBeaconBlockType::Full(full_payload))
}
@ -1901,13 +1896,13 @@ impl BeaconNodeHttpClient {
}
/// `GET v2/validator/blocks/{slot}` in ssz format
pub async fn get_validator_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blocks_ssz<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
graffiti: Option<&Graffiti>,
) -> Result<Option<Vec<u8>>, Error> {
self.get_validator_blocks_modular_ssz::<T, Payload>(
self.get_validator_blocks_modular_ssz::<T>(
slot,
randao_reveal,
graffiti,
@ -1917,7 +1912,7 @@ impl BeaconNodeHttpClient {
}
/// `GET v2/validator/blocks/{slot}` in ssz format
pub async fn get_validator_blocks_modular_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blocks_modular_ssz<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
@ -1925,12 +1920,7 @@ impl BeaconNodeHttpClient {
skip_randao_verification: SkipRandaoVerification,
) -> Result<Option<Vec<u8>>, Error> {
let path = self
.get_validator_blocks_path::<T, Payload>(
slot,
randao_reveal,
graffiti,
skip_randao_verification,
)
.get_validator_blocks_path::<T>(slot, randao_reveal, graffiti, skip_randao_verification)
.await?;
self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_validator_block_ssz)
@ -1938,12 +1928,12 @@ impl BeaconNodeHttpClient {
}
/// `GET v2/validator/blinded_blocks/{slot}`
pub async fn get_validator_blinded_blocks<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blinded_blocks<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
graffiti: Option<&Graffiti>,
) -> Result<ForkVersionedResponse<BlockContents<T, Payload>>, Error> {
) -> Result<ForkVersionedResponse<BlindedBeaconBlock<T>>, Error> {
self.get_validator_blinded_blocks_modular(
slot,
randao_reveal,
@ -1954,7 +1944,7 @@ impl BeaconNodeHttpClient {
}
/// returns `GET v1/validator/blinded_blocks/{slot}` URL path
pub async fn get_validator_blinded_blocks_path<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blinded_blocks_path<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
@ -1986,18 +1976,15 @@ impl BeaconNodeHttpClient {
}
/// `GET v1/validator/blinded_blocks/{slot}`
pub async fn get_validator_blinded_blocks_modular<
T: EthSpec,
Payload: AbstractExecPayload<T>,
>(
pub async fn get_validator_blinded_blocks_modular<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
graffiti: Option<&Graffiti>,
skip_randao_verification: SkipRandaoVerification,
) -> Result<ForkVersionedResponse<BlockContents<T, Payload>>, Error> {
) -> Result<ForkVersionedResponse<BlindedBeaconBlock<T>>, Error> {
let path = self
.get_validator_blinded_blocks_path::<T, Payload>(
.get_validator_blinded_blocks_path::<T>(
slot,
randao_reveal,
graffiti,
@ -2009,13 +1996,13 @@ impl BeaconNodeHttpClient {
}
/// `GET v2/validator/blinded_blocks/{slot}` in ssz format
pub async fn get_validator_blinded_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
pub async fn get_validator_blinded_blocks_ssz<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
graffiti: Option<&Graffiti>,
) -> Result<Option<Vec<u8>>, Error> {
self.get_validator_blinded_blocks_modular_ssz::<T, Payload>(
self.get_validator_blinded_blocks_modular_ssz::<T>(
slot,
randao_reveal,
graffiti,
@ -2024,10 +2011,7 @@ impl BeaconNodeHttpClient {
.await
}
pub async fn get_validator_blinded_blocks_modular_ssz<
T: EthSpec,
Payload: AbstractExecPayload<T>,
>(
pub async fn get_validator_blinded_blocks_modular_ssz<T: EthSpec>(
&self,
slot: Slot,
randao_reveal: &SignatureBytes,
@ -2035,7 +2019,7 @@ impl BeaconNodeHttpClient {
skip_randao_verification: SkipRandaoVerification,
) -> Result<Option<Vec<u8>>, Error> {
let path = self
.get_validator_blinded_blocks_path::<T, Payload>(
.get_validator_blinded_blocks_path::<T>(
slot,
randao_reveal,
graffiti,


@ -12,9 +12,7 @@ use std::convert::TryFrom;
use std::fmt::{self, Display};
use std::str::{from_utf8, FromStr};
use std::time::Duration;
use tree_hash::TreeHash;
use types::beacon_block_body::KzgCommitments;
use types::builder_bid::BlindedBlobsBundle;
pub use types::*;
#[cfg(feature = "lighthouse")]
@ -901,9 +899,9 @@ pub struct SseBlobSidecar {
impl SseBlobSidecar {
pub fn from_blob_sidecar<E: EthSpec>(blob_sidecar: &BlobSidecar<E>) -> SseBlobSidecar {
SseBlobSidecar {
block_root: blob_sidecar.block_root,
block_root: blob_sidecar.block_root(),
index: blob_sidecar.index,
slot: blob_sidecar.slot,
slot: blob_sidecar.slot(),
kzg_commitment: blob_sidecar.kzg_commitment,
versioned_hash: blob_sidecar.kzg_commitment.calculate_versioned_hash(),
}
@ -1411,15 +1409,14 @@ pub mod serde_status_code {
}
pub enum ForkVersionedBeaconBlockType<T: EthSpec> {
Full(ForkVersionedResponse<BlockContents<T, FullPayload<T>>>),
Blinded(ForkVersionedResponse<BlockContents<T, BlindedPayload<T>>>),
Full(ForkVersionedResponse<FullBlockContents<T>>),
Blinded(ForkVersionedResponse<BlindedBeaconBlock<T>>),
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use std::sync::Arc;
#[test]
fn query_vec() {
@ -1460,17 +1457,17 @@ mod tests {
type E = MainnetEthSpec;
let spec = ForkName::Capella.make_genesis_spec(E::default_spec());
let block: SignedBlockContents<E, FullPayload<E>> = SignedBeaconBlock::from_block(
let block: PublishBlockRequest<E> = SignedBeaconBlock::from_block(
BeaconBlock::<E>::Capella(BeaconBlockCapella::empty(&spec)),
Signature::empty(),
)
.try_into()
.expect("should convert into signed block contents");
let decoded: SignedBlockContents<E> =
SignedBlockContents::from_ssz_bytes(&block.as_ssz_bytes(), &spec)
let decoded: PublishBlockRequest<E> =
PublishBlockRequest::from_ssz_bytes(&block.as_ssz_bytes(), &spec)
.expect("should decode Block");
assert!(matches!(decoded, SignedBlockContents::Block(_)));
assert!(matches!(decoded, PublishBlockRequest::Block(_)));
}
#[test]
@ -1482,87 +1479,49 @@ mod tests {
BeaconBlock::<E>::Deneb(BeaconBlockDeneb::empty(&spec)),
Signature::empty(),
);
let blobs = SignedSidecarList::from(vec![SignedSidecar {
message: Arc::new(BlobSidecar::empty()),
signature: Signature::empty(),
_phantom: Default::default(),
}]);
let signed_block_contents = SignedBlockContents::new(block, Some(blobs));
let blobs = BlobsList::<E>::from(vec![Blob::<E>::default()]);
let kzg_proofs = KzgProofs::<E>::from(vec![KzgProof::empty()]);
let signed_block_contents = PublishBlockRequest::new(block, Some((kzg_proofs, blobs)));
let decoded: SignedBlockContents<E, FullPayload<E>> =
SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec)
let decoded: PublishBlockRequest<E> =
PublishBlockRequest::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec)
.expect("should decode BlockAndBlobSidecars");
assert!(matches!(
decoded,
SignedBlockContents::BlockAndBlobSidecars(_)
));
}
#[test]
fn ssz_signed_blinded_block_contents_with_blobs() {
type E = MainnetEthSpec;
let mut spec = E::default_spec();
spec.altair_fork_epoch = Some(Epoch::new(0));
spec.bellatrix_fork_epoch = Some(Epoch::new(0));
spec.capella_fork_epoch = Some(Epoch::new(0));
spec.deneb_fork_epoch = Some(Epoch::new(0));
let blinded_block = SignedBeaconBlock::from_block(
BeaconBlock::<E, BlindedPayload<E>>::Deneb(BeaconBlockDeneb::empty(&spec)),
Signature::empty(),
);
let blinded_blobs = SignedSidecarList::from(vec![SignedSidecar {
message: Arc::new(BlindedBlobSidecar::empty()),
signature: Signature::empty(),
_phantom: Default::default(),
}]);
let signed_block_contents = SignedBlockContents::new(blinded_block, Some(blinded_blobs));
let decoded: SignedBlockContents<E, BlindedPayload<E>> =
SignedBlockContents::from_ssz_bytes(&signed_block_contents.as_ssz_bytes(), &spec)
.expect("should decode BlindedBlockAndBlobSidecars");
assert!(matches!(
decoded,
SignedBlockContents::BlindedBlockAndBlobSidecars(_)
));
assert!(matches!(decoded, PublishBlockRequest::BlockContents(_)));
}
}
/// A wrapper over a [`BeaconBlock`] or a [`BeaconBlockAndBlobSidecars`].
#[derive(Debug, Encode, Serialize, Deserialize)]
#[serde(untagged)]
#[serde(bound = "E: EthSpec")]
#[ssz(enum_behaviour = "transparent")]
pub enum ProduceBlockV3Response<E: EthSpec> {
Full(FullBlockContents<E>),
Blinded(BlindedBeaconBlock<E>),
}
/// A wrapper over a [`BeaconBlock`] or a [`BlockContents`].
#[derive(Debug, Encode, Serialize, Deserialize)]
#[serde(untagged)]
#[serde(bound = "T: EthSpec")]
#[ssz(enum_behaviour = "transparent")]
pub enum BlockContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
BlockAndBlobSidecars(BeaconBlockAndBlobSidecars<T, Payload>),
BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars<T, Payload>),
Block(BeaconBlock<T, Payload>),
pub enum FullBlockContents<T: EthSpec> {
/// This is a full deneb variant with block and blobs.
BlockContents(BlockContents<T>),
/// This variant is for all pre-deneb full blocks.
Block(BeaconBlock<T>),
}
pub type BlockContentsTuple<T, Payload> = (
BeaconBlock<T, Payload>,
Option<SidecarList<T, <Payload as AbstractExecPayload<T>>::Sidecar>>,
);
pub type BlockContentsTuple<T> = (BeaconBlock<T>, Option<(KzgProofs<T>, BlobsList<T>)>);
impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockContents<T, Payload> {
pub fn new(
block: BeaconBlock<T, Payload>,
blobs: Option<SidecarList<T, Payload::Sidecar>>,
) -> Self {
match (Payload::block_type(), blobs) {
(BlockType::Full, Some(blobs)) => {
Self::BlockAndBlobSidecars(BeaconBlockAndBlobSidecars {
block,
blob_sidecars: blobs,
})
}
(BlockType::Blinded, Some(blobs)) => {
Self::BlindedBlockAndBlobSidecars(BlindedBeaconBlockAndBlobSidecars {
blinded_block: block,
blinded_blob_sidecars: blobs,
})
}
(_, None) => Self::Block(block),
impl<T: EthSpec> FullBlockContents<T> {
pub fn new(block: BeaconBlock<T>, blob_data: Option<(KzgProofs<T>, BlobsList<T>)>) -> Self {
match blob_data {
Some((kzg_proofs, blobs)) => Self::BlockContents(BlockContents {
block,
kzg_proofs,
blobs,
}),
None => Self::Block(block),
}
}
@ -1581,43 +1540,41 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockContents<T, Payload> {
match fork_at_slot {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
BeaconBlock::from_ssz_bytes(bytes, spec).map(|block| BlockContents::Block(block))
BeaconBlock::from_ssz_bytes(bytes, spec)
.map(|block| FullBlockContents::Block(block))
}
ForkName::Deneb => {
let mut builder = ssz::SszDecoderBuilder::new(bytes);
builder.register_anonymous_variable_length_item()?;
builder.register_type::<SidecarList<T, Payload::Sidecar>>()?;
builder.register_type::<KzgProofs<T>>()?;
builder.register_type::<BlobsList<T>>()?;
let mut decoder = builder.build()?;
let block =
decoder.decode_next_with(|bytes| BeaconBlock::from_ssz_bytes(bytes, spec))?;
let kzg_proofs = decoder.decode_next()?;
let blobs = decoder.decode_next()?;
Ok(BlockContents::new(block, Some(blobs)))
Ok(FullBlockContents::new(block, Some((kzg_proofs, blobs))))
}
}
}
pub fn block(&self) -> &BeaconBlock<T, Payload> {
pub fn block(&self) -> &BeaconBlock<T> {
match self {
BlockContents::BlockAndBlobSidecars(block_and_sidecars) => &block_and_sidecars.block,
BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => {
&block_and_sidecars.blinded_block
}
BlockContents::Block(block) => block,
FullBlockContents::BlockContents(block_and_sidecars) => &block_and_sidecars.block,
FullBlockContents::Block(block) => block,
}
}
pub fn deconstruct(self) -> BlockContentsTuple<T, Payload> {
pub fn deconstruct(self) -> BlockContentsTuple<T> {
match self {
BlockContents::BlockAndBlobSidecars(block_and_sidecars) => (
FullBlockContents::BlockContents(block_and_sidecars) => (
block_and_sidecars.block,
Some(block_and_sidecars.blob_sidecars),
Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)),
),
BlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => (
block_and_sidecars.blinded_block,
Some(block_and_sidecars.blinded_blob_sidecars),
),
BlockContents::Block(block) => (block, None),
FullBlockContents::Block(block) => (block, None),
}
}
@ -1628,104 +1585,64 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockContents<T, Payload> {
fork: &Fork,
genesis_validators_root: Hash256,
spec: &ChainSpec,
) -> SignedBlockContents<T, Payload> {
) -> PublishBlockRequest<T> {
let (block, maybe_blobs) = self.deconstruct();
let signed_block = block.sign(secret_key, fork, genesis_validators_root, spec);
let signed_blobs = maybe_blobs.map(|blobs| {
blobs
.into_iter()
.map(|blob| blob.sign(secret_key, fork, genesis_validators_root, spec))
.collect::<Vec<_>>()
.into()
});
SignedBlockContents::new(signed_block, signed_blobs)
PublishBlockRequest::new(signed_block, maybe_blobs)
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
for BlockContents<T, Payload>
{
impl<T: EthSpec> ForkVersionDeserialize for FullBlockContents<T> {
fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
value: serde_json::value::Value,
fork_name: ForkName,
) -> Result<Self, D::Error> {
match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
Ok(BlockContents::Block(BeaconBlock::deserialize_by_fork::<
'de,
D,
>(value, fork_name)?))
}
ForkName::Deneb => {
let block_contents = match Payload::block_type() {
BlockType::Blinded => BlockContents::BlindedBlockAndBlobSidecars(
BlindedBeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>(
value, fork_name,
)?,
),
BlockType::Full => BlockContents::BlockAndBlobSidecars(
BeaconBlockAndBlobSidecars::deserialize_by_fork::<'de, D>(
value, fork_name,
)?,
),
};
Ok(block_contents)
Ok(FullBlockContents::Block(
BeaconBlock::deserialize_by_fork::<'de, D>(value, fork_name)?,
))
}
ForkName::Deneb => Ok(FullBlockContents::BlockContents(
BlockContents::deserialize_by_fork::<'de, D>(value, fork_name)?,
)),
}
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> Into<BeaconBlock<T, Payload>>
for BlockContents<T, Payload>
{
fn into(self) -> BeaconBlock<T, Payload> {
impl<T: EthSpec> Into<BeaconBlock<T>> for FullBlockContents<T> {
fn into(self) -> BeaconBlock<T> {
match self {
Self::BlockAndBlobSidecars(block_and_sidecars) => block_and_sidecars.block,
Self::BlindedBlockAndBlobSidecars(block_and_sidecars) => {
block_and_sidecars.blinded_block
}
Self::BlockContents(block_and_sidecars) => block_and_sidecars.block,
Self::Block(block) => block,
}
}
}
pub type SignedBlockContentsTuple<T, Payload> = (
SignedBeaconBlock<T, Payload>,
Option<SignedSidecarList<T, <Payload as AbstractExecPayload<T>>::Sidecar>>,
);
pub type SignedBlockContentsTuple<T> = (SignedBeaconBlock<T>, Option<(KzgProofs<T>, BlobsList<T>)>);
pub type SignedBlindedBlockContents<E> = SignedBlockContents<E, BlindedPayload<E>>;
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`].
/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBlockContents`].
#[derive(Clone, Debug, Encode, Serialize, Deserialize)]
#[serde(untagged)]
#[serde(bound = "T: EthSpec")]
#[ssz(enum_behaviour = "transparent")]
pub enum SignedBlockContents<T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars<T, Payload>),
BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars<T, Payload>),
Block(SignedBeaconBlock<T, Payload>),
pub enum PublishBlockRequest<T: EthSpec> {
BlockContents(SignedBlockContents<T>),
Block(SignedBeaconBlock<T>),
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload> {
impl<T: EthSpec> PublishBlockRequest<T> {
pub fn new(
block: SignedBeaconBlock<T, Payload>,
blobs: Option<SignedSidecarList<T, Payload::Sidecar>>,
block: SignedBeaconBlock<T>,
blob_items: Option<(KzgProofs<T>, BlobsList<T>)>,
) -> Self {
match (Payload::block_type(), blobs) {
(BlockType::Full, Some(blobs)) => {
Self::BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars {
signed_block: block,
signed_blob_sidecars: blobs,
})
}
(BlockType::Blinded, Some(blobs)) => {
Self::BlindedBlockAndBlobSidecars(SignedBlindedBeaconBlockAndBlobSidecars {
signed_blinded_block: block,
signed_blinded_blob_sidecars: blobs,
})
}
(_, None) => Self::Block(block),
match blob_items {
Some((kzg_proofs, blobs)) => Self::BlockContents(SignedBlockContents {
signed_block: block,
kzg_proofs,
blobs,
}),
None => Self::Block(block),
}
}
@ -1745,133 +1662,88 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload
match fork_at_slot {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
SignedBeaconBlock::from_ssz_bytes(bytes, spec)
.map(|block| SignedBlockContents::Block(block))
.map(|block| PublishBlockRequest::Block(block))
}
ForkName::Deneb => {
let mut builder = ssz::SszDecoderBuilder::new(bytes);
builder.register_anonymous_variable_length_item()?;
builder.register_type::<SignedSidecarList<T, Payload::Sidecar>>()?;
builder.register_type::<KzgProofs<T>>()?;
builder.register_type::<BlobsList<T>>()?;
let mut decoder = builder.build()?;
let block = decoder
.decode_next_with(|bytes| SignedBeaconBlock::from_ssz_bytes(bytes, spec))?;
let kzg_proofs = decoder.decode_next()?;
let blobs = decoder.decode_next()?;
Ok(SignedBlockContents::new(block, Some(blobs)))
Ok(PublishBlockRequest::new(block, Some((kzg_proofs, blobs))))
}
}
}
pub fn signed_block(&self) -> &SignedBeaconBlock<T, Payload> {
pub fn signed_block(&self) -> &SignedBeaconBlock<T> {
match self {
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => {
PublishBlockRequest::BlockContents(block_and_sidecars) => {
&block_and_sidecars.signed_block
}
SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => {
&block_and_sidecars.signed_blinded_block
}
SignedBlockContents::Block(block) => block,
PublishBlockRequest::Block(block) => block,
}
}
pub fn blobs_cloned(&self) -> Option<SignedSidecarList<T, Payload::Sidecar>> {
pub fn deconstruct(self) -> SignedBlockContentsTuple<T> {
match self {
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => {
Some(block_and_sidecars.signed_blob_sidecars.clone())
}
SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => {
Some(block_and_sidecars.signed_blinded_blob_sidecars.clone())
}
SignedBlockContents::Block(_block) => None,
}
}
pub fn deconstruct(self) -> SignedBlockContentsTuple<T, Payload> {
match self {
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => (
PublishBlockRequest::BlockContents(block_and_sidecars) => (
block_and_sidecars.signed_block,
Some(block_and_sidecars.signed_blob_sidecars),
Some((block_and_sidecars.kzg_proofs, block_and_sidecars.blobs)),
),
SignedBlockContents::BlindedBlockAndBlobSidecars(block_and_sidecars) => (
block_and_sidecars.signed_blinded_block,
Some(block_and_sidecars.signed_blinded_blob_sidecars),
),
SignedBlockContents::Block(block) => (block, None),
PublishBlockRequest::Block(block) => (block, None),
}
}
}
impl<T: EthSpec> SignedBlockContents<T, BlindedPayload<T>> {
pub fn try_into_full_block_and_blobs(
self,
maybe_full_payload_contents: Option<FullPayloadContents<T>>,
) -> Result<SignedBlockContents<T, FullPayload<T>>, String> {
match self {
SignedBlockContents::BlindedBlockAndBlobSidecars(blinded_block_and_blob_sidecars) => {
match maybe_full_payload_contents {
None | Some(FullPayloadContents::Payload(_)) => {
Err("Can't build full block contents without payload and blobs".to_string())
}
Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => {
let signed_block = blinded_block_and_blob_sidecars
.signed_blinded_block
.try_into_full_block(Some(payload_and_blobs.execution_payload))
.ok_or("Failed to build full block with payload".to_string())?;
let signed_blob_sidecars: SignedBlobSidecarList<T> =
blinded_block_and_blob_sidecars
.signed_blinded_blob_sidecars
.into_iter()
.zip(payload_and_blobs.blobs_bundle.blobs)
.map(|(blinded_blob_sidecar, blob)| {
blinded_blob_sidecar.into_full_blob_sidecars(blob)
})
.collect::<Vec<_>>()
.into();
/// Converts a `SignedBlindedBeaconBlock` into a full `PublishBlockRequest`, filling in the
/// execution payload (and blobs post-deneb) from `maybe_full_payload_contents`.
pub fn into_full_block_and_blobs<T: EthSpec>(
blinded_block: SignedBlindedBeaconBlock<T>,
maybe_full_payload_contents: Option<FullPayloadContents<T>>,
) -> Result<PublishBlockRequest<T>, String> {
match maybe_full_payload_contents {
None => {
let signed_block = blinded_block
.try_into_full_block(None)
.ok_or("Failed to build full block with payload".to_string())?;
Ok(PublishBlockRequest::new(signed_block, None))
}
// This variant implies a pre-deneb block
Some(FullPayloadContents::Payload(execution_payload)) => {
let signed_block = blinded_block
.try_into_full_block(Some(execution_payload))
.ok_or("Failed to build full block with payload".to_string())?;
Ok(PublishBlockRequest::new(signed_block, None))
}
// This variant implies a post-deneb block
Some(FullPayloadContents::PayloadAndBlobs(payload_and_blobs)) => {
let signed_block = blinded_block
.try_into_full_block(Some(payload_and_blobs.execution_payload))
.ok_or("Failed to build full block with payload".to_string())?;
Ok(SignedBlockContents::new(
signed_block,
Some(signed_blob_sidecars),
))
}
}
}
SignedBlockContents::Block(blinded_block) => {
let full_payload_opt = maybe_full_payload_contents.map(|o| o.deconstruct().0);
blinded_block
.try_into_full_block(full_payload_opt)
.map(SignedBlockContents::Block)
.ok_or("Can't build full block without payload".to_string())
}
SignedBlockContents::BlockAndBlobSidecars(_) => Err(
"BlockAndBlobSidecars variant not expected when constructing full block"
.to_string(),
),
Ok(PublishBlockRequest::new(
signed_block,
Some((
payload_and_blobs.blobs_bundle.proofs,
payload_and_blobs.blobs_bundle.blobs,
)),
))
}
}
}
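For illustration (not part of this change), a minimal sketch of the intended call-site shape when unblinding a builder block for publication; `unblind_for_publish` is a hypothetical wrapper and imports are omitted:
fn unblind_for_publish<E: EthSpec>(
    blinded_block: SignedBlindedBeaconBlock<E>,
    payload_contents: Option<FullPayloadContents<E>>,
) -> Result<PublishBlockRequest<E>, String> {
    // Pre-deneb this yields `PublishBlockRequest::Block`; post-deneb the bundle's
    // proofs and blobs are carried into the `BlockContents` variant.
    into_full_block_and_blobs(blinded_block, payload_contents)
}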
impl<T: EthSpec> SignedBlockContents<T> {
pub fn clone_as_blinded(&self) -> SignedBlindedBlockContents<T> {
let blinded_blobs = self.blobs_cloned().map(|blob_sidecars| {
blob_sidecars
.into_iter()
.map(|blob| blob.into())
.collect::<Vec<_>>()
.into()
});
SignedBlockContents::new(self.signed_block().clone_as_blinded(), blinded_blobs)
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> TryFrom<SignedBeaconBlock<T, Payload>>
for SignedBlockContents<T, Payload>
{
impl<T: EthSpec> TryFrom<SignedBeaconBlock<T>> for PublishBlockRequest<T> {
type Error = &'static str;
fn try_from(block: SignedBeaconBlock<T, Payload>) -> Result<Self, Self::Error> {
fn try_from(block: SignedBeaconBlock<T>) -> Result<Self, Self::Error> {
match block {
SignedBeaconBlock::Base(_)
| SignedBeaconBlock::Altair(_)
| SignedBeaconBlock::Merge(_)
| SignedBeaconBlock::Capella(_) => Ok(SignedBlockContents::Block(block)),
| SignedBeaconBlock::Capella(_) => Ok(PublishBlockRequest::Block(block)),
SignedBeaconBlock::Deneb(_) => {
Err("deneb block contents cannot be fully constructed from just the signed block")
}
@ -1879,93 +1751,49 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> TryFrom<SignedBeaconBlock<T, P
}
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> From<SignedBlockContentsTuple<T, Payload>>
for SignedBlockContents<T, Payload>
{
fn from(block_contents_tuple: SignedBlockContentsTuple<T, Payload>) -> Self {
SignedBlockContents::new(block_contents_tuple.0, block_contents_tuple.1)
impl<T: EthSpec> From<SignedBlockContentsTuple<T>> for PublishBlockRequest<T> {
fn from(block_contents_tuple: SignedBlockContentsTuple<T>) -> Self {
PublishBlockRequest::new(block_contents_tuple.0, block_contents_tuple.1)
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Encode)]
#[serde(bound = "T: EthSpec")]
pub struct SignedBeaconBlockAndBlobSidecars<T: EthSpec, Payload: AbstractExecPayload<T>> {
pub signed_block: SignedBeaconBlock<T, Payload>,
pub signed_blob_sidecars: SignedSidecarList<T, Payload::Sidecar>,
pub struct SignedBlockContents<T: EthSpec> {
pub signed_block: SignedBeaconBlock<T>,
pub kzg_proofs: KzgProofs<T>,
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
pub blobs: BlobsList<T>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Encode)]
#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload<T>")]
pub struct BeaconBlockAndBlobSidecars<T: EthSpec, Payload: AbstractExecPayload<T>> {
pub block: BeaconBlock<T, Payload>,
pub blob_sidecars: SidecarList<T, Payload::Sidecar>,
#[serde(bound = "T: EthSpec")]
pub struct BlockContents<T: EthSpec> {
pub block: BeaconBlock<T>,
pub kzg_proofs: KzgProofs<T>,
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
pub blobs: BlobsList<T>,
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
for BeaconBlockAndBlobSidecars<T, Payload>
{
impl<T: EthSpec> ForkVersionDeserialize for BlockContents<T> {
fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
value: serde_json::value::Value,
fork_name: ForkName,
) -> Result<Self, D::Error> {
#[derive(Deserialize)]
#[serde(bound = "T: EthSpec, S: Sidecar<T>")]
struct Helper<T: EthSpec, S: Sidecar<T>> {
#[serde(bound = "T: EthSpec")]
struct Helper<T: EthSpec> {
block: serde_json::Value,
blob_sidecars: SidecarList<T, S>,
kzg_proofs: KzgProofs<T>,
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
blobs: BlobsList<T>,
}
let helper: Helper<T, Payload::Sidecar> =
serde_json::from_value(value).map_err(serde::de::Error::custom)?;
let helper: Helper<T> = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
Ok(Self {
block: BeaconBlock::deserialize_by_fork::<'de, D>(helper.block, fork_name)?,
blob_sidecars: helper.blob_sidecars,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Encode)]
#[serde(bound = "T: EthSpec")]
pub struct SignedBlindedBeaconBlockAndBlobSidecars<
T: EthSpec,
Payload: AbstractExecPayload<T> = BlindedPayload<T>,
> {
pub signed_blinded_block: SignedBeaconBlock<T, Payload>,
pub signed_blinded_blob_sidecars: SignedSidecarList<T, Payload::Sidecar>,
}
#[derive(Debug, Clone, Serialize, Deserialize, Encode)]
#[serde(bound = "T: EthSpec, Payload: AbstractExecPayload<T>")]
pub struct BlindedBeaconBlockAndBlobSidecars<
T: EthSpec,
Payload: AbstractExecPayload<T> = BlindedPayload<T>,
> {
pub blinded_block: BeaconBlock<T, Payload>,
pub blinded_blob_sidecars: SidecarList<T, Payload::Sidecar>,
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
for BlindedBeaconBlockAndBlobSidecars<T, Payload>
{
fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>(
value: serde_json::value::Value,
fork_name: ForkName,
) -> Result<Self, D::Error> {
#[derive(Deserialize)]
#[serde(bound = "T: EthSpec, S: Sidecar<T>")]
struct Helper<T: EthSpec, S: Sidecar<T>> {
blinded_block: serde_json::Value,
blinded_blob_sidecars: SidecarList<T, S>,
}
let helper: Helper<T, Payload::Sidecar> =
serde_json::from_value(value).map_err(serde::de::Error::custom)?;
Ok(Self {
blinded_block: BeaconBlock::deserialize_by_fork::<'de, D>(
helper.blinded_block,
fork_name,
)?,
blinded_blob_sidecars: helper.blinded_blob_sidecars,
kzg_proofs: helper.kzg_proofs,
blobs: helper.blobs,
})
}
}
@ -2051,18 +1879,3 @@ pub struct BlobsBundle<E: EthSpec> {
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
pub blobs: BlobsList<E>,
}
impl<E: EthSpec> Into<BlindedBlobsBundle<E>> for BlobsBundle<E> {
fn into(self) -> BlindedBlobsBundle<E> {
BlindedBlobsBundle {
commitments: self.commitments,
proofs: self.proofs,
blob_roots: self
.blobs
.into_iter()
.map(|blob| blob.tree_hash_root())
.collect::<Vec<_>>()
.into(),
}
}
}


@ -291,7 +291,7 @@ pub enum AttestationFromBlock {
}
/// Parameters which are cached between calls to `ForkChoice::get_head`.
#[derive(Clone, Copy)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ForkchoiceUpdateParameters {
/// The most recent result of running `ForkChoice::get_head`.
pub head_root: Hash256,


@ -7,4 +7,6 @@ pub use crate::fork_choice::{
QueuedAttestation, ResetPayloadStatuses,
};
pub use fork_choice_store::ForkChoiceStore;
pub use proto_array::{Block as ProtoBlock, ExecutionStatus, InvalidationOperation};
pub use proto_array::{
Block as ProtoBlock, ExecutionStatus, InvalidationOperation, ProposerHeadError,
};


@ -369,7 +369,7 @@ pub fn verify_merkle_proof(
}
/// Compute a root hash from a leaf and a Merkle proof.
fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 {
pub fn merkle_root_from_branch(leaf: H256, branch: &[H256], depth: usize, index: usize) -> H256 {
assert_eq!(branch.len(), depth, "proof length should equal depth");
let mut merkle_root = leaf.as_bytes().to_vec();


@ -188,7 +188,7 @@ where
}
/// Information about the proposer head used for opportunistic re-orgs.
#[derive(Clone)]
#[derive(Debug, Clone)]
pub struct ProposerHeadInfo {
/// Information about the *current* head block, which may be re-orged.
pub head_node: ProtoNode,
@ -206,7 +206,7 @@ pub struct ProposerHeadInfo {
///
/// This type intentionally does not implement `Debug` so that callers are forced to handle the
/// enum.
#[derive(Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq)]
pub enum ProposerHeadError<E> {
DoNotReOrg(DoNotReOrg),
Error(E),
@ -243,7 +243,7 @@ impl<E1> ProposerHeadError<E1> {
/// Reasons why a re-org should not be attempted.
///
/// This type intentionally does not implement `Debug` so that the `Display` impl must be used.
#[derive(Clone, PartialEq)]
#[derive(Debug, Clone, PartialEq)]
pub enum DoNotReOrg {
MissingHeadOrParentNode,
MissingHeadFinalizedCheckpoint,


@ -10,3 +10,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096
MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6
# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17


@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096
MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6
# `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17


@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096
MAX_BLOB_COMMITMENTS_PER_BLOCK: 16
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6
# [customized] `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9


@ -1,12 +1,14 @@
use crate::test_utils::TestRandom;
use crate::*;
use derivative::Derivative;
use merkle_proof::{MerkleTree, MerkleTreeError};
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use std::marker::PhantomData;
use superstruct::superstruct;
use test_random_derive::TestRandom;
use tree_hash::{TreeHash, BYTES_PER_CHUNK};
use tree_hash_derive::TreeHash;
pub type KzgCommitments<T> =
@ -14,6 +16,9 @@ pub type KzgCommitments<T> =
pub type KzgCommitmentOpts<T> =
FixedVector<Option<KzgCommitment>, <T as EthSpec>::MaxBlobsPerBlock>;
/// Index of the `blob_kzg_commitments` leaf in the `BeaconBlockBody` tree post-deneb.
pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11;
/// The body of a `BeaconChain` block, containing operations.
///
/// This *superstruct* abstracts over the hard-fork.
@ -98,6 +103,79 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T,
Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)),
}
}
/// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments`
/// at `index`.
pub fn kzg_commitment_merkle_proof(
&self,
index: usize,
) -> Result<FixedVector<Hash256, T::KzgCommitmentInclusionProofDepth>, Error> {
match self {
Self::Base(_) | Self::Altair(_) | Self::Merge(_) | Self::Capella(_) => {
Err(Error::IncorrectStateVariant)
}
Self::Deneb(body) => {
// We compute the branches by generating 2 merkle trees:
// 1. Merkle tree for the `blob_kzg_commitments` List object
// 2. Merkle tree for the `BeaconBlockBody` container
// We then merge the branches from both trees, all the way up to the root.
// Part1 (Branches for the subtree rooted at `blob_kzg_commitments`)
//
// Branches for `blob_kzg_commitments` without length mix-in
let depth = T::max_blob_commitments_per_block()
.next_power_of_two()
.ilog2();
let leaves: Vec<_> = body
.blob_kzg_commitments
.iter()
.map(|commitment| commitment.tree_hash_root())
.collect();
let tree = MerkleTree::create(&leaves, depth as usize);
let (_, mut proof) = tree
.generate_proof(index, depth as usize)
.map_err(Error::MerkleTreeError)?;
// Add the branch corresponding to the length mix-in.
let length = body.blob_kzg_commitments.len();
let usize_len = std::mem::size_of::<usize>();
let mut length_bytes = [0; BYTES_PER_CHUNK];
length_bytes
.get_mut(0..usize_len)
.ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))?
.copy_from_slice(&length.to_le_bytes());
let length_root = Hash256::from_slice(length_bytes.as_slice());
proof.push(length_root);
// Part 2
// Branches for `BeaconBlockBody` container
let leaves = [
body.randao_reveal.tree_hash_root(),
body.eth1_data.tree_hash_root(),
body.graffiti.tree_hash_root(),
body.proposer_slashings.tree_hash_root(),
body.attester_slashings.tree_hash_root(),
body.attestations.tree_hash_root(),
body.deposits.tree_hash_root(),
body.voluntary_exits.tree_hash_root(),
body.sync_aggregate.tree_hash_root(),
body.execution_payload.tree_hash_root(),
body.bls_to_execution_changes.tree_hash_root(),
body.blob_kzg_commitments.tree_hash_root(),
];
let beacon_block_body_depth = leaves.len().next_power_of_two().ilog2() as usize;
let tree = MerkleTree::create(&leaves, beacon_block_body_depth);
let (_, mut proof_body) = tree
.generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth)
.map_err(Error::MerkleTreeError)?;
// Join the proofs for the subtree and the main tree
proof.append(&mut proof_body);
debug_assert_eq!(proof.len(), T::kzg_proof_inclusion_proof_depth());
Ok(proof.into())
}
}
}
}
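The two branches joined above add up to the `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset introduced in the YAML configs. As an illustration (not part of this change), a self-contained sketch of that arithmetic for the mainnet preset:
fn main() {
    // Mainnet preset values, as in the presets above.
    let max_blob_commitments_per_block: usize = 4096;
    let beacon_block_body_leaves: usize = 12; // deneb `BeaconBlockBody` field count

    // Branch length within the `blob_kzg_commitments` list, without the length mix-in.
    let commitments_depth = max_blob_commitments_per_block
        .next_power_of_two()
        .ilog2() as usize; // 12
    // One extra branch node for the SSZ length mix-in.
    let subtree_depth = commitments_depth + 1; // 13
    // Branch length within the `BeaconBlockBody` container itself.
    let body_depth = beacon_block_body_leaves.next_power_of_two().ilog2() as usize; // 4

    // 12 + 1 + 4 = 17 == KZG_COMMITMENT_INCLUSION_PROOF_DEPTH (mainnet).
    assert_eq!(subtree_depth + body_depth, 17);
}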
impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> BeaconBlockBodyRef<'a, T, Payload> {


@ -60,6 +60,16 @@ impl BeaconBlockHeader {
signature,
}
}
pub fn empty() -> Self {
Self {
body_root: Default::default(),
parent_root: Default::default(),
proposer_index: Default::default(),
slot: Default::default(),
state_root: Default::default(),
}
}
}
#[cfg(test)]


@ -1,11 +1,18 @@
use crate::test_utils::TestRandom;
use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot};
use crate::{
beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob,
EthSpec, Hash256, SignedBeaconBlockHeader, Slot,
};
use crate::{KzgProofs, SignedBeaconBlock};
use bls::Signature;
use derivative::Derivative;
use kzg::{
Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT,
FIELD_ELEMENTS_PER_BLOB,
};
use merkle_proof::{merkle_root_from_branch, verify_merkle_proof, MerkleTreeError};
use rand::Rng;
use safe_arith::{ArithError, SafeArith};
use serde::{Deserialize, Serialize};
use ssz::Encode;
use ssz_derive::{Decode, Encode};
@ -67,47 +74,14 @@ impl Ord for BlobIdentifier {
#[arbitrary(bound = "T: EthSpec")]
#[derivative(PartialEq, Eq, Hash(bound = "T: EthSpec"))]
pub struct BlobSidecar<T: EthSpec> {
pub block_root: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub index: u64,
pub slot: Slot,
pub block_parent_root: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub proposer_index: u64,
#[serde(with = "ssz_types::serde_utils::hex_fixed_vec")]
pub blob: Blob<T>,
pub kzg_commitment: KzgCommitment,
pub kzg_proof: KzgProof,
}
impl<E: EthSpec> From<Arc<BlobSidecar<E>>> for BlindedBlobSidecar {
fn from(blob_sidecar: Arc<BlobSidecar<E>>) -> Self {
BlindedBlobSidecar {
block_root: blob_sidecar.block_root,
index: blob_sidecar.index,
slot: blob_sidecar.slot,
block_parent_root: blob_sidecar.block_parent_root,
proposer_index: blob_sidecar.proposer_index,
blob_root: blob_sidecar.blob.tree_hash_root(),
kzg_commitment: blob_sidecar.kzg_commitment,
kzg_proof: blob_sidecar.kzg_proof,
}
}
}
impl<E: EthSpec> From<BlobSidecar<E>> for BlindedBlobSidecar {
fn from(blob_sidecar: BlobSidecar<E>) -> Self {
BlindedBlobSidecar {
block_root: blob_sidecar.block_root,
index: blob_sidecar.index,
slot: blob_sidecar.slot,
block_parent_root: blob_sidecar.block_parent_root,
proposer_index: blob_sidecar.proposer_index,
blob_root: blob_sidecar.blob.tree_hash_root(),
kzg_commitment: blob_sidecar.kzg_commitment,
kzg_proof: blob_sidecar.kzg_proof,
}
}
pub signed_block_header: SignedBeaconBlockHeader,
pub kzg_commitment_inclusion_proof: FixedVector<Hash256, T::KzgCommitmentInclusionProofDepth>,
}
impl<T: EthSpec> PartialOrd for BlobSidecar<T> {
@ -122,29 +96,130 @@ impl<T: EthSpec> Ord for BlobSidecar<T> {
}
}
impl<T: EthSpec> SignedRoot for BlobSidecar<T> {}
#[derive(Debug)]
pub enum BlobSidecarError {
PreDeneb,
MissingKzgCommitment,
BeaconState(BeaconStateError),
MerkleTree(MerkleTreeError),
ArithError(ArithError),
}
impl From<BeaconStateError> for BlobSidecarError {
fn from(e: BeaconStateError) -> Self {
BlobSidecarError::BeaconState(e)
}
}
impl From<MerkleTreeError> for BlobSidecarError {
fn from(e: MerkleTreeError) -> Self {
BlobSidecarError::MerkleTree(e)
}
}
impl From<ArithError> for BlobSidecarError {
fn from(e: ArithError) -> Self {
BlobSidecarError::ArithError(e)
}
}
impl<T: EthSpec> BlobSidecar<T> {
pub fn new(
index: usize,
blob: Blob<T>,
signed_block: &SignedBeaconBlock<T>,
kzg_proof: KzgProof,
) -> Result<Self, BlobSidecarError> {
let expected_kzg_commitments = signed_block
.message()
.body()
.blob_kzg_commitments()
.map_err(|_e| BlobSidecarError::PreDeneb)?;
let kzg_commitment = *expected_kzg_commitments
.get(index)
.ok_or(BlobSidecarError::MissingKzgCommitment)?;
let kzg_commitment_inclusion_proof = signed_block
.message()
.body()
.kzg_commitment_merkle_proof(index)?;
Ok(Self {
index: index as u64,
blob,
kzg_commitment,
kzg_proof,
signed_block_header: signed_block.signed_block_header(),
kzg_commitment_inclusion_proof,
})
}
pub fn id(&self) -> BlobIdentifier {
BlobIdentifier {
block_root: self.block_root,
block_root: self.block_root(),
index: self.index,
}
}
pub fn slot(&self) -> Slot {
self.signed_block_header.message.slot
}
pub fn block_root(&self) -> Hash256 {
self.signed_block_header.message.tree_hash_root()
}
pub fn block_parent_root(&self) -> Hash256 {
self.signed_block_header.message.parent_root
}
pub fn block_proposer_index(&self) -> u64 {
self.signed_block_header.message.proposer_index
}
pub fn empty() -> Self {
Self {
block_root: Hash256::zero(),
index: 0,
slot: Slot::new(0),
block_parent_root: Hash256::zero(),
proposer_index: 0,
blob: Blob::<T>::default(),
kzg_commitment: KzgCommitment::empty_for_testing(),
kzg_proof: KzgProof::empty(),
signed_block_header: SignedBeaconBlockHeader {
message: BeaconBlockHeader::empty(),
signature: Signature::empty(),
},
kzg_commitment_inclusion_proof: Default::default(),
}
}
/// Verifies the kzg commitment inclusion merkle proof.
pub fn verify_blob_sidecar_inclusion_proof(&self) -> Result<bool, MerkleTreeError> {
// Depth of the subtree rooted at `blob_kzg_commitments` in the `BeaconBlockBody`
// is the depth of the SSZ list (log2 of its max length) plus 1 for the length mix-in.
let kzg_commitments_tree_depth = (T::max_blob_commitments_per_block()
.next_power_of_two()
.ilog2()
.safe_add(1))? as usize;
// Compute the `tree_hash_root` of the `blob_kzg_commitments` subtree using the
// inclusion proof branches
let blob_kzg_commitments_root = merkle_root_from_branch(
self.kzg_commitment.tree_hash_root(),
self.kzg_commitment_inclusion_proof
.get(0..kzg_commitments_tree_depth)
.ok_or(MerkleTreeError::PleaseNotifyTheDevs)?,
kzg_commitments_tree_depth,
self.index as usize,
);
// The remaining inclusion proof branches are for the top level `BeaconBlockBody` tree
Ok(verify_merkle_proof(
blob_kzg_commitments_root,
self.kzg_commitment_inclusion_proof
.get(kzg_commitments_tree_depth..T::kzg_proof_inclusion_proof_depth())
.ok_or(MerkleTreeError::PleaseNotifyTheDevs)?,
T::kzg_proof_inclusion_proof_depth().safe_sub(kzg_commitments_tree_depth)?,
BLOB_KZG_COMMITMENTS_INDEX,
self.signed_block_header.message.body_root,
))
}
pub fn random_valid<R: Rng>(rng: &mut R, kzg: &Kzg) -> Result<Self, String> {
let mut blob_bytes = vec![0u8; BYTES_PER_BLOB];
rng.fill_bytes(&mut blob_bytes);
@ -185,57 +260,22 @@ impl<T: EthSpec> BlobSidecar<T> {
// Fixed part
Self::empty().as_ssz_bytes().len()
}
}
#[derive(
Debug,
Clone,
Serialize,
Deserialize,
Encode,
Decode,
TreeHash,
TestRandom,
Derivative,
arbitrary::Arbitrary,
)]
#[derivative(PartialEq, Eq, Hash)]
pub struct BlindedBlobSidecar {
pub block_root: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub index: u64,
pub slot: Slot,
pub block_parent_root: Hash256,
#[serde(with = "serde_utils::quoted_u64")]
pub proposer_index: u64,
pub blob_root: Hash256,
pub kzg_commitment: KzgCommitment,
pub kzg_proof: KzgProof,
}
impl BlindedBlobSidecar {
pub fn empty() -> Self {
Self {
block_root: Hash256::zero(),
index: 0,
slot: Slot::new(0),
block_parent_root: Hash256::zero(),
proposer_index: 0,
blob_root: Hash256::zero(),
kzg_commitment: KzgCommitment::empty_for_testing(),
kzg_proof: KzgProof::empty(),
pub fn build_sidecars(
blobs: BlobsList<T>,
block: &SignedBeaconBlock<T>,
kzg_proofs: KzgProofs<T>,
) -> Result<BlobSidecarList<T>, BlobSidecarError> {
let mut blob_sidecars = vec![];
for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() {
let blob_sidecar = BlobSidecar::new(i, blob, block, *kzg_proof)?;
blob_sidecars.push(Arc::new(blob_sidecar));
}
Ok(VariableList::from(blob_sidecars))
}
}
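A sketch of how a caller can turn a signed block and its blob bundle into gossip-ready sidecars (illustrative only; `sidecars_for_gossip` is a hypothetical wrapper and imports are omitted):
fn sidecars_for_gossip<E: EthSpec>(
    block: &SignedBeaconBlock<E>,
    blobs: BlobsList<E>,
    kzg_proofs: KzgProofs<E>,
) -> Result<BlobSidecarList<E>, BlobSidecarError> {
    // Each sidecar `i` gets commitment `i` from the block body, the matching KZG
    // proof, the signed block header and a freshly computed inclusion proof.
    BlobSidecar::build_sidecars(blobs, block, kzg_proofs)
}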
impl SignedRoot for BlindedBlobSidecar {}
pub type SidecarList<T, Sidecar> = VariableList<Arc<Sidecar>, <T as EthSpec>::MaxBlobsPerBlock>;
pub type BlobSidecarList<T> = SidecarList<T, BlobSidecar<T>>;
pub type BlindedBlobSidecarList<T> = SidecarList<T, BlindedBlobSidecar>;
pub type BlobSidecarList<T> = VariableList<Arc<BlobSidecar<T>>, <T as EthSpec>::MaxBlobsPerBlock>;
pub type FixedBlobSidecarList<T> =
FixedVector<Option<Arc<BlobSidecar<T>>>, <T as EthSpec>::MaxBlobsPerBlock>;
pub type BlobsList<T> = VariableList<Blob<T>, <T as EthSpec>::MaxBlobCommitmentsPerBlock>;
pub type BlobRootsList<T> = VariableList<Hash256, <T as EthSpec>::MaxBlobCommitmentsPerBlock>;


@ -1,24 +1,15 @@
use crate::beacon_block_body::KzgCommitments;
use crate::{
BlobRootsList, ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb,
ChainSpec, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb,
ExecutionPayloadHeaderMerge, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName,
ForkVersionDeserialize, KzgProofs, SignedRoot, Uint256,
ForkVersionDeserialize, SignedRoot, Uint256,
};
use bls::PublicKeyBytes;
use bls::Signature;
use serde::{Deserialize, Deserializer, Serialize};
use ssz_derive::Encode;
use superstruct::superstruct;
use tree_hash_derive::TreeHash;
#[derive(PartialEq, Debug, Default, Serialize, Deserialize, TreeHash, Clone, Encode)]
#[serde(bound = "E: EthSpec")]
pub struct BlindedBlobsBundle<E: EthSpec> {
pub commitments: KzgCommitments<E>,
pub proofs: KzgProofs<E>,
pub blob_roots: BlobRootsList<E>,
}
#[superstruct(
variants(Merge, Capella, Deneb),
variant_attributes(
@ -39,7 +30,7 @@ pub struct BuilderBid<E: EthSpec> {
#[superstruct(only(Deneb), partial_getter(rename = "header_deneb"))]
pub header: ExecutionPayloadHeaderDeneb<E>,
#[superstruct(only(Deneb))]
pub blinded_blobs_bundle: BlindedBlobsBundle<E>,
pub blob_kzg_commitments: KzgCommitments<E>,
#[serde(with = "serde_utils::quoted_u256")]
pub value: Uint256,
pub pubkey: PublicKeyBytes,


@ -15,7 +15,6 @@ pub enum Domain {
BlsToExecutionChange,
BeaconProposer,
BeaconAttester,
BlobSidecar,
Randao,
Deposit,
VoluntaryExit,
@ -102,7 +101,6 @@ pub struct ChainSpec {
*/
pub(crate) domain_beacon_proposer: u32,
pub(crate) domain_beacon_attester: u32,
pub(crate) domain_blob_sidecar: u32,
pub(crate) domain_randao: u32,
pub(crate) domain_deposit: u32,
pub(crate) domain_voluntary_exit: u32,
@ -374,7 +372,6 @@ impl ChainSpec {
match domain {
Domain::BeaconProposer => self.domain_beacon_proposer,
Domain::BeaconAttester => self.domain_beacon_attester,
Domain::BlobSidecar => self.domain_blob_sidecar,
Domain::Randao => self.domain_randao,
Domain::Deposit => self.domain_deposit,
Domain::VoluntaryExit => self.domain_voluntary_exit,
@ -579,7 +576,6 @@ impl ChainSpec {
domain_voluntary_exit: 4,
domain_selection_proof: 5,
domain_aggregate_and_proof: 6,
domain_blob_sidecar: 11, // 0x0B000000
/*
* Fork choice
@ -822,7 +818,6 @@ impl ChainSpec {
domain_voluntary_exit: 4,
domain_selection_proof: 5,
domain_aggregate_and_proof: 6,
domain_blob_sidecar: 11,
/*
* Fork choice
@ -1416,7 +1411,6 @@ mod tests {
test_domain(Domain::BeaconProposer, spec.domain_beacon_proposer, &spec);
test_domain(Domain::BeaconAttester, spec.domain_beacon_attester, &spec);
test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec);
test_domain(Domain::Randao, spec.domain_randao, &spec);
test_domain(Domain::Deposit, spec.domain_deposit, &spec);
test_domain(Domain::VoluntaryExit, spec.domain_voluntary_exit, &spec);
@ -1441,8 +1435,6 @@ mod tests {
spec.domain_bls_to_execution_change,
&spec,
);
test_domain(Domain::BlobSidecar, spec.domain_blob_sidecar, &spec);
}
fn apply_bit_mask(domain_bytes: [u8; 4], spec: &ChainSpec) -> u32 {


@ -82,7 +82,6 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap<String, Value> {
"bls_withdrawal_prefix".to_uppercase() => u8_hex(spec.bls_withdrawal_prefix_byte),
"domain_beacon_proposer".to_uppercase() => u32_hex(spec.domain_beacon_proposer),
"domain_beacon_attester".to_uppercase() => u32_hex(spec.domain_beacon_attester),
"domain_blob_sidecar".to_uppercase() => u32_hex(spec.domain_blob_sidecar),
"domain_randao".to_uppercase()=> u32_hex(spec.domain_randao),
"domain_deposit".to_uppercase()=> u32_hex(spec.domain_deposit),
"domain_voluntary_exit".to_uppercase() => u32_hex(spec.domain_voluntary_exit),


@ -6,6 +6,7 @@ use ssz_types::typenum::{
bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16,
U16777216, U2, U2048, U256, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, U8192,
};
use ssz_types::typenum::{U17, U9};
use std::fmt::{self, Debug};
use std::str::FromStr;
@ -109,6 +110,7 @@ pub trait EthSpec:
type MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin;
type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq;
type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq;
type KzgCommitmentInclusionProofDepth: Unsigned + Clone + Sync + Send + Debug + PartialEq;
/*
* Derived values (set these CAREFULLY)
*/
@ -271,6 +273,10 @@ pub trait EthSpec:
fn bytes_per_blob() -> usize {
Self::BytesPerBlob::to_usize()
}
/// Returns the `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset for this specification.
fn kzg_proof_inclusion_proof_depth() -> usize {
Self::KzgCommitmentInclusionProofDepth::to_usize()
}
}
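The new associated type mirrors the preset values added in the YAML configs (17 for mainnet/gnosis, 9 for minimal). A quick illustrative check, assuming the `types` crate re-exports these presets (not part of this change):
use types::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec};

fn main() {
    assert_eq!(MainnetEthSpec::kzg_proof_inclusion_proof_depth(), 17);
    assert_eq!(GnosisEthSpec::kzg_proof_inclusion_proof_depth(), 17);
    assert_eq!(MinimalEthSpec::kzg_proof_inclusion_proof_depth(), 9);
}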
/// Macro to inherit some type values from another EthSpec.
@ -315,6 +321,7 @@ impl EthSpec for MainnetEthSpec {
type BytesPerFieldElement = U32;
type FieldElementsPerBlob = U4096;
type BytesPerBlob = U131072;
type KzgCommitmentInclusionProofDepth = U17;
type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count
type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch
type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch
@ -348,6 +355,7 @@ impl EthSpec for MinimalEthSpec {
type FieldElementsPerBlob = U4096;
type BytesPerBlob = U131072;
type MaxBlobCommitmentsPerBlock = U16;
type KzgCommitmentInclusionProofDepth = U9;
params_from_eth_spec!(MainnetEthSpec {
JustificationBitsLength,
@ -421,6 +429,7 @@ impl EthSpec for GnosisEthSpec {
type FieldElementsPerBlob = U4096;
type BytesPerFieldElement = U32;
type BytesPerBlob = U131072;
type KzgCommitmentInclusionProofDepth = U17;
fn default_spec() -> ChainSpec {
ChainSpec::gnosis()


@ -100,8 +100,6 @@ pub mod sqlite;
pub mod blob_sidecar;
pub mod light_client_header;
pub mod sidecar;
pub mod signed_blob;
use ethereum_types::{H160, H256};
@ -121,10 +119,7 @@ pub use crate::beacon_block_body::{
pub use crate::beacon_block_header::BeaconBlockHeader;
pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee};
pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *};
pub use crate::blob_sidecar::{
BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList, BlobSidecar, BlobSidecarList,
BlobsList, SidecarList,
};
pub use crate::blob_sidecar::{BlobSidecar, BlobSidecarList, BlobsList};
pub use crate::bls_to_execution_change::BlsToExecutionChange;
pub use crate::chain_spec::{ChainSpec, Config, Domain};
pub use crate::checkpoint::Checkpoint;
@ -182,7 +177,6 @@ pub use crate::signed_beacon_block::{
SignedBlindedBeaconBlock,
};
pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader;
pub use crate::signed_blob::*;
pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange;
pub use crate::signed_contribution_and_proof::SignedContributionAndProof;
pub use crate::signed_voluntary_exit::SignedVoluntaryExit;
@ -223,6 +217,5 @@ pub use bls::{
pub use kzg::{KzgCommitment, KzgProof};
pub use sidecar::Sidecar;
pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList};
pub use superstruct::superstruct;


@ -83,8 +83,6 @@ pub trait AbstractExecPayload<T: EthSpec>:
+ TryInto<Self::Capella>
+ TryInto<Self::Deneb>
{
type Sidecar: Sidecar<T>;
type Ref<'a>: ExecPayload<T>
+ Copy
+ From<&'a Self::Merge>
@ -103,11 +101,6 @@ pub trait AbstractExecPayload<T: EthSpec>:
+ Into<Self>
+ for<'a> From<Cow<'a, ExecutionPayloadDeneb<T>>>
+ TryFrom<ExecutionPayloadHeaderDeneb<T>>;
fn default_at_fork(fork_name: ForkName) -> Result<Self, Error>;
fn default_blobs_at_fork(
fork_name: ForkName,
) -> Result<<Self::Sidecar as Sidecar<T>>::BlobItems, Error>;
}
#[superstruct(
@ -280,6 +273,15 @@ impl<T: EthSpec> FullPayload<T> {
cons(inner.execution_payload)
})
}
pub fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> {
match fork_name {
ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant),
ForkName::Merge => Ok(FullPayloadMerge::default().into()),
ForkName::Capella => Ok(FullPayloadCapella::default().into()),
ForkName::Deneb => Ok(FullPayloadDeneb::default().into()),
}
}
}
impl<'a, T: EthSpec> FullPayloadRef<'a, T> {
@ -384,28 +386,10 @@ impl<'b, T: EthSpec> ExecPayload<T> for FullPayloadRef<'b, T> {
}
impl<T: EthSpec> AbstractExecPayload<T> for FullPayload<T> {
type Sidecar = BlobSidecar<T>;
type Ref<'a> = FullPayloadRef<'a, T>;
type Merge = FullPayloadMerge<T>;
type Capella = FullPayloadCapella<T>;
type Deneb = FullPayloadDeneb<T>;
fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> {
match fork_name {
ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant),
ForkName::Merge => Ok(FullPayloadMerge::default().into()),
ForkName::Capella => Ok(FullPayloadCapella::default().into()),
ForkName::Deneb => Ok(FullPayloadDeneb::default().into()),
}
}
fn default_blobs_at_fork(fork_name: ForkName) -> Result<BlobsList<T>, Error> {
match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
Err(Error::IncorrectStateVariant)
}
ForkName::Deneb => Ok(VariableList::default()),
}
}
}
impl<T: EthSpec> From<ExecutionPayload<T>> for FullPayload<T> {
@ -910,25 +894,6 @@ impl<T: EthSpec> AbstractExecPayload<T> for BlindedPayload<T> {
type Merge = BlindedPayloadMerge<T>;
type Capella = BlindedPayloadCapella<T>;
type Deneb = BlindedPayloadDeneb<T>;
type Sidecar = BlindedBlobSidecar;
fn default_at_fork(fork_name: ForkName) -> Result<Self, Error> {
match fork_name {
ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant),
ForkName::Merge => Ok(BlindedPayloadMerge::default().into()),
ForkName::Capella => Ok(BlindedPayloadCapella::default().into()),
ForkName::Deneb => Ok(BlindedPayloadDeneb::default().into()),
}
}
fn default_blobs_at_fork(fork_name: ForkName) -> Result<BlobRootsList<T>, Error> {
match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
Err(Error::IncorrectStateVariant)
}
ForkName::Deneb => Ok(VariableList::default()),
}
}
}
impl<T: EthSpec> From<ExecutionPayload<T>> for BlindedPayload<T> {

View File

@ -1,221 +0,0 @@
use crate::beacon_block_body::KzgCommitments;
use crate::test_utils::TestRandom;
use crate::{
AbstractExecPayload, BeaconBlock, BlindedBlobSidecar, BlindedBlobSidecarList, BlobRootsList,
BlobSidecar, BlobSidecarList, BlobsList, ChainSpec, Domain, EthSpec, Fork, Hash256,
SidecarList, SignedRoot, SignedSidecar, Slot,
};
use bls::SecretKey;
use kzg::KzgProof;
use serde::de::DeserializeOwned;
use ssz::{Decode, Encode};
use ssz_types::VariableList;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use tree_hash::TreeHash;
pub trait Sidecar<E: EthSpec>:
serde::Serialize
+ Clone
+ DeserializeOwned
+ Encode
+ Decode
+ Hash
+ TreeHash
+ TestRandom
+ Debug
+ SignedRoot
+ Sync
+ Send
+ for<'a> arbitrary::Arbitrary<'a>
{
type BlobItems: BlobItems<E>;
fn slot(&self) -> Slot;
fn build_sidecar<Payload: AbstractExecPayload<E>>(
blob_items: Self::BlobItems,
block: &BeaconBlock<E, Payload>,
expected_kzg_commitments: &KzgCommitments<E>,
kzg_proofs: Vec<KzgProof>,
) -> Result<SidecarList<E, Self>, String>;
// This is mostly unused outside of testing.
fn sign(
self: Arc<Self>,
secret_key: &SecretKey,
fork: &Fork,
genesis_validators_root: Hash256,
spec: &ChainSpec,
) -> SignedSidecar<E, Self> {
let signing_epoch = self.slot().epoch(E::slots_per_epoch());
let domain = spec.get_domain(
signing_epoch,
Domain::BlobSidecar,
fork,
genesis_validators_root,
);
let message = self.signing_root(domain);
let signature = secret_key.sign(message);
SignedSidecar {
message: self,
signature,
_phantom: PhantomData,
}
}
}
pub trait BlobItems<T: EthSpec>: Sync + Send + Sized {
fn try_from_blob_roots(roots: BlobRootsList<T>) -> Result<Self, String>;
fn try_from_blobs(blobs: BlobsList<T>) -> Result<Self, String>;
fn len(&self) -> usize;
fn is_empty(&self) -> bool;
fn blobs(&self) -> Option<&BlobsList<T>>;
}
impl<T: EthSpec> BlobItems<T> for BlobsList<T> {
fn try_from_blob_roots(_roots: BlobRootsList<T>) -> Result<Self, String> {
Err("Unexpected conversion from blob roots to blobs".to_string())
}
fn try_from_blobs(blobs: BlobsList<T>) -> Result<Self, String> {
Ok(blobs)
}
fn len(&self) -> usize {
VariableList::len(self)
}
fn is_empty(&self) -> bool {
VariableList::is_empty(self)
}
fn blobs(&self) -> Option<&BlobsList<T>> {
Some(self)
}
}
impl<T: EthSpec> BlobItems<T> for BlobRootsList<T> {
fn try_from_blob_roots(roots: BlobRootsList<T>) -> Result<Self, String> {
Ok(roots)
}
fn try_from_blobs(blobs: BlobsList<T>) -> Result<Self, String> {
VariableList::new(
blobs
.into_iter()
.map(|blob| blob.tree_hash_root())
.collect(),
)
.map_err(|e| format!("{e:?}"))
}
fn len(&self) -> usize {
VariableList::len(self)
}
fn is_empty(&self) -> bool {
VariableList::is_empty(self)
}
fn blobs(&self) -> Option<&BlobsList<T>> {
None
}
}
impl<E: EthSpec> Sidecar<E> for BlobSidecar<E> {
type BlobItems = BlobsList<E>;
fn slot(&self) -> Slot {
self.slot
}
fn build_sidecar<Payload: AbstractExecPayload<E>>(
blobs: BlobsList<E>,
block: &BeaconBlock<E, Payload>,
expected_kzg_commitments: &KzgCommitments<E>,
kzg_proofs: Vec<KzgProof>,
) -> Result<SidecarList<E, Self>, String> {
let beacon_block_root = block.canonical_root();
let slot = block.slot();
let blob_sidecars = BlobSidecarList::from(
blobs
.into_iter()
.enumerate()
.map(|(blob_index, blob)| {
let kzg_commitment = expected_kzg_commitments
.get(blob_index)
.ok_or("KZG commitment should exist for blob")?;
let kzg_proof = kzg_proofs
.get(blob_index)
.ok_or("KZG proof should exist for blob")?;
Ok(Arc::new(BlobSidecar {
block_root: beacon_block_root,
index: blob_index as u64,
slot,
block_parent_root: block.parent_root(),
proposer_index: block.proposer_index(),
blob,
kzg_commitment: *kzg_commitment,
kzg_proof: *kzg_proof,
}))
})
.collect::<Result<Vec<_>, String>>()?,
);
Ok(blob_sidecars)
}
}
impl<E: EthSpec> Sidecar<E> for BlindedBlobSidecar {
type BlobItems = BlobRootsList<E>;
fn slot(&self) -> Slot {
self.slot
}
fn build_sidecar<Payload: AbstractExecPayload<E>>(
blob_roots: BlobRootsList<E>,
block: &BeaconBlock<E, Payload>,
expected_kzg_commitments: &KzgCommitments<E>,
kzg_proofs: Vec<KzgProof>,
) -> Result<SidecarList<E, BlindedBlobSidecar>, String> {
let beacon_block_root = block.canonical_root();
let slot = block.slot();
let blob_sidecars = BlindedBlobSidecarList::<E>::from(
blob_roots
.into_iter()
.enumerate()
.map(|(blob_index, blob_root)| {
let kzg_commitment = expected_kzg_commitments
.get(blob_index)
.ok_or("KZG commitment should exist for blob")?;
let kzg_proof = kzg_proofs.get(blob_index).ok_or(format!(
"Missing KZG proof for slot {} blob index: {}",
slot, blob_index
))?;
Ok(Arc::new(BlindedBlobSidecar {
block_root: beacon_block_root,
index: blob_index as u64,
slot,
block_parent_root: block.parent_root(),
proposer_index: block.proposer_index(),
blob_root,
kzg_commitment: *kzg_commitment,
kzg_proof: *kzg_proof,
}))
})
.collect::<Result<Vec<_>, String>>()?,
);
Ok(blob_sidecars)
}
}

View File

@ -1,114 +0,0 @@
use crate::sidecar::Sidecar;
use crate::{
test_utils::TestRandom, BlindedBlobSidecar, Blob, BlobSidecar, ChainSpec, Domain, EthSpec,
Fork, Hash256, Signature, SignedRoot, SigningData,
};
use bls::PublicKey;
use derivative::Derivative;
use serde::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use std::marker::PhantomData;
use std::sync::Arc;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;
use tree_hash_derive::TreeHash;
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Encode,
Decode,
TestRandom,
TreeHash,
Derivative,
arbitrary::Arbitrary,
)]
#[serde(bound = "T: EthSpec, S: Sidecar<T>")]
#[arbitrary(bound = "T: EthSpec, S: Sidecar<T>")]
#[derivative(Hash(bound = "T: EthSpec, S: Sidecar<T>"))]
pub struct SignedSidecar<T: EthSpec, S: Sidecar<T>> {
pub message: Arc<S>,
pub signature: Signature,
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]
#[serde(skip)]
#[arbitrary(default)]
pub _phantom: PhantomData<T>,
}
impl<T: EthSpec> SignedSidecar<T, BlindedBlobSidecar> {
pub fn into_full_blob_sidecars(self, blob: Blob<T>) -> SignedSidecar<T, BlobSidecar<T>> {
let blinded_sidecar = self.message;
SignedSidecar {
message: Arc::new(BlobSidecar {
block_root: blinded_sidecar.block_root,
index: blinded_sidecar.index,
slot: blinded_sidecar.slot,
block_parent_root: blinded_sidecar.block_parent_root,
proposer_index: blinded_sidecar.proposer_index,
blob,
kzg_commitment: blinded_sidecar.kzg_commitment,
kzg_proof: blinded_sidecar.kzg_proof,
}),
signature: self.signature,
_phantom: PhantomData,
}
}
}
impl<T: EthSpec> SignedBlobSidecar<T> {
/// Verify `self.signature`.
///
/// If the root of `self.message` is already known, it can be passed in via `object_root_opt`.
/// Otherwise, it will be computed locally.
pub fn verify_signature(
&self,
object_root_opt: Option<Hash256>,
pubkey: &PublicKey,
fork: &Fork,
genesis_validators_root: Hash256,
spec: &ChainSpec,
) -> bool {
let domain = spec.get_domain(
self.message.slot.epoch(T::slots_per_epoch()),
Domain::BlobSidecar,
fork,
genesis_validators_root,
);
let message = if let Some(object_root) = object_root_opt {
SigningData {
object_root,
domain,
}
.tree_hash_root()
} else {
self.message.signing_root(domain)
};
self.signature.verify(pubkey, message)
}
}
impl<T: EthSpec> From<SignedBlobSidecar<T>> for SignedBlindedBlobSidecar<T> {
fn from(signed: SignedBlobSidecar<T>) -> Self {
SignedBlindedBlobSidecar {
message: Arc::new(signed.message.into()),
signature: signed.signature,
_phantom: PhantomData,
}
}
}
pub type SignedBlobSidecar<T> = SignedSidecar<T, BlobSidecar<T>>;
pub type SignedBlindedBlobSidecar<T> = SignedSidecar<T, BlindedBlobSidecar>;
/// List of Signed Sidecars that implements `Sidecar`.
pub type SignedSidecarList<T, Sidecar> =
VariableList<SignedSidecar<T, Sidecar>, <T as EthSpec>::MaxBlobsPerBlock>;
pub type SignedBlobSidecarList<T> = SignedSidecarList<T, BlobSidecar<T>>;
pub type SignedBlindedBlobSidecarList<T> = SignedSidecarList<T, BlindedBlobSidecar>;

View File

@ -6,10 +6,24 @@ use std::fmt::Debug;
pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof, trusted_setup::TrustedSetup};
pub use c_kzg::{
Blob, Bytes32, Bytes48, Error, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT,
Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT,
BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB,
};
#[derive(Debug)]
pub enum Error {
/// An error from the underlying kzg library.
Kzg(c_kzg::Error),
/// The KZG proof verification failed.
KzgVerificationFailed,
}
impl From<c_kzg::Error> for Error {
fn from(value: c_kzg::Error) -> Self {
Error::Kzg(value)
}
}
/// A wrapper over a kzg library that holds the trusted setup parameters.
#[derive(Debug)]
pub struct Kzg {
@ -35,6 +49,7 @@ impl Kzg {
) -> Result<KzgProof, Error> {
c_kzg::KzgProof::compute_blob_kzg_proof(blob, &kzg_commitment.into(), &self.trusted_setup)
.map(|proof| KzgProof(proof.to_bytes().into_inner()))
.map_err(Into::into)
}
/// Verify a kzg proof given the blob, kzg commitment and kzg proof.
@ -43,13 +58,17 @@ impl Kzg {
blob: &Blob,
kzg_commitment: KzgCommitment,
kzg_proof: KzgProof,
) -> Result<bool, Error> {
c_kzg::KzgProof::verify_blob_kzg_proof(
) -> Result<(), Error> {
if !c_kzg::KzgProof::verify_blob_kzg_proof(
blob,
&kzg_commitment.into(),
&kzg_proof.into(),
&self.trusted_setup,
)
)? {
Err(Error::KzgVerificationFailed)
} else {
Ok(())
}
}
/// Verify a batch of blob commitment proof triplets.
@ -61,7 +80,7 @@ impl Kzg {
blobs: &[Blob],
kzg_commitments: &[KzgCommitment],
kzg_proofs: &[KzgProof],
) -> Result<bool, Error> {
) -> Result<(), Error> {
let commitments_bytes = kzg_commitments
.iter()
.map(|comm| Bytes48::from(*comm))
@ -72,18 +91,23 @@ impl Kzg {
.map(|proof| Bytes48::from(*proof))
.collect::<Vec<_>>();
c_kzg::KzgProof::verify_blob_kzg_proof_batch(
if !c_kzg::KzgProof::verify_blob_kzg_proof_batch(
blobs,
&commitments_bytes,
&proofs_bytes,
&self.trusted_setup,
)
)? {
Err(Error::KzgVerificationFailed)
} else {
Ok(())
}
}
/// Converts a blob to a kzg commitment.
pub fn blob_to_kzg_commitment(&self, blob: &Blob) -> Result<KzgCommitment, Error> {
c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, &self.trusted_setup)
.map(|commitment| KzgCommitment(commitment.to_bytes().into_inner()))
.map_err(Into::into)
}
/// Computes the kzg proof for a given `blob` and an evaluation point `z`
@ -94,6 +118,7 @@ impl Kzg {
) -> Result<(KzgProof, Bytes32), Error> {
c_kzg::KzgProof::compute_kzg_proof(blob, z, &self.trusted_setup)
.map(|(proof, y)| (KzgProof(proof.to_bytes().into_inner()), y))
.map_err(Into::into)
}
/// Verifies a `kzg_proof` for a `kzg_commitment` that evaluating a polynomial at `z` results in `y`
@ -111,5 +136,6 @@ impl Kzg {
&kzg_proof.into(),
&self.trusted_setup,
)
.map_err(Into::into)
}
}
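
A hedged caller-side sketch of the new error contract (the `check_blob_proof` helper below is illustrative, not part of this diff): proof verification now returns `Ok(())` on success, reports an invalid proof as `Error::KzgVerificationFailed`, and wraps failures of the underlying library in `Error::Kzg`.

use kzg::{Blob, Error, Kzg, KzgCommitment, KzgProof};

fn check_blob_proof(
    kzg: &Kzg,
    blob: &Blob,
    commitment: KzgCommitment,
    proof: KzgProof,
) -> Result<bool, Error> {
    match kzg.verify_blob_kzg_proof(blob, commitment, proof) {
        // The proof verified against the commitment.
        Ok(()) => Ok(true),
        // The proof is well-formed but does not verify.
        Err(Error::KzgVerificationFailed) => Ok(false),
        // The underlying c-kzg call itself failed (malformed input, setup issues, ...).
        Err(e) => Err(e),
    }
}

This mirrors how the EF test cases further down translate the result back into the spec's expected bool output.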

View File

@ -1,17 +1,18 @@
use parking_lot::Mutex;
use std::collections::HashSet;
use types::SignedBeaconBlockHeader;
#[derive(Debug, Default)]
pub struct BlockQueue {
blocks: Mutex<Vec<SignedBeaconBlockHeader>>,
blocks: Mutex<HashSet<SignedBeaconBlockHeader>>,
}
impl BlockQueue {
pub fn queue(&self, block_header: SignedBeaconBlockHeader) {
self.blocks.lock().push(block_header)
self.blocks.lock().insert(block_header);
}
pub fn dequeue(&self) -> Vec<SignedBeaconBlockHeader> {
pub fn dequeue(&self) -> HashSet<SignedBeaconBlockHeader> {
let mut blocks = self.blocks.lock();
std::mem::take(&mut *blocks)
}
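
An illustrative sketch (not part of this diff) of the deduplication this change buys, assuming a `SignedBeaconBlockHeader` value is available: queueing the same header twice now drains as a single entry.

use types::SignedBeaconBlockHeader;

fn queue_deduplicates(header: SignedBeaconBlockHeader) {
    let queue = BlockQueue::default();
    queue.queue(header.clone());
    queue.queue(header);
    // The `HashSet` collapses the duplicate, so exactly one header is drained.
    assert_eq!(queue.dequeue().len(), 1);
}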

View File

@ -40,3 +40,4 @@ beacon_chain = { workspace = true }
store = { workspace = true }
fork_choice = { workspace = true }
execution_layer = { workspace = true }
logging = { workspace = true }

View File

@ -1,4 +1,4 @@
TESTS_TAG := v1.4.0-beta.3
TESTS_TAG := v1.4.0-beta.4
TESTS = general minimal mainnet
TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS))

View File

@ -1,6 +1,11 @@
use super::*;
use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file};
use ::fork_choice::PayloadVerificationStatus;
use ::fork_choice::{PayloadVerificationStatus, ProposerHeadError};
use beacon_chain::beacon_proposer_cache::compute_proposer_duties_from_head;
use beacon_chain::blob_verification::GossipBlobError;
use beacon_chain::chain_config::{
DisallowedReOrgOffsets, DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_THRESHOLD,
};
use beacon_chain::slot_clock::SlotClock;
use beacon_chain::{
attestation_verification::{
@ -20,7 +25,7 @@ use std::time::Duration;
use types::{
Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlobSidecar, BlobsList, Checkpoint,
EthSpec, ExecutionBlockHash, ForkName, Hash256, IndexedAttestation, KzgProof,
ProgressiveBalancesMode, Signature, SignedBeaconBlock, SignedBlobSidecar, Slot, Uint256,
ProgressiveBalancesMode, ProposerPreparationData, SignedBeaconBlock, Slot, Uint256,
};
#[derive(Default, Debug, PartialEq, Clone, Deserialize, Decode)]
@ -38,6 +43,13 @@ pub struct Head {
root: Hash256,
}
#[derive(Debug, Clone, Copy, PartialEq, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ShouldOverrideFcu {
validator_is_connected: bool,
result: bool,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct Checks {
@ -50,6 +62,8 @@ pub struct Checks {
u_justified_checkpoint: Option<Checkpoint>,
u_finalized_checkpoint: Option<Checkpoint>,
proposer_boost_root: Option<Hash256>,
get_proposer_head: Option<Hash256>,
should_override_forkchoice_update: Option<ShouldOverrideFcu>,
}
#[derive(Debug, Clone, Deserialize)]
@ -256,6 +270,8 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
u_justified_checkpoint,
u_finalized_checkpoint,
proposer_boost_root,
get_proposer_head,
should_override_forkchoice_update: should_override_fcu,
} = checks.as_ref();
if let Some(expected_head) = head {
@ -294,6 +310,14 @@ impl<E: EthSpec> Case for ForkChoiceTest<E> {
if let Some(expected_proposer_boost_root) = proposer_boost_root {
tester.check_expected_proposer_boost_root(*expected_proposer_boost_root)?;
}
if let Some(should_override_fcu) = should_override_fcu {
tester.check_should_override_fcu(*should_override_fcu)?;
}
if let Some(expected_proposer_head) = get_proposer_head {
tester.check_expected_proposer_head(*expected_proposer_head)?;
}
}
}
}
@ -325,6 +349,7 @@ impl<E: EthSpec> Tester<E> {
}
let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
.logger(logging::test_logger())
.spec(spec.clone())
.keypairs(vec![])
.chain_config(ChainConfig {
@ -413,6 +438,8 @@ impl<E: EthSpec> Tester<E> {
) -> Result<(), Error> {
let block_root = block.canonical_root();
let mut blob_success = true;
// Convert blobs and kzg_proofs into sidecars, then plumb them into the availability tracker
if let Some(blobs) = blobs.clone() {
let proofs = kzg_proofs.unwrap();
@ -432,25 +459,32 @@ impl<E: EthSpec> Tester<E> {
.zip(commitments.into_iter())
.enumerate()
{
let signed_sidecar = SignedBlobSidecar {
message: Arc::new(BlobSidecar {
block_root,
index: i as u64,
slot: block.slot(),
block_parent_root: block.parent_root(),
proposer_index: block.message().proposer_index(),
blob,
kzg_commitment,
kzg_proof,
}),
signature: Signature::empty(),
_phantom: Default::default(),
};
let result = self.block_on_dangerous(
self.harness
.chain
.process_gossip_blob(GossipVerifiedBlob::__assumed_valid(signed_sidecar)),
)?;
let blob_sidecar = Arc::new(BlobSidecar {
index: i as u64,
blob,
kzg_commitment,
kzg_proof,
signed_block_header: block.signed_block_header(),
kzg_commitment_inclusion_proof: block
.message()
.body()
.kzg_commitment_merkle_proof(i)
.unwrap(),
});
let chain = self.harness.chain.clone();
let blob =
match GossipVerifiedBlob::new(blob_sidecar.clone(), blob_sidecar.index, &chain)
{
Ok(gossip_verified_blob) => gossip_verified_blob,
Err(GossipBlobError::KzgError(_)) => {
blob_success = false;
GossipVerifiedBlob::__assumed_valid(blob_sidecar)
}
Err(_) => GossipVerifiedBlob::__assumed_valid(blob_sidecar),
};
let result =
self.block_on_dangerous(self.harness.chain.process_gossip_blob(blob))?;
if valid {
assert!(result.is_ok());
}
@ -466,7 +500,7 @@ impl<E: EthSpec> Tester<E> {
|| Ok(()),
))?
.map(|avail: AvailabilityProcessingStatus| avail.try_into());
let success = result.as_ref().map_or(false, |inner| inner.is_ok());
let success = blob_success && result.as_ref().map_or(false, |inner| inner.is_ok());
if success != valid {
return Err(Error::DidntFail(format!(
"block with root {} was valid={} whilst test expects valid={}. result: {:?}",
@ -703,6 +737,82 @@ impl<E: EthSpec> Tester<E> {
expected_proposer_boost_root,
)
}
pub fn check_expected_proposer_head(
&self,
expected_proposer_head: Hash256,
) -> Result<(), Error> {
let mut fc = self.harness.chain.canonical_head.fork_choice_write_lock();
let slot = self.harness.chain.slot().unwrap();
let canonical_head = fc.get_head(slot, &self.harness.spec).unwrap();
let proposer_head_result = fc.get_proposer_head(
slot,
canonical_head,
DEFAULT_RE_ORG_THRESHOLD,
&DisallowedReOrgOffsets::default(),
DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION,
);
let proposer_head = match proposer_head_result {
Ok(head) => head.parent_node.root,
Err(ProposerHeadError::DoNotReOrg(_)) => canonical_head,
_ => panic!("Unexpected error in get proposer head"),
};
check_equal("proposer_head", proposer_head, expected_proposer_head)
}
pub fn check_should_override_fcu(
&self,
expected_should_override_fcu: ShouldOverrideFcu,
) -> Result<(), Error> {
// Determine proposer.
let cached_head = self.harness.chain.canonical_head.cached_head();
let next_slot = cached_head.snapshot.beacon_block.slot() + 1;
let next_slot_epoch = next_slot.epoch(E::slots_per_epoch());
let (proposer_indices, decision_root, _, fork) =
compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap();
let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize];
// Ensure the proposer index cache is primed.
self.harness
.chain
.beacon_proposer_cache
.lock()
.insert(next_slot_epoch, decision_root, proposer_indices, fork)
.unwrap();
// Update the execution layer proposer preparation to match the test config.
let el = self.harness.chain.execution_layer.clone().unwrap();
self.block_on_dangerous(async {
if expected_should_override_fcu.validator_is_connected {
el.update_proposer_preparation(
next_slot_epoch,
&[ProposerPreparationData {
validator_index: proposer_index as u64,
fee_recipient: Default::default(),
}],
)
.await;
} else {
el.clear_proposer_preparation(proposer_index as u64).await;
}
})
.unwrap();
// Check forkchoice override.
let canonical_fcu_params = cached_head.forkchoice_update_parameters();
let fcu_params = self
.harness
.chain
.overridden_forkchoice_update_params(canonical_fcu_params)
.unwrap();
check_equal(
"should_override_forkchoice_update",
fcu_params != canonical_fcu_params,
expected_should_override_fcu.result,
)
}
}
/// Checks that the `head` checkpoint from the beacon chain head matches the `fc` checkpoint gleaned

View File

@ -2,7 +2,7 @@ use super::*;
use crate::case_result::compare_result;
use beacon_chain::kzg_utils::validate_blob;
use eth2_network_config::TRUSTED_SETUP_BYTES;
use kzg::{Kzg, KzgCommitment, KzgProof, TrustedSetup};
use kzg::{Error as KzgError, Kzg, KzgCommitment, KzgProof, TrustedSetup};
use serde::Deserialize;
use std::convert::TryInto;
use std::marker::PhantomData;
@ -91,8 +91,14 @@ impl<E: EthSpec> Case for KZGVerifyBlobKZGProof<E> {
let kzg = get_kzg()?;
let result = parse_input(&self.input).and_then(|(blob, commitment, proof)| {
validate_blob::<E>(&kzg, &blob, commitment, proof)
.map_err(|e| Error::InternalError(format!("Failed to validate blob: {:?}", e)))
match validate_blob::<E>(&kzg, &blob, commitment, proof) {
Ok(_) => Ok(true),
Err(KzgError::KzgVerificationFailed) => Ok(false),
Err(e) => Err(Error::InternalError(format!(
"Failed to validate blob: {:?}",
e
))),
}
});
compare_result::<bool, _>(&result, &self.output)

View File

@ -1,6 +1,7 @@
use super::*;
use crate::case_result::compare_result;
use beacon_chain::kzg_utils::validate_blobs;
use kzg::Error as KzgError;
use serde::Deserialize;
use std::marker::PhantomData;
@ -53,10 +54,23 @@ impl<E: EthSpec> Case for KZGVerifyBlobKZGProofBatch<E> {
};
let kzg = get_kzg()?;
let result = parse_input(&self.input).and_then(|(commitments, blobs, proofs)| {
validate_blobs::<E>(&kzg, &commitments, blobs.iter().collect(), &proofs)
.map_err(|e| Error::InternalError(format!("Failed to validate blobs: {:?}", e)))
});
let result =
parse_input(&self.input).and_then(
|(commitments, blobs, proofs)| match validate_blobs::<E>(
&kzg,
&commitments,
blobs.iter().collect(),
&proofs,
) {
Ok(_) => Ok(true),
Err(KzgError::KzgVerificationFailed) => Ok(false),
Err(e) => Err(Error::InternalError(format!(
"Failed to validate blobs: {:?}",
e
))),
},
);
compare_result::<bool, _>(&result, &self.output)
}

View File

@ -1,9 +1,9 @@
use super::*;
use crate::decode::{ssz_decode_state, yaml_decode_file};
use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file};
use serde::Deserialize;
use std::path::Path;
use tree_hash::Hash256;
use types::{BeaconState, EthSpec, ForkName};
use types::{BeaconBlockBody, BeaconBlockBodyDeneb, BeaconState, EthSpec, ForkName};
#[derive(Debug, Clone, Deserialize)]
pub struct Metadata {
@ -82,3 +82,72 @@ impl<E: EthSpec> Case for MerkleProofValidity<E> {
Ok(())
}
}
#[derive(Debug, Clone, Deserialize)]
#[serde(bound = "E: EthSpec")]
pub struct KzgInclusionMerkleProofValidity<E: EthSpec> {
pub metadata: Option<Metadata>,
pub block: BeaconBlockBody<E>,
pub merkle_proof: MerkleProof,
}
impl<E: EthSpec> LoadCase for KzgInclusionMerkleProofValidity<E> {
fn load_from_dir(path: &Path, fork_name: ForkName) -> Result<Self, Error> {
let block = match fork_name {
ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
return Err(Error::InternalError(format!(
"KZG inclusion merkle proof validity test skipped for {:?}",
fork_name
)))
}
ForkName::Deneb => {
ssz_decode_file::<BeaconBlockBodyDeneb<E>>(&path.join("object.ssz_snappy"))?
}
};
let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?;
// Metadata does not currently exist for these tests, but we attempt to load it in case it is added later.
let meta_path = path.join("meta.yaml");
let metadata = if meta_path.exists() {
Some(yaml_decode_file(&meta_path)?)
} else {
None
};
Ok(Self {
metadata,
block: block.into(),
merkle_proof,
})
}
}
impl<E: EthSpec> Case for KzgInclusionMerkleProofValidity<E> {
fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
let Ok(proof) = self.block.to_ref().kzg_commitment_merkle_proof(0) else {
return Err(Error::FailedToParseTest(
"Could not retrieve merkle proof".to_string(),
));
};
let proof_len = proof.len();
let branch_len = self.merkle_proof.branch.len();
if proof_len != branch_len {
return Err(Error::NotEqual(format!(
"Branches not equal in length computed: {}, expected {}",
proof_len, branch_len
)));
}
for (i, proof_leaf) in proof.iter().enumerate().take(proof_len) {
let expected_leaf = self.merkle_proof.branch[i];
if *proof_leaf != expected_leaf {
return Err(Error::NotEqual(format!(
"Leaves not equal in merkle proof computed: {}, expected: {}",
hex::encode(proof_leaf),
hex::encode(expected_leaf)
)));
}
}
Ok(())
}
}

View File

@ -560,6 +560,13 @@ impl<E: EthSpec + TypeName> Handler for ForkChoiceHandler<E> {
return false;
}
// No FCU override tests exist prior to Bellatrix.
if self.handler_name == "should_override_forkchoice_update"
&& (fork_name == ForkName::Base || fork_name == ForkName::Altair)
{
return false;
}
// These tests check block validity (which may include signatures) and there is no need to
// run them with fake crypto.
cfg!(not(feature = "fake_crypto"))
@ -786,6 +793,34 @@ impl<E: EthSpec + TypeName> Handler for MerkleProofValidityHandler<E> {
}
}
#[derive(Derivative)]
#[derivative(Default(bound = ""))]
pub struct KzgInclusionMerkleProofValidityHandler<E>(PhantomData<E>);
impl<E: EthSpec + TypeName> Handler for KzgInclusionMerkleProofValidityHandler<E> {
type Case = cases::KzgInclusionMerkleProofValidity<E>;
fn config_name() -> &'static str {
E::name()
}
fn runner_name() -> &'static str {
"merkle_proof"
}
fn handler_name(&self) -> String {
"single_merkle_proof".into()
}
fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool {
// Enabled in Deneb and all later forks.
fork_name != ForkName::Base
&& fork_name != ForkName::Altair
&& fork_name != ForkName::Merge
&& fork_name != ForkName::Capella
}
}
#[derive(Derivative)]
#[derivative(Default(bound = ""))]
pub struct OperationsHandler<E, O>(PhantomData<(E, O)>);

View File

@ -78,7 +78,6 @@ type_name!(ProposerSlashing);
type_name_generic!(SignedAggregateAndProof);
type_name_generic!(SignedBeaconBlock);
type_name!(SignedBeaconBlockHeader);
type_name_generic!(SignedBlobSidecar);
type_name_generic!(SignedContributionAndProof);
type_name!(SignedVoluntaryExit);
type_name!(SigningData);

View File

@ -1,7 +1,7 @@
#![cfg(feature = "ef_tests")]
use ef_tests::*;
use types::*;
use ef_tests::{KzgInclusionMerkleProofValidityHandler, *};
use types::{MainnetEthSpec, MinimalEthSpec, *};
// Check that the hand-computed multiplications on EthSpec are correctly computed.
// This test lives here because one is most likely to muck these up during a spec update.
@ -378,12 +378,6 @@ mod ssz_static {
SszStaticHandler::<BlobSidecar<MainnetEthSpec>, MainnetEthSpec>::deneb_only().run();
}
#[test]
fn signed_blob_sidecar() {
SszStaticHandler::<SignedBlobSidecar<MinimalEthSpec>, MinimalEthSpec>::deneb_only().run();
SszStaticHandler::<SignedBlobSidecar<MainnetEthSpec>, MainnetEthSpec>::deneb_only().run();
}
#[test]
fn blob_identifier() {
SszStaticHandler::<BlobIdentifier, MinimalEthSpec>::deneb_only().run();
@ -546,6 +540,18 @@ fn fork_choice_withholding() {
// There is no mainnet variant for this test.
}
#[test]
fn fork_choice_should_override_forkchoice_update() {
ForkChoiceHandler::<MinimalEthSpec>::new("should_override_forkchoice_update").run();
ForkChoiceHandler::<MainnetEthSpec>::new("should_override_forkchoice_update").run();
}
#[test]
fn fork_choice_get_proposer_head() {
ForkChoiceHandler::<MinimalEthSpec>::new("get_proposer_head").run();
ForkChoiceHandler::<MainnetEthSpec>::new("get_proposer_head").run();
}
#[test]
fn optimistic_sync() {
OptimisticSyncHandler::<MinimalEthSpec>::default().run();
@ -598,6 +604,12 @@ fn merkle_proof_validity() {
MerkleProofValidityHandler::<MainnetEthSpec>::default().run();
}
#[test]
fn kzg_inclusion_merkle_proof_validity() {
KzgInclusionMerkleProofValidityHandler::<MainnetEthSpec>::default().run();
KzgInclusionMerkleProofValidityHandler::<MinimalEthSpec>::default().run();
}
#[test]
fn rewards() {
for handler in &["basic", "leak", "random"] {

View File

@ -11,7 +11,7 @@ use crate::{
};
use bls::SignatureBytes;
use environment::RuntimeContext;
use eth2::types::{BlockContents, SignedBlockContents};
use eth2::types::{FullBlockContents, PublishBlockRequest};
use eth2::{BeaconNodeHttpClient, StatusCode};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
@ -22,7 +22,7 @@ use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use types::{
AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes,
BlindedBeaconBlock, BlockType, EthSpec, Graffiti, PublicKeyBytes, SignedBlindedBeaconBlock,
Slot,
};
@ -329,10 +329,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
self.inner.context.executor.spawn(
async move {
if builder_proposals {
let result = service
.clone()
.publish_block::<BlindedPayload<E>>(slot, validator_pubkey)
.await;
let result = service.publish_block(slot, validator_pubkey, true).await;
match result {
Err(BlockError::Recoverable(e)) => {
error!(
@ -342,9 +339,8 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
"block_slot" => ?slot,
"info" => "blinded proposal failed, attempting full block"
);
if let Err(e) = service
.publish_block::<FullPayload<E>>(slot, validator_pubkey)
.await
if let Err(e) =
service.publish_block(slot, validator_pubkey, false).await
{
// Log a `crit` since a full block
// (non-builder) proposal failed.
@ -371,9 +367,8 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
}
Ok(_) => {}
};
} else if let Err(e) = service
.publish_block::<FullPayload<E>>(slot, validator_pubkey)
.await
} else if let Err(e) =
service.publish_block(slot, validator_pubkey, false).await
{
// Log a `crit` since a full block (non-builder)
// proposal failed.
@ -394,10 +389,11 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
}
/// Produce a block at the given slot for validator_pubkey
async fn publish_block<Payload: AbstractExecPayload<E>>(
self,
async fn publish_block(
&self,
slot: Slot,
validator_pubkey: PublicKeyBytes,
builder_proposal: bool,
) -> Result<(), BlockError> {
let log = self.context.log();
let _timer =
@ -460,7 +456,7 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
//
// Try the proposer nodes last, since it's likely that they don't have a
// great view of attestations on the network.
let block_contents = proposer_fallback
let unsigned_block = proposer_fallback
.request_proposers_last(
RequireSynced::No,
OfflineOnFailure::Yes,
@ -471,20 +467,32 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
randao_reveal_ref,
graffiti,
proposer_index,
builder_proposal,
log,
)
},
)
.await?;
let (block, maybe_blob_sidecars) = block_contents.deconstruct();
let signing_timer = metrics::start_timer(&metrics::BLOCK_SIGNING_TIMES);
let signed_block = match self_ref
.validator_store
.sign_block::<Payload>(*validator_pubkey_ref, block, current_slot)
.await
{
let res = match unsigned_block {
UnsignedBlock::Full(block_contents) => {
let (block, maybe_blobs) = block_contents.deconstruct();
self_ref
.validator_store
.sign_block(*validator_pubkey_ref, block, current_slot)
.await
.map(|b| SignedBlock::Full(PublishBlockRequest::new(b, maybe_blobs)))
}
UnsignedBlock::Blinded(block) => self_ref
.validator_store
.sign_block(*validator_pubkey_ref, block, current_slot)
.await
.map(SignedBlock::Blinded),
};
let signed_block = match res {
Ok(block) => block,
Err(ValidatorStoreError::UnknownPubkey(pubkey)) => {
// A pubkey can be missing when a validator was recently removed
@ -506,36 +514,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
}
};
let maybe_signed_blobs = match maybe_blob_sidecars {
Some(blob_sidecars) => {
match self_ref
.validator_store
.sign_blobs::<Payload>(*validator_pubkey_ref, blob_sidecars)
.await
{
Ok(signed_blobs) => Some(signed_blobs),
Err(ValidatorStoreError::UnknownPubkey(pubkey)) => {
// A pubkey can be missing when a validator was recently removed
// via the API.
warn!(
log,
"Missing pubkey for blobs";
"info" => "a validator may have recently been removed from this VC",
"pubkey" => ?pubkey,
"slot" => ?slot
);
return Ok(());
}
Err(e) => {
return Err(BlockError::Recoverable(format!(
"Unable to sign blobs: {:?}",
e
)))
}
}
}
None => None,
};
let signing_time_ms =
Duration::from_secs_f64(signing_timer.map_or(0.0, |t| t.stop_and_record())).as_millis();
@ -546,8 +524,6 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
"signing_time_ms" => signing_time_ms,
);
let signed_block_contents = SignedBlockContents::from((signed_block, maybe_signed_blobs));
// Publish block with first available beacon node.
//
// Try the proposer nodes first, since we've likely gone to efforts to
@ -558,11 +534,8 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
RequireSynced::No,
OfflineOnFailure::Yes,
|beacon_node| async {
self.publish_signed_block_contents::<Payload>(
&signed_block_contents,
beacon_node,
)
.await
self.publish_signed_block_contents(&signed_block, beacon_node)
.await
},
)
.await?;
@ -570,41 +543,41 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
info!(
log,
"Successfully published block";
"block_type" => ?Payload::block_type(),
"deposits" => signed_block_contents.signed_block().message().body().deposits().len(),
"attestations" => signed_block_contents.signed_block().message().body().attestations().len(),
"block_type" => ?signed_block.block_type(),
"deposits" => signed_block.num_deposits(),
"attestations" => signed_block.num_attestations(),
"graffiti" => ?graffiti.map(|g| g.as_utf8_lossy()),
"slot" => signed_block_contents.signed_block().slot().as_u64(),
"slot" => signed_block.slot().as_u64(),
);
Ok(())
}
async fn publish_signed_block_contents<Payload: AbstractExecPayload<E>>(
async fn publish_signed_block_contents(
&self,
signed_block_contents: &SignedBlockContents<E, Payload>,
signed_block: &SignedBlock<E>,
beacon_node: &BeaconNodeHttpClient,
) -> Result<(), BlockError> {
let log = self.context.log();
let slot = signed_block_contents.signed_block().slot();
match Payload::block_type() {
BlockType::Full => {
let slot = signed_block.slot();
match signed_block {
SignedBlock::Full(signed_block) => {
let _post_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BEACON_BLOCK_HTTP_POST],
);
beacon_node
.post_beacon_blocks(signed_block_contents)
.post_beacon_blocks(signed_block)
.await
.or_else(|e| handle_block_post_error(e, slot, log))?
}
BlockType::Blinded => {
SignedBlock::Blinded(signed_block) => {
let _post_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BLINDED_BEACON_BLOCK_HTTP_POST],
);
beacon_node
.post_beacon_blinded_blocks(signed_block_contents)
.post_beacon_blinded_blocks(signed_block)
.await
.or_else(|e| handle_block_post_error(e, slot, log))?
}
@ -612,22 +585,23 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
Ok::<_, BlockError>(())
}
async fn get_validator_block<Payload: AbstractExecPayload<E>>(
async fn get_validator_block(
beacon_node: &BeaconNodeHttpClient,
slot: Slot,
randao_reveal_ref: &SignatureBytes,
graffiti: Option<Graffiti>,
proposer_index: Option<u64>,
builder_proposal: bool,
log: &Logger,
) -> Result<BlockContents<E, Payload>, BlockError> {
let block_contents: BlockContents<E, Payload> = match Payload::block_type() {
BlockType::Full => {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BEACON_BLOCK_HTTP_GET],
);
) -> Result<UnsignedBlock<E>, BlockError> {
let unsigned_block = if !builder_proposal {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BEACON_BLOCK_HTTP_GET],
);
UnsignedBlock::Full(
beacon_node
.get_validator_blocks::<E, Payload>(slot, randao_reveal_ref, graffiti.as_ref())
.get_validator_blocks::<E>(slot, randao_reveal_ref, graffiti.as_ref())
.await
.map_err(|e| {
BlockError::Recoverable(format!(
@ -635,19 +609,16 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
e
))
})?
.data
}
BlockType::Blinded => {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BLINDED_BEACON_BLOCK_HTTP_GET],
);
.data,
)
} else {
let _get_timer = metrics::start_timer_vec(
&metrics::BLOCK_SERVICE_TIMES,
&[metrics::BLINDED_BEACON_BLOCK_HTTP_GET],
);
UnsignedBlock::Blinded(
beacon_node
.get_validator_blinded_blocks::<E, Payload>(
slot,
randao_reveal_ref,
graffiti.as_ref(),
)
.get_validator_blinded_blocks::<E>(slot, randao_reveal_ref, graffiti.as_ref())
.await
.map_err(|e| {
BlockError::Recoverable(format!(
@ -655,8 +626,8 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
e
))
})?
.data
}
.data,
)
};
info!(
@ -664,13 +635,59 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
"Received unsigned block";
"slot" => slot.as_u64(),
);
if proposer_index != Some(block_contents.block().proposer_index()) {
if proposer_index != Some(unsigned_block.proposer_index()) {
return Err(BlockError::Recoverable(
"Proposer index does not match block proposer. Beacon chain re-orged".to_string(),
));
}
Ok::<_, BlockError>(block_contents)
Ok::<_, BlockError>(unsigned_block)
}
}
pub enum UnsignedBlock<E: EthSpec> {
Full(FullBlockContents<E>),
Blinded(BlindedBeaconBlock<E>),
}
impl<E: EthSpec> UnsignedBlock<E> {
pub fn proposer_index(&self) -> u64 {
match self {
UnsignedBlock::Full(block) => block.block().proposer_index(),
UnsignedBlock::Blinded(block) => block.proposer_index(),
}
}
}
pub enum SignedBlock<E: EthSpec> {
Full(PublishBlockRequest<E>),
Blinded(SignedBlindedBeaconBlock<E>),
}
impl<E: EthSpec> SignedBlock<E> {
pub fn block_type(&self) -> BlockType {
match self {
SignedBlock::Full(_) => BlockType::Full,
SignedBlock::Blinded(_) => BlockType::Blinded,
}
}
pub fn slot(&self) -> Slot {
match self {
SignedBlock::Full(block) => block.signed_block().message().slot(),
SignedBlock::Blinded(block) => block.message().slot(),
}
}
pub fn num_deposits(&self) -> usize {
match self {
SignedBlock::Full(block) => block.signed_block().message().body().deposits().len(),
SignedBlock::Blinded(block) => block.message().body().deposits().len(),
}
}
pub fn num_attestations(&self) -> usize {
match self {
SignedBlock::Full(block) => block.signed_block().message().body().attestations().len(),
SignedBlock::Blinded(block) => block.message().body().attestations().len(),
}
}
}
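
For orientation, a small sketch (not in this diff) of how the `SignedBlock` wrapper lets downstream code treat full and blinded proposals uniformly, using only the helpers defined above:

use types::EthSpec;

fn proposal_summary<E: EthSpec>(signed_block: &SignedBlock<E>) -> String {
    // These are the same fields the publish path logs after a successful proposal.
    format!(
        "type={:?} slot={} deposits={} attestations={}",
        signed_block.block_type(),
        signed_block.slot(),
        signed_block.num_deposits(),
        signed_block.num_attestations(),
    )
}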

View File

@ -59,11 +59,6 @@ lazy_static::lazy_static! {
"Total count of attempted block signings",
&["status"]
);
pub static ref SIGNED_BLOBS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"vc_signed_beacon_blobs_total",
"Total count of attempted blob signings",
&["status"]
);
pub static ref SIGNED_ATTESTATIONS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
"vc_signed_attestations_total",
"Total count of attempted Attestation signings",

View File

@ -37,7 +37,6 @@ pub enum Error {
pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
RandaoReveal(Epoch),
BeaconBlock(&'a BeaconBlock<T, Payload>),
BlobSidecar(&'a Payload::Sidecar),
AttestationData(&'a AttestationData),
SignedAggregateAndProof(&'a AggregateAndProof<T>),
SelectionProof(Slot),
@ -60,7 +59,6 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Pay
match self {
SignableMessage::RandaoReveal(epoch) => epoch.signing_root(domain),
SignableMessage::BeaconBlock(b) => b.signing_root(domain),
SignableMessage::BlobSidecar(b) => b.signing_root(domain),
SignableMessage::AttestationData(a) => a.signing_root(domain),
SignableMessage::SignedAggregateAndProof(a) => a.signing_root(domain),
SignableMessage::SelectionProof(slot) => slot.signing_root(domain),
@ -184,10 +182,6 @@ impl SigningMethod {
Web3SignerObject::RandaoReveal { epoch }
}
SignableMessage::BeaconBlock(block) => Web3SignerObject::beacon_block(block)?,
SignableMessage::BlobSidecar(_) => {
// https://github.com/ConsenSys/web3signer/issues/726
unimplemented!("Web3Signer blob signing not implemented.")
}
SignableMessage::AttestationData(a) => Web3SignerObject::Attestation(a),
SignableMessage::SignedAggregateAndProof(a) => {
Web3SignerObject::AggregateAndProof(a)

View File

@ -6,7 +6,6 @@ use crate::{
Config,
};
use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition};
use eth2::types::VariableList;
use parking_lot::{Mutex, RwLock};
use slashing_protection::{
interchange::Interchange, InterchangeError, NotSafe, Safe, SlashingDatabase,
@ -18,16 +17,14 @@ use std::marker::PhantomData;
use std::path::Path;
use std::sync::Arc;
use task_executor::TaskExecutor;
use types::sidecar::Sidecar;
use types::{
attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address,
AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof,
Domain, Epoch, EthSpec, Fork, ForkName, Graffiti, Hash256, Keypair, PublicKeyBytes,
SelectionProof, SidecarList, Signature, SignedAggregateAndProof, SignedBeaconBlock,
SignedContributionAndProof, SignedRoot, SignedSidecar, SignedSidecarList,
SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, SyncAggregatorSelectionData,
SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId,
ValidatorRegistrationData, VoluntaryExit,
SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock,
SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit,
Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage,
SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit,
};
use validator_dir::ValidatorDir;
@ -567,39 +564,6 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
}
}
pub async fn sign_blobs<Payload: AbstractExecPayload<E>>(
&self,
validator_pubkey: PublicKeyBytes,
blob_sidecars: SidecarList<E, Payload::Sidecar>,
) -> Result<SignedSidecarList<E, Payload::Sidecar>, Error> {
let mut signed_blob_sidecars = Vec::new();
for blob_sidecar in blob_sidecars.into_iter() {
let slot = blob_sidecar.slot();
let signing_epoch = slot.epoch(E::slots_per_epoch());
let signing_context = self.signing_context(Domain::BlobSidecar, signing_epoch);
let signing_method = self.doppelganger_checked_signing_method(validator_pubkey)?;
let signature = signing_method
.get_signature::<E, Payload>(
SignableMessage::BlobSidecar(blob_sidecar.as_ref()),
signing_context,
&self.spec,
&self.task_executor,
)
.await?;
metrics::inc_counter_vec(&metrics::SIGNED_BLOBS_TOTAL, &[metrics::SUCCESS]);
signed_blob_sidecars.push(SignedSidecar {
message: blob_sidecar,
signature,
_phantom: PhantomData,
});
}
Ok(VariableList::from(signed_blob_sidecars))
}
pub async fn sign_attestation(
&self,
validator_pubkey: PublicKeyBytes,