Single blob lookups (#4152)

* some blob reprocessing work

* remove ForceBlockLookup

* reorder enum match arms in sync manager

* a lot more reprocessing work

* impl logic for triggering blob lookups along with block lookups

* deal with rpc blobs in groups per block in the da checker. don't cache missing blob ids in the da checker.

* make single block lookup generic

* more work

* add delayed processing logic and combine some requests

* start fixing some compile errors

* fix compilation in main block lookup mod

* much work

* get things compiling

* parent blob lookups

* fix compile

* revert red/stevie changes

* fix up sync manager delay message logic

* add peer usefulness enum

* should remove lookup refactor

* consolidate retry error handling

* improve peer scoring during certain failures in parent lookups

* improve retry code

* drop parent lookup if either req has a peer disconnect during download

* refactor single block processed method

* processing peer refactor

* smol bugfix

* fix some todos

* fix lints

* fix lints

* fix compile in lookup tests

* fix lints

* fix lints

* fix existing block lookup tests

* renamings

* fix after merge

* cargo fmt

* compilation fix in beacon chain tests

* fix

* refactor lookup tests to work with multiple forks and response types

* make tests into macros

* wrap availability check error

* fix compile after merge

* add random blobs

* start fixing up lookup verify error handling

* some bug fixes and the start of deneb only tests

* make tests work for all forks

* track information about peer source

* error refactoring

* improve peer scoring

* fix test compilation

* make sure blobs are sent for processing after stream termination, delete copied tests

* add some tests and fix a bug

* smol bugfixes and moar tests

* add tests and fix some things

* compile after merge

* lots of refactoring

* retry on invalid block/blob

* merge unknown parent messages before current slot lookup

* get tests compiling

* penalize blob peer on invalid blobs

* Check disk on in-memory cache miss

* Update beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs

* Update beacon_node/network/src/sync/network_context.rs

Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>

* fix bug in matching blocks and blobs in range sync

* pr feedback

* fix conflicts

* upgrade logs from warn to crit when we receive incorrect response in range

* synced_and_connected_within_tolerance -> should_search_for_block

* remove todo

* Fix Broken Overflow Tests

* fix merge conflicts

* checkpoint sync without alignment

* add import

* query for checkpoint state by slot rather than state root (teku doesn't serve by state root)

* get state first and query by most recent block root

* simplify delay logic

* rename unknown parent sync message variants

* rename parameter, block_slot -> slot

* add some docs to the lookup module

* use interval instead of sleep

* drop request if blocks and blobs requests both return `None` for `Id`

* clean up `find_single_lookup` logic

* add lookup source enum

* clean up `find_single_lookup` logic

* add docs to find_single_lookup_request

* move LookupSource out of param where unnecessary

* remove unnecessary todo

* query for block by `state.latest_block_header.slot`

* fix lint

* fix test

* fix test

* fix observed blob sidecars test

* PR updates

* use optional params instead of a closure

* create lookup and trigger request in separate method calls

* remove `LookupSource`

* make sure duplicate lookups are not dropped

---------

Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
Co-authored-by: Mark Mackey <mark@sigmaprime.io>
Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>
realbigsean 2023-06-15 12:59:10 -04:00 committed by GitHub
parent 5428e68943
commit a62e52f319
47 changed files with 4981 additions and 1309 deletions

@@ -119,6 +119,20 @@ jobs:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run operation_pool tests for all known forks
       run: make test-op-pool
+  network-minimal-tests:
+    name: network-minimal-tests
+    runs-on: ubuntu-latest
+    needs: cargo-fmt
+    steps:
+    - uses: actions/checkout@v3
+    - name: Get latest version of stable Rust
+      run: rustup update stable
+    - name: Install Protoc
+      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
+      with:
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
+    - name: Run network tests for all known forks using the minimal spec
+      run: make test-network-minimal
   slasher-tests:
     name: slasher-tests
     runs-on: ubuntu-latest

Cargo.lock (generated, 12 lines changed)

@@ -2158,6 +2158,15 @@ dependencies = [
  "types",
 ]

+[[package]]
+name = "erased-serde"
+version = "0.3.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f2b0c2380453a92ea8b6c8e5f64ecaafccddde8ceab55ff7a8ac1029f894569"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "errno"
 version = "0.3.1"

@@ -7521,6 +7530,9 @@ name = "slog"
 version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06"
+dependencies = [
+ "erased-serde",
+]

 [[package]]
 name = "slog-async"


@@ -143,6 +143,13 @@ test-op-pool-%:
		--features 'beacon_chain/fork_from_env'\
		-p operation_pool

+test-network-minimal: $(patsubst %,test-network-minimal-%,$(FORKS))
+
+test-network-minimal-%:
+	env FORK_NAME=$* cargo test --release \
+		--features 'fork_from_env,spec-minimal'\
+		-p network
+
 # Run the tests in the `slasher` crate for all supported database backends.
 test-slasher:
	cargo test --release -p slasher --features lmdb


@@ -117,7 +117,7 @@ use tokio_stream::Stream;
 use tree_hash::TreeHash;
 use types::beacon_block_body::KzgCommitments;
 use types::beacon_state::CloneConfig;
-use types::blob_sidecar::{BlobIdentifier, BlobSidecarList, Blobs};
+use types::blob_sidecar::{BlobSidecarList, Blobs};
 use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
 use types::*;

@@ -185,12 +185,10 @@ pub enum WhenSlotSkipped {
 #[derive(Debug, PartialEq)]
 pub enum AvailabilityProcessingStatus {
-    PendingBlobs(Vec<BlobIdentifier>),
-    PendingBlock(Hash256),
+    MissingComponents(Slot, Hash256),
     Imported(Hash256),
 }

-//TODO(sean) using this in tests for now
 impl TryInto<SignedBeaconBlockHash> for AvailabilityProcessingStatus {
     type Error = ();

@@ -468,7 +466,7 @@
     /// The slot at which blocks are downloaded back to.
     pub genesis_backfill_slot: Slot,
     pub proposal_blob_cache: BlobCache<T::EthSpec>,
-    pub data_availability_checker: DataAvailabilityChecker<T>,
+    pub data_availability_checker: Arc<DataAvailabilityChecker<T>>,
     pub kzg: Option<Arc<Kzg>>,
 }

@@ -1985,8 +1983,7 @@
         self: &Arc<Self>,
         blob_sidecar: SignedBlobSidecar<T::EthSpec>,
         subnet_id: u64,
-    ) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError> // TODO(pawan): make a GossipVerifedBlob type
-    {
+    ) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError<T::EthSpec>> {
         blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self)
     }

@@ -2674,7 +2671,24 @@
                 )
                 .await
                 {
-                    Ok(_) => imported_blocks += 1,
+                    Ok(status) => {
+                        match status {
+                            AvailabilityProcessingStatus::Imported(_) => {
+                                // The block was imported successfully.
+                                imported_blocks += 1;
+                            }
+                            AvailabilityProcessingStatus::MissingComponents(slot, block_root) => {
+                                warn!(self.log, "Blobs missing in response to range request";
+                                    "block_root" => ?block_root, "slot" => slot);
+                                return ChainSegmentResult::Failed {
+                                    imported_blocks,
+                                    error: BlockError::AvailabilityCheck(
+                                        AvailabilityCheckError::MissingBlobs,
+                                    ),
+                                };
+                            }
+                        }
+                    }
                     Err(error) => {
                         return ChainSegmentResult::Failed {
                             imported_blocks,

@@ -2748,6 +2762,7 @@
         count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
         self.check_availability_and_maybe_import(
+            blob.slot(),
             |chain| chain.data_availability_checker.put_gossip_blob(blob),
             count_unrealized,
         )

@@ -2804,6 +2819,7 @@
             }
             ExecutedBlock::AvailabilityPending(block) => {
                 self.check_availability_and_maybe_import(
+                    block.block.slot(),
                     |chain| {
                         chain
                             .data_availability_checker

@@ -2907,6 +2923,7 @@
     /// (i.e., this function is not atomic).
     pub async fn check_availability_and_maybe_import(
         self: &Arc<Self>,
+        slot: Slot,
         cache_fn: impl FnOnce(Arc<Self>) -> Result<Availability<T::EthSpec>, AvailabilityCheckError>,
         count_unrealized: CountUnrealized,
     ) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {

@@ -2915,12 +2932,9 @@
             Availability::Available(block) => {
                 self.import_available_block(block, count_unrealized).await
             }
-            Availability::PendingBlock(block_root) => {
-                Ok(AvailabilityProcessingStatus::PendingBlock(block_root))
-            }
-            Availability::PendingBlobs(blob_ids) => {
-                Ok(AvailabilityProcessingStatus::PendingBlobs(blob_ids))
-            }
+            Availability::MissingComponents(block_root) => Ok(
+                AvailabilityProcessingStatus::MissingComponents(slot, block_root),
+            ),
         }
     }
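To make the new two-variant control flow concrete, here is a minimal, self-contained sketch. The stand-in types and the `request_missing_components` hook are illustrative only, not the Lighthouse API; the sketch only shows how a caller branches on `AvailabilityProcessingStatus` as reshaped by this diff.

```rust
// Stand-in types; in Lighthouse these come from the `types` crate.
type Slot = u64;
type Hash256 = [u8; 32];

enum AvailabilityProcessingStatus {
    Imported(Hash256),
    MissingComponents(Slot, Hash256),
}

fn handle_processing_status(status: AvailabilityProcessingStatus) {
    match status {
        AvailabilityProcessingStatus::Imported(block_root) => {
            // Block and all required blobs were present; import completed.
            let _ = block_root;
        }
        AvailabilityProcessingStatus::MissingComponents(slot, block_root) => {
            // Block or blobs still missing; hand off to sync so it can
            // trigger single block/blob lookups for this root.
            request_missing_components(slot, block_root);
        }
    }
}

// Hypothetical hook standing in for the sync layer's lookup machinery.
fn request_missing_components(_slot: Slot, _block_root: Hash256) {}
```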


@@ -16,15 +16,17 @@ use eth2::types::BlockContentsTuple;
 use kzg::Kzg;
 use slog::{debug, warn};
 use ssz_derive::{Decode, Encode};
+use ssz_types::FixedVector;
 use std::borrow::Cow;
+use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
 use types::{
-    BeaconBlockRef, BeaconState, BeaconStateError, BlobSidecar, BlobSidecarList, ChainSpec,
-    CloneConfig, Epoch, EthSpec, FullPayload, Hash256, KzgCommitment, RelativeEpoch,
-    SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlobSidecar, Slot,
+    BeaconBlockRef, BeaconState, BeaconStateError, BlobSidecar, ChainSpec, CloneConfig, Epoch,
+    EthSpec, FullPayload, Hash256, KzgCommitment, RelativeEpoch, SignedBeaconBlock,
+    SignedBeaconBlockHeader, SignedBlobSidecar, Slot,
 };

 #[derive(Debug)]
-pub enum BlobError {
+pub enum BlobError<T: EthSpec> {
     /// The blob sidecar is from a slot that is later than the current slot (with respect to the
     /// gossip clock disparity).
     ///

@@ -96,10 +98,7 @@ pub enum BlobError {
     /// ## Peer scoring
     ///
     /// We cannot process the blob without validating its parent, the peer isn't necessarily faulty.
-    BlobParentUnknown {
-        blob_root: Hash256,
-        blob_parent_root: Hash256,
-    },
+    BlobParentUnknown(Arc<BlobSidecar<T>>),

     /// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple
     /// over gossip or no gossip sources.

@@ -114,13 +113,13 @@
     },
 }

-impl From<BeaconChainError> for BlobError {
+impl<T: EthSpec> From<BeaconChainError> for BlobError<T> {
     fn from(e: BeaconChainError) -> Self {
         BlobError::BeaconChainError(e)
     }
 }

-impl From<BeaconStateError> for BlobError {
+impl<T: EthSpec> From<BeaconStateError> for BlobError<T> {
     fn from(e: BeaconStateError) -> Self {
         BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
     }

@@ -128,27 +127,36 @@
 /// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on
 /// the p2p network.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct GossipVerifiedBlob<T: EthSpec> {
     blob: Arc<BlobSidecar<T>>,
 }

 impl<T: EthSpec> GossipVerifiedBlob<T> {
+    pub fn id(&self) -> BlobIdentifier {
+        self.blob.id()
+    }
     pub fn block_root(&self) -> Hash256 {
         self.blob.block_root
     }
+    pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
+        self.blob
+    }
+    pub fn slot(&self) -> Slot {
+        self.blob.slot
+    }
 }

 pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
     signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
     subnet: u64,
     chain: &BeaconChain<T>,
-) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError> {
+) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError<T::EthSpec>> {
     let blob_slot = signed_blob_sidecar.message.slot;
     let blob_index = signed_blob_sidecar.message.index;
-    let block_root = signed_blob_sidecar.message.block_root;
     let block_parent_root = signed_blob_sidecar.message.block_parent_root;
     let blob_proposer_index = signed_blob_sidecar.message.proposer_index;
+    let block_root = signed_blob_sidecar.message.block_root;

     // Verify that the blob_sidecar was received on the correct subnet.
     if blob_index != subnet {

@@ -211,10 +219,7 @@
             });
         }
     } else {
-        return Err(BlobError::BlobParentUnknown {
-            blob_root: block_root,
-            blob_parent_root: block_parent_root,
-        });
+        return Err(BlobError::BlobParentUnknown(signed_blob_sidecar.message));
     }

     // Note: We check that the proposer_index matches against the shuffling first to avoid

@@ -366,7 +371,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
     state_root_opt: Option<Hash256>,
     blob_slot: Slot,
     spec: &ChainSpec,
-) -> Result<Cow<'a, BeaconState<E>>, BlobError> {
+) -> Result<Cow<'a, BeaconState<E>>, BlobError<E>> {
     let block_epoch = blob_slot.epoch(E::slots_per_epoch());

     if state.current_epoch() == block_epoch {

@@ -443,19 +448,14 @@ impl<T: EthSpec> KzgVerifiedBlob<T> {
 ///
 /// Returns an error if the kzg verification check fails.
 pub fn verify_kzg_for_blob<T: EthSpec>(
-    blob: GossipVerifiedBlob<T>,
+    blob: Arc<BlobSidecar<T>>,
     kzg: &Kzg,
 ) -> Result<KzgVerifiedBlob<T>, AvailabilityCheckError> {
     //TODO(sean) remove clone
-    if validate_blob::<T>(
-        kzg,
-        blob.blob.blob.clone(),
-        blob.blob.kzg_commitment,
-        blob.blob.kzg_proof,
-    )
+    if validate_blob::<T>(kzg, blob.blob.clone(), blob.kzg_commitment, blob.kzg_proof)
     .map_err(AvailabilityCheckError::Kzg)?
     {
-        Ok(KzgVerifiedBlob { blob: blob.blob })
+        Ok(KzgVerifiedBlob { blob })
     } else {
         Err(AvailabilityCheckError::KzgVerificationFailed)
     }

@@ -467,7 +467,7 @@
 /// Note: This function should be preferred over calling `verify_kzg_for_blob`
 /// in a loop since this function kzg verifies a list of blobs more efficiently.
 pub fn verify_kzg_for_blob_list<T: EthSpec>(
-    blob_list: BlobSidecarList<T>,
+    blob_list: Vec<Arc<BlobSidecar<T>>>,
     kzg: &Kzg,
 ) -> Result<KzgVerifiedBlobList<T>, AvailabilityCheckError> {
     let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list

@@ -608,7 +608,16 @@
 #[derivative(Hash(bound = "E: EthSpec"))]
 pub enum BlockWrapper<E: EthSpec> {
     Block(Arc<SignedBeaconBlock<E>>),
-    BlockAndBlobs(Arc<SignedBeaconBlock<E>>, Vec<Arc<BlobSidecar<E>>>),
+    BlockAndBlobs(Arc<SignedBeaconBlock<E>>, FixedBlobSidecarList<E>),
+}
+
+impl<E: EthSpec> BlockWrapper<E> {
+    pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<E>>, Option<FixedBlobSidecarList<E>>) {
+        match self {
+            BlockWrapper::Block(block) => (block, None),
+            BlockWrapper::BlockAndBlobs(block, blobs) => (block, Some(blobs)),
+        }
+    }
 }

 impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {

@@ -675,13 +684,15 @@
 impl<E: EthSpec> From<BlockContentsTuple<E, FullPayload<E>>> for BlockWrapper<E> {
     fn from(value: BlockContentsTuple<E, FullPayload<E>>) -> Self {
         match value.1 {
-            Some(variable_list) => Self::BlockAndBlobs(
-                Arc::new(value.0),
-                Vec::from(variable_list)
-                    .into_iter()
-                    .map(|signed_blob| signed_blob.message)
-                    .collect::<Vec<_>>(),
-            ),
+            Some(variable_list) => {
+                let mut blobs = Vec::with_capacity(E::max_blobs_per_block());
+                for blob in variable_list {
+                    if blob.message.index < E::max_blobs_per_block() as u64 {
+                        blobs.insert(blob.message.index as usize, Some(blob.message));
+                    }
+                }
+                Self::BlockAndBlobs(Arc::new(value.0), FixedVector::from(blobs))
+            }
             None => Self::Block(Arc::new(value.0)),
         }
     }
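The `From` impl above relies on an indexing rule: each blob lives at its own `index` inside a fixed-length list of `Option`s. A minimal sketch of that rule, with a plain `Vec` standing in for `FixedVector`/`FixedBlobSidecarList` and an illustrative constant instead of the `EthSpec` bound:

```rust
const MAX_BLOBS_PER_BLOCK: usize = 4; // illustrative; the real bound comes from EthSpec

#[derive(Clone)]
struct BlobStandIn {
    index: u64,
}

// Place each blob at its own `index` in a fixed-length list of `Option`s,
// dropping anything out of range instead of panicking.
fn to_fixed_list(blobs: Vec<BlobStandIn>) -> Vec<Option<BlobStandIn>> {
    let mut fixed: Vec<Option<BlobStandIn>> = vec![None; MAX_BLOBS_PER_BLOCK];
    for blob in blobs {
        if (blob.index as usize) < MAX_BLOBS_PER_BLOCK {
            fixed[blob.index as usize] = Some(blob);
        }
    }
    fixed
}
```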


@@ -70,7 +70,7 @@ use crate::{
 use derivative::Derivative;
 use eth2::types::EventKind;
 use execution_layer::PayloadStatus;
-use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
+pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
 use parking_lot::RwLockReadGuard;
 use proto_array::Block as ProtoBlock;
 use safe_arith::ArithError;

@@ -150,10 +150,7 @@ pub enum BlockError<T: EthSpec> {
     /// its parent.
     ParentUnknown(BlockWrapper<T>),
     /// The block skips too many slots and is a DoS risk.
-    TooManySkippedSlots {
-        parent_slot: Slot,
-        block_slot: Slot,
-    },
+    TooManySkippedSlots { parent_slot: Slot, block_slot: Slot },
     /// The block slot is greater than the present slot.
     ///
     /// ## Peer scoring

@@ -168,10 +165,7 @@
     /// ## Peer scoring
     ///
     /// The peer has incompatible state transition logic and is faulty.
-    StateRootMismatch {
-        block: Hash256,
-        local: Hash256,
-    },
+    StateRootMismatch { block: Hash256, local: Hash256 },
     /// The block was a genesis block, these blocks cannot be re-imported.
     GenesisBlock,
     /// The slot is finalized, no need to import.

@@ -190,9 +184,7 @@
     ///
     /// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
     /// imported.
-    NotFinalizedDescendant {
-        block_parent_root: Hash256,
-    },
+    NotFinalizedDescendant { block_parent_root: Hash256 },
     /// Block is already known, no need to re-import.
     ///
     /// ## Peer scoring

@@ -205,10 +197,7 @@
     ///
     /// The `proposer` has already proposed a block at this slot. The existing block may or may not
     /// be equal to the given block.
-    RepeatProposal {
-        proposer: u64,
-        slot: Slot,
-    },
+    RepeatProposal { proposer: u64, slot: Slot },
     /// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
     ///
     /// ## Peer scoring

@@ -223,10 +212,7 @@
     /// ## Peer scoring
     ///
     /// The block is invalid and the peer is faulty.
-    IncorrectBlockProposer {
-        block: u64,
-        local_shuffling: u64,
-    },
+    IncorrectBlockProposer { block: u64, local_shuffling: u64 },
     /// The proposal signature in invalid.
     ///
     /// ## Peer scoring

@@ -250,10 +236,7 @@
     /// ## Peer scoring
     ///
     /// The block is invalid and the peer is faulty.
-    BlockIsNotLaterThanParent {
-        block_slot: Slot,
-        parent_slot: Slot,
-    },
+    BlockIsNotLaterThanParent { block_slot: Slot, parent_slot: Slot },
     /// At least one block in the chain segment did not have it's parent root set to the root of
     /// the prior block.
     ///

@@ -309,15 +292,15 @@
     /// If it's actually our fault (e.g. our execution node database is corrupt) we have bigger
     /// problems to worry about than losing peers, and we're doing the network a favour by
     /// disconnecting.
-    ParentExecutionPayloadInvalid {
-        parent_root: Hash256,
-    },
-    BlobValidation(BlobError),
+    ParentExecutionPayloadInvalid { parent_root: Hash256 },
+    /// A blob alone failed validation.
+    BlobValidation(BlobError<T>),
+    /// The block and blob together failed validation.
     AvailabilityCheck(AvailabilityCheckError),
 }

-impl<T: EthSpec> From<BlobError> for BlockError<T> {
-    fn from(e: BlobError) -> Self {
+impl<T: EthSpec> From<BlobError<T>> for BlockError<T> {
+    fn from(e: BlobError<T>) -> Self {
         Self::BlobValidation(e)
     }
 }

@@ -785,21 +768,17 @@
     }

     pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> {
-        self.get_filtered_blob_ids(|_| true)
+        let block_root = self.import_data.block_root;
+        self.block
+            .get_filtered_blob_ids(Some(block_root), |_, _| true)
     }

-    pub fn get_filtered_blob_ids(&self, filter: impl Fn(u64) -> bool) -> Vec<BlobIdentifier> {
-        let num_blobs_expected = self.num_blobs_expected();
-        let mut blob_ids = Vec::with_capacity(num_blobs_expected);
-        for i in 0..num_blobs_expected as u64 {
-            if filter(i) {
-                blob_ids.push(BlobIdentifier {
-                    block_root: self.import_data.block_root,
-                    index: i,
-                });
-            }
-        }
-        blob_ids
+    pub fn get_filtered_blob_ids(
+        &self,
+        filter: impl Fn(usize, Hash256) -> bool,
+    ) -> Vec<BlobIdentifier> {
+        self.block
+            .get_filtered_blob_ids(Some(self.import_data.block_root), filter)
     }
 }
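The widened filter signature above, `Fn(usize, Hash256) -> bool`, drives blob-id selection. A minimal sketch of that selection, with stand-in types (`filtered_blob_ids` is an illustrative free function, not the Lighthouse method):

```rust
type Hash256 = [u8; 32];

struct BlobIdentifier {
    block_root: Hash256,
    index: u64,
}

// Keep the ids for which the filter returns `true`; the filter now sees both
// the blob index and the block root.
fn filtered_blob_ids(
    block_root: Hash256,
    num_blobs_expected: usize,
    filter: impl Fn(usize, Hash256) -> bool,
) -> Vec<BlobIdentifier> {
    (0..num_blobs_expected)
        .filter(|i| filter(*i, block_root))
        .map(|index| BlobIdentifier {
            block_root,
            index: index as u64,
        })
        .collect()
}

fn main() {
    // `get_all_blob_ids` reduces to the always-true filter.
    let all = filtered_blob_ids([0u8; 32], 4, |_, _| true);
    assert_eq!(all.len(), 4);
}
```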


@@ -419,23 +419,14 @@ where
         let weak_subj_block_root = weak_subj_block.canonical_root();
         let weak_subj_state_root = weak_subj_block.state_root();

-        // Check that the given block lies on an epoch boundary. Due to the database only storing
+        // Check that the given state lies on an epoch boundary. Due to the database only storing
         // full states on epoch boundaries and at restore points it would be difficult to support
         // starting from a mid-epoch state.
         if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 {
             return Err(format!(
-                "Checkpoint block at slot {} is not aligned to epoch start. \
-                 Please supply an aligned checkpoint with block.slot % 32 == 0",
-                weak_subj_block.slot(),
-            ));
-        }
-
-        // Check that the block and state have consistent slots and state roots.
-        if weak_subj_state.slot() != weak_subj_block.slot() {
-            return Err(format!(
-                "Slot of snapshot block ({}) does not match snapshot state ({})",
-                weak_subj_block.slot(),
-                weak_subj_state.slot(),
+                "Checkpoint state at slot {} is not aligned to epoch start. \
+                 Please supply an aligned checkpoint with state.slot % 32 == 0",
+                weak_subj_slot,
             ));
         }

@@ -444,17 +435,22 @@ where
         weak_subj_state
             .build_all_caches(&self.spec)
             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;

-        let computed_state_root = weak_subj_state
+        weak_subj_state
             .update_tree_hash_cache()
             .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?;

-        if weak_subj_state_root != computed_state_root {
-            return Err(format!(
-                "Snapshot state root does not match block, expected: {:?}, got: {:?}",
-                weak_subj_state_root, computed_state_root
-            ));
-        }
+        let latest_block_slot = weak_subj_state.latest_block_header().slot;
+
+        // We can only validate the block root if it exists in the state. We can't calculated it
+        // from the `latest_block_header` because the state root might be set to the zero hash.
+        if let Ok(state_slot_block_root) = weak_subj_state.get_block_root(latest_block_slot) {
+            if weak_subj_block_root != *state_slot_block_root {
+                return Err(format!(
+                    "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}",
+                    weak_subj_block_root, state_slot_block_root
+                ));
+            }
+        }

@@ -508,13 +504,12 @@ where
         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot)
             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;

-        let current_slot = Some(snapshot.beacon_block.slot());
-
         let fork_choice = ForkChoice::from_anchor(
             fc_store,
             snapshot.beacon_block_root,
             &snapshot.beacon_block,
             &snapshot.beacon_state,
-            current_slot,
+            Some(weak_subj_slot),
             &self.spec,
         )
         .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?;

@@ -891,13 +886,10 @@ where
             validator_monitor: RwLock::new(validator_monitor),
             genesis_backfill_slot,
             //TODO(sean) should we move kzg solely to the da checker?
-            data_availability_checker: DataAvailabilityChecker::new(
-                slot_clock,
-                kzg.clone(),
-                store,
-                self.spec,
-            )
-            .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?,
+            data_availability_checker: Arc::new(
+                DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, self.spec)
+                    .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?,
+            ),
             proposal_blob_cache: BlobCache::default(),
             kzg,
         };
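The relaxed checkpoint-sync validation above boils down to two checks: the state must sit on an epoch boundary, and the supplied block must match the most recent block root recorded in the state. A minimal sketch under stand-in types (`check_checkpoint_snapshot` is illustrative; `state_block_root` stands in for `state.get_block_root(latest_block_slot)`):

```rust
const SLOTS_PER_EPOCH: u64 = 32;

fn check_checkpoint_snapshot(
    state_slot: u64,
    state_block_root: Option<[u8; 32]>, // present only while the root is in the state's history
    supplied_block_root: [u8; 32],
) -> Result<(), String> {
    // The state (not the block) must sit on an epoch boundary.
    if state_slot % SLOTS_PER_EPOCH != 0 {
        return Err(format!(
            "Checkpoint state at slot {state_slot} is not aligned to epoch start"
        ));
    }
    // Validate the block root only when the state records it; hashing
    // `latest_block_header` directly would fail because its state root may
    // still be zeroed mid-processing.
    if let Some(root) = state_block_root {
        if root != supplied_block_root {
            return Err("state's most recent block root does not match the block".into());
        }
    }
    Ok(())
}
```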


@@ -10,12 +10,14 @@ use kzg::Error as KzgError;
 use kzg::Kzg;
 use slog::{debug, error};
 use slot_clock::SlotClock;
-use ssz_types::{Error, VariableList};
+use ssz_types::{Error, FixedVector, VariableList};
 use state_processing::per_block_processing::deneb::deneb::verify_kzg_commitments_against_transactions;
+use std::collections::HashSet;
 use std::sync::Arc;
+use strum::IntoStaticStr;
 use task_executor::TaskExecutor;
 use types::beacon_block_body::KzgCommitments;
-use types::blob_sidecar::{BlobIdentifier, BlobSidecar};
+use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
 use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
 use types::ssz_tagged_signed_beacon_block;
 use types::{

@@ -27,27 +29,29 @@ mod overflow_lru_cache;
 pub const OVERFLOW_LRU_CAPACITY: usize = 1024;

-#[derive(Debug)]
+#[derive(Debug, IntoStaticStr)]
 pub enum AvailabilityCheckError {
-    DuplicateBlob(Hash256),
     Kzg(KzgError),
-    KzgVerificationFailed,
     KzgNotInitialized,
+    KzgVerificationFailed,
     SszTypes(ssz_types::Error),
-    MissingBlobs,
     NumBlobsMismatch {
         num_kzg_commitments: usize,
         num_blobs: usize,
     },
-    TxKzgCommitmentMismatch,
+    MissingBlobs,
+    TxKzgCommitmentMismatch(String),
     KzgCommitmentMismatch {
         blob_index: u64,
     },
-    Pending,
     IncorrectFork,
     BlobIndexInvalid(u64),
     StoreError(store::Error),
     DecodeError(ssz::DecodeError),
+    BlockBlobRootMismatch {
+        block_root: Hash256,
+        blob_block_root: Hash256,
+    },
 }

 impl From<ssz_types::Error> for AvailabilityCheckError {

@@ -86,8 +90,7 @@ pub struct DataAvailabilityChecker<T: BeaconChainTypes> {
 /// to "complete" the requirements for an `AvailableBlock`.
 #[derive(Debug, PartialEq)]
 pub enum Availability<T: EthSpec> {
-    PendingBlobs(Vec<BlobIdentifier>),
-    PendingBlock(Hash256),
+    MissingComponents(Hash256),
     Available(Box<AvailableExecutedBlock<T>>),
 }

@@ -119,6 +122,52 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         })
     }

+    pub fn has_block(&self, block_root: &Hash256) -> bool {
+        self.availability_cache.has_block(block_root)
+    }
+
+    pub fn get_missing_blob_ids_checking_cache(
+        &self,
+        block_root: Hash256,
+    ) -> Option<Vec<BlobIdentifier>> {
+        let (block, blob_indices) = self.availability_cache.get_missing_blob_info(block_root);
+        self.get_missing_blob_ids(block_root, block.as_ref(), Some(blob_indices))
+    }
+
+    /// A `None` indicates blobs are not required.
+    ///
+    /// If there's no block, all possible ids will be returned that don't exist in the given blobs.
+    /// If there no blobs, all possible ids will be returned.
+    pub fn get_missing_blob_ids(
+        &self,
+        block_root: Hash256,
+        block_opt: Option<&Arc<SignedBeaconBlock<T::EthSpec>>>,
+        blobs_opt: Option<HashSet<usize>>,
+    ) -> Option<Vec<BlobIdentifier>> {
+        let epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch());
+
+        self.da_check_required(epoch).then(|| {
+            block_opt
+                .map(|block| {
+                    block.get_filtered_blob_ids(Some(block_root), |i, _| {
+                        blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i))
+                    })
+                })
+                .unwrap_or_else(|| {
+                    let mut blob_ids = Vec::with_capacity(T::EthSpec::max_blobs_per_block());
+                    for i in 0..T::EthSpec::max_blobs_per_block() {
+                        if blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i)) {
+                            blob_ids.push(BlobIdentifier {
+                                block_root,
+                                index: i as u64,
+                            });
+                        }
+                    }
+                    blob_ids
+                })
+        })
+    }
+
     /// Get a blob from the availability cache.
     pub fn get_blob(
         &self,

@@ -127,6 +176,23 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         self.availability_cache.peek_blob(blob_id)
     }

+    pub fn put_rpc_blobs(
+        &self,
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+    ) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
+        let mut verified_blobs = vec![];
+        if let Some(kzg) = self.kzg.as_ref() {
+            for blob in blobs.iter().flatten() {
+                verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?)
+            }
+        } else {
+            return Err(AvailabilityCheckError::KzgNotInitialized);
+        };
+        self.availability_cache
+            .put_kzg_verified_blobs(block_root, &verified_blobs)
+    }
+
     /// This first validates the KZG commitments included in the blob sidecar.
     /// Check if we've cached other blobs for this block. If it completes a set and we also
     /// have a block cached, return the `Availability` variant triggering block import.

@@ -139,13 +205,13 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
     ) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
         // Verify the KZG commitments.
         let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() {
-            verify_kzg_for_blob(gossip_blob, kzg)?
+            verify_kzg_for_blob(gossip_blob.to_blob(), kzg)?
         } else {
             return Err(AvailabilityCheckError::KzgNotInitialized);
         };

         self.availability_cache
-            .put_kzg_verified_blob(kzg_verified_blob)
+            .put_kzg_verified_blobs(kzg_verified_blob.block_root(), &[kzg_verified_blob])
     }

     /// Check if we have all the blobs for a block. If we do, return the Availability variant that

@@ -171,7 +237,8 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
                     .kzg
                     .as_ref()
                     .ok_or(AvailabilityCheckError::KzgNotInitialized)?;
-                let verified_blobs = verify_kzg_for_blob_list(VariableList::new(blob_list)?, kzg)?;
+                let filtered_blobs = blob_list.iter().flatten().cloned().collect();
+                let verified_blobs = verify_kzg_for_blob_list(filtered_blobs, kzg)?;

                 Ok(MaybeAvailableBlock::Available(
                     self.check_availability_with_blobs(block, verified_blobs)?,

@@ -180,27 +247,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         }
     }

-    /// Checks if a block is available, returning an error if the block is not immediately available.
-    /// Does not access the gossip cache.
-    pub fn try_check_availability(
-        &self,
-        block: BlockWrapper<T::EthSpec>,
-    ) -> Result<AvailableBlock<T::EthSpec>, AvailabilityCheckError> {
-        match block {
-            BlockWrapper::Block(block) => {
-                let blob_requirements = self.get_blob_requirements(&block)?;
-                let blobs = match blob_requirements {
-                    BlobRequirements::EmptyBlobs => VerifiedBlobs::EmptyBlobs,
-                    BlobRequirements::NotRequired => VerifiedBlobs::NotRequired,
-                    BlobRequirements::PreDeneb => VerifiedBlobs::PreDeneb,
-                    BlobRequirements::Required => return Err(AvailabilityCheckError::MissingBlobs),
-                };
-                Ok(AvailableBlock { block, blobs })
-            }
-            BlockWrapper::BlockAndBlobs(_, _) => Err(AvailabilityCheckError::Pending),
-        }
-    }
-
     /// Verifies a block against a set of KZG verified blobs. Returns an AvailableBlock if block's
     /// commitments are consistent with the provided verified blob commitments.
     pub fn check_availability_with_blobs(

@@ -254,9 +300,11 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
                     transactions,
                     block_kzg_commitments,
                 )
-                .map_err(|_| AvailabilityCheckError::TxKzgCommitmentMismatch)?;
+                .map_err(|e| AvailabilityCheckError::TxKzgCommitmentMismatch(format!("{e:?}")))?;
                 if !verified {
-                    return Err(AvailabilityCheckError::TxKzgCommitmentMismatch);
+                    return Err(AvailabilityCheckError::TxKzgCommitmentMismatch(
+                        "a commitment and version didn't match".to_string(),
+                    ));
                 }
             }

@@ -410,6 +458,27 @@ pub struct AvailabilityPendingBlock<E: EthSpec> {
     block: Arc<SignedBeaconBlock<E>>,
 }

+impl<E: EthSpec> AvailabilityPendingBlock<E> {
+    pub fn slot(&self) -> Slot {
+        self.block.slot()
+    }
+    pub fn num_blobs_expected(&self) -> usize {
+        self.block.num_expected_blobs()
+    }
+    pub fn get_all_blob_ids(&self, block_root: Option<Hash256>) -> Vec<BlobIdentifier> {
+        self.block.get_expected_blob_ids(block_root)
+    }
+    pub fn get_filtered_blob_ids(
+        &self,
+        block_root: Option<Hash256>,
+        filter: impl Fn(usize, Hash256) -> bool,
+    ) -> Vec<BlobIdentifier> {
+        self.block.get_filtered_blob_ids(block_root, filter)
+    }
+}
+
 impl<E: EthSpec> AvailabilityPendingBlock<E> {
     pub fn to_block(self) -> Arc<SignedBeaconBlock<E>> {
         self.block

@@ -429,7 +498,7 @@ impl<E: EthSpec> AvailabilityPendingBlock<E> {
     }

     /// Verifies an AvailabilityPendingBlock against a set of KZG verified blobs.
-    /// This does not check whether a block *should* have blobs, these checks should must have been
+    /// This does not check whether a block *should* have blobs, these checks should have been
     /// completed when producing the `AvailabilityPendingBlock`.
     pub fn make_available(
         self,

@@ -485,6 +554,13 @@ impl<E: EthSpec> AvailableBlock<E> {
         &self.block
     }

+    pub fn da_check_required(&self) -> bool {
+        match self.blobs {
+            VerifiedBlobs::PreDeneb | VerifiedBlobs::NotRequired => false,
+            VerifiedBlobs::EmptyBlobs | VerifiedBlobs::Available(_) => true,
+        }
+    }
+
     pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<E>>, Option<BlobSidecarList<E>>) {
         match self.blobs {
             VerifiedBlobs::EmptyBlobs | VerifiedBlobs::NotRequired | VerifiedBlobs::PreDeneb => {

@@ -542,7 +618,8 @@ impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
     fn into_block_wrapper(self) -> BlockWrapper<E> {
         let (block, blobs_opt) = self.deconstruct();
         if let Some(blobs) = blobs_opt {
-            BlockWrapper::BlockAndBlobs(block, blobs.to_vec())
+            let blobs_vec = blobs.iter().cloned().map(Option::Some).collect::<Vec<_>>();
+            BlockWrapper::BlockAndBlobs(block, FixedVector::from(blobs_vec))
         } else {
             BlockWrapper::Block(block)
         }
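The `get_missing_blob_ids` contract added above (`None` when the DA check is not required; otherwise only the ids not yet seen) can be sketched compactly with stand-in types. The helper below is illustrative only, not the checker's actual method:

```rust
use std::collections::HashSet;

type Hash256 = [u8; 32];

struct BlobIdentifier {
    block_root: Hash256,
    index: u64,
}

// `None` = the DA check is not required at the current epoch.
// `Some(ids)` = ids for expected blobs we have not yet seen.
fn missing_blob_ids(
    da_check_required: bool,
    block_root: Hash256,
    num_expected: usize,           // taken from the block when we have one
    seen_indices: &HashSet<usize>, // indices already cached
) -> Option<Vec<BlobIdentifier>> {
    da_check_required.then(|| {
        (0..num_expected)
            .filter(|i| !seen_indices.contains(i))
            .map(|i| BlobIdentifier {
                block_root,
                index: i as u64,
            })
            .collect()
    })
}
```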


@@ -11,7 +11,9 @@ use ssz_derive::{Decode, Encode};
 use ssz_types::FixedVector;
 use std::{collections::HashSet, sync::Arc};
 use types::blob_sidecar::BlobIdentifier;
-use types::{BlobSidecar, Epoch, EthSpec, Hash256};
+use types::{BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock};
+
+type MissingBlobInfo<T> = (Option<Arc<SignedBeaconBlock<T>>>, HashSet<usize>);

 /// Caches partially available blobs and execution verified blocks corresponding
 /// to a given `block_hash` that are received over gossip.

@@ -25,10 +27,12 @@ pub struct PendingComponents<T: EthSpec> {
 }

 impl<T: EthSpec> PendingComponents<T> {
-    pub fn new_from_blob(blob: KzgVerifiedBlob<T>) -> Self {
+    pub fn new_from_blobs(blobs: &[KzgVerifiedBlob<T>]) -> Self {
         let mut verified_blobs = FixedVector::<_, _>::default();
-        if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) {
-            *mut_maybe_blob = Some(blob);
+        for blob in blobs {
+            if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) {
+                *mut_maybe_blob = Some(blob.clone());
+            }
         }

         Self {

@@ -82,6 +86,20 @@ impl<T: EthSpec> PendingComponents<T> {
                 None
             })
     }
+
+    pub fn get_missing_blob_info(&self) -> MissingBlobInfo<T> {
+        let block_opt = self
+            .executed_block
+            .as_ref()
+            .map(|block| block.block.block.clone());
+        let blobs = self
+            .verified_blobs
+            .iter()
+            .enumerate()
+            .filter_map(|(i, maybe_blob)| maybe_blob.as_ref().map(|_| i))
+            .collect::<HashSet<_>>();
+        (block_opt, blobs)
+    }
 }

 #[derive(Debug, PartialEq)]

@@ -193,11 +211,27 @@ impl<T: BeaconChainTypes> OverflowStore<T> {
         Ok(disk_keys)
     }

+    pub fn load_block(
+        &self,
+        block_root: &Hash256,
+    ) -> Result<Option<AvailabilityPendingExecutedBlock<T::EthSpec>>, AvailabilityCheckError> {
+        let key = OverflowKey::from_block_root(*block_root);
+
+        self.0
+            .hot_db
+            .get_bytes(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())?
+            .map(|block_bytes| {
+                AvailabilityPendingExecutedBlock::from_ssz_bytes(block_bytes.as_slice())
+            })
+            .transpose()
+            .map_err(|e| e.into())
+    }
+
     pub fn load_blob(
         &self,
         blob_id: &BlobIdentifier,
     ) -> Result<Option<Arc<BlobSidecar<T::EthSpec>>>, AvailabilityCheckError> {
-        let key = OverflowKey::from_blob_id::<T::EthSpec>(blob_id.clone())?;
+        let key = OverflowKey::from_blob_id::<T::EthSpec>(*blob_id)?;

         self.0
             .hot_db

@@ -320,6 +354,41 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
         })
     }

+    pub fn has_block(&self, block_root: &Hash256) -> bool {
+        let read_lock = self.critical.read();
+        if read_lock
+            .in_memory
+            .peek(block_root)
+            .map_or(false, |cache| cache.executed_block.is_some())
+        {
+            true
+        } else if read_lock.store_keys.contains(block_root) {
+            drop(read_lock);
+            // If there's some kind of error reading from the store, we should just return false
+            self.overflow_store
+                .load_block(block_root)
+                .map_or(false, |maybe_block| maybe_block.is_some())
+        } else {
+            false
+        }
+    }
+
+    pub fn get_missing_blob_info(&self, block_root: Hash256) -> MissingBlobInfo<T::EthSpec> {
+        let read_lock = self.critical.read();
+        if let Some(cache) = read_lock.in_memory.peek(&block_root) {
+            cache.get_missing_blob_info()
+        } else if read_lock.store_keys.contains(&block_root) {
+            drop(read_lock);
+            // return default if there's an error reading from the store
+            match self.overflow_store.get_pending_components(block_root) {
+                Ok(Some(pending_components)) => pending_components.get_missing_blob_info(),
+                _ => Default::default(),
+            }
+        } else {
+            Default::default()
+        }
+    }
+
     pub fn peek_blob(
         &self,
         blob_id: &BlobIdentifier,

@@ -335,27 +404,39 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
         }
     }

-    pub fn put_kzg_verified_blob(
+    pub fn put_kzg_verified_blobs(
         &self,
-        kzg_verified_blob: KzgVerifiedBlob<T::EthSpec>,
+        block_root: Hash256,
+        kzg_verified_blobs: &[KzgVerifiedBlob<T::EthSpec>],
     ) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
+        for blob in kzg_verified_blobs {
+            let blob_block_root = blob.block_root();
+            if blob_block_root != block_root {
+                return Err(AvailabilityCheckError::BlockBlobRootMismatch {
+                    block_root,
+                    blob_block_root,
+                });
+            }
+        }
         let mut write_lock = self.critical.write();
-        let block_root = kzg_verified_blob.block_root();

         let availability = if let Some(mut pending_components) =
             write_lock.pop_pending_components(block_root, &self.overflow_store)?
         {
-            let blob_index = kzg_verified_blob.blob_index();
-            *pending_components
-                .verified_blobs
-                .get_mut(blob_index as usize)
-                .ok_or(AvailabilityCheckError::BlobIndexInvalid(blob_index))? =
-                Some(kzg_verified_blob);
+            for kzg_verified_blob in kzg_verified_blobs {
+                let blob_index = kzg_verified_blob.blob_index() as usize;
+                if let Some(maybe_verified_blob) =
+                    pending_components.verified_blobs.get_mut(blob_index)
+                {
+                    *maybe_verified_blob = Some(kzg_verified_blob.clone())
+                } else {
+                    return Err(AvailabilityCheckError::BlobIndexInvalid(blob_index as u64));
+                }
+            }

             if let Some(executed_block) = pending_components.executed_block.take() {
                 self.check_block_availability_maybe_cache(
                     write_lock,
-                    block_root,
                     pending_components,
                     executed_block,
                 )?

@@ -365,17 +446,17 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
                     pending_components,
                     &self.overflow_store,
                 )?;
-                Availability::PendingBlock(block_root)
+                Availability::MissingComponents(block_root)
             }
         } else {
             // not in memory or store -> put new in memory
-            let new_pending_components = PendingComponents::new_from_blob(kzg_verified_blob);
+            let new_pending_components = PendingComponents::new_from_blobs(kzg_verified_blobs);
             write_lock.put_pending_components(
                 block_root,
                 new_pending_components,
                 &self.overflow_store,
             )?;
-            Availability::PendingBlock(block_root)
+            Availability::MissingComponents(block_root)
         };

         Ok(availability)

@@ -394,7 +475,6 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
         match write_lock.pop_pending_components(block_root, &self.overflow_store)? {
             Some(pending_components) => self.check_block_availability_maybe_cache(
                 write_lock,
-                block_root,
                 pending_components,
                 executed_block,
             )?,

@@ -422,7 +502,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
                 new_pending_components,
                 &self.overflow_store,
             )?;
-            Availability::PendingBlobs(all_blob_ids)
+            Availability::MissingComponents(block_root)
         }
         };

@@ -435,11 +515,10 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
     /// Returns an error if there was an error when matching the block commitments against blob commitments.
     ///
     /// Returns `Ok(Availability::Available(_))` if all blobs for the block are present in cache.
-    /// Returns `Ok(Availability::PendingBlobs(_))` if all corresponding blobs have not been received in the cache.
+    /// Returns `Ok(Availability::MissingComponents(_))` if all corresponding blobs have not been received in the cache.
     fn check_block_availability_maybe_cache(
         &self,
         mut write_lock: RwLockWriteGuard<Critical<T>>,
-        block_root: Hash256,
         mut pending_components: PendingComponents<T::EthSpec>,
         executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
     ) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {

@@ -451,11 +530,12 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
             payload_verification_outcome,
         } = executed_block;

-        let verified_blobs = Vec::from(pending_components.verified_blobs)
+        let Some(verified_blobs) = Vec::from(pending_components.verified_blobs)
             .into_iter()
             .take(num_blobs_expected)
-            .map(|maybe_blob| maybe_blob.ok_or(AvailabilityCheckError::MissingBlobs))
-            .collect::<Result<Vec<_>, _>>()?;
+            .collect::<Option<Vec<_>>>() else {
+                return Ok(Availability::MissingComponents(import_data.block_root))
+            };

         let available_block = block.make_available(verified_blobs)?;
         Ok(Availability::Available(Box::new(

@@ -466,14 +546,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
                 ),
             )))
         } else {
-            let missing_blob_ids = executed_block.get_filtered_blob_ids(|index| {
-                pending_components
-                    .verified_blobs
-                    .get(index as usize)
-                    .map(|maybe_blob| maybe_blob.is_none())
-                    .unwrap_or(true)
-            });
-
+            let block_root = executed_block.import_data.block_root;
             let _ = pending_components.executed_block.insert(executed_block);
             write_lock.put_pending_components(
                 block_root,

@@ -481,7 +554,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
                 &self.overflow_store,
             )?;

-            Ok(Availability::PendingBlobs(missing_blob_ids))
+            Ok(Availability::MissingComponents(block_root))
         }
     }
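The batched write above has a simple contract: every blob in the batch must share `block_root`, the whole batch is applied under one write lock, and callers keep feeding components until `Available` comes back. A minimal sketch with stand-in types (the real method returns the boxed executed block in its `Available` variant):

```rust
type Hash256 = [u8; 32];

struct VerifiedBlobStandIn {
    block_root: Hash256,
}

enum Availability {
    MissingComponents(Hash256),
    Available, // the real variant carries the executed block
}

fn put_blobs(
    block_root: Hash256,
    blobs: &[VerifiedBlobStandIn],
) -> Result<Availability, String> {
    // Reject mixed batches up front, mirroring `BlockBlobRootMismatch`.
    for blob in blobs {
        if blob.block_root != block_root {
            return Err("blob's block root does not match the batch root".into());
        }
    }
    // The real cache now applies the whole batch under a single write lock and
    // answers `Available` only once the block and every expected blob are in.
    Ok(Availability::MissingComponents(block_root))
}
```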
@ -1080,7 +1153,7 @@ mod test {
); );
} else { } else {
assert!( assert!(
matches!(availability, Availability::PendingBlobs(_)), matches!(availability, Availability::MissingComponents(_)),
"should be pending blobs" "should be pending blobs"
); );
assert_eq!( assert_eq!(
@ -1100,16 +1173,18 @@ mod test {
.as_ref() .as_ref()
.cloned() .cloned()
.expect("kzg should exist"); .expect("kzg should exist");
let mut kzg_verified_blobs = Vec::new();
for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() {
let kzg_verified_blob = let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify"); .expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
let availability = cache let availability = cache
.put_kzg_verified_blob(kzg_verified_blob) .put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
.expect("should put blob"); .expect("should put blob");
if blob_index == blobs_expected - 1 { if blob_index == blobs_expected - 1 {
assert!(matches!(availability, Availability::Available(_))); assert!(matches!(availability, Availability::Available(_)));
} else { } else {
assert!(matches!(availability, Availability::PendingBlobs(_))); assert!(matches!(availability, Availability::MissingComponents(_)));
assert_eq!(cache.critical.read().in_memory.len(), 1); assert_eq!(cache.critical.read().in_memory.len(), 1);
} }
} }
@ -1126,15 +1201,17 @@ mod test {
"should have expected number of blobs" "should have expected number of blobs"
); );
let root = pending_block.import_data.block_root; let root = pending_block.import_data.block_root;
let mut kzg_verified_blobs = vec![];
for gossip_blob in blobs { for gossip_blob in blobs {
let kzg_verified_blob = let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify"); .expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
let availability = cache let availability = cache
.put_kzg_verified_blob(kzg_verified_blob) .put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
.expect("should put blob"); .expect("should put blob");
assert_eq!( assert_eq!(
availability, availability,
Availability::PendingBlock(root), Availability::MissingComponents(root),
"should be pending block" "should be pending block"
); );
assert_eq!(cache.critical.read().in_memory.len(), 1); assert_eq!(cache.critical.read().in_memory.len(), 1);
@ -1270,11 +1347,13 @@ mod test {
let blobs_0 = pending_blobs.pop_front().expect("should have blobs"); let blobs_0 = pending_blobs.pop_front().expect("should have blobs");
let expected_blobs = blobs_0.len(); let expected_blobs = blobs_0.len();
let mut kzg_verified_blobs = vec![];
for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() { for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() {
let kzg_verified_blob = let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify"); .expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
let availability = cache let availability = cache
.put_kzg_verified_blob(kzg_verified_blob) .put_kzg_verified_blobs(roots[0], kzg_verified_blobs.as_slice())
.expect("should put blob"); .expect("should put blob");
if blob_index == expected_blobs - 1 { if blob_index == expected_blobs - 1 {
assert!(matches!(availability, Availability::Available(_))); assert!(matches!(availability, Availability::Available(_)));
@ -1284,7 +1363,7 @@ mod test {
cache.critical.read().in_memory.peek(&roots[0]).is_some(), cache.critical.read().in_memory.peek(&roots[0]).is_some(),
"first block should be in memory" "first block should be in memory"
); );
assert!(matches!(availability, Availability::PendingBlobs(_))); assert!(matches!(availability, Availability::MissingComponents(_)));
} }
} }
assert_eq!( assert_eq!(
@@ -1360,13 +1439,17 @@ mod test {
         for _ in 0..(n_epochs * capacity) {
             let pending_block = pending_blocks.pop_front().expect("should have block");
+            let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs");
+            let block_root = pending_block.block.as_block().canonical_root();
             let expected_blobs = pending_block.num_blobs_expected();
             if expected_blobs > 1 {
                 // might as well add a blob too
-                let mut pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                let one_blob = pending_blobs.pop().expect("should have at least one blob");
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(one_blob, kzg.as_ref()).expect("kzg should verify");
+                let one_blob = pending_block_blobs
+                    .pop()
+                    .expect("should have at least one blob");
+                let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                let kzg_verified_blobs = vec![kzg_verified_blob];
                 // generate random boolean
                 let block_first = (rand::random::<usize>() % 2) == 0;
                 if block_first {
@@ -1374,43 +1457,41 @@ mod test {
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "availabilty should be pending blobs: {:?}",
                         availability
                     );
                 } else {
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     let root = pending_block.block.as_block().canonical_root();
                     assert_eq!(
                         availability,
-                        Availability::PendingBlock(root),
+                        Availability::MissingComponents(root),
                         "should be pending block"
                     );
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                 }
             } else {
-                // still need to pop front so the blob count is correct
-                pending_blobs.pop_front().expect("should have blobs");
                 let availability = cache
                     .put_pending_executed_block(pending_block)
                     .expect("should put block");
                 assert!(
-                    matches!(availability, Availability::PendingBlobs(_)),
+                    matches!(availability, Availability::MissingComponents(_)),
                     "should be pending blobs"
                 );
             }
@@ -1511,63 +1592,63 @@ mod test {
         let mut remaining_blobs = HashMap::new();
         for _ in 0..(n_epochs * capacity) {
             let pending_block = pending_blocks.pop_front().expect("should have block");
+            let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs");
             let block_root = pending_block.block.as_block().canonical_root();
             let expected_blobs = pending_block.num_blobs_expected();
             if expected_blobs > 1 {
                 // might as well add a blob too
-                let mut pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                let one_blob = pending_blobs.pop().expect("should have at least one blob");
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(one_blob, kzg.as_ref()).expect("kzg should verify");
+                let one_blob = pending_block_blobs
+                    .pop()
+                    .expect("should have at least one blob");
+                let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                let kzg_verified_blobs = vec![kzg_verified_blob];
                 // generate random boolean
                 let block_first = (rand::random::<usize>() % 2) == 0;
-                remaining_blobs.insert(block_root, pending_blobs);
                 if block_first {
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "availabilty should be pending blobs: {:?}",
                         availability
                     );
                 } else {
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     let root = pending_block.block.as_block().canonical_root();
                     assert_eq!(
                         availability,
-                        Availability::PendingBlock(root),
+                        Availability::MissingComponents(root),
                         "should be pending block"
                     );
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                 }
             } else {
-                // still need to pop front so the blob count is correct
-                let pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                remaining_blobs.insert(block_root, pending_blobs);
                 let availability = cache
                     .put_pending_executed_block(pending_block)
                     .expect("should put block");
                 assert!(
-                    matches!(availability, Availability::PendingBlobs(_)),
+                    matches!(availability, Availability::MissingComponents(_)),
                     "should be pending blobs"
                 );
             }
+            remaining_blobs.insert(block_root, pending_block_blobs);
         }
         // now we should have a full cache spanning multiple epochs
@@ -1626,18 +1707,20 @@
         );
         // now lets insert the remaining blobs until the cache is empty
-        for (_, blobs) in remaining_blobs {
+        for (root, blobs) in remaining_blobs {
             let additional_blobs = blobs.len();
+            let mut kzg_verified_blobs = vec![];
             for (i, gossip_blob) in blobs.into_iter().enumerate() {
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify");
+                let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                kzg_verified_blobs.push(kzg_verified_blob);
                 let availability = recovered_cache
-                    .put_kzg_verified_blob(kzg_verified_blob)
+                    .put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
                     .expect("should put blob");
                 if i == additional_blobs - 1 {
                     assert!(matches!(availability, Availability::Available(_)))
                 } else {
-                    assert!(matches!(availability, Availability::PendingBlobs(_)));
+                    assert!(matches!(availability, Availability::MissingComponents(_)));
                 }
             }
         }
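
The test changes above all follow the new caller contract: every verified blob is appended to a running batch and the whole batch is re-submitted under the block root, with the cache answering `MissingComponents` until the final blob lands. A minimal, self-contained model of that contract (the cache internals below are illustrative stand-ins, not the real overflow-LRU cache):

    use std::collections::HashMap;

    #[derive(Debug)]
    enum Availability {
        MissingComponents([u8; 32]),
        Available(usize), // stand-in for the fully available block
    }

    #[derive(Default)]
    struct Cache {
        // root -> number of verified blobs seen so far
        seen: HashMap<[u8; 32], usize>,
    }

    impl Cache {
        // The whole accumulated batch is passed on every call, so the cache can
        // overwrite its entry for `root` instead of merging single blobs.
        fn put_kzg_verified_blobs(
            &mut self,
            root: [u8; 32],
            blobs: &[u64],
            expected: usize,
        ) -> Availability {
            let entry = self.seen.entry(root).or_default();
            *entry = blobs.len();
            if *entry == expected {
                Availability::Available(*entry)
            } else {
                Availability::MissingComponents(root)
            }
        }
    }

    fn main() {
        let mut cache = Cache::default();
        let root = [0u8; 32];
        let expected = 3;
        let mut verified = vec![];
        for blob in 0..expected as u64 {
            verified.push(blob);
            // Reports MissingComponents twice, then Available on the last blob.
            let availability = cache.put_kzg_verified_blobs(root, &verified, expected);
            println!("{availability:?}");
        }
    }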

@@ -69,7 +69,9 @@ pub use self::historical_blocks::HistoricalBlockError;
 pub use attestation_verification::Error as AttestationError;
 pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
 pub use block_verification::{
-    get_block_root, BlockError, ExecutedBlock, ExecutionPayloadError, GossipVerifiedBlock,
+    get_block_root, AvailabilityPendingExecutedBlock, BlockError, ExecutedBlock,
+    ExecutionPayloadError, GossipVerifiedBlock, IntoExecutionPendingBlock,
+    PayloadVerificationOutcome, PayloadVerificationStatus,
 };
 pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
 pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};

@@ -379,7 +379,7 @@ mod tests {
         // Try adding an out of bounds index
         let invalid_index = E::max_blobs_per_block() as u64;
-        let sidecar_d = get_blob_sidecar(0, block_root_a, 4);
+        let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index);
         assert_eq!(
             cache.observe_sidecar(&sidecar_d),
             Err(Error::InvalidBlobIndex(invalid_index)),

@@ -63,7 +63,7 @@ use types::{typenum::U4294967296, *};
 // 4th September 2019
 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
 // Environment variable to read if `fork_from_env` feature is enabled.
-const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
+pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
 // Default target aggregators to set during testing, this ensures an aggregator at each slot.
 //

@@ -133,10 +133,13 @@ async fn produces_attestations() {
             assert_eq!(data.target.root, target_root, "bad target root");

             let block_wrapper: BlockWrapper<MainnetEthSpec> = Arc::new(block.clone()).into();
-            let available_block = chain
+            let beacon_chain::blob_verification::MaybeAvailableBlock::Available(available_block) = chain
                 .data_availability_checker
-                .try_check_availability(block_wrapper)
-                .unwrap();
+                .check_availability(block_wrapper)
+                .unwrap()
+            else {
+                panic!("block should be available")
+            };

             let early_attestation = {
                 let proto_block = chain
@@ -200,11 +203,13 @@ async fn early_attester_cache_old_request() {
         .unwrap();

     let block_wrapper: BlockWrapper<MainnetEthSpec> = head.beacon_block.clone().into();
-    let available_block = harness
-        .chain
+    let beacon_chain::blob_verification::MaybeAvailableBlock::Available(available_block) = harness.chain
         .data_availability_checker
-        .try_check_availability(block_wrapper)
-        .unwrap();
+        .check_availability(block_wrapper)
+        .unwrap()
+    else {
+        panic!("block should be available")
+    };

     harness
         .chain

@ -6,11 +6,11 @@ edition = "2021"
[dev-dependencies] [dev-dependencies]
serde_yaml = "0.8.13" serde_yaml = "0.8.13"
state_processing = { path = "../../consensus/state_processing" }
operation_pool = { path = "../operation_pool" } operation_pool = { path = "../operation_pool" }
tokio = "1.14.0" tokio = "1.14.0"
[dependencies] [dependencies]
state_processing = { path = "../../consensus/state_processing" }
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
store = { path = "../store" } store = { path = "../store" }
network = { path = "../network" } network = { path = "../network" }

@@ -28,6 +28,7 @@ use network::{NetworkConfig, NetworkSenders, NetworkService};
 use slasher::Slasher;
 use slasher_service::SlasherService;
 use slog::{debug, info, warn, Logger};
+use state_processing::per_slot_processing;
 use std::net::TcpListener;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
@@ -346,10 +347,23 @@ where
                     None
                 };

-                debug!(context.log(), "Downloading finalized block");
-                // Find a suitable finalized block on an epoch boundary.
-                let mut block = remote
-                    .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Finalized, &spec)
+                debug!(
+                    context.log(),
+                    "Downloading finalized state";
+                );
+                let mut state = remote
+                    .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Finalized, &spec)
+                    .await
+                    .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))?
+                    .ok_or_else(|| "Checkpoint state missing from remote".to_string())?;
+
+                debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot());
+
+                let finalized_block_slot = state.latest_block_header().slot;
+
+                debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot);
+                let block = remote
+                    .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(finalized_block_slot), &spec)
                     .await
                     .map_err(|e| match e {
                         ApiError::InvalidSsz(e) => format!(
@@ -363,55 +377,15 @@ where
                 debug!(context.log(), "Downloaded finalized block");

-                let mut block_slot = block.slot();
-
-                while block.slot() % slots_per_epoch != 0 {
-                    block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch;
-
-                    debug!(
-                        context.log(),
-                        "Searching for aligned checkpoint block";
-                        "block_slot" => block_slot
-                    );
-
-                    if let Some(found_block) = remote
-                        .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(block_slot), &spec)
-                        .await
-                        .map_err(|e| {
-                            format!("Error fetching block at slot {}: {:?}", block_slot, e)
-                        })?
-                    {
-                        block = found_block;
-                    }
-                }
-
-                debug!(
-                    context.log(),
-                    "Downloaded aligned finalized block";
-                    "block_root" => ?block.canonical_root(),
-                    "block_slot" => block.slot(),
-                );
-
-                let state_root = block.state_root();
-                debug!(
-                    context.log(),
-                    "Downloading finalized state";
-                    "state_root" => ?state_root
-                );
-                let state = remote
-                    .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Root(state_root), &spec)
-                    .await
-                    .map_err(|e| {
-                        format!(
-                            "Error loading checkpoint state from remote {:?}: {:?}",
-                            state_root, e
-                        )
-                    })?
-                    .ok_or_else(|| {
-                        format!("Checkpoint state missing from remote: {:?}", state_root)
-                    })?;
-
-                debug!(context.log(), "Downloaded finalized state");
+                let epoch_boundary_slot = state.slot() % slots_per_epoch;
+                if epoch_boundary_slot != 0 {
+                    debug!(context.log(), "Advancing state to epoch boundary"; "state_slot" => state.slot(), "epoch_boundary_slot" => epoch_boundary_slot);
+                }
+
+                while state.slot() % slots_per_epoch != 0 {
+                    per_slot_processing(&mut state, None, &spec)
+                        .map_err(|e| format!("Error advancing state: {:?}", e))?;
+                }

                 let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
                     .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
@@ -419,9 +393,9 @@ where
                 info!(
                     context.log(),
                     "Loaded checkpoint block and state";
-                    "slot" => block.slot(),
+                    "block_slot" => block.slot(),
+                    "state_slot" => state.slot(),
                     "block_root" => ?block.canonical_root(),
-                    "state_root" => ?state_root,
                 );

                 let service =
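
The new flow downloads the finalized state first, derives the block slot from `state.latest_block_header` (some servers, e.g. Teku, don't serve states by root), and advances the state to the epoch boundary locally instead of walking backwards through blocks. A compact model of the alignment step (the state type and `per_slot_processing` signature are simplified stand-ins for the real APIs):

    struct State {
        slot: u64,
        latest_block_header_slot: u64,
    }

    const SLOTS_PER_EPOCH: u64 = 32;

    // Stand-in: real per-slot processing also updates caches and roots.
    fn per_slot_processing(state: &mut State) -> Result<(), String> {
        state.slot += 1;
        Ok(())
    }

    // Advance with empty slots until the state sits on an epoch boundary.
    fn align_to_epoch_boundary(state: &mut State) -> Result<(), String> {
        while state.slot % SLOTS_PER_EPOCH != 0 {
            per_slot_processing(state)?;
        }
        Ok(())
    }

    fn main() -> Result<(), String> {
        let mut state = State { slot: 45, latest_block_header_slot: 45 };
        // The matching block is fetched by this slot, not by state root.
        let block_slot = state.latest_block_header_slot;
        align_to_epoch_boundary(&mut state)?;
        assert_eq!(state.slot, 64);
        println!("fetch block at slot {block_slot}, state advanced to {}", state.slot);
        Ok(())
    }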

@@ -586,7 +586,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
             ForkName::Deneb => {
                 // get random number between 0 and Max Blobs
                 let num_blobs = rand::random::<usize>() % T::max_blobs_per_block();
-                let (bundle, transactions) = self.generate_random_blobs(num_blobs)?;
+                let kzg = self.kzg.as_ref().ok_or("kzg not initialized")?;
+                let (bundle, transactions) = generate_random_blobs(num_blobs, kzg)?;
                 for tx in Vec::from(transactions) {
                     execution_payload
                         .transactions_mut()
@@ -626,10 +627,11 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
             payload_id: id.map(Into::into),
         })
     }
+}

-    fn generate_random_blobs(
-        &self,
-        n_blobs: usize,
-    ) -> Result<(BlobsBundleV1<T>, Transactions<T>), String> {
+pub fn generate_random_blobs<T: EthSpec>(
+    n_blobs: usize,
+    kzg: &Kzg,
+) -> Result<(BlobsBundleV1<T>, Transactions<T>), String> {
     let mut bundle = BlobsBundleV1::<T>::default();
     let mut transactions = vec![];
@@ -646,17 +648,11 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
         let blob = Blob::<T>::new(Vec::from(blob_bytes))
             .map_err(|e| format!("error constructing random blob: {:?}", e))?;

-        let commitment = self
-            .kzg
-            .as_ref()
-            .ok_or("kzg not initialized")?
+        let commitment = kzg
             .blob_to_kzg_commitment(blob_bytes.into())
             .map_err(|e| format!("error computing kzg commitment: {:?}", e))?;

-        let proof = self
-            .kzg
-            .as_ref()
-            .ok_or("kzg not initialized")?
+        let proof = kzg
             .compute_blob_kzg_proof(blob_bytes.into(), commitment)
             .map_err(|e| format!("error computing kzg proof: {:?}", e))?;
@@ -708,7 +704,6 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
     Ok((bundle, transactions.into()))
 }
-}

 fn payload_id_from_u64(n: u64) -> PayloadId {
     n.to_le_bytes()
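
Hoisting the `Option<Kzg>` check to the call site is what lets `generate_random_blobs` become a free `pub` function that other crates can reuse. A toy version of the call-site shape (all types here are stand-ins, not the real generator):

    struct Kzg;
    struct Generator {
        kzg: Option<Kzg>,
    }

    // Stand-in for the real helper, which returns (BlobsBundleV1, Transactions).
    fn generate_random_blobs(n_blobs: usize, _kzg: &Kzg) -> Result<usize, String> {
        Ok(n_blobs)
    }

    impl Generator {
        fn build_payload(&self, num_blobs: usize) -> Result<usize, String> {
            // The Option is unwrapped once at the call site...
            let kzg = self.kzg.as_ref().ok_or("kzg not initialized")?;
            // ...so the helper no longer needs `&self` and can live at module level.
            generate_random_blobs(num_blobs, kzg)
        }
    }

    fn main() {
        let gen = Generator { kzg: Some(Kzg) };
        assert_eq!(gen.build_payload(2), Ok(2));
    }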

@@ -24,7 +24,9 @@ use types::{EthSpec, ExecutionBlockHash, Uint256};
 use warp::{http::StatusCode, Filter, Rejection};

 use crate::EngineCapabilities;
-pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator};
+pub use execution_block_generator::{
+    generate_pow_block, generate_random_blobs, Block, ExecutionBlockGenerator,
+};
 pub use hook::Hook;
 pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder};
 pub use mock_execution_layer::MockExecutionLayer;

@@ -12,6 +12,7 @@ use slog::{debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use std::time::Duration;
+use store::FixedVector;
 use tokio::sync::mpsc::UnboundedSender;
 use tree_hash::TreeHash;
 use types::{
@@ -77,8 +78,11 @@ pub async fn publish_block<T: BeaconChainTypes>(
                     PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))),
                 )?;
             }
-            let blobs = signed_blobs.into_iter().map(|blob| blob.message).collect();
-            BlockWrapper::BlockAndBlobs(block, blobs)
+            let blobs = signed_blobs
+                .into_iter()
+                .map(|blob| Some(blob.message))
+                .collect::<Vec<_>>();
+            BlockWrapper::BlockAndBlobs(block, FixedVector::from(blobs))
         } else {
             block.into()
         }
@@ -136,17 +140,8 @@ pub async fn publish_block<T: BeaconChainTypes>(
             Ok(())
         }
-        Ok(AvailabilityProcessingStatus::PendingBlock(block_root)) => {
-            let msg = format!("Missing block with root {:?}", block_root);
-            error!(
-                log,
-                "Invalid block provided to HTTP API";
-                "reason" => &msg
-            );
-            Err(warp_utils::reject::broadcast_without_import(msg))
-        }
-        Ok(AvailabilityProcessingStatus::PendingBlobs(blob_ids)) => {
-            let msg = format!("Missing blobs {:?}", blob_ids);
+        Ok(AvailabilityProcessingStatus::MissingComponents(_, block_root)) => {
+            let msg = format!("Missing parts of block with root {:?}", block_root);
             error!(
                 log,
                 "Invalid block provided to HTTP API";

@ -19,7 +19,7 @@ store = { path = "../store" }
lighthouse_network = { path = "../lighthouse_network" } lighthouse_network = { path = "../lighthouse_network" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
slot_clock = { path = "../../common/slot_clock" } slot_clock = { path = "../../common/slot_clock" }
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace", "nested-values"] }
hex = "0.4.2" hex = "0.4.2"
ethereum_ssz = "0.5.0" ethereum_ssz = "0.5.0"
ssz_types = "0.5.0" ssz_types = "0.5.0"
@ -47,3 +47,7 @@ delay_map = "0.3.0"
ethereum-types = { version = "0.14.1", optional = true } ethereum-types = { version = "0.14.1", optional = true }
operation_pool = { path = "../operation_pool" } operation_pool = { path = "../operation_pool" }
execution_layer = { path = "../execution_layer" } execution_layer = { path = "../execution_layer" }
[features]
spec-minimal = ["beacon_chain/spec-minimal"]
fork_from_env = ["beacon_chain/fork_from_env"]

@@ -65,6 +65,7 @@ use std::{cmp, collections::HashSet};
 use task_executor::TaskExecutor;
 use tokio::sync::mpsc;
 use tokio::sync::mpsc::error::TrySendError;
+use types::blob_sidecar::FixedBlobSidecarList;
 use types::{
     Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate,
     ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecar,
@@ -121,9 +122,9 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
 /// before we start dropping them.
 const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;

-/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that
+/// The maximum number of queued `SignedBlobSidecar` objects received on gossip that
 /// will be stored before we start dropping them.
-const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024;
+const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024;

 /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but
 /// within acceptable clock disparity) that will be queued before we start dropping them.
@@ -164,6 +165,7 @@ const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024;
 /// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that
 /// will be stored before we start dropping them.
 const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024;
+const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024 * 4;

 /// The maximum number of queued `Vec<SignedBeaconBlock>` objects received during syncing that will
 /// be stored before we start dropping them.
@@ -233,6 +235,7 @@ pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution";
 pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update";
 pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
 pub const RPC_BLOCK: &str = "rpc_block";
+pub const RPC_BLOB: &str = "rpc_blob";
 pub const CHAIN_SEGMENT: &str = "chain_segment";
 pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill";
 pub const STATUS_PROCESSING: &str = "status_processing";
@@ -628,6 +631,23 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
         }
     }

+    pub fn rpc_blobs(
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+        seen_timestamp: Duration,
+        process_type: BlockProcessType,
+    ) -> Self {
+        Self {
+            drop_during_sync: false,
+            work: Work::RpcBlobs {
+                block_root,
+                blobs,
+                seen_timestamp,
+                process_type,
+            },
+        }
+    }
+
     /// Create a new work event to import `blocks` as a beacon chain segment.
     pub fn chain_segment(
         process_id: ChainSegmentProcessId,
@@ -927,6 +947,12 @@ pub enum Work<T: BeaconChainTypes> {
         process_type: BlockProcessType,
         should_process: bool,
     },
+    RpcBlobs {
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+        seen_timestamp: Duration,
+        process_type: BlockProcessType,
+    },
     ChainSegment {
         process_id: ChainSegmentProcessId,
         blocks: Vec<BlockWrapper<T::EthSpec>>,
@@ -986,6 +1012,7 @@ impl<T: BeaconChainTypes> Work<T> {
             Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE,
             Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE,
             Work::RpcBlock { .. } => RPC_BLOCK,
+            Work::RpcBlobs { .. } => RPC_BLOB,
             Work::ChainSegment {
                 process_id: ChainSegmentProcessId::BackSyncBatchId { .. },
                 ..
@@ -1148,11 +1175,11 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
         // Using a FIFO queue since blocks need to be imported sequentially.
         let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN);
+        let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN);
         let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
         let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
         let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN);
-        let mut gossip_block_and_blobs_sidecar_queue =
-            FifoQueue::new(MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN);
+        let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN);
         let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN);

         let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN);
@@ -1302,6 +1329,8 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 // evolves.
                 } else if let Some(item) = rpc_block_queue.pop() {
                     self.spawn_worker(item, toolbox);
+                } else if let Some(item) = rpc_blob_queue.pop() {
+                    self.spawn_worker(item, toolbox);
                 // Check delayed blocks before gossip blocks, the gossip blocks might rely
                 // on the delayed ones.
                 } else if let Some(item) = delayed_block_queue.pop() {
@@ -1310,7 +1339,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 // required to verify some attestations.
                 } else if let Some(item) = gossip_block_queue.pop() {
                     self.spawn_worker(item, toolbox);
-                } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() {
+                } else if let Some(item) = gossip_blob_queue.pop() {
                     self.spawn_worker(item, toolbox);
                 // Check the aggregates, *then* the unaggregates since we assume that
                 // aggregates are more valuable to local validators and effectively give us
@@ -1526,7 +1555,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                             gossip_block_queue.push(work, work_id, &self.log)
                         }
                         Work::GossipSignedBlobSidecar { .. } => {
-                            gossip_block_and_blobs_sidecar_queue.push(work, work_id, &self.log)
+                            gossip_blob_queue.push(work, work_id, &self.log)
                         }
                         Work::DelayedImportBlock { .. } => {
                             delayed_block_queue.push(work, work_id, &self.log)
@@ -1551,6 +1580,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                             optimistic_update_queue.push(work, work_id, &self.log)
                         }
                         Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log),
+                        Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log),
                         Work::ChainSegment { ref process_id, .. } => match process_id {
                             ChainSegmentProcessId::RangeBatchId { .. }
                             | ChainSegmentProcessId::ParentLookup { .. } => {
@@ -1620,6 +1650,10 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
             &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL,
             rpc_block_queue.len() as i64,
         );
+        metrics::set_gauge(
+            &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL,
+            rpc_blob_queue.len() as i64,
+        );
         metrics::set_gauge(
             &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL,
             chain_segment_queue.len() as i64,
@@ -1977,6 +2011,17 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 duplicate_cache,
                 should_process,
             )),
+            Work::RpcBlobs {
+                block_root,
+                blobs,
+                seen_timestamp,
+                process_type,
+            } => task_spawner.spawn_async(worker.process_rpc_blobs(
+                block_root,
+                blobs,
+                seen_timestamp,
+                process_type,
+            )),
             /*
              * Verification for a chain segment (multiple blocks).
              */
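
The drain order matters here: the new `rpc_blob_queue` is polled immediately after `rpc_block_queue`, ahead of the delayed-block and gossip queues. A reduced model of that priority chain (this `FifoQueue` is an illustrative stand-in for the processor's bounded queue):

    use std::collections::VecDeque;

    struct FifoQueue<T>(VecDeque<T>, usize);

    impl<T> FifoQueue<T> {
        fn new(cap: usize) -> Self {
            Self(VecDeque::new(), cap)
        }
        fn push(&mut self, item: T) {
            // Items beyond capacity are silently dropped, mirroring the MAX_*_QUEUE_LEN bounds.
            if self.0.len() < self.1 {
                self.0.push_back(item);
            }
        }
        fn pop(&mut self) -> Option<T> {
            self.0.pop_front()
        }
    }

    fn main() {
        let mut rpc_block_queue = FifoQueue::new(1024);
        let mut rpc_blob_queue = FifoQueue::new(1024 * 4);
        rpc_blob_queue.push("rpc_blobs for root 0x01");
        rpc_block_queue.push("rpc_block 0x02");

        // Mirrors the `else if` chain: blocks win ties over blobs.
        while let Some(work) = rpc_block_queue.pop().or_else(|| rpc_blob_queue.pop()) {
            println!("spawn worker for: {work}");
        }
    }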

@@ -14,8 +14,7 @@ use super::MAX_SCHEDULED_WORK_QUEUE_LEN;
 use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent};
 use crate::metrics;
 use crate::sync::manager::BlockProcessType;
-use beacon_chain::blob_verification::AsBlock;
-use beacon_chain::blob_verification::BlockWrapper;
+use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
 use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
 use fnv::FnvHashMap;
 use futures::task::Poll;

@@ -682,19 +682,15 @@ impl<T: BeaconChainTypes> Worker<T> {
             }
             Err(err) => {
                 match err {
-                    BlobError::BlobParentUnknown {
-                        blob_root,
-                        blob_parent_root,
-                    } => {
+                    BlobError::BlobParentUnknown(blob) => {
                         debug!(
                             self.log,
                             "Unknown parent hash for blob";
                             "action" => "requesting parent",
-                            "blob_root" => %blob_root,
-                            "parent_root" => %blob_parent_root
+                            "blob_root" => %blob.block_root,
+                            "parent_root" => %blob.block_parent_root
                         );
-                        // TODO: send blob to reprocessing queue and queue a sync request for the blob.
-                        todo!();
+                        self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob));
                     }
                     BlobError::ProposerSignatureInvalid
                     | BlobError::UnknownValidator(_)
@@ -757,28 +753,42 @@ impl<T: BeaconChainTypes> Worker<T> {
         // This value is not used presently, but it might come in handy for debugging.
         _seen_duration: Duration,
     ) {
-        // TODO
+        let blob_root = verified_blob.block_root();
+        let blob_slot = verified_blob.slot();
+        let blob_clone = verified_blob.clone().to_blob();
         match self
             .chain
             .process_blob(verified_blob, CountUnrealized::True)
             .await
         {
             Ok(AvailabilityProcessingStatus::Imported(_hash)) => {
-                todo!()
-                // add to metrics
-                // logging
+                //TODO(sean) add metrics and logging
+                self.chain.recompute_head_at_current_slot().await;
             }
-            Ok(AvailabilityProcessingStatus::PendingBlobs(pending_blobs)) => self
-                .send_sync_message(SyncMessage::UnknownBlobHash {
-                    peer_id,
-                    pending_blobs,
-                }),
-            Ok(AvailabilityProcessingStatus::PendingBlock(block_hash)) => {
-                self.send_sync_message(SyncMessage::UnknownBlockHash(peer_id, block_hash));
-            }
-            Err(_err) => {
-                // handle errors
-                todo!()
+            Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_hash)) => {
+                self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
+                    slot, peer_id, block_hash,
+                ));
+            }
+            Err(err) => {
+                debug!(
+                    self.log,
+                    "Invalid gossip blob";
+                    "outcome" => ?err,
+                    "block root" => ?blob_root,
+                    "block slot" => blob_slot,
+                    "blob index" => blob_clone.index,
+                );
+                self.gossip_penalize_peer(
+                    peer_id,
+                    PeerAction::MidToleranceError,
+                    "bad_gossip_blob_ssz",
+                );
+                trace!(
+                    self.log,
+                    "Invalid gossip blob ssz";
+                    "ssz" => format_args!("0x{}", hex::encode(blob_clone.as_ssz_bytes())),
+                );
             }
         }
     }
@@ -918,16 +928,13 @@ impl<T: BeaconChainTypes> Worker<T> {
                 verified_block
             }
-            Err(BlockError::AvailabilityCheck(_err)) => {
-                todo!()
-            }
             Err(BlockError::ParentUnknown(block)) => {
                 debug!(
                     self.log,
                     "Unknown parent for gossip block";
                     "root" => ?block_root
                 );
-                self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
+                self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root));
                 return None;
             }
             Err(e @ BlockError::BeaconChainError(_)) => {
@@ -987,8 +994,8 @@ impl<T: BeaconChainTypes> Worker<T> {
                 );
                 return None;
             }
-            Err(e @ BlockError::BlobValidation(_)) => {
-                warn!(self.log, "Could not verify blob for gossip. Rejecting the block and blob";
+            Err(e @ BlockError::BlobValidation(_)) | Err(e @ BlockError::AvailabilityCheck(_)) => {
+                warn!(self.log, "Could not verify block against known blobs in gossip. Rejecting the block";
                     "error" => %e);
                 self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
                 self.gossip_penalize_peer(
@@ -1132,23 +1139,13 @@ impl<T: BeaconChainTypes> Worker<T> {
                 self.chain.recompute_head_at_current_slot().await;
             }
-            Ok(AvailabilityProcessingStatus::PendingBlock(block_root)) => {
-                // This error variant doesn't make any sense in this context
-                crit!(
-                    self.log,
-                    "Internal error. Cannot get AvailabilityProcessingStatus::PendingBlock on processing block";
-                    "block_root" => %block_root
-                );
-            }
-            Ok(AvailabilityProcessingStatus::PendingBlobs(pending_blobs)) => {
-                // make rpc request for blob
-                self.send_sync_message(SyncMessage::UnknownBlobHash {
-                    peer_id,
-                    pending_blobs: pending_blobs.to_vec(),
-                });
-            }
-            Err(BlockError::AvailabilityCheck(_)) => {
-                todo!()
+            Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => {
+                // make rpc request for blob
+                self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
+                    *slot,
+                    peer_id,
+                    *block_root,
+                ));
             }
             Err(BlockError::ParentUnknown(block)) => {
                 // Inform the sync manager to find parents for this block
@@ -1158,7 +1155,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                     "Block with unknown parent attempted to be processed";
                     "peer_id" => %peer_id
                 );
-                self.send_sync_message(SyncMessage::UnknownBlock(
+                self.send_sync_message(SyncMessage::UnknownParentBlock(
                     peer_id,
                     block.clone(),
                     block_root,
@@ -1997,7 +1994,10 @@ impl<T: BeaconChainTypes> Worker<T> {
                 // We don't know the block, get the sync manager to handle the block lookup, and
                 // send the attestation to be scheduled for re-processing.
                 self.sync_tx
-                    .send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root))
+                    .send(SyncMessage::UnknownBlockHashFromAttestation(
+                        peer_id,
+                        *beacon_block_root,
+                    ))
                     .unwrap_or_else(|_| {
                         warn!(
                             self.log,
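
Both the blob and block gossip paths above now collapse to a single `MissingComponents` status that is forwarded to sync as one message. A reduced model of that mapping (both enums are simplified stand-ins for `AvailabilityProcessingStatus` and `SyncMessage`, with a string standing in for the peer id):

    #[derive(Debug)]
    enum AvailabilityProcessingStatus {
        Imported([u8; 32]),
        MissingComponents(u64, [u8; 32]), // (slot, block_root)
    }

    #[derive(Debug)]
    enum SyncMessage {
        MissingGossipBlockComponents(u64, &'static str, [u8; 32]), // (slot, peer, root)
    }

    fn handle(status: AvailabilityProcessingStatus, peer_id: &'static str) -> Option<SyncMessage> {
        match status {
            // Fully imported: recompute the head, nothing for sync to do.
            AvailabilityProcessingStatus::Imported(_root) => None,
            // Something is still missing: hand the root to sync for a lookup.
            AvailabilityProcessingStatus::MissingComponents(slot, root) => {
                Some(SyncMessage::MissingGossipBlockComponents(slot, peer_id, root))
            }
        }
    }

    fn main() {
        let msg = handle(
            AvailabilityProcessingStatus::MissingComponents(3, [9; 32]),
            "peer-a",
        );
        println!("{msg:?}");
    }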

@@ -5,7 +5,7 @@ use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock;
 use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE;
 use crate::beacon_processor::DuplicateCache;
 use crate::metrics;
-use crate::sync::manager::{BlockProcessType, SyncMessage};
+use crate::sync::manager::{BlockProcessType, ResponseType, SyncMessage};
 use crate::sync::{BatchProcessResult, ChainId};
 use beacon_chain::blob_verification::BlockWrapper;
 use beacon_chain::blob_verification::{AsBlock, MaybeAvailableBlock};
@@ -21,6 +21,7 @@ use slog::{debug, error, info, warn};
 use slot_clock::SlotClock;
 use std::time::{SystemTime, UNIX_EPOCH};
 use tokio::sync::mpsc;
+use types::blob_sidecar::FixedBlobSidecarList;
 use types::{Epoch, Hash256};

 /// Id associated to a batch processing request, either a sync batch or a parent lookup.
@@ -57,9 +58,10 @@ impl<T: BeaconChainTypes> Worker<T> {
     ) {
         if !should_process {
             // Sync handles these results
-            self.send_sync_message(SyncMessage::BlockProcessed {
+            self.send_sync_message(SyncMessage::BlockComponentProcessed {
                 process_type,
-                result: crate::sync::manager::BlockProcessResult::Ignored,
+                result: crate::sync::manager::BlockProcessingResult::Ignored,
+                response_type: crate::sync::manager::ResponseType::Block,
             });
             return;
         }
@@ -180,7 +182,8 @@ impl<T: BeaconChainTypes> Worker<T> {
         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);

         // RPC block imported, regardless of process type
-        //TODO(sean) handle pending availability variants
+        //TODO(sean) do we need to do anything here for missing blobs? or is passing the result
+        // along to sync enough?
         if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result {
             info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash);
@@ -205,15 +208,50 @@ impl<T: BeaconChainTypes> Worker<T> {
             }
         }

         // Sync handles these results
-        self.send_sync_message(SyncMessage::BlockProcessed {
+        self.send_sync_message(SyncMessage::BlockComponentProcessed {
             process_type,
             result: result.into(),
+            response_type: ResponseType::Block,
         });

         // Drop the handle to remove the entry from the cache
         drop(handle);
     }

+    pub async fn process_rpc_blobs(
+        self,
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+        _seen_timestamp: Duration,
+        process_type: BlockProcessType,
+    ) {
+        let Some(slot) = blobs.iter().find_map(|blob| {
+            blob.as_ref().map(|blob| blob.slot)
+        }) else {
+            return;
+        };
+
+        let result = self
+            .chain
+            .check_availability_and_maybe_import(
+                slot,
+                |chain| {
+                    chain
+                        .data_availability_checker
+                        .put_rpc_blobs(block_root, blobs)
+                },
+                CountUnrealized::True,
+            )
+            .await;
+
+        // Sync handles these results
+        self.send_sync_message(SyncMessage::BlockComponentProcessed {
+            process_type,
+            result: result.into(),
+            response_type: ResponseType::Blob,
+        });
+    }
+
     /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync
     /// thread if more blocks are needed to process it.
     pub async fn process_chain_segment(
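
`process_rpc_blobs` needs a slot for the availability check and takes it from the first present sidecar, bailing out if every entry in the fixed-length list is empty. A sketch of that guard (`FixedBlobSidecarList` is modeled here as a plain `Vec<Option<_>>`):

    struct BlobSidecar {
        slot: u64,
    }

    fn process_rpc_blobs(blobs: Vec<Option<BlobSidecar>>) {
        // Find the slot of the first present sidecar, or return early.
        let Some(slot) = blobs.iter().find_map(|blob| blob.as_ref().map(|b| b.slot)) else {
            return; // all entries empty: nothing to check
        };
        println!("checking availability at slot {slot}");
    }

    fn main() {
        process_rpc_blobs(vec![None, Some(BlobSidecar { slot: 7 })]);
        process_rpc_blobs(vec![]); // no-op, early return
    }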

@@ -453,8 +453,8 @@ impl<T: BeaconChainTypes> Router<T> {
                 }
                 id @ (SyncId::BackFillBlocks { .. }
                 | SyncId::RangeBlocks { .. }
-                | SyncId::BackFillBlobs { .. }
-                | SyncId::RangeBlobs { .. }) => id,
+                | SyncId::BackFillBlockAndBlobs { .. }
+                | SyncId::RangeBlockAndBlobs { .. }) => id,
             },
             RequestId::Router => unreachable!("All BBRange requests belong to sync"),
         };
@@ -512,8 +512,8 @@ impl<T: BeaconChainTypes> Router<T> {
                 id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id,
                 SyncId::BackFillBlocks { .. }
                 | SyncId::RangeBlocks { .. }
-                | SyncId::RangeBlobs { .. }
-                | SyncId::BackFillBlobs { .. } => {
+                | SyncId::RangeBlockAndBlobs { .. }
+                | SyncId::BackFillBlockAndBlobs { .. } => {
                     unreachable!("Batch syncing do not request BBRoot requests")
                 }
             },
@@ -545,8 +545,8 @@ impl<T: BeaconChainTypes> Router<T> {
                 id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id,
                 SyncId::BackFillBlocks { .. }
                 | SyncId::RangeBlocks { .. }
-                | SyncId::RangeBlobs { .. }
-                | SyncId::BackFillBlobs { .. } => {
+                | SyncId::RangeBlockAndBlobs { .. }
+                | SyncId::BackFillBlockAndBlobs { .. } => {
                     unreachable!("Batch syncing does not request BBRoot requests")
                 }
             },

@@ -151,7 +151,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
     }

     /// Return count of all currently subscribed subnets (long-lived **and** short-lived).
-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub fn subscription_count(&self) -> usize {
         if self.subscribe_all_subnets {
             self.beacon_chain.spec.attestation_subnet_count as usize
@@ -167,7 +167,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
     }

     /// Returns whether we are subscribed to a subnet for testing purposes.
-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub(crate) fn is_subscribed(
         &self,
         subnet_id: &SubnetId,
@@ -179,7 +179,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
         }
     }

-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub(crate) fn long_lived_subscriptions(&self) -> &HashSet<SubnetId> {
         &self.long_lived_subscriptions
     }

@@ -91,7 +91,7 @@ impl<T: BeaconChainTypes> SyncCommitteeService<T> {
     }

     /// Return count of all currently subscribed subnets.
-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub fn subscription_count(&self) -> usize {
         use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
         if self.subscribe_all_subnets {

@@ -1,3 +1,4 @@
+#![cfg(feature = "spec-mainnet")]
 use super::*;
 use beacon_chain::{
     builder::{BeaconChainBuilder, Witness},

@@ -0,0 +1,84 @@
use crate::sync::SyncMessage;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use slog::{crit, warn};
use slot_clock::SlotClock;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::time::interval_at;
use tokio::time::Instant;
use types::Hash256;
#[derive(Debug)]
pub enum DelayedLookupMessage {
/// A lookup for all components of a block or blob seen over gossip.
MissingComponents(Hash256),
}
/// This service is responsible for collecting lookup messages and sending them back to sync
/// for processing after a short delay.
///
/// We want to delay lookups triggered from gossip for the following reasons:
///
/// - We only want to make one request for components we are unlikely to see on gossip. This means
/// we don't have to repeatedly update our RPC request's state as we receive gossip components.
///
/// - We are likely to receive blocks/blobs over gossip more quickly than we could via an RPC request.
///
/// - Delaying a lookup means we are less likely to simultaneously download the same blocks/blobs
/// over gossip and RPC.
///
/// - We would prefer to request peers based on whether we've seen them attest, because this gives
/// us an idea about whether they *should* have the block/blobs we're missing. This is because a
/// node should not attest to a block unless it has all the blobs for that block. This gives us a
/// stronger basis for peer scoring.
pub fn spawn_delayed_lookup_service<T: BeaconChainTypes>(
executor: &task_executor::TaskExecutor,
beacon_chain: Arc<BeaconChain<T>>,
mut delayed_lookups_recv: mpsc::Receiver<DelayedLookupMessage>,
sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
log: slog::Logger,
) {
executor.spawn(
async move {
let slot_duration = beacon_chain.slot_clock.slot_duration();
let delay = beacon_chain.slot_clock.single_lookup_delay();
let interval_start = match (
beacon_chain.slot_clock.duration_to_next_slot(),
beacon_chain.slot_clock.seconds_from_current_slot_start(),
) {
(Some(duration_to_next_slot), Some(seconds_from_current_slot_start)) => {
let duration_until_start = if seconds_from_current_slot_start > delay {
duration_to_next_slot + delay
} else {
delay - seconds_from_current_slot_start
};
tokio::time::Instant::now() + duration_until_start
}
_ => {
crit!(log,
"Failed to read slot clock, delayed lookup service timing will be inaccurate.\
This may degrade performance"
);
Instant::now()
}
};
let mut interval = interval_at(interval_start, slot_duration);
loop {
interval.tick().await;
while let Ok(msg) = delayed_lookups_recv.try_recv() {
match msg {
DelayedLookupMessage::MissingComponents(block_root) => {
if let Err(e) = sync_send
.send(SyncMessage::MissingGossipBlockComponentsDelayed(block_root))
{
warn!(log, "Failed to send delayed lookup message"; "error" => ?e);
}
}
}
}
}
},
"delayed_lookups",
);
}
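
A hypothetical wiring of the service above, showing the producer side pushing a block root and sync receiving it after a tick (channel types and the sync message are simplified stand-ins; assumes tokio with the `rt`, `macros`, `sync`, and `time` features):

    use tokio::sync::mpsc;

    #[derive(Debug)]
    enum DelayedLookupMessage {
        MissingComponents([u8; 32]),
    }

    #[tokio::main]
    async fn main() {
        let (delayed_tx, mut delayed_rx) = mpsc::channel::<DelayedLookupMessage>(64);
        let (sync_tx, mut sync_rx) = mpsc::unbounded_channel::<[u8; 32]>();

        // Stand-in for `spawn_delayed_lookup_service`: drain on each timer tick.
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(std::time::Duration::from_millis(50));
            loop {
                interval.tick().await;
                while let Ok(DelayedLookupMessage::MissingComponents(root)) = delayed_rx.try_recv() {
                    let _ = sync_tx.send(root);
                }
            }
        });

        // Producer side: a gossip handler noticing missing components.
        delayed_tx
            .send(DelayedLookupMessage::MissingComponents([1; 32]))
            .await
            .unwrap();

        // Consumer side: sync receives the root after the next tick.
        let root = sync_rx.recv().await.unwrap();
        println!("delayed lookup triggered for root {:?}", &root[..4]);
    }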

[file diff suppressed because it is too large]

@ -1,18 +1,18 @@
use super::RootBlockTuple; use super::single_block_lookup::{LookupRequestError, LookupVerifyError, SingleBlockLookup};
use super::{BlobRequestId, BlockRequestId, DownloadedBlocks, PeerShouldHave, ResponseType};
use crate::sync::block_lookups::single_block_lookup::{State, UnknownParentComponents};
use crate::sync::block_lookups::{RootBlobsTuple, RootBlockTuple};
use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext};
use beacon_chain::blob_verification::AsBlock; use beacon_chain::blob_verification::AsBlock;
use beacon_chain::blob_verification::BlockWrapper; use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::data_availability_checker::DataAvailabilityChecker;
use beacon_chain::BeaconChainTypes; use beacon_chain::BeaconChainTypes;
use lighthouse_network::PeerId; use lighthouse_network::PeerId;
use std::sync::Arc;
use store::Hash256; use store::Hash256;
use strum::IntoStaticStr; use strum::IntoStaticStr;
use types::blob_sidecar::FixedBlobSidecarList;
use crate::sync::block_lookups::ForceBlockRequest; use types::{BlobSidecar, SignedBeaconBlock};
use crate::sync::{
manager::{Id, SLOT_IMPORT_TOLERANCE},
network_context::SyncNetworkContext,
};
use super::single_block_lookup::{self, SingleBlockRequest};
/// How many attempts we try to find a parent of a block before we give up trying. /// How many attempts we try to find a parent of a block before we give up trying.
pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5; pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5;
@ -26,19 +26,22 @@ pub(crate) struct ParentLookup<T: BeaconChainTypes> {
/// The root of the block triggering this parent request. /// The root of the block triggering this parent request.
chain_hash: Hash256, chain_hash: Hash256,
/// The blocks that have currently been downloaded. /// The blocks that have currently been downloaded.
downloaded_blocks: Vec<RootBlockTuple<T::EthSpec>>, downloaded_blocks: Vec<DownloadedBlocks<T::EthSpec>>,
/// Request of the last parent. /// Request of the last parent.
current_parent_request: SingleBlockRequest<PARENT_FAIL_TOLERANCE>, pub current_parent_request: SingleBlockLookup<PARENT_FAIL_TOLERANCE, T>,
/// Id of the last parent request.
current_parent_request_id: Option<Id>,
} }
#[derive(Debug, PartialEq, Eq, IntoStaticStr)] #[derive(Debug, PartialEq, Eq, IntoStaticStr)]
pub enum VerifyError { pub enum ParentVerifyError {
RootMismatch, RootMismatch,
NoBlockReturned, NoBlockReturned,
NotEnoughBlobsReturned,
ExtraBlocksReturned, ExtraBlocksReturned,
UnrequestedBlobId,
ExtraBlobsReturned,
InvalidIndex(u64),
PreviousFailure { parent_root: Hash256 }, PreviousFailure { parent_root: Hash256 },
BenignFailure,
} }
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
@ -55,62 +58,143 @@ pub enum RequestError {
} }
impl<T: BeaconChainTypes> ParentLookup<T> { impl<T: BeaconChainTypes> ParentLookup<T> {
pub fn new(
block_root: Hash256,
parent_root: Hash256,
peer_id: PeerShouldHave,
da_checker: Arc<DataAvailabilityChecker<T>>,
) -> Self {
let current_parent_request =
SingleBlockLookup::new(parent_root, Some(<_>::default()), &[peer_id], da_checker);
Self {
chain_hash: block_root,
downloaded_blocks: vec![],
current_parent_request,
}
}
pub fn contains_block(&self, block_root: &Hash256) -> bool { pub fn contains_block(&self, block_root: &Hash256) -> bool {
self.downloaded_blocks self.downloaded_blocks
.iter() .iter()
.any(|(root, _d_block)| root == block_root) .any(|(root, _d_block)| root == block_root)
} }
pub fn new(block_root: Hash256, block: BlockWrapper<T::EthSpec>, peer_id: PeerId) -> Self { pub fn is_for_block(&self, block_root: Hash256) -> bool {
let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id); self.current_parent_request.is_for_block(block_root)
Self {
chain_hash: block_root,
downloaded_blocks: vec![(block_root, block)],
current_parent_request,
current_parent_request_id: None,
}
} }
/// Attempts to request the next unknown parent. If the request fails, it should be removed. /// Attempts to request the next unknown parent. If the request fails, it should be removed.
pub fn request_parent( pub fn request_parent_block(
&mut self, &mut self,
cx: &mut SyncNetworkContext<T>, cx: &mut SyncNetworkContext<T>,
force_block_request: ForceBlockRequest,
) -> Result<(), RequestError> { ) -> Result<(), RequestError> {
// check to make sure this request hasn't failed // check to make sure this request hasn't failed
if self.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE {
return Err(RequestError::ChainTooLong); return Err(RequestError::ChainTooLong);
} }
let (peer_id, request) = self.current_parent_request.request_block()?; if let Some((peer_id, request)) = self.current_parent_request.request_block()? {
match cx.parent_lookup_request(peer_id, request, force_block_request) { match cx.parent_lookup_block_request(peer_id, request) {
Ok(request_id) => { Ok(request_id) => {
self.current_parent_request_id = Some(request_id); self.current_parent_request.id.block_request_id = Some(request_id);
Ok(()) return Ok(());
} }
Err(reason) => { Err(reason) => {
self.current_parent_request_id = None; self.current_parent_request.id.block_request_id = None;
Err(RequestError::SendFailed(reason)) return Err(RequestError::SendFailed(reason));
} }
} }
} }
Ok(())
}
pub fn check_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> { pub fn request_parent_blobs(
self.current_parent_request.check_peer_disconnected(peer_id) &mut self,
cx: &mut SyncNetworkContext<T>,
) -> Result<(), RequestError> {
// check to make sure this request hasn't failed
if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE {
return Err(RequestError::ChainTooLong);
} }
pub fn add_block(&mut self, block: BlockWrapper<T::EthSpec>) { if let Some((peer_id, request)) = self.current_parent_request.request_blobs()? {
match cx.parent_lookup_blobs_request(peer_id, request) {
Ok(request_id) => {
self.current_parent_request.id.blob_request_id = Some(request_id);
return Ok(());
}
Err(reason) => {
self.current_parent_request.id.blob_request_id = None;
return Err(RequestError::SendFailed(reason));
}
}
}
Ok(())
}
pub fn check_block_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> {
self.current_parent_request
.block_request_state
.state
.check_peer_disconnected(peer_id)
}
pub fn check_blob_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> {
self.current_parent_request
.blob_request_state
.state
.check_peer_disconnected(peer_id)
}
    pub fn add_unknown_parent_block(&mut self, block: BlockWrapper<T::EthSpec>) {
        let next_parent = block.parent_root();

        // Cache the block.
        let current_root = self
            .current_parent_request
            .block_request_state
            .requested_block_root;
        self.downloaded_blocks.push((current_root, block));

        // Update the block request.
        self.current_parent_request
            .block_request_state
            .requested_block_root = next_parent;
        self.current_parent_request.block_request_state.state.state = State::AwaitingDownload;
        self.current_parent_request.id.block_request_id = None;

        // Update the blobs request.
        self.current_parent_request.blob_request_state.state.state = State::AwaitingDownload;
        self.current_parent_request.id.blob_request_id = None;

        // Reset the unknown parent components.
        self.current_parent_request.unknown_parent_components =
            Some(UnknownParentComponents::default());
    }
    pub fn add_current_request_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
        // Cache the block.
        self.current_parent_request.add_unknown_parent_block(block);

        // Update the request.
        self.current_parent_request.id.block_request_id = None;
    }

    pub fn add_current_request_blobs(&mut self, blobs: FixedBlobSidecarList<T::EthSpec>) {
        // Cache the blobs.
        self.current_parent_request.add_unknown_parent_blobs(blobs);

        // Update the request.
        self.current_parent_request.id.blob_request_id = None;
    }

    pub fn pending_block_response(&self, req_id: BlockRequestId) -> bool {
        self.current_parent_request.id.block_request_id == Some(req_id)
    }

    pub fn pending_blob_response(&self, req_id: BlobRequestId) -> bool {
        self.current_parent_request.id.blob_request_id == Some(req_id)
    }
    /// Consumes the parent request and destructures it into its parts.
@@ -121,18 +205,17 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
        Hash256,
        Vec<BlockWrapper<T::EthSpec>>,
        Vec<Hash256>,
        SingleBlockLookup<PARENT_FAIL_TOLERANCE, T>,
    ) {
        let ParentLookup {
            chain_hash,
            downloaded_blocks,
            current_parent_request,
        } = self;
        let block_count = downloaded_blocks.len();
        let mut blocks = Vec::with_capacity(block_count);
        let mut hashes = Vec::with_capacity(block_count);
        for (hash, block) in downloaded_blocks.into_iter() {
            blocks.push(block);
            hashes.push(hash);
        }
@@ -144,23 +227,59 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
        self.chain_hash
    }
    pub fn block_download_failed(&mut self) {
        self.current_parent_request
            .block_request_state
            .state
            .register_failure_downloading();
        self.current_parent_request.id.block_request_id = None;
    }

    pub fn blob_download_failed(&mut self) {
        self.current_parent_request
            .blob_request_state
            .state
            .register_failure_downloading();
        self.current_parent_request.id.blob_request_id = None;
    }

    pub fn block_processing_failed(&mut self) {
        self.current_parent_request
            .block_request_state
            .state
            .register_failure_processing();
        if let Some(components) = self
            .current_parent_request
            .unknown_parent_components
            .as_mut()
        {
            components.downloaded_block = None;
        }
        self.current_parent_request.id.block_request_id = None;
    }

    pub fn blob_processing_failed(&mut self) {
        self.current_parent_request
            .blob_request_state
            .state
            .register_failure_processing();
        if let Some(components) = self
            .current_parent_request
            .unknown_parent_components
            .as_mut()
        {
            components.downloaded_blobs = <_>::default();
        }
        self.current_parent_request.id.blob_request_id = None;
    }
    /// Verifies that the received block is what we requested. If so, parent lookup now waits for
    /// the processing result of the block.
    pub fn verify_block(
        &mut self,
        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
        failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
    ) -> Result<Option<RootBlockTuple<T::EthSpec>>, ParentVerifyError> {
        let root_and_block = self.current_parent_request.verify_block(block)?;

        // check if the parent of this block isn't in the failed cache. If it is, this chain should
@@ -170,50 +289,83 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
        // be dropped and the peer downscored.
        if let Some(parent_root) = root_and_block
            .as_ref()
            .map(|(_, block)| block.parent_root())
        {
            if failed_chains.contains(&parent_root) {
                self.current_parent_request
                    .block_request_state
                    .state
                    .register_failure_downloading();
                self.current_parent_request.id.block_request_id = None;
                return Err(ParentVerifyError::PreviousFailure { parent_root });
            }
        }

        Ok(root_and_block)
    }
    pub fn verify_blob(
        &mut self,
        blob: Option<Arc<BlobSidecar<T::EthSpec>>>,
        failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
    ) -> Result<Option<RootBlobsTuple<T::EthSpec>>, ParentVerifyError> {
        let parent_root_opt = blob.as_ref().map(|b| b.block_parent_root);
        let blobs = self.current_parent_request.verify_blob(blob)?;

        // check if the parent of this block isn't in the failed cache. If it is, this chain should
        // be dropped and the peer downscored.
        if let Some(parent_root) = parent_root_opt {
            if failed_chains.contains(&parent_root) {
                self.current_parent_request
                    .blob_request_state
                    .state
                    .register_failure_downloading();
                self.current_parent_request.id.blob_request_id = None;
                return Err(ParentVerifyError::PreviousFailure { parent_root });
            }
        }

        Ok(blobs)
    }

    pub fn add_peers(&mut self, peer_source: &[PeerShouldHave]) {
        self.current_parent_request.add_peers(peer_source)
    }

    pub fn used_peers(&self, response_type: ResponseType) -> impl Iterator<Item = &PeerId> + '_ {
        match response_type {
            ResponseType::Block => self
                .current_parent_request
                .block_request_state
                .state
                .used_peers
                .iter(),
            ResponseType::Blob => self
                .current_parent_request
                .blob_request_state
                .state
                .used_peers
                .iter(),
        }
    }
}
impl From<LookupVerifyError> for ParentVerifyError {
    fn from(e: LookupVerifyError) -> Self {
        use LookupVerifyError as E;
        match e {
            E::RootMismatch => ParentVerifyError::RootMismatch,
            E::NoBlockReturned => ParentVerifyError::NoBlockReturned,
            E::ExtraBlocksReturned => ParentVerifyError::ExtraBlocksReturned,
            E::UnrequestedBlobId => ParentVerifyError::UnrequestedBlobId,
            E::ExtraBlobsReturned => ParentVerifyError::ExtraBlobsReturned,
            E::InvalidIndex(index) => ParentVerifyError::InvalidIndex(index),
            E::NotEnoughBlobsReturned => ParentVerifyError::NotEnoughBlobsReturned,
            E::BenignFailure => ParentVerifyError::BenignFailure,
        }
    }
}
impl From<LookupRequestError> for RequestError {
    fn from(e: LookupRequestError) -> Self {
        use LookupRequestError as E;
        match e {
            E::TooManyAttempts { cannot_process } => {
                RequestError::TooManyAttempts { cannot_process }


@@ -1,43 +1,210 @@
use crate::sync::block_lookups::{BlobRequestId, BlockRequestId, RootBlobsTuple, RootBlockTuple};
use crate::sync::network_context::SyncNetworkContext;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::data_availability_checker::DataAvailabilityChecker;
use beacon_chain::{get_block_root, BeaconChainTypes};
use lighthouse_network::rpc::methods::BlobsByRootRequest;
use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
use rand::seq::IteratorRandom;
use ssz_types::VariableList;
use std::collections::HashSet;
use std::ops::IndexMut;
use std::sync::Arc;
use store::Hash256;
use strum::IntoStaticStr;
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
use super::{PeerShouldHave, ResponseType};

pub struct SingleBlockLookup<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> {
    pub id: LookupId,
    pub block_request_state: BlockRequestState<MAX_ATTEMPTS>,
    pub blob_request_state: BlobRequestState<MAX_ATTEMPTS, T::EthSpec>,
    pub da_checker: Arc<DataAvailabilityChecker<T>>,
    /// Only necessary for requests triggered by an `UnknownParentBlock` or `UnknownParentBlob`
    /// because any blocks or blobs without parents won't hit the data availability cache.
    pub unknown_parent_components: Option<UnknownParentComponents<T::EthSpec>>,
    /// We may want to delay the actual request trigger to give us a chance to receive all block
    /// components over gossip.
    pub triggered: bool,
}
#[derive(Default, Clone)]
pub struct LookupId {
pub block_request_id: Option<BlockRequestId>,
pub blob_request_id: Option<BlobRequestId>,
}
pub struct BlobRequestState<const MAX_ATTEMPTS: u8, T: EthSpec> {
pub requested_ids: Vec<BlobIdentifier>,
/// Where we store blobs until we receive the stream terminator.
pub blob_download_queue: FixedBlobSidecarList<T>,
pub state: SingleLookupRequestState<MAX_ATTEMPTS>,
}
impl<const MAX_ATTEMPTS: u8, T: EthSpec> BlobRequestState<MAX_ATTEMPTS, T> {
pub fn new(peer_source: &[PeerShouldHave]) -> Self {
Self {
requested_ids: <_>::default(),
blob_download_queue: <_>::default(),
state: SingleLookupRequestState::new(peer_source),
}
}
}
pub struct BlockRequestState<const MAX_ATTEMPTS: u8> {
pub requested_block_root: Hash256,
pub state: SingleLookupRequestState<MAX_ATTEMPTS>,
}
impl<const MAX_ATTEMPTS: u8> BlockRequestState<MAX_ATTEMPTS> {
pub fn new(block_root: Hash256, peers: &[PeerShouldHave]) -> Self {
Self {
requested_block_root: block_root,
state: SingleLookupRequestState::new(peers),
}
}
}
impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS, T> {
pub(crate) fn register_failure_downloading(&mut self, response_type: ResponseType) {
match response_type {
ResponseType::Block => self
.block_request_state
.state
.register_failure_downloading(),
ResponseType::Blob => self.blob_request_state.state.register_failure_downloading(),
}
}
}
impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS, T> {
pub(crate) fn downloading(&mut self, response_type: ResponseType) -> bool {
match response_type {
ResponseType::Block => {
matches!(
self.block_request_state.state.state,
State::Downloading { .. }
)
}
ResponseType::Blob => {
matches!(
self.blob_request_state.state.state,
State::Downloading { .. }
)
}
}
}
pub(crate) fn remove_peer_if_useless(&mut self, peer_id: &PeerId, response_type: ResponseType) {
match response_type {
ResponseType::Block => self
.block_request_state
.state
.remove_peer_if_useless(peer_id),
ResponseType::Blob => self
.blob_request_state
.state
.remove_peer_if_useless(peer_id),
}
}
pub(crate) fn check_peer_disconnected(
&mut self,
peer_id: &PeerId,
response_type: ResponseType,
) -> Result<(), ()> {
match response_type {
ResponseType::Block => self
.block_request_state
.state
.check_peer_disconnected(peer_id),
ResponseType::Blob => self
.blob_request_state
.state
.check_peer_disconnected(peer_id),
}
}
}
/// For requests triggered by an `UnknownParentBlock` or `UnknownParentBlob`, this struct
/// is used to cache components as they are sent to the networking layer. We can't use the
/// data availability cache currently because any blocks or blobs without parents
/// won't pass validation and therefore won't make it into the cache.
#[derive(Default)]
pub struct UnknownParentComponents<E: EthSpec> {
pub downloaded_block: Option<Arc<SignedBeaconBlock<E>>>,
pub downloaded_blobs: FixedBlobSidecarList<E>,
}
impl<E: EthSpec> UnknownParentComponents<E> {
pub fn new(
block: Option<Arc<SignedBeaconBlock<E>>>,
blobs: Option<FixedBlobSidecarList<E>>,
) -> Self {
Self {
downloaded_block: block,
downloaded_blobs: blobs.unwrap_or_default(),
}
}
pub fn add_unknown_parent_block(&mut self, block: Arc<SignedBeaconBlock<E>>) {
self.downloaded_block = Some(block);
}
pub fn add_unknown_parent_blobs(&mut self, blobs: FixedBlobSidecarList<E>) {
for (index, blob_opt) in self.downloaded_blobs.iter_mut().enumerate() {
if let Some(Some(downloaded_blob)) = blobs.get(index) {
*blob_opt = Some(downloaded_blob.clone());
}
}
}
pub fn downloaded_indices(&self) -> HashSet<usize> {
self.downloaded_blobs
.iter()
.enumerate()
.filter_map(|(i, blob_opt)| blob_opt.as_ref().map(|_| i))
.collect::<HashSet<_>>()
}
}
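
The struct above is essentially a staging area: gossip can deliver a block and its blobs in any order, so each component is cached as it arrives and `downloaded_indices` reports which blob slots are already filled. A minimal sketch of that flow, assuming some already-received `block` and `blobs` values:

```rust
// Sketch only: accumulate out-of-order gossip components for one block root.
fn stage<E: EthSpec>(
    block: Arc<SignedBeaconBlock<E>>,
    blobs: FixedBlobSidecarList<E>,
) -> HashSet<usize> {
    let mut components = UnknownParentComponents::<E>::default();
    components.add_unknown_parent_block(block);
    components.add_unknown_parent_blobs(blobs);
    // Indices already held; anything else still needs a by-root blob request.
    components.downloaded_indices()
}
```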
/// Object representing the state of a single block or blob lookup request.
#[derive(PartialEq, Eq, Debug)]
pub struct SingleLookupRequestState<const MAX_ATTEMPTS: u8> {
    /// State of this request.
    pub state: State,
    /// Peers that should have this block or blob.
    pub available_peers: HashSet<PeerId>,
    /// Peers that may or may not have this block or blob.
    pub potential_peers: HashSet<PeerId>,
    /// Peers from which we have requested this block.
    pub used_peers: HashSet<PeerId>,
    /// How many times have we attempted to process this block or blob.
    failed_processing: u8,
    /// How many times have we attempted to download this block or blob.
    failed_downloading: u8,
    pub component_processed: bool,
}
#[derive(Debug, PartialEq, Eq)]
pub enum State {
    AwaitingDownload,
    Downloading { peer_id: PeerShouldHave },
    Processing { peer_id: PeerShouldHave },
}
#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
pub enum LookupVerifyError {
    RootMismatch,
    NoBlockReturned,
    ExtraBlocksReturned,
    UnrequestedBlobId,
    ExtraBlobsReturned,
    NotEnoughBlobsReturned,
    InvalidIndex(u64),
    /// We don't have enough information to know
    /// whether the peer is at fault or simply missed
    /// what was requested on gossip.
    BenignFailure,
}
#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
@@ -50,15 +217,465 @@ pub enum LookupRequestError {
    NoPeers,
}
impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS, T> {
    pub fn new(
        requested_block_root: Hash256,
        unknown_parent_components: Option<UnknownParentComponents<T::EthSpec>>,
        peers: &[PeerShouldHave],
        da_checker: Arc<DataAvailabilityChecker<T>>,
    ) -> Self {
Self {
id: <_>::default(),
block_request_state: BlockRequestState::new(requested_block_root, peers),
blob_request_state: BlobRequestState::new(peers),
da_checker,
unknown_parent_components,
triggered: false,
}
}
pub fn is_for_block(&self, block_root: Hash256) -> bool {
self.block_request_state.requested_block_root == block_root
}
    /// Send the necessary requests for blocks and blobs and update `self.id` with the latest
    /// request `Id`s. This will return `Err(())` if no request was sent, because each component
    /// either failed to request or is no longer required.
pub fn request_block_and_blobs(&mut self, cx: &mut SyncNetworkContext<T>) -> Result<(), ()> {
let block_request_id = if let Ok(Some((peer_id, block_request))) = self.request_block() {
cx.single_block_lookup_request(peer_id, block_request).ok()
} else {
None
};
let blob_request_id = if let Ok(Some((peer_id, blob_request))) = self.request_blobs() {
cx.single_blobs_lookup_request(peer_id, blob_request).ok()
} else {
None
};
if block_request_id.is_none() && blob_request_id.is_none() {
return Err(());
}
self.id = LookupId {
block_request_id,
blob_request_id,
};
Ok(())
}
pub fn update_blobs_request(&mut self) {
self.blob_request_state.requested_ids = if let Some(components) =
self.unknown_parent_components.as_ref()
{
let blobs = components.downloaded_indices();
self.da_checker
.get_missing_blob_ids(
self.block_request_state.requested_block_root,
components.downloaded_block.as_ref(),
Some(blobs),
)
.unwrap_or_default()
} else {
self.da_checker
.get_missing_blob_ids_checking_cache(self.block_request_state.requested_block_root)
.unwrap_or_default()
};
}
pub fn get_downloaded_block(&mut self) -> Option<BlockWrapper<T::EthSpec>> {
self.unknown_parent_components
.as_mut()
.and_then(|components| {
let downloaded_block = components.downloaded_block.as_ref();
let downloaded_indices = components.downloaded_indices();
let missing_ids = self.da_checker.get_missing_blob_ids(
self.block_request_state.requested_block_root,
downloaded_block,
Some(downloaded_indices),
);
let download_complete =
missing_ids.map_or(true, |missing_ids| missing_ids.is_empty());
if download_complete {
let UnknownParentComponents {
downloaded_block,
downloaded_blobs,
} = components;
downloaded_block.as_ref().map(|block| {
BlockWrapper::BlockAndBlobs(block.clone(), std::mem::take(downloaded_blobs))
})
} else {
None
}
})
}
pub fn add_unknown_parent_components(
&mut self,
components: UnknownParentComponents<T::EthSpec>,
) {
if let Some(ref mut existing_components) = self.unknown_parent_components {
let UnknownParentComponents {
downloaded_block,
downloaded_blobs,
} = components;
if let Some(block) = downloaded_block {
existing_components.add_unknown_parent_block(block);
}
existing_components.add_unknown_parent_blobs(downloaded_blobs);
} else {
self.unknown_parent_components = Some(components);
}
}
pub fn add_unknown_parent_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
if let Some(ref mut components) = self.unknown_parent_components {
components.add_unknown_parent_block(block)
} else {
self.unknown_parent_components = Some(UnknownParentComponents {
downloaded_block: Some(block),
downloaded_blobs: FixedBlobSidecarList::default(),
})
}
}
pub fn add_unknown_parent_blobs(&mut self, blobs: FixedBlobSidecarList<T::EthSpec>) {
if let Some(ref mut components) = self.unknown_parent_components {
components.add_unknown_parent_blobs(blobs)
} else {
self.unknown_parent_components = Some(UnknownParentComponents {
downloaded_block: None,
downloaded_blobs: blobs,
})
}
}
/// Verifies if the received block matches the requested one.
/// Returns the block for processing if the response is what we expected.
pub fn verify_block(
&mut self,
block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
) -> Result<Option<RootBlockTuple<T::EthSpec>>, LookupVerifyError> {
match self.block_request_state.state.state {
State::AwaitingDownload => {
self.block_request_state
.state
.register_failure_downloading();
Err(LookupVerifyError::ExtraBlocksReturned)
}
State::Downloading { peer_id } => {
match block {
Some(block) => {
// Compute the block root using this specific function so that we can get timing
// metrics.
let block_root = get_block_root(&block);
if block_root != self.block_request_state.requested_block_root {
// return an error and drop the block
// NOTE: we take this is as a download failure to prevent counting the
// attempt as a chain failure, but simply a peer failure.
self.block_request_state
.state
.register_failure_downloading();
Err(LookupVerifyError::RootMismatch)
} else {
// Return the block for processing.
self.block_request_state.state.state = State::Processing { peer_id };
Ok(Some((block_root, block)))
}
}
None => {
if peer_id.should_have_block() {
self.block_request_state
.state
.register_failure_downloading();
Err(LookupVerifyError::NoBlockReturned)
} else {
self.block_request_state.state.state = State::AwaitingDownload;
Err(LookupVerifyError::BenignFailure)
}
}
}
}
State::Processing { peer_id: _ } => match block {
Some(_) => {
// We sent the block for processing and received an extra block.
self.block_request_state
.state
.register_failure_downloading();
Err(LookupVerifyError::ExtraBlocksReturned)
}
None => {
// This is simply the stream termination and we are already processing the
// block
Ok(None)
}
},
}
}
pub fn verify_blob(
&mut self,
blob: Option<Arc<BlobSidecar<T::EthSpec>>>,
) -> Result<Option<RootBlobsTuple<T::EthSpec>>, LookupVerifyError> {
match self.blob_request_state.state.state {
State::AwaitingDownload => {
self.blob_request_state.state.register_failure_downloading();
Err(LookupVerifyError::ExtraBlobsReturned)
}
State::Downloading {
peer_id: peer_source,
} => match blob {
Some(blob) => {
let received_id = blob.id();
if !self.blob_request_state.requested_ids.contains(&received_id) {
self.blob_request_state.state.register_failure_downloading();
Err(LookupVerifyError::UnrequestedBlobId)
} else {
// State should remain downloading until we receive the stream terminator.
self.blob_request_state
.requested_ids
.retain(|id| *id != received_id);
let blob_index = blob.index;
if blob_index >= T::EthSpec::max_blobs_per_block() as u64 {
return Err(LookupVerifyError::InvalidIndex(blob.index));
}
*self
.blob_request_state
.blob_download_queue
.index_mut(blob_index as usize) = Some(blob);
Ok(None)
}
}
None => {
self.blob_request_state.state.state = State::Processing {
peer_id: peer_source,
};
Ok(Some((
self.block_request_state.requested_block_root,
std::mem::take(&mut self.blob_request_state.blob_download_queue),
)))
}
},
State::Processing { peer_id: _ } => match blob {
Some(_) => {
// We sent the blob for processing and received an extra blob.
self.blob_request_state.state.register_failure_downloading();
Err(LookupVerifyError::ExtraBlobsReturned)
}
None => {
// This is simply the stream termination and we are already processing the
// block
Ok(None)
}
},
}
}
pub fn request_block(
&mut self,
) -> Result<Option<(PeerId, BlocksByRootRequest)>, LookupRequestError> {
let block_already_downloaded =
if let Some(components) = self.unknown_parent_components.as_ref() {
components.downloaded_block.is_some()
} else {
self.da_checker
.has_block(&self.block_request_state.requested_block_root)
};
if block_already_downloaded {
return Ok(None);
}
debug_assert!(matches!(
self.block_request_state.state.state,
State::AwaitingDownload
));
let request = BlocksByRootRequest {
block_roots: VariableList::from(vec![self.block_request_state.requested_block_root]),
};
let response_type = ResponseType::Block;
if self.too_many_attempts(response_type) {
Err(LookupRequestError::TooManyAttempts {
cannot_process: self.cannot_process(response_type),
})
} else if let Some(peer_id) = self.get_peer(response_type) {
self.add_used_peer(peer_id, response_type);
Ok(Some((peer_id.to_peer_id(), request)))
} else {
Err(LookupRequestError::NoPeers)
}
}
pub fn request_blobs(
&mut self,
) -> Result<Option<(PeerId, BlobsByRootRequest)>, LookupRequestError> {
self.update_blobs_request();
if self.blob_request_state.requested_ids.is_empty() {
return Ok(None);
}
debug_assert!(matches!(
self.blob_request_state.state.state,
State::AwaitingDownload
));
let request = BlobsByRootRequest {
blob_ids: VariableList::from(self.blob_request_state.requested_ids.clone()),
};
let response_type = ResponseType::Blob;
if self.too_many_attempts(response_type) {
Err(LookupRequestError::TooManyAttempts {
cannot_process: self.cannot_process(response_type),
})
} else if let Some(peer_id) = self.get_peer(response_type) {
self.add_used_peer(peer_id, response_type);
Ok(Some((peer_id.to_peer_id(), request)))
} else {
Err(LookupRequestError::NoPeers)
}
}
fn too_many_attempts(&self, response_type: ResponseType) -> bool {
match response_type {
ResponseType::Block => self.block_request_state.state.failed_attempts() >= MAX_ATTEMPTS,
ResponseType::Blob => self.blob_request_state.state.failed_attempts() >= MAX_ATTEMPTS,
}
}
fn cannot_process(&self, response_type: ResponseType) -> bool {
match response_type {
ResponseType::Block => {
self.block_request_state.state.failed_processing
>= self.block_request_state.state.failed_downloading
}
ResponseType::Blob => {
self.blob_request_state.state.failed_processing
>= self.blob_request_state.state.failed_downloading
}
}
}
fn get_peer(&self, response_type: ResponseType) -> Option<PeerShouldHave> {
match response_type {
ResponseType::Block => self
.block_request_state
.state
.available_peers
.iter()
.choose(&mut rand::thread_rng())
.copied()
.map(PeerShouldHave::BlockAndBlobs)
.or(self
.block_request_state
.state
.potential_peers
.iter()
.choose(&mut rand::thread_rng())
.copied()
.map(PeerShouldHave::Neither)),
ResponseType::Blob => self
.blob_request_state
.state
.available_peers
.iter()
.choose(&mut rand::thread_rng())
.copied()
.map(PeerShouldHave::BlockAndBlobs)
.or(self
.blob_request_state
.state
.potential_peers
.iter()
.choose(&mut rand::thread_rng())
.copied()
.map(PeerShouldHave::Neither)),
}
}
fn add_used_peer(&mut self, peer_id: PeerShouldHave, response_type: ResponseType) {
match response_type {
ResponseType::Block => {
self.block_request_state
.state
.used_peers
.insert(peer_id.to_peer_id());
self.block_request_state.state.state = State::Downloading { peer_id };
}
ResponseType::Blob => {
self.blob_request_state
.state
.used_peers
.insert(peer_id.to_peer_id());
self.blob_request_state.state.state = State::Downloading { peer_id };
}
}
}
pub fn add_peers(&mut self, peers: &[PeerShouldHave]) {
for peer in peers {
match peer {
PeerShouldHave::BlockAndBlobs(peer_id) => {
self.block_request_state.state.add_peer(peer_id);
self.blob_request_state.state.add_peer(peer_id);
}
PeerShouldHave::Neither(peer_id) => {
self.block_request_state.state.add_potential_peer(peer_id);
self.blob_request_state.state.add_potential_peer(peer_id);
}
}
}
}
pub fn processing_peer(&self, response_type: ResponseType) -> Result<PeerShouldHave, ()> {
match response_type {
ResponseType::Block => self.block_request_state.state.processing_peer(),
ResponseType::Blob => self.blob_request_state.state.processing_peer(),
}
}
pub fn downloading_peer(&self, response_type: ResponseType) -> Result<PeerShouldHave, ()> {
match response_type {
ResponseType::Block => self.block_request_state.state.peer(),
ResponseType::Blob => self.blob_request_state.state.peer(),
}
}
pub fn both_components_processed(&self) -> bool {
self.block_request_state.state.component_processed
&& self.blob_request_state.state.component_processed
}
pub fn set_component_processed(&mut self, response_type: ResponseType) {
match response_type {
ResponseType::Block => self.block_request_state.state.component_processed = true,
ResponseType::Blob => self.blob_request_state.state.component_processed = true,
}
}
}
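
`get_peer` above encodes the peer-selection asymmetry: peers that attested to the data ("available") are tried before peers that merely might have seen it on gossip ("potential"), and the choice within each set is random. A standalone sketch of the same rule, with the surrounding types assumed:

```rust
// Sketch of get_peer's preference order: pick a random peer known to have the
// data before falling back to one that merely might have it.
fn pick_peer(
    available: &HashSet<PeerId>,
    potential: &HashSet<PeerId>,
) -> Option<PeerShouldHave> {
    let mut rng = rand::thread_rng();
    available
        .iter()
        .choose(&mut rng)
        .copied()
        .map(PeerShouldHave::BlockAndBlobs)
        .or_else(|| {
            potential
                .iter()
                .choose(&mut rng)
                .copied()
                .map(PeerShouldHave::Neither)
        })
}
```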
impl<const MAX_ATTEMPTS: u8> SingleLookupRequestState<MAX_ATTEMPTS> {
pub fn new(peers: &[PeerShouldHave]) -> Self {
let mut available_peers = HashSet::default();
let mut potential_peers = HashSet::default();
for peer in peers {
match peer {
PeerShouldHave::BlockAndBlobs(peer_id) => {
available_peers.insert(*peer_id);
}
PeerShouldHave::Neither(peer_id) => {
potential_peers.insert(*peer_id);
}
}
}
        Self {
            state: State::AwaitingDownload,
            available_peers,
            potential_peers,
            used_peers: HashSet::default(),
            failed_processing: 0,
            failed_downloading: 0,
            component_processed: false,
        }
    }
@@ -80,19 +697,23 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
        self.failed_processing + self.failed_downloading
    }
    pub fn add_peer(&mut self, peer_id: &PeerId) {
        self.potential_peers.remove(peer_id);
        self.available_peers.insert(*peer_id);
    }

    pub fn add_potential_peer(&mut self, peer_id: &PeerId) {
        if !self.available_peers.contains(peer_id) {
            self.potential_peers.insert(*peer_id);
        }
    }
    /// If a peer disconnects, this request could be failed. If so, an error is returned
    pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> {
        self.available_peers.remove(dc_peer_id);
        self.potential_peers.remove(dc_peer_id);
        if let State::Downloading { peer_id } = &self.state {
            if peer_id.as_peer_id() == dc_peer_id {
                // Peer disconnected before providing a block
                self.register_failure_downloading();
                return Err(());
@@ -101,90 +722,67 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
        Ok(())
    }
    pub fn processing_peer(&self) -> Result<PeerShouldHave, ()> {
        if let State::Processing { peer_id } = &self.state {
            Ok(*peer_id)
        } else {
            Err(())
        }
    }

    pub fn peer(&self) -> Result<PeerShouldHave, ()> {
        match &self.state {
            State::Processing { peer_id } => Ok(*peer_id),
            State::Downloading { peer_id } => Ok(*peer_id),
            _ => Err(()),
        }
    }

    pub fn remove_peer_if_useless(&mut self, peer_id: &PeerId) {
        if !self.available_peers.is_empty() || self.potential_peers.len() > 1 {
            self.potential_peers.remove(peer_id);
        }
    }
}
impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> slog::Value
for SingleBlockLookup<MAX_ATTEMPTS, T>
{
fn serialize(
&self,
_record: &slog::Record,
key: slog::Key,
serializer: &mut dyn slog::Serializer,
) -> slog::Result {
serializer.emit_str("request", key)?;
serializer.emit_arguments(
"hash",
&format_args!("{}", self.block_request_state.requested_block_root),
)?;
serializer.emit_arguments(
"blob_ids",
&format_args!("{:?}", self.blob_request_state.requested_ids),
)?;
serializer.emit_arguments(
"block_request_state.state",
&format_args!("{:?}", self.block_request_state.state),
)?;
serializer.emit_arguments(
"blob_request_state.state",
&format_args!("{:?}", self.blob_request_state.state),
)?;
slog::Result::Ok(())
}
}
impl<const MAX_ATTEMPTS: u8> slog::Value for SingleLookupRequestState<MAX_ATTEMPTS> {
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        serializer.emit_str("request_state", key)?;
        match &self.state {
            State::AwaitingDownload => {
                "awaiting_download".serialize(record, "state", serializer)?
@@ -205,9 +803,16 @@ impl<const MAX_ATTEMPTS: u8> slog::Value for SingleBlockRequest<MAX_ATTEMPTS> {
#[cfg(test)]
mod tests {
    use super::*;
use beacon_chain::builder::Witness;
use beacon_chain::eth1_chain::CachingEth1Backend;
use sloggers::null::NullLoggerBuilder;
use sloggers::Build;
use slot_clock::{SlotClock, TestingSlotClock};
use std::time::Duration;
use store::{HotColdDB, MemoryStore, StoreConfig};
    use types::{
        test_utils::{SeedableRng, TestRandom, XorShiftRng},
        ChainSpec, EthSpec, MinimalEthSpec as E, SignedBeaconBlock, Slot,
    };

    fn rand_block() -> SignedBeaconBlock<E> {
@@ -219,13 +824,27 @@
            types::Signature::random_for_test(&mut rng),
        )
    }
type T = Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
    #[test]
    fn test_happy_path() {
        let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random());
        let block = rand_block();
        let spec = E::default_spec();
        let slot_clock = TestingSlotClock::new(
            Slot::new(0),
            Duration::from_secs(0),
            Duration::from_secs(spec.seconds_per_slot),
        );
        let log = NullLoggerBuilder.build().expect("logger should build");
        let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log)
            .expect("store");
        let da_checker = Arc::new(
            DataAvailabilityChecker::new(slot_clock, None, store.into(), spec)
                .expect("data availability checker"),
        );
        let mut sl =
            SingleBlockLookup::<4, T>::new(block.canonical_root(), None, &[peer_id], da_checker);
        sl.request_block().unwrap();
        sl.verify_block(Some(block.into())).unwrap().unwrap();
    }
@@ -233,13 +852,32 @@ mod tests {
    #[test]
    fn test_block_lookup_failures() {
        const FAILURES: u8 = 3;
        let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random());
        let block = rand_block();
        let spec = E::default_spec();
        let slot_clock = TestingSlotClock::new(
            Slot::new(0),
            Duration::from_secs(0),
            Duration::from_secs(spec.seconds_per_slot),
        );
        let log = NullLoggerBuilder.build().expect("logger should build");
        let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log)
            .expect("store");
        let da_checker = Arc::new(
            DataAvailabilityChecker::new(slot_clock, None, store.into(), spec)
                .expect("data availability checker"),
        );
        let mut sl = SingleBlockLookup::<FAILURES, T>::new(
            block.canonical_root(),
            None,
            &[peer_id],
            da_checker,
        );
        for _ in 1..FAILURES {
            sl.request_block().unwrap();
            sl.block_request_state.state.register_failure_downloading();
        }

        // Now we receive the block and send it for processing
@@ -247,7 +885,7 @@ mod tests {
        sl.verify_block(Some(block.into())).unwrap().unwrap();

        // One processing failure maxes the available attempts
        sl.block_request_state.state.register_failure_processing();
        assert_eq!(
            sl.request_block(),
            Err(LookupRequestError::TooManyAttempts {

File diff suppressed because it is too large


@@ -1,4 +1,5 @@
use beacon_chain::blob_verification::BlockWrapper;
use ssz_types::FixedVector;
use std::{collections::VecDeque, sync::Arc};
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
@@ -55,7 +56,22 @@ impl<T: EthSpec> BlocksAndBlobsRequestInfo<T> {
            if blob_list.is_empty() {
                responses.push(BlockWrapper::Block(block))
            } else {
                let mut blobs_fixed = vec![None; T::max_blobs_per_block()];
                for blob in blob_list {
                    let blob_index = blob.index as usize;
                    let Some(blob_opt) = blobs_fixed.get_mut(blob_index) else {
                        return Err("Invalid blob index");
                    };
                    if blob_opt.is_some() {
                        return Err("Repeat blob index");
                    } else {
                        *blob_opt = Some(blob);
                    }
                }
                responses.push(BlockWrapper::BlockAndBlobs(
                    block,
                    FixedVector::from(blobs_fixed),
                ))
            }
        }
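
The insertion loop above re-keys a by-range blob response into a fixed-length, index-addressed list, rejecting out-of-range and duplicate indices. The same rule in isolation, using hypothetical plain types rather than the PR's API:

```rust
// Sketch: place each received index into a fixed-size table, erroring on
// out-of-range and duplicate indices, as the coupling loop above does.
fn place(indices: &[usize], max: usize) -> Result<Vec<Option<usize>>, &'static str> {
    let mut fixed = vec![None; max];
    for &i in indices {
        let slot = fixed.get_mut(i).ok_or("Invalid blob index")?;
        if slot.is_some() {
            return Err("Repeat blob index");
        }
        *slot = Some(i);
    }
    Ok(fixed)
}
```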


@@ -34,17 +34,24 @@
//! search for the block and subsequently search for parents if needed.

use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart};
use super::block_lookups::{BlockLookups, PeerShouldHave};
use super::network_context::{BlockOrBlob, SyncNetworkContext};
use super::peer_sync_info::{remote_sync_type, PeerSyncType};
use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
use crate::service::NetworkMessage;
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::delayed_lookup;
use crate::sync::block_lookups::delayed_lookup::DelayedLookupMessage;
pub use crate::sync::block_lookups::ResponseType;
use crate::sync::block_lookups::UnknownParentComponents;
use crate::sync::range_sync::ByRangeRequestType;
use beacon_chain::blob_verification::AsBlock;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{
    AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState,
    MAXIMUM_GOSSIP_CLOCK_DISPARITY,
};
use futures::StreamExt;
use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
use lighthouse_network::rpc::RPCError;
@@ -52,12 +59,14 @@ use lighthouse_network::types::{NetworkGlobals, SyncState};
use lighthouse_network::SyncInfo;
use lighthouse_network::{PeerAction, PeerId};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use std::boxed::Box;
use std::ops::IndexMut;
use std::ops::Sub;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
/// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync
@@ -68,6 +77,9 @@ use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
/// gossip if no peers are further than this range ahead of us that we have not already downloaded
/// blocks for.
pub const SLOT_IMPORT_TOLERANCE: usize = 32;
/// The maximum number of messages the delay queue can handle in a single slot before messages are
/// dropped.
pub const DELAY_QUEUE_CHANNEL_SIZE: usize = 128;
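
Because the delay queue is fed with `try_send` on a bounded channel (see the handlers further down), overflow sheds load instead of blocking the sync thread. A small sketch of the tokio semantics this constant relies on:

```rust
// Sketch: once DELAY_QUEUE_CHANNEL_SIZE (128) messages are buffered and no
// receiver has drained them, try_send returns TrySendError::Full and the
// delayed lookup is dropped with a warning rather than stalling the sender.
fn main() {
    let (tx, _rx) = tokio::sync::mpsc::channel::<u64>(128);
    for i in 0..129 {
        if let Err(e) = tx.try_send(i) {
            // Only the 129th send lands here.
            eprintln!("delayed lookup dropped: {e}");
        }
    }
}
```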
pub type Id = u32;

@@ -81,11 +93,11 @@ pub enum RequestId {
    /// Request was from the backfill sync algorithm.
    BackFillBlocks { id: Id },
    /// Backfill request that is composed by both a block range request and a blob range request.
    BackFillBlockAndBlobs { id: Id },
    /// The request was from a chain in the range sync algorithm.
    RangeBlocks { id: Id },
    /// Range request that is composed by both a block range request and a blob range request.
    RangeBlockAndBlobs { id: Id },
}
// TODO(diva) I'm updating functions one at a time, but this should be revisited because I think
@@ -115,18 +127,24 @@ pub enum SyncMessage<T: EthSpec> {
    },

    /// A block with an unknown parent has been received.
    UnknownParentBlock(PeerId, BlockWrapper<T>, Hash256),

    /// A blob with an unknown parent has been received.
    UnknownParentBlob(PeerId, Arc<BlobSidecar<T>>),

    /// A peer has sent an attestation that references a block that is unknown. This triggers the
    /// manager to attempt to find the block matching the unknown hash.
    UnknownBlockHashFromAttestation(PeerId, Hash256),

    /// A peer has sent a blob that references a block that is unknown or a peer has sent a block for
    /// which we haven't received blobs.
    ///
    /// We will either attempt to find the block matching the unknown hash immediately or queue a lookup,
    /// which will then trigger the request when we receive `MissingGossipBlockComponentsDelayed`.
    MissingGossipBlockComponents(Slot, PeerId, Hash256),

    /// This message triggers a request for missing block components after a delay.
    MissingGossipBlockComponentsDelayed(Hash256),

    /// A peer has disconnected.
    Disconnect(PeerId),
@@ -145,9 +163,10 @@
    },

    /// Block processed
    BlockComponentProcessed {
        process_type: BlockProcessType,
        result: BlockProcessingResult<T>,
        response_type: ResponseType,
    },
}
@@ -159,8 +178,8 @@ pub enum BlockProcessType {
}

#[derive(Debug)]
pub enum BlockProcessingResult<T: EthSpec> {
    Ok(AvailabilityProcessingStatus),
    Err(BlockError<T>),
    Ignored,
}
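
`Ok` now carries an `AvailabilityProcessingStatus` rather than being unit, so callers can distinguish a fully imported block from one still awaiting blobs. A hedged sketch of consuming the new type, assuming the `Imported`/`MissingComponents` variants this PR introduces in the availability checker:

```rust
// Sketch only: how a handler might branch on the richer result.
fn on_processed<T: EthSpec>(result: BlockProcessingResult<T>) {
    match result {
        // Block and all required blobs are in; the lookup can be dropped.
        BlockProcessingResult::Ok(AvailabilityProcessingStatus::Imported(_root)) => {}
        // Keep the lookup alive and request whatever is still missing.
        BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents(
            _slot,
            _root,
        )) => {}
        // Penalize or retry depending on the error.
        BlockProcessingResult::Err(_e) => {}
        // Processing was skipped; drop the lookup without scoring anyone.
        BlockProcessingResult::Ignored => {}
    }
}
```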
@@ -205,6 +224,8 @@ pub struct SyncManager<T: BeaconChainTypes> {
    block_lookups: BlockLookups<T>,

    delayed_lookups: mpsc::Sender<DelayedLookupMessage>,

    /// The logger for the import manager.
    log: Logger,
}
@@ -226,6 +247,8 @@ pub fn spawn<T: BeaconChainTypes>(
    );

    // generate the message channel
    let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
    let (delayed_lookups_send, delayed_lookups_recv) =
        mpsc::channel::<DelayedLookupMessage>(DELAY_QUEUE_CHANNEL_SIZE);

    // create an instance of the SyncManager
    let mut sync_manager = SyncManager {
@@ -240,15 +263,29 @@ pub fn spawn<T: BeaconChainTypes>(
            log.clone(),
        ),
        range_sync: RangeSync::new(beacon_chain.clone(), log.clone()),
        backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals, log.clone()),
        block_lookups: BlockLookups::new(
            beacon_chain.data_availability_checker.clone(),
            log.clone(),
        ),
        delayed_lookups: delayed_lookups_send,
        log: log.clone(),
    };

    let log_clone = log.clone();
    let sync_send_clone = sync_send.clone();
    delayed_lookup::spawn_delayed_lookup_service(
        &executor,
        beacon_chain,
        delayed_lookups_recv,
        sync_send,
        log,
    );

    // spawn the sync manager thread
    debug!(log_clone, "Sync Manager started");
    executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync");
    sync_send_clone
}
impl<T: BeaconChainTypes> SyncManager<T> {
@@ -291,8 +328,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
        trace!(self.log, "Sync manager received a failed RPC");
        match request_id {
            RequestId::SingleBlock { id } => {
                self.block_lookups.single_block_lookup_failed(
                    id,
                    &peer_id,
                    &mut self.network,
                    error,
                );
            }
            RequestId::ParentLookup { id } => {
                self.block_lookups
@@ -313,7 +354,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                }
            }
            RequestId::BackFillBlockAndBlobs { id } => {
                if let Some(batch_id) = self
                    .network
                    .backfill_request_failed(id, ByRangeRequestType::BlocksAndBlobs)
@@ -342,7 +383,7 @@
                    self.update_sync_state()
                }
            }
            RequestId::RangeBlockAndBlobs { id } => {
                if let Some((chain_id, batch_id)) = self
                    .network
                    .range_sync_request_failed(id, ByRangeRequestType::BlocksAndBlobs)
@@ -567,48 +608,83 @@
                beacon_block,
                seen_timestamp,
            } => {
                self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp);
            }
            SyncMessage::RpcBlob {
                request_id,
                peer_id,
                blob_sidecar,
                seen_timestamp,
            } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp),
            SyncMessage::UnknownParentBlock(peer_id, block, block_root) => {
                let block_slot = block.slot();
                let (block, blobs) = block.deconstruct();
                let parent_root = block.parent_root();
                let parent_components = UnknownParentComponents::new(Some(block), blobs);
                self.handle_unknown_parent(
                    peer_id,
                    block_root,
                    parent_root,
                    block_slot,
                    Some(parent_components),
                );
            }
            SyncMessage::UnknownParentBlob(peer_id, blob) => {
                let blob_slot = blob.slot;
                let block_root = blob.block_root;
                let parent_root = blob.block_parent_root;
                let blob_index = blob.index;
                let mut blobs = FixedBlobSidecarList::default();
                *blobs.index_mut(blob_index as usize) = Some(blob);
                self.handle_unknown_parent(
                    peer_id,
                    block_root,
                    parent_root,
                    blob_slot,
                    Some(UnknownParentComponents::new(None, Some(blobs))),
                );
            }
            SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_hash) => {
                // If we are not synced, ignore this block.
                if self.synced_and_connected(&peer_id) {
                    self.block_lookups.search_block(
                        block_hash,
                        PeerShouldHave::BlockAndBlobs(peer_id),
                        &mut self.network,
                    );
                }
            }
            SyncMessage::MissingGossipBlockComponents(slot, peer_id, block_root) => {
                // If we are not synced, ignore this block.
                if self.synced_and_connected(&peer_id) {
                    if self.should_delay_lookup(slot) {
                        self.block_lookups
                            .search_block_delayed(block_root, PeerShouldHave::Neither(peer_id));
                        if let Err(e) = self
                            .delayed_lookups
                            .try_send(DelayedLookupMessage::MissingComponents(block_root))
                        {
                            warn!(self.log, "Delayed lookup dropped for block referenced by a blob";
                                "block_root" => ?block_root, "error" => ?e);
                        }
                    } else {
                        self.block_lookups.search_block(
                            block_root,
                            PeerShouldHave::Neither(peer_id),
                            &mut self.network,
                        )
                    }
                }
            }
            SyncMessage::MissingGossipBlockComponentsDelayed(block_root) => {
                if self
                    .block_lookups
                    .trigger_lookup_by_root(block_root, &mut self.network)
                    .is_err()
                {
                    // No request was made for block or blob so the lookup is dropped.
                    self.block_lookups.remove_lookup_by_root(block_root);
                }
            }
            SyncMessage::Disconnect(peer_id) => {
                self.peer_disconnect(&peer_id);
@@ -618,17 +694,17 @@
                request_id,
                error,
            } => self.inject_error(peer_id, request_id, error),
            SyncMessage::BlockComponentProcessed {
                process_type,
                result,
                response_type,
            } => match process_type {
                BlockProcessType::SingleBlock { id } => self
                    .block_lookups
                    .single_block_component_processed(id, result, response_type, &mut self.network),
                BlockProcessType::ParentLookup { chain_hash } => self
                    .block_lookups
                    .parent_block_processed(chain_hash, result, response_type, &mut self.network),
            },
            SyncMessage::BatchProcessed { sync_type, result } => match sync_type {
                ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => {
@@ -659,20 +735,97 @@
                .block_lookups
                .parent_chain_processed(chain_hash, result, &mut self.network),
            },
        }
    }
+    fn handle_unknown_parent(
+        &mut self,
+        peer_id: PeerId,
+        block_root: Hash256,
+        parent_root: Hash256,
+        slot: Slot,
+        parent_components: Option<UnknownParentComponents<T::EthSpec>>,
+    ) {
+        if self.should_search_for_block(slot, &peer_id) {
+            self.block_lookups.search_parent(
+                slot,
+                block_root,
+                parent_root,
+                peer_id,
+                &mut self.network,
+            );
+            if self.should_delay_lookup(slot) {
+                self.block_lookups.search_child_delayed(
+                    block_root,
+                    parent_components,
+                    &[PeerShouldHave::Neither(peer_id)],
+                );
+                if let Err(e) = self
+                    .delayed_lookups
+                    .try_send(DelayedLookupMessage::MissingComponents(block_root))
+                {
+                    warn!(self.log, "Delayed lookups dropped for block"; "block_root" => ?block_root, "error" => ?e);
+                }
+            } else {
+                self.block_lookups.search_child_block(
+                    block_root,
+                    parent_components,
+                    &[PeerShouldHave::Neither(peer_id)],
+                    &mut self.network,
+                );
+            }
+        }
+    }
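
Note the asymmetry above: the parent lookup itself always starts immediately, and only the child lookup (the block whose parent is unknown, plus any `parent_components` already collected from gossip) goes through `should_delay_lookup`, since the child's missing pieces may still turn up over gossip within the current slot.
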
+    fn should_delay_lookup(&mut self, slot: Slot) -> bool {
+        let earliest_slot = self
+            .chain
+            .slot_clock
+            .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY);
+        let latest_slot = self
+            .chain
+            .slot_clock
+            .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY);
+        if let (Some(earliest_slot), Some(latest_slot)) = (earliest_slot, latest_slot) {
+            let msg_for_current_slot = slot >= earliest_slot && slot <= latest_slot;
+            let delay_threshold_unmet = self
+                .chain
+                .slot_clock
+                .seconds_from_current_slot_start()
+                .map_or(false, |secs_into_slot| {
+                    secs_into_slot < self.chain.slot_clock.single_lookup_delay()
+                });
+            msg_for_current_slot && delay_threshold_unmet
+        } else {
+            false
+        }
+    }
+    fn should_search_for_block(&mut self, block_slot: Slot, peer_id: &PeerId) -> bool {
+        if !self.network_globals.sync_state.read().is_synced() {
+            let head_slot = self.chain.canonical_head.cached_head().head_slot();
+
+            // If the block is far in the future, ignore it. If it's within the slot tolerance of
+            // our current head, regardless of the syncing state, fetch it.
+            if (head_slot >= block_slot
+                && head_slot.sub(block_slot).as_usize() > SLOT_IMPORT_TOLERANCE)
+                || (head_slot < block_slot
+                    && block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE)
+            {
+                return false;
+            }
+        }
+
+        self.network_globals.peers.read().is_connected(peer_id)
+            && self.network.is_execution_engine_online()
+    }
+    fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool {
+        self.network_globals.sync_state.read().is_synced()
+            && self.network_globals.peers.read().is_connected(peer_id)
+            && self.network.is_execution_engine_online()
+    }
     fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) {
         self.network.update_execution_engine_state(engine_state);
@@ -728,50 +881,30 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         }
     }
-    fn rpc_block_or_blob_received(
+    fn rpc_block_received(
         &mut self,
         request_id: RequestId,
         peer_id: PeerId,
-        block_or_blob: BlockOrBlob<T::EthSpec>,
+        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
         seen_timestamp: Duration,
     ) {
         match request_id {
-            RequestId::SingleBlock { id } => {
-                // TODO(diva) adjust when dealing with by root requests. This code is here to
-                // satisfy dead code analysis
-                match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => {
-                        self.block_lookups.single_block_lookup_response(
-                            id,
-                            peer_id,
-                            maybe_block.map(BlockWrapper::Block),
-                            seen_timestamp,
-                            &mut self.network,
-                        )
-                    }
-                    BlockOrBlob::Sidecar(_) => unimplemented!("Mismatch between BlockWrapper and what the network receives needs to be handled first."),
-                }
-            }
+            RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response(
+                id,
+                peer_id,
+                block,
+                seen_timestamp,
+                &mut self.network,
+            ),
-            RequestId::ParentLookup { id } => {
-                // TODO(diva) adjust when dealing with by root requests. This code is here to
-                // satisfy dead code analysis
-                match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => self.block_lookups.parent_lookup_response(
-                        id,
-                        peer_id,
-                        maybe_block.map(BlockWrapper::Block),
-                        seen_timestamp,
-                        &mut self.network,
-                    ),
-                    BlockOrBlob::Sidecar(_) => unimplemented!("Mismatch between BlockWrapper and what the network receives needs to be handled first."),
-                }
-            }
+            RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response(
+                id,
+                peer_id,
+                block,
+                seen_timestamp,
+                &mut self.network,
+            ),
             RequestId::BackFillBlocks { id } => {
-                let maybe_block = match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => maybe_block,
-                    BlockOrBlob::Sidecar(_) => todo!("I think this is unreachable"),
-                };
-                let is_stream_terminator = maybe_block.is_none();
+                let is_stream_terminator = block.is_none();
                 if let Some(batch_id) = self
                     .network
                     .backfill_sync_only_blocks_response(id, is_stream_terminator)
@@ -781,7 +914,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         batch_id,
                         &peer_id,
                         id,
-                        maybe_block.map(|block| block.into()),
+                        block.map(BlockWrapper::Block),
                     ) {
                         Ok(ProcessResult::SyncCompleted) => self.update_sync_state(),
                         Ok(ProcessResult::Successful) => {}
@@ -794,14 +927,10 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                 }
             }
             RequestId::RangeBlocks { id } => {
-                let maybe_block = match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => maybe_block,
-                    BlockOrBlob::Sidecar(_) => todo!("I think this should be unreachable, since this is a range only-blocks request, and the network should not accept this chunk at all. Needs better handling"),
-                };
-                let is_stream_terminator = maybe_block.is_none();
+                let is_stream_terminator = block.is_none();
                 if let Some((chain_id, batch_id)) = self
                     .network
-                    .range_sync_block_response(id, is_stream_terminator)
+                    .range_sync_block_only_response(id, is_stream_terminator)
                 {
                     self.range_sync.blocks_by_range_response(
                         &mut self.network,
@@ -809,17 +938,53 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         chain_id,
                         batch_id,
                         id,
-                        maybe_block.map(|block| block.into()),
+                        block.map(BlockWrapper::Block),
                     );
                     self.update_sync_state();
                 }
             }
-            RequestId::BackFillBlobs { id } => {
-                self.backfill_block_and_blobs_response(id, peer_id, block_or_blob)
+            RequestId::BackFillBlockAndBlobs { id } => {
+                self.backfill_block_and_blobs_response(id, peer_id, block.into())
             }
-            RequestId::RangeBlobs { id } => {
-                self.range_block_and_blobs_response(id, peer_id, block_or_blob)
+            RequestId::RangeBlockAndBlobs { id } => {
+                self.range_block_and_blobs_response(id, peer_id, block.into())
             }
         }
     }
+    fn rpc_blob_received(
+        &mut self,
+        request_id: RequestId,
+        peer_id: PeerId,
+        blob: Option<Arc<BlobSidecar<T::EthSpec>>>,
+        seen_timestamp: Duration,
+    ) {
+        match request_id {
+            RequestId::SingleBlock { id } => self.block_lookups.single_blob_lookup_response(
+                id,
+                peer_id,
+                blob,
+                seen_timestamp,
+                &mut self.network,
+            ),
+            RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_blob_response(
+                id,
+                peer_id,
+                blob,
+                seen_timestamp,
+                &mut self.network,
+            ),
+            RequestId::BackFillBlocks { id: _ } => {
+                crit!(self.log, "Blob received during backfill block request"; "peer_id" => %peer_id);
+            }
+            RequestId::RangeBlocks { id: _ } => {
+                crit!(self.log, "Blob received during range block request"; "peer_id" => %peer_id);
+            }
+            RequestId::BackFillBlockAndBlobs { id } => {
+                self.backfill_block_and_blobs_response(id, peer_id, blob.into())
+            }
+            RequestId::RangeBlockAndBlobs { id } => {
+                self.range_block_and_blobs_response(id, peer_id, blob.into())
+            }
+        }
+    }
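
With the old combined handler split in two, the blocks-only request ids (`BackFillBlocks`, `RangeBlocks`) can treat an incoming blob as a hard protocol violation (`crit!`), while the coupled `BackFillBlockAndBlobs` and `RangeBlockAndBlobs` ids funnel both response types into the shared accumulators through the `BlockOrBlob` conversions (`block.into()` / `blob.into()`).
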
@@ -863,7 +1028,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
                     );
                     // TODO: penalize the peer for being a bad boy
-                    let id = RequestId::RangeBlobs { id };
+                    let id = RequestId::RangeBlockAndBlobs { id };
                     self.inject_error(peer_id, id, RPCError::InvalidData(e.into()))
                 }
             }
@@ -915,7 +1080,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
                     );
                     // TODO: penalize the peer for being a bad boy
-                    let id = RequestId::BackFillBlobs { id };
+                    let id = RequestId::BackFillBlockAndBlobs { id };
                     self.inject_error(peer_id, id, RPCError::InvalidData(e.into()))
                 }
             }
@@ -923,17 +1088,19 @@ impl<T: BeaconChainTypes> SyncManager<T> {
     }
 }
-impl<IgnoredOkVal, T: EthSpec> From<Result<IgnoredOkVal, BlockError<T>>> for BlockProcessResult<T> {
-    fn from(result: Result<IgnoredOkVal, BlockError<T>>) -> Self {
+impl<T: EthSpec> From<Result<AvailabilityProcessingStatus, BlockError<T>>>
+    for BlockProcessingResult<T>
+{
+    fn from(result: Result<AvailabilityProcessingStatus, BlockError<T>>) -> Self {
         match result {
-            Ok(_) => BlockProcessResult::Ok,
-            Err(e) => e.into(),
+            Ok(status) => BlockProcessingResult::Ok(status),
+            Err(e) => BlockProcessingResult::Err(e),
         }
     }
 }

-impl<T: EthSpec> From<BlockError<T>> for BlockProcessResult<T> {
+impl<T: EthSpec> From<BlockError<T>> for BlockProcessingResult<T> {
     fn from(e: BlockError<T>) -> Self {
-        BlockProcessResult::Err(e)
+        BlockProcessingResult::Err(e)
     }
 }


@@ -9,5 +9,6 @@ mod network_context;
 mod peer_sync_info;
 mod range_sync;

+pub use block_lookups::UnknownParentComponents;
 pub use manager::{BatchProcessResult, SyncMessage};
 pub use range_sync::{BatchOperationOutcome, ChainId};


@@ -7,11 +7,11 @@ use super::range_sync::{BatchId, ByRangeRequestType, ChainId};
 use crate::beacon_processor::WorkEvent;
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::ToStatusMessage;
-use crate::sync::block_lookups::ForceBlockRequest;
+use crate::sync::block_lookups::{BlobRequestId, BlockRequestId};
 use beacon_chain::blob_verification::BlockWrapper;
 use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState};
 use fnv::FnvHashMap;
-use lighthouse_network::rpc::methods::BlobsByRangeRequest;
+use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
 use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason};
 use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request};
 use slog::{debug, trace, warn};
@@ -62,7 +62,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
     /// Channel to send work to the beacon processor.
     beacon_processor_send: mpsc::Sender<WorkEvent<T>>,

-    chain: Arc<BeaconChain<T>>,
+    pub chain: Arc<BeaconChain<T>>,

     /// Logger for the `SyncNetworkContext`.
     log: slog::Logger,
@@ -71,7 +71,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
 /// Small enumeration to make dealing with block and blob requests easier.
 pub enum BlockOrBlob<T: EthSpec> {
     Block(Option<Arc<SignedBeaconBlock<T>>>),
-    Sidecar(Option<Arc<BlobSidecar<T>>>),
+    Blob(Option<Arc<BlobSidecar<T>>>),
 }

 impl<T: EthSpec> From<Option<Arc<SignedBeaconBlock<T>>>> for BlockOrBlob<T> {
@@ -82,7 +82,7 @@ impl<T: EthSpec> From<Option<Arc<SignedBeaconBlock<T>>>> for BlockOrBlob<T> {
 impl<T: EthSpec> From<Option<Arc<BlobSidecar<T>>>> for BlockOrBlob<T> {
     fn from(blob: Option<Arc<BlobSidecar<T>>>) -> Self {
-        BlockOrBlob::Sidecar(blob)
+        BlockOrBlob::Blob(blob)
     }
 }
@@ -187,7 +187,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         // create the shared request id. This is fine since the rpc handles substream ids.
         let id = self.next_id();
-        let request_id = RequestId::Sync(SyncRequestId::RangeBlobs { id });
+        let request_id = RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id });

         // Create the blob request based on the blocks request.
         let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
@@ -260,7 +260,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         // create the shared request id. This is fine since the rpc handles substream ids.
         let id = self.next_id();
-        let request_id = RequestId::Sync(SyncRequestId::BackFillBlobs { id });
+        let request_id = RequestId::Sync(SyncRequestId::BackFillBlockAndBlobs { id });

         // Create the blob request based on the blocks request.
         let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
@@ -289,7 +289,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
     }

     /// Response for a request that is only for blocks.
-    pub fn range_sync_block_response(
+    pub fn range_sync_block_only_response(
         &mut self,
         request_id: Id,
         is_stream_terminator: bool,
@@ -313,7 +313,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         let info = &mut req.block_blob_info;
         match block_or_blob {
             BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
-            BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
+            BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
         }
         if info.is_finished() {
             // If the request is finished, dequeue everything
@@ -390,7 +390,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         let (_, info) = entry.get_mut();
         match block_or_blob {
             BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
-            BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
+            BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
         }
         if info.is_finished() {
             // If the request is finished, dequeue everything
@@ -409,26 +409,15 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         }
     }

     /// Sends a blocks by root request for a single block lookup.
     pub fn single_block_lookup_request(
         &mut self,
         peer_id: PeerId,
         request: BlocksByRootRequest,
     ) -> Result<Id, &'static str> {
-        let request = if self
-            .chain
-            .is_data_availability_check_required()
-            .map_err(|_| "Unable to read slot clock")?
-        {
-            trace!(
-                self.log,
-                "Sending BlobsByRoot Request";
-                "method" => "BlobsByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            unimplemented!("There is no longer such a thing as a single block lookup, since we need to ask for blobs and blocks separately");
-        } else {
+        let id = self.next_id();
+        let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });
         trace!(
             self.log,
             "Sending BlocksByRoot Request";
@@ -436,56 +425,85 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
             "method" => "BlocksByRoot",
             "count" => request.block_roots.len(),
             "peer" => %peer_id
         );
-            Request::BlocksByRoot(request)
-        };
-        let id = self.next_id();
-        let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });
         self.send_network_msg(NetworkMessage::SendRequest {
             peer_id,
-            request,
+            request: Request::BlocksByRoot(request),
             request_id,
         })?;
         Ok(id)
     }

+    /// Sends a blobs by root request for a single blob lookup.
+    pub fn single_blobs_lookup_request(
+        &mut self,
+        peer_id: PeerId,
+        request: BlobsByRootRequest,
+    ) -> Result<Id, &'static str> {
+        let id = self.next_id();
+        let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });
+        trace!(
+            self.log,
+            "Sending BlobsByRoot Request";
+            "method" => "BlobsByRoot",
+            "count" => request.blob_ids.len(),
+            "peer" => %peer_id
+        );
+        self.send_network_msg(NetworkMessage::SendRequest {
+            peer_id,
+            request: Request::BlobsByRoot(request),
+            request_id,
+        })?;
+        Ok(id)
+    }
     /// Sends a blocks by root request for a parent request.
-    pub fn parent_lookup_request(
+    pub fn parent_lookup_block_request(
         &mut self,
         peer_id: PeerId,
         request: BlocksByRootRequest,
-        force_block_request: ForceBlockRequest,
-    ) -> Result<Id, &'static str> {
-        let request = if self
-            .chain
-            .is_data_availability_check_required()
-            .map_err(|_| "Unable to read slot clock")?
-            && matches!(force_block_request, ForceBlockRequest::False)
-        {
-            trace!(
-                self.log,
-                "Sending BlobsByRoot Request";
-                "method" => "BlobsByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            unimplemented!(
-                "Parent requests now need to interleave blocks and blobs or something like that."
-            )
-        } else {
-            trace!(
-                self.log,
-                "Sending BlocksByRoot Request";
-                "method" => "BlocksByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            Request::BlocksByRoot(request)
-        };
-        let id = self.next_id();
-        let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });
+    ) -> Result<BlockRequestId, &'static str> {
+        let id = self.next_id();
+        let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });
+        trace!(
+            self.log,
+            "Sending parent BlocksByRoot Request";
+            "method" => "BlocksByRoot",
+            "count" => request.block_roots.len(),
+            "peer" => %peer_id
+        );
         self.send_network_msg(NetworkMessage::SendRequest {
             peer_id,
-            request,
+            request: Request::BlocksByRoot(request),
             request_id,
         })?;
         Ok(id)
     }

+    /// Sends a blobs by root request for a parent request.
+    pub fn parent_lookup_blobs_request(
+        &mut self,
+        peer_id: PeerId,
+        request: BlobsByRootRequest,
+    ) -> Result<BlobRequestId, &'static str> {
+        let id = self.next_id();
+        let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });
+        trace!(
+            self.log,
+            "Sending parent BlobsByRoot Request";
+            "method" => "BlobsByRoot",
+            "count" => request.blob_ids.len(),
+            "peer" => %peer_id
+        );
+        self.send_network_msg(NetworkMessage::SendRequest {
+            peer_id,
+            request: Request::BlobsByRoot(request),
+            request_id,
+        })?;
+        Ok(id)


@@ -685,7 +685,7 @@ mod tests {
         range.add_peer(&mut rig.cx, local_info, peer1, head_info);
         let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
             RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
             }
             other => panic!("unexpected request {:?}", other),
         };
@@ -704,7 +704,7 @@ mod tests {
         range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
         let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
             RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
             }
             other => panic!("unexpected request {:?}", other),
         };


@@ -1952,7 +1952,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
             && last_pruned_epoch.as_u64() + self.get_config().epochs_per_blob_prune
                 > end_epoch.as_u64()
         {
-            info!(self.log, "Blob sidecars are pruned");
+            debug!(self.log, "Blob sidecars are pruned");
             return Ok(());
         }


@@ -137,4 +137,13 @@ pub trait SlotClock: Send + Sync + Sized + Clone {
         slot_clock.set_current_time(freeze_at);
         slot_clock
     }

+    /// Returns the delay between the start of the slot and when a request for block components
+    /// missed over gossip in the current slot should be made via RPC.
+    ///
+    /// Currently set equal to 1/2 of the `unagg_attestation_production_delay`, but this may be
+    /// changed in the future.
+    fn single_lookup_delay(&self) -> Duration {
+        self.unagg_attestation_production_delay() / 2
+    }
 }
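
For a sense of scale: with mainnet timing, where `unagg_attestation_production_delay` is typically a third of the 12-second slot (4 seconds), this would put `single_lookup_delay` at 2 seconds; treat those concrete numbers as illustrative assumptions rather than values pinned down by this diff.
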


@@ -352,7 +352,7 @@ where
         spec: &ChainSpec,
     ) -> Result<Self, Error<T::Error>> {
         // Sanity check: the anchor must lie on an epoch boundary.
-        if anchor_block.slot() % E::slots_per_epoch() != 0 {
+        if anchor_state.slot() % E::slots_per_epoch() != 0 {
             return Err(Error::InvalidAnchor {
                 block_slot: anchor_block.slot(),
                 state_slot: anchor_state.slot(),
@@ -388,6 +388,7 @@ where
         let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot());

         let proto_array = ProtoArrayForkChoice::new::<E>(
+            current_slot,
             finalized_block_slot,
             finalized_block_state_root,
             *fc_store.justified_checkpoint(),


@@ -80,6 +80,7 @@ impl ForkChoiceTestDefinition {
         let junk_shuffling_id =
             AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
         let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            self.finalized_block_slot,
             self.finalized_block_slot,
             Hash256::zero(),
             self.justified_checkpoint,


@@ -345,6 +345,7 @@ pub struct ProtoArrayForkChoice {
 impl ProtoArrayForkChoice {
     #[allow(clippy::too_many_arguments)]
     pub fn new<E: EthSpec>(
+        current_slot: Slot,
         finalized_block_slot: Slot,
         finalized_block_state_root: Hash256,
         justified_checkpoint: Checkpoint,
@@ -380,7 +381,7 @@ impl ProtoArrayForkChoice {
         };

         proto_array
-            .on_block::<E>(block, finalized_block_slot)
+            .on_block::<E>(block, current_slot)
             .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?;

         Ok(Self {
@@ -983,6 +984,7 @@ mod test_compute_deltas {
         };

         let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            genesis_slot,
             genesis_slot,
             state_root,
             genesis_checkpoint,
@@ -1108,6 +1110,7 @@ mod test_compute_deltas {
         };

         let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            genesis_slot,
             genesis_slot,
             junk_state_root,
             genesis_checkpoint,


@@ -23,7 +23,7 @@ impl From<ArithError> for Error {
 ///
 /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If
 /// `state_root` is `None`, the root of `state` will be computed using a cached tree hash.
-/// Providing the `state_root` makes this function several orders of magniude faster.
+/// Providing the `state_root` makes this function several orders of magnitude faster.
 pub fn per_slot_processing<T: EthSpec>(
     state: &mut BeaconState<T>,
     state_root: Option<Hash256>,


@@ -6,13 +6,15 @@ use kzg::{KzgCommitment, KzgProof};
 use serde_derive::{Deserialize, Serialize};
 use ssz::Encode;
 use ssz_derive::{Decode, Encode};
-use ssz_types::VariableList;
+use ssz_types::{FixedVector, VariableList};
 use std::sync::Arc;
 use test_random_derive::TestRandom;
 use tree_hash_derive::TreeHash;

 /// Container of the data that identifies an individual blob.
-#[derive(Serialize, Deserialize, Encode, Decode, TreeHash, Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(
+    Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash,
+)]
 pub struct BlobIdentifier {
     pub block_root: Hash256,
     pub index: u64,
@@ -73,6 +75,8 @@ impl<T: EthSpec> Ord for BlobSidecar<T> {
 }

 pub type BlobSidecarList<T> = VariableList<Arc<BlobSidecar<T>>, <T as EthSpec>::MaxBlobsPerBlock>;
+pub type FixedBlobSidecarList<T> =
+    FixedVector<Option<Arc<BlobSidecar<T>>>, <T as EthSpec>::MaxBlobsPerBlock>;
 pub type Blobs<T> = VariableList<Blob<T>, <T as EthSpec>::MaxBlobsPerBlock>;

 impl<T: EthSpec> SignedRoot for BlobSidecar<T> {}


@@ -105,7 +105,7 @@ pub trait EthSpec:
     /*
      * New in Deneb
      */
-    type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin;
     type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq;

     /*
@@ -255,6 +255,11 @@ pub trait EthSpec:
     fn max_blobs_per_block() -> usize {
         Self::MaxBlobsPerBlock::to_usize()
     }

+    /// Returns the `BYTES_PER_BLOB` constant for this specification.
+    fn bytes_per_blob() -> usize {
+        Self::BytesPerBlob::to_usize()
+    }
 }

 /// Macro to inherit some type values from another EthSpec.


@@ -1,3 +1,4 @@
+use crate::blob_sidecar::BlobIdentifier;
 use crate::*;
 use bls::Signature;
 use derivative::Derivative;
@@ -248,6 +249,38 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
     pub fn canonical_root(&self) -> Hash256 {
         self.message().tree_hash_root()
     }

+    pub fn num_expected_blobs(&self) -> usize {
+        self.message()
+            .body()
+            .blob_kzg_commitments()
+            .map(|c| c.len())
+            .unwrap_or(0)
+    }
+
+    pub fn get_expected_blob_ids(&self, block_root: Option<Hash256>) -> Vec<BlobIdentifier> {
+        self.get_filtered_blob_ids(block_root, |_, _| true)
+    }
+
+    /// If the filter returns `true`, the id for the corresponding index and root will be included.
+    pub fn get_filtered_blob_ids(
+        &self,
+        block_root: Option<Hash256>,
+        filter: impl Fn(usize, Hash256) -> bool,
+    ) -> Vec<BlobIdentifier> {
+        let block_root = block_root.unwrap_or_else(|| self.canonical_root());
+        let num_blobs_expected = self.num_expected_blobs();
+        let mut blob_ids = Vec::with_capacity(num_blobs_expected);
+        for i in 0..num_blobs_expected {
+            if filter(i, block_root) {
+                blob_ids.push(BlobIdentifier {
+                    block_root,
+                    index: i as u64,
+                });
+            }
+        }
+        blob_ids
+    }
 }

 // We can convert pre-Bellatrix blocks without payloads into blocks with payloads.
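
The `filter` hook exists so callers can build requests for only the blobs they are still missing. A self-contained sketch of the same id-construction loop; `BlobId` and the `have` bitmap below are stand-ins for illustration, not types from this diff:

#[derive(Debug)]
struct BlobId {
    block_root: u64, // stand-in for Hash256
    index: u64,
}

/// Mirrors get_filtered_blob_ids: keep an id for each expected index that passes the filter.
fn filtered_ids(block_root: u64, expected: usize, filter: impl Fn(usize) -> bool) -> Vec<BlobId> {
    (0..expected)
        .filter(|i| filter(*i))
        .map(|i| BlobId { block_root, index: i as u64 })
        .collect()
}

fn main() {
    // Blobs 0 and 2 already arrived over gossip; request only indices 1 and 3.
    let have = [true, false, true, false];
    let missing = filtered_ids(0xabc, have.len(), |i| !have[i]);
    println!("{missing:?}");
}
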


@@ -4,8 +4,21 @@ use smallvec::smallvec;

 impl<N: Unsigned + Clone> TestRandom for BitList<N> {
     fn random_for_test(rng: &mut impl RngCore) -> Self {
-        let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)];
+        let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8);
+        let mut raw_bytes = smallvec![0; initial_len];
         rng.fill_bytes(&mut raw_bytes);

+        let non_zero_bytes = raw_bytes
+            .iter()
+            .enumerate()
+            .rev()
+            .find_map(|(i, byte)| (*byte > 0).then_some(i + 1))
+            .unwrap_or(0);
+
+        if non_zero_bytes < initial_len {
+            raw_bytes.truncate(non_zero_bytes);
+        }
+
         Self::from_bytes(raw_bytes).expect("we generate a valid BitList")
     }
 }