add processing and processed caching to the DA checker (#4732)

* add processing and processed caching to the DA checker

* move processing cache out of critical cache

* get it compiling

* fix lints

* add docs to `AvailabilityView`

* some self review

* fix lints

* fix beacon chain tests

* cargo fmt

* make availability view easier to implement, start on testing

* move child component cache and finish test

* cargo fix

* cargo fix

* cargo fix

* fmt and lint

* make blob commitments not optional, rename some caches, add missing blobs struct

* Update beacon_node/beacon_chain/src/data_availability_checker/processing_cache.rs

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>

* marks review feedback and other general cleanup

* cargo fix

* improve availability view docs

* some renames

* some renames and docs

* fix should delay lookup logic

* get rid of some wrapper methods

* fix up single lookup changes

* add a couple docs

* add single blob merge method and improve process_... docs

* update some names

* lints

* fix merge

* remove blob indices from lookup creation log

* remove blob indices from lookup creation log

* delayed lookup logging improvement

* check fork choice before doing any blob processing

* remove unused dep

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/beacon_chain/src/data_availability_checker/availability_view.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* Update beacon_node/network/src/sync/block_lookups/delayed_lookup.rs

Co-authored-by: Michael Sproul <micsproul@gmail.com>

* remove duplicate deps

* use gen range in random blobs generator

* rename processing cache fields

* require block root in rpc block construction and check block root consistency

* send peers as vec in single message

* spawn delayed lookup service from network beacon processor

* fix tests

---------

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
Co-authored-by: Michael Sproul <micsproul@gmail.com>
This commit is contained in:
realbigsean 2023-10-03 09:59:33 -04:00 committed by GitHub
parent 67aeb6bf6b
commit c7ddf1f0b1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
38 changed files with 1894 additions and 1190 deletions

68
Cargo.lock generated
View File

@ -197,30 +197,29 @@ dependencies = [
[[package]]
name = "anstream"
version = "0.3.2"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is-terminal",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.1"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"
checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]]
name = "anstyle-parse"
version = "0.2.1"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
dependencies = [
"utf8parse",
]
@ -236,9 +235,9 @@ dependencies = [
[[package]]
name = "anstyle-wincon"
version = "1.0.2"
version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c"
checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
dependencies = [
"anstyle",
"windows-sys 0.48.0",
@ -572,7 +571,7 @@ name = "beacon-api-client"
version = "0.1.0"
source = "git+https://github.com/ralexstokes/beacon-api-client?rev=7f28993615fde52d563dd601a0511c34fe9b7c38#7f28993615fde52d563dd601a0511c34fe9b7c38"
dependencies = [
"clap 4.3.21",
"clap 4.4.6",
"ethereum-consensus",
"http",
"itertools",
@ -1161,20 +1160,19 @@ dependencies = [
[[package]]
name = "clap"
version = "4.3.21"
version = "4.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd"
checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956"
dependencies = [
"clap_builder",
"clap_derive",
"once_cell",
]
[[package]]
name = "clap_builder"
version = "4.3.21"
version = "4.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa"
checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45"
dependencies = [
"anstream",
"anstyle",
@ -1184,9 +1182,9 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.3.12"
version = "4.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050"
checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873"
dependencies = [
"heck",
"proc-macro2",
@ -1196,9 +1194,9 @@ dependencies = [
[[package]]
name = "clap_lex"
version = "0.5.0"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961"
[[package]]
name = "clap_utils"
@ -3472,6 +3470,15 @@ dependencies = [
"hmac 0.8.1",
]
[[package]]
name = "home"
version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb"
dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "hostname"
version = "0.3.1"
@ -3905,17 +3912,6 @@ version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6"
[[package]]
name = "is-terminal"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
"hermit-abi 0.3.2",
"rustix 0.38.13",
"windows-sys 0.48.0",
]
[[package]]
name = "itertools"
version = "0.10.5"
@ -5351,6 +5347,7 @@ dependencies = [
"lighthouse_metrics",
"lighthouse_network",
"logging",
"lru 0.7.8",
"lru_cache",
"matches",
"num_cpus",
@ -6101,9 +6098,9 @@ dependencies = [
[[package]]
name = "prettyplease"
version = "0.2.12"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62"
checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d"
dependencies = [
"proc-macro2",
"syn 2.0.37",
@ -9075,13 +9072,14 @@ checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc"
[[package]]
name = "which"
version = "4.4.0"
version = "4.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269"
checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7"
dependencies = [
"either",
"libc",
"home",
"once_cell",
"rustix 0.38.13",
]
[[package]]

View File

@ -11,7 +11,7 @@ use crate::blob_verification::{self, GossipBlobError, GossipVerifiedBlob};
use crate::block_times_cache::BlockTimesCache;
use crate::block_verification::POS_PANDA_BANNER;
use crate::block_verification::{
check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy, get_block_root,
check_block_is_finalized_checkpoint_or_descendant, check_block_relevancy,
signature_verify_chain_segment, BlockError, ExecutionPendingBlock, GossipVerifiedBlock,
IntoExecutionPendingBlock,
};
@ -477,8 +477,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
pub validator_monitor: RwLock<ValidatorMonitor<T::EthSpec>>,
/// The slot at which blocks are downloaded back to.
pub genesis_backfill_slot: Slot,
// Provides a KZG verification and temporary storage for blocks and blobs as
// they are collected and combined.
/// Provides a KZG verification and temporary storage for blocks and blobs as
/// they are collected and combined.
pub data_availability_checker: Arc<DataAvailabilityChecker<T>>,
/// The KZG trusted setup used by this chain.
pub kzg: Option<Arc<Kzg<<T::EthSpec as EthSpec>::Kzg>>>,
@ -2552,7 +2552,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
});
}
let block_root = get_block_root(block.as_block());
let block_root = block.block_root();
if let Some((child_parent_root, child_slot)) = children.get(i) {
// If this block has a child in this chain segment, ensure that its parent root matches
@ -2791,11 +2791,97 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.map_err(BeaconChainError::TokioJoin)?
}
pub async fn process_blob(
/// Cache the blob in the processing cache, process it, then evict it from the cache if it was
/// imported or errors.
pub async fn process_gossip_blob(
self: &Arc<Self>,
blob: GossipVerifiedBlob<T>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
self.check_gossip_blob_availability_and_import(blob).await
let block_root = blob.block_root();
// If this block has already been imported to forkchoice it must have been available, so
// we don't need to process its blobs again.
if self
.canonical_head
.fork_choice_read_lock()
.contains_block(&block_root)
{
return Err(BlockError::BlockIsAlreadyKnown);
}
self.data_availability_checker
.notify_gossip_blob(blob.as_blob().slot, block_root, &blob);
let r = self.check_gossip_blob_availability_and_import(blob).await;
self.remove_notified(&block_root, r)
}
/// Cache the blobs in the processing cache, process it, then evict it from the cache if it was
/// imported or errors.
pub async fn process_rpc_blobs(
self: &Arc<Self>,
slot: Slot,
block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
// If this block has already been imported to forkchoice it must have been available, so
// we don't need to process its blobs again.
if self
.canonical_head
.fork_choice_read_lock()
.contains_block(&block_root)
{
return Err(BlockError::BlockIsAlreadyKnown);
}
self.data_availability_checker
.notify_rpc_blobs(slot, block_root, &blobs);
let r = self
.check_rpc_blob_availability_and_import(slot, block_root, blobs)
.await;
self.remove_notified(&block_root, r)
}
/// Remove any block components from the *processing cache* if we no longer require them. If the
/// block was imported full or erred, we no longer require them.
fn remove_notified(
&self,
block_root: &Hash256,
r: Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
let has_missing_components =
matches!(r, Ok(AvailabilityProcessingStatus::MissingComponents(_, _)));
if !has_missing_components {
self.data_availability_checker.remove_notified(block_root);
}
r
}
/// Wraps `process_block` in logic to cache the block's commitments in the processing cache
/// and evict if the block was imported or erred.
pub async fn process_block_with_early_caching<B: IntoExecutionPendingBlock<T>>(
self: &Arc<Self>,
block_root: Hash256,
unverified_block: B,
notify_execution_layer: NotifyExecutionLayer,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
if let Ok(commitments) = unverified_block
.block()
.message()
.body()
.blob_kzg_commitments()
{
self.data_availability_checker.notify_block_commitments(
unverified_block.block().slot(),
block_root,
commitments.clone(),
);
};
let r = self
.process_block(block_root, unverified_block, notify_execution_layer, || {
Ok(())
})
.await;
self.remove_notified(&block_root, r)
}
/// Returns `Ok(block_root)` if the given `unverified_block` was successfully verified and
@ -2961,7 +3047,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Checks if the block is available, and imports immediately if so, otherwise caches the block
/// in the data availability checker.
pub async fn check_block_availability_and_import(
async fn check_block_availability_and_import(
self: &Arc<Self>,
block: AvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
@ -2974,7 +3060,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Checks if the provided blob can make any cached blocks available, and imports immediately
/// if so, otherwise caches the blob in the data availability checker.
pub async fn check_gossip_blob_availability_and_import(
async fn check_gossip_blob_availability_and_import(
self: &Arc<Self>,
blob: GossipVerifiedBlob<T>,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
@ -2986,7 +3072,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// Checks if the provided blobs can make any cached blocks available, and imports immediately
/// if so, otherwise caches the blob in the data availability checker.
pub async fn check_rpc_blob_availability_and_import(
async fn check_rpc_blob_availability_and_import(
self: &Arc<Self>,
slot: Slot,
block_root: Hash256,
@ -3238,7 +3324,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// If the write fails, revert fork choice to the version from disk, else we can
// end up with blocks in fork choice that are missing from disk.
// See https://github.com/sigp/lighthouse/issues/2028
let (signed_block, blobs) = signed_block.deconstruct();
let (_, signed_block, blobs) = signed_block.deconstruct();
let block = signed_block.message();
ops.extend(
confirmed_state_roots
@ -5250,7 +5336,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
return Ok(());
}
// Fetch payoad attributes from the execution layer's cache, or compute them from scratch
// Fetch payload attributes from the execution layer's cache, or compute them from scratch
// if no matching entry is found. This saves recomputing the withdrawals which can take
// considerable time to compute if a state load is required.
let head_root = forkchoice_update_params.head_root;

View File

@ -9,11 +9,12 @@ use crate::beacon_chain::{
use crate::block_verification::cheap_state_advance_to_obtain_committees;
use crate::data_availability_checker::AvailabilityCheckError;
use crate::kzg_utils::{validate_blob, validate_blobs};
use crate::BeaconChainError;
use crate::{metrics, BeaconChainError};
use kzg::Kzg;
use slog::{debug, warn};
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
use tree_hash::TreeHash;
use types::blob_sidecar::BlobIdentifier;
use types::{
BeaconStateError, BlobSidecar, BlobSidecarList, CloneConfig, EthSpec, Hash256,
@ -172,6 +173,9 @@ impl<T: BeaconChainTypes> GossipVerifiedBlob<T> {
pub fn to_blob(self) -> Arc<BlobSidecar<T::EthSpec>> {
self.blob.message
}
pub fn as_blob(&self) -> &BlobSidecar<T::EthSpec> {
&self.blob.message
}
pub fn signed_blob(&self) -> SignedBlobSidecar<T::EthSpec> {
self.blob.clone()
}
@ -203,6 +207,8 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
});
}
let blob_root = get_blob_root(&signed_blob_sidecar);
// Verify that the sidecar is not from a future slot.
let latest_permissible_slot = chain
.slot_clock
@ -393,7 +399,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
.ok_or_else(|| GossipBlobError::UnknownValidator(proposer_index as u64))?;
signed_blob_sidecar.verify_signature(
None,
Some(blob_root),
pubkey,
&fork,
chain.genesis_validators_root,
@ -473,6 +479,15 @@ impl<T: EthSpec> KzgVerifiedBlob<T> {
}
}
#[cfg(test)]
impl<T: EthSpec> KzgVerifiedBlob<T> {
pub fn new(blob: BlobSidecar<T>) -> Self {
Self {
blob: Arc::new(blob),
}
}
}
/// Complete kzg verification for a `GossipVerifiedBlob`.
///
/// Returns an error if the kzg verification check fails.
@ -518,3 +533,16 @@ pub fn verify_kzg_for_blob_list<T: EthSpec>(
Err(AvailabilityCheckError::KzgVerificationFailed)
}
}
/// Returns the canonical root of the given `blob`.
///
/// Use this function to ensure that we report the blob hashing time Prometheus metric.
pub fn get_blob_root<E: EthSpec>(blob: &SignedBlobSidecar<E>) -> Hash256 {
let blob_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOB_ROOT);
let blob_root = blob.message.tree_hash_root();
metrics::stop_timer(blob_root_timer);
blob_root
}

View File

@ -827,7 +827,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
drop(fork_choice_read_lock);
let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
let (parent_block, block) = verify_parent_block_is_known(chain, block)?;
let (parent_block, block) = verify_parent_block_is_known(block_root, chain, block)?;
// Track the number of skip slots between the block and its parent.
metrics::set_gauge(
@ -1085,7 +1085,10 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
if signature_verifier.verify().is_ok() {
Ok(Self {
block: MaybeAvailableBlock::AvailabilityPending(block),
block: MaybeAvailableBlock::AvailabilityPending {
block_root: from.block_root,
block,
},
block_root: from.block_root,
parent: Some(parent),
consensus_context,
@ -1156,7 +1159,10 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
.map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
let maybe_available = chain
.data_availability_checker
.check_rpc_block_availability(RpcBlock::new_without_blobs(self.clone()))
.check_rpc_block_availability(RpcBlock::new_without_blobs(
Some(block_root),
self.clone(),
))
.map_err(|e| {
BlockSlashInfo::SignatureNotChecked(
self.signed_block_header(),
@ -1756,6 +1762,7 @@ pub fn get_block_root<E: EthSpec>(block: &SignedBeaconBlock<E>) -> Hash256 {
/// fork choice.
#[allow(clippy::type_complexity)]
fn verify_parent_block_is_known<T: BeaconChainTypes>(
block_root: Hash256,
chain: &BeaconChain<T>,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
) -> Result<(ProtoBlock, Arc<SignedBeaconBlock<T::EthSpec>>), BlockError<T::EthSpec>> {
@ -1767,6 +1774,7 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
Ok((proto_block, block))
} else {
Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs(
Some(block_root),
block,
)))
}

View File

@ -3,7 +3,7 @@ use crate::block_verification::BlockError;
use crate::data_availability_checker::AvailabilityCheckError;
pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock};
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::{data_availability_checker, GossipVerifiedBlock, PayloadVerificationOutcome};
use crate::{get_block_root, GossipVerifiedBlock, PayloadVerificationOutcome};
use derivative::Derivative;
use ssz_derive::{Decode, Encode};
use ssz_types::VariableList;
@ -35,9 +35,16 @@ use types::{
#[derive(Debug, Clone, Derivative)]
#[derivative(Hash(bound = "E: EthSpec"))]
pub struct RpcBlock<E: EthSpec> {
block_root: Hash256,
block: RpcBlockInner<E>,
}
impl<E: EthSpec> RpcBlock<E> {
pub fn block_root(&self) -> Hash256 {
self.block_root
}
}
/// Note: This variant is intentionally private because we want to safely construct the
/// internal variants after applying consistency checks to ensure that the block and blobs
/// are consistent with respect to each other.
@ -53,8 +60,14 @@ enum RpcBlockInner<E: EthSpec> {
impl<E: EthSpec> RpcBlock<E> {
/// Constructs a `Block` variant.
pub fn new_without_blobs(block: Arc<SignedBeaconBlock<E>>) -> Self {
pub fn new_without_blobs(
block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<E>>,
) -> Self {
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));
Self {
block_root,
block: RpcBlockInner::Block(block),
}
}
@ -62,20 +75,48 @@ impl<E: EthSpec> RpcBlock<E> {
/// Constructs a new `BlockAndBlobs` variant after making consistency
/// checks between the provided blocks and blobs.
pub fn new(
block_root: Option<Hash256>,
block: Arc<SignedBeaconBlock<E>>,
blobs: Option<BlobSidecarList<E>>,
) -> Result<Self, AvailabilityCheckError> {
if let Some(blobs) = blobs.as_ref() {
data_availability_checker::consistency_checks(&block, blobs)?;
let block_root = block_root.unwrap_or_else(|| get_block_root(&block));
if let (Some(blobs), Ok(block_commitments)) = (
blobs.as_ref(),
block.message().body().blob_kzg_commitments(),
) {
if blobs.len() != block_commitments.len() {
return Err(AvailabilityCheckError::MissingBlobs);
}
for (blob, &block_commitment) in blobs.iter().zip(block_commitments.iter()) {
let blob_block_root = blob.block_root;
if blob_block_root != block_root {
return Err(AvailabilityCheckError::InconsistentBlobBlockRoots {
block_root,
blob_block_root,
});
}
let blob_commitment = blob.kzg_commitment;
if blob_commitment != block_commitment {
return Err(AvailabilityCheckError::KzgCommitmentMismatch {
block_commitment,
blob_commitment,
});
}
}
}
let inner = match blobs {
Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs),
None => RpcBlockInner::Block(block),
};
Ok(Self { block: inner })
Ok(Self {
block_root,
block: inner,
})
}
pub fn new_from_fixed(
block_root: Hash256,
block: Arc<SignedBeaconBlock<E>>,
blobs: FixedBlobSidecarList<E>,
) -> Result<Self, AvailabilityCheckError> {
@ -88,13 +129,20 @@ impl<E: EthSpec> RpcBlock<E> {
} else {
Some(VariableList::from(filtered))
};
Self::new(block, blobs)
Self::new(Some(block_root), block, blobs)
}
pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<E>>, Option<BlobSidecarList<E>>) {
pub fn deconstruct(
self,
) -> (
Hash256,
Arc<SignedBeaconBlock<E>>,
Option<BlobSidecarList<E>>,
) {
let block_root = self.block_root();
match self.block {
RpcBlockInner::Block(block) => (block, None),
RpcBlockInner::BlockAndBlobs(block, blobs) => (block, Some(blobs)),
RpcBlockInner::Block(block) => (block_root, block, None),
RpcBlockInner::BlockAndBlobs(block, blobs) => (block_root, block, Some(blobs)),
}
}
pub fn n_blobs(&self) -> usize {
@ -105,18 +153,6 @@ impl<E: EthSpec> RpcBlock<E> {
}
}
impl<E: EthSpec> From<Arc<SignedBeaconBlock<E>>> for RpcBlock<E> {
fn from(value: Arc<SignedBeaconBlock<E>>) -> Self {
Self::new_without_blobs(value)
}
}
impl<E: EthSpec> From<SignedBeaconBlock<E>> for RpcBlock<E> {
fn from(value: SignedBeaconBlock<E>) -> Self {
Self::new_without_blobs(Arc::new(value))
}
}
/// A block that has gone through all pre-deneb block processing checks including block processing
/// and execution by an EL client. This block hasn't necessarily completed data availability checks.
///
@ -146,13 +182,14 @@ impl<E: EthSpec> ExecutedBlock<E> {
payload_verification_outcome,
))
}
MaybeAvailableBlock::AvailabilityPending(pending_block) => {
Self::AvailabilityPending(AvailabilityPendingExecutedBlock::new(
pending_block,
import_data,
payload_verification_outcome,
))
}
MaybeAvailableBlock::AvailabilityPending {
block_root: _,
block: pending_block,
} => Self::AvailabilityPending(AvailabilityPendingExecutedBlock::new(
pending_block,
import_data,
payload_verification_outcome,
)),
}
}
@ -235,6 +272,10 @@ impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> {
}
}
pub fn as_block(&self) -> &SignedBeaconBlock<E> {
&self.block
}
pub fn num_blobs_expected(&self) -> usize {
self.block
.message()
@ -242,20 +283,6 @@ impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> {
.blob_kzg_commitments()
.map_or(0, |commitments| commitments.len())
}
pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> {
let block_root = self.import_data.block_root;
self.block
.get_filtered_blob_ids(Some(block_root), |_, _| true)
}
pub fn get_filtered_blob_ids(
&self,
filter: impl Fn(usize, Hash256) -> bool,
) -> Vec<BlobIdentifier> {
self.block
.get_filtered_blob_ids(Some(self.import_data.block_root), filter)
}
}
#[derive(Debug, PartialEq, Encode, Decode, Clone)]
@ -358,7 +385,7 @@ impl<E: EthSpec> AsBlock<E> for Arc<SignedBeaconBlock<E>> {
}
fn into_rpc_block(self) -> RpcBlock<E> {
RpcBlock::new_without_blobs(self)
RpcBlock::new_without_blobs(None, self)
}
}
@ -384,13 +411,19 @@ impl<E: EthSpec> AsBlock<E> for MaybeAvailableBlock<E> {
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self {
MaybeAvailableBlock::Available(block) => block.as_block(),
MaybeAvailableBlock::AvailabilityPending(block) => block,
MaybeAvailableBlock::AvailabilityPending {
block_root: _,
block,
} => block,
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
match &self {
MaybeAvailableBlock::Available(block) => block.block_cloned(),
MaybeAvailableBlock::AvailabilityPending(block) => block.clone(),
MaybeAvailableBlock::AvailabilityPending {
block_root: _,
block,
} => block.clone(),
}
}
fn canonical_root(&self) -> Hash256 {
@ -400,7 +433,9 @@ impl<E: EthSpec> AsBlock<E> for MaybeAvailableBlock<E> {
fn into_rpc_block(self) -> RpcBlock<E> {
match self {
MaybeAvailableBlock::Available(available_block) => available_block.into_rpc_block(),
MaybeAvailableBlock::AvailabilityPending(block) => RpcBlock::new_without_blobs(block),
MaybeAvailableBlock::AvailabilityPending { block_root, block } => {
RpcBlock::new_without_blobs(Some(block_root), block)
}
}
}
}
@ -443,14 +478,17 @@ impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
}
fn into_rpc_block(self) -> RpcBlock<E> {
let (block, blobs_opt) = self.deconstruct();
let (block_root, block, blobs_opt) = self.deconstruct();
// Circumvent the constructor here, because an Available block will have already had
// consistency checks performed.
let inner = match blobs_opt {
None => RpcBlockInner::Block(block),
Some(blobs) => RpcBlockInner::BlockAndBlobs(block, blobs),
};
RpcBlock { block: inner }
RpcBlock {
block_root,
block: inner,
}
}
}

View File

@ -926,7 +926,7 @@ where
validator_monitor: RwLock::new(validator_monitor),
genesis_backfill_slot,
data_availability_checker: Arc::new(
DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, self.spec)
DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, &log, self.spec)
.map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?,
),
kzg,

View File

@ -1,27 +1,36 @@
use crate::blob_verification::{
verify_kzg_for_blob, verify_kzg_for_blob_list, GossipVerifiedBlob, KzgVerifiedBlob,
};
use crate::blob_verification::{verify_kzg_for_blob, verify_kzg_for_blob_list, GossipVerifiedBlob};
use crate::block_verification_types::{
AvailabilityPendingExecutedBlock, AvailableExecutedBlock, RpcBlock,
};
pub use crate::data_availability_checker::availability_view::{
AvailabilityView, GetCommitment, GetCommitments,
};
pub use crate::data_availability_checker::child_components::ChildComponents;
use crate::data_availability_checker::overflow_lru_cache::OverflowLRUCache;
use crate::data_availability_checker::processing_cache::ProcessingCache;
use crate::{BeaconChain, BeaconChainTypes, BeaconStore};
use kzg::Error as KzgError;
use kzg::Kzg;
use slog::{debug, error};
use kzg::{Error as KzgError, KzgCommitment};
use parking_lot::RwLock;
pub use processing_cache::ProcessingComponents;
use slasher::test_utils::E;
use slog::{debug, error, Logger};
use slot_clock::SlotClock;
use ssz_types::{Error, VariableList};
use std::collections::HashSet;
use ssz_types::Error;
use std::fmt;
use std::fmt::Debug;
use std::sync::Arc;
use strum::IntoStaticStr;
use task_executor::TaskExecutor;
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
mod availability_view;
mod child_components;
mod overflow_lru_cache;
mod processing_cache;
/// The LRU Cache stores `PendingComponents` which can store up to
/// `MAX_BLOBS_PER_BLOCK = 6` blobs each. A `BlobSidecar` is 0.131256 MB. So
@ -35,30 +44,20 @@ pub enum AvailabilityCheckError {
Kzg(KzgError),
KzgNotInitialized,
KzgVerificationFailed,
SszTypes(ssz_types::Error),
NumBlobsMismatch {
num_kzg_commitments: usize,
num_blobs: usize,
},
MissingBlobs,
KzgCommitmentMismatch {
blob_index: u64,
blob_commitment: KzgCommitment,
block_commitment: KzgCommitment,
},
Unexpected,
SszTypes(ssz_types::Error),
MissingBlobs,
BlobIndexInvalid(u64),
UnorderedBlobs {
blob_index: u64,
expected_index: u64,
},
StoreError(store::Error),
DecodeError(ssz::DecodeError),
BlockBlobRootMismatch {
InconsistentBlobBlockRoots {
block_root: Hash256,
blob_block_root: Hash256,
},
BlockBlobSlotMismatch {
block_slot: Slot,
blob_slot: Slot,
},
}
impl From<ssz_types::Error> for AvailabilityCheckError {
@ -84,9 +83,11 @@ impl From<ssz::DecodeError> for AvailabilityCheckError {
/// `DataAvailabilityChecker` is responsible for KZG verification of block components as well as
/// checking whether a "availability check" is required at all.
pub struct DataAvailabilityChecker<T: BeaconChainTypes> {
processing_cache: RwLock<ProcessingCache<T::EthSpec>>,
availability_cache: Arc<OverflowLRUCache<T>>,
slot_clock: T::SlotClock,
kzg: Option<Arc<Kzg<<T::EthSpec as EthSpec>::Kzg>>>,
log: Logger,
spec: ChainSpec,
}
@ -116,12 +117,15 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
slot_clock: T::SlotClock,
kzg: Option<Arc<Kzg<<T::EthSpec as EthSpec>::Kzg>>>,
store: BeaconStore<T>,
log: &Logger,
spec: ChainSpec,
) -> Result<Self, AvailabilityCheckError> {
let overflow_cache = OverflowLRUCache::new(OVERFLOW_LRU_CAPACITY, store)?;
Ok(Self {
processing_cache: <_>::default(),
availability_cache: Arc::new(overflow_cache),
slot_clock,
log: log.clone(),
kzg,
spec,
})
@ -129,51 +133,89 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
/// Checks if the given block root is cached.
pub fn has_block(&self, block_root: &Hash256) -> bool {
self.availability_cache.has_block(block_root)
self.processing_cache.read().has_block(block_root)
}
/// Checks which blob ids are still required for a given block root, taking any cached
/// components into consideration.
pub fn get_missing_blob_ids_checking_cache(
/// Get the processing info for a block.
pub fn get_processing_components(
&self,
block_root: Hash256,
) -> Option<Vec<BlobIdentifier>> {
let (block, blob_indices) = self.availability_cache.get_missing_blob_info(block_root);
self.get_missing_blob_ids(block_root, block.as_ref(), Some(blob_indices))
) -> Option<ProcessingComponents<T::EthSpec>> {
self.processing_cache.read().get(&block_root).cloned()
}
/// A `None` indicates blobs are not required.
///
/// If there's no block, all possible ids will be returned that don't exist in the given blobs.
/// If there no blobs, all possible ids will be returned.
pub fn get_missing_blob_ids(
pub fn get_missing_blob_ids<V: AvailabilityView<T::EthSpec>>(
&self,
block_root: Hash256,
block_opt: Option<&Arc<SignedBeaconBlock<T::EthSpec>>>,
blobs_opt: Option<HashSet<usize>>,
) -> Option<Vec<BlobIdentifier>> {
let epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch());
availability_view: &V,
) -> MissingBlobs {
let Some(current_slot) = self.slot_clock.now_or_genesis() else {
error!(
self.log,
"Failed to read slot clock when checking for missing blob ids"
);
return MissingBlobs::BlobsNotRequired;
};
self.da_check_required_for_epoch(epoch).then(|| {
block_opt
.map(|block| {
block.get_filtered_blob_ids(Some(block_root), |i, _| {
blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i))
})
})
.unwrap_or_else(|| {
let mut blob_ids = Vec::with_capacity(T::EthSpec::max_blobs_per_block());
for i in 0..T::EthSpec::max_blobs_per_block() {
if blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i)) {
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
if self.da_check_required_for_epoch(current_epoch) {
match availability_view.get_cached_block() {
Some(cached_block) => {
let block_commitments = cached_block.get_commitments();
let blob_commitments = availability_view.get_cached_blobs();
let num_blobs_expected = block_commitments.len();
let mut blob_ids = Vec::with_capacity(num_blobs_expected);
// Zip here will always limit the number of iterations to the size of
// `block_commitment` because `blob_commitments` will always be populated
// with `Option` values up to `MAX_BLOBS_PER_BLOCK`.
for (index, (block_commitment, blob_commitment_opt)) in block_commitments
.into_iter()
.zip(blob_commitments.iter())
.enumerate()
{
// Always add a missing blob.
let Some(blob_commitment) = blob_commitment_opt else {
blob_ids.push(BlobIdentifier {
block_root,
index: i as u64,
index: index as u64,
});
continue;
};
let blob_commitment = *blob_commitment.get_commitment();
// Check for consistency, but this shouldn't happen, an availability view
// should guaruntee consistency.
if blob_commitment != block_commitment {
error!(self.log,
"Inconsistent availability view";
"block_root" => ?block_root,
"block_commitment" => ?block_commitment,
"blob_commitment" => ?blob_commitment,
"index" => index
);
blob_ids.push(BlobIdentifier {
block_root,
index: index as u64,
});
}
}
blob_ids
})
})
MissingBlobs::KnownMissing(blob_ids)
}
None => {
MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::<E>(block_root))
}
}
} else {
MissingBlobs::BlobsNotRequired
}
}
/// Get a blob from the availability cache.
@ -200,7 +242,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
return Err(AvailabilityCheckError::KzgNotInitialized);
};
self.availability_cache
.put_kzg_verified_blobs(block_root, &verified_blobs)
.put_kzg_verified_blobs(block_root, verified_blobs)
}
/// This first validates the KZG commitments included in the blob sidecar.
@ -221,7 +263,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
};
self.availability_cache
.put_kzg_verified_blobs(kzg_verified_blob.block_root(), &[kzg_verified_blob])
.put_kzg_verified_blobs(kzg_verified_blob.block_root(), vec![kzg_verified_blob])
}
/// Check if we have all the blobs for a block. Returns `Availability` which has information
@ -240,13 +282,14 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
&self,
block: RpcBlock<T::EthSpec>,
) -> Result<MaybeAvailableBlock<T::EthSpec>, AvailabilityCheckError> {
let (block, blobs) = block.deconstruct();
let (block_root, block, blobs) = block.deconstruct();
match blobs {
None => {
if self.blobs_required_for_block(&block) {
Ok(MaybeAvailableBlock::AvailabilityPending(block))
Ok(MaybeAvailableBlock::AvailabilityPending { block_root, block })
} else {
Ok(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: None,
}))
@ -264,6 +307,7 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
None
};
Ok(MaybeAvailableBlock::Available(AvailableBlock {
block_root,
block,
blobs: verified_blobs,
}))
@ -271,16 +315,104 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
}
}
/// Determines the blob requirements for a block. Answers the question: "Does this block require
/// blobs?".
/// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required.
/// If the block's epoch is from prior to the data availability boundary, no blobs are required.
fn blobs_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
let block_within_da_period = self.da_check_required_for_epoch(block.epoch());
let block_has_kzg_commitments = block
.message()
.body()
.blob_kzg_commitments()
.map_or(false, |commitments| !commitments.is_empty());
block_within_da_period && block_has_kzg_commitments
block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch())
}
/// Records a block's (unverified) KZG commitments in the processing cache.
///
/// Caching these helps us avoid duplicate block downloads and lets us work out
/// which blobs we still need to fetch for `block_root`.
pub fn notify_block_commitments(
    &self,
    slot: Slot,
    block_root: Hash256,
    commitments: KzgCommitments<T::EthSpec>,
) {
    let mut cache = self.processing_cache.write();
    let components = cache
        .entry(block_root)
        .or_insert_with(|| ProcessingComponents::new(slot));
    components.merge_block(commitments);
}
/// Records the (unverified) commitment of a single gossip blob in the processing
/// cache, keyed by `block_root` and the blob's own index.
///
/// Caching this helps us avoid duplicate blob downloads and informs our block and
/// blob download requirements.
pub fn notify_gossip_blob(
    &self,
    slot: Slot,
    block_root: Hash256,
    blob: &GossipVerifiedBlob<T>,
) {
    let sidecar = blob.as_blob();
    let blob_index = sidecar.index as usize;
    let commitment = sidecar.kzg_commitment;
    self.processing_cache
        .write()
        .entry(block_root)
        .or_insert_with(|| ProcessingComponents::new(slot))
        .merge_single_blob(blob_index, commitment);
}
/// Records the (unverified) commitments of a batch of RPC blobs in the processing
/// cache.
///
/// Caching these helps us avoid duplicate blob downloads and informs our block and
/// blob download requirements.
pub fn notify_rpc_blobs(
    &self,
    slot: Slot,
    block_root: Hash256,
    blobs: &FixedBlobSidecarList<T::EthSpec>,
) {
    // Gather each present blob's commitment into a fixed, index-addressed vector.
    let mut commitments = KzgCommitmentOpts::<T::EthSpec>::default();
    blobs.iter().flatten().for_each(|blob| {
        if let Some(slot_entry) = commitments.get_mut(blob.index as usize) {
            *slot_entry = Some(blob.kzg_commitment);
        }
    });
    self.processing_cache
        .write()
        .entry(block_root)
        .or_insert_with(|| ProcessingComponents::new(slot))
        .merge_blobs(commitments);
}
/// Clears the block and all blobs from the processing cache for a given root if they exist.
pub fn remove_notified(&self, block_root: &Hash256) {
    self.processing_cache.write().remove(block_root)
}
/// Gather all block roots for which we are not currently processing all components for the
/// given slot.
pub fn incomplete_processing_components(&self, slot: Slot) -> Vec<Hash256> {
    // Read lock only; the filtering logic lives in the processing cache itself.
    self.processing_cache
        .read()
        .incomplete_processing_components(slot)
}
/// Determines whether we are at least the `single_lookup_delay` duration into the given slot.
/// If we are not currently in the Deneb fork, this delay is not considered.
///
/// The `single_lookup_delay` is the duration we wait for blocks or blobs to arrive over
/// gossip before making single block or blob requests, minimizing the number of single
/// lookup requests we end up making.
pub fn should_delay_lookup(&self, slot: Slot) -> bool {
    // Pre-Deneb there are no blobs, so no reason to wait.
    if !self.is_deneb() {
        return false;
    }
    // Never delay lookups for slots that are already in the past.
    let Some(current_slot) = self.slot_clock.now() else {
        return false;
    };
    if current_slot > slot {
        return false;
    }
    // Delay only while we are still inside the configured window of the current slot.
    self.slot_clock
        .millis_from_current_slot_start()
        .map_or(false, |millis_into_slot| {
            millis_into_slot < self.slot_clock.single_lookup_delay()
        })
}
/// The epoch at which we require a data availability check in block processing.
@ -321,84 +453,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
}
}
/// Verifies a `SignedBeaconBlock` against a set of KZG verified blobs, producing an
/// `AvailableBlock` on success.
///
/// This does not check whether a block *should* have blobs; those checks should have been
/// completed when producing the `AvailabilityPendingBlock`.
pub fn make_available<T: EthSpec>(
    block: Arc<SignedBeaconBlock<T>>,
    blobs: Vec<KzgVerifiedBlob<T>>,
) -> Result<AvailableBlock<T>, AvailabilityCheckError> {
    // Strip the KZG-verification wrappers, then enforce the list-length bound.
    let sidecars: Vec<_> = blobs.into_iter().map(|verified| verified.to_blob()).collect();
    let blobs = VariableList::new(sidecars)?;
    // Ensure the sidecars actually correspond to this block before declaring availability.
    consistency_checks(&block, &blobs)?;
    Ok(AvailableBlock {
        block,
        blobs: Some(blobs),
    })
}
/// Makes the following checks to ensure that the list of blobs correspond block:
///
/// * Check that a block is post-deneb
/// * Checks that the number of blobs is equal to the length of kzg commitments in the list
/// * Checks that the index, slot, root and kzg_commitment in the block match the blobs in the correct order
///
/// Returns `Ok(())` if all consistency checks pass and an error otherwise.
pub fn consistency_checks<T: EthSpec>(
    block: &SignedBeaconBlock<T>,
    blobs: &[Arc<BlobSidecar<T>>],
) -> Result<(), AvailabilityCheckError> {
    // Pre-Deneb blocks have no commitments field: nothing to check.
    let Ok(block_kzg_commitments) = block.message().body().blob_kzg_commitments() else {
        return Ok(());
    };
    // The number of blobs must exactly match the number of commitments in the block.
    if blobs.len() != block_kzg_commitments.len() {
        return Err(AvailabilityCheckError::NumBlobsMismatch {
            num_kzg_commitments: block_kzg_commitments.len(),
            num_blobs: blobs.len(),
        });
    }
    // No commitments (and, per the check above, no blobs): trivially consistent.
    if block_kzg_commitments.is_empty() {
        return Ok(());
    }
    // Use the first blob's root as the reference root; computing `canonical_root` is a
    // hash of the whole block, so it is only done as a fallback.
    let block_root = blobs
        .first()
        .map(|blob| blob.block_root)
        .unwrap_or(block.canonical_root());
    // Blobs must be in index order and each must match its commitment in the block.
    for (index, (block_commitment, blob)) in
        block_kzg_commitments.iter().zip(blobs.iter()).enumerate()
    {
        let index = index as u64;
        if index != blob.index {
            return Err(AvailabilityCheckError::UnorderedBlobs {
                blob_index: blob.index,
                expected_index: index,
            });
        }
        if block_root != blob.block_root {
            return Err(AvailabilityCheckError::BlockBlobRootMismatch {
                block_root,
                blob_block_root: blob.block_root,
            });
        }
        if block.slot() != blob.slot {
            return Err(AvailabilityCheckError::BlockBlobSlotMismatch {
                block_slot: block.slot(),
                blob_slot: blob.slot,
            });
        }
        if *block_commitment != blob.kzg_commitment {
            return Err(AvailabilityCheckError::KzgCommitmentMismatch {
                blob_index: blob.index,
            });
        }
    }
    Ok(())
}
pub fn start_availability_cache_maintenance_service<T: BeaconChainTypes>(
executor: TaskExecutor,
chain: Arc<BeaconChain<T>>,
@ -487,6 +541,7 @@ async fn availability_cache_maintenance_service<T: BeaconChainTypes>(
/// A fully available block that is ready to be imported into fork choice.
#[derive(Clone, Debug, PartialEq)]
pub struct AvailableBlock<E: EthSpec> {
    // The canonical root of `block`, cached so callers need not re-hash.
    block_root: Hash256,
    block: Arc<SignedBeaconBlock<E>>,
    // `None` when the block requires no blobs (e.g. it has no commitments).
    blobs: Option<BlobSidecarList<E>>,
}
@ -503,9 +558,19 @@ impl<E: EthSpec> AvailableBlock<E> {
self.blobs.as_ref()
}
pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<E>>, Option<BlobSidecarList<E>>) {
let AvailableBlock { block, blobs } = self;
(block, blobs)
/// Consumes `self`, returning the block root, the block, and the blobs (if any).
pub fn deconstruct(
    self,
) -> (
    Hash256,
    Arc<SignedBeaconBlock<E>>,
    Option<BlobSidecarList<E>>,
) {
    let AvailableBlock {
        block_root,
        block,
        blobs,
    } = self;
    (block_root, block, blobs)
}
}
@ -516,5 +581,66 @@ pub enum MaybeAvailableBlock<E: EthSpec> {
/// post-4844 blocks, it contains a `SignedBeaconBlock` and a Blobs variant other than `Blobs::None`.
Available(AvailableBlock<E>),
/// This variant is not fully available and requires blobs to become fully available.
AvailabilityPending(Arc<SignedBeaconBlock<E>>),
AvailabilityPending {
block_root: Hash256,
block: Arc<SignedBeaconBlock<E>>,
},
}
/// The outcome of a missing-blob query: a definitive list, a conservative superset
/// (when the block itself is unknown), or an indication that no blobs are required.
#[derive(Debug, Clone)]
pub enum MissingBlobs {
    /// We know for certain these blobs are missing.
    KnownMissing(Vec<BlobIdentifier>),
    /// We think these blobs might be missing.
    PossibleMissing(Vec<BlobIdentifier>),
    /// Blobs are not required.
    BlobsNotRequired,
}
impl MissingBlobs {
    /// Builds the missing-blob state for a root whose block we have not yet seen:
    /// post-Deneb every blob id is possibly missing; pre-Deneb none are required.
    pub fn new_without_block(block_root: Hash256, is_deneb: bool) -> Self {
        if is_deneb {
            MissingBlobs::PossibleMissing(BlobIdentifier::get_all_blob_ids::<E>(block_root))
        } else {
            MissingBlobs::BlobsNotRequired
        }
    }
    /// Returns `true` when no blobs are outstanding.
    pub fn is_empty(&self) -> bool {
        match self {
            Self::KnownMissing(ids) | Self::PossibleMissing(ids) => ids.is_empty(),
            Self::BlobsNotRequired => true,
        }
    }
    /// Returns `true` if `blob_id` is among the (known or possible) missing blobs.
    pub fn contains(&self, blob_id: &BlobIdentifier) -> bool {
        match self {
            Self::KnownMissing(ids) | Self::PossibleMissing(ids) => ids.contains(blob_id),
            Self::BlobsNotRequired => false,
        }
    }
    /// Drops `blob_id` from the missing set, if present.
    pub fn remove(&mut self, blob_id: &BlobIdentifier) {
        if let Self::KnownMissing(ids) | Self::PossibleMissing(ids) = self {
            ids.retain(|id| id != blob_id);
        }
    }
    /// The indices of all (known or possible) missing blobs.
    pub fn indices(&self) -> Vec<u64> {
        match self {
            Self::KnownMissing(ids) | Self::PossibleMissing(ids) => {
                ids.iter().map(|id| id.index).collect()
            }
            Self::BlobsNotRequired => Vec::new(),
        }
    }
}
// Implement `From` rather than a manual `Into`: the standard blanket impl gives callers
// `Into<Vec<BlobIdentifier>>` for free, and this satisfies clippy's `from_over_into` lint.
impl From<MissingBlobs> for Vec<BlobIdentifier> {
    fn from(missing_blobs: MissingBlobs) -> Self {
        match missing_blobs {
            MissingBlobs::KnownMissing(v) => v,
            MissingBlobs::PossibleMissing(v) => v,
            MissingBlobs::BlobsNotRequired => vec![],
        }
    }
}

View File

@ -0,0 +1,566 @@
use super::child_components::ChildComponents;
use crate::blob_verification::KzgVerifiedBlob;
use crate::block_verification_types::AsBlock;
use crate::data_availability_checker::overflow_lru_cache::PendingComponents;
use crate::data_availability_checker::ProcessingComponents;
use crate::AvailabilityPendingExecutedBlock;
use kzg::KzgCommitment;
use ssz_types::FixedVector;
use std::sync::Arc;
use types::beacon_block_body::KzgCommitments;
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
/// Defines an interface for managing data availability with two key invariants:
///
/// 1. If we haven't seen a block yet, we will insert the first blob for a given (block_root, index)
/// but we won't insert subsequent blobs for the same (block_root, index) if they have a different
/// commitment.
/// 2. On block insertion, any non-matching blob commitments are evicted.
///
/// Types implementing this trait can be used for validating and managing availability
/// of blocks and blobs in a cache-like data structure.
pub trait AvailabilityView<E: EthSpec> {
    /// The type representing a block in the implementation.
    type BlockType: GetCommitments<E>;
    /// The type representing a blob in the implementation. Must implement `Clone`.
    type BlobType: Clone + GetCommitment<E>;
    /// Returns an immutable reference to the cached block.
    fn get_cached_block(&self) -> &Option<Self::BlockType>;
    /// Returns an immutable reference to the fixed vector of cached blobs.
    fn get_cached_blobs(&self) -> &FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>;
    /// Returns a mutable reference to the cached block.
    fn get_cached_block_mut(&mut self) -> &mut Option<Self::BlockType>;
    /// Returns a mutable reference to the fixed vector of cached blobs.
    fn get_cached_blobs_mut(
        &mut self,
    ) -> &mut FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>;
    /// Checks if a block exists in the cache.
    ///
    /// Returns:
    /// - `true` if a block exists.
    /// - `false` otherwise.
    fn block_exists(&self) -> bool {
        self.get_cached_block().is_some()
    }
    /// Checks if a blob exists at the given index in the cache.
    ///
    /// Returns:
    /// - `true` if a blob exists at the given index.
    /// - `false` otherwise (including out-of-range indices).
    fn blob_exists(&self, blob_index: usize) -> bool {
        self.get_cached_blobs()
            .get(blob_index)
            .map(|b| b.is_some())
            .unwrap_or(false)
    }
    /// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a
    /// block.
    ///
    /// This corresponds to the number of commitments that are present in a block.
    fn num_expected_blobs(&self) -> Option<usize> {
        self.get_cached_block()
            .as_ref()
            .map(|b| b.get_commitments().len())
    }
    /// Returns the number of blobs that have been received and are stored in the cache.
    fn num_received_blobs(&self) -> usize {
        self.get_cached_blobs().iter().flatten().count()
    }
    /// Inserts a block into the cache, replacing any existing one.
    fn insert_block(&mut self, block: Self::BlockType) {
        *self.get_cached_block_mut() = Some(block)
    }
    /// Inserts a blob at a specific index in the cache.
    ///
    /// Existing blob at the index will be replaced.
    fn insert_blob_at_index(&mut self, blob_index: usize, blob: Self::BlobType) {
        if let Some(b) = self.get_cached_blobs_mut().get_mut(blob_index) {
            *b = Some(blob);
        }
    }
    /// Merges a given set of blobs into the cache.
    ///
    /// Blobs are only inserted if:
    /// 1. The blob entry at the index is empty and no block exists.
    /// 2. The block exists and its commitment matches the blob's commitment.
    fn merge_blobs(&mut self, blobs: FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock>) {
        for (index, blob) in blobs.iter().cloned().enumerate() {
            let Some(blob) = blob else { continue };
            self.merge_single_blob(index, blob);
        }
    }
    /// Merges a single blob into the cache.
    ///
    /// Blobs are only inserted if:
    /// 1. The blob entry at the index is empty and no block exists, or
    /// 2. The block exists and its commitment matches the blob's commitment.
    fn merge_single_blob(&mut self, index: usize, blob: Self::BlobType) {
        let commitment = *blob.get_commitment();
        if let Some(cached_block) = self.get_cached_block() {
            // A block is present: accept the blob only if its commitment matches the
            // block's commitment at this index (invariant 2).
            let block_commitment_opt = cached_block.get_commitments().get(index).copied();
            if let Some(block_commitment) = block_commitment_opt {
                if block_commitment == commitment {
                    self.insert_blob_at_index(index, blob)
                }
            }
        } else if !self.blob_exists(index) {
            // No block yet: the first blob for this index wins; later conflicting
            // blobs for the same index are ignored (invariant 1).
            self.insert_blob_at_index(index, blob)
        }
    }
    /// Inserts a new block and revalidates the existing blobs against it.
    ///
    /// Blobs that don't match the new block's commitments are evicted.
    fn merge_block(&mut self, block: Self::BlockType) {
        self.insert_block(block);
        // Take all cached blobs and re-run them through `merge_single_blob`, which now
        // sees the block and drops any commitment mismatches.
        let reinsert = std::mem::take(self.get_cached_blobs_mut());
        self.merge_blobs(reinsert);
    }
    /// Checks if the block and all of its expected blobs are available in the cache.
    ///
    /// Returns `true` if both the block exists and the number of received blobs matches the number
    /// of expected blobs.
    fn is_available(&self) -> bool {
        if let Some(num_expected_blobs) = self.num_expected_blobs() {
            num_expected_blobs == self.num_received_blobs()
        } else {
            // No block: we cannot know how many blobs are expected.
            false
        }
    }
}
/// Implements the `AvailabilityView` trait for a given struct.
///
/// - `$struct_name`: The name of the struct for which to implement `AvailabilityView`.
/// - `$block_type`: The type to use for `BlockType` in the `AvailabilityView` trait.
/// - `$blob_type`: The type to use for `BlobType` in the `AvailabilityView` trait.
/// - `$block_field`: The field name in the struct that holds the cached block.
/// - `$blob_field`: The field name in the struct that holds the cached blobs.
#[macro_export]
macro_rules! impl_availability_view {
    ($struct_name:ident, $block_type:ty, $blob_type:ty, $block_field:ident, $blob_field:ident) => {
        impl<E: EthSpec> AvailabilityView<E> for $struct_name<E> {
            type BlockType = $block_type;
            type BlobType = $blob_type;
            // All four accessors simply forward to the named fields; the trait's
            // default methods provide the actual merge/availability logic.
            fn get_cached_block(&self) -> &Option<Self::BlockType> {
                &self.$block_field
            }
            fn get_cached_blobs(
                &self,
            ) -> &FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock> {
                &self.$blob_field
            }
            fn get_cached_block_mut(&mut self) -> &mut Option<Self::BlockType> {
                &mut self.$block_field
            }
            fn get_cached_blobs_mut(
                &mut self,
            ) -> &mut FixedVector<Option<Self::BlobType>, E::MaxBlobsPerBlock> {
                &mut self.$blob_field
            }
        }
    };
}
// Processing cache entries: unverified commitments only (no block/blob bodies).
impl_availability_view!(
    ProcessingComponents,
    KzgCommitments<E>,
    KzgCommitment,
    block_commitments,
    blob_commitments
);
// Availability (overflow LRU) cache entries: executed blocks and KZG-verified blobs.
impl_availability_view!(
    PendingComponents,
    AvailabilityPendingExecutedBlock<E>,
    KzgVerifiedBlob<E>,
    executed_block,
    verified_blobs
);
// Child-component lookup cache entries: raw downloaded blocks and blob sidecars.
impl_availability_view!(
    ChildComponents,
    Arc<SignedBeaconBlock<E>>,
    Arc<BlobSidecar<E>>,
    downloaded_block,
    downloaded_blobs
);
/// Abstracts over "block-like" types that can yield their full list of KZG commitments.
pub trait GetCommitments<E: EthSpec> {
    fn get_commitments(&self) -> KzgCommitments<E>;
}
/// Abstracts over "blob-like" types that can yield a single KZG commitment.
pub trait GetCommitment<E: EthSpec> {
    fn get_commitment(&self) -> &KzgCommitment;
}
// These implementations are required to implement `AvailabilityView` for `ProcessingComponents`.
impl<E: EthSpec> GetCommitments<E> for KzgCommitments<E> {
    // The "block" in the processing cache is just the commitments list itself.
    fn get_commitments(&self) -> KzgCommitments<E> {
        self.clone()
    }
}
impl<E: EthSpec> GetCommitment<E> for KzgCommitment {
    // A bare commitment is its own commitment.
    fn get_commitment(&self) -> &KzgCommitment {
        self
    }
}
// These implementations are required to implement `AvailabilityView` for `PendingComponents`.
impl<E: EthSpec> GetCommitments<E> for AvailabilityPendingExecutedBlock<E> {
    fn get_commitments(&self) -> KzgCommitments<E> {
        // Pre-Deneb blocks have no commitments field; treat them as having none.
        self.as_block()
            .message()
            .body()
            .blob_kzg_commitments()
            .cloned()
            .unwrap_or_default()
    }
}
impl<E: EthSpec> GetCommitment<E> for KzgVerifiedBlob<E> {
    fn get_commitment(&self) -> &KzgCommitment {
        &self.as_blob().kzg_commitment
    }
}
// These implementations are required to implement `AvailabilityView` for `ChildComponents`.
impl<E: EthSpec> GetCommitments<E> for Arc<SignedBeaconBlock<E>> {
    fn get_commitments(&self) -> KzgCommitments<E> {
        // Pre-Deneb blocks have no commitments field; treat them as having none.
        self.message()
            .body()
            .blob_kzg_commitments()
            .ok()
            .cloned()
            .unwrap_or_default()
    }
}
impl<E: EthSpec> GetCommitment<E> for Arc<BlobSidecar<E>> {
    fn get_commitment(&self) -> &KzgCommitment {
        &self.kzg_commitment
    }
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::block_verification_types::BlockImportData;
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs};
use crate::PayloadVerificationOutcome;
use eth2_network_config::get_trusted_setup;
use fork_choice::PayloadVerificationStatus;
use kzg::{Kzg, TrustedSetup};
use rand::rngs::StdRng;
use rand::SeedableRng;
use state_processing::ConsensusContext;
use types::test_utils::TestRandom;
use types::{BeaconState, ChainSpec, ForkName, MainnetEthSpec, Slot};
type E = MainnetEthSpec;
// (block, valid blobs, blobs-with-randomized-commitments) as produced by `pre_setup`.
type Setup<E> = (
    SignedBeaconBlock<E>,
    FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
    FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
);
/// Generates a random Deneb block plus two blob sets: the block's real blobs, and a
/// copy of them with each KZG commitment randomized (so they cannot match the block).
pub fn pre_setup() -> Setup<E> {
    let trusted_setup: TrustedSetup =
        serde_json::from_reader(get_trusted_setup::<<E as EthSpec>::Kzg>()).unwrap();
    let kzg = Kzg::new_from_trusted_setup(trusted_setup).unwrap();
    // Fixed seed keeps the generated block and blobs deterministic across runs.
    let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64);
    let (block, blobs_vec) =
        generate_rand_block_and_blobs::<E>(ForkName::Deneb, NumBlobs::Random, &kzg, &mut rng);
    // Scatter the blobs into a fixed vector indexed by their own `index` field.
    let mut blobs: FixedVector<_, <E as EthSpec>::MaxBlobsPerBlock> = FixedVector::default();
    for blob in blobs_vec {
        if let Some(b) = blobs.get_mut(blob.index as usize) {
            *b = Some(blob);
        }
    }
    // Build the "invalid" set: same blobs, but with random commitments.
    let mut invalid_blobs: FixedVector<
        Option<BlobSidecar<E>>,
        <E as EthSpec>::MaxBlobsPerBlock,
    > = FixedVector::default();
    for (index, blob) in blobs.iter().enumerate() {
        let mut invalid_blob_opt = blob.clone();
        if let Some(invalid_blob) = invalid_blob_opt.as_mut() {
            invalid_blob.kzg_commitment = KzgCommitment::random_for_test(&mut rng);
        }
        *invalid_blobs.get_mut(index).unwrap() = invalid_blob_opt;
    }
    (block, blobs, invalid_blobs)
}
// (block commitments, valid blob commitments, invalid blob commitments).
type ProcessingViewSetup<E> = (
    KzgCommitments<E>,
    FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
    FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
);
/// Projects the `pre_setup` output down to bare commitments, the form cached by
/// `ProcessingComponents`.
pub fn setup_processing_components(
    block: SignedBeaconBlock<E>,
    valid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
    invalid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
) -> ProcessingViewSetup<E> {
    let commitments = block
        .message()
        .body()
        .blob_kzg_commitments()
        .unwrap()
        .clone();
    let blobs = FixedVector::from(
        valid_blobs
            .iter()
            .map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment))
            .collect::<Vec<_>>(),
    );
    let invalid_blobs = FixedVector::from(
        invalid_blobs
            .iter()
            .map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment))
            .collect::<Vec<_>>(),
    );
    (commitments, blobs, invalid_blobs)
}
// (executed block, valid verified blobs, invalid verified blobs).
type PendingComponentsSetup<E> = (
    AvailabilityPendingExecutedBlock<E>,
    FixedVector<Option<KzgVerifiedBlob<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
    FixedVector<Option<KzgVerifiedBlob<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
);
/// Wraps the `pre_setup` output in the types cached by `PendingComponents`: blobs become
/// `KzgVerifiedBlob`s and the block becomes an `AvailabilityPendingExecutedBlock` with
/// dummy import data (sufficient for the availability-view merge tests).
pub fn setup_pending_components(
    block: SignedBeaconBlock<E>,
    valid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
    invalid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
) -> PendingComponentsSetup<E> {
    let blobs = FixedVector::from(
        valid_blobs
            .iter()
            .map(|blob_opt| {
                blob_opt
                    .as_ref()
                    .map(|blob| KzgVerifiedBlob::new(blob.clone()))
            })
            .collect::<Vec<_>>(),
    );
    let invalid_blobs = FixedVector::from(
        invalid_blobs
            .iter()
            .map(|blob_opt| {
                blob_opt
                    .as_ref()
                    .map(|blob| KzgVerifiedBlob::new(blob.clone()))
            })
            .collect::<Vec<_>>(),
    );
    // Any blinded block works as a placeholder parent; only the shape matters here.
    let dummy_parent = block.clone_as_blinded();
    let block = AvailabilityPendingExecutedBlock {
        block: Arc::new(block),
        import_data: BlockImportData {
            block_root: Default::default(),
            state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()),
            parent_block: dummy_parent,
            parent_eth1_finalization_data: Eth1FinalizationData {
                eth1_data: Default::default(),
                eth1_deposit_index: 0,
            },
            confirmed_state_roots: vec![],
            consensus_context: ConsensusContext::new(Slot::new(0)),
        },
        payload_verification_outcome: PayloadVerificationOutcome {
            payload_verification_status: PayloadVerificationStatus::Verified,
            is_valid_merge_transition_block: false,
        },
    };
    (block, blobs, invalid_blobs)
}
// (block, valid blob sidecars, invalid blob sidecars), all `Arc`-wrapped.
type ChildComponentsSetup<E> = (
    Arc<SignedBeaconBlock<E>>,
    FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
    FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
);
/// Wraps the `pre_setup` output in the `Arc`ed types cached by `ChildComponents`.
pub fn setup_child_components(
    block: SignedBeaconBlock<E>,
    valid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
    invalid_blobs: FixedVector<Option<BlobSidecar<E>>, <E as EthSpec>::MaxBlobsPerBlock>,
) -> ChildComponentsSetup<E> {
    let blobs = FixedVector::from(
        valid_blobs
            .into_iter()
            .map(|blob_opt| blob_opt.clone().map(Arc::new))
            .collect::<Vec<_>>(),
    );
    let invalid_blobs = FixedVector::from(
        invalid_blobs
            .into_iter()
            .map(|blob_opt| blob_opt.clone().map(Arc::new))
            .collect::<Vec<_>>(),
    );
    (Arc::new(block), blobs, invalid_blobs)
}
/// Asserts that the cache holds a block and that, at every index, the cached blob's
/// commitment equals the block's commitment (both `None` where no commitment exists).
pub fn assert_cache_consistent<V: AvailabilityView<E>>(cache: V) {
    let Some(cached_block) = cache.get_cached_block() else {
        panic!("No cached block")
    };
    let block_commitments = cached_block.get_commitments();
    // The blob vector is exactly `MaxBlobsPerBlock` long, so iterating it covers
    // every index `0..E::max_blobs_per_block()`.
    for (index, blob_opt) in cache.get_cached_blobs().iter().enumerate() {
        let expected = block_commitments.get(index).copied();
        let cached = blob_opt.as_ref().map(|blob| *blob.get_commitment());
        assert_eq!(expected, cached);
    }
}
/// Asserts that no blob is cached at any index.
pub fn assert_empty_blob_cache<V: AvailabilityView<E>>(cache: V) {
    cache
        .get_cached_blobs()
        .iter()
        .for_each(|blob| assert!(blob.is_none()));
}
/// Generates the six insertion-order permutation tests (block / valid blobs / blobs with
/// non-matching commitments) for a given `AvailabilityView` implementor.
///
/// NOTE(review): `$block_field` and `$blob_field` are not used by the macro body; they
/// appear to be kept for call-site symmetry with `impl_availability_view!`.
#[macro_export]
macro_rules! generate_tests {
    ($module_name:ident, $type_name:ty, $block_field:ident, $blob_field:ident, $setup_fn:ident) => {
        mod $module_name {
            use super::*;
            use types::Hash256;
            #[test]
            fn valid_block_invalid_blobs_valid_blobs() {
                // Block first: after it is cached, only matching blobs may be merged.
                let (block_commitments, blobs, random_blobs) = pre_setup();
                let (block_commitments, blobs, random_blobs) =
                    $setup_fn(block_commitments, blobs, random_blobs);
                let block_root = Hash256::zero();
                let mut cache = <$type_name>::empty(block_root);
                cache.merge_block(block_commitments);
                cache.merge_blobs(random_blobs);
                cache.merge_blobs(blobs);
                assert_cache_consistent(cache);
            }
            #[test]
            fn invalid_blobs_block_valid_blobs() {
                // Bad blobs first: merging the block must evict them, then good blobs land.
                let (block_commitments, blobs, random_blobs) = pre_setup();
                let (block_commitments, blobs, random_blobs) =
                    $setup_fn(block_commitments, blobs, random_blobs);
                let block_root = Hash256::zero();
                let mut cache = <$type_name>::empty(block_root);
                cache.merge_blobs(random_blobs);
                cache.merge_block(block_commitments);
                cache.merge_blobs(blobs);
                assert_cache_consistent(cache);
            }
            #[test]
            fn invalid_blobs_valid_blobs_block() {
                // Bad blobs occupy the slots first, so the good blobs are ignored and the
                // block eviction leaves the blob cache empty.
                let (block_commitments, blobs, random_blobs) = pre_setup();
                let (block_commitments, blobs, random_blobs) =
                    $setup_fn(block_commitments, blobs, random_blobs);
                let block_root = Hash256::zero();
                let mut cache = <$type_name>::empty(block_root);
                cache.merge_blobs(random_blobs);
                cache.merge_blobs(blobs);
                cache.merge_block(block_commitments);
                assert_empty_blob_cache(cache);
            }
            #[test]
            fn block_valid_blobs_invalid_blobs() {
                let (block_commitments, blobs, random_blobs) = pre_setup();
                let (block_commitments, blobs, random_blobs) =
                    $setup_fn(block_commitments, blobs, random_blobs);
                let block_root = Hash256::zero();
                let mut cache = <$type_name>::empty(block_root);
                cache.merge_block(block_commitments);
                cache.merge_blobs(blobs);
                cache.merge_blobs(random_blobs);
                assert_cache_consistent(cache);
            }
            #[test]
            fn valid_blobs_block_invalid_blobs() {
                let (block_commitments, blobs, random_blobs) = pre_setup();
                let (block_commitments, blobs, random_blobs) =
                    $setup_fn(block_commitments, blobs, random_blobs);
                let block_root = Hash256::zero();
                let mut cache = <$type_name>::empty(block_root);
                cache.merge_blobs(blobs);
                cache.merge_block(block_commitments);
                cache.merge_blobs(random_blobs);
                assert_cache_consistent(cache);
            }
            #[test]
            fn valid_blobs_invalid_blobs_block() {
                let (block_commitments, blobs, random_blobs) = pre_setup();
                let (block_commitments, blobs, random_blobs) =
                    $setup_fn(block_commitments, blobs, random_blobs);
                let block_root = Hash256::zero();
                let mut cache = <$type_name>::empty(block_root);
                cache.merge_blobs(blobs);
                cache.merge_blobs(random_blobs);
                cache.merge_block(block_commitments);
                assert_cache_consistent(cache);
            }
        }
    };
}
// Run the permutation test suite against all three `AvailabilityView` implementors.
generate_tests!(
    processing_components_tests,
    ProcessingComponents::<E>,
    kzg_commitments,
    processing_blobs,
    setup_processing_components
);
generate_tests!(
    pending_components_tests,
    PendingComponents<E>,
    executed_block,
    verified_blobs,
    setup_pending_components
);
generate_tests!(
    child_component_tests,
    ChildComponents::<E>,
    downloaded_block,
    downloaded_blobs,
    setup_child_components
);
}

View File

@ -0,0 +1,54 @@
use crate::block_verification_types::RpcBlock;
use crate::data_availability_checker::AvailabilityView;
use bls::Hash256;
use std::sync::Arc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{EthSpec, SignedBeaconBlock};
/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct
/// is used to cache components as they are sent to the network service. We can't use the
/// data availability cache currently because any blocks or blobs without parents
/// won't pass validation and therefore won't make it into the cache.
pub struct ChildComponents<E: EthSpec> {
    // Root of the child block these components belong to.
    pub block_root: Hash256,
    // The block, once (and if) it has been downloaded.
    pub downloaded_block: Option<Arc<SignedBeaconBlock<E>>>,
    // Downloaded blob sidecars, indexed by blob index.
    pub downloaded_blobs: FixedBlobSidecarList<E>,
}
impl<E: EthSpec> From<RpcBlock<E>> for ChildComponents<E> {
    /// Builds a `ChildComponents` from an RPC block, carrying over its root, block and
    /// any blobs (converted into the fixed, index-addressed list).
    fn from(value: RpcBlock<E>) -> Self {
        let (block_root, block, blobs) = value.deconstruct();
        let fixed_blobs = blobs.map(|blobs| {
            FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::<Vec<_>>())
        });
        Self::new(block_root, Some(block), fixed_blobs)
    }
}
impl<E: EthSpec> ChildComponents<E> {
    /// Creates a cache for `block_root` containing no block and no blobs.
    pub fn empty(block_root: Hash256) -> Self {
        Self {
            block_root,
            downloaded_block: None,
            downloaded_blobs: FixedBlobSidecarList::default(),
        }
    }
    /// Creates a cache pre-populated with an optional block and optional blobs,
    /// applying the usual `AvailabilityView` merge (consistency) rules to each.
    pub fn new(
        block_root: Hash256,
        block: Option<Arc<SignedBeaconBlock<E>>>,
        blobs: Option<FixedBlobSidecarList<E>>,
    ) -> Self {
        let mut components = Self::empty(block_root);
        if let Some(downloaded_block) = block {
            components.merge_block(downloaded_block);
        }
        if let Some(downloaded_blobs) = blobs {
            components.merge_blobs(downloaded_blobs);
        }
        components
    }
    /// Drops all cached blobs, keeping the block (if any).
    pub fn clear_blobs(&mut self) {
        self.downloaded_blobs = FixedBlobSidecarList::default();
    }
}

View File

@ -30,21 +30,20 @@
use crate::beacon_chain::BeaconStore;
use crate::blob_verification::KzgVerifiedBlob;
use crate::block_verification_types::{
AsBlock, AvailabilityPendingExecutedBlock, AvailableExecutedBlock,
AsBlock, AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock,
};
use crate::data_availability_checker::{make_available, Availability, AvailabilityCheckError};
use crate::data_availability_checker::availability_view::AvailabilityView;
use crate::data_availability_checker::{Availability, AvailabilityCheckError};
use crate::store::{DBColumn, KeyValueStore};
use crate::BeaconChainTypes;
use lru::LruCache;
use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use ssz_types::FixedVector;
use ssz_types::{FixedVector, VariableList};
use std::{collections::HashSet, sync::Arc};
use types::blob_sidecar::BlobIdentifier;
use types::{BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock};
type MissingBlobInfo<T> = (Option<Arc<SignedBeaconBlock<T>>>, HashSet<usize>);
use types::{BlobSidecar, Epoch, EthSpec, Hash256};
/// This represents the components of a partially available block
///
@ -52,53 +51,59 @@ type MissingBlobInfo<T> = (Option<Arc<SignedBeaconBlock<T>>>, HashSet<usize>);
/// The block has completed all verifications except the availability check.
#[derive(Encode, Decode, Clone)]
pub struct PendingComponents<T: EthSpec> {
verified_blobs: FixedVector<Option<KzgVerifiedBlob<T>>, T::MaxBlobsPerBlock>,
executed_block: Option<AvailabilityPendingExecutedBlock<T>>,
pub block_root: Hash256,
pub verified_blobs: FixedVector<Option<KzgVerifiedBlob<T>>, T::MaxBlobsPerBlock>,
pub executed_block: Option<AvailabilityPendingExecutedBlock<T>>,
}
impl<T: EthSpec> PendingComponents<T> {
pub fn new_from_blobs(blobs: &[KzgVerifiedBlob<T>]) -> Self {
let mut verified_blobs = FixedVector::<_, _>::default();
for blob in blobs {
if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) {
*mut_maybe_blob = Some(blob.clone());
}
}
pub fn empty(block_root: Hash256) -> Self {
Self {
block_root,
verified_blobs: FixedVector::default(),
executed_block: None,
}
}
/// Verifies an `SignedBeaconBlock` against a set of KZG verified blobs.
/// This does not check whether a block *should* have blobs, these checks should have been
/// completed when producing the `AvailabilityPendingBlock`.
pub fn make_available(self) -> Result<Availability<T>, AvailabilityCheckError> {
let Self {
block_root,
verified_blobs,
executed_block: None,
}
}
executed_block,
} = self;
pub fn new_from_block(block: AvailabilityPendingExecutedBlock<T>) -> Self {
Self {
verified_blobs: <_>::default(),
executed_block: Some(block),
}
}
let Some(executed_block) = executed_block else {
return Err(AvailabilityCheckError::Unexpected);
};
let num_blobs_expected = executed_block.num_blobs_expected();
let Some(verified_blobs) = verified_blobs
.into_iter()
.cloned()
.map(|b| b.map(|b| b.to_blob()))
.take(num_blobs_expected)
.collect::<Option<Vec<_>>>()
else {
return Err(AvailabilityCheckError::Unexpected);
};
let verified_blobs = VariableList::new(verified_blobs)?;
/// Returns `true` if the cache has all blobs corresponding to the
/// kzg commitments in the block.
pub fn has_all_blobs(&self, block: &AvailabilityPendingExecutedBlock<T>) -> bool {
for i in 0..block.num_blobs_expected() {
if self
.verified_blobs
.get(i)
.map(|maybe_blob| maybe_blob.is_none())
.unwrap_or(true)
{
return false;
}
}
true
}
let AvailabilityPendingExecutedBlock {
block,
import_data,
payload_verification_outcome,
} = executed_block;
pub fn empty() -> Self {
Self {
verified_blobs: <_>::default(),
executed_block: None,
}
let available_block = AvailableBlock {
block_root,
block,
blobs: Some(verified_blobs),
};
Ok(Availability::Available(Box::new(
AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome),
)))
}
pub fn epoch(&self) -> Option<Epoch> {
@ -116,20 +121,6 @@ impl<T: EthSpec> PendingComponents<T> {
None
})
}
pub fn get_missing_blob_info(&self) -> MissingBlobInfo<T> {
let block_opt = self
.executed_block
.as_ref()
.map(|block| block.block.clone());
let blobs = self
.verified_blobs
.iter()
.enumerate()
.filter_map(|(i, maybe_blob)| maybe_blob.as_ref().map(|_| i))
.collect::<HashSet<_>>();
(block_opt, blobs)
}
}
/// Blocks and blobs are stored in the database sequentially so that it's
@ -216,14 +207,14 @@ impl<T: BeaconChainTypes> OverflowStore<T> {
match OverflowKey::from_ssz_bytes(&key_bytes)? {
OverflowKey::Block(_) => {
maybe_pending_components
.get_or_insert_with(PendingComponents::empty)
.get_or_insert_with(|| PendingComponents::empty(block_root))
.executed_block = Some(AvailabilityPendingExecutedBlock::from_ssz_bytes(
value_bytes.as_slice(),
)?);
}
OverflowKey::Blob(_, index) => {
*maybe_pending_components
.get_or_insert_with(PendingComponents::empty)
.get_or_insert_with(|| PendingComponents::empty(block_root))
.verified_blobs
.get_mut(index as usize)
.ok_or(AvailabilityCheckError::BlobIndexInvalid(index as u64))? =
@ -245,23 +236,6 @@ impl<T: BeaconChainTypes> OverflowStore<T> {
Ok(disk_keys)
}
/// Load a single block from the database (ignoring blobs)
pub fn load_block(
&self,
block_root: &Hash256,
) -> Result<Option<AvailabilityPendingExecutedBlock<T::EthSpec>>, AvailabilityCheckError> {
let key = OverflowKey::from_block_root(*block_root);
self.0
.hot_db
.get_bytes(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())?
.map(|block_bytes| {
AvailabilityPendingExecutedBlock::from_ssz_bytes(block_bytes.as_slice())
})
.transpose()
.map_err(|e| e.into())
}
/// Load a single blob from the database
pub fn load_blob(
&self,
@ -404,43 +378,6 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
})
}
/// Returns whether or not a block is in the cache (in memory or on disk)
pub fn has_block(&self, block_root: &Hash256) -> bool {
let read_lock = self.critical.read();
if read_lock
.in_memory
.peek(block_root)
.map_or(false, |cache| cache.executed_block.is_some())
{
true
} else if read_lock.store_keys.contains(block_root) {
drop(read_lock);
// If there's some kind of error reading from the store, we should just return false
self.overflow_store
.load_block(block_root)
.map_or(false, |maybe_block| maybe_block.is_some())
} else {
false
}
}
/// Fetch the missing blob info for a block without affecting the LRU ordering
pub fn get_missing_blob_info(&self, block_root: Hash256) -> MissingBlobInfo<T::EthSpec> {
let read_lock = self.critical.read();
if let Some(cache) = read_lock.in_memory.peek(&block_root) {
cache.get_missing_blob_info()
} else if read_lock.store_keys.contains(&block_root) {
drop(read_lock);
// return default if there's an error reading from the store
match self.overflow_store.load_pending_components(block_root) {
Ok(Some(pending_components)) => pending_components.get_missing_blob_info(),
_ => Default::default(),
}
} else {
Default::default()
}
}
/// Fetch a blob from the cache without affecting the LRU ordering
pub fn peek_blob(
&self,
@ -460,59 +397,44 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
pub fn put_kzg_verified_blobs(
&self,
block_root: Hash256,
kzg_verified_blobs: &[KzgVerifiedBlob<T::EthSpec>],
kzg_verified_blobs: Vec<KzgVerifiedBlob<T::EthSpec>>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
let mut fixed_blobs = FixedVector::default();
// Initial check to ensure all provided blobs have a consistent block root.
for blob in kzg_verified_blobs {
let blob_block_root = blob.block_root();
if blob_block_root != block_root {
return Err(AvailabilityCheckError::BlockBlobRootMismatch {
return Err(AvailabilityCheckError::InconsistentBlobBlockRoots {
block_root,
blob_block_root,
});
}
if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) {
*blob_opt = Some(blob);
}
}
let mut write_lock = self.critical.write();
let availability = if let Some(mut pending_components) =
write_lock.pop_pending_components(block_root, &self.overflow_store)?
{
for kzg_verified_blob in kzg_verified_blobs {
let blob_index = kzg_verified_blob.blob_index() as usize;
if let Some(maybe_verified_blob) =
pending_components.verified_blobs.get_mut(blob_index)
{
*maybe_verified_blob = Some(kzg_verified_blob.clone())
} else {
return Err(AvailabilityCheckError::BlobIndexInvalid(blob_index as u64));
}
}
// Grab existing entry or create a new entry.
let mut pending_components = write_lock
.pop_pending_components(block_root, &self.overflow_store)?
.unwrap_or_else(|| PendingComponents::empty(block_root));
if let Some(executed_block) = pending_components.executed_block.take() {
self.check_block_availability_maybe_cache(
write_lock,
pending_components,
executed_block,
)?
} else {
write_lock.put_pending_components(
block_root,
pending_components,
&self.overflow_store,
)?;
Availability::MissingComponents(block_root)
}
// Merge in the blobs.
pending_components.merge_blobs(fixed_blobs);
if pending_components.is_available() {
pending_components.make_available()
} else {
// not in memory or store -> put new in memory
let new_pending_components = PendingComponents::new_from_blobs(kzg_verified_blobs);
write_lock.put_pending_components(
block_root,
new_pending_components,
pending_components,
&self.overflow_store,
)?;
Availability::MissingComponents(block_root)
};
Ok(availability)
Ok(Availability::MissingComponents(block_root))
}
}
/// Check if we have all the blobs for a block. If we do, return the Availability variant that
@ -524,90 +446,23 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
let mut write_lock = self.critical.write();
let block_root = executed_block.import_data.block_root;
let availability =
match write_lock.pop_pending_components(block_root, &self.overflow_store)? {
Some(pending_components) => self.check_block_availability_maybe_cache(
write_lock,
pending_components,
executed_block,
)?,
None => {
let all_blob_ids = executed_block.get_all_blob_ids();
if all_blob_ids.is_empty() {
// no blobs for this block, we can import it
let AvailabilityPendingExecutedBlock {
block,
import_data,
payload_verification_outcome,
} = executed_block;
let available_block = make_available(block, vec![])?;
return Ok(Availability::Available(Box::new(
AvailableExecutedBlock::new(
available_block,
import_data,
payload_verification_outcome,
),
)));
}
let new_pending_components = PendingComponents::new_from_block(executed_block);
write_lock.put_pending_components(
block_root,
new_pending_components,
&self.overflow_store,
)?;
Availability::MissingComponents(block_root)
}
};
// Grab existing entry or create a new entry.
let mut pending_components = write_lock
.pop_pending_components(block_root, &self.overflow_store)?
.unwrap_or_else(|| PendingComponents::empty(block_root));
Ok(availability)
}
// Merge in the block.
pending_components.merge_block(executed_block);
/// Checks if the provided `executed_block` contains all required blobs to be considered an
/// `AvailableBlock` based on blobs that are cached.
///
/// Returns an error if there was an error when matching the block commitments against blob commitments.
///
/// Returns `Ok(Availability::Available(_))` if all blobs for the block are present in cache.
/// Returns `Ok(Availability::MissingComponents(_))` if all corresponding blobs have not been received in the cache.
fn check_block_availability_maybe_cache(
&self,
mut write_lock: RwLockWriteGuard<Critical<T>>,
mut pending_components: PendingComponents<T::EthSpec>,
executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
if pending_components.has_all_blobs(&executed_block) {
let num_blobs_expected = executed_block.num_blobs_expected();
let AvailabilityPendingExecutedBlock {
block,
import_data,
payload_verification_outcome,
} = executed_block;
let Some(verified_blobs) = Vec::from(pending_components.verified_blobs)
.into_iter()
.take(num_blobs_expected)
.collect::<Option<Vec<_>>>()
else {
return Ok(Availability::MissingComponents(import_data.block_root));
};
let available_block = make_available(block, verified_blobs)?;
Ok(Availability::Available(Box::new(
AvailableExecutedBlock::new(
available_block,
import_data,
payload_verification_outcome,
),
)))
// Check if we have all components and entire set is consistent.
if pending_components.is_available() {
pending_components.make_available()
} else {
let block_root = executed_block.import_data.block_root;
let _ = pending_components.executed_block.insert(executed_block);
write_lock.put_pending_components(
block_root,
pending_components,
&self.overflow_store,
)?;
Ok(Availability::MissingComponents(block_root))
}
}
@ -1224,7 +1079,7 @@ mod test {
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
let availability = cache
.put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(root, kzg_verified_blobs.clone())
.expect("should put blob");
if blob_index == blobs_expected - 1 {
assert!(matches!(availability, Availability::Available(_)));
@ -1252,7 +1107,7 @@ mod test {
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
let availability = cache
.put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(root, kzg_verified_blobs.clone())
.expect("should put blob");
assert_eq!(
availability,
@ -1397,7 +1252,7 @@ mod test {
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
let availability = cache
.put_kzg_verified_blobs(roots[0], kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(roots[0], kzg_verified_blobs.clone())
.expect("should put blob");
if blob_index == expected_blobs - 1 {
assert!(matches!(availability, Availability::Available(_)));
@ -1504,7 +1359,7 @@ mod test {
"should have pending blobs"
);
let availability = cache
.put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(block_root, kzg_verified_blobs)
.expect("should put blob");
assert!(
matches!(availability, Availability::MissingComponents(_)),
@ -1513,7 +1368,7 @@ mod test {
);
} else {
let availability = cache
.put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(block_root, kzg_verified_blobs)
.expect("should put blob");
let root = pending_block.block.as_block().canonical_root();
assert_eq!(
@ -1656,7 +1511,7 @@ mod test {
"should have pending blobs"
);
let availability = cache
.put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(block_root, kzg_verified_blobs)
.expect("should put blob");
assert!(
matches!(availability, Availability::MissingComponents(_)),
@ -1665,7 +1520,7 @@ mod test {
);
} else {
let availability = cache
.put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(block_root, kzg_verified_blobs)
.expect("should put blob");
let root = pending_block.block.as_block().canonical_root();
assert_eq!(
@ -1757,7 +1612,7 @@ mod test {
.expect("kzg should verify");
kzg_verified_blobs.push(kzg_verified_blob);
let availability = recovered_cache
.put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
.put_kzg_verified_blobs(root, kzg_verified_blobs.clone())
.expect("should put blob");
if i == additional_blobs - 1 {
assert!(matches!(availability, Availability::Available(_)))

View File

@ -0,0 +1,74 @@
use crate::data_availability_checker::AvailabilityView;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
use types::{EthSpec, Hash256, Slot};
/// This cache is used only for gossip blocks/blobs and single block/blob lookups, to give req/resp
/// a view of what we have and what we require. This cache serves a slightly different purpose than
/// gossip caches because it allows us to process duplicate blobs that are valid in gossip.
/// See `AvailabilityView`'s trait definition.
#[derive(Default)]
pub struct ProcessingCache<E: EthSpec> {
processing_cache: HashMap<Hash256, ProcessingComponents<E>>,
}
impl<E: EthSpec> ProcessingCache<E> {
pub fn get(&self, block_root: &Hash256) -> Option<&ProcessingComponents<E>> {
self.processing_cache.get(block_root)
}
pub fn entry(&mut self, block_root: Hash256) -> Entry<'_, Hash256, ProcessingComponents<E>> {
self.processing_cache.entry(block_root)
}
pub fn remove(&mut self, block_root: &Hash256) {
self.processing_cache.remove(block_root);
}
pub fn has_block(&self, block_root: &Hash256) -> bool {
self.processing_cache
.get(block_root)
.map_or(false, |b| b.block_exists())
}
pub fn incomplete_processing_components(&self, slot: Slot) -> Vec<Hash256> {
let mut roots_missing_components = vec![];
for (&block_root, info) in self.processing_cache.iter() {
if info.slot == slot && !info.is_available() {
roots_missing_components.push(block_root);
}
}
roots_missing_components
}
}
#[derive(Debug, Clone)]
pub struct ProcessingComponents<E: EthSpec> {
slot: Slot,
/// Blobs required for a block can only be known if we have seen the block. So `Some` here
/// means we've seen it, a `None` means we haven't. The `kzg_commitments` value helps us figure
/// out whether incoming blobs actually match the block.
pub block_commitments: Option<KzgCommitments<E>>,
/// `KzgCommitments` for blobs are always known, even if we haven't seen the block. See
/// `AvailabilityView`'s trait definition for more details.
pub blob_commitments: KzgCommitmentOpts<E>,
}
impl<E: EthSpec> ProcessingComponents<E> {
pub fn new(slot: Slot) -> Self {
Self {
slot,
block_commitments: None,
blob_commitments: KzgCommitmentOpts::<E>::default(),
}
}
}
// Not safe for use outside of tests as this always required a slot.
#[cfg(test)]
impl<E: EthSpec> ProcessingComponents<E> {
pub fn empty(_block_root: Hash256) -> Self {
Self {
slot: Slot::new(0),
block_commitments: None,
blob_commitments: KzgCommitmentOpts::<E>::default(),
}
}
}

View File

@ -70,7 +70,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
},
};
let (block, blobs) = block.deconstruct();
let (_, block, blobs) = block.deconstruct();
let item = CacheItem {
epoch,
committee_lengths,

View File

@ -106,10 +106,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let mut signed_blocks = Vec::with_capacity(blocks_to_import.len());
for available_block in blocks_to_import.into_iter().rev() {
let (block, maybe_blobs) = available_block.deconstruct();
// Check chain integrity.
let block_root = block.canonical_root();
let (block_root, block, maybe_blobs) = available_block.deconstruct();
if block_root != expected_block_root {
return Err(HistoricalBlockError::MismatchedBlockRoot {

View File

@ -40,6 +40,10 @@ lazy_static! {
"beacon_block_processing_block_root_seconds",
"Time spent calculating the block root when processing a block."
);
pub static ref BLOCK_PROCESSING_BLOB_ROOT: Result<Histogram> = try_create_histogram(
"beacon_block_processing_blob_root_seconds",
"Time spent calculating the blob root when processing a block."
);
pub static ref BLOCK_PROCESSING_DB_READ: Result<Histogram> = try_create_histogram(
"beacon_block_processing_db_read_seconds",
"Time spent loading block and state from DB for block processing"
@ -282,6 +286,11 @@ lazy_static! {
"Count of times the early attester cache returns an attestation"
);
}
// Second lazy-static block is used to account for macro recursion limit.
lazy_static! {
/*
* Attestation Production
*/
@ -301,10 +310,7 @@ lazy_static! {
"attestation_production_cache_prime_seconds",
"Time spent loading a new state from the disk due to a cache miss"
);
}
// Second lazy-static block is used to account for macro recursion limit.
lazy_static! {
/*
* Fork Choice
*/

View File

@ -60,6 +60,7 @@ use task_executor::{test_utils::TestRuntime, ShutdownReason};
use tree_hash::TreeHash;
use types::sync_selection_proof::SyncSelectionProof;
pub use types::test_utils::generate_deterministic_keypairs;
use types::test_utils::TestRandom;
use types::{typenum::U4294967296, *};
// 4th September 2019
@ -709,14 +710,14 @@ where
let block = self.chain.head_beacon_block();
let block_root = block.canonical_root();
let blobs = self.chain.get_blobs(&block_root).unwrap();
RpcBlock::new(block, Some(blobs)).unwrap()
RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap()
}
pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock<E> {
let block = self.chain.get_blinded_block(block_root).unwrap().unwrap();
let full_block = self.chain.store.make_full_block(block_root, block).unwrap();
let blobs = self.chain.get_blobs(block_root).unwrap();
RpcBlock::new(Arc::new(full_block), Some(blobs)).unwrap()
RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap()
}
pub fn get_all_validators(&self) -> Vec<usize> {
@ -1922,7 +1923,7 @@ where
.chain
.process_block(
block_root,
RpcBlock::new(Arc::new(block), blobs_without_signatures).unwrap(),
RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
@ -1947,11 +1948,12 @@ where
.collect::<Vec<_>>(),
)
});
let block_root = block.canonical_root();
let block_hash: SignedBeaconBlockHash = self
.chain
.process_block(
block.canonical_root(),
RpcBlock::new(Arc::new(block), blobs_without_signatures).unwrap(),
block_root,
RpcBlock::new(Some(block_root), Arc::new(block), blobs_without_signatures).unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
@ -2513,3 +2515,64 @@ pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
Logger::root(drain.filter(|_| false).fuse(), o!())
}
}
pub enum NumBlobs {
Random,
None,
}
pub fn generate_rand_block_and_blobs<E: EthSpec>(
fork_name: ForkName,
num_blobs: NumBlobs,
kzg: &Kzg<E::Kzg>,
rng: &mut impl Rng,
) -> (SignedBeaconBlock<E, FullPayload<E>>, Vec<BlobSidecar<E>>) {
let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng));
let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng));
let mut blob_sidecars = vec![];
if let Ok(message) = block.message_deneb_mut() {
// Get either zero blobs or a random number of blobs between 1 and Max Blobs.
let payload: &mut FullPayloadDeneb<E> = &mut message.body.execution_payload;
let num_blobs = match num_blobs {
NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()),
NumBlobs::None => 0,
};
let (bundle, transactions) =
execution_layer::test_utils::generate_random_blobs::<E, _>(num_blobs, kzg, rng)
.unwrap();
payload.execution_payload.transactions = <_>::default();
for tx in Vec::from(transactions) {
payload.execution_payload.transactions.push(tx).unwrap();
}
message.body.blob_kzg_commitments = bundle.commitments.clone();
let eth2::types::BlobsBundle {
commitments,
proofs,
blobs,
} = bundle;
let block_root = block.canonical_root();
for (index, ((blob, kzg_commitment), kzg_proof)) in blobs
.into_iter()
.zip(commitments.into_iter())
.zip(proofs.into_iter())
.enumerate()
{
blob_sidecars.push(BlobSidecar {
block_root,
index: index as u64,
slot: block.slot(),
block_parent_root: block.parent_root(),
proposer_index: block.message().proposer_index(),
blob: blob.clone(),
kzg_commitment,
kzg_proof,
});
}
}
(block, blob_sidecars)
}

View File

@ -134,7 +134,7 @@ async fn produces_attestations() {
assert_eq!(data.target.root, target_root, "bad target root");
let rpc_block =
RpcBlock::<MainnetEthSpec>::new(Arc::new(block.clone()), Some(blobs.clone()))
RpcBlock::<MainnetEthSpec>::new(None, Arc::new(block.clone()), Some(blobs.clone()))
.unwrap();
let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(
available_block,
@ -213,7 +213,7 @@ async fn early_attester_cache_old_request() {
.expect("should get blobs");
let rpc_block =
RpcBlock::<MainnetEthSpec>::new(head.beacon_block.clone(), Some(head_blobs)).unwrap();
RpcBlock::<MainnetEthSpec>::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap();
let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) =
harness
.chain

View File

@ -161,7 +161,7 @@ fn chain_segment_blocks(
.iter()
.zip(blobs.into_iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect()
}
@ -204,17 +204,49 @@ fn update_proposal_signatures(
}
}
fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
fn update_parent_roots(
snapshots: &mut [BeaconSnapshot<E>],
blobs: &mut [Option<BlobSidecarList<E>>],
) {
for i in 0..snapshots.len() {
let root = snapshots[i].beacon_block.canonical_root();
if let Some(child) = snapshots.get_mut(i + 1) {
if let (Some(child), Some(child_blobs)) = (snapshots.get_mut(i + 1), blobs.get_mut(i + 1)) {
let (mut block, signature) = child.beacon_block.as_ref().clone().deconstruct();
*block.parent_root_mut() = root;
child.beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature))
let new_child = Arc::new(SignedBeaconBlock::from_block(block, signature));
let new_child_root = new_child.canonical_root();
child.beacon_block = new_child;
if let Some(blobs) = child_blobs {
update_blob_roots(new_child_root, blobs);
}
}
}
}
fn update_blob_roots<E: EthSpec>(block_root: Hash256, blobs: &mut BlobSidecarList<E>) {
for old_blob_sidecar in blobs.iter_mut() {
let index = old_blob_sidecar.index;
let slot = old_blob_sidecar.slot;
let block_parent_root = old_blob_sidecar.block_parent_root;
let proposer_index = old_blob_sidecar.proposer_index;
let blob = old_blob_sidecar.blob.clone();
let kzg_commitment = old_blob_sidecar.kzg_commitment;
let kzg_proof = old_blob_sidecar.kzg_proof;
let new_blob = Arc::new(BlobSidecar::<E> {
block_root,
index,
slot,
block_parent_root,
proposer_index,
blob,
kzg_commitment,
kzg_proof,
});
*old_blob_sidecar = new_blob;
}
}
#[tokio::test]
async fn chain_segment_full_segment() {
let harness = get_harness(VALIDATOR_COUNT);
@ -328,7 +360,10 @@ async fn chain_segment_non_linear_parent_roots() {
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.parent_root_mut() = Hash256::zero();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@ -362,7 +397,10 @@ async fn chain_segment_non_linear_slots() {
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = Slot::new(0);
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@ -386,7 +424,10 @@ async fn chain_segment_non_linear_slots() {
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = blocks[2].slot();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
blocks[3] = RpcBlock::new_without_blobs(
None,
Arc::new(SignedBeaconBlock::from_block(block, signature)),
);
assert!(
matches!(
@ -413,7 +454,7 @@ async fn assert_invalid_signature(
.iter()
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
@ -440,7 +481,7 @@ async fn assert_invalid_signature(
.take(block_index)
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
// We don't care if this fails, we just call this to ensure that all prior blocks have been
@ -456,6 +497,7 @@ async fn assert_invalid_signature(
.process_block(
snapshots[block_index].beacon_block.canonical_root(),
RpcBlock::new(
None,
snapshots[block_index].beacon_block.clone(),
chain_segment_blobs[block_index].clone(),
)
@ -511,7 +553,7 @@ async fn invalid_signature_gossip_block() {
.take(block_index)
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
harness
@ -558,7 +600,7 @@ async fn invalid_signature_block_proposal() {
.iter()
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect::<Vec<_>>();
// Ensure the block will be rejected if imported in a chain segment.
@ -578,7 +620,7 @@ async fn invalid_signature_block_proposal() {
#[tokio::test]
async fn invalid_signature_randao_reveal() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -590,7 +632,7 @@ async fn invalid_signature_randao_reveal() {
*block.body_mut().randao_reveal_mut() = junk_signature();
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@ -606,7 +648,7 @@ async fn invalid_signature_randao_reveal() {
#[tokio::test]
async fn invalid_signature_proposer_slashing() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -632,7 +674,7 @@ async fn invalid_signature_proposer_slashing() {
.expect("should update proposer slashing");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@ -648,7 +690,7 @@ async fn invalid_signature_proposer_slashing() {
#[tokio::test]
async fn invalid_signature_attester_slashing() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -685,7 +727,7 @@ async fn invalid_signature_attester_slashing() {
.expect("should update attester slashing");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@ -701,7 +743,7 @@ async fn invalid_signature_attester_slashing() {
#[tokio::test]
async fn invalid_signature_attestation() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
let mut checked_attestation = false;
for &block_index in BLOCK_INDICES {
@ -716,7 +758,7 @@ async fn invalid_signature_attestation() {
attestation.signature = junk_aggregate_signature();
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@ -739,7 +781,7 @@ async fn invalid_signature_attestation() {
#[tokio::test]
async fn invalid_signature_deposit() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
// Note: an invalid deposit signature is permitted!
let harness = get_invalid_sigs_harness(&chain_segment).await;
@ -765,13 +807,13 @@ async fn invalid_signature_deposit() {
.expect("should update deposit");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
let blocks: Vec<RpcBlock<E>> = snapshots
.iter()
.zip(chain_segment_blobs.iter())
.map(|(snapshot, blobs)| {
RpcBlock::new(snapshot.beacon_block.clone(), blobs.clone()).unwrap()
RpcBlock::new(None, snapshot.beacon_block.clone(), blobs.clone()).unwrap()
})
.collect();
assert!(
@ -790,7 +832,7 @@ async fn invalid_signature_deposit() {
#[tokio::test]
async fn invalid_signature_exit() {
let (chain_segment, chain_segment_blobs) = get_chain_segment().await;
let (chain_segment, mut chain_segment_blobs) = get_chain_segment().await;
for &block_index in BLOCK_INDICES {
let harness = get_invalid_sigs_harness(&chain_segment).await;
let mut snapshots = chain_segment.clone();
@ -813,7 +855,7 @@ async fn invalid_signature_exit() {
.expect("should update deposit");
snapshots[block_index].beacon_block =
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_parent_roots(&mut snapshots, &mut chain_segment_blobs);
update_proposal_signatures(&mut snapshots, &harness);
assert_invalid_signature(
&chain_segment,
@ -877,7 +919,7 @@ async fn block_gossip_verification() {
harness
.chain
.process_blob(gossip_verified)
.process_gossip_blob(gossip_verified)
.await
.expect("should import valid gossip verified blob");
}
@ -1143,7 +1185,11 @@ async fn verify_block_for_gossip_slashing_detection() {
.chain
.verify_blob_sidecar_for_gossip(blob, blob_index)
.unwrap();
harness.chain.process_blob(verified_blob).await.unwrap();
harness
.chain
.process_gossip_blob(verified_blob)
.await
.unwrap();
}
}
harness
@ -1355,7 +1401,10 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!(
harness
.chain
.process_chain_segment(vec![Arc::new(base_block).into()], NotifyExecutionLayer::Yes,)
.process_chain_segment(
vec![RpcBlock::new_without_blobs(None, Arc::new(base_block))],
NotifyExecutionLayer::Yes,
)
.await,
ChainSegmentResult::Failed {
imported_blocks: 0,
@ -1491,7 +1540,7 @@ async fn add_altair_block_to_base_chain() {
harness
.chain
.process_chain_segment(
vec![Arc::new(altair_block).into()],
vec![RpcBlock::new_without_blobs(None, Arc::new(altair_block))],
NotifyExecutionLayer::Yes
)
.await,

View File

@ -2217,7 +2217,7 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
beacon_chain
.process_block(
full_block.canonical_root(),
RpcBlock::new(Arc::new(full_block), Some(blobs)).unwrap(),
RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(),
NotifyExecutionLayer::Yes,
|| Ok(()),
)
@ -2277,7 +2277,9 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
if let MaybeAvailableBlock::Available(block) = harness
.chain
.data_availability_checker
.check_rpc_block_availability(RpcBlock::new(Arc::new(full_block), Some(blobs)).unwrap())
.check_rpc_block_availability(
RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(),
)
.expect("should check availability")
{
available_blocks.push(block);

View File

@ -198,7 +198,7 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
if let Some(gossip_verified_blobs) = gossip_verified_blobs {
for blob in gossip_verified_blobs {
if let Err(e) = chain.process_blob(blob).await {
if let Err(e) = chain.process_gossip_blob(blob).await {
return Err(warp_utils::reject::custom_bad_request(format!(
"Invalid blob: {e}"
)));

View File

@ -40,6 +40,7 @@ itertools = { workspace = true }
num_cpus = { workspace = true }
lru_cache = { workspace = true }
if-addrs = "0.6.4"
lru = { workspace = true }
strum = { workspace = true }
tokio-util = { workspace = true }
derivative = { workspace = true }

View File

@ -4,6 +4,7 @@ use crate::{
service::NetworkMessage,
sync::SyncMessage,
};
use std::collections::HashSet;
use beacon_chain::blob_verification::{GossipBlobError, GossipVerifiedBlob};
use beacon_chain::block_verification_types::AsBlock;
@ -639,8 +640,8 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
self.log,
"Successfully verified gossip blob";
"slot" => %slot,
"root" => %root,
"index" => %index
"root" => %root,
"index" => %index
);
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Accept);
@ -666,7 +667,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
self.log,
"Unknown parent hash for blob";
"action" => "requesting parent",
"blob_root" => %blob.block_root,
"block_root" => %blob.block_root,
"parent_root" => %blob.block_parent_root
);
self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob));
@ -732,10 +733,16 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
// This value is not used presently, but it might come in handy for debugging.
_seen_duration: Duration,
) {
let blob_root = verified_blob.block_root();
let block_root = verified_blob.block_root();
let blob_slot = verified_blob.slot();
let blob_index = verified_blob.id().index;
match self.chain.process_blob(verified_blob).await {
let delay_lookup = self
.chain
.data_availability_checker
.should_delay_lookup(blob_slot);
match self.chain.process_gossip_blob(verified_blob).await {
Ok(AvailabilityProcessingStatus::Imported(hash)) => {
// Note: Reusing block imported metric here
metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_IMPORTED_TOTAL);
@ -746,24 +753,36 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
);
self.chain.recompute_head_at_current_slot().await;
}
Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_hash)) => {
trace!(
self.log,
"Missing block components for gossip verified blob";
"slot" => %blob_slot,
"blob_index" => %blob_index,
"blob_root" => %blob_root,
);
self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
slot, peer_id, block_hash,
));
Ok(AvailabilityProcessingStatus::MissingComponents(_slot, block_root)) => {
if delay_lookup {
self.cache_peer(peer_id, &block_root);
trace!(
self.log,
"Processed blob, delaying lookup for other components";
"slot" => %blob_slot,
"blob_index" => %blob_index,
"block_root" => %block_root,
);
} else {
trace!(
self.log,
"Missing block components for gossip verified blob";
"slot" => %blob_slot,
"blob_index" => %blob_index,
"block_root" => %block_root,
);
self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
vec![peer_id],
block_root,
));
}
}
Err(err) => {
debug!(
self.log,
"Invalid gossip blob";
"outcome" => ?err,
"block root" => ?blob_root,
"block root" => ?block_root,
"block slot" => blob_slot,
"blob index" => blob_index,
);
@ -780,6 +799,18 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
}
}
/// Cache the peer id for the given block root.
///
/// Peers recorded here are later drained by `poll_delayed_lookups` and attached
/// to the `MissingGossipBlockComponents` sync message for that root.
fn cache_peer(self: &Arc<Self>, peer_id: PeerId, block_root: &Hash256) {
    let mut guard = self.delayed_lookup_peers.lock();
    match guard.get_mut(block_root) {
        // Root already tracked: just record the additional peer.
        Some(peers) => {
            peers.insert(peer_id);
        }
        // First peer seen for this root: start a fresh set in the LRU cache.
        None => {
            guard.push(*block_root, HashSet::from([peer_id]));
        }
    }
}
/// Process the beacon block received from the gossip network and:
///
/// - If it passes gossip propagation criteria, tell the network thread to forward it.
@ -1112,14 +1143,14 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
let block = verified_block.block.block_cloned();
let block_root = verified_block.block_root;
let delay_lookup = self
.chain
.data_availability_checker
.should_delay_lookup(verified_block.block.slot());
let result = self
.chain
.process_block(
block_root,
verified_block,
NotifyExecutionLayer::Yes,
|| Ok(()),
)
.process_block_with_early_caching(block_root, verified_block, NotifyExecutionLayer::Yes)
.await;
match &result {
@ -1151,12 +1182,26 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
self.chain.recompute_head_at_current_slot().await;
}
Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => {
// make rpc request for blob
self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
*slot,
peer_id,
*block_root,
));
if delay_lookup {
self.cache_peer(peer_id, block_root);
trace!(
self.log,
"Processed block, delaying lookup for other components";
"slot" => slot,
"block_root" => %block_root,
);
} else {
trace!(
self.log,
"Missing block components for gossip verified block";
"slot" => slot,
"block_root" => %block_root,
);
self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
vec![peer_id],
*block_root,
));
}
}
Err(BlockError::ParentUnknown(block)) => {
// Inform the sync manager to find parents for this block
@ -1182,6 +1227,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
Err(BlockError::AvailabilityCheck(err)) => {
match err {
AvailabilityCheckError::KzgNotInitialized
| AvailabilityCheckError::Unexpected
| AvailabilityCheckError::SszTypes(_)
| AvailabilityCheckError::MissingBlobs
| AvailabilityCheckError::StoreError(_)
@ -1194,12 +1240,9 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
}
AvailabilityCheckError::Kzg(_)
| AvailabilityCheckError::KzgVerificationFailed
| AvailabilityCheckError::NumBlobsMismatch { .. }
| AvailabilityCheckError::KzgCommitmentMismatch { .. }
| AvailabilityCheckError::BlobIndexInvalid(_)
| AvailabilityCheckError::UnorderedBlobs { .. }
| AvailabilityCheckError::BlockBlobRootMismatch { .. }
| AvailabilityCheckError::BlockBlobSlotMismatch { .. }
| AvailabilityCheckError::KzgCommitmentMismatch { .. } => {
| AvailabilityCheckError::InconsistentBlobBlockRoots { .. } => {
// Note: we cannot penalize the peer that sent us the block
// over gossip here because these errors imply either an issue
// with:

View File

@ -18,8 +18,11 @@ use lighthouse_network::{
rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage},
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId,
};
use slog::{debug, Logger};
use slot_clock::ManualSlotClock;
use lru::LruCache;
use parking_lot::Mutex;
use slog::{crit, debug, error, trace, Logger};
use slot_clock::{ManualSlotClock, SlotClock};
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
@ -27,6 +30,7 @@ use store::MemoryStore;
use task_executor::test_utils::TestRuntime;
use task_executor::TaskExecutor;
use tokio::sync::mpsc::{self, error::TrySendError};
use tokio::time::{interval_at, Instant};
use types::*;
pub use sync_methods::ChainSegmentProcessId;
@ -40,6 +44,7 @@ mod sync_methods;
mod tests;
pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1;
pub const DELAYED_PEER_CACHE_SIZE: usize = 16;
/// Defines if and where we will store the SSZ files of invalid blocks.
#[derive(Clone)]
@ -60,6 +65,7 @@ pub struct NetworkBeaconProcessor<T: BeaconChainTypes> {
pub reprocess_tx: mpsc::Sender<ReprocessQueueMessage>,
pub network_globals: Arc<NetworkGlobals<T::EthSpec>>,
pub invalid_block_storage: InvalidBlockStorage,
pub delayed_lookup_peers: Mutex<LruCache<Hash256, HashSet<PeerId>>>,
pub executor: TaskExecutor,
pub log: Logger,
}
@ -624,6 +630,68 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
"error" => %e)
});
}
/// This service is responsible for collecting lookup messages and sending them back to sync
/// for processing after a short delay.
///
/// We want to delay lookups triggered from gossip for the following reasons:
///
/// - We only want to make one request for components we are unlikely to see on gossip. This means
///   we don't have to repeatedly update our RPC request's state as we receive gossip components.
///
/// - We are likely to receive blocks/blobs over gossip more quickly than we could via an RPC request.
///
/// - Delaying a lookup means we are less likely to simultaneously download the same blocks/blobs
///   over gossip and RPC.
///
/// - We would prefer to request peers based on whether we've seen them attest, because this gives
///   us an idea about whether they *should* have the block/blobs we're missing. This is because a
///   node should not attest to a block unless it has all the blobs for that block. This gives us a
///   stronger basis for peer scoring.
pub fn spawn_delayed_lookup_service(self: &Arc<Self>) {
    let processor_clone = self.clone();
    let executor = self.executor.clone();
    let log = self.log.clone();
    let beacon_chain = self.chain.clone();
    executor.spawn(
        async move {
            let slot_duration = beacon_chain.slot_clock.slot_duration();
            let delay = beacon_chain.slot_clock.single_lookup_delay();
            // Align the first tick to land `delay` after a slot start: if we are
            // already past `delay` into the current slot, wait until `delay` into
            // the next slot; otherwise wait out the rest of this slot's window.
            let interval_start = match (
                beacon_chain.slot_clock.duration_to_next_slot(),
                beacon_chain.slot_clock.seconds_from_current_slot_start(),
            ) {
                (Some(duration_to_next_slot), Some(seconds_from_current_slot_start)) => {
                    let duration_until_start = if seconds_from_current_slot_start > delay {
                        duration_to_next_slot + delay
                    } else {
                        delay - seconds_from_current_slot_start
                    };
                    Instant::now() + duration_until_start
                }
                _ => {
                    // Slot clock unreadable: start immediately. Timing will drift
                    // from the intended per-slot schedule but the service still runs.
                    // Note: the space precedes the `\` continuation so the rendered
                    // message keeps a space between the two sentences.
                    crit!(log,
                        "Failed to read slot clock, delayed lookup service timing will be inaccurate. \
                        This may degrade performance"
                    );
                    Instant::now()
                }
            };
            let mut interval = interval_at(interval_start, slot_duration);
            loop {
                interval.tick().await;
                let Some(slot) = beacon_chain.slot_clock.now_or_genesis() else {
                    error!(log, "Skipping delayed lookup poll, unable to read slot clock");
                    continue
                };
                trace!(log, "Polling delayed lookups for slot: {slot}");
                processor_clone.poll_delayed_lookups(slot)
            }
        },
        "delayed_lookups",
    );
}
}
type TestBeaconChainType<E> =
@ -666,6 +734,7 @@ impl<E: EthSpec> NetworkBeaconProcessor<TestBeaconChainType<E>> {
reprocess_tx: work_reprocessing_tx,
network_globals,
invalid_block_storage: InvalidBlockStorage::Disabled,
delayed_lookup_peers: Mutex::new(LruCache::new(DELAYED_PEER_CACHE_SIZE)),
executor: runtime.task_executor.clone(),
log,
};

View File

@ -18,14 +18,14 @@ use beacon_processor::{
AsyncFn, BlockingFn, DuplicateCache,
};
use lighthouse_network::PeerAction;
use slog::{debug, error, info, warn};
use slog::{debug, error, info, trace, warn};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::Duration;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::mpsc;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{Epoch, Hash256};
use types::{Epoch, Hash256, Slot};
/// Id associated to a batch processing request, either a sync batch or a parent lookup.
#[derive(Clone, Debug, PartialEq)]
@ -214,7 +214,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
let result = self
.chain
.process_block(block_root, block, NotifyExecutionLayer::Yes, || Ok(()))
.process_block_with_early_caching(block_root, block, NotifyExecutionLayer::Yes)
.await;
metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@ -272,6 +272,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
Box::pin(process_fn)
}
/// Attempt to process a list of blobs received from a direct RPC request.
pub async fn process_rpc_blobs(
self: Arc<NetworkBeaconProcessor<T>>,
block_root: Hash256,
@ -286,10 +287,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
return;
};
let result = self
.chain
.check_rpc_blob_availability_and_import(slot, block_root, blobs)
.await;
let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await;
// Sync handles these results
self.send_sync_message(SyncMessage::BlockComponentProcessed {
@ -298,8 +296,25 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
});
}
pub fn send_delayed_lookup(&self, block_root: Hash256) {
self.send_sync_message(SyncMessage::MissingGossipBlockComponentsDelayed(block_root))
/// Poll the beacon chain for any delayed lookups that are now available.
///
/// For every block root the availability checker reports as still missing
/// components at `slot`, drain its cached peer set and hand the lookup back
/// to sync as a `MissingGossipBlockComponents` message.
pub fn poll_delayed_lookups(&self, slot: Slot) {
    let block_roots = self
        .chain
        .data_availability_checker
        .incomplete_processing_components(slot);
    match block_roots.len() {
        0 => trace!(self.log, "No delayed lookups found on poll"),
        count => debug!(self.log, "Found delayed lookups on poll"; "lookup_count" => count),
    }
    for block_root in block_roots {
        // Pop (rather than get) so the peers for this root are only ever
        // forwarded to sync once.
        let cached_peers = self.delayed_lookup_peers.lock().pop(&block_root);
        if let Some(peer_ids) = cached_peers {
            self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
                peer_ids.into_iter().collect(),
                block_root,
            ));
        }
    }
}
/// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync
@ -481,7 +496,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
.into_iter()
.filter_map(|maybe_available| match maybe_available {
MaybeAvailableBlock::Available(block) => Some(block),
MaybeAvailableBlock::AvailabilityPending(_) => None,
MaybeAvailableBlock::AvailabilityPending { .. } => None,
})
.collect::<Vec<_>>(),
Err(e) => match e {

View File

@ -1,6 +1,7 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug.
#![cfg(test)]
use crate::network_beacon_processor::DELAYED_PEER_CACHE_SIZE;
use crate::{
network_beacon_processor::{
ChainSegmentProcessId, DuplicateCache, InvalidBlockStorage, NetworkBeaconProcessor,
@ -8,6 +9,7 @@ use crate::{
service::NetworkMessage,
sync::{manager::BlockProcessType, SyncMessage},
};
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::test_utils::{
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
@ -22,6 +24,8 @@ use lighthouse_network::{
types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield},
Client, MessageId, NetworkGlobals, PeerId, Response,
};
use lru::LruCache;
use parking_lot::Mutex;
use slot_clock::SlotClock;
use std::iter::Iterator;
use std::sync::Arc;
@ -217,6 +221,7 @@ impl TestRig {
reprocess_tx: work_reprocessing_tx.clone(),
network_globals: network_globals.clone(),
invalid_block_storage: InvalidBlockStorage::Disabled,
delayed_lookup_peers: Mutex::new(LruCache::new(DELAYED_PEER_CACHE_SIZE)),
executor: executor.clone(),
log: log.clone(),
};
@ -297,10 +302,11 @@ impl TestRig {
}
pub fn enqueue_rpc_block(&self) {
let block_root = self.next_block.canonical_root();
self.network_beacon_processor
.send_rpc_beacon_block(
self.next_block.canonical_root(),
self.next_block.clone().into(),
block_root,
RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()),
std::time::Duration::default(),
BlockProcessType::ParentLookup {
chain_hash: Hash256::random(),
@ -310,10 +316,11 @@ impl TestRig {
}
pub fn enqueue_single_lookup_rpc_block(&self) {
let block_root = self.next_block.canonical_root();
self.network_beacon_processor
.send_rpc_beacon_block(
self.next_block.canonical_root(),
self.next_block.clone().into(),
block_root,
RpcBlock::new_without_blobs(Some(block_root), self.next_block.clone().into()),
std::time::Duration::default(),
BlockProcessType::SingleBlock { id: 1 },
)

View File

@ -21,6 +21,8 @@ use lighthouse_network::{
MessageId, NetworkGlobals, PeerId, PeerRequestId, PubsubMessage, Request, Response,
};
use logging::TimeLatch;
use lru::LruCache;
use parking_lot::Mutex;
use slog::{crit, debug, o, trace};
use slog::{error, warn};
use std::sync::Arc;
@ -109,10 +111,14 @@ impl<T: BeaconChainTypes> Router<T> {
reprocess_tx: beacon_processor_reprocess_tx,
network_globals: network_globals.clone(),
invalid_block_storage,
delayed_lookup_peers: Mutex::new(LruCache::new(
crate::network_beacon_processor::DELAYED_PEER_CACHE_SIZE,
)),
executor: executor.clone(),
log: log.clone(),
};
let network_beacon_processor = Arc::new(network_beacon_processor);
network_beacon_processor.spawn_delayed_lookup_service();
// spawn the sync thread
crate::sync::manager::spawn(

View File

@ -8,8 +8,8 @@ use crate::sync::block_lookups::{
};
use crate::sync::manager::{BlockProcessType, Id, SingleLookupReqId};
use crate::sync::network_context::SyncNetworkContext;
use crate::sync::CachedChildComponents;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents};
use beacon_chain::{get_block_root, BeaconChainTypes};
use lighthouse_network::rpc::methods::BlobsByRootRequest;
use lighthouse_network::rpc::BlocksByRootRequest;
@ -19,7 +19,7 @@ use ssz_types::VariableList;
use std::ops::IndexMut;
use std::sync::Arc;
use std::time::Duration;
use types::blob_sidecar::FixedBlobSidecarList;
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock};
#[derive(Debug, Copy, Clone)]
@ -222,11 +222,12 @@ pub trait RequestState<L: Lookup, T: BeaconChainTypes> {
/// triggered by `UnknownParent` errors.
fn add_to_child_components(
verified_response: Self::VerifiedResponseType,
components: &mut CachedChildComponents<T::EthSpec>,
components: &mut ChildComponents<T::EthSpec>,
);
/// Convert a verified response to the type we send to the beacon processor.
fn verified_to_reconstructed(
block_root: Hash256,
verified: Self::VerifiedResponseType,
) -> Self::ReconstructedResponseType;
@ -326,15 +327,16 @@ impl<L: Lookup, T: BeaconChainTypes> RequestState<L, T> for BlockRequestState<L>
fn add_to_child_components(
verified_response: Arc<SignedBeaconBlock<T::EthSpec>>,
components: &mut CachedChildComponents<T::EthSpec>,
components: &mut ChildComponents<T::EthSpec>,
) {
components.add_cached_child_block(verified_response);
components.merge_block(verified_response);
}
fn verified_to_reconstructed(
block_root: Hash256,
block: Arc<SignedBeaconBlock<T::EthSpec>>,
) -> RpcBlock<T::EthSpec> {
RpcBlock::new_without_blobs(block)
RpcBlock::new_without_blobs(Some(block_root), block)
}
fn send_reconstructed_for_processing(
@ -375,9 +377,9 @@ impl<L: Lookup, T: BeaconChainTypes> RequestState<L, T> for BlobRequestState<L,
type ReconstructedResponseType = FixedBlobSidecarList<T::EthSpec>;
fn new_request(&self) -> BlobsByRootRequest {
BlobsByRootRequest {
blob_ids: self.requested_ids.clone().into(),
}
let blob_id_vec: Vec<BlobIdentifier> = self.requested_ids.clone().into();
let blob_ids = VariableList::from(blob_id_vec);
BlobsByRootRequest { blob_ids }
}
fn make_request(
@ -432,12 +434,13 @@ impl<L: Lookup, T: BeaconChainTypes> RequestState<L, T> for BlobRequestState<L,
fn add_to_child_components(
verified_response: FixedBlobSidecarList<T::EthSpec>,
components: &mut CachedChildComponents<T::EthSpec>,
components: &mut ChildComponents<T::EthSpec>,
) {
components.add_cached_child_blobs(verified_response);
components.merge_blobs(verified_response);
}
fn verified_to_reconstructed(
_block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>,
) -> FixedBlobSidecarList<T::EthSpec> {
blobs

View File

@ -1,81 +0,0 @@
use crate::network_beacon_processor::NetworkBeaconProcessor;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use slog::crit;
use slot_clock::SlotClock;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::time::interval_at;
use tokio::time::Instant;
use types::Hash256;
// Message type consumed by the delayed lookup service loop below; produced by
// gossip handlers when a block/blob arrives with components still missing.
#[derive(Debug)]
pub enum DelayedLookupMessage {
    /// A lookup for all components of a block or blob seen over gossip.
    MissingComponents(Hash256),
}
/// This service is responsible for collecting lookup messages and sending them back to sync
/// for processing after a short delay.
///
/// We want to delay lookups triggered from gossip for the following reasons:
///
/// - We only want to make one request for components we are unlikely to see on gossip. This means
/// we don't have to repeatedly update our RPC request's state as we receive gossip components.
///
/// - We are likely to receive blocks/blobs over gossip more quickly than we could via an RPC request.
///
/// - Delaying a lookup means we are less likely to simultaneously download the same blocks/blobs
/// over gossip and RPC.
///
/// - We would prefer to request peers based on whether we've seen them attest, because this gives
/// us an idea about whether they *should* have the block/blobs we're missing. This is because a
/// node should not attest to a block unless it has all the blobs for that block. This gives us a
/// stronger basis for peer scoring.
pub fn spawn_delayed_lookup_service<T: BeaconChainTypes>(
    executor: &task_executor::TaskExecutor,
    beacon_chain: Arc<BeaconChain<T>>,
    mut delayed_lookups_recv: mpsc::Receiver<DelayedLookupMessage>,
    beacon_processor: Arc<NetworkBeaconProcessor<T>>,
    log: slog::Logger,
) {
    executor.spawn(
        async move {
            let slot_duration = beacon_chain.slot_clock.slot_duration();
            let delay = beacon_chain.slot_clock.single_lookup_delay();
            // Align the first tick to land `delay` after a slot start: if we are
            // already past `delay` into the current slot, wait until `delay` into
            // the next slot; otherwise wait out the rest of this slot's window.
            let interval_start = match (
                beacon_chain.slot_clock.duration_to_next_slot(),
                beacon_chain.slot_clock.seconds_from_current_slot_start(),
            ) {
                (Some(duration_to_next_slot), Some(seconds_from_current_slot_start)) => {
                    let duration_until_start = if seconds_from_current_slot_start > delay {
                        duration_to_next_slot + delay
                    } else {
                        delay - seconds_from_current_slot_start
                    };
                    Instant::now() + duration_until_start
                }
                _ => {
                    // NOTE(review): the `\` continuation here swallows the newline
                    // and following whitespace, so the rendered message reads
                    // "...inaccurate.This may degrade performance" (missing space).
                    crit!(log,
                        "Failed to read slot clock, delayed lookup service timing will be inaccurate.\
                        This may degrade performance"
                    );
                    Instant::now()
                }
            };
            let mut interval = interval_at(interval_start, slot_duration);
            loop {
                interval.tick().await;
                // Drain every queued message each tick; `try_recv` returns Err
                // once the channel is empty (or closed), ending the inner loop.
                while let Ok(msg) = delayed_lookups_recv.try_recv() {
                    match msg {
                        DelayedLookupMessage::MissingComponents(block_root) => {
                            beacon_processor
                                .send_delayed_lookup(block_root)
                        }
                    }
                }
            }
        },
        "delayed_lookups",
    );
}

View File

@ -12,6 +12,7 @@ use crate::sync::block_lookups::single_block_lookup::{
};
use crate::sync::manager::{Id, SingleLookupReqId};
use beacon_chain::block_verification_types::{AsBlock, RpcBlock};
pub use beacon_chain::data_availability_checker::ChildComponents;
use beacon_chain::data_availability_checker::{AvailabilityCheckError, DataAvailabilityChecker};
use beacon_chain::validator_monitor::timestamp_now;
use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError};
@ -23,7 +24,6 @@ use fnv::FnvHashMap;
use lighthouse_network::rpc::RPCError;
use lighthouse_network::{PeerAction, PeerId};
use lru_cache::LRUTimeCache;
pub use single_block_lookup::CachedChildComponents;
pub use single_block_lookup::{BlobRequestState, BlockRequestState};
use slog::{debug, error, trace, warn, Logger};
use smallvec::SmallVec;
@ -37,7 +37,6 @@ use types::blob_sidecar::FixedBlobSidecarList;
use types::Slot;
pub mod common;
pub(crate) mod delayed_lookup;
mod parent_lookup;
mod single_block_lookup;
#[cfg(test)]
@ -122,34 +121,10 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
pub fn search_block(
&mut self,
block_root: Hash256,
peer_source: PeerShouldHave,
peer_source: &[PeerShouldHave],
cx: &mut SyncNetworkContext<T>,
) {
let lookup = self.new_current_lookup(block_root, None, &[peer_source], cx);
if let Some(lookup) = lookup {
let msg = "Searching for block";
lookup_creation_logging(msg, &lookup, peer_source, &self.log);
self.trigger_single_lookup(lookup, cx);
}
}
/// Creates a lookup for the block with the given `block_root`.
///
/// The request is not immediately triggered, and should be triggered by a call to
/// `trigger_lookup_by_root`.
pub fn search_block_delayed(
&mut self,
block_root: Hash256,
peer_source: PeerShouldHave,
cx: &mut SyncNetworkContext<T>,
) {
let lookup = self.new_current_lookup(block_root, None, &[peer_source], cx);
if let Some(lookup) = lookup {
let msg = "Initialized delayed lookup for block";
lookup_creation_logging(msg, &lookup, peer_source, &self.log);
self.add_single_lookup(lookup)
}
self.new_current_lookup(block_root, None, peer_source, cx)
}
/// Creates a lookup for the block with the given `block_root`, while caching other block
@ -161,44 +136,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
pub fn search_child_block(
&mut self,
block_root: Hash256,
child_components: CachedChildComponents<T::EthSpec>,
peer_source: PeerShouldHave,
child_components: ChildComponents<T::EthSpec>,
peer_source: &[PeerShouldHave],
cx: &mut SyncNetworkContext<T>,
) {
if child_components.is_missing_components() {
let lookup =
self.new_current_lookup(block_root, Some(child_components), &[peer_source], cx);
if let Some(lookup) = lookup {
let msg = "Searching for components of a block with unknown parent";
lookup_creation_logging(msg, &lookup, peer_source, &self.log);
self.trigger_single_lookup(lookup, cx);
}
}
}
/// Creates a lookup for the block with the given `block_root`, while caching other block
/// components we've already received. The block components are cached here because we haven't
/// imported it's parent and therefore can't fully validate it and store it in the data
/// availability cache.
///
/// The request is not immediately triggered, and should be triggered by a call to
/// `trigger_lookup_by_root`.
pub fn search_child_delayed(
&mut self,
block_root: Hash256,
child_components: CachedChildComponents<T::EthSpec>,
peer_source: PeerShouldHave,
cx: &mut SyncNetworkContext<T>,
) {
if child_components.is_missing_components() {
let lookup =
self.new_current_lookup(block_root, Some(child_components), &[peer_source], cx);
if let Some(lookup) = lookup {
let msg = "Initialized delayed lookup for block with unknown parent";
lookup_creation_logging(msg, &lookup, peer_source, &self.log);
self.add_single_lookup(lookup)
}
}
self.new_current_lookup(block_root, Some(child_components), peer_source, cx)
}
/// Attempts to trigger the request matching the given `block_root`.
@ -230,47 +172,15 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
);
}
/// Trigger any lookups that are waiting for the given `block_root`.
pub fn trigger_lookup_by_root(&mut self, block_root: Hash256, cx: &SyncNetworkContext<T>) {
self.single_block_lookups.retain(|_id, lookup| {
if lookup.block_root() == block_root {
if lookup.da_checker.is_deneb() {
let blob_indices = lookup.blob_request_state.requested_ids.indices();
debug!(
self.log,
"Triggering delayed single lookup";
"block" => ?block_root,
"blob_indices" => ?blob_indices
);
} else {
debug!(
self.log,
"Triggering delayed single lookup";
"block" => ?block_root,
);
}
if let Err(e) = lookup.request_block_and_blobs(cx) {
debug!(self.log, "Delayed single block lookup failed";
"error" => ?e,
"block_root" => ?block_root,
);
return false;
}
}
true
});
}
/// Searches for a single block hash. If the blocks parent is unknown, a chain of blocks is
/// constructed.
pub fn new_current_lookup(
&mut self,
block_root: Hash256,
child_components: Option<CachedChildComponents<T::EthSpec>>,
child_components: Option<ChildComponents<T::EthSpec>>,
peers: &[PeerShouldHave],
cx: &mut SyncNetworkContext<T>,
) -> Option<SingleBlockLookup<Current, T>> {
) {
// Do not re-request a block that is already being requested
if let Some((_, lookup)) = self
.single_block_lookups
@ -281,7 +191,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
if let Some(components) = child_components {
lookup.add_child_components(components);
}
return None;
return;
}
if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| {
@ -291,7 +201,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
// If the block was already downloaded, or is being downloaded in this moment, do not
// request it.
return None;
return;
}
if self
@ -300,16 +210,30 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
.any(|(hashes, _last_parent_request)| hashes.contains(&block_root))
{
// we are already processing this block, ignore it.
return None;
return;
}
Some(SingleBlockLookup::new(
let msg = if child_components.is_some() {
"Searching for components of a block with unknown parent"
} else {
"Searching for block components"
};
let lookup = SingleBlockLookup::new(
block_root,
child_components,
peers,
self.da_checker.clone(),
cx.next_id(),
))
);
debug!(
self.log,
"{}", msg;
"peer_ids" => ?peers,
"block" => ?block_root,
);
self.trigger_single_lookup(lookup, cx);
}
/// If a block is attempted to be processed but we do not know its parent, this function is
@ -337,7 +261,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
if let Some(parent_lookup) = self.parent_lookups.iter_mut().find(|parent_req| {
parent_req.contains_block(&block_root) || parent_req.is_for_block(block_root)
}) {
parent_lookup.add_peers(&[peer_source]);
parent_lookup.add_peer(peer_source);
// we are already searching for this block, ignore it
return;
}
@ -540,7 +464,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
id,
self,
block_root,
R::verified_to_reconstructed(verified_response),
R::verified_to_reconstructed(block_root, verified_response),
seen_timestamp,
cx,
)?,
@ -975,9 +899,9 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
AvailabilityCheckError::KzgNotInitialized
| AvailabilityCheckError::SszTypes(_)
| AvailabilityCheckError::MissingBlobs
| AvailabilityCheckError::UnorderedBlobs { .. }
| AvailabilityCheckError::StoreError(_)
| AvailabilityCheckError::DecodeError(_) => {
| AvailabilityCheckError::DecodeError(_)
| AvailabilityCheckError::Unexpected => {
warn!(self.log, "Internal availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e);
lookup
.block_request_state
@ -990,20 +914,12 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
lookup.request_block_and_blobs(cx)?
}
// Invalid block and blob comparison.
AvailabilityCheckError::NumBlobsMismatch { .. }
| AvailabilityCheckError::KzgCommitmentMismatch { .. }
| AvailabilityCheckError::BlockBlobRootMismatch { .. }
| AvailabilityCheckError::BlockBlobSlotMismatch { .. } => {
warn!(self.log, "Availability check failure in consistency"; "root" => %root, "peer_id" => %peer_id, "error" => ?e);
lookup.handle_consistency_failure(cx);
lookup.request_block_and_blobs(cx)?
}
// Malicious errors.
AvailabilityCheckError::Kzg(_)
| AvailabilityCheckError::BlobIndexInvalid(_)
| AvailabilityCheckError::KzgVerificationFailed => {
| AvailabilityCheckError::KzgCommitmentMismatch { .. }
| AvailabilityCheckError::KzgVerificationFailed
| AvailabilityCheckError::InconsistentBlobBlockRoots { .. } => {
warn!(self.log, "Availability check failure"; "root" => %root, "peer_id" => %peer_id, "error" => ?e);
lookup.handle_availability_check_failure(cx);
lookup.request_block_and_blobs(cx)?
@ -1480,29 +1396,3 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
self.parent_lookups.drain(..).len()
}
}
/// Emit a debug log for a newly created lookup.
///
/// When the DA checker reports the Deneb fork is active, the requested blob
/// indices are included in the log line; before Deneb there are no blobs, so
/// the field is omitted.
fn lookup_creation_logging<L: Lookup, T: BeaconChainTypes>(
    msg: &str,
    lookup: &SingleBlockLookup<L, T>,
    peer_source: PeerShouldHave,
    log: &Logger,
) {
    let block_root = lookup.block_root();
    // Guard clause: pre-Deneb lookups have no blob indices to report.
    if !lookup.da_checker.is_deneb() {
        debug!(
            log,
            "{}", msg;
            "peer_id" => ?peer_source,
            "block" => ?block_root,
        );
        return;
    }
    let blob_indices = lookup.blob_request_state.requested_ids.indices();
    debug!(
        log,
        "{}", msg;
        "peer_id" => ?peer_source,
        "block" => ?block_root,
        "blob_indices" => ?blob_indices
    );
}

View File

@ -5,7 +5,7 @@ use crate::sync::block_lookups::common::RequestState;
use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext};
use beacon_chain::block_verification_types::AsBlock;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::data_availability_checker::DataAvailabilityChecker;
use beacon_chain::data_availability_checker::{ChildComponents, DataAvailabilityChecker};
use beacon_chain::BeaconChainTypes;
use itertools::Itertools;
use lighthouse_network::PeerId;
@ -67,7 +67,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
) -> Self {
let current_parent_request = SingleBlockLookup::new(
parent_root,
Some(<_>::default()),
Some(ChildComponents::empty(block_root)),
&[peer_id],
da_checker,
cx.next_id(),
@ -179,7 +179,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
.blob_request_state
.state
.register_failure_processing();
if let Some(components) = self.current_parent_request.cached_child_components.as_mut() {
if let Some(components) = self.current_parent_request.child_components.as_mut() {
components.downloaded_block = None;
components.downloaded_blobs = <_>::default();
}
@ -211,8 +211,13 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
Ok(root_and_verified)
}
pub fn add_peers(&mut self, peer_source: &[PeerShouldHave]) {
self.current_parent_request.add_peers(peer_source)
pub fn add_peer(&mut self, peer: PeerShouldHave) {
self.current_parent_request.add_peer(peer)
}
/// Adds a list of peers to the parent request.
pub fn add_peers(&mut self, peers: &[PeerShouldHave]) {
self.current_parent_request.add_peers(peers)
}
pub fn used_peers(&self) -> impl Iterator<Item = &PeerId> + '_ {

View File

@ -3,20 +3,21 @@ use crate::sync::block_lookups::common::{Lookup, RequestState};
use crate::sync::block_lookups::Id;
use crate::sync::network_context::SyncNetworkContext;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::data_availability_checker::{AvailabilityCheckError, DataAvailabilityChecker};
use beacon_chain::data_availability_checker::{
AvailabilityCheckError, DataAvailabilityChecker, MissingBlobs,
};
use beacon_chain::data_availability_checker::{AvailabilityView, ChildComponents};
use beacon_chain::BeaconChainTypes;
use lighthouse_network::rpc::methods::MaxRequestBlobSidecars;
use lighthouse_network::{PeerAction, PeerId};
use slog::{trace, Logger};
use ssz_types::VariableList;
use std::collections::HashSet;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::sync::Arc;
use store::Hash256;
use strum::IntoStaticStr;
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
use types::{EthSpec, SignedBeaconBlock};
use types::blob_sidecar::FixedBlobSidecarList;
use types::EthSpec;
#[derive(Debug, PartialEq, Eq)]
pub enum State {
@ -58,23 +59,24 @@ pub struct SingleBlockLookup<L: Lookup, T: BeaconChainTypes> {
pub da_checker: Arc<DataAvailabilityChecker<T>>,
/// Only necessary for requests triggered by an `UnknownBlockParent` or `UnknownBlockParent`
/// because any blocks or blobs without parents won't hit the data availability cache.
pub cached_child_components: Option<CachedChildComponents<T::EthSpec>>,
pub child_components: Option<ChildComponents<T::EthSpec>>,
}
impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
pub fn new(
requested_block_root: Hash256,
unknown_parent_components: Option<CachedChildComponents<T::EthSpec>>,
child_components: Option<ChildComponents<T::EthSpec>>,
peers: &[PeerShouldHave],
da_checker: Arc<DataAvailabilityChecker<T>>,
id: Id,
) -> Self {
let is_deneb = da_checker.is_deneb();
Self {
id,
block_request_state: BlockRequestState::new(requested_block_root, peers),
blob_request_state: BlobRequestState::new(peers),
blob_request_state: BlobRequestState::new(requested_block_root, peers, is_deneb),
da_checker,
cached_child_components: unknown_parent_components,
child_components,
}
}
@ -94,7 +96,7 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
self.block_request_state.requested_block_root = block_root;
self.block_request_state.state.state = State::AwaitingDownload;
self.blob_request_state.state.state = State::AwaitingDownload;
self.cached_child_components = Some(CachedChildComponents::default());
self.child_components = Some(ChildComponents::empty(block_root));
}
/// Get all unique peers across block and blob requests.
@ -134,7 +136,7 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
/// 3. `Ok`: The child is required and we have downloaded it.
/// 4. `Err`: The child is required, but has failed consistency checks.
pub fn get_cached_child_block(&self) -> CachedChild<T::EthSpec> {
if let Some(components) = self.cached_child_components.as_ref() {
if let Some(components) = self.child_components.as_ref() {
let Some(block) = components.downloaded_block.as_ref() else {
return CachedChild::DownloadIncomplete;
};
@ -143,7 +145,11 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
return CachedChild::DownloadIncomplete;
}
match RpcBlock::new_from_fixed(block.clone(), components.downloaded_blobs.clone()) {
match RpcBlock::new_from_fixed(
self.block_request_state.requested_block_root,
block.clone(),
components.downloaded_blobs.clone(),
) {
Ok(rpc_block) => CachedChild::Ok(rpc_block),
Err(e) => CachedChild::Err(e),
}
@ -159,8 +165,8 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
&mut self,
verified_response: R::VerifiedResponseType,
) -> CachedChild<T::EthSpec> {
if let Some(cached_child_components) = self.cached_child_components.as_mut() {
R::add_to_child_components(verified_response, cached_child_components);
if let Some(child_components) = self.child_components.as_mut() {
R::add_to_child_components(verified_response, child_components);
self.get_cached_child_block()
} else {
CachedChild::NotRequired
@ -168,34 +174,40 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
}
/// Add a child component to the lookup request. Merges with any existing child components.
pub fn add_child_components(&mut self, components: CachedChildComponents<T::EthSpec>) {
if let Some(ref mut existing_components) = self.cached_child_components {
let CachedChildComponents {
pub fn add_child_components(&mut self, components: ChildComponents<T::EthSpec>) {
if let Some(ref mut existing_components) = self.child_components {
let ChildComponents {
block_root: _,
downloaded_block,
downloaded_blobs,
} = components;
if let Some(block) = downloaded_block {
existing_components.add_cached_child_block(block);
existing_components.merge_block(block);
}
existing_components.add_cached_child_blobs(downloaded_blobs);
existing_components.merge_blobs(downloaded_blobs);
} else {
self.cached_child_components = Some(components);
self.child_components = Some(components);
}
}
/// Add the given peer to both the block and blob request states.
///
/// A `BlockAndBlobs` peer is registered as an available peer for both
/// requests; a `Neither` peer is only registered as a *potential* peer
/// for both requests (via `add_potential_peer`).
pub fn add_peer(&mut self, peer: PeerShouldHave) {
    match peer {
        PeerShouldHave::BlockAndBlobs(peer_id) => {
            self.block_request_state.state.add_peer(&peer_id);
            self.blob_request_state.state.add_peer(&peer_id);
        }
        PeerShouldHave::Neither(peer_id) => {
            self.block_request_state.state.add_potential_peer(&peer_id);
            self.blob_request_state.state.add_potential_peer(&peer_id);
        }
    }
}
/// Add all given peers to both block and blob request states.
pub fn add_peers(&mut self, peers: &[PeerShouldHave]) {
for peer in peers {
match peer {
PeerShouldHave::BlockAndBlobs(peer_id) => {
self.block_request_state.state.add_peer(peer_id);
self.blob_request_state.state.add_peer(peer_id);
}
PeerShouldHave::Neither(peer_id) => {
self.block_request_state.state.add_potential_peer(peer_id);
self.blob_request_state.state.add_potential_peer(peer_id);
}
}
self.add_peer(*peer);
}
}
@ -243,8 +255,8 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
/// Returns `true` if the block has already been downloaded.
pub(crate) fn block_already_downloaded(&self) -> bool {
if let Some(components) = self.cached_child_components.as_ref() {
components.downloaded_block.is_some()
if let Some(components) = self.child_components.as_ref() {
components.block_exists()
} else {
self.da_checker.has_block(&self.block_root())
}
@ -260,26 +272,24 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
/// Updates this request with the most recent picture of which blobs still need to be requested.
pub fn update_blobs_request(&mut self) {
self.blob_request_state.requested_ids = self.missing_blob_ids().into()
self.blob_request_state.requested_ids = self.missing_blob_ids();
}
/// If `unknown_parent_components` is `Some`, we know block components won't hit the data
/// availability cache, so we don't check it. In either case we use the data availability
/// checker to get a picture of outstanding blob requirements for the block root.
pub(crate) fn missing_blob_ids(&self) -> Vec<BlobIdentifier> {
if let Some(components) = self.cached_child_components.as_ref() {
let blobs = components.downloaded_indices();
self.da_checker
.get_missing_blob_ids(
self.block_root(),
components.downloaded_block.as_ref(),
Some(blobs),
)
.unwrap_or_default()
/// Compute the current picture of which blobs are still missing for this lookup.
///
/// If `child_components` is `Some`, we know block components won't hit the data
/// availability cache, so we don't check its processing cache unless `child_components`
/// is `None`.
pub(crate) fn missing_blob_ids(&self) -> MissingBlobs {
    let block_root = self.block_root();
    if let Some(components) = self.child_components.as_ref() {
        // Child lookup: derive missing blobs from the locally cached components.
        self.da_checker.get_missing_blob_ids(block_root, components)
    } else {
        let Some(processing_availability_view) =
            self.da_checker.get_processing_components(block_root)
        else {
            // Nothing cached for this root yet, so treat all components as missing.
            return MissingBlobs::new_without_block(block_root, self.da_checker.is_deneb());
        };
        self.da_checker
            .get_missing_blob_ids(block_root, &processing_availability_view)
    }
}
@ -305,7 +315,7 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
/// necessary and clear the blob cache.
pub fn handle_consistency_failure(&mut self, cx: &SyncNetworkContext<T>) {
self.penalize_blob_peer(false, cx);
if let Some(cached_child) = self.cached_child_components.as_mut() {
if let Some(cached_child) = self.child_components.as_mut() {
cached_child.clear_blobs();
}
self.blob_request_state.state.register_failure_downloading()
@ -315,49 +325,19 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
/// necessary and clear the blob cache.
pub fn handle_availability_check_failure(&mut self, cx: &SyncNetworkContext<T>) {
self.penalize_blob_peer(true, cx);
if let Some(cached_child) = self.cached_child_components.as_mut() {
if let Some(cached_child) = self.child_components.as_mut() {
cached_child.clear_blobs();
}
self.blob_request_state.state.register_failure_processing()
}
}
/// The set of blob identifiers a lookup still needs to request.
#[derive(Clone, Default)]
pub struct RequestedBlobIds(Vec<BlobIdentifier>);

impl From<Vec<BlobIdentifier>> for RequestedBlobIds {
    fn from(value: Vec<BlobIdentifier>) -> Self {
        Self(value)
    }
}

// Implement `From` rather than `Into` (clippy::from_over_into): the blanket
// `impl<T, U: From<T>> Into<U> for T` keeps any existing `.into()` call sites
// working, so this is backward compatible.
impl From<RequestedBlobIds> for VariableList<BlobIdentifier, MaxRequestBlobSidecars> {
    fn from(value: RequestedBlobIds) -> Self {
        VariableList::from(value.0)
    }
}

impl RequestedBlobIds {
    /// Returns `true` if no blob ids remain to be requested.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// Returns `true` if `blob_id` is still outstanding.
    pub fn contains(&self, blob_id: &BlobIdentifier) -> bool {
        self.0.contains(blob_id)
    }
    /// Removes every occurrence of `blob_id` from the outstanding set.
    pub fn remove(&mut self, blob_id: &BlobIdentifier) {
        self.0.retain(|id| id != blob_id)
    }
    /// The indices of the outstanding blob ids (used for logging).
    pub fn indices(&self) -> Vec<u64> {
        self.0.iter().map(|id| id.index).collect()
    }
}
/// The state of the blob request component of a `SingleBlockLookup`.
pub struct BlobRequestState<L: Lookup, T: EthSpec> {
/// The latest picture of which blobs still need to be requested. This includes information
/// from both block/blobs downloaded in the network layer and any blocks/blobs that exist in
/// the data availability checker.
pub requested_ids: RequestedBlobIds,
pub requested_ids: MissingBlobs,
/// Where we store blobs until we receive the stream terminator.
pub blob_download_queue: FixedBlobSidecarList<T>,
pub state: SingleLookupRequestState,
@ -365,9 +345,10 @@ pub struct BlobRequestState<L: Lookup, T: EthSpec> {
}
impl<L: Lookup, E: EthSpec> BlobRequestState<L, E> {
pub fn new(peer_source: &[PeerShouldHave]) -> Self {
pub fn new(block_root: Hash256, peer_source: &[PeerShouldHave], is_deneb: bool) -> Self {
let default_ids = MissingBlobs::new_without_block(block_root, is_deneb);
Self {
requested_ids: <_>::default(),
requested_ids: default_ids,
blob_download_queue: <_>::default(),
state: SingleLookupRequestState::new(peer_source),
_phantom: PhantomData,
@ -408,73 +389,6 @@ pub enum CachedChild<E: EthSpec> {
/// There was an error during consistency checks between block and blobs.
Err(AvailabilityCheckError),
}
/// For requests triggered by an `UnknownBlockParent` or `UnknownBlobParent`, this struct
/// is used to cache components as they are sent to the network service. We can't use the
/// data availability cache currently because any blocks or blobs without parents
/// won't pass validation and therefore won't make it into the cache.
#[derive(Default)]
pub struct CachedChildComponents<E: EthSpec> {
    /// The downloaded block, if any.
    pub downloaded_block: Option<Arc<SignedBeaconBlock<E>>>,
    /// Downloaded blobs, positioned by blob index.
    pub downloaded_blobs: FixedBlobSidecarList<E>,
}

impl<E: EthSpec> From<RpcBlock<E>> for CachedChildComponents<E> {
    fn from(value: RpcBlock<E>) -> Self {
        let (block, blobs) = value.deconstruct();
        let fixed_blobs = blobs.map(|blobs| {
            FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::<Vec<_>>())
        });
        Self::new(Some(block), fixed_blobs)
    }
}

impl<E: EthSpec> CachedChildComponents<E> {
    /// Create components from an optional block and an optional blob list.
    pub fn new(
        block: Option<Arc<SignedBeaconBlock<E>>>,
        blobs: Option<FixedBlobSidecarList<E>>,
    ) -> Self {
        Self {
            downloaded_block: block,
            downloaded_blobs: blobs.unwrap_or_default(),
        }
    }

    /// Drop all cached blobs (e.g. after a failed consistency check).
    pub fn clear_blobs(&mut self) {
        self.downloaded_blobs = FixedBlobSidecarList::default();
    }

    /// Cache the downloaded block, replacing any previously cached one.
    pub fn add_cached_child_block(&mut self, block: Arc<SignedBeaconBlock<E>>) {
        self.downloaded_block = Some(block);
    }

    /// Merge `blobs` into the cache: only present (`Some`) entries overwrite the
    /// corresponding slot, so blobs already cached at other indices are kept.
    pub fn add_cached_child_blobs(&mut self, blobs: FixedBlobSidecarList<E>) {
        // Both lists are fixed-length and equally sized, so `zip` visits every
        // slot without the per-index `get` and double-`Option` match.
        for (existing, incoming) in self.downloaded_blobs.iter_mut().zip(blobs.iter()) {
            if let Some(downloaded_blob) = incoming {
                *existing = Some(downloaded_blob.clone());
            }
        }
    }

    /// The set of blob indices downloaded so far.
    pub fn downloaded_indices(&self) -> HashSet<usize> {
        self.downloaded_blobs
            .iter()
            .enumerate()
            .filter_map(|(i, blob_opt)| blob_opt.as_ref().map(|_| i))
            .collect::<HashSet<_>>()
    }

    /// Returns `true` if the block is missing, or if the number of downloaded
    /// blobs differs from the number of blobs the block commits to.
    pub fn is_missing_components(&self) -> bool {
        // `map_or` (clippy::map_unwrap_or): a missing block means components
        // are missing by definition.
        self.downloaded_block.as_ref().map_or(true, |block| {
            block.num_expected_blobs()
                != self.downloaded_blobs.iter().filter(|b| b.is_some()).count()
        })
    }
}
/// Object representing the state of a single block or blob lookup request.
#[derive(PartialEq, Eq, Debug)]
pub struct SingleLookupRequestState {
@ -516,6 +430,7 @@ impl SingleLookupRequestState {
}
}
}
Self {
state: State::AwaitingDownload,
available_peers,
@ -703,10 +618,11 @@ mod tests {
Duration::from_secs(spec.seconds_per_slot),
);
let log = NullLoggerBuilder.build().expect("logger should build");
let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log)
.expect("store");
let store =
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone())
.expect("store");
let da_checker = Arc::new(
DataAvailabilityChecker::new(slot_clock, None, store.into(), spec)
DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec)
.expect("data availability checker"),
);
let mut sl = SingleBlockLookup::<TestLookup1, T>::new(
@ -742,11 +658,12 @@ mod tests {
Duration::from_secs(spec.seconds_per_slot),
);
let log = NullLoggerBuilder.build().expect("logger should build");
let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log)
.expect("store");
let store =
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone())
.expect("store");
let da_checker = Arc::new(
DataAvailabilityChecker::new(slot_clock, None, store.into(), spec)
DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec)
.expect("data availability checker"),
);

View File

@ -10,19 +10,18 @@ use super::*;
use crate::sync::block_lookups::common::ResponseType;
use beacon_chain::builder::Witness;
use beacon_chain::eth1_chain::CachingEth1Backend;
use beacon_chain::test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType};
use beacon_chain::test_utils::{
build_log, generate_rand_block_and_blobs, BeaconChainHarness, EphemeralHarnessType, NumBlobs,
};
use beacon_processor::WorkEvent;
use lighthouse_network::rpc::RPCResponseErrorCode;
use lighthouse_network::{NetworkGlobals, Request};
use rand::Rng;
use slot_clock::{ManualSlotClock, SlotClock, TestingSlotClock};
use store::MemoryStore;
use tokio::sync::mpsc;
use types::{
map_fork_name, map_fork_name_with,
test_utils::{SeedableRng, TestRandom, XorShiftRng},
BeaconBlock, BlobSidecar, EthSpec, ForkName, FullPayloadDeneb, MinimalEthSpec as E,
SignedBeaconBlock,
test_utils::{SeedableRng, XorShiftRng},
BlobSidecar, EthSpec, ForkName, MinimalEthSpec as E, SignedBeaconBlock,
};
type T = Witness<ManualSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
@ -36,11 +35,6 @@ struct TestRig {
const D: Duration = Duration::new(0, 0);
enum NumBlobs {
Random,
None,
}
impl TestRig {
fn test_setup(enable_log: bool) -> (BlockLookups<T>, SyncNetworkContext<T>, Self) {
let log = build_log(slog::Level::Debug, enable_log);
@ -97,59 +91,10 @@ impl TestRig {
fork_name: ForkName,
num_blobs: NumBlobs,
) -> (SignedBeaconBlock<E>, Vec<BlobSidecar<E>>) {
let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(&mut self.rng));
let mut block =
SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(&mut self.rng));
let mut blob_sidecars = vec![];
if let Ok(message) = block.message_deneb_mut() {
// get random number between 0 and Max Blobs
let payload: &mut FullPayloadDeneb<E> = &mut message.body.execution_payload;
let num_blobs = match num_blobs {
NumBlobs::Random => 1 + self.rng.gen::<usize>() % E::max_blobs_per_block(),
NumBlobs::None => 0,
};
let (bundle, transactions) =
execution_layer::test_utils::generate_random_blobs::<E, _>(
num_blobs,
self.harness.chain.kzg.as_ref().unwrap(),
&mut self.rng,
)
.unwrap();
let kzg = self.harness.chain.kzg.as_ref().unwrap();
let rng = &mut self.rng;
payload.execution_payload.transactions = <_>::default();
for tx in Vec::from(transactions) {
payload.execution_payload.transactions.push(tx).unwrap();
}
message.body.blob_kzg_commitments = bundle.commitments.clone();
let eth2::types::BlobsBundle {
commitments,
proofs,
blobs,
} = bundle;
let block_root = block.canonical_root();
for (index, ((blob, kzg_commitment), kzg_proof)) in blobs
.into_iter()
.zip(commitments.into_iter())
.zip(proofs.into_iter())
.enumerate()
{
blob_sidecars.push(BlobSidecar {
block_root,
index: index as u64,
slot: block.slot(),
block_parent_root: block.parent_root(),
proposer_index: block.message().proposer_index(),
blob: blob.clone(),
kzg_commitment,
kzg_proof,
});
}
}
(block, blob_sidecars)
generate_rand_block_and_blobs::<E>(fork_name, num_blobs, kzg.as_ref(), rng)
}
#[track_caller]
@ -292,7 +237,11 @@ fn test_single_block_lookup_happy_path() {
let peer_id = PeerId::random();
let block_root = block.canonical_root();
// Trigger the request
bl.search_block(block_root, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx);
bl.search_block(
block_root,
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut cx,
);
let id = rig.expect_lookup_request(response_type);
// If we're in deneb, a blob request should have been triggered as well,
// we don't require a response because we're generateing 0-blob blocks in this test.
@ -340,7 +289,11 @@ fn test_single_block_lookup_empty_response() {
let peer_id = PeerId::random();
// Trigger the request
bl.search_block(block_hash, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx);
bl.search_block(
block_hash,
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut cx,
);
let id = rig.expect_lookup_request(response_type);
// If we're in deneb, a blob request should have been triggered as well,
// we don't require a response because we're generateing 0-blob blocks in this test.
@ -368,7 +321,11 @@ fn test_single_block_lookup_wrong_response() {
let peer_id = PeerId::random();
// Trigger the request
bl.search_block(block_hash, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx);
bl.search_block(
block_hash,
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut cx,
);
let id = rig.expect_lookup_request(response_type);
// If we're in deneb, a blob request should have been triggered as well,
// we don't require a response because we're generateing 0-blob blocks in this test.
@ -406,7 +363,11 @@ fn test_single_block_lookup_failure() {
let peer_id = PeerId::random();
// Trigger the request
bl.search_block(block_hash, PeerShouldHave::BlockAndBlobs(peer_id), &mut cx);
bl.search_block(
block_hash,
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut cx,
);
let id = rig.expect_lookup_request(response_type);
// If we're in deneb, a blob request should have been triggered as well,
// we don't require a response because we're generateing 0-blob blocks in this test.
@ -440,7 +401,7 @@ fn test_single_block_lookup_becomes_parent_request() {
// Trigger the request
bl.search_block(
block.canonical_root(),
PeerShouldHave::BlockAndBlobs(peer_id),
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut cx,
);
let id = rig.expect_lookup_request(response_type);
@ -469,7 +430,7 @@ fn test_single_block_lookup_becomes_parent_request() {
// parent request after processing.
bl.single_block_component_processed::<BlockRequestState<Current>>(
id.id,
BlockError::ParentUnknown(block.into()).into(),
BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(),
&mut cx,
);
assert_eq!(bl.single_block_lookups.len(), 1);
@ -978,7 +939,7 @@ fn test_parent_lookup_too_deep() {
// the processing result
bl.parent_block_processed(
chain_hash,
BlockError::ParentUnknown(block.into()).into(),
BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(),
&mut cx,
)
}
@ -1026,7 +987,7 @@ fn test_single_block_lookup_ignored_response() {
// Trigger the request
bl.search_block(
block.canonical_root(),
PeerShouldHave::BlockAndBlobs(peer_id),
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut cx,
);
let id = rig.expect_lookup_request(response_type);
@ -1188,7 +1149,7 @@ fn test_same_chain_race_condition() {
} else {
bl.parent_block_processed(
chain_hash,
BlockError::ParentUnknown(block.into()).into(),
BlockError::ParentUnknown(RpcBlock::new_without_blobs(None, block)).into(),
&mut cx,
)
}
@ -1223,6 +1184,7 @@ mod deneb_only {
use super::*;
use crate::sync::block_lookups::common::ResponseType;
use beacon_chain::data_availability_checker::AvailabilityCheckError;
use beacon_chain::test_utils::NumBlobs;
use std::ops::IndexMut;
use std::str::FromStr;
@ -1278,7 +1240,7 @@ mod deneb_only {
RequestTrigger::AttestationUnknownBlock => {
bl.search_block(
block_root,
PeerShouldHave::BlockAndBlobs(peer_id),
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut cx,
);
let block_req_id = rig.expect_lookup_request(ResponseType::Block);
@ -1302,8 +1264,8 @@ mod deneb_only {
block_root = child_root;
bl.search_child_block(
child_root,
CachedChildComponents::new(Some(child_block), None),
PeerShouldHave::Neither(peer_id),
ChildComponents::new(child_root, Some(child_block), None),
&[PeerShouldHave::Neither(peer_id)],
&mut cx,
);
@ -1340,8 +1302,8 @@ mod deneb_only {
*blobs.index_mut(0) = Some(child_blob);
bl.search_child_block(
child_root,
CachedChildComponents::new(None, Some(blobs)),
PeerShouldHave::Neither(peer_id),
ChildComponents::new(child_root, None, Some(blobs)),
&[PeerShouldHave::Neither(peer_id)],
&mut cx,
);
@ -1359,7 +1321,7 @@ mod deneb_only {
)
}
RequestTrigger::GossipUnknownBlockOrBlob => {
bl.search_block(block_root, PeerShouldHave::Neither(peer_id), &mut cx);
bl.search_block(block_root, &[PeerShouldHave::Neither(peer_id)], &mut cx);
let block_req_id = rig.expect_lookup_request(ResponseType::Block);
let blob_req_id = rig.expect_lookup_request(ResponseType::Blob);
(Some(block_req_id), Some(blob_req_id), None, None)
@ -1563,6 +1525,7 @@ mod deneb_only {
self.bl.parent_block_processed(
self.block_root,
BlockProcessingResult::Err(BlockError::ParentUnknown(RpcBlock::new_without_blobs(
Some(self.block_root),
self.parent_block.clone().expect("parent block"),
))),
&mut self.cx,

View File

@ -66,7 +66,7 @@ impl<T: EthSpec> BlocksAndBlobsRequestInfo<T> {
}
}
let blobs = VariableList::from(blobs_buffer.into_iter().flatten().collect::<Vec<_>>());
responses.push(RpcBlock::new(block, Some(blobs))?)
responses.push(RpcBlock::new(None, block, Some(blobs))?)
}
// if accumulated sidecars is not empty, throw an error.

View File

@ -42,12 +42,11 @@ use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProces
use crate::service::NetworkMessage;
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::common::{Current, Parent};
use crate::sync::block_lookups::delayed_lookup;
use crate::sync::block_lookups::delayed_lookup::DelayedLookupMessage;
use crate::sync::block_lookups::{BlobRequestState, BlockRequestState, CachedChildComponents};
use crate::sync::block_lookups::{BlobRequestState, BlockRequestState};
use crate::sync::range_sync::ByRangeRequestType;
use beacon_chain::block_verification_types::AsBlock;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::data_availability_checker::ChildComponents;
use beacon_chain::{
AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState,
};
@ -58,7 +57,6 @@ use lighthouse_network::types::{NetworkGlobals, SyncState};
use lighthouse_network::SyncInfo;
use lighthouse_network::{PeerAction, PeerId};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use std::boxed::Box;
use std::ops::IndexMut;
use std::ops::Sub;
@ -76,9 +74,6 @@ use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
/// gossip if no peers are further than this range ahead of us that we have not already downloaded
/// blocks for.
pub const SLOT_IMPORT_TOLERANCE: usize = 32;
/// The maximum number of messages the delay queue can handle in a single slot before messages are
/// dropped.
pub const DELAY_QUEUE_CHANNEL_SIZE: usize = 128;
pub type Id = u32;
@ -148,10 +143,7 @@ pub enum SyncMessage<T: EthSpec> {
///
/// We will either attempt to find the block matching the unknown hash immediately or queue a lookup,
/// which will then trigger the request when we receive `MissingGossipBlockComponentsDelayed`.
MissingGossipBlockComponents(Slot, PeerId, Hash256),
/// This message triggers a request for missing block components after a delay.
MissingGossipBlockComponentsDelayed(Hash256),
MissingGossipBlockComponents(Vec<PeerId>, Hash256),
/// A peer has disconnected.
Disconnect(PeerId),
@ -228,8 +220,6 @@ pub struct SyncManager<T: BeaconChainTypes> {
block_lookups: BlockLookups<T>,
delayed_lookups: mpsc::Sender<DelayedLookupMessage>,
/// The logger for the import manager.
log: Logger,
}
@ -249,8 +239,6 @@ pub fn spawn<T: BeaconChainTypes>(
MAX_REQUEST_BLOCKS >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
"Max blocks that can be requested in a single batch greater than max allowed blocks in a single request"
);
let (delayed_lookups_send, delayed_lookups_recv) =
mpsc::channel::<DelayedLookupMessage>(DELAY_QUEUE_CHANNEL_SIZE);
// create an instance of the SyncManager
let network_globals = beacon_processor.network_globals.clone();
@ -269,21 +257,11 @@ pub fn spawn<T: BeaconChainTypes>(
beacon_chain.data_availability_checker.clone(),
log.clone(),
),
delayed_lookups: delayed_lookups_send,
log: log.clone(),
};
let log_clone = log.clone();
delayed_lookup::spawn_delayed_lookup_service(
&executor,
beacon_chain,
delayed_lookups_recv,
beacon_processor,
log,
);
// spawn the sync manager thread
debug!(log_clone, "Sync Manager started");
debug!(log, "Sync Manager started");
executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync");
}
@ -673,7 +651,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
block_root,
parent_root,
blob_slot,
CachedChildComponents::new(None, Some(blobs)),
ChildComponents::new(block_root, None, Some(blobs)),
);
}
SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_hash) => {
@ -681,39 +659,31 @@ impl<T: BeaconChainTypes> SyncManager<T> {
if self.synced_and_connected(&peer_id) {
self.block_lookups.search_block(
block_hash,
PeerShouldHave::BlockAndBlobs(peer_id),
&[PeerShouldHave::BlockAndBlobs(peer_id)],
&mut self.network,
);
}
}
SyncMessage::MissingGossipBlockComponents(slot, peer_id, block_root) => {
// If we are not synced, ignore this block.
if self.synced_and_connected(&peer_id) {
if self.should_delay_lookup(slot) {
self.block_lookups.search_block_delayed(
block_root,
PeerShouldHave::Neither(peer_id),
&mut self.network,
);
if let Err(e) = self
.delayed_lookups
.try_send(DelayedLookupMessage::MissingComponents(block_root))
{
warn!(self.log, "Delayed lookup dropped for block referenced by a blob";
"block_root" => ?block_root, "error" => ?e);
SyncMessage::MissingGossipBlockComponents(peer_id, block_root) => {
let peers_guard = self.network_globals().peers.read();
let connected_peers = peer_id
.into_iter()
.filter_map(|peer_id| {
if peers_guard.is_connected(&peer_id) {
Some(PeerShouldHave::Neither(peer_id))
} else {
None
}
} else {
self.block_lookups.search_block(
block_root,
PeerShouldHave::Neither(peer_id),
&mut self.network,
)
}
})
.collect::<Vec<_>>();
drop(peers_guard);
// If we are not synced, ignore this block.
if self.synced() && !connected_peers.is_empty() {
self.block_lookups
.search_block(block_root, &connected_peers, &mut self.network)
}
}
SyncMessage::MissingGossipBlockComponentsDelayed(block_root) => self
.block_lookups
.trigger_lookup_by_root(block_root, &self.network),
SyncMessage::Disconnect(peer_id) => {
self.peer_disconnect(&peer_id);
}
@ -782,7 +752,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
block_root: Hash256,
parent_root: Hash256,
slot: Slot,
child_components: CachedChildComponents<T::EthSpec>,
child_components: ChildComponents<T::EthSpec>,
) {
if self.should_search_for_block(slot, &peer_id) {
self.block_lookups.search_parent(
@ -792,56 +762,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
peer_id,
&mut self.network,
);
if self.should_delay_lookup(slot) {
self.block_lookups.search_child_delayed(
block_root,
child_components,
PeerShouldHave::Neither(peer_id),
&mut self.network,
);
if let Err(e) = self
.delayed_lookups
.try_send(DelayedLookupMessage::MissingComponents(block_root))
{
warn!(self.log, "Delayed lookups dropped for block"; "block_root" => ?block_root, "error" => ?e);
}
} else {
self.block_lookups.search_child_block(
block_root,
child_components,
PeerShouldHave::Neither(peer_id),
&mut self.network,
);
}
}
}
fn should_delay_lookup(&mut self, slot: Slot) -> bool {
if !self.block_lookups.da_checker.is_deneb() {
return false;
}
let maximum_gossip_clock_disparity = self.chain.spec.maximum_gossip_clock_disparity();
let earliest_slot = self
.chain
.slot_clock
.now_with_past_tolerance(maximum_gossip_clock_disparity);
let latest_slot = self
.chain
.slot_clock
.now_with_future_tolerance(maximum_gossip_clock_disparity);
if let (Some(earliest_slot), Some(latest_slot)) = (earliest_slot, latest_slot) {
let msg_for_current_slot = slot >= earliest_slot && slot <= latest_slot;
let delay_threshold_unmet = self
.chain
.slot_clock
.millis_from_current_slot_start()
.map_or(false, |millis_into_slot| {
millis_into_slot < self.chain.slot_clock.single_lookup_delay()
});
msg_for_current_slot && delay_threshold_unmet
} else {
false
self.block_lookups.search_child_block(
block_root,
child_components,
&[PeerShouldHave::Neither(peer_id)],
&mut self.network,
);
}
}
@ -864,12 +790,15 @@ impl<T: BeaconChainTypes> SyncManager<T> {
&& self.network.is_execution_engine_online()
}
fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool {
fn synced(&mut self) -> bool {
self.network_globals().sync_state.read().is_synced()
&& self.network_globals().peers.read().is_connected(peer_id)
&& self.network.is_execution_engine_online()
}
/// Returns `true` only if the node is synced (see `synced`) AND the given
/// peer is currently connected.
fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool {
    // Guard-clause form: bail out early when not synced, preserving the
    // original short-circuit order (sync state is consulted before the
    // peer-DB read lock is taken).
    if !self.synced() {
        return false;
    }
    self.network_globals().peers.read().is_connected(peer_id)
}
fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) {
self.network.update_execution_engine_state(engine_state);
@ -968,7 +897,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
batch_id,
&peer_id,
id,
block.map(Into::into),
block.map(|b| RpcBlock::new_without_blobs(None, b)),
) {
Ok(ProcessResult::SyncCompleted) => self.update_sync_state(),
Ok(ProcessResult::Successful) => {}
@ -992,7 +921,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
chain_id,
batch_id,
id,
block.map(Into::into),
block.map(|b| RpcBlock::new_without_blobs(None, b)),
);
self.update_sync_state();
}

View File

@ -9,6 +9,5 @@ mod network_context;
mod peer_sync_info;
mod range_sync;
pub use block_lookups::CachedChildComponents;
pub use manager::{BatchProcessResult, SyncMessage};
pub use range_sync::{BatchOperationOutcome, ChainId};

View File

@ -11,6 +11,8 @@ use tree_hash_derive::TreeHash;
/// Variable-length list of KZG commitments carried in a block body,
/// bounded by `MaxBlobCommitmentsPerBlock`.
pub type KzgCommitments<T> =
VariableList<KzgCommitment, <T as EthSpec>::MaxBlobCommitmentsPerBlock>;
/// Fixed-length vector of optional KZG commitments, one slot per possible
/// blob index (`MaxBlobsPerBlock`). A `None` entry presumably marks a
/// commitment not yet observed — TODO(review): confirm against users of
/// this alias.
pub type KzgCommitmentOpts<T> =
FixedVector<Option<KzgCommitment>, <T as EthSpec>::MaxBlobsPerBlock>;
/// The body of a `BeaconChain` block, containing operations.
///

View File

@ -23,6 +23,19 @@ pub struct BlobIdentifier {
pub index: u64,
}
impl BlobIdentifier {
    /// Builds the full set of blob identifiers for `block_root`: one
    /// `BlobIdentifier` for every possible blob index in
    /// `0..E::max_blobs_per_block()`.
    pub fn get_all_blob_ids<E: EthSpec>(block_root: Hash256) -> Vec<BlobIdentifier> {
        (0..E::max_blobs_per_block())
            .map(|index| BlobIdentifier {
                block_root,
                index: index as u64,
            })
            .collect()
    }
}
impl PartialOrd for BlobIdentifier {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.index.partial_cmp(&other.index)

View File

@ -1,4 +1,3 @@
use crate::blob_sidecar::BlobIdentifier;
use crate::*;
use bls::Signature;
use derivative::Derivative;
@ -257,30 +256,6 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
.map(|c| c.len())
.unwrap_or(0)
}
/// Returns a `BlobIdentifier` for every blob this block is expected to
/// have — i.e. `get_filtered_blob_ids` with a filter that accepts all
/// indices. `block_root: None` means the root is computed from the block
/// itself.
pub fn get_expected_blob_ids(&self, block_root: Option<Hash256>) -> Vec<BlobIdentifier> {
self.get_filtered_blob_ids(block_root, |_, _| true)
}
/// Returns the `BlobIdentifier`s for this block whose (index, root) pair
/// passes `filter`; when the filter returns `true` the id for that index
/// and root is included.
///
/// When `block_root` is `None` the root is derived from the block via
/// `canonical_root()`.
pub fn get_filtered_blob_ids(
    &self,
    block_root: Option<Hash256>,
    filter: impl Fn(usize, Hash256) -> bool,
) -> Vec<BlobIdentifier> {
    // Fall back to hashing the block only when no root was supplied.
    let block_root = block_root.unwrap_or_else(|| self.canonical_root());
    (0..self.num_expected_blobs())
        .filter(|&index| filter(index, block_root))
        .map(|index| BlobIdentifier {
            block_root,
            index: index as u64,
        })
        .collect()
}
}
// We can convert pre-Bellatrix blocks without payloads into blocks with payloads.

View File

@ -449,9 +449,7 @@ impl<E: EthSpec> Tester<E> {
let result = self.block_on_dangerous(
self.harness
.chain
.check_gossip_blob_availability_and_import(
GossipVerifiedBlob::__assumed_valid(signed_sidecar),
),
.process_gossip_blob(GossipVerifiedBlob::__assumed_valid(signed_sidecar)),
)?;
if valid {
assert!(result.is_ok());