Single blob lookups (#4152)
* some blob reprocessing work
* remove ForceBlockLookup
* reorder enum match arms in sync manager
* a lot more reprocessing work
* impl logic for triggering blob lookups along with block lookups
* deal with rpc blobs in groups per block in the da checker. don't cache missing blob ids in the da checker.
* make single block lookup generic
* more work
* add delayed processing logic and combine some requests
* start fixing some compile errors
* fix compilation in main block lookup mod
* much work
* get things compiling
* parent blob lookups
* fix compile
* revert red/stevie changes
* fix up sync manager delay message logic
* add peer usefulness enum
* should remove lookup refactor
* consolidate retry error handling
* improve peer scoring during certain failures in parent lookups
* improve retry code
* drop parent lookup if either req has a peer disconnect during download
* refactor single block processed method
* processing peer refactor
* smol bugfix
* fix some todos
* fix lints
* fix lints
* fix compile in lookup tests
* fix lints
* fix lints
* fix existing block lookup tests
* renamings
* fix after merge
* cargo fmt
* compilation fix in beacon chain tests
* fix
* refactor lookup tests to work with multiple forks and response types
* make tests into macros
* wrap availability check error
* fix compile after merge
* add random blobs
* start fixing up lookup verify error handling
* some bug fixes and the start of deneb only tests
* make tests work for all forks
* track information about peer source
* error refactoring
* improve peer scoring
* fix test compilation
* make sure blobs are sent for processing after stream termination, delete copied tests
* add some tests and fix a bug
* smol bugfixes and moar tests
* add tests and fix some things
* compile after merge
* lots of refactoring
* retry on invalid block/blob
* merge unknown parent messages before current slot lookup
* get tests compiling
* penalize blob peer on invalid blobs
* Check disk on in-memory cache miss
* Update beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs
* Update beacon_node/network/src/sync/network_context.rs

Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>

* fix bug in matching blocks and blobs in range sync
* pr feedback
* fix conflicts
* upgrade logs from warn to crit when we receive incorrect response in range
* synced_and_connected_within_tolerance -> should_search_for_block
* remove todo
* Fix Broken Overflow Tests
* fix merge conflicts
* checkpoint sync without alignment
* add import
* query for checkpoint state by slot rather than state root (teku doesn't serve by state root)
* get state first and query by most recent block root
* simplify delay logic
* rename unknown parent sync message variants
* rename parameter, block_slot -> slot
* add some docs to the lookup module
* use interval instead of sleep
* drop request if blocks and blobs requests both return `None` for `Id`
* clean up `find_single_lookup` logic
* add lookup source enum
* clean up `find_single_lookup` logic
* add docs to find_single_lookup_request
* move LookupSource out of param where unnecessary
* remove unnecessary todo
* query for block by `state.latest_block_header.slot`
* fix lint
* fix test
* fix test
* fix observed blob sidecars test
* PR updates
* use optional params instead of a closure
* create lookup and trigger request in separate method calls
* remove `LookupSource`
* make sure duplicate lookups are not dropped

---------

Co-authored-by: Pawan Dhananjay <pawandhananjay@gmail.com>
Co-authored-by: Mark Mackey <mark@sigmaprime.io>
Co-authored-by: Divma <26765164+divagant-martian@users.noreply.github.com>
parent 5428e68943
commit a62e52f319
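The diff below is large; its central change is that the old `PendingBlock`/`PendingBlobs` processing statuses collapse into a single `MissingComponents` variant carrying the slot and block root. A minimal standalone Rust sketch of the resulting caller-side flow, using stand-in type aliases rather than the real Lighthouse definitions:

// Stand-in aliases for this sketch only; the real `Slot` and `Hash256`
// types live in the `types` crate.
type Slot = u64;
type Hash256 = [u8; 32];

/// After this change there are only two outcomes: fully imported, or still
/// waiting on some combination of the block and its blobs.
#[derive(Debug, PartialEq)]
enum AvailabilityProcessingStatus {
    MissingComponents(Slot, Hash256),
    Imported(Hash256),
}

fn on_processed(status: AvailabilityProcessingStatus) {
    match status {
        AvailabilityProcessingStatus::Imported(root) => {
            println!("imported block {root:?}");
        }
        AvailabilityProcessingStatus::MissingComponents(slot, root) => {
            // Callers no longer distinguish "missing block" from "missing
            // blobs"; either way they trigger or extend a single lookup.
            println!("slot {slot}: still missing components for {root:?}");
        }
    }
}

fn main() {
    on_processed(AvailabilityProcessingStatus::MissingComponents(0, [0; 32]));
}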
14 .github/workflows/test-suite.yml (vendored)
@@ -119,6 +119,20 @@ jobs:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run operation_pool tests for all known forks
run: make test-op-pool
network-minimal-tests:
name: network-minimal-tests
runs-on: ubuntu-latest
needs: cargo-fmt
steps:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run network tests for all known forks using the minimal spec
run: make test-network-minimal
slasher-tests:
name: slasher-tests
runs-on: ubuntu-latest
12 Cargo.lock (generated)
@@ -2158,6 +2158,15 @@ dependencies = [
"types",
]

[[package]]
name = "erased-serde"
version = "0.3.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f2b0c2380453a92ea8b6c8e5f64ecaafccddde8ceab55ff7a8ac1029f894569"
dependencies = [
"serde",
]

[[package]]
name = "errno"
version = "0.3.1"
@@ -7521,6 +7530,9 @@ name = "slog"
version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06"
dependencies = [
"erased-serde",
]

[[package]]
name = "slog-async"
9 Makefile
@@ -106,7 +106,7 @@ build-release-tarballs:
# Runs the full workspace tests in **release**, without downloading any additional
# test vectors.
test-release:
cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher
cargo test --workspace --release --exclude ef_tests --exclude beacon_chain --exclude slasher

# Runs the full workspace tests in **debug**, without downloading any additional test
# vectors.
@@ -143,6 +143,13 @@ test-op-pool-%:
--features 'beacon_chain/fork_from_env'\
-p operation_pool

test-network-minimal: $(patsubst %,test-network-minimal-%,$(FORKS))

test-network-minimal-%:
env FORK_NAME=$* cargo test --release \
--features 'fork_from_env,spec-minimal'\
-p network

# Run the tests in the `slasher` crate for all supported database backends.
test-slasher:
cargo test --release -p slasher --features lmdb
@@ -117,7 +117,7 @@ use tokio_stream::Stream;
use tree_hash::TreeHash;
use types::beacon_block_body::KzgCommitments;
use types::beacon_state::CloneConfig;
use types::blob_sidecar::{BlobIdentifier, BlobSidecarList, Blobs};
use types::blob_sidecar::{BlobSidecarList, Blobs};
use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
use types::*;

@@ -185,12 +185,10 @@ pub enum WhenSlotSkipped {

#[derive(Debug, PartialEq)]
pub enum AvailabilityProcessingStatus {
PendingBlobs(Vec<BlobIdentifier>),
PendingBlock(Hash256),
MissingComponents(Slot, Hash256),
Imported(Hash256),
}

//TODO(sean) using this in tests for now
impl TryInto<SignedBeaconBlockHash> for AvailabilityProcessingStatus {
type Error = ();

@@ -468,7 +466,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
/// The slot at which blocks are downloaded back to.
pub genesis_backfill_slot: Slot,
pub proposal_blob_cache: BlobCache<T::EthSpec>,
pub data_availability_checker: DataAvailabilityChecker<T>,
pub data_availability_checker: Arc<DataAvailabilityChecker<T>>,
pub kzg: Option<Arc<Kzg>>,
}

@@ -1985,8 +1983,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self: &Arc<Self>,
blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet_id: u64,
) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError> // TODO(pawan): make a GossipVerifedBlob type
{
) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError<T::EthSpec>> {
blob_verification::validate_blob_sidecar_for_gossip(blob_sidecar, subnet_id, self)
}

@@ -2674,7 +2671,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
)
.await
{
Ok(_) => imported_blocks += 1,
Ok(status) => {
match status {
AvailabilityProcessingStatus::Imported(_) => {
// The block was imported successfully.
imported_blocks += 1;
}
AvailabilityProcessingStatus::MissingComponents(slot, block_root) => {
warn!(self.log, "Blobs missing in response to range request";
"block_root" => ?block_root, "slot" => slot);
return ChainSegmentResult::Failed {
imported_blocks,
error: BlockError::AvailabilityCheck(
AvailabilityCheckError::MissingBlobs,
),
};
}
}
}
Err(error) => {
return ChainSegmentResult::Failed {
imported_blocks,
@@ -2748,6 +2762,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
count_unrealized: CountUnrealized,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
self.check_availability_and_maybe_import(
blob.slot(),
|chain| chain.data_availability_checker.put_gossip_blob(blob),
count_unrealized,
)
@@ -2804,6 +2819,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
}
ExecutedBlock::AvailabilityPending(block) => {
self.check_availability_and_maybe_import(
block.block.slot(),
|chain| {
chain
.data_availability_checker
@@ -2907,6 +2923,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// (i.e., this function is not atomic).
pub async fn check_availability_and_maybe_import(
self: &Arc<Self>,
slot: Slot,
cache_fn: impl FnOnce(Arc<Self>) -> Result<Availability<T::EthSpec>, AvailabilityCheckError>,
count_unrealized: CountUnrealized,
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
@@ -2915,12 +2932,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Availability::Available(block) => {
self.import_available_block(block, count_unrealized).await
}
Availability::PendingBlock(block_root) => {
Ok(AvailabilityProcessingStatus::PendingBlock(block_root))
}
Availability::PendingBlobs(blob_ids) => {
Ok(AvailabilityProcessingStatus::PendingBlobs(blob_ids))
}
Availability::MissingComponents(block_root) => Ok(
AvailabilityProcessingStatus::MissingComponents(slot, block_root),
),
}
}
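One behavioural note on the hunk at `@@ -2674` above: during range sync, a `MissingComponents` result now fails the whole chain segment instead of being counted as a success. A rough standalone sketch of that branch, with simplified stand-in types rather than the real Lighthouse ones:

// Simplified stand-ins for the sketch; not the real Lighthouse types.
type Slot = u64;
type Hash256 = u64;

enum Status {
    Imported(Hash256),
    MissingComponents(Slot, Hash256),
}

enum SegmentResult {
    Done { imported_blocks: usize },
    Failed { imported_blocks: usize, reason: String },
}

fn process_segment(results: Vec<Status>) -> SegmentResult {
    let mut imported_blocks = 0;
    for status in results {
        match status {
            Status::Imported(_) => imported_blocks += 1,
            // Blocks served over range requests must arrive with their blobs,
            // so a missing component is treated as a faulty response.
            Status::MissingComponents(slot, root) => {
                return SegmentResult::Failed {
                    imported_blocks,
                    reason: format!("missing blobs for {root:#x} at slot {slot}"),
                };
            }
        }
    }
    SegmentResult::Done { imported_blocks }
}

fn main() {
    let outcome = process_segment(vec![Status::Imported(1), Status::MissingComponents(5, 2)]);
    if let SegmentResult::Failed { imported_blocks, reason } = outcome {
        println!("imported {imported_blocks} block(s), then failed: {reason}");
    }
}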
@@ -16,15 +16,17 @@ use eth2::types::BlockContentsTuple;
use kzg::Kzg;
use slog::{debug, warn};
use ssz_derive::{Decode, Encode};
use ssz_types::FixedVector;
use std::borrow::Cow;
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
use types::{
BeaconBlockRef, BeaconState, BeaconStateError, BlobSidecar, BlobSidecarList, ChainSpec,
CloneConfig, Epoch, EthSpec, FullPayload, Hash256, KzgCommitment, RelativeEpoch,
SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlobSidecar, Slot,
BeaconBlockRef, BeaconState, BeaconStateError, BlobSidecar, ChainSpec, CloneConfig, Epoch,
EthSpec, FullPayload, Hash256, KzgCommitment, RelativeEpoch, SignedBeaconBlock,
SignedBeaconBlockHeader, SignedBlobSidecar, Slot,
};

#[derive(Debug)]
pub enum BlobError {
pub enum BlobError<T: EthSpec> {
/// The blob sidecar is from a slot that is later than the current slot (with respect to the
/// gossip clock disparity).
///
@@ -96,10 +98,7 @@ pub enum BlobError {
/// ## Peer scoring
///
/// We cannot process the blob without validating its parent, the peer isn't necessarily faulty.
BlobParentUnknown {
blob_root: Hash256,
blob_parent_root: Hash256,
},
BlobParentUnknown(Arc<BlobSidecar<T>>),

/// A blob has already been seen for the given `(sidecar.block_root, sidecar.index)` tuple
/// over gossip or no gossip sources.
@@ -114,13 +113,13 @@ pub enum BlobError {
},
}

impl From<BeaconChainError> for BlobError {
impl<T: EthSpec> From<BeaconChainError> for BlobError<T> {
fn from(e: BeaconChainError) -> Self {
BlobError::BeaconChainError(e)
}
}

impl From<BeaconStateError> for BlobError {
impl<T: EthSpec> From<BeaconStateError> for BlobError<T> {
fn from(e: BeaconStateError) -> Self {
BlobError::BeaconChainError(BeaconChainError::BeaconStateError(e))
}
@@ -128,27 +127,36 @@ impl From<BeaconStateError> for BlobError {

/// A wrapper around a `BlobSidecar` that indicates it has been approved for re-gossiping on
/// the p2p network.
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct GossipVerifiedBlob<T: EthSpec> {
blob: Arc<BlobSidecar<T>>,
}

impl<T: EthSpec> GossipVerifiedBlob<T> {
pub fn id(&self) -> BlobIdentifier {
self.blob.id()
}
pub fn block_root(&self) -> Hash256 {
self.blob.block_root
}
pub fn to_blob(self) -> Arc<BlobSidecar<T>> {
self.blob
}
pub fn slot(&self) -> Slot {
self.blob.slot
}
}

pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
signed_blob_sidecar: SignedBlobSidecar<T::EthSpec>,
subnet: u64,
chain: &BeaconChain<T>,
) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError> {
) -> Result<GossipVerifiedBlob<T::EthSpec>, BlobError<T::EthSpec>> {
let blob_slot = signed_blob_sidecar.message.slot;
let blob_index = signed_blob_sidecar.message.index;
let block_root = signed_blob_sidecar.message.block_root;
let block_parent_root = signed_blob_sidecar.message.block_parent_root;
let blob_proposer_index = signed_blob_sidecar.message.proposer_index;
let block_root = signed_blob_sidecar.message.block_root;

// Verify that the blob_sidecar was received on the correct subnet.
if blob_index != subnet {
@@ -211,10 +219,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
});
}
} else {
return Err(BlobError::BlobParentUnknown {
blob_root: block_root,
blob_parent_root: block_parent_root,
});
return Err(BlobError::BlobParentUnknown(signed_blob_sidecar.message));
}

// Note: We check that the proposer_index matches against the shuffling first to avoid
@@ -366,7 +371,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>(
state_root_opt: Option<Hash256>,
blob_slot: Slot,
spec: &ChainSpec,
) -> Result<Cow<'a, BeaconState<E>>, BlobError> {
) -> Result<Cow<'a, BeaconState<E>>, BlobError<E>> {
let block_epoch = blob_slot.epoch(E::slots_per_epoch());

if state.current_epoch() == block_epoch {
@@ -443,19 +448,14 @@ impl<T: EthSpec> KzgVerifiedBlob<T> {
///
/// Returns an error if the kzg verification check fails.
pub fn verify_kzg_for_blob<T: EthSpec>(
blob: GossipVerifiedBlob<T>,
blob: Arc<BlobSidecar<T>>,
kzg: &Kzg,
) -> Result<KzgVerifiedBlob<T>, AvailabilityCheckError> {
//TODO(sean) remove clone
if validate_blob::<T>(
kzg,
blob.blob.blob.clone(),
blob.blob.kzg_commitment,
blob.blob.kzg_proof,
)
.map_err(AvailabilityCheckError::Kzg)?
if validate_blob::<T>(kzg, blob.blob.clone(), blob.kzg_commitment, blob.kzg_proof)
.map_err(AvailabilityCheckError::Kzg)?
{
Ok(KzgVerifiedBlob { blob: blob.blob })
Ok(KzgVerifiedBlob { blob })
} else {
Err(AvailabilityCheckError::KzgVerificationFailed)
}
@@ -467,7 +467,7 @@ pub fn verify_kzg_for_blob<T: EthSpec>(
/// Note: This function should be preferred over calling `verify_kzg_for_blob`
/// in a loop since this function kzg verifies a list of blobs more efficiently.
pub fn verify_kzg_for_blob_list<T: EthSpec>(
blob_list: BlobSidecarList<T>,
blob_list: Vec<Arc<BlobSidecar<T>>>,
kzg: &Kzg,
) -> Result<KzgVerifiedBlobList<T>, AvailabilityCheckError> {
let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list
@@ -608,7 +608,16 @@ impl<E: EthSpec> AsBlock<E> for &MaybeAvailableBlock<E> {
#[derivative(Hash(bound = "E: EthSpec"))]
pub enum BlockWrapper<E: EthSpec> {
Block(Arc<SignedBeaconBlock<E>>),
BlockAndBlobs(Arc<SignedBeaconBlock<E>>, Vec<Arc<BlobSidecar<E>>>),
BlockAndBlobs(Arc<SignedBeaconBlock<E>>, FixedBlobSidecarList<E>),
}

impl<E: EthSpec> BlockWrapper<E> {
pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<E>>, Option<FixedBlobSidecarList<E>>) {
match self {
BlockWrapper::Block(block) => (block, None),
BlockWrapper::BlockAndBlobs(block, blobs) => (block, Some(blobs)),
}
}
}

impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
@@ -675,13 +684,15 @@ impl<E: EthSpec> From<SignedBeaconBlock<E>> for BlockWrapper<E> {
impl<E: EthSpec> From<BlockContentsTuple<E, FullPayload<E>>> for BlockWrapper<E> {
fn from(value: BlockContentsTuple<E, FullPayload<E>>) -> Self {
match value.1 {
Some(variable_list) => Self::BlockAndBlobs(
Arc::new(value.0),
Vec::from(variable_list)
.into_iter()
.map(|signed_blob| signed_blob.message)
.collect::<Vec<_>>(),
),
Some(variable_list) => {
let mut blobs = Vec::with_capacity(E::max_blobs_per_block());
for blob in variable_list {
if blob.message.index < E::max_blobs_per_block() as u64 {
blobs.insert(blob.message.index as usize, Some(blob.message));
}
}
Self::BlockAndBlobs(Arc::new(value.0), FixedVector::from(blobs))
}
None => Self::Block(Arc::new(value.0)),
}
}
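`BlockWrapper::BlockAndBlobs` now carries a `FixedBlobSidecarList`, a fixed-length vector keyed by blob index. A tiny sketch of that index-keyed insertion using a plain `Vec<Option<_>>` in place of the real `FixedVector` (the types here are hypothetical miniatures, not the Lighthouse ones):

// Hypothetical miniature: each blob lands at its declared `index`, gaps stay
// visible as `None`, and out-of-range indices are silently dropped.
const MAX_BLOBS_PER_BLOCK: usize = 4;

#[derive(Clone, Debug)]
struct BlobSidecar {
    index: u64,
}

fn to_fixed_list(blobs: Vec<BlobSidecar>) -> Vec<Option<BlobSidecar>> {
    let mut fixed: Vec<Option<BlobSidecar>> = vec![None; MAX_BLOBS_PER_BLOCK];
    for blob in blobs {
        if let Some(slot) = fixed.get_mut(blob.index as usize) {
            *slot = Some(blob);
        }
    }
    fixed
}

fn main() {
    let fixed = to_fixed_list(vec![BlobSidecar { index: 2 }, BlobSidecar { index: 9 }]);
    assert_eq!(fixed.len(), MAX_BLOBS_PER_BLOCK);
    assert!(fixed[2].is_some()); // stored at its own index
    assert!(fixed[0].is_none()); // missing blobs remain observable gaps
}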
@@ -70,7 +70,7 @@ use crate::{
use derivative::Derivative;
use eth2::types::EventKind;
use execution_layer::PayloadStatus;
use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
use parking_lot::RwLockReadGuard;
use proto_array::Block as ProtoBlock;
use safe_arith::ArithError;
@@ -150,10 +150,7 @@ pub enum BlockError<T: EthSpec> {
/// its parent.
ParentUnknown(BlockWrapper<T>),
/// The block skips too many slots and is a DoS risk.
TooManySkippedSlots {
parent_slot: Slot,
block_slot: Slot,
},
TooManySkippedSlots { parent_slot: Slot, block_slot: Slot },
/// The block slot is greater than the present slot.
///
/// ## Peer scoring
@@ -168,10 +165,7 @@ pub enum BlockError<T: EthSpec> {
/// ## Peer scoring
///
/// The peer has incompatible state transition logic and is faulty.
StateRootMismatch {
block: Hash256,
local: Hash256,
},
StateRootMismatch { block: Hash256, local: Hash256 },
/// The block was a genesis block, these blocks cannot be re-imported.
GenesisBlock,
/// The slot is finalized, no need to import.
@@ -190,9 +184,7 @@ pub enum BlockError<T: EthSpec> {
///
/// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
/// imported.
NotFinalizedDescendant {
block_parent_root: Hash256,
},
NotFinalizedDescendant { block_parent_root: Hash256 },
/// Block is already known, no need to re-import.
///
/// ## Peer scoring
@@ -205,10 +197,7 @@ pub enum BlockError<T: EthSpec> {
///
/// The `proposer` has already proposed a block at this slot. The existing block may or may not
/// be equal to the given block.
RepeatProposal {
proposer: u64,
slot: Slot,
},
RepeatProposal { proposer: u64, slot: Slot },
/// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
///
/// ## Peer scoring
@@ -223,10 +212,7 @@ pub enum BlockError<T: EthSpec> {
/// ## Peer scoring
///
/// The block is invalid and the peer is faulty.
IncorrectBlockProposer {
block: u64,
local_shuffling: u64,
},
IncorrectBlockProposer { block: u64, local_shuffling: u64 },
/// The proposal signature in invalid.
///
/// ## Peer scoring
@@ -250,10 +236,7 @@ pub enum BlockError<T: EthSpec> {
/// ## Peer scoring
///
/// The block is invalid and the peer is faulty.
BlockIsNotLaterThanParent {
block_slot: Slot,
parent_slot: Slot,
},
BlockIsNotLaterThanParent { block_slot: Slot, parent_slot: Slot },
/// At least one block in the chain segment did not have it's parent root set to the root of
/// the prior block.
///
@@ -309,15 +292,15 @@ pub enum BlockError<T: EthSpec> {
/// If it's actually our fault (e.g. our execution node database is corrupt) we have bigger
/// problems to worry about than losing peers, and we're doing the network a favour by
/// disconnecting.
ParentExecutionPayloadInvalid {
parent_root: Hash256,
},
BlobValidation(BlobError),
ParentExecutionPayloadInvalid { parent_root: Hash256 },
/// A blob alone failed validation.
BlobValidation(BlobError<T>),
/// The block and blob together failed validation.
AvailabilityCheck(AvailabilityCheckError),
}

impl<T: EthSpec> From<BlobError> for BlockError<T> {
fn from(e: BlobError) -> Self {
impl<T: EthSpec> From<BlobError<T>> for BlockError<T> {
fn from(e: BlobError<T>) -> Self {
Self::BlobValidation(e)
}
}
@@ -785,21 +768,17 @@ impl<E: EthSpec> AvailabilityPendingExecutedBlock<E> {
}

pub fn get_all_blob_ids(&self) -> Vec<BlobIdentifier> {
self.get_filtered_blob_ids(|_| true)
let block_root = self.import_data.block_root;
self.block
.get_filtered_blob_ids(Some(block_root), |_, _| true)
}

pub fn get_filtered_blob_ids(&self, filter: impl Fn(u64) -> bool) -> Vec<BlobIdentifier> {
let num_blobs_expected = self.num_blobs_expected();
let mut blob_ids = Vec::with_capacity(num_blobs_expected);
for i in 0..num_blobs_expected as u64 {
if filter(i) {
blob_ids.push(BlobIdentifier {
block_root: self.import_data.block_root,
index: i,
});
}
}
blob_ids
pub fn get_filtered_blob_ids(
&self,
filter: impl Fn(usize, Hash256) -> bool,
) -> Vec<BlobIdentifier> {
self.block
.get_filtered_blob_ids(Some(self.import_data.block_root), filter)
}
}
@@ -419,23 +419,14 @@ where
let weak_subj_block_root = weak_subj_block.canonical_root();
let weak_subj_state_root = weak_subj_block.state_root();

// Check that the given block lies on an epoch boundary. Due to the database only storing
// Check that the given state lies on an epoch boundary. Due to the database only storing
// full states on epoch boundaries and at restore points it would be difficult to support
// starting from a mid-epoch state.
if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 {
return Err(format!(
"Checkpoint block at slot {} is not aligned to epoch start. \
Please supply an aligned checkpoint with block.slot % 32 == 0",
weak_subj_block.slot(),
));
}

// Check that the block and state have consistent slots and state roots.
if weak_subj_state.slot() != weak_subj_block.slot() {
return Err(format!(
"Slot of snapshot block ({}) does not match snapshot state ({})",
weak_subj_block.slot(),
weak_subj_state.slot(),
"Checkpoint state at slot {} is not aligned to epoch start. \
Please supply an aligned checkpoint with state.slot % 32 == 0",
weak_subj_slot,
));
}

@@ -444,16 +435,21 @@ where
weak_subj_state
.build_all_caches(&self.spec)
.map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;

let computed_state_root = weak_subj_state
weak_subj_state
.update_tree_hash_cache()
.map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?;

if weak_subj_state_root != computed_state_root {
return Err(format!(
"Snapshot state root does not match block, expected: {:?}, got: {:?}",
weak_subj_state_root, computed_state_root
));
let latest_block_slot = weak_subj_state.latest_block_header().slot;

// We can only validate the block root if it exists in the state. We can't calculated it
// from the `latest_block_header` because the state root might be set to the zero hash.
if let Ok(state_slot_block_root) = weak_subj_state.get_block_root(latest_block_slot) {
if weak_subj_block_root != *state_slot_block_root {
return Err(format!(
"Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}",
weak_subj_block_root, state_slot_block_root
));
}
}

// Check that the checkpoint state is for the same network as the genesis state.
@@ -508,13 +504,12 @@ where
let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot)
.map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;

let current_slot = Some(snapshot.beacon_block.slot());
let fork_choice = ForkChoice::from_anchor(
fc_store,
snapshot.beacon_block_root,
&snapshot.beacon_block,
&snapshot.beacon_state,
current_slot,
Some(weak_subj_slot),
&self.spec,
)
.map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?;
@@ -891,13 +886,10 @@ where
validator_monitor: RwLock::new(validator_monitor),
genesis_backfill_slot,
//TODO(sean) should we move kzg solely to the da checker?
data_availability_checker: DataAvailabilityChecker::new(
slot_clock,
kzg.clone(),
store,
self.spec,
)
.map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?,
data_availability_checker: Arc::new(
DataAvailabilityChecker::new(slot_clock, kzg.clone(), store, self.spec)
.map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?,
),
proposal_blob_cache: BlobCache::default(),
kzg,
};
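The builder hunks above relax checkpoint sync: epoch alignment is asserted on the state (not the block), and the block root is validated against `state.get_block_root(latest_block_header.slot)` instead of recomputing the state root. A condensed sketch of that validation order, with stand-in types rather than the Lighthouse definitions:

use std::collections::HashMap;

// Stand-in types for the sketch only.
const SLOTS_PER_EPOCH: u64 = 32;
type Hash256 = u64;

struct CheckpointState {
    slot: u64,
    latest_block_header_slot: u64,
    recent_block_roots: HashMap<u64, Hash256>,
}

impl CheckpointState {
    /// Mirrors `state.get_block_root(slot)`: can fail when the slot is not
    /// covered by the state's roots vector, in which case the check is skipped.
    fn get_block_root(&self, slot: u64) -> Option<&Hash256> {
        self.recent_block_roots.get(&slot)
    }
}

fn validate_checkpoint(state: &CheckpointState, block_root: Hash256) -> Result<(), String> {
    // 1. The *state* must sit on an epoch boundary.
    if state.slot % SLOTS_PER_EPOCH != 0 {
        return Err(format!(
            "Checkpoint state at slot {} is not aligned to epoch start",
            state.slot
        ));
    }
    // 2. Validate the block root only if the state can actually serve it.
    if let Some(root) = state.get_block_root(state.latest_block_header_slot) {
        if *root != block_root {
            return Err("state's most recent block root does not match block".into());
        }
    }
    Ok(())
}

fn main() {
    let mut roots = HashMap::new();
    roots.insert(31, 7);
    let state = CheckpointState {
        slot: 32,
        latest_block_header_slot: 31,
        recent_block_roots: roots,
    };
    assert!(validate_checkpoint(&state, 7).is_ok());
    assert!(validate_checkpoint(&state, 8).is_err());
}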
@@ -10,12 +10,14 @@ use kzg::Error as KzgError;
use kzg::Kzg;
use slog::{debug, error};
use slot_clock::SlotClock;
use ssz_types::{Error, VariableList};
use ssz_types::{Error, FixedVector, VariableList};
use state_processing::per_block_processing::deneb::deneb::verify_kzg_commitments_against_transactions;
use std::collections::HashSet;
use std::sync::Arc;
use strum::IntoStaticStr;
use task_executor::TaskExecutor;
use types::beacon_block_body::KzgCommitments;
use types::blob_sidecar::{BlobIdentifier, BlobSidecar};
use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
use types::consts::deneb::MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
use types::ssz_tagged_signed_beacon_block;
use types::{
@@ -27,27 +29,29 @@ mod overflow_lru_cache;

pub const OVERFLOW_LRU_CAPACITY: usize = 1024;

#[derive(Debug)]
#[derive(Debug, IntoStaticStr)]
pub enum AvailabilityCheckError {
DuplicateBlob(Hash256),
Kzg(KzgError),
KzgVerificationFailed,
KzgNotInitialized,
KzgVerificationFailed,
SszTypes(ssz_types::Error),
MissingBlobs,
NumBlobsMismatch {
num_kzg_commitments: usize,
num_blobs: usize,
},
TxKzgCommitmentMismatch,
MissingBlobs,
TxKzgCommitmentMismatch(String),
KzgCommitmentMismatch {
blob_index: u64,
},
Pending,
IncorrectFork,
BlobIndexInvalid(u64),
StoreError(store::Error),
DecodeError(ssz::DecodeError),
BlockBlobRootMismatch {
block_root: Hash256,
blob_block_root: Hash256,
},
}

impl From<ssz_types::Error> for AvailabilityCheckError {
@@ -86,8 +90,7 @@ pub struct DataAvailabilityChecker<T: BeaconChainTypes> {
/// to "complete" the requirements for an `AvailableBlock`.
#[derive(Debug, PartialEq)]
pub enum Availability<T: EthSpec> {
PendingBlobs(Vec<BlobIdentifier>),
PendingBlock(Hash256),
MissingComponents(Hash256),
Available(Box<AvailableExecutedBlock<T>>),
}

@@ -119,6 +122,52 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
})
}

pub fn has_block(&self, block_root: &Hash256) -> bool {
self.availability_cache.has_block(block_root)
}

pub fn get_missing_blob_ids_checking_cache(
&self,
block_root: Hash256,
) -> Option<Vec<BlobIdentifier>> {
let (block, blob_indices) = self.availability_cache.get_missing_blob_info(block_root);
self.get_missing_blob_ids(block_root, block.as_ref(), Some(blob_indices))
}

/// A `None` indicates blobs are not required.
///
/// If there's no block, all possible ids will be returned that don't exist in the given blobs.
/// If there no blobs, all possible ids will be returned.
pub fn get_missing_blob_ids(
&self,
block_root: Hash256,
block_opt: Option<&Arc<SignedBeaconBlock<T::EthSpec>>>,
blobs_opt: Option<HashSet<usize>>,
) -> Option<Vec<BlobIdentifier>> {
let epoch = self.slot_clock.now()?.epoch(T::EthSpec::slots_per_epoch());

self.da_check_required(epoch).then(|| {
block_opt
.map(|block| {
block.get_filtered_blob_ids(Some(block_root), |i, _| {
blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i))
})
})
.unwrap_or_else(|| {
let mut blob_ids = Vec::with_capacity(T::EthSpec::max_blobs_per_block());
for i in 0..T::EthSpec::max_blobs_per_block() {
if blobs_opt.as_ref().map_or(true, |blobs| !blobs.contains(&i)) {
blob_ids.push(BlobIdentifier {
block_root,
index: i as u64,
});
}
}
blob_ids
})
})
}

/// Get a blob from the availability cache.
pub fn get_blob(
&self,
@@ -127,6 +176,23 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
self.availability_cache.peek_blob(blob_id)
}

pub fn put_rpc_blobs(
&self,
block_root: Hash256,
blobs: FixedBlobSidecarList<T::EthSpec>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
let mut verified_blobs = vec![];
if let Some(kzg) = self.kzg.as_ref() {
for blob in blobs.iter().flatten() {
verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?)
}
} else {
return Err(AvailabilityCheckError::KzgNotInitialized);
};
self.availability_cache
.put_kzg_verified_blobs(block_root, &verified_blobs)
}

/// This first validates the KZG commitments included in the blob sidecar.
/// Check if we've cached other blobs for this block. If it completes a set and we also
/// have a block cached, return the `Availability` variant triggering block import.
@@ -139,13 +205,13 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
// Verify the KZG commitments.
let kzg_verified_blob = if let Some(kzg) = self.kzg.as_ref() {
verify_kzg_for_blob(gossip_blob, kzg)?
verify_kzg_for_blob(gossip_blob.to_blob(), kzg)?
} else {
return Err(AvailabilityCheckError::KzgNotInitialized);
};

self.availability_cache
.put_kzg_verified_blob(kzg_verified_blob)
.put_kzg_verified_blobs(kzg_verified_blob.block_root(), &[kzg_verified_blob])
}

/// Check if we have all the blobs for a block. If we do, return the Availability variant that
@@ -171,7 +237,8 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
.kzg
.as_ref()
.ok_or(AvailabilityCheckError::KzgNotInitialized)?;
let verified_blobs = verify_kzg_for_blob_list(VariableList::new(blob_list)?, kzg)?;
let filtered_blobs = blob_list.iter().flatten().cloned().collect();
let verified_blobs = verify_kzg_for_blob_list(filtered_blobs, kzg)?;

Ok(MaybeAvailableBlock::Available(
self.check_availability_with_blobs(block, verified_blobs)?,
@@ -180,27 +247,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
}
}

/// Checks if a block is available, returning an error if the block is not immediately available.
/// Does not access the gossip cache.
pub fn try_check_availability(
&self,
block: BlockWrapper<T::EthSpec>,
) -> Result<AvailableBlock<T::EthSpec>, AvailabilityCheckError> {
match block {
BlockWrapper::Block(block) => {
let blob_requirements = self.get_blob_requirements(&block)?;
let blobs = match blob_requirements {
BlobRequirements::EmptyBlobs => VerifiedBlobs::EmptyBlobs,
BlobRequirements::NotRequired => VerifiedBlobs::NotRequired,
BlobRequirements::PreDeneb => VerifiedBlobs::PreDeneb,
BlobRequirements::Required => return Err(AvailabilityCheckError::MissingBlobs),
};
Ok(AvailableBlock { block, blobs })
}
BlockWrapper::BlockAndBlobs(_, _) => Err(AvailabilityCheckError::Pending),
}
}

/// Verifies a block against a set of KZG verified blobs. Returns an AvailableBlock if block's
/// commitments are consistent with the provided verified blob commitments.
pub fn check_availability_with_blobs(
@@ -254,9 +300,11 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
transactions,
block_kzg_commitments,
)
.map_err(|_| AvailabilityCheckError::TxKzgCommitmentMismatch)?;
.map_err(|e| AvailabilityCheckError::TxKzgCommitmentMismatch(format!("{e:?}")))?;
if !verified {
return Err(AvailabilityCheckError::TxKzgCommitmentMismatch);
return Err(AvailabilityCheckError::TxKzgCommitmentMismatch(
"a commitment and version didn't match".to_string(),
));
}
}

@@ -410,6 +458,27 @@ pub struct AvailabilityPendingBlock<E: EthSpec> {
block: Arc<SignedBeaconBlock<E>>,
}

impl<E: EthSpec> AvailabilityPendingBlock<E> {
pub fn slot(&self) -> Slot {
self.block.slot()
}
pub fn num_blobs_expected(&self) -> usize {
self.block.num_expected_blobs()
}

pub fn get_all_blob_ids(&self, block_root: Option<Hash256>) -> Vec<BlobIdentifier> {
self.block.get_expected_blob_ids(block_root)
}

pub fn get_filtered_blob_ids(
&self,
block_root: Option<Hash256>,
filter: impl Fn(usize, Hash256) -> bool,
) -> Vec<BlobIdentifier> {
self.block.get_filtered_blob_ids(block_root, filter)
}
}

impl<E: EthSpec> AvailabilityPendingBlock<E> {
pub fn to_block(self) -> Arc<SignedBeaconBlock<E>> {
self.block
@@ -429,7 +498,7 @@ impl<E: EthSpec> AvailabilityPendingBlock<E> {
}

/// Verifies an AvailabilityPendingBlock against a set of KZG verified blobs.
/// This does not check whether a block *should* have blobs, these checks should must have been
/// This does not check whether a block *should* have blobs, these checks should have been
/// completed when producing the `AvailabilityPendingBlock`.
pub fn make_available(
self,
@@ -485,6 +554,13 @@ impl<E: EthSpec> AvailableBlock<E> {
&self.block
}

pub fn da_check_required(&self) -> bool {
match self.blobs {
VerifiedBlobs::PreDeneb | VerifiedBlobs::NotRequired => false,
VerifiedBlobs::EmptyBlobs | VerifiedBlobs::Available(_) => true,
}
}

pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<E>>, Option<BlobSidecarList<E>>) {
match self.blobs {
VerifiedBlobs::EmptyBlobs | VerifiedBlobs::NotRequired | VerifiedBlobs::PreDeneb => {
@@ -542,7 +618,8 @@ impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
fn into_block_wrapper(self) -> BlockWrapper<E> {
let (block, blobs_opt) = self.deconstruct();
if let Some(blobs) = blobs_opt {
BlockWrapper::BlockAndBlobs(block, blobs.to_vec())
let blobs_vec = blobs.iter().cloned().map(Option::Some).collect::<Vec<_>>();
BlockWrapper::BlockAndBlobs(block, FixedVector::from(blobs_vec))
} else {
BlockWrapper::Block(block)
}
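`get_missing_blob_ids` above decides which blob ids still need fetching from an optional block and the set of indices already cached. The same filtering logic in isolation, as a standalone sketch with stand-in types (the real ids use 32-byte roots):

use std::collections::HashSet;

// Stand-ins for the sketch only.
type Hash256 = u64;
const MAX_BLOBS_PER_BLOCK: usize = 4;

#[derive(Debug, PartialEq)]
struct BlobIdentifier {
    block_root: Hash256,
    index: u64,
}

/// With a known block, only its expected blob count matters; without one,
/// every index up to the maximum could still be missing.
fn missing_blob_ids(
    block_root: Hash256,
    num_expected_blobs: Option<usize>,
    seen_indices: &HashSet<usize>,
) -> Vec<BlobIdentifier> {
    let upper = num_expected_blobs.unwrap_or(MAX_BLOBS_PER_BLOCK);
    (0..upper)
        .filter(|i| !seen_indices.contains(i))
        .map(|i| BlobIdentifier { block_root, index: i as u64 })
        .collect()
}

fn main() {
    let seen: HashSet<usize> = [0, 2].into_iter().collect();
    let missing = missing_blob_ids(0xabc, Some(3), &seen);
    assert_eq!(missing, vec![BlobIdentifier { block_root: 0xabc, index: 1 }]);
}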
@ -11,7 +11,9 @@ use ssz_derive::{Decode, Encode};
|
||||
use ssz_types::FixedVector;
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
use types::blob_sidecar::BlobIdentifier;
|
||||
use types::{BlobSidecar, Epoch, EthSpec, Hash256};
|
||||
use types::{BlobSidecar, Epoch, EthSpec, Hash256, SignedBeaconBlock};
|
||||
|
||||
type MissingBlobInfo<T> = (Option<Arc<SignedBeaconBlock<T>>>, HashSet<usize>);
|
||||
|
||||
/// Caches partially available blobs and execution verified blocks corresponding
|
||||
/// to a given `block_hash` that are received over gossip.
|
||||
@ -25,10 +27,12 @@ pub struct PendingComponents<T: EthSpec> {
|
||||
}
|
||||
|
||||
impl<T: EthSpec> PendingComponents<T> {
|
||||
pub fn new_from_blob(blob: KzgVerifiedBlob<T>) -> Self {
|
||||
pub fn new_from_blobs(blobs: &[KzgVerifiedBlob<T>]) -> Self {
|
||||
let mut verified_blobs = FixedVector::<_, _>::default();
|
||||
if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) {
|
||||
*mut_maybe_blob = Some(blob);
|
||||
for blob in blobs {
|
||||
if let Some(mut_maybe_blob) = verified_blobs.get_mut(blob.blob_index() as usize) {
|
||||
*mut_maybe_blob = Some(blob.clone());
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
@ -82,6 +86,20 @@ impl<T: EthSpec> PendingComponents<T> {
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_missing_blob_info(&self) -> MissingBlobInfo<T> {
|
||||
let block_opt = self
|
||||
.executed_block
|
||||
.as_ref()
|
||||
.map(|block| block.block.block.clone());
|
||||
let blobs = self
|
||||
.verified_blobs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(i, maybe_blob)| maybe_blob.as_ref().map(|_| i))
|
||||
.collect::<HashSet<_>>();
|
||||
(block_opt, blobs)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
@ -193,11 +211,27 @@ impl<T: BeaconChainTypes> OverflowStore<T> {
|
||||
Ok(disk_keys)
|
||||
}
|
||||
|
||||
pub fn load_block(
|
||||
&self,
|
||||
block_root: &Hash256,
|
||||
) -> Result<Option<AvailabilityPendingExecutedBlock<T::EthSpec>>, AvailabilityCheckError> {
|
||||
let key = OverflowKey::from_block_root(*block_root);
|
||||
|
||||
self.0
|
||||
.hot_db
|
||||
.get_bytes(DBColumn::OverflowLRUCache.as_str(), &key.as_ssz_bytes())?
|
||||
.map(|block_bytes| {
|
||||
AvailabilityPendingExecutedBlock::from_ssz_bytes(block_bytes.as_slice())
|
||||
})
|
||||
.transpose()
|
||||
.map_err(|e| e.into())
|
||||
}
|
||||
|
||||
pub fn load_blob(
|
||||
&self,
|
||||
blob_id: &BlobIdentifier,
|
||||
) -> Result<Option<Arc<BlobSidecar<T::EthSpec>>>, AvailabilityCheckError> {
|
||||
let key = OverflowKey::from_blob_id::<T::EthSpec>(blob_id.clone())?;
|
||||
let key = OverflowKey::from_blob_id::<T::EthSpec>(*blob_id)?;
|
||||
|
||||
self.0
|
||||
.hot_db
|
||||
@ -320,6 +354,41 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn has_block(&self, block_root: &Hash256) -> bool {
|
||||
let read_lock = self.critical.read();
|
||||
if read_lock
|
||||
.in_memory
|
||||
.peek(block_root)
|
||||
.map_or(false, |cache| cache.executed_block.is_some())
|
||||
{
|
||||
true
|
||||
} else if read_lock.store_keys.contains(block_root) {
|
||||
drop(read_lock);
|
||||
// If there's some kind of error reading from the store, we should just return false
|
||||
self.overflow_store
|
||||
.load_block(block_root)
|
||||
.map_or(false, |maybe_block| maybe_block.is_some())
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_missing_blob_info(&self, block_root: Hash256) -> MissingBlobInfo<T::EthSpec> {
|
||||
let read_lock = self.critical.read();
|
||||
if let Some(cache) = read_lock.in_memory.peek(&block_root) {
|
||||
cache.get_missing_blob_info()
|
||||
} else if read_lock.store_keys.contains(&block_root) {
|
||||
drop(read_lock);
|
||||
// return default if there's an error reading from the store
|
||||
match self.overflow_store.get_pending_components(block_root) {
|
||||
Ok(Some(pending_components)) => pending_components.get_missing_blob_info(),
|
||||
_ => Default::default(),
|
||||
}
|
||||
} else {
|
||||
Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn peek_blob(
|
||||
&self,
|
||||
blob_id: &BlobIdentifier,
|
||||
@ -335,27 +404,39 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn put_kzg_verified_blob(
|
||||
pub fn put_kzg_verified_blobs(
|
||||
&self,
|
||||
kzg_verified_blob: KzgVerifiedBlob<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
kzg_verified_blobs: &[KzgVerifiedBlob<T::EthSpec>],
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
for blob in kzg_verified_blobs {
|
||||
let blob_block_root = blob.block_root();
|
||||
if blob_block_root != block_root {
|
||||
return Err(AvailabilityCheckError::BlockBlobRootMismatch {
|
||||
block_root,
|
||||
blob_block_root,
|
||||
});
|
||||
}
|
||||
}
|
||||
let mut write_lock = self.critical.write();
|
||||
let block_root = kzg_verified_blob.block_root();
|
||||
|
||||
let availability = if let Some(mut pending_components) =
|
||||
write_lock.pop_pending_components(block_root, &self.overflow_store)?
|
||||
{
|
||||
let blob_index = kzg_verified_blob.blob_index();
|
||||
*pending_components
|
||||
.verified_blobs
|
||||
.get_mut(blob_index as usize)
|
||||
.ok_or(AvailabilityCheckError::BlobIndexInvalid(blob_index))? =
|
||||
Some(kzg_verified_blob);
|
||||
for kzg_verified_blob in kzg_verified_blobs {
|
||||
let blob_index = kzg_verified_blob.blob_index() as usize;
|
||||
if let Some(maybe_verified_blob) =
|
||||
pending_components.verified_blobs.get_mut(blob_index)
|
||||
{
|
||||
*maybe_verified_blob = Some(kzg_verified_blob.clone())
|
||||
} else {
|
||||
return Err(AvailabilityCheckError::BlobIndexInvalid(blob_index as u64));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(executed_block) = pending_components.executed_block.take() {
|
||||
self.check_block_availability_maybe_cache(
|
||||
write_lock,
|
||||
block_root,
|
||||
pending_components,
|
||||
executed_block,
|
||||
)?
|
||||
@ -365,17 +446,17 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
pending_components,
|
||||
&self.overflow_store,
|
||||
)?;
|
||||
Availability::PendingBlock(block_root)
|
||||
Availability::MissingComponents(block_root)
|
||||
}
|
||||
} else {
|
||||
// not in memory or store -> put new in memory
|
||||
let new_pending_components = PendingComponents::new_from_blob(kzg_verified_blob);
|
||||
let new_pending_components = PendingComponents::new_from_blobs(kzg_verified_blobs);
|
||||
write_lock.put_pending_components(
|
||||
block_root,
|
||||
new_pending_components,
|
||||
&self.overflow_store,
|
||||
)?;
|
||||
Availability::PendingBlock(block_root)
|
||||
Availability::MissingComponents(block_root)
|
||||
};
|
||||
|
||||
Ok(availability)
|
||||
@ -394,7 +475,6 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
match write_lock.pop_pending_components(block_root, &self.overflow_store)? {
|
||||
Some(pending_components) => self.check_block_availability_maybe_cache(
|
||||
write_lock,
|
||||
block_root,
|
||||
pending_components,
|
||||
executed_block,
|
||||
)?,
|
||||
@ -422,7 +502,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
new_pending_components,
|
||||
&self.overflow_store,
|
||||
)?;
|
||||
Availability::PendingBlobs(all_blob_ids)
|
||||
Availability::MissingComponents(block_root)
|
||||
}
|
||||
};
|
||||
|
||||
@ -435,11 +515,10 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
/// Returns an error if there was an error when matching the block commitments against blob commitments.
|
||||
///
|
||||
/// Returns `Ok(Availability::Available(_))` if all blobs for the block are present in cache.
|
||||
/// Returns `Ok(Availability::PendingBlobs(_))` if all corresponding blobs have not been received in the cache.
|
||||
/// Returns `Ok(Availability::MissingComponents(_))` if all corresponding blobs have not been received in the cache.
|
||||
fn check_block_availability_maybe_cache(
|
||||
&self,
|
||||
mut write_lock: RwLockWriteGuard<Critical<T>>,
|
||||
block_root: Hash256,
|
||||
mut pending_components: PendingComponents<T::EthSpec>,
|
||||
executed_block: AvailabilityPendingExecutedBlock<T::EthSpec>,
|
||||
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
|
||||
@ -451,11 +530,12 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
payload_verification_outcome,
|
||||
} = executed_block;
|
||||
|
||||
let verified_blobs = Vec::from(pending_components.verified_blobs)
|
||||
let Some(verified_blobs) = Vec::from(pending_components.verified_blobs)
|
||||
.into_iter()
|
||||
.take(num_blobs_expected)
|
||||
.map(|maybe_blob| maybe_blob.ok_or(AvailabilityCheckError::MissingBlobs))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
.collect::<Option<Vec<_>>>() else {
|
||||
return Ok(Availability::MissingComponents(import_data.block_root))
|
||||
};
|
||||
|
||||
let available_block = block.make_available(verified_blobs)?;
|
||||
Ok(Availability::Available(Box::new(
|
||||
@ -466,14 +546,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
),
|
||||
)))
|
||||
} else {
|
||||
let missing_blob_ids = executed_block.get_filtered_blob_ids(|index| {
|
||||
pending_components
|
||||
.verified_blobs
|
||||
.get(index as usize)
|
||||
.map(|maybe_blob| maybe_blob.is_none())
|
||||
.unwrap_or(true)
|
||||
});
|
||||
|
||||
let block_root = executed_block.import_data.block_root;
|
||||
let _ = pending_components.executed_block.insert(executed_block);
|
||||
write_lock.put_pending_components(
|
||||
block_root,
|
||||
@ -481,7 +554,7 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
&self.overflow_store,
|
||||
)?;
|
||||
|
||||
Ok(Availability::PendingBlobs(missing_blob_ids))
|
||||
Ok(Availability::MissingComponents(block_root))
|
||||
}
|
||||
}
|
||||
|
||||
@ -1080,7 +1153,7 @@ mod test {
|
||||
);
|
||||
} else {
|
||||
assert!(
|
||||
matches!(availability, Availability::PendingBlobs(_)),
|
||||
matches!(availability, Availability::MissingComponents(_)),
|
||||
"should be pending blobs"
|
||||
);
|
||||
assert_eq!(
|
||||
@ -1100,16 +1173,18 @@ mod test {
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.expect("kzg should exist");
|
||||
let mut kzg_verified_blobs = Vec::new();
|
||||
for (blob_index, gossip_blob) in blobs.into_iter().enumerate() {
|
||||
let kzg_verified_blob =
|
||||
verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify");
|
||||
let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
|
||||
.expect("kzg should verify");
|
||||
kzg_verified_blobs.push(kzg_verified_blob);
|
||||
let availability = cache
|
||||
.put_kzg_verified_blob(kzg_verified_blob)
|
||||
.put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
|
||||
.expect("should put blob");
|
||||
if blob_index == blobs_expected - 1 {
|
||||
assert!(matches!(availability, Availability::Available(_)));
|
||||
} else {
|
||||
assert!(matches!(availability, Availability::PendingBlobs(_)));
|
||||
assert!(matches!(availability, Availability::MissingComponents(_)));
|
||||
assert_eq!(cache.critical.read().in_memory.len(), 1);
|
||||
}
|
||||
}
|
||||
@ -1126,15 +1201,17 @@ mod test {
|
||||
"should have expected number of blobs"
|
||||
);
|
||||
let root = pending_block.import_data.block_root;
|
||||
let mut kzg_verified_blobs = vec![];
|
||||
for gossip_blob in blobs {
|
||||
let kzg_verified_blob =
|
||||
verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify");
|
||||
let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
|
||||
.expect("kzg should verify");
|
||||
kzg_verified_blobs.push(kzg_verified_blob);
|
||||
let availability = cache
|
||||
.put_kzg_verified_blob(kzg_verified_blob)
|
||||
.put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
|
||||
.expect("should put blob");
|
||||
assert_eq!(
|
||||
availability,
|
||||
Availability::PendingBlock(root),
|
||||
Availability::MissingComponents(root),
|
||||
"should be pending block"
|
||||
);
|
||||
assert_eq!(cache.critical.read().in_memory.len(), 1);
|
||||
@ -1270,11 +1347,13 @@ mod test {
|
||||
|
||||
let blobs_0 = pending_blobs.pop_front().expect("should have blobs");
|
||||
let expected_blobs = blobs_0.len();
|
||||
let mut kzg_verified_blobs = vec![];
|
||||
for (blob_index, gossip_blob) in blobs_0.into_iter().enumerate() {
|
||||
let kzg_verified_blob =
|
||||
verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify");
|
||||
let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
|
||||
.expect("kzg should verify");
|
||||
kzg_verified_blobs.push(kzg_verified_blob);
|
||||
let availability = cache
|
||||
.put_kzg_verified_blob(kzg_verified_blob)
|
||||
.put_kzg_verified_blobs(roots[0], kzg_verified_blobs.as_slice())
|
||||
.expect("should put blob");
|
||||
if blob_index == expected_blobs - 1 {
|
||||
assert!(matches!(availability, Availability::Available(_)));
|
||||
@ -1284,7 +1363,7 @@ mod test {
|
||||
cache.critical.read().in_memory.peek(&roots[0]).is_some(),
|
||||
"first block should be in memory"
|
||||
);
|
||||
assert!(matches!(availability, Availability::PendingBlobs(_)));
|
||||
assert!(matches!(availability, Availability::MissingComponents(_)));
|
||||
}
|
||||
}
|
||||
assert_eq!(
|
||||
@ -1360,13 +1439,17 @@ mod test {
|
||||
|
||||
for _ in 0..(n_epochs * capacity) {
|
||||
let pending_block = pending_blocks.pop_front().expect("should have block");
|
||||
let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs");
|
||||
let block_root = pending_block.block.as_block().canonical_root();
|
||||
let expected_blobs = pending_block.num_blobs_expected();
|
||||
if expected_blobs > 1 {
|
||||
// might as well add a blob too
|
||||
let mut pending_blobs = pending_blobs.pop_front().expect("should have blobs");
|
||||
let one_blob = pending_blobs.pop().expect("should have at least one blob");
|
||||
let kzg_verified_blob =
|
||||
verify_kzg_for_blob(one_blob, kzg.as_ref()).expect("kzg should verify");
|
||||
let one_blob = pending_block_blobs
|
||||
.pop()
|
||||
.expect("should have at least one blob");
|
||||
let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
|
||||
.expect("kzg should verify");
|
||||
let kzg_verified_blobs = vec![kzg_verified_blob];
|
||||
// generate random boolean
|
||||
let block_first = (rand::random::<usize>() % 2) == 0;
|
||||
if block_first {
|
||||
@ -1374,43 +1457,41 @@ mod test {
|
||||
.put_pending_executed_block(pending_block)
|
||||
.expect("should put block");
|
||||
assert!(
|
||||
matches!(availability, Availability::PendingBlobs(_)),
|
||||
matches!(availability, Availability::MissingComponents(_)),
|
||||
"should have pending blobs"
|
||||
);
|
||||
let availability = cache
|
||||
.put_kzg_verified_blob(kzg_verified_blob)
|
||||
.put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
|
||||
.expect("should put blob");
|
||||
assert!(
|
||||
matches!(availability, Availability::PendingBlobs(_)),
|
||||
matches!(availability, Availability::MissingComponents(_)),
|
||||
"availabilty should be pending blobs: {:?}",
|
||||
availability
|
||||
);
|
||||
} else {
|
||||
let availability = cache
|
||||
.put_kzg_verified_blob(kzg_verified_blob)
|
||||
.put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
|
||||
.expect("should put blob");
|
||||
let root = pending_block.block.as_block().canonical_root();
|
||||
assert_eq!(
|
||||
availability,
|
||||
Availability::PendingBlock(root),
|
||||
Availability::MissingComponents(root),
|
||||
"should be pending block"
|
||||
);
|
||||
let availability = cache
|
||||
.put_pending_executed_block(pending_block)
|
||||
.expect("should put block");
|
||||
assert!(
|
||||
matches!(availability, Availability::PendingBlobs(_)),
|
||||
matches!(availability, Availability::MissingComponents(_)),
|
||||
"should have pending blobs"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// still need to pop front so the blob count is correct
|
||||
pending_blobs.pop_front().expect("should have blobs");
|
||||
let availability = cache
|
||||
.put_pending_executed_block(pending_block)
|
||||
.expect("should put block");
|
||||
assert!(
|
||||
matches!(availability, Availability::PendingBlobs(_)),
|
||||
matches!(availability, Availability::MissingComponents(_)),
|
||||
"should be pending blobs"
|
||||
);
|
||||
}
|
||||
@@ -1511,63 +1592,63 @@ mod test {
         let mut remaining_blobs = HashMap::new();
         for _ in 0..(n_epochs * capacity) {
             let pending_block = pending_blocks.pop_front().expect("should have block");
+            let mut pending_block_blobs = pending_blobs.pop_front().expect("should have blobs");
             let block_root = pending_block.block.as_block().canonical_root();
             let expected_blobs = pending_block.num_blobs_expected();
             if expected_blobs > 1 {
                 // might as well add a blob too
-                let mut pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                let one_blob = pending_blobs.pop().expect("should have at least one blob");
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(one_blob, kzg.as_ref()).expect("kzg should verify");
+                let one_blob = pending_block_blobs
+                    .pop()
+                    .expect("should have at least one blob");
+                let kzg_verified_blob = verify_kzg_for_blob(one_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                let kzg_verified_blobs = vec![kzg_verified_blob];
                 // generate random boolean
                 let block_first = (rand::random::<usize>() % 2) == 0;
-                remaining_blobs.insert(block_root, pending_blobs);
                 if block_first {
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "availabilty should be pending blobs: {:?}",
                         availability
                     );
                 } else {
                     let availability = cache
-                        .put_kzg_verified_blob(kzg_verified_blob)
+                        .put_kzg_verified_blobs(block_root, kzg_verified_blobs.as_slice())
                         .expect("should put blob");
                     let root = pending_block.block.as_block().canonical_root();
                     assert_eq!(
                         availability,
-                        Availability::PendingBlock(root),
+                        Availability::MissingComponents(root),
                         "should be pending block"
                     );
                     let availability = cache
                         .put_pending_executed_block(pending_block)
                         .expect("should put block");
                     assert!(
-                        matches!(availability, Availability::PendingBlobs(_)),
+                        matches!(availability, Availability::MissingComponents(_)),
                         "should have pending blobs"
                     );
                 }
             } else {
-                // still need to pop front so the blob count is correct
-                let pending_blobs = pending_blobs.pop_front().expect("should have blobs");
-                remaining_blobs.insert(block_root, pending_blobs);
                 let availability = cache
                     .put_pending_executed_block(pending_block)
                     .expect("should put block");
                 assert!(
-                    matches!(availability, Availability::PendingBlobs(_)),
+                    matches!(availability, Availability::MissingComponents(_)),
                     "should be pending blobs"
                 );
             }
+            remaining_blobs.insert(block_root, pending_block_blobs);
         }

         // now we should have a full cache spanning multiple epochs
@@ -1626,18 +1707,20 @@ mod test {
         );

         // now lets insert the remaining blobs until the cache is empty
-        for (_, blobs) in remaining_blobs {
+        for (root, blobs) in remaining_blobs {
             let additional_blobs = blobs.len();
+            let mut kzg_verified_blobs = vec![];
             for (i, gossip_blob) in blobs.into_iter().enumerate() {
-                let kzg_verified_blob =
-                    verify_kzg_for_blob(gossip_blob, kzg.as_ref()).expect("kzg should verify");
+                let kzg_verified_blob = verify_kzg_for_blob(gossip_blob.to_blob(), kzg.as_ref())
+                    .expect("kzg should verify");
+                kzg_verified_blobs.push(kzg_verified_blob);
                 let availability = recovered_cache
-                    .put_kzg_verified_blob(kzg_verified_blob)
+                    .put_kzg_verified_blobs(root, kzg_verified_blobs.as_slice())
                     .expect("should put blob");
                 if i == additional_blobs - 1 {
                     assert!(matches!(availability, Availability::Available(_)))
                 } else {
-                    assert!(matches!(availability, Availability::PendingBlobs(_)));
+                    assert!(matches!(availability, Availability::MissingComponents(_)));
                 }
             }
         }
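Reviewer note: the assertions above track an API consolidation in the data availability checker: the old `Availability::PendingBlock`/`Availability::PendingBlobs` variants collapse into a single `Availability::MissingComponents`, and verified blobs are now inserted as a batch keyed by block root via `put_kzg_verified_blobs`. A minimal, self-contained sketch of the behaviour the tests assert (toy types, not the real Lighthouse definitions):

```rust
use std::collections::HashMap;

// Toy stand-ins; the real types live in beacon_chain's availability checker.
type Hash256 = [u8; 32];

#[derive(Debug, PartialEq)]
enum Availability {
    // The old API had PendingBlock(root) and PendingBlobs(ids); both collapse here.
    MissingComponents(Hash256),
    Available(Hash256),
}

#[derive(Default)]
struct ToyCache {
    // root -> (have_block, blobs_received, blobs_expected)
    entries: HashMap<Hash256, (bool, usize, usize)>,
}

impl ToyCache {
    fn put_block(&mut self, root: Hash256, blobs_expected: usize) -> Availability {
        let e = self.entries.entry(root).or_insert((false, 0, blobs_expected));
        e.0 = true;
        e.2 = blobs_expected;
        self.check(root)
    }

    // New-style batch insert, keyed by block root.
    fn put_kzg_verified_blobs(&mut self, root: Hash256, n: usize) -> Availability {
        let e = self.entries.entry(root).or_insert((false, 0, 0));
        e.1 += n;
        self.check(root)
    }

    fn check(&self, root: Hash256) -> Availability {
        match self.entries.get(&root) {
            Some((true, got, want)) if got >= want => Availability::Available(root),
            _ => Availability::MissingComponents(root),
        }
    }
}

fn main() {
    let mut cache = ToyCache::default();
    let root = [0u8; 32];
    // Block first: still missing blobs until the last one lands.
    assert_eq!(cache.put_block(root, 2), Availability::MissingComponents(root));
    assert_eq!(cache.put_kzg_verified_blobs(root, 1), Availability::MissingComponents(root));
    assert_eq!(cache.put_kzg_verified_blobs(root, 1), Availability::Available(root));
}
```

The key property exercised above is order-independence: whichever of block or blobs arrives first, the cache reports `MissingComponents` until the final component lands.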
@@ -69,7 +69,9 @@ pub use self::historical_blocks::HistoricalBlockError;
 pub use attestation_verification::Error as AttestationError;
 pub use beacon_fork_choice_store::{BeaconForkChoiceStore, Error as ForkChoiceStoreError};
 pub use block_verification::{
-    get_block_root, BlockError, ExecutedBlock, ExecutionPayloadError, GossipVerifiedBlock,
+    get_block_root, AvailabilityPendingExecutedBlock, BlockError, ExecutedBlock,
+    ExecutionPayloadError, GossipVerifiedBlock, IntoExecutionPendingBlock,
+    PayloadVerificationOutcome, PayloadVerificationStatus,
 };
 pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
 pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
@@ -379,7 +379,7 @@ mod tests {

         // Try adding an out of bounds index
         let invalid_index = E::max_blobs_per_block() as u64;
-        let sidecar_d = get_blob_sidecar(0, block_root_a, 4);
+        let sidecar_d = get_blob_sidecar(0, block_root_a, invalid_index);
         assert_eq!(
             cache.observe_sidecar(&sidecar_d),
             Err(Error::InvalidBlobIndex(invalid_index)),
@@ -63,7 +63,7 @@ use types::{typenum::U4294967296, *};
 // 4th September 2019
 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
 // Environment variable to read if `fork_from_env` feature is enabled.
-const FORK_NAME_ENV_VAR: &str = "FORK_NAME";
+pub const FORK_NAME_ENV_VAR: &str = "FORK_NAME";

 // Default target aggregators to set during testing, this ensures an aggregator at each slot.
 //
@@ -133,10 +133,13 @@ async fn produces_attestations() {
             assert_eq!(data.target.root, target_root, "bad target root");

             let block_wrapper: BlockWrapper<MainnetEthSpec> = Arc::new(block.clone()).into();
-            let available_block = chain
+            let beacon_chain::blob_verification::MaybeAvailableBlock::Available(available_block) = chain
                 .data_availability_checker
-                .try_check_availability(block_wrapper)
-                .unwrap();
+                .check_availability(block_wrapper)
+                .unwrap()
+            else {
+                panic!("block should be available")
+            };

             let early_attestation = {
                 let proto_block = chain
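Reviewer note: the rewritten test uses Rust's `let`-`else` to destructure the `MaybeAvailableBlock` returned by `check_availability`, diverging with a panic when the block is not yet available. A standalone illustration of the pattern (toy enum, not the Lighthouse type):

```rust
enum MaybeAvailable<T> {
    Available(T),
    #[allow(dead_code)]
    Pending,
}

fn main() {
    let result: MaybeAvailable<u64> = MaybeAvailable::Available(42);
    // `let ... else` binds on the happy path and diverges otherwise,
    // which keeps test bodies flat compared to a full `match`.
    let MaybeAvailable::Available(block) = result else {
        panic!("block should be available")
    };
    assert_eq!(block, 42);
}
```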
@@ -200,11 +203,13 @@ async fn early_attester_cache_old_request() {
         .unwrap();

     let block_wrapper: BlockWrapper<MainnetEthSpec> = head.beacon_block.clone().into();
-    let available_block = harness
-        .chain
+    let beacon_chain::blob_verification::MaybeAvailableBlock::Available(available_block) = harness.chain
         .data_availability_checker
-        .try_check_availability(block_wrapper)
-        .unwrap();
+        .check_availability(block_wrapper)
+        .unwrap()
+    else {
+        panic!("block should be available")
+    };

     harness
         .chain
@@ -6,11 +6,11 @@ edition = "2021"

 [dev-dependencies]
 serde_yaml = "0.8.13"
-state_processing = { path = "../../consensus/state_processing" }
 operation_pool = { path = "../operation_pool" }
 tokio = "1.14.0"

 [dependencies]
+state_processing = { path = "../../consensus/state_processing" }
 beacon_chain = { path = "../beacon_chain" }
 store = { path = "../store" }
 network = { path = "../network" }
@@ -28,6 +28,7 @@ use network::{NetworkConfig, NetworkSenders, NetworkService};
 use slasher::Slasher;
 use slasher_service::SlasherService;
 use slog::{debug, info, warn, Logger};
+use state_processing::per_slot_processing;
 use std::net::TcpListener;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
@@ -346,10 +347,23 @@ where
             None
         };

-        debug!(context.log(), "Downloading finalized block");
-        // Find a suitable finalized block on an epoch boundary.
-        let mut block = remote
-            .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Finalized, &spec)
+        debug!(
+            context.log(),
+            "Downloading finalized state";
+        );
+        let mut state = remote
+            .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Finalized, &spec)
+            .await
+            .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))?
+            .ok_or_else(|| "Checkpoint state missing from remote".to_string())?;
+
+        debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot());
+
+        let finalized_block_slot = state.latest_block_header().slot;
+
+        debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot);
+        let block = remote
+            .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(finalized_block_slot), &spec)
             .await
             .map_err(|e| match e {
                 ApiError::InvalidSsz(e) => format!(
@@ -363,55 +377,15 @@ where

         debug!(context.log(), "Downloaded finalized block");

-        let mut block_slot = block.slot();
-
-        while block.slot() % slots_per_epoch != 0 {
-            block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch;
-
-            debug!(
-                context.log(),
-                "Searching for aligned checkpoint block";
-                "block_slot" => block_slot
-            );
-
-            if let Some(found_block) = remote
-                .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(block_slot), &spec)
-                .await
-                .map_err(|e| {
-                    format!("Error fetching block at slot {}: {:?}", block_slot, e)
-                })?
-            {
-                block = found_block;
-            }
-        }
-
-        debug!(
-            context.log(),
-            "Downloaded aligned finalized block";
-            "block_root" => ?block.canonical_root(),
-            "block_slot" => block.slot(),
-        );
-
-        let state_root = block.state_root();
-        debug!(
-            context.log(),
-            "Downloading finalized state";
-            "state_root" => ?state_root
-        );
-        let state = remote
-            .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Root(state_root), &spec)
-            .await
-            .map_err(|e| {
-                format!(
-                    "Error loading checkpoint state from remote {:?}: {:?}",
-                    state_root, e
-                )
-            })?
-            .ok_or_else(|| {
-                format!("Checkpoint state missing from remote: {:?}", state_root)
-            })?;
-
-        debug!(context.log(), "Downloaded finalized state");
+        let epoch_boundary_slot = state.slot() % slots_per_epoch;
+        if epoch_boundary_slot != 0 {
+            debug!(context.log(), "Advancing state to epoch boundary"; "state_slot" => state.slot(), "epoch_boundary_slot" => epoch_boundary_slot);
+        }
+
+        while state.slot() % slots_per_epoch != 0 {
+            per_slot_processing(&mut state, None, &spec)
+                .map_err(|e| format!("Error advancing state: {:?}", e))?;
+        }

         let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
             .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
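Reviewer note: per the PR description, checkpoint sync no longer walks blocks backwards looking for an epoch-aligned block (some servers, e.g. Teku, don't serve states by root). Instead it downloads the finalized state, fetches the block at `state.latest_block_header().slot`, and advances the state forward with empty-slot processing until it sits on an epoch boundary. A self-contained sketch of that alignment loop, with a plain integer standing in for the state and `per_slot_processing`:

```rust
// Toy illustration of the new alignment logic: the state itself is advanced
// with empty slots until `state.slot % slots_per_epoch == 0`, rather than
// searching for an older epoch-aligned block.
fn main() {
    let slots_per_epoch: u64 = 32;
    let mut state_slot: u64 = 1_000_003; // hypothetical mid-epoch state slot

    let mut advanced = 0;
    while state_slot % slots_per_epoch != 0 {
        // per_slot_processing(&mut state, None, &spec) in the real code
        state_slot += 1;
        advanced += 1;
    }

    assert_eq!(state_slot % slots_per_epoch, 0);
    println!("advanced {advanced} empty slots to reach an epoch boundary");
}
```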
@@ -419,9 +393,9 @@ where
         info!(
             context.log(),
             "Loaded checkpoint block and state";
-            "slot" => block.slot(),
+            "block_slot" => block.slot(),
+            "state_slot" => state.slot(),
             "block_root" => ?block.canonical_root(),
-            "state_root" => ?state_root,
         );

         let service =
@@ -586,7 +586,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
             ForkName::Deneb => {
                 // get random number between 0 and Max Blobs
                 let num_blobs = rand::random::<usize>() % T::max_blobs_per_block();
-                let (bundle, transactions) = self.generate_random_blobs(num_blobs)?;
+                let kzg = self.kzg.as_ref().ok_or("kzg not initialized")?;
+                let (bundle, transactions) = generate_random_blobs(num_blobs, kzg)?;
                 for tx in Vec::from(transactions) {
                     execution_payload
                         .transactions_mut()
|
||||
payload_id: id.map(Into::into),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_random_blobs(
|
||||
&self,
|
||||
n_blobs: usize,
|
||||
) -> Result<(BlobsBundleV1<T>, Transactions<T>), String> {
|
||||
let mut bundle = BlobsBundleV1::<T>::default();
|
||||
let mut transactions = vec![];
|
||||
for blob_index in 0..n_blobs {
|
||||
// fill a vector with random bytes
|
||||
let mut blob_bytes = [0u8; BYTES_PER_BLOB];
|
||||
rand::thread_rng().fill_bytes(&mut blob_bytes);
|
||||
// Ensure that the blob is canonical by ensuring that
|
||||
// each field element contained in the blob is < BLS_MODULUS
|
||||
for i in 0..FIELD_ELEMENTS_PER_BLOB {
|
||||
blob_bytes[i * BYTES_PER_FIELD_ELEMENT + BYTES_PER_FIELD_ELEMENT - 1] = 0;
|
||||
}
|
||||
|
||||
let blob = Blob::<T>::new(Vec::from(blob_bytes))
|
||||
.map_err(|e| format!("error constructing random blob: {:?}", e))?;
|
||||
|
||||
let commitment = self
|
||||
.kzg
|
||||
.as_ref()
|
||||
.ok_or("kzg not initialized")?
|
||||
.blob_to_kzg_commitment(blob_bytes.into())
|
||||
.map_err(|e| format!("error computing kzg commitment: {:?}", e))?;
|
||||
|
||||
let proof = self
|
||||
.kzg
|
||||
.as_ref()
|
||||
.ok_or("kzg not initialized")?
|
||||
.compute_blob_kzg_proof(blob_bytes.into(), commitment)
|
||||
.map_err(|e| format!("error computing kzg proof: {:?}", e))?;
|
||||
|
||||
let versioned_hash = commitment.calculate_versioned_hash();
|
||||
|
||||
let blob_transaction = BlobTransaction {
|
||||
chain_id: Default::default(),
|
||||
nonce: 0,
|
||||
max_priority_fee_per_gas: Default::default(),
|
||||
max_fee_per_gas: Default::default(),
|
||||
gas: 100000,
|
||||
to: None,
|
||||
value: Default::default(),
|
||||
data: Default::default(),
|
||||
access_list: Default::default(),
|
||||
max_fee_per_data_gas: Default::default(),
|
||||
versioned_hashes: vec![versioned_hash].into(),
|
||||
};
|
||||
let bad_signature = EcdsaSignature {
|
||||
y_parity: false,
|
||||
r: Uint256::from(0),
|
||||
s: Uint256::from(0),
|
||||
};
|
||||
let signed_blob_transaction = SignedBlobTransaction {
|
||||
message: blob_transaction,
|
||||
signature: bad_signature,
|
||||
};
|
||||
// calculate transaction bytes
|
||||
let tx_bytes = [BLOB_TX_TYPE]
|
||||
.into_iter()
|
||||
.chain(signed_blob_transaction.as_ssz_bytes().into_iter())
|
||||
.collect::<Vec<_>>();
|
||||
let tx = Transaction::<T::MaxBytesPerTransaction>::from(tx_bytes);
|
||||
|
||||
transactions.push(tx);
|
||||
bundle
|
||||
.blobs
|
||||
.push(blob)
|
||||
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
|
||||
bundle
|
||||
.commitments
|
||||
.push(commitment)
|
||||
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
|
||||
bundle
|
||||
.proofs
|
||||
.push(proof)
|
||||
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
|
||||
pub fn generate_random_blobs<T: EthSpec>(
|
||||
n_blobs: usize,
|
||||
kzg: &Kzg,
|
||||
) -> Result<(BlobsBundleV1<T>, Transactions<T>), String> {
|
||||
let mut bundle = BlobsBundleV1::<T>::default();
|
||||
let mut transactions = vec![];
|
||||
for blob_index in 0..n_blobs {
|
||||
// fill a vector with random bytes
|
||||
let mut blob_bytes = [0u8; BYTES_PER_BLOB];
|
||||
rand::thread_rng().fill_bytes(&mut blob_bytes);
|
||||
// Ensure that the blob is canonical by ensuring that
|
||||
// each field element contained in the blob is < BLS_MODULUS
|
||||
for i in 0..FIELD_ELEMENTS_PER_BLOB {
|
||||
blob_bytes[i * BYTES_PER_FIELD_ELEMENT + BYTES_PER_FIELD_ELEMENT - 1] = 0;
|
||||
}
|
||||
|
||||
Ok((bundle, transactions.into()))
|
||||
let blob = Blob::<T>::new(Vec::from(blob_bytes))
|
||||
.map_err(|e| format!("error constructing random blob: {:?}", e))?;
|
||||
|
||||
let commitment = kzg
|
||||
.blob_to_kzg_commitment(blob_bytes.into())
|
||||
.map_err(|e| format!("error computing kzg commitment: {:?}", e))?;
|
||||
|
||||
let proof = kzg
|
||||
.compute_blob_kzg_proof(blob_bytes.into(), commitment)
|
||||
.map_err(|e| format!("error computing kzg proof: {:?}", e))?;
|
||||
|
||||
let versioned_hash = commitment.calculate_versioned_hash();
|
||||
|
||||
let blob_transaction = BlobTransaction {
|
||||
chain_id: Default::default(),
|
||||
nonce: 0,
|
||||
max_priority_fee_per_gas: Default::default(),
|
||||
max_fee_per_gas: Default::default(),
|
||||
gas: 100000,
|
||||
to: None,
|
||||
value: Default::default(),
|
||||
data: Default::default(),
|
||||
access_list: Default::default(),
|
||||
max_fee_per_data_gas: Default::default(),
|
||||
versioned_hashes: vec![versioned_hash].into(),
|
||||
};
|
||||
let bad_signature = EcdsaSignature {
|
||||
y_parity: false,
|
||||
r: Uint256::from(0),
|
||||
s: Uint256::from(0),
|
||||
};
|
||||
let signed_blob_transaction = SignedBlobTransaction {
|
||||
message: blob_transaction,
|
||||
signature: bad_signature,
|
||||
};
|
||||
// calculate transaction bytes
|
||||
let tx_bytes = [BLOB_TX_TYPE]
|
||||
.into_iter()
|
||||
.chain(signed_blob_transaction.as_ssz_bytes().into_iter())
|
||||
.collect::<Vec<_>>();
|
||||
let tx = Transaction::<T::MaxBytesPerTransaction>::from(tx_bytes);
|
||||
|
||||
transactions.push(tx);
|
||||
bundle
|
||||
.blobs
|
||||
.push(blob)
|
||||
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
|
||||
bundle
|
||||
.commitments
|
||||
.push(commitment)
|
||||
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
|
||||
bundle
|
||||
.proofs
|
||||
.push(proof)
|
||||
.map_err(|_| format!("blobs are full, blob index: {:?}", blob_index))?;
|
||||
}
|
||||
|
||||
Ok((bundle, transactions.into()))
|
||||
}
|
||||
|
||||
fn payload_id_from_u64(n: u64) -> PayloadId {
|
||||
|
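Reviewer note: the blob generator moves from a method that re-checked the `Option<Kzg>` field on every use to a free function taking a resolved `&Kzg`, so the caller handles the "kzg not initialized" case exactly once. A runnable toy model of the design choice (illustrative names only, not the real types):

```rust
// Stand-in for the real Kzg context.
struct Kzg;
impl Kzg {
    fn commit(&self, _blob: &[u8]) -> u64 {
        0
    }
}

struct Generator {
    kzg: Option<Kzg>,
}

// New style: a free function over an already-resolved reference.
fn generate_random_blobs(n_blobs: usize, kzg: &Kzg) -> Vec<u64> {
    (0..n_blobs).map(|_| kzg.commit(&[0u8; 32])).collect()
}

impl Generator {
    fn produce(&self) -> Result<Vec<u64>, String> {
        // The Option is unwrapped once, at the call site.
        let kzg = self.kzg.as_ref().ok_or("kzg not initialized")?;
        Ok(generate_random_blobs(3, kzg))
    }
}

fn main() {
    let g = Generator { kzg: Some(Kzg) };
    assert_eq!(g.produce().unwrap().len(), 3);
}
```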
@@ -24,7 +24,9 @@ use types::{EthSpec, ExecutionBlockHash, Uint256};
 use warp::{http::StatusCode, Filter, Rejection};

 use crate::EngineCapabilities;
-pub use execution_block_generator::{generate_pow_block, Block, ExecutionBlockGenerator};
+pub use execution_block_generator::{
+    generate_pow_block, generate_random_blobs, Block, ExecutionBlockGenerator,
+};
 pub use hook::Hook;
 pub use mock_builder::{Context as MockBuilderContext, MockBuilder, Operation, TestingBuilder};
 pub use mock_execution_layer::MockExecutionLayer;
@@ -12,6 +12,7 @@ use slog::{debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use std::time::Duration;
+use store::FixedVector;
 use tokio::sync::mpsc::UnboundedSender;
 use tree_hash::TreeHash;
 use types::{
@@ -77,8 +78,11 @@ pub async fn publish_block<T: BeaconChainTypes>(
                 PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))),
             )?;
         }
-        let blobs = signed_blobs.into_iter().map(|blob| blob.message).collect();
-        BlockWrapper::BlockAndBlobs(block, blobs)
+        let blobs = signed_blobs
+            .into_iter()
+            .map(|blob| Some(blob.message))
+            .collect::<Vec<_>>();
+        BlockWrapper::BlockAndBlobs(block, FixedVector::from(blobs))
     } else {
         block.into()
     }
@@ -136,17 +140,8 @@ pub async fn publish_block<T: BeaconChainTypes>(

             Ok(())
         }
-        Ok(AvailabilityProcessingStatus::PendingBlock(block_root)) => {
-            let msg = format!("Missing block with root {:?}", block_root);
-            error!(
-                log,
-                "Invalid block provided to HTTP API";
-                "reason" => &msg
-            );
-            Err(warp_utils::reject::broadcast_without_import(msg))
-        }
-        Ok(AvailabilityProcessingStatus::PendingBlobs(blob_ids)) => {
-            let msg = format!("Missing blobs {:?}", blob_ids);
+        Ok(AvailabilityProcessingStatus::MissingComponents(_, block_root)) => {
+            let msg = format!("Missing parts of block with root {:?}", block_root);
             error!(
                 log,
                 "Invalid block provided to HTTP API";
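Reviewer note: the HTTP publish path previously distinguished "missing block" from "missing blobs"; both now fold into one `MissingComponents` arm keyed by block root. A minimal sketch of the consolidated handling (stand-in types, not the real API):

```rust
#[derive(Debug)]
enum AvailabilityProcessingStatus {
    #[allow(dead_code)]
    Imported([u8; 32]),
    // slot, block root
    MissingComponents(u64, [u8; 32]),
}

// Toy handler: success imports; anything missing is one uniform rejection.
fn handle(status: AvailabilityProcessingStatus) -> Result<(), String> {
    match status {
        AvailabilityProcessingStatus::Imported(_) => Ok(()),
        // Previously two arms (PendingBlock / PendingBlobs); now one.
        AvailabilityProcessingStatus::MissingComponents(_slot, root) => {
            Err(format!("Missing parts of block with root {:?}", root))
        }
    }
}

fn main() {
    assert!(handle(AvailabilityProcessingStatus::MissingComponents(1, [0; 32])).is_err());
}
```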
@@ -19,7 +19,7 @@ store = { path = "../store" }
 lighthouse_network = { path = "../lighthouse_network" }
 types = { path = "../../consensus/types" }
 slot_clock = { path = "../../common/slot_clock" }
-slog = { version = "2.5.2", features = ["max_level_trace"] }
+slog = { version = "2.5.2", features = ["max_level_trace", "nested-values"] }
 hex = "0.4.2"
 ethereum_ssz = "0.5.0"
 ssz_types = "0.5.0"
@@ -46,4 +46,8 @@ derivative = "2.2.0"
 delay_map = "0.3.0"
 ethereum-types = { version = "0.14.1", optional = true }
+operation_pool = { path = "../operation_pool" }
+execution_layer = { path = "../execution_layer" }

 [features]
 spec-minimal = ["beacon_chain/spec-minimal"]
+fork_from_env = ["beacon_chain/fork_from_env"]
@@ -65,6 +65,7 @@ use std::{cmp, collections::HashSet};
 use task_executor::TaskExecutor;
 use tokio::sync::mpsc;
 use tokio::sync::mpsc::error::TrySendError;
+use types::blob_sidecar::FixedBlobSidecarList;
 use types::{
     Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate,
     ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecar,
@@ -121,9 +122,9 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
 /// before we start dropping them.
 const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;

-/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that
+/// The maximum number of queued `SignedBlobSidecar` objects received on gossip that
 /// will be stored before we start dropping them.
-const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024;
+const MAX_GOSSIP_BLOB_QUEUE_LEN: usize = 1_024;

 /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but
 /// within acceptable clock disparity) that will be queued before we start dropping them.
@@ -164,6 +165,7 @@ const MAX_SYNC_CONTRIBUTION_QUEUE_LEN: usize = 1024;
 /// The maximum number of queued `SignedBeaconBlock` objects received from the network RPC that
 /// will be stored before we start dropping them.
 const MAX_RPC_BLOCK_QUEUE_LEN: usize = 1_024;
+const MAX_RPC_BLOB_QUEUE_LEN: usize = 1_024 * 4;

 /// The maximum number of queued `Vec<SignedBeaconBlock>` objects received during syncing that will
 /// be stored before we start dropping them.
@@ -233,6 +235,7 @@ pub const GOSSIP_SYNC_CONTRIBUTION: &str = "gossip_sync_contribution";
 pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_update";
 pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
 pub const RPC_BLOCK: &str = "rpc_block";
+pub const RPC_BLOB: &str = "rpc_blob";
 pub const CHAIN_SEGMENT: &str = "chain_segment";
 pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill";
 pub const STATUS_PROCESSING: &str = "status_processing";
@@ -628,6 +631,23 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
         }
     }

+    pub fn rpc_blobs(
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+        seen_timestamp: Duration,
+        process_type: BlockProcessType,
+    ) -> Self {
+        Self {
+            drop_during_sync: false,
+            work: Work::RpcBlobs {
+                block_root,
+                blobs,
+                seen_timestamp,
+                process_type,
+            },
+        }
+    }
+
     /// Create a new work event to import `blocks` as a beacon chain segment.
     pub fn chain_segment(
         process_id: ChainSegmentProcessId,
@@ -927,6 +947,12 @@ pub enum Work<T: BeaconChainTypes> {
         process_type: BlockProcessType,
         should_process: bool,
     },
+    RpcBlobs {
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+        seen_timestamp: Duration,
+        process_type: BlockProcessType,
+    },
     ChainSegment {
         process_id: ChainSegmentProcessId,
         blocks: Vec<BlockWrapper<T::EthSpec>>,
@@ -986,6 +1012,7 @@ impl<T: BeaconChainTypes> Work<T> {
             Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE,
             Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE,
             Work::RpcBlock { .. } => RPC_BLOCK,
+            Work::RpcBlobs { .. } => RPC_BLOB,
             Work::ChainSegment {
                 process_id: ChainSegmentProcessId::BackSyncBatchId { .. },
                 ..
@@ -1148,11 +1175,11 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {

         // Using a FIFO queue since blocks need to be imported sequentially.
         let mut rpc_block_queue = FifoQueue::new(MAX_RPC_BLOCK_QUEUE_LEN);
+        let mut rpc_blob_queue = FifoQueue::new(MAX_RPC_BLOB_QUEUE_LEN);
         let mut chain_segment_queue = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
         let mut backfill_chain_segment = FifoQueue::new(MAX_CHAIN_SEGMENT_QUEUE_LEN);
         let mut gossip_block_queue = FifoQueue::new(MAX_GOSSIP_BLOCK_QUEUE_LEN);
-        let mut gossip_block_and_blobs_sidecar_queue =
-            FifoQueue::new(MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN);
+        let mut gossip_blob_queue = FifoQueue::new(MAX_GOSSIP_BLOB_QUEUE_LEN);
         let mut delayed_block_queue = FifoQueue::new(MAX_DELAYED_BLOCK_QUEUE_LEN);

         let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN);
@@ -1302,6 +1329,8 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 // evolves.
                 } else if let Some(item) = rpc_block_queue.pop() {
                     self.spawn_worker(item, toolbox);
+                } else if let Some(item) = rpc_blob_queue.pop() {
+                    self.spawn_worker(item, toolbox);
                 // Check delayed blocks before gossip blocks, the gossip blocks might rely
                 // on the delayed ones.
                 } else if let Some(item) = delayed_block_queue.pop() {
@@ -1310,7 +1339,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 // required to verify some attestations.
                 } else if let Some(item) = gossip_block_queue.pop() {
                     self.spawn_worker(item, toolbox);
-                } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() {
+                } else if let Some(item) = gossip_blob_queue.pop() {
                     self.spawn_worker(item, toolbox);
                 // Check the aggregates, *then* the unaggregates since we assume that
                 // aggregates are more valuable to local validators and effectively give us
@@ -1526,7 +1555,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                             gossip_block_queue.push(work, work_id, &self.log)
                         }
                         Work::GossipSignedBlobSidecar { .. } => {
-                            gossip_block_and_blobs_sidecar_queue.push(work, work_id, &self.log)
+                            gossip_blob_queue.push(work, work_id, &self.log)
                        }
                        Work::DelayedImportBlock { .. } => {
                            delayed_block_queue.push(work, work_id, &self.log)
@@ -1551,6 +1580,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                            optimistic_update_queue.push(work, work_id, &self.log)
                        }
                        Work::RpcBlock { .. } => rpc_block_queue.push(work, work_id, &self.log),
+                        Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log),
                        Work::ChainSegment { ref process_id, .. } => match process_id {
                            ChainSegmentProcessId::RangeBatchId { .. }
                            | ChainSegmentProcessId::ParentLookup { .. } => {
@@ -1620,6 +1650,10 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                    &metrics::BEACON_PROCESSOR_RPC_BLOCK_QUEUE_TOTAL,
                    rpc_block_queue.len() as i64,
                );
+                metrics::set_gauge(
+                    &metrics::BEACON_PROCESSOR_RPC_BLOB_QUEUE_TOTAL,
+                    rpc_blob_queue.len() as i64,
+                );
                metrics::set_gauge(
                    &metrics::BEACON_PROCESSOR_CHAIN_SEGMENT_QUEUE_TOTAL,
                    chain_segment_queue.len() as i64,
@@ -1977,6 +2011,17 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                duplicate_cache,
                should_process,
            )),
+            Work::RpcBlobs {
+                block_root,
+                blobs,
+                seen_timestamp,
+                process_type,
+            } => task_spawner.spawn_async(worker.process_rpc_blobs(
+                block_root,
+                blobs,
+                seen_timestamp,
+                process_type,
+            )),
            /*
             * Verification for a chain segment (multiple blocks).
             */
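Reviewer note: the processor gains a dedicated RPC-blob FIFO queue, drained immediately after RPC blocks so blob batches keep pace with block imports. A simplified, runnable model of the drain priority (the real event loop interleaves many more queues, including the delayed-block queue between RPC and gossip work):

```rust
use std::collections::VecDeque;

// Toy model of the drain order after this change: RPC blocks, then RPC
// blobs, then gossip blocks, then gossip blobs. Work items are just labels.
fn next_work(
    rpc_block_queue: &mut VecDeque<&'static str>,
    rpc_blob_queue: &mut VecDeque<&'static str>,
    gossip_block_queue: &mut VecDeque<&'static str>,
    gossip_blob_queue: &mut VecDeque<&'static str>,
) -> Option<&'static str> {
    rpc_block_queue
        .pop_front()
        .or_else(|| rpc_blob_queue.pop_front())
        .or_else(|| gossip_block_queue.pop_front())
        .or_else(|| gossip_blob_queue.pop_front())
}

fn main() {
    let mut rpc_blocks = VecDeque::from(["rpc_block"]);
    let mut rpc_blobs = VecDeque::from(["rpc_blob"]);
    let mut gossip_blocks = VecDeque::from(["gossip_block"]);
    let mut gossip_blobs = VecDeque::from(["gossip_blob"]);
    assert_eq!(
        next_work(&mut rpc_blocks, &mut rpc_blobs, &mut gossip_blocks, &mut gossip_blobs),
        Some("rpc_block")
    );
    // With RPC blocks exhausted, RPC blobs come before any gossip work.
    assert_eq!(
        next_work(&mut rpc_blocks, &mut rpc_blobs, &mut gossip_blocks, &mut gossip_blobs),
        Some("rpc_blob")
    );
}
```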
@@ -14,8 +14,7 @@ use super::MAX_SCHEDULED_WORK_QUEUE_LEN;
 use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent};
 use crate::metrics;
 use crate::sync::manager::BlockProcessType;
-use beacon_chain::blob_verification::AsBlock;
-use beacon_chain::blob_verification::BlockWrapper;
+use beacon_chain::blob_verification::{AsBlock, BlockWrapper};
 use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
 use fnv::FnvHashMap;
 use futures::task::Poll;
@@ -682,19 +682,15 @@ impl<T: BeaconChainTypes> Worker<T> {
             }
             Err(err) => {
                 match err {
-                    BlobError::BlobParentUnknown {
-                        blob_root,
-                        blob_parent_root,
-                    } => {
+                    BlobError::BlobParentUnknown(blob) => {
                         debug!(
                             self.log,
                             "Unknown parent hash for blob";
                             "action" => "requesting parent",
-                            "blob_root" => %blob_root,
-                            "parent_root" => %blob_parent_root
+                            "blob_root" => %blob.block_root,
+                            "parent_root" => %blob.block_parent_root
                         );
-                        // TODO: send blob to reprocessing queue and queue a sync request for the blob.
-                        todo!();
+                        self.send_sync_message(SyncMessage::UnknownParentBlob(peer_id, blob));
                     }
                     BlobError::ProposerSignatureInvalid
                     | BlobError::UnknownValidator(_)
@@ -757,28 +753,42 @@ impl<T: BeaconChainTypes> Worker<T> {
         // This value is not used presently, but it might come in handy for debugging.
         _seen_duration: Duration,
     ) {
-        // TODO
+        let blob_root = verified_blob.block_root();
+        let blob_slot = verified_blob.slot();
+        let blob_clone = verified_blob.clone().to_blob();
         match self
             .chain
             .process_blob(verified_blob, CountUnrealized::True)
             .await
         {
             Ok(AvailabilityProcessingStatus::Imported(_hash)) => {
-                todo!()
-                // add to metrics
-                // logging
+                //TODO(sean) add metrics and logging
+                self.chain.recompute_head_at_current_slot().await;
             }
-            Ok(AvailabilityProcessingStatus::PendingBlobs(pending_blobs)) => self
-                .send_sync_message(SyncMessage::UnknownBlobHash {
-                    peer_id,
-                    pending_blobs,
-                }),
-            Ok(AvailabilityProcessingStatus::PendingBlock(block_hash)) => {
-                self.send_sync_message(SyncMessage::UnknownBlockHash(peer_id, block_hash));
-            }
-            Err(_err) => {
-                // handle errors
-                todo!()
+            Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_hash)) => {
+                self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
+                    slot, peer_id, block_hash,
+                ));
+            }
+            Err(err) => {
+                debug!(
+                    self.log,
+                    "Invalid gossip blob";
+                    "outcome" => ?err,
+                    "block root" => ?blob_root,
+                    "block slot" => blob_slot,
+                    "blob index" => blob_clone.index,
+                );
+                self.gossip_penalize_peer(
+                    peer_id,
+                    PeerAction::MidToleranceError,
+                    "bad_gossip_blob_ssz",
+                );
+                trace!(
+                    self.log,
+                    "Invalid gossip blob ssz";
+                    "ssz" => format_args!("0x{}", hex::encode(blob_clone.as_ssz_bytes())),
+                );
             }
         }
     }
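Reviewer note: gossip-blob processing now has three terminal outcomes: imported (recompute the head), missing components (hand off to sync), or invalid (penalize the peer). A compact sketch of that control flow, with the side effects reduced to prints:

```rust
// Toy control flow mirroring the worker's outcome handling above.
enum Outcome {
    Imported,
    MissingComponents,
    Invalid(&'static str),
}

fn on_blob_processed(outcome: Outcome) {
    match outcome {
        Outcome::Imported => println!("recompute head at current slot"),
        Outcome::MissingComponents => {
            println!("send SyncMessage::MissingGossipBlockComponents")
        }
        Outcome::Invalid(reason) => {
            println!("penalize peer (mid tolerance): {reason}")
        }
    }
}

fn main() {
    on_blob_processed(Outcome::MissingComponents);
}
```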
@@ -918,16 +928,13 @@ impl<T: BeaconChainTypes> Worker<T> {

                 verified_block
             }
-            Err(BlockError::AvailabilityCheck(_err)) => {
-                todo!()
-            }
             Err(BlockError::ParentUnknown(block)) => {
                 debug!(
                     self.log,
                     "Unknown parent for gossip block";
                     "root" => ?block_root
                 );
-                self.send_sync_message(SyncMessage::UnknownBlock(peer_id, block, block_root));
+                self.send_sync_message(SyncMessage::UnknownParentBlock(peer_id, block, block_root));
                 return None;
             }
             Err(e @ BlockError::BeaconChainError(_)) => {
@@ -987,8 +994,8 @@ impl<T: BeaconChainTypes> Worker<T> {
                 );
                 return None;
             }
-            Err(e @ BlockError::BlobValidation(_)) => {
-                warn!(self.log, "Could not verify blob for gossip. Rejecting the block and blob";
+            Err(e @ BlockError::BlobValidation(_)) | Err(e @ BlockError::AvailabilityCheck(_)) => {
+                warn!(self.log, "Could not verify block against known blobs in gossip. Rejecting the block";
                     "error" => %e);
                 self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
                 self.gossip_penalize_peer(
@@ -1132,23 +1139,13 @@ impl<T: BeaconChainTypes> Worker<T> {

                 self.chain.recompute_head_at_current_slot().await;
             }
-            Ok(AvailabilityProcessingStatus::PendingBlock(block_root)) => {
-                // This error variant doesn't make any sense in this context
-                crit!(
-                    self.log,
-                    "Internal error. Cannot get AvailabilityProcessingStatus::PendingBlock on processing block";
-                    "block_root" => %block_root
-                );
-            }
-            Ok(AvailabilityProcessingStatus::PendingBlobs(pending_blobs)) => {
+            Ok(AvailabilityProcessingStatus::MissingComponents(slot, block_root)) => {
                 // make rpc request for blob
-                self.send_sync_message(SyncMessage::UnknownBlobHash {
-                    peer_id,
-                    pending_blobs: pending_blobs.to_vec(),
-                });
-            }
-            Err(BlockError::AvailabilityCheck(_)) => {
-                todo!()
+                self.send_sync_message(SyncMessage::MissingGossipBlockComponents(
+                    *slot,
+                    peer_id,
+                    *block_root,
+                ));
             }
             Err(BlockError::ParentUnknown(block)) => {
                 // Inform the sync manager to find parents for this block
@@ -1158,7 +1155,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                     "Block with unknown parent attempted to be processed";
                     "peer_id" => %peer_id
                 );
-                self.send_sync_message(SyncMessage::UnknownBlock(
+                self.send_sync_message(SyncMessage::UnknownParentBlock(
                     peer_id,
                     block.clone(),
                     block_root,
@@ -1997,7 +1994,10 @@ impl<T: BeaconChainTypes> Worker<T> {
             // We don't know the block, get the sync manager to handle the block lookup, and
             // send the attestation to be scheduled for re-processing.
             self.sync_tx
-                .send(SyncMessage::UnknownBlockHash(peer_id, *beacon_block_root))
+                .send(SyncMessage::UnknownBlockHashFromAttestation(
+                    peer_id,
+                    *beacon_block_root,
+                ))
                 .unwrap_or_else(|_| {
                     warn!(
                         self.log,
@@ -5,7 +5,7 @@ use crate::beacon_processor::work_reprocessing_queue::QueuedRpcBlock;
 use crate::beacon_processor::worker::FUTURE_SLOT_TOLERANCE;
 use crate::beacon_processor::DuplicateCache;
 use crate::metrics;
-use crate::sync::manager::{BlockProcessType, SyncMessage};
+use crate::sync::manager::{BlockProcessType, ResponseType, SyncMessage};
 use crate::sync::{BatchProcessResult, ChainId};
-use beacon_chain::blob_verification::BlockWrapper;
+use beacon_chain::blob_verification::{AsBlock, MaybeAvailableBlock};
@@ -21,6 +21,7 @@ use slog::{debug, error, info, warn};
 use slot_clock::SlotClock;
 use std::time::{SystemTime, UNIX_EPOCH};
 use tokio::sync::mpsc;
+use types::blob_sidecar::FixedBlobSidecarList;
 use types::{Epoch, Hash256};

 /// Id associated to a batch processing request, either a sync batch or a parent lookup.
@@ -57,9 +58,10 @@ impl<T: BeaconChainTypes> Worker<T> {
     ) {
         if !should_process {
             // Sync handles these results
-            self.send_sync_message(SyncMessage::BlockProcessed {
+            self.send_sync_message(SyncMessage::BlockComponentProcessed {
                 process_type,
-                result: crate::sync::manager::BlockProcessResult::Ignored,
+                result: crate::sync::manager::BlockProcessingResult::Ignored,
+                response_type: crate::sync::manager::ResponseType::Block,
             });
             return;
         }
@@ -180,7 +182,8 @@ impl<T: BeaconChainTypes> Worker<T> {
         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);

         // RPC block imported, regardless of process type
-        //TODO(sean) handle pending availability variants
+        //TODO(sean) do we need to do anything here for missing blobs? or is passing the result
+        // along to sync enough?
         if let &Ok(AvailabilityProcessingStatus::Imported(hash)) = &result {
             info!(self.log, "New RPC block received"; "slot" => slot, "hash" => %hash);

@@ -205,15 +208,50 @@ impl<T: BeaconChainTypes> Worker<T> {
             }
         }
         // Sync handles these results
-        self.send_sync_message(SyncMessage::BlockProcessed {
+        self.send_sync_message(SyncMessage::BlockComponentProcessed {
             process_type,
             result: result.into(),
+            response_type: ResponseType::Block,
         });

         // Drop the handle to remove the entry from the cache
         drop(handle);
     }

+    pub async fn process_rpc_blobs(
+        self,
+        block_root: Hash256,
+        blobs: FixedBlobSidecarList<T::EthSpec>,
+        _seen_timestamp: Duration,
+        process_type: BlockProcessType,
+    ) {
+        let Some(slot) = blobs.iter().find_map(|blob|{
+            blob.as_ref().map(|blob| blob.slot)
+        }) else {
+            return;
+        };
+
+        let result = self
+            .chain
+            .check_availability_and_maybe_import(
+                slot,
+                |chain| {
+                    chain
+                        .data_availability_checker
+                        .put_rpc_blobs(block_root, blobs)
+                },
+                CountUnrealized::True,
+            )
+            .await;
+
+        // Sync handles these results
+        self.send_sync_message(SyncMessage::BlockComponentProcessed {
+            process_type,
+            result: result.into(),
+            response_type: ResponseType::Blob,
+        });
+    }
+
     /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync
     /// thread if more blocks are needed to process it.
     pub async fn process_chain_segment(
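Reviewer note: `process_rpc_blobs` recovers the slot from the first present entry of a fixed list of optional blobs and bails out early when every slot is empty; the result is then reported back to sync tagged `ResponseType::Blob`. The `find_map` idiom, isolated into a runnable form (a toy tuple stands in for a blob sidecar):

```rust
// First present (slot, index) entry wins; None means nothing to process,
// mirroring the early `return` in the worker above.
fn first_slot(blobs: &[Option<(u64, u8)>]) -> Option<u64> {
    blobs.iter().find_map(|blob| blob.as_ref().map(|(slot, _)| *slot))
}

fn main() {
    let blobs = [None, Some((42, 0)), Some((42, 1))];
    assert_eq!(first_slot(&blobs), Some(42));
    assert_eq!(first_slot(&[None, None]), None);
}
```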
@@ -453,8 +453,8 @@ impl<T: BeaconChainTypes> Router<T> {
                 }
                 id @ (SyncId::BackFillBlocks { .. }
                 | SyncId::RangeBlocks { .. }
-                | SyncId::BackFillBlobs { .. }
-                | SyncId::RangeBlobs { .. }) => id,
+                | SyncId::BackFillBlockAndBlobs { .. }
+                | SyncId::RangeBlockAndBlobs { .. }) => id,
             },
             RequestId::Router => unreachable!("All BBRange requests belong to sync"),
         };
@@ -512,8 +512,8 @@ impl<T: BeaconChainTypes> Router<T> {
                 id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id,
                 SyncId::BackFillBlocks { .. }
                 | SyncId::RangeBlocks { .. }
-                | SyncId::RangeBlobs { .. }
-                | SyncId::BackFillBlobs { .. } => {
+                | SyncId::RangeBlockAndBlobs { .. }
+                | SyncId::BackFillBlockAndBlobs { .. } => {
                     unreachable!("Batch syncing do not request BBRoot requests")
                 }
             },
@@ -545,8 +545,8 @@ impl<T: BeaconChainTypes> Router<T> {
                 id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id,
                 SyncId::BackFillBlocks { .. }
                 | SyncId::RangeBlocks { .. }
-                | SyncId::RangeBlobs { .. }
-                | SyncId::BackFillBlobs { .. } => {
+                | SyncId::RangeBlockAndBlobs { .. }
+                | SyncId::BackFillBlockAndBlobs { .. } => {
                     unreachable!("Batch syncing does not request BBRoot requests")
                 }
             },
@@ -151,7 +151,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
     }

     /// Return count of all currently subscribed subnets (long-lived **and** short-lived).
-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub fn subscription_count(&self) -> usize {
         if self.subscribe_all_subnets {
             self.beacon_chain.spec.attestation_subnet_count as usize
@@ -167,7 +167,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
     }

     /// Returns whether we are subscribed to a subnet for testing purposes.
-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub(crate) fn is_subscribed(
         &self,
         subnet_id: &SubnetId,
@@ -179,7 +179,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
         }
     }

-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub(crate) fn long_lived_subscriptions(&self) -> &HashSet<SubnetId> {
         &self.long_lived_subscriptions
     }
@@ -91,7 +91,7 @@ impl<T: BeaconChainTypes> SyncCommitteeService<T> {
     }

     /// Return count of all currently subscribed subnets.
-    #[cfg(test)]
+    #[cfg(all(test, feature = "spec-mainnet"))]
     pub fn subscription_count(&self) -> usize {
         use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
         if self.subscribe_all_subnets {
@@ -1,3 +1,4 @@
+#![cfg(feature = "spec-mainnet")]
 use super::*;
 use beacon_chain::{
     builder::{BeaconChainBuilder, Witness},
beacon_node/network/src/sync/block_lookups/delayed_lookup.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
+use crate::sync::SyncMessage;
+use beacon_chain::{BeaconChain, BeaconChainTypes};
+use slog::{crit, warn};
+use slot_clock::SlotClock;
+use std::sync::Arc;
+use tokio::sync::mpsc;
+use tokio::time::interval_at;
+use tokio::time::Instant;
+use types::Hash256;
+
+#[derive(Debug)]
+pub enum DelayedLookupMessage {
+    /// A lookup for all components of a block or blob seen over gossip.
+    MissingComponents(Hash256),
+}
+
+/// This service is responsible for collecting lookup messages and sending them back to sync
+/// for processing after a short delay.
+///
+/// We want to delay lookups triggered from gossip for the following reasons:
+///
+/// - We only want to make one request for components we are unlikely to see on gossip. This means
+///   we don't have to repeatedly update our RPC request's state as we receive gossip components.
+///
+/// - We are likely to receive blocks/blobs over gossip more quickly than we could via an RPC request.
+///
+/// - Delaying a lookup means we are less likely to simultaneously download the same blocks/blobs
+///   over gossip and RPC.
+///
+/// - We would prefer to request peers based on whether we've seen them attest, because this gives
+///   us an idea about whether they *should* have the block/blobs we're missing. This is because a
+///   node should not attest to a block unless it has all the blobs for that block. This gives us a
+///   stronger basis for peer scoring.
+pub fn spawn_delayed_lookup_service<T: BeaconChainTypes>(
+    executor: &task_executor::TaskExecutor,
+    beacon_chain: Arc<BeaconChain<T>>,
+    mut delayed_lookups_recv: mpsc::Receiver<DelayedLookupMessage>,
+    sync_send: mpsc::UnboundedSender<SyncMessage<T::EthSpec>>,
+    log: slog::Logger,
+) {
+    executor.spawn(
+        async move {
+            let slot_duration = beacon_chain.slot_clock.slot_duration();
+            let delay = beacon_chain.slot_clock.single_lookup_delay();
+            let interval_start = match (
+                beacon_chain.slot_clock.duration_to_next_slot(),
+                beacon_chain.slot_clock.seconds_from_current_slot_start(),
+            ) {
+                (Some(duration_to_next_slot), Some(seconds_from_current_slot_start)) => {
+                    let duration_until_start = if seconds_from_current_slot_start > delay {
+                        duration_to_next_slot + delay
+                    } else {
+                        delay - seconds_from_current_slot_start
+                    };
+                    tokio::time::Instant::now() + duration_until_start
+                }
+                _ => {
+                    crit!(log,
+                        "Failed to read slot clock, delayed lookup service timing will be inaccurate.\
+                        This may degrade performance"
+                    );
+                    Instant::now()
+                }
+            };
+
+            let mut interval = interval_at(interval_start, slot_duration);
+            loop {
+                interval.tick().await;
+                while let Ok(msg) = delayed_lookups_recv.try_recv() {
+                    match msg {
+                        DelayedLookupMessage::MissingComponents(block_root) => {
+                            if let Err(e) = sync_send
+                                .send(SyncMessage::MissingGossipBlockComponentsDelayed(block_root))
+                            {
+                                warn!(log, "Failed to send delayed lookup message"; "error" => ?e);
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "delayed_lookups",
+    );
+}
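Reviewer note: the new service aligns its ticks so each one fires a fixed delay after a slot start; if the current slot's tick point has already passed, the first tick waits until the next slot's. That arithmetic, extracted into a runnable form (`delay` plays the role of `single_lookup_delay`, which is assumed to be a fraction of the slot duration):

```rust
use std::time::Duration;

// Standalone illustration of the interval-start arithmetic used above.
fn duration_until_first_tick(
    duration_to_next_slot: Duration,
    seconds_from_current_slot_start: Duration,
    delay: Duration,
) -> Duration {
    if seconds_from_current_slot_start > delay {
        // Already past this slot's tick point; wait for the next slot's.
        duration_to_next_slot + delay
    } else {
        // The tick point for the current slot is still ahead of us.
        delay - seconds_from_current_slot_start
    }
}

fn main() {
    let slot = Duration::from_secs(12);
    let delay = Duration::from_secs(4); // hypothetical single_lookup_delay
    // 2s into the slot: the first tick lands in 2s.
    assert_eq!(
        duration_until_first_tick(slot - Duration::from_secs(2), Duration::from_secs(2), delay),
        Duration::from_secs(2)
    );
    // 6s into the slot: this slot's tick was missed, so wait 6s + 4s.
    assert_eq!(
        duration_until_first_tick(slot - Duration::from_secs(6), Duration::from_secs(6), delay),
        Duration::from_secs(10)
    );
}
```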
(File diff suppressed because it is too large.)
@@ -1,18 +1,18 @@
-use super::RootBlockTuple;
+use super::single_block_lookup::{LookupRequestError, LookupVerifyError, SingleBlockLookup};
+use super::{BlobRequestId, BlockRequestId, DownloadedBlocks, PeerShouldHave, ResponseType};
+use crate::sync::block_lookups::single_block_lookup::{State, UnknownParentComponents};
+use crate::sync::block_lookups::{RootBlobsTuple, RootBlockTuple};
+use crate::sync::{manager::SLOT_IMPORT_TOLERANCE, network_context::SyncNetworkContext};
 use beacon_chain::blob_verification::AsBlock;
 use beacon_chain::blob_verification::BlockWrapper;
+use beacon_chain::data_availability_checker::DataAvailabilityChecker;
 use beacon_chain::BeaconChainTypes;
 use lighthouse_network::PeerId;
 use std::sync::Arc;
 use store::Hash256;
 use strum::IntoStaticStr;

-use crate::sync::block_lookups::ForceBlockRequest;
-use crate::sync::{
-    manager::{Id, SLOT_IMPORT_TOLERANCE},
-    network_context::SyncNetworkContext,
-};
-
-use super::single_block_lookup::{self, SingleBlockRequest};
+use types::blob_sidecar::FixedBlobSidecarList;
+use types::{BlobSidecar, SignedBeaconBlock};

 /// How many attempts we try to find a parent of a block before we give up trying.
 pub(crate) const PARENT_FAIL_TOLERANCE: u8 = 5;
@@ -26,19 +26,22 @@ pub(crate) struct ParentLookup<T: BeaconChainTypes> {
     /// The root of the block triggering this parent request.
     chain_hash: Hash256,
     /// The blocks that have currently been downloaded.
-    downloaded_blocks: Vec<RootBlockTuple<T::EthSpec>>,
+    downloaded_blocks: Vec<DownloadedBlocks<T::EthSpec>>,
     /// Request of the last parent.
-    current_parent_request: SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
-    /// Id of the last parent request.
-    current_parent_request_id: Option<Id>,
+    pub current_parent_request: SingleBlockLookup<PARENT_FAIL_TOLERANCE, T>,
 }

 #[derive(Debug, PartialEq, Eq, IntoStaticStr)]
-pub enum VerifyError {
+pub enum ParentVerifyError {
     RootMismatch,
     NoBlockReturned,
+    NotEnoughBlobsReturned,
     ExtraBlocksReturned,
+    UnrequestedBlobId,
+    ExtraBlobsReturned,
+    InvalidIndex(u64),
     PreviousFailure { parent_root: Hash256 },
+    BenignFailure,
 }

 #[derive(Debug, PartialEq, Eq)]
@@ -55,62 +58,143 @@ pub enum RequestError {
 }

 impl<T: BeaconChainTypes> ParentLookup<T> {
-    pub fn new(block_root: Hash256, block: BlockWrapper<T::EthSpec>, peer_id: PeerId) -> Self {
-        let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id);
-
-        Self {
-            chain_hash: block_root,
-            downloaded_blocks: vec![(block_root, block)],
-            current_parent_request,
-            current_parent_request_id: None,
-        }
+    pub fn new(
+        block_root: Hash256,
+        parent_root: Hash256,
+        peer_id: PeerShouldHave,
+        da_checker: Arc<DataAvailabilityChecker<T>>,
+    ) -> Self {
+        let current_parent_request =
+            SingleBlockLookup::new(parent_root, Some(<_>::default()), &[peer_id], da_checker);
+
+        Self {
+            chain_hash: block_root,
+            downloaded_blocks: vec![],
+            current_parent_request,
+        }
+    }
+
+    pub fn contains_block(&self, block_root: &Hash256) -> bool {
+        self.downloaded_blocks
+            .iter()
+            .any(|(root, _d_block)| root == block_root)
+    }
+
+    pub fn is_for_block(&self, block_root: Hash256) -> bool {
+        self.current_parent_request.is_for_block(block_root)
     }

     /// Attempts to request the next unknown parent. If the request fails, it should be removed.
-    pub fn request_parent(
+    pub fn request_parent_block(
         &mut self,
         cx: &mut SyncNetworkContext<T>,
-        force_block_request: ForceBlockRequest,
     ) -> Result<(), RequestError> {
         // check to make sure this request hasn't failed
-        if self.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE {
+        if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE {
             return Err(RequestError::ChainTooLong);
         }

-        let (peer_id, request) = self.current_parent_request.request_block()?;
-        match cx.parent_lookup_request(peer_id, request, force_block_request) {
-            Ok(request_id) => {
-                self.current_parent_request_id = Some(request_id);
-                Ok(())
-            }
-            Err(reason) => {
-                self.current_parent_request_id = None;
-                Err(RequestError::SendFailed(reason))
-            }
-        }
-    }
-
-    pub fn check_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> {
-        self.current_parent_request.check_peer_disconnected(peer_id)
-    }
-
-    pub fn add_block(&mut self, block: BlockWrapper<T::EthSpec>) {
+        if let Some((peer_id, request)) = self.current_parent_request.request_block()? {
+            match cx.parent_lookup_block_request(peer_id, request) {
+                Ok(request_id) => {
+                    self.current_parent_request.id.block_request_id = Some(request_id);
+                    return Ok(());
+                }
+                Err(reason) => {
+                    self.current_parent_request.id.block_request_id = None;
+                    return Err(RequestError::SendFailed(reason));
+                }
+            }
+        }
+        Ok(())
+    }
+
+    pub fn request_parent_blobs(
+        &mut self,
+        cx: &mut SyncNetworkContext<T>,
+    ) -> Result<(), RequestError> {
+        // check to make sure this request hasn't failed
+        if self.downloaded_blocks.len() + 1 >= PARENT_DEPTH_TOLERANCE {
+            return Err(RequestError::ChainTooLong);
+        }
+
+        if let Some((peer_id, request)) = self.current_parent_request.request_blobs()? {
+            match cx.parent_lookup_blobs_request(peer_id, request) {
+                Ok(request_id) => {
+                    self.current_parent_request.id.blob_request_id = Some(request_id);
+                    return Ok(());
+                }
+                Err(reason) => {
+                    self.current_parent_request.id.blob_request_id = None;
+                    return Err(RequestError::SendFailed(reason));
+                }
+            }
+        }
+        Ok(())
+    }
+
+    pub fn check_block_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> {
+        self.current_parent_request
+            .block_request_state
+            .state
+            .check_peer_disconnected(peer_id)
+    }
+
+    pub fn check_blob_peer_disconnected(&mut self, peer_id: &PeerId) -> Result<(), ()> {
+        self.current_parent_request
+            .blob_request_state
+            .state
+            .check_peer_disconnected(peer_id)
+    }
+
+    pub fn add_unknown_parent_block(&mut self, block: BlockWrapper<T::EthSpec>) {
         let next_parent = block.parent_root();
-        let current_root = self.current_parent_request.hash;
+
+        // Cache the block.
+        let current_root = self
+            .current_parent_request
+            .block_request_state
+            .requested_block_root;
         self.downloaded_blocks.push((current_root, block));
-        self.current_parent_request.hash = next_parent;
-        self.current_parent_request.state = single_block_lookup::State::AwaitingDownload;
-        self.current_parent_request_id = None;
-    }
-
-    pub fn pending_response(&self, req_id: Id) -> bool {
-        self.current_parent_request_id == Some(req_id)
+
+        // Update the block request.
+        self.current_parent_request
+            .block_request_state
+            .requested_block_root = next_parent;
+        self.current_parent_request.block_request_state.state.state = State::AwaitingDownload;
+        self.current_parent_request.id.block_request_id = None;
+
+        // Update the blobs request.
+        self.current_parent_request.blob_request_state.state.state = State::AwaitingDownload;
+        self.current_parent_request.id.blob_request_id = None;
+
+        // Reset the unknown parent components.
+        self.current_parent_request.unknown_parent_components =
+            Some(UnknownParentComponents::default());
+    }
+
+    pub fn add_current_request_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
+        // Cache the block.
+        self.current_parent_request.add_unknown_parent_block(block);
+
+        // Update the request.
+        self.current_parent_request.id.block_request_id = None;
+    }
+
+    pub fn add_current_request_blobs(&mut self, blobs: FixedBlobSidecarList<T::EthSpec>) {
+        // Cache the blobs.
+        self.current_parent_request.add_unknown_parent_blobs(blobs);
+
+        // Update the request.
+        self.current_parent_request.id.blob_request_id = None;
+    }
+
+    pub fn pending_block_response(&self, req_id: BlockRequestId) -> bool {
+        self.current_parent_request.id.block_request_id == Some(req_id)
+    }
+
+    pub fn pending_blob_response(&self, req_id: BlobRequestId) -> bool {
+        self.current_parent_request.id.blob_request_id == Some(req_id)
     }

     /// Consumes the parent request and destructures it into it's parts.
@@ -121,18 +205,17 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
         Hash256,
         Vec<BlockWrapper<T::EthSpec>>,
         Vec<Hash256>,
-        SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
+        SingleBlockLookup<PARENT_FAIL_TOLERANCE, T>,
     ) {
         let ParentLookup {
             chain_hash,
             downloaded_blocks,
             current_parent_request,
-            current_parent_request_id: _,
         } = self;
         let block_count = downloaded_blocks.len();
         let mut blocks = Vec::with_capacity(block_count);
         let mut hashes = Vec::with_capacity(block_count);
-        for (hash, block) in downloaded_blocks {
+        for (hash, block) in downloaded_blocks.into_iter() {
             blocks.push(block);
             hashes.push(hash);
         }
@@ -144,23 +227,59 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
         self.chain_hash
     }

-    pub fn download_failed(&mut self) {
-        self.current_parent_request.register_failure_downloading();
-        self.current_parent_request_id = None;
+    pub fn block_download_failed(&mut self) {
+        self.current_parent_request
+            .block_request_state
+            .state
+            .register_failure_downloading();
+        self.current_parent_request.id.block_request_id = None;
     }

-    pub fn processing_failed(&mut self) {
-        self.current_parent_request.register_failure_processing();
-        self.current_parent_request_id = None;
+    pub fn blob_download_failed(&mut self) {
+        self.current_parent_request
+            .blob_request_state
+            .state
+            .register_failure_downloading();
+        self.current_parent_request.id.blob_request_id = None;
+    }
+
+    pub fn block_processing_failed(&mut self) {
+        self.current_parent_request
+            .block_request_state
+            .state
+            .register_failure_processing();
+        if let Some(components) = self
+            .current_parent_request
+            .unknown_parent_components
+            .as_mut()
+        {
+            components.downloaded_block = None;
+        }
+        self.current_parent_request.id.block_request_id = None;
+    }
+
+    pub fn blob_processing_failed(&mut self) {
+        self.current_parent_request
+            .blob_request_state
+            .state
+            .register_failure_processing();
+        if let Some(components) = self
+            .current_parent_request
+            .unknown_parent_components
+            .as_mut()
+        {
+            components.downloaded_blobs = <_>::default();
+        }
+        self.current_parent_request.id.blob_request_id = None;
     }

     /// Verifies that the received block is what we requested. If so, parent lookup now waits for
     /// the processing result of the block.
     pub fn verify_block(
         &mut self,
-        block: Option<BlockWrapper<T::EthSpec>>,
+        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
         failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
-    ) -> Result<Option<RootBlockTuple<T::EthSpec>>, VerifyError> {
+    ) -> Result<Option<RootBlockTuple<T::EthSpec>>, ParentVerifyError> {
         let root_and_block = self.current_parent_request.verify_block(block)?;

         // check if the parent of this block isn't in the failed cache. If it is, this chain should
@ -170,50 +289,83 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
|
||||
.map(|(_, block)| block.parent_root())
|
||||
{
|
||||
if failed_chains.contains(&parent_root) {
|
||||
self.current_parent_request.register_failure_downloading();
|
||||
self.current_parent_request_id = None;
|
||||
return Err(VerifyError::PreviousFailure { parent_root });
|
||||
self.current_parent_request
|
||||
.block_request_state
|
||||
.state
|
||||
.register_failure_downloading();
|
||||
self.current_parent_request.id.block_request_id = None;
|
||||
return Err(ParentVerifyError::PreviousFailure { parent_root });
|
||||
}
|
||||
}
|
||||
|
||||
Ok(root_and_block)
|
||||
}

    pub fn get_processing_peer(&self, chain_hash: Hash256) -> Option<PeerId> {
        if self.chain_hash == chain_hash {
            return self.current_parent_request.processing_peer().ok();
    pub fn verify_blob(
        &mut self,
        blob: Option<Arc<BlobSidecar<T::EthSpec>>>,
        failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
    ) -> Result<Option<RootBlobsTuple<T::EthSpec>>, ParentVerifyError> {
        let parent_root_opt = blob.as_ref().map(|b| b.block_parent_root);
        let blobs = self.current_parent_request.verify_blob(blob)?;

        // check if the parent of this block isn't in the failed cache. If it is, this chain should
        // be dropped and the peer downscored.
        if let Some(parent_root) = parent_root_opt {
            if failed_chains.contains(&parent_root) {
                self.current_parent_request
                    .blob_request_state
                    .state
                    .register_failure_downloading();
                self.current_parent_request.id.blob_request_id = None;
                return Err(ParentVerifyError::PreviousFailure { parent_root });
            }
        }
            None

        Ok(blobs)
    }

    #[cfg(test)]
    pub fn failed_attempts(&self) -> u8 {
        self.current_parent_request.failed_attempts()
    pub fn add_peers(&mut self, peer_source: &[PeerShouldHave]) {
        self.current_parent_request.add_peers(peer_source)
    }

    pub fn add_peer(&mut self, block_root: &Hash256, peer_id: &PeerId) -> bool {
        self.current_parent_request.add_peer(block_root, peer_id)
    }

    pub fn used_peers(&self) -> impl Iterator<Item = &PeerId> + '_ {
        self.current_parent_request.used_peers.iter()
    pub fn used_peers(&self, response_type: ResponseType) -> impl Iterator<Item = &PeerId> + '_ {
        match response_type {
            ResponseType::Block => self
                .current_parent_request
                .block_request_state
                .state
                .used_peers
                .iter(),
            ResponseType::Blob => self
                .current_parent_request
                .blob_request_state
                .state
                .used_peers
                .iter(),
        }
    }
}

impl From<super::single_block_lookup::VerifyError> for VerifyError {
    fn from(e: super::single_block_lookup::VerifyError) -> Self {
        use super::single_block_lookup::VerifyError as E;
impl From<LookupVerifyError> for ParentVerifyError {
    fn from(e: LookupVerifyError) -> Self {
        use LookupVerifyError as E;
        match e {
            E::RootMismatch => VerifyError::RootMismatch,
            E::NoBlockReturned => VerifyError::NoBlockReturned,
            E::ExtraBlocksReturned => VerifyError::ExtraBlocksReturned,
            E::RootMismatch => ParentVerifyError::RootMismatch,
            E::NoBlockReturned => ParentVerifyError::NoBlockReturned,
            E::ExtraBlocksReturned => ParentVerifyError::ExtraBlocksReturned,
            E::UnrequestedBlobId => ParentVerifyError::UnrequestedBlobId,
            E::ExtraBlobsReturned => ParentVerifyError::ExtraBlobsReturned,
            E::InvalidIndex(index) => ParentVerifyError::InvalidIndex(index),
            E::NotEnoughBlobsReturned => ParentVerifyError::NotEnoughBlobsReturned,
            E::BenignFailure => ParentVerifyError::BenignFailure,
        }
    }
}

impl From<super::single_block_lookup::LookupRequestError> for RequestError {
    fn from(e: super::single_block_lookup::LookupRequestError) -> Self {
        use super::single_block_lookup::LookupRequestError as E;
impl From<LookupRequestError> for RequestError {
    fn from(e: LookupRequestError) -> Self {
        use LookupRequestError as E;
        match e {
            E::TooManyAttempts { cannot_process } => {
                RequestError::TooManyAttempts { cannot_process }
@@ -1,43 +1,210 @@
use super::RootBlockTuple;
use beacon_chain::blob_verification::AsBlock;
use crate::sync::block_lookups::{BlobRequestId, BlockRequestId, RootBlobsTuple, RootBlockTuple};
use crate::sync::network_context::SyncNetworkContext;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::get_block_root;
use beacon_chain::data_availability_checker::DataAvailabilityChecker;
use beacon_chain::{get_block_root, BeaconChainTypes};
use lighthouse_network::rpc::methods::BlobsByRootRequest;
use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
use rand::seq::IteratorRandom;
use ssz_types::VariableList;
use std::collections::HashSet;
use store::{EthSpec, Hash256};
use std::ops::IndexMut;
use std::sync::Arc;
use store::Hash256;
use strum::IntoStaticStr;
use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList};
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};

/// Object representing a single block lookup request.
#[derive(PartialEq, Eq)]
pub struct SingleBlockRequest<const MAX_ATTEMPTS: u8> {
    /// The hash of the requested block.
    pub hash: Hash256,
use super::{PeerShouldHave, ResponseType};

pub struct SingleBlockLookup<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> {
    pub id: LookupId,
    pub block_request_state: BlockRequestState<MAX_ATTEMPTS>,
    pub blob_request_state: BlobRequestState<MAX_ATTEMPTS, T::EthSpec>,
    pub da_checker: Arc<DataAvailabilityChecker<T>>,
    /// Only necessary for requests triggered by an `UnknownParentBlock` or `UnknownParentBlob`,
    /// because any blocks or blobs without parents won't hit the data availability cache.
    pub unknown_parent_components: Option<UnknownParentComponents<T::EthSpec>>,
    /// We may want to delay the actual request trigger to give us a chance to receive all block
    /// components over gossip.
    pub triggered: bool,
}

#[derive(Default, Clone)]
pub struct LookupId {
    pub block_request_id: Option<BlockRequestId>,
    pub blob_request_id: Option<BlobRequestId>,
}

pub struct BlobRequestState<const MAX_ATTEMPTS: u8, T: EthSpec> {
    pub requested_ids: Vec<BlobIdentifier>,
    /// Where we store blobs until we receive the stream terminator.
    pub blob_download_queue: FixedBlobSidecarList<T>,
    pub state: SingleLookupRequestState<MAX_ATTEMPTS>,
}

impl<const MAX_ATTEMPTS: u8, T: EthSpec> BlobRequestState<MAX_ATTEMPTS, T> {
    pub fn new(peer_source: &[PeerShouldHave]) -> Self {
        Self {
            requested_ids: <_>::default(),
            blob_download_queue: <_>::default(),
            state: SingleLookupRequestState::new(peer_source),
        }
    }
}

pub struct BlockRequestState<const MAX_ATTEMPTS: u8> {
    pub requested_block_root: Hash256,
    pub state: SingleLookupRequestState<MAX_ATTEMPTS>,
}

impl<const MAX_ATTEMPTS: u8> BlockRequestState<MAX_ATTEMPTS> {
    pub fn new(block_root: Hash256, peers: &[PeerShouldHave]) -> Self {
        Self {
            requested_block_root: block_root,
            state: SingleLookupRequestState::new(peers),
        }
    }
}

impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS, T> {
    pub(crate) fn register_failure_downloading(&mut self, response_type: ResponseType) {
        match response_type {
            ResponseType::Block => self
                .block_request_state
                .state
                .register_failure_downloading(),
            ResponseType::Blob => self.blob_request_state.state.register_failure_downloading(),
        }
    }
}

impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS, T> {
    pub(crate) fn downloading(&mut self, response_type: ResponseType) -> bool {
        match response_type {
            ResponseType::Block => {
                matches!(
                    self.block_request_state.state.state,
                    State::Downloading { .. }
                )
            }
            ResponseType::Blob => {
                matches!(
                    self.blob_request_state.state.state,
                    State::Downloading { .. }
                )
            }
        }
    }

    pub(crate) fn remove_peer_if_useless(&mut self, peer_id: &PeerId, response_type: ResponseType) {
        match response_type {
            ResponseType::Block => self
                .block_request_state
                .state
                .remove_peer_if_useless(peer_id),
            ResponseType::Blob => self
                .blob_request_state
                .state
                .remove_peer_if_useless(peer_id),
        }
    }

    pub(crate) fn check_peer_disconnected(
        &mut self,
        peer_id: &PeerId,
        response_type: ResponseType,
    ) -> Result<(), ()> {
        match response_type {
            ResponseType::Block => self
                .block_request_state
                .state
                .check_peer_disconnected(peer_id),
            ResponseType::Blob => self
                .blob_request_state
                .state
                .check_peer_disconnected(peer_id),
        }
    }
}

/// For requests triggered by an `UnknownParentBlock` or `UnknownParentBlob`, this struct
/// is used to cache components as they are sent to the networking layer. We can't use the
/// data availability cache currently because any blocks or blobs without parents
/// won't pass validation and therefore won't make it into the cache.
#[derive(Default)]
pub struct UnknownParentComponents<E: EthSpec> {
    pub downloaded_block: Option<Arc<SignedBeaconBlock<E>>>,
    pub downloaded_blobs: FixedBlobSidecarList<E>,
}

impl<E: EthSpec> UnknownParentComponents<E> {
    pub fn new(
        block: Option<Arc<SignedBeaconBlock<E>>>,
        blobs: Option<FixedBlobSidecarList<E>>,
    ) -> Self {
        Self {
            downloaded_block: block,
            downloaded_blobs: blobs.unwrap_or_default(),
        }
    }
    pub fn add_unknown_parent_block(&mut self, block: Arc<SignedBeaconBlock<E>>) {
        self.downloaded_block = Some(block);
    }
    pub fn add_unknown_parent_blobs(&mut self, blobs: FixedBlobSidecarList<E>) {
        for (index, blob_opt) in self.downloaded_blobs.iter_mut().enumerate() {
            if let Some(Some(downloaded_blob)) = blobs.get(index) {
                *blob_opt = Some(downloaded_blob.clone());
            }
        }
    }
    pub fn downloaded_indices(&self) -> HashSet<usize> {
        self.downloaded_blobs
            .iter()
            .enumerate()
            .filter_map(|(i, blob_opt)| blob_opt.as_ref().map(|_| i))
            .collect::<HashSet<_>>()
    }
}
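
A reduced, runnable illustration of this caching pattern, with plain integers standing in for blobs and a fixed six-slot array standing in for `FixedBlobSidecarList`:

```rust
use std::collections::HashSet;

const MAX_BLOBS: usize = 6;

// Components arrive piecemeal over gossip/RPC and are merged into one
// fixed-size slot per blob index, so later arrivals simply fill gaps.
#[derive(Default)]
struct Components {
    downloaded_block: Option<&'static str>,
    downloaded_blobs: [Option<u8>; MAX_BLOBS],
}

impl Components {
    fn add_blobs(&mut self, incoming: [Option<u8>; MAX_BLOBS]) {
        for (slot, blob) in self.downloaded_blobs.iter_mut().zip(incoming) {
            if let Some(blob) = blob {
                *slot = Some(blob);
            }
        }
    }
    fn downloaded_indices(&self) -> HashSet<usize> {
        self.downloaded_blobs
            .iter()
            .enumerate()
            .filter_map(|(i, b)| b.map(|_| i))
            .collect()
    }
}

fn main() {
    let mut c = Components::default();
    c.downloaded_block = Some("block");
    let mut first = [None; MAX_BLOBS];
    first[0] = Some(10);
    c.add_blobs(first);
    let mut second = [None; MAX_BLOBS];
    second[2] = Some(30);
    c.add_blobs(second);
    assert_eq!(c.downloaded_indices(), HashSet::from([0, 2]));
}
```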

/// Object representing the state of a single block or blob lookup request.
#[derive(PartialEq, Eq, Debug)]
pub struct SingleLookupRequestState<const MAX_ATTEMPTS: u8> {
    /// State of this request.
    pub state: State,
    /// Peers that should have this block.
    /// Peers that should have this block or blob.
    pub available_peers: HashSet<PeerId>,
    /// Peers that may or may not have this block or blob.
    pub potential_peers: HashSet<PeerId>,
    /// Peers from which we have requested this block.
    pub used_peers: HashSet<PeerId>,
    /// How many times have we attempted to process this block.
    /// How many times have we attempted to process this block or blob.
    failed_processing: u8,
    /// How many times have we attempted to download this block.
    /// How many times have we attempted to download this block or blob.
    failed_downloading: u8,
    pub component_processed: bool,
}

#[derive(Debug, PartialEq, Eq)]
pub enum State {
    AwaitingDownload,
    Downloading { peer_id: PeerId },
    Processing { peer_id: PeerId },
    Downloading { peer_id: PeerShouldHave },
    Processing { peer_id: PeerShouldHave },
}

#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
pub enum VerifyError {
pub enum LookupVerifyError {
    RootMismatch,
    NoBlockReturned,
    ExtraBlocksReturned,
    UnrequestedBlobId,
    ExtraBlobsReturned,
    NotEnoughBlobsReturned,
    InvalidIndex(u64),
    /// We don't have enough information to know
    /// whether the peer is at fault or simply missed
    /// what was requested on gossip.
    BenignFailure,
}
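
Both request sides drive the same small state machine, and several of the verify errors above are "response arrived in the wrong state" classifications. A minimal sketch of the transitions (a `u32` stands in for the peer handle; the real code stores a `PeerShouldHave`):

```rust
// AwaitingDownload -> Downloading -> Processing, one instance per component.
#[derive(Debug, PartialEq)]
enum State {
    AwaitingDownload,
    Downloading { peer: u32 },
    Processing { peer: u32 },
}

fn main() {
    let mut state = State::AwaitingDownload;

    // A request is sent to a chosen peer.
    state = State::Downloading { peer: 7 };

    // The expected response arrives; hand the component off for processing.
    if let State::Downloading { peer } = state {
        state = State::Processing { peer };
    }
    assert_eq!(state, State::Processing { peer: 7 });

    // Any further response now would be classified as
    // ExtraBlocksReturned / ExtraBlobsReturned.
}
```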

#[derive(Debug, PartialEq, Eq, IntoStaticStr)]
@@ -50,15 +217,465 @@ pub enum LookupRequestError {
    NoPeers,
}

impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
    pub fn new(hash: Hash256, peer_id: PeerId) -> Self {
impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> SingleBlockLookup<MAX_ATTEMPTS, T> {
    pub fn new(
        requested_block_root: Hash256,
        unknown_parent_components: Option<UnknownParentComponents<T::EthSpec>>,
        peers: &[PeerShouldHave],
        da_checker: Arc<DataAvailabilityChecker<T>>,
    ) -> Self {
        Self {
            id: <_>::default(),
            block_request_state: BlockRequestState::new(requested_block_root, peers),
            blob_request_state: BlobRequestState::new(peers),
            da_checker,
            unknown_parent_components,
            triggered: false,
        }
    }

    pub fn is_for_block(&self, block_root: Hash256) -> bool {
        self.block_request_state.requested_block_root == block_root
    }

    /// Send the necessary requests for blocks and blobs, and update `self.id` with the latest
    /// request `Id`s. This will return `Err(())` if neither the block nor the blob request could
    /// be made, or if neither is required any longer.
    pub fn request_block_and_blobs(&mut self, cx: &mut SyncNetworkContext<T>) -> Result<(), ()> {
        let block_request_id = if let Ok(Some((peer_id, block_request))) = self.request_block() {
            cx.single_block_lookup_request(peer_id, block_request).ok()
        } else {
            None
        };

        let blob_request_id = if let Ok(Some((peer_id, blob_request))) = self.request_blobs() {
            cx.single_blobs_lookup_request(peer_id, blob_request).ok()
        } else {
            None
        };

        if block_request_id.is_none() && blob_request_id.is_none() {
            return Err(());
        }

        self.id = LookupId {
            block_request_id,
            blob_request_id,
        };
        Ok(())
    }
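
A stub-level sketch of the pairing rule this method implements: the lookup survives as long as at least one of the two requests actually went out, and the resulting ids are stored together (the names below are illustrative, not the real API):

```rust
#[derive(Debug, Default)]
struct LookupId {
    block_request_id: Option<u32>,
    blob_request_id: Option<u32>,
}

fn send_block_request() -> Option<u32> {
    None // e.g. the block was already downloaded
}

fn send_blob_request() -> Option<u32> {
    Some(42) // some blobs are still missing
}

fn request_block_and_blobs() -> Result<LookupId, ()> {
    let block_request_id = send_block_request();
    let blob_request_id = send_blob_request();
    if block_request_id.is_none() && blob_request_id.is_none() {
        // Neither request was needed or possible: drop the lookup.
        return Err(());
    }
    Ok(LookupId { block_request_id, blob_request_id })
}

fn main() {
    assert!(request_block_and_blobs().is_ok());
}
```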

    pub fn update_blobs_request(&mut self) {
        self.blob_request_state.requested_ids = if let Some(components) =
            self.unknown_parent_components.as_ref()
        {
            let blobs = components.downloaded_indices();
            self.da_checker
                .get_missing_blob_ids(
                    self.block_request_state.requested_block_root,
                    components.downloaded_block.as_ref(),
                    Some(blobs),
                )
                .unwrap_or_default()
        } else {
            self.da_checker
                .get_missing_blob_ids_checking_cache(self.block_request_state.requested_block_root)
                .unwrap_or_default()
        };
    }

    pub fn get_downloaded_block(&mut self) -> Option<BlockWrapper<T::EthSpec>> {
        self.unknown_parent_components
            .as_mut()
            .and_then(|components| {
                let downloaded_block = components.downloaded_block.as_ref();
                let downloaded_indices = components.downloaded_indices();
                let missing_ids = self.da_checker.get_missing_blob_ids(
                    self.block_request_state.requested_block_root,
                    downloaded_block,
                    Some(downloaded_indices),
                );
                let download_complete =
                    missing_ids.map_or(true, |missing_ids| missing_ids.is_empty());
                if download_complete {
                    let UnknownParentComponents {
                        downloaded_block,
                        downloaded_blobs,
                    } = components;
                    downloaded_block.as_ref().map(|block| {
                        BlockWrapper::BlockAndBlobs(block.clone(), std::mem::take(downloaded_blobs))
                    })
                } else {
                    None
                }
            })
    }

    pub fn add_unknown_parent_components(
        &mut self,
        components: UnknownParentComponents<T::EthSpec>,
    ) {
        if let Some(ref mut existing_components) = self.unknown_parent_components {
            let UnknownParentComponents {
                downloaded_block,
                downloaded_blobs,
            } = components;
            if let Some(block) = downloaded_block {
                existing_components.add_unknown_parent_block(block);
            }
            existing_components.add_unknown_parent_blobs(downloaded_blobs);
        } else {
            self.unknown_parent_components = Some(components);
        }
    }
    pub fn add_unknown_parent_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
        if let Some(ref mut components) = self.unknown_parent_components {
            components.add_unknown_parent_block(block)
        } else {
            self.unknown_parent_components = Some(UnknownParentComponents {
                downloaded_block: Some(block),
                downloaded_blobs: FixedBlobSidecarList::default(),
            })
        }
    }

    pub fn add_unknown_parent_blobs(&mut self, blobs: FixedBlobSidecarList<T::EthSpec>) {
        if let Some(ref mut components) = self.unknown_parent_components {
            components.add_unknown_parent_blobs(blobs)
        } else {
            self.unknown_parent_components = Some(UnknownParentComponents {
                downloaded_block: None,
                downloaded_blobs: blobs,
            })
        }
    }

    /// Verifies if the received block matches the requested one.
    /// Returns the block for processing if the response is what we expected.
    pub fn verify_block(
        &mut self,
        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
    ) -> Result<Option<RootBlockTuple<T::EthSpec>>, LookupVerifyError> {
        match self.block_request_state.state.state {
            State::AwaitingDownload => {
                self.block_request_state
                    .state
                    .register_failure_downloading();
                Err(LookupVerifyError::ExtraBlocksReturned)
            }
            State::Downloading { peer_id } => {
                match block {
                    Some(block) => {
                        // Compute the block root using this specific function so that we can get timing
                        // metrics.
                        let block_root = get_block_root(&block);
                        if block_root != self.block_request_state.requested_block_root {
                            // return an error and drop the block
                            // NOTE: we take this as a download failure to prevent counting the
                            // attempt as a chain failure, but simply a peer failure.
                            self.block_request_state
                                .state
                                .register_failure_downloading();
                            Err(LookupVerifyError::RootMismatch)
                        } else {
                            // Return the block for processing.
                            self.block_request_state.state.state = State::Processing { peer_id };
                            Ok(Some((block_root, block)))
                        }
                    }
                    None => {
                        if peer_id.should_have_block() {
                            self.block_request_state
                                .state
                                .register_failure_downloading();
                            Err(LookupVerifyError::NoBlockReturned)
                        } else {
                            self.block_request_state.state.state = State::AwaitingDownload;
                            Err(LookupVerifyError::BenignFailure)
                        }
                    }
                }
            }
            State::Processing { peer_id: _ } => match block {
                Some(_) => {
                    // We sent the block for processing and received an extra block.
                    self.block_request_state
                        .state
                        .register_failure_downloading();
                    Err(LookupVerifyError::ExtraBlocksReturned)
                }
                None => {
                    // This is simply the stream termination and we are already processing the
                    // block.
                    Ok(None)
                }
            },
        }
    }

    pub fn verify_blob(
        &mut self,
        blob: Option<Arc<BlobSidecar<T::EthSpec>>>,
    ) -> Result<Option<RootBlobsTuple<T::EthSpec>>, LookupVerifyError> {
        match self.blob_request_state.state.state {
            State::AwaitingDownload => {
                self.blob_request_state.state.register_failure_downloading();
                Err(LookupVerifyError::ExtraBlobsReturned)
            }
            State::Downloading {
                peer_id: peer_source,
            } => match blob {
                Some(blob) => {
                    let received_id = blob.id();
                    if !self.blob_request_state.requested_ids.contains(&received_id) {
                        self.blob_request_state.state.register_failure_downloading();
                        Err(LookupVerifyError::UnrequestedBlobId)
                    } else {
                        // State should remain downloading until we receive the stream terminator.
                        self.blob_request_state
                            .requested_ids
                            .retain(|id| *id != received_id);
                        let blob_index = blob.index;

                        if blob_index >= T::EthSpec::max_blobs_per_block() as u64 {
                            return Err(LookupVerifyError::InvalidIndex(blob.index));
                        }
                        *self
                            .blob_request_state
                            .blob_download_queue
                            .index_mut(blob_index as usize) = Some(blob);
                        Ok(None)
                    }
                }
                None => {
                    self.blob_request_state.state.state = State::Processing {
                        peer_id: peer_source,
                    };
                    Ok(Some((
                        self.block_request_state.requested_block_root,
                        std::mem::take(&mut self.blob_request_state.blob_download_queue),
                    )))
                }
            },
            State::Processing { peer_id: _ } => match blob {
                Some(_) => {
                    // We sent the blob for processing and received an extra blob.
                    self.blob_request_state.state.register_failure_downloading();
                    Err(LookupVerifyError::ExtraBlobsReturned)
                }
                None => {
                    // This is simply the stream termination and we are already processing the
                    // blobs.
                    Ok(None)
                }
            },
        }
    }
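
A self-contained reduction of the blob-verification bookkeeping above, with `u64` ids in place of `BlobIdentifier` and sidecars: each response must match a still-outstanding requested id, its index must be in range, and it is stored in the fixed slot for that index until the stream terminates.

```rust
const MAX_BLOBS_PER_BLOCK: u64 = 6;

#[derive(Debug, PartialEq)]
enum VerifyError {
    UnrequestedBlobId,
    InvalidIndex(u64),
}

struct BlobRequest {
    requested_ids: Vec<u64>,
    download_queue: Vec<Option<u64>>,
}

impl BlobRequest {
    fn accept(&mut self, id: u64, index: u64) -> Result<(), VerifyError> {
        if !self.requested_ids.contains(&id) {
            return Err(VerifyError::UnrequestedBlobId);
        }
        // Each id is only accepted once.
        self.requested_ids.retain(|requested| *requested != id);
        if index >= MAX_BLOBS_PER_BLOCK {
            return Err(VerifyError::InvalidIndex(index));
        }
        self.download_queue[index as usize] = Some(id);
        Ok(())
    }
}

fn main() {
    let mut req = BlobRequest {
        requested_ids: vec![100],
        download_queue: vec![None; MAX_BLOBS_PER_BLOCK as usize],
    };
    assert_eq!(req.accept(100, 1), Ok(()));
    assert_eq!(req.accept(100, 1), Err(VerifyError::UnrequestedBlobId));
}
```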

    pub fn request_block(
        &mut self,
    ) -> Result<Option<(PeerId, BlocksByRootRequest)>, LookupRequestError> {
        let block_already_downloaded =
            if let Some(components) = self.unknown_parent_components.as_ref() {
                components.downloaded_block.is_some()
            } else {
                self.da_checker
                    .has_block(&self.block_request_state.requested_block_root)
            };

        if block_already_downloaded {
            return Ok(None);
        }

        debug_assert!(matches!(
            self.block_request_state.state.state,
            State::AwaitingDownload
        ));
        let request = BlocksByRootRequest {
            block_roots: VariableList::from(vec![self.block_request_state.requested_block_root]),
        };
        let response_type = ResponseType::Block;
        if self.too_many_attempts(response_type) {
            Err(LookupRequestError::TooManyAttempts {
                cannot_process: self.cannot_process(response_type),
            })
        } else if let Some(peer_id) = self.get_peer(response_type) {
            self.add_used_peer(peer_id, response_type);
            Ok(Some((peer_id.to_peer_id(), request)))
        } else {
            Err(LookupRequestError::NoPeers)
        }
    }

    pub fn request_blobs(
        &mut self,
    ) -> Result<Option<(PeerId, BlobsByRootRequest)>, LookupRequestError> {
        self.update_blobs_request();

        if self.blob_request_state.requested_ids.is_empty() {
            return Ok(None);
        }

        debug_assert!(matches!(
            self.blob_request_state.state.state,
            State::AwaitingDownload
        ));
        let request = BlobsByRootRequest {
            blob_ids: VariableList::from(self.blob_request_state.requested_ids.clone()),
        };
        let response_type = ResponseType::Blob;
        if self.too_many_attempts(response_type) {
            Err(LookupRequestError::TooManyAttempts {
                cannot_process: self.cannot_process(response_type),
            })
        } else if let Some(peer_id) = self.get_peer(response_type) {
            self.add_used_peer(peer_id, response_type);
            Ok(Some((peer_id.to_peer_id(), request)))
        } else {
            Err(LookupRequestError::NoPeers)
        }
    }

    fn too_many_attempts(&self, response_type: ResponseType) -> bool {
        match response_type {
            ResponseType::Block => self.block_request_state.state.failed_attempts() >= MAX_ATTEMPTS,
            ResponseType::Blob => self.blob_request_state.state.failed_attempts() >= MAX_ATTEMPTS,
        }
    }

    fn cannot_process(&self, response_type: ResponseType) -> bool {
        match response_type {
            ResponseType::Block => {
                self.block_request_state.state.failed_processing
                    >= self.block_request_state.state.failed_downloading
            }
            ResponseType::Blob => {
                self.blob_request_state.state.failed_processing
                    >= self.blob_request_state.state.failed_downloading
            }
        }
    }

    fn get_peer(&self, response_type: ResponseType) -> Option<PeerShouldHave> {
        match response_type {
            ResponseType::Block => self
                .block_request_state
                .state
                .available_peers
                .iter()
                .choose(&mut rand::thread_rng())
                .copied()
                .map(PeerShouldHave::BlockAndBlobs)
                .or(self
                    .block_request_state
                    .state
                    .potential_peers
                    .iter()
                    .choose(&mut rand::thread_rng())
                    .copied()
                    .map(PeerShouldHave::Neither)),
            ResponseType::Blob => self
                .blob_request_state
                .state
                .available_peers
                .iter()
                .choose(&mut rand::thread_rng())
                .copied()
                .map(PeerShouldHave::BlockAndBlobs)
                .or(self
                    .blob_request_state
                    .state
                    .potential_peers
                    .iter()
                    .choose(&mut rand::thread_rng())
                    .copied()
                    .map(PeerShouldHave::Neither)),
        }
    }
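
A simplified sketch of this two-tier selection, with `u32` peer handles and deterministic choice instead of the random sampling above: prefer a peer that advertised the component (available) and only fall back to one that merely might have it (potential).

```rust
use std::collections::HashSet;

#[derive(Debug, Clone, Copy, PartialEq)]
enum PeerShouldHave {
    BlockAndBlobs(u32),
    Neither(u32),
}

fn get_peer(
    available: &HashSet<u32>,
    potential: &HashSet<u32>,
) -> Option<PeerShouldHave> {
    available
        .iter()
        .next() // the real code samples randomly with IteratorRandom
        .copied()
        .map(PeerShouldHave::BlockAndBlobs)
        .or_else(|| potential.iter().next().copied().map(PeerShouldHave::Neither))
}

fn main() {
    let available = HashSet::new();
    let potential = HashSet::from([9]);
    assert_eq!(get_peer(&available, &potential), Some(PeerShouldHave::Neither(9)));
}
```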

    fn add_used_peer(&mut self, peer_id: PeerShouldHave, response_type: ResponseType) {
        match response_type {
            ResponseType::Block => {
                self.block_request_state
                    .state
                    .used_peers
                    .insert(peer_id.to_peer_id());
                self.block_request_state.state.state = State::Downloading { peer_id };
            }
            ResponseType::Blob => {
                self.blob_request_state
                    .state
                    .used_peers
                    .insert(peer_id.to_peer_id());
                self.blob_request_state.state.state = State::Downloading { peer_id };
            }
        }
    }

    pub fn add_peers(&mut self, peers: &[PeerShouldHave]) {
        for peer in peers {
            match peer {
                PeerShouldHave::BlockAndBlobs(peer_id) => {
                    self.block_request_state.state.add_peer(peer_id);
                    self.blob_request_state.state.add_peer(peer_id);
                }
                PeerShouldHave::Neither(peer_id) => {
                    self.block_request_state.state.add_potential_peer(peer_id);
                    self.blob_request_state.state.add_potential_peer(peer_id);
                }
            }
        }
    }

    pub fn processing_peer(&self, response_type: ResponseType) -> Result<PeerShouldHave, ()> {
        match response_type {
            ResponseType::Block => self.block_request_state.state.processing_peer(),
            ResponseType::Blob => self.blob_request_state.state.processing_peer(),
        }
    }

    pub fn downloading_peer(&self, response_type: ResponseType) -> Result<PeerShouldHave, ()> {
        match response_type {
            ResponseType::Block => self.block_request_state.state.peer(),
            ResponseType::Blob => self.blob_request_state.state.peer(),
        }
    }

    pub fn both_components_processed(&self) -> bool {
        self.block_request_state.state.component_processed
            && self.blob_request_state.state.component_processed
    }

    pub fn set_component_processed(&mut self, response_type: ResponseType) {
        match response_type {
            ResponseType::Block => self.block_request_state.state.component_processed = true,
            ResponseType::Blob => self.blob_request_state.state.component_processed = true,
        }
    }
}

impl<const MAX_ATTEMPTS: u8> SingleLookupRequestState<MAX_ATTEMPTS> {
    pub fn new(peers: &[PeerShouldHave]) -> Self {
        let mut available_peers = HashSet::default();
        let mut potential_peers = HashSet::default();
        for peer in peers {
            match peer {
                PeerShouldHave::BlockAndBlobs(peer_id) => {
                    available_peers.insert(*peer_id);
                }
                PeerShouldHave::Neither(peer_id) => {
                    potential_peers.insert(*peer_id);
                }
            }
        }
        Self {
            hash,
            state: State::AwaitingDownload,
            available_peers: HashSet::from([peer_id]),
            available_peers,
            potential_peers,
            used_peers: HashSet::default(),
            failed_processing: 0,
            failed_downloading: 0,
            component_processed: false,
        }
    }

@@ -80,19 +697,23 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
        self.failed_processing + self.failed_downloading
    }

    pub fn add_peer(&mut self, hash: &Hash256, peer_id: &PeerId) -> bool {
        let is_useful = &self.hash == hash;
        if is_useful {
            self.available_peers.insert(*peer_id);
    pub fn add_peer(&mut self, peer_id: &PeerId) {
        self.potential_peers.remove(peer_id);
        self.available_peers.insert(*peer_id);
    }

    pub fn add_potential_peer(&mut self, peer_id: &PeerId) {
        if !self.available_peers.contains(peer_id) {
            self.potential_peers.insert(*peer_id);
        }
        is_useful
    }

    /// If a peer disconnects, this request could fail. If so, an error is returned.
    pub fn check_peer_disconnected(&mut self, dc_peer_id: &PeerId) -> Result<(), ()> {
        self.available_peers.remove(dc_peer_id);
        self.potential_peers.remove(dc_peer_id);
        if let State::Downloading { peer_id } = &self.state {
            if peer_id == dc_peer_id {
            if peer_id.as_peer_id() == dc_peer_id {
                // Peer disconnected before providing a block
                self.register_failure_downloading();
                return Err(());
@@ -101,90 +722,67 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
        Ok(())
    }

    /// Verifies if the received block matches the requested one.
    /// Returns the block for processing if the response is what we expected.
    pub fn verify_block<T: EthSpec>(
        &mut self,
        block: Option<BlockWrapper<T>>,
    ) -> Result<Option<RootBlockTuple<T>>, VerifyError> {
        match self.state {
            State::AwaitingDownload => {
                self.register_failure_downloading();
                Err(VerifyError::ExtraBlocksReturned)
            }
            State::Downloading { peer_id } => match block {
                Some(block) => {
                    // Compute the block root using this specific function so that we can get timing
                    // metrics.
                    let block_root = get_block_root(block.as_block());
                    if block_root != self.hash {
                        // return an error and drop the block
                        // NOTE: we take this as a download failure to prevent counting the
                        // attempt as a chain failure, but simply a peer failure.
                        self.register_failure_downloading();
                        Err(VerifyError::RootMismatch)
                    } else {
                        // Return the block for processing.
                        self.state = State::Processing { peer_id };
                        Ok(Some((block_root, block)))
                    }
                }
                None => {
                    self.register_failure_downloading();
                    Err(VerifyError::NoBlockReturned)
                }
            },
            State::Processing { peer_id: _ } => match block {
                Some(_) => {
                    // We sent the block for processing and received an extra block.
                    self.register_failure_downloading();
                    Err(VerifyError::ExtraBlocksReturned)
                }
                None => {
                    // This is simply the stream termination and we are already processing the
                    // block.
                    Ok(None)
                }
            },
        }
    }

    pub fn request_block(&mut self) -> Result<(PeerId, BlocksByRootRequest), LookupRequestError> {
        debug_assert!(matches!(self.state, State::AwaitingDownload));
        if self.failed_attempts() >= MAX_ATTEMPTS {
            Err(LookupRequestError::TooManyAttempts {
                cannot_process: self.failed_processing >= self.failed_downloading,
            })
        } else if let Some(&peer_id) = self.available_peers.iter().choose(&mut rand::thread_rng()) {
            let request = BlocksByRootRequest {
                block_roots: VariableList::from(vec![self.hash]),
            };
            self.state = State::Downloading { peer_id };
            self.used_peers.insert(peer_id);
            Ok((peer_id, request))
        } else {
            Err(LookupRequestError::NoPeers)
        }
    }

    pub fn processing_peer(&self) -> Result<PeerId, ()> {
    pub fn processing_peer(&self) -> Result<PeerShouldHave, ()> {
        if let State::Processing { peer_id } = &self.state {
            Ok(*peer_id)
        } else {
            Err(())
        }
    }

    pub fn peer(&self) -> Result<PeerShouldHave, ()> {
        match &self.state {
            State::Processing { peer_id } => Ok(*peer_id),
            State::Downloading { peer_id } => Ok(*peer_id),
            _ => Err(()),
        }
    }

    pub fn remove_peer_if_useless(&mut self, peer_id: &PeerId) {
        if !self.available_peers.is_empty() || self.potential_peers.len() > 1 {
            self.potential_peers.remove(peer_id);
        }
    }
}

impl<const MAX_ATTEMPTS: u8> slog::Value for SingleBlockRequest<MAX_ATTEMPTS> {
impl<const MAX_ATTEMPTS: u8, T: BeaconChainTypes> slog::Value
    for SingleBlockLookup<MAX_ATTEMPTS, T>
{
    fn serialize(
        &self,
        _record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        serializer.emit_str("request", key)?;
        serializer.emit_arguments(
            "hash",
            &format_args!("{}", self.block_request_state.requested_block_root),
        )?;
        serializer.emit_arguments(
            "blob_ids",
            &format_args!("{:?}", self.blob_request_state.requested_ids),
        )?;
        serializer.emit_arguments(
            "block_request_state.state",
            &format_args!("{:?}", self.block_request_state.state),
        )?;
        serializer.emit_arguments(
            "blob_request_state.state",
            &format_args!("{:?}", self.blob_request_state.state),
        )?;
        slog::Result::Ok(())
    }
}

impl<const MAX_ATTEMPTS: u8> slog::Value for SingleLookupRequestState<MAX_ATTEMPTS> {
    fn serialize(
        &self,
        record: &slog::Record,
        key: slog::Key,
        serializer: &mut dyn slog::Serializer,
    ) -> slog::Result {
        serializer.emit_str("request", key)?;
        serializer.emit_arguments("hash", &format_args!("{}", self.hash))?;
        serializer.emit_str("request_state", key)?;
        match &self.state {
            State::AwaitingDownload => {
                "awaiting_download".serialize(record, "state", serializer)?
@@ -205,9 +803,16 @@ impl<const MAX_ATTEMPTS: u8> slog::Value for SingleBlockRequest<MAX_ATTEMPTS> {
#[cfg(test)]
mod tests {
    use super::*;
    use beacon_chain::builder::Witness;
    use beacon_chain::eth1_chain::CachingEth1Backend;
    use sloggers::null::NullLoggerBuilder;
    use sloggers::Build;
    use slot_clock::{SlotClock, TestingSlotClock};
    use std::time::Duration;
    use store::{HotColdDB, MemoryStore, StoreConfig};
    use types::{
        test_utils::{SeedableRng, TestRandom, XorShiftRng},
        MinimalEthSpec as E, SignedBeaconBlock,
        ChainSpec, EthSpec, MinimalEthSpec as E, SignedBeaconBlock, Slot,
    };

    fn rand_block() -> SignedBeaconBlock<E> {
@@ -219,13 +824,27 @@ mod tests {
            types::Signature::random_for_test(&mut rng),
        )
    }
    type T = Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;

    #[test]
    fn test_happy_path() {
        let peer_id = PeerId::random();
        let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random());
        let block = rand_block();

        let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id);
        let spec = E::default_spec();
        let slot_clock = TestingSlotClock::new(
            Slot::new(0),
            Duration::from_secs(0),
            Duration::from_secs(spec.seconds_per_slot),
        );
        let log = NullLoggerBuilder.build().expect("logger should build");
        let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log)
            .expect("store");
        let da_checker = Arc::new(
            DataAvailabilityChecker::new(slot_clock, None, store.into(), spec)
                .expect("data availability checker"),
        );
        let mut sl =
            SingleBlockLookup::<4, T>::new(block.canonical_root(), None, &[peer_id], da_checker);
        sl.request_block().unwrap();
        sl.verify_block(Some(block.into())).unwrap().unwrap();
    }
@@ -233,13 +852,32 @@ mod tests {
    #[test]
    fn test_block_lookup_failures() {
        const FAILURES: u8 = 3;
        let peer_id = PeerId::random();
        let peer_id = PeerShouldHave::BlockAndBlobs(PeerId::random());
        let block = rand_block();
        let spec = E::default_spec();
        let slot_clock = TestingSlotClock::new(
            Slot::new(0),
            Duration::from_secs(0),
            Duration::from_secs(spec.seconds_per_slot),
        );
        let log = NullLoggerBuilder.build().expect("logger should build");
        let store = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log)
            .expect("store");

        let mut sl = SingleBlockRequest::<FAILURES>::new(block.canonical_root(), peer_id);
        let da_checker = Arc::new(
            DataAvailabilityChecker::new(slot_clock, None, store.into(), spec)
                .expect("data availability checker"),
        );

        let mut sl = SingleBlockLookup::<FAILURES, T>::new(
            block.canonical_root(),
            None,
            &[peer_id],
            da_checker,
        );
        for _ in 1..FAILURES {
            sl.request_block().unwrap();
            sl.register_failure_downloading();
            sl.block_request_state.state.register_failure_downloading();
        }

        // Now we receive the block and send it for processing
@@ -247,7 +885,7 @@ mod tests {
        sl.verify_block(Some(block.into())).unwrap().unwrap();

        // One processing failure maxes the available attempts
        sl.register_failure_processing();
        sl.block_request_state.state.register_failure_processing();
        assert_eq!(
            sl.request_block(),
            Err(LookupRequestError::TooManyAttempts {

(File diff suppressed because it is too large.)
@@ -1,4 +1,5 @@
use beacon_chain::blob_verification::BlockWrapper;
use ssz_types::FixedVector;
use std::{collections::VecDeque, sync::Arc};
use types::{BlobSidecar, EthSpec, SignedBeaconBlock};

@@ -55,7 +56,22 @@ impl<T: EthSpec> BlocksAndBlobsRequestInfo<T> {
                if blob_list.is_empty() {
                    responses.push(BlockWrapper::Block(block))
                } else {
                    responses.push(BlockWrapper::BlockAndBlobs(block, blob_list))
                    let mut blobs_fixed = vec![None; T::max_blobs_per_block()];
                    for blob in blob_list {
                        let blob_index = blob.index as usize;
                        let Some(blob_opt) = blobs_fixed.get_mut(blob_index) else {
                            return Err("Invalid blob index");
                        };
                        if blob_opt.is_some() {
                            return Err("Repeat blob index");
                        } else {
                            *blob_opt = Some(blob);
                        }
                    }
                    responses.push(BlockWrapper::BlockAndBlobs(
                        block,
                        FixedVector::from(blobs_fixed),
                    ))
                }
            }
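
The same grouping logic, reduced to a runnable function over plain indices: a block's blobs go into a fixed-length, index-addressed list, and out-of-range or duplicate indices are rejected so the responding peer can be penalized.

```rust
fn group_blobs(
    blob_indices: &[usize],
    max_blobs: usize,
) -> Result<Vec<Option<usize>>, &'static str> {
    let mut blobs_fixed = vec![None; max_blobs];
    for &index in blob_indices {
        let Some(slot) = blobs_fixed.get_mut(index) else {
            return Err("Invalid blob index");
        };
        if slot.is_some() {
            return Err("Repeat blob index");
        }
        *slot = Some(index);
    }
    Ok(blobs_fixed)
}

fn main() {
    assert!(group_blobs(&[0, 2], 6).is_ok());
    assert_eq!(group_blobs(&[1, 1], 6), Err("Repeat blob index"));
    assert_eq!(group_blobs(&[6], 6), Err("Invalid blob index"));
}
```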

@@ -34,17 +34,24 @@
//! search for the block and subsequently search for parents if needed.

use super::backfill_sync::{BackFillSync, ProcessResult, SyncStart};
use super::block_lookups::BlockLookups;
use super::block_lookups::{BlockLookups, PeerShouldHave};
use super::network_context::{BlockOrBlob, SyncNetworkContext};
use super::peer_sync_info::{remote_sync_type, PeerSyncType};
use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
use crate::service::NetworkMessage;
use crate::status::ToStatusMessage;
use crate::sync::block_lookups::delayed_lookup;
use crate::sync::block_lookups::delayed_lookup::DelayedLookupMessage;
pub use crate::sync::block_lookups::ResponseType;
use crate::sync::block_lookups::UnknownParentComponents;
use crate::sync::range_sync::ByRangeRequestType;
use beacon_chain::blob_verification::AsBlock;
use beacon_chain::blob_verification::BlockWrapper;
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState};
use beacon_chain::{
    AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState,
    MAXIMUM_GOSSIP_CLOCK_DISPARITY,
};
use futures::StreamExt;
use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
use lighthouse_network::rpc::RPCError;
@@ -52,12 +59,14 @@ use lighthouse_network::types::{NetworkGlobals, SyncState};
use lighthouse_network::SyncInfo;
use lighthouse_network::{PeerAction, PeerId};
use slog::{crit, debug, error, info, trace, warn, Logger};
use slot_clock::SlotClock;
use std::boxed::Box;
use std::ops::IndexMut;
use std::ops::Sub;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use types::blob_sidecar::BlobIdentifier;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};

/// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync
@@ -68,6 +77,9 @@ use types::{BlobSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
/// gossip if no peers are further than this range ahead of us that we have not already downloaded
/// blocks for.
pub const SLOT_IMPORT_TOLERANCE: usize = 32;
/// The maximum number of messages the delay queue can handle in a single slot before messages are
/// dropped.
pub const DELAY_QUEUE_CHANNEL_SIZE: usize = 128;

pub type Id = u32;

@@ -81,11 +93,11 @@ pub enum RequestId {
    /// Request was from the backfill sync algorithm.
    BackFillBlocks { id: Id },
    /// Backfill request that is composed by both a block range request and a blob range request.
    BackFillBlobs { id: Id },
    BackFillBlockAndBlobs { id: Id },
    /// The request was from a chain in the range sync algorithm.
    RangeBlocks { id: Id },
    /// Range request that is composed by both a block range request and a blob range request.
    RangeBlobs { id: Id },
    RangeBlockAndBlobs { id: Id },
}

// TODO(diva) I'm updating functions one at a time, but this should be revisited because I think
@@ -115,18 +127,24 @@ pub enum SyncMessage<T: EthSpec> {
    },

    /// A block with an unknown parent has been received.
    UnknownBlock(PeerId, BlockWrapper<T>, Hash256),
    UnknownParentBlock(PeerId, BlockWrapper<T>, Hash256),

    /// A peer has sent an object that references a block that is unknown. This triggers the
    /// A blob with an unknown parent has been received.
    UnknownParentBlob(PeerId, Arc<BlobSidecar<T>>),

    /// A peer has sent an attestation that references a block that is unknown. This triggers the
    /// manager to attempt to find the block matching the unknown hash.
    UnknownBlockHash(PeerId, Hash256),
    UnknownBlockHashFromAttestation(PeerId, Hash256),

    /// A peer has sent us a block that we haven't received all the blobs for. This triggers
    /// the manager to attempt to find the pending blobs for the given block root.
    UnknownBlobHash {
        peer_id: PeerId,
        pending_blobs: Vec<BlobIdentifier>,
    },
    /// A peer has sent a blob that references a block that is unknown, or a peer has sent a block
    /// for which we haven't received blobs.
    ///
    /// We will either attempt to find the block matching the unknown hash immediately or queue a lookup,
    /// which will then trigger the request when we receive `MissingGossipBlockComponentsDelayed`.
    MissingGossipBlockComponents(Slot, PeerId, Hash256),

    /// This message triggers a request for missing block components after a delay.
    MissingGossipBlockComponentsDelayed(Hash256),

    /// A peer has disconnected.
    Disconnect(PeerId),
@@ -145,9 +163,10 @@ pub enum SyncMessage<T: EthSpec> {
    },

    /// Block processed
    BlockProcessed {
    BlockComponentProcessed {
        process_type: BlockProcessType,
        result: BlockProcessResult<T>,
        result: BlockProcessingResult<T>,
        response_type: ResponseType,
    },
}

@@ -159,8 +178,8 @@ pub enum BlockProcessType {
}

#[derive(Debug)]
pub enum BlockProcessResult<T: EthSpec> {
    Ok,
pub enum BlockProcessingResult<T: EthSpec> {
    Ok(AvailabilityProcessingStatus),
    Err(BlockError<T>),
    Ignored,
}
@@ -205,6 +224,8 @@ pub struct SyncManager<T: BeaconChainTypes> {

    block_lookups: BlockLookups<T>,

    delayed_lookups: mpsc::Sender<DelayedLookupMessage>,

    /// The logger for the import manager.
    log: Logger,
}
@@ -226,6 +247,8 @@ pub fn spawn<T: BeaconChainTypes>(
    );
    // generate the message channel
    let (sync_send, sync_recv) = mpsc::unbounded_channel::<SyncMessage<T::EthSpec>>();
    let (delayed_lookups_send, delayed_lookups_recv) =
        mpsc::channel::<DelayedLookupMessage>(DELAY_QUEUE_CHANNEL_SIZE);

    // create an instance of the SyncManager
    let mut sync_manager = SyncManager {
@@ -240,15 +263,29 @@ pub fn spawn<T: BeaconChainTypes>(
            log.clone(),
        ),
        range_sync: RangeSync::new(beacon_chain.clone(), log.clone()),
        backfill_sync: BackFillSync::new(beacon_chain, network_globals, log.clone()),
        block_lookups: BlockLookups::new(log.clone()),
        backfill_sync: BackFillSync::new(beacon_chain.clone(), network_globals, log.clone()),
        block_lookups: BlockLookups::new(
            beacon_chain.data_availability_checker.clone(),
            log.clone(),
        ),
        delayed_lookups: delayed_lookups_send,
        log: log.clone(),
    };

    let log_clone = log.clone();
    let sync_send_clone = sync_send.clone();
    delayed_lookup::spawn_delayed_lookup_service(
        &executor,
        beacon_chain,
        delayed_lookups_recv,
        sync_send,
        log,
    );

    // spawn the sync manager thread
    debug!(log, "Sync Manager started");
    debug!(log_clone, "Sync Manager started");
    executor.spawn(async move { Box::pin(sync_manager.main()).await }, "sync");
    sync_send
    sync_send_clone
}

impl<T: BeaconChainTypes> SyncManager<T> {
@@ -291,8 +328,12 @@ impl<T: BeaconChainTypes> SyncManager<T> {
        trace!(self.log, "Sync manager received a failed RPC");
        match request_id {
            RequestId::SingleBlock { id } => {
                self.block_lookups
                    .single_block_lookup_failed(id, &mut self.network);
                self.block_lookups.single_block_lookup_failed(
                    id,
                    &peer_id,
                    &mut self.network,
                    error,
                );
            }
            RequestId::ParentLookup { id } => {
                self.block_lookups
@@ -313,7 +354,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                }
            }

            RequestId::BackFillBlobs { id } => {
            RequestId::BackFillBlockAndBlobs { id } => {
                if let Some(batch_id) = self
                    .network
                    .backfill_request_failed(id, ByRangeRequestType::BlocksAndBlobs)
@@ -342,7 +383,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                    self.update_sync_state()
                }
            }
            RequestId::RangeBlobs { id } => {
            RequestId::RangeBlockAndBlobs { id } => {
                if let Some((chain_id, batch_id)) = self
                    .network
                    .range_sync_request_failed(id, ByRangeRequestType::BlocksAndBlobs)
@@ -567,49 +608,84 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                beacon_block,
                seen_timestamp,
            } => {
                self.rpc_block_or_blob_received(
                    request_id,
                self.rpc_block_received(request_id, peer_id, beacon_block, seen_timestamp);
            }
            SyncMessage::RpcBlob {
                request_id,
                peer_id,
                blob_sidecar,
                seen_timestamp,
            } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp),
            SyncMessage::UnknownParentBlock(peer_id, block, block_root) => {
                let block_slot = block.slot();
                let (block, blobs) = block.deconstruct();
                let parent_root = block.parent_root();
                let parent_components = UnknownParentComponents::new(Some(block), blobs);
                self.handle_unknown_parent(
                    peer_id,
                    beacon_block.into(),
                    seen_timestamp,
                    block_root,
                    parent_root,
                    block_slot,
                    Some(parent_components),
                );
            }
            SyncMessage::UnknownBlock(peer_id, block, block_root) => {
                // If we are not synced or within SLOT_IMPORT_TOLERANCE of the block, ignore
                if !self.network_globals.sync_state.read().is_synced() {
                    let head_slot = self.chain.canonical_head.cached_head().head_slot();
                    let unknown_block_slot = block.slot();

                    // if the block is far in the future, ignore it. If it's within the slot tolerance of
                    // our current head, regardless of the syncing state, fetch it.
                    if (head_slot >= unknown_block_slot
                        && head_slot.sub(unknown_block_slot).as_usize() > SLOT_IMPORT_TOLERANCE)
                        || (head_slot < unknown_block_slot
                            && unknown_block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE)
                    {
                        return;
            SyncMessage::UnknownParentBlob(peer_id, blob) => {
                let blob_slot = blob.slot;
                let block_root = blob.block_root;
                let parent_root = blob.block_parent_root;
                let blob_index = blob.index;
                let mut blobs = FixedBlobSidecarList::default();
                *blobs.index_mut(blob_index as usize) = Some(blob);
                self.handle_unknown_parent(
                    peer_id,
                    block_root,
                    parent_root,
                    blob_slot,
                    Some(UnknownParentComponents::new(None, Some(blobs))),
                );
            }
            SyncMessage::UnknownBlockHashFromAttestation(peer_id, block_hash) => {
                // If we are not synced, ignore this block.
                if self.synced_and_connected(&peer_id) {
                    self.block_lookups.search_block(
                        block_hash,
                        PeerShouldHave::BlockAndBlobs(peer_id),
                        &mut self.network,
                    );
                }
            }
            SyncMessage::MissingGossipBlockComponents(slot, peer_id, block_root) => {
                // If we are not synced, ignore this block.
                if self.synced_and_connected(&peer_id) {
                    if self.should_delay_lookup(slot) {
                        self.block_lookups
                            .search_block_delayed(block_root, PeerShouldHave::Neither(peer_id));
                        if let Err(e) = self
                            .delayed_lookups
                            .try_send(DelayedLookupMessage::MissingComponents(block_root))
                        {
                            warn!(self.log, "Delayed lookup dropped for block referenced by a blob";
                                "block_root" => ?block_root, "error" => ?e);
                        }
                    } else {
                        self.block_lookups.search_block(
                            block_root,
                            PeerShouldHave::Neither(peer_id),
                            &mut self.network,
                        )
                    }
                }
                if self.network_globals.peers.read().is_connected(&peer_id)
                    && self.network.is_execution_engine_online()
                {
                    self.block_lookups
                        .search_parent(block_root, block, peer_id, &mut self.network);
                }
            }
            SyncMessage::UnknownBlockHash(peer_id, block_hash) => {
                // If we are not synced, ignore this block.
                if self.network_globals.sync_state.read().is_synced()
                    && self.network_globals.peers.read().is_connected(&peer_id)
                    && self.network.is_execution_engine_online()
            SyncMessage::MissingGossipBlockComponentsDelayed(block_root) => {
                if self
                    .block_lookups
                    .trigger_lookup_by_root(block_root, &mut self.network)
                    .is_err()
                {
                    self.block_lookups
                        .search_block(block_hash, peer_id, &mut self.network);
                    // No request was made for block or blob so the lookup is dropped.
                    self.block_lookups.remove_lookup_by_root(block_root);
                }
            }
            SyncMessage::UnknownBlobHash { .. } => {
                unimplemented!()
            }
            SyncMessage::Disconnect(peer_id) => {
                self.peer_disconnect(&peer_id);
            }
@@ -618,17 +694,17 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                request_id,
                error,
            } => self.inject_error(peer_id, request_id, error),
            SyncMessage::BlockProcessed {
            SyncMessage::BlockComponentProcessed {
                process_type,
                result,
                response_type,
            } => match process_type {
                BlockProcessType::SingleBlock { id } => {
                    self.block_lookups
                        .single_block_processed(id, result, &mut self.network)
                }
                BlockProcessType::SingleBlock { id } => self
                    .block_lookups
                    .single_block_component_processed(id, result, response_type, &mut self.network),
                BlockProcessType::ParentLookup { chain_hash } => self
                    .block_lookups
                    .parent_block_processed(chain_hash, result, &mut self.network),
                    .parent_block_processed(chain_hash, result, response_type, &mut self.network),
            },
            SyncMessage::BatchProcessed { sync_type, result } => match sync_type {
                ChainSegmentProcessId::RangeBatchId(chain_id, epoch, _) => {
@@ -659,20 +735,97 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                    .block_lookups
                    .parent_chain_processed(chain_hash, result, &mut self.network),
            },
            SyncMessage::RpcBlob {
                request_id,
                peer_id,
                blob_sidecar,
                seen_timestamp,
            } => self.rpc_block_or_blob_received(
                request_id,
                peer_id,
                blob_sidecar.into(),
                seen_timestamp,
            ),
        }
    }

    fn handle_unknown_parent(
        &mut self,
        peer_id: PeerId,
        block_root: Hash256,
        parent_root: Hash256,
        slot: Slot,
        parent_components: Option<UnknownParentComponents<T::EthSpec>>,
    ) {
        if self.should_search_for_block(slot, &peer_id) {
            self.block_lookups.search_parent(
                slot,
                block_root,
                parent_root,
                peer_id,
                &mut self.network,
            );
            if self.should_delay_lookup(slot) {
                self.block_lookups.search_child_delayed(
                    block_root,
                    parent_components,
                    &[PeerShouldHave::Neither(peer_id)],
                );
                if let Err(e) = self
                    .delayed_lookups
                    .try_send(DelayedLookupMessage::MissingComponents(block_root))
                {
                    warn!(self.log, "Delayed lookups dropped for block"; "block_root" => ?block_root, "error" => ?e);
                }
            } else {
                self.block_lookups.search_child_block(
                    block_root,
                    parent_components,
                    &[PeerShouldHave::Neither(peer_id)],
                    &mut self.network,
                );
            }
        }
    }

    fn should_delay_lookup(&mut self, slot: Slot) -> bool {
        let earliest_slot = self
            .chain
            .slot_clock
            .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY);
        let latest_slot = self
            .chain
            .slot_clock
            .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY);
        if let (Some(earliest_slot), Some(latest_slot)) = (earliest_slot, latest_slot) {
            let msg_for_current_slot = slot >= earliest_slot && slot <= latest_slot;
            let delay_threshold_unmet = self
                .chain
                .slot_clock
                .seconds_from_current_slot_start()
                .map_or(false, |secs_into_slot| {
                    secs_into_slot < self.chain.slot_clock.single_lookup_delay()
                });
            msg_for_current_slot && delay_threshold_unmet
        } else {
            false
        }
    }
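
The decision reduces to two conditions: the message concerns the current slot, and we are still early enough in that slot to expect more components over gossip. A plain-number sketch (the clock-disparity tolerance is collapsed to zero here, and all values are made up):

```rust
fn should_delay_lookup(
    msg_slot: u64,
    current_slot: u64,
    secs_into_slot: u64,
    single_lookup_delay_secs: u64,
) -> bool {
    let msg_for_current_slot = msg_slot == current_slot;
    let delay_threshold_unmet = secs_into_slot < single_lookup_delay_secs;
    msg_for_current_slot && delay_threshold_unmet
}

fn main() {
    // 2s into the current slot with a 4s delay window: wait for gossip.
    assert!(should_delay_lookup(100, 100, 2, 4));
    // Past the window, or about an older slot: look up immediately.
    assert!(!should_delay_lookup(100, 100, 5, 4));
    assert!(!should_delay_lookup(99, 100, 2, 4));
}
```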

    fn should_search_for_block(&mut self, block_slot: Slot, peer_id: &PeerId) -> bool {
        if !self.network_globals.sync_state.read().is_synced() {
            let head_slot = self.chain.canonical_head.cached_head().head_slot();

            // if the block is far in the future, ignore it. If it's within the slot tolerance of
            // our current head, regardless of the syncing state, fetch it.
            if (head_slot >= block_slot
                && head_slot.sub(block_slot).as_usize() > SLOT_IMPORT_TOLERANCE)
                || (head_slot < block_slot
                    && block_slot.sub(head_slot).as_usize() > SLOT_IMPORT_TOLERANCE)
            {
                return false;
            }
        }

        self.network_globals.peers.read().is_connected(peer_id)
            && self.network.is_execution_engine_online()
    }
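
A worked version of the tolerance test with plain integers; the 32-slot cutoff is `SLOT_IMPORT_TOLERANCE`. While not synced, a single-block lookup is only worth doing if the block is within 32 slots of our head in either direction; anything further out is left to range/backfill sync.

```rust
const SLOT_IMPORT_TOLERANCE: u64 = 32;

fn within_tolerance(head_slot: u64, block_slot: u64) -> bool {
    head_slot.abs_diff(block_slot) <= SLOT_IMPORT_TOLERANCE
}

fn main() {
    assert!(within_tolerance(1000, 1010)); // near head: search for it
    assert!(within_tolerance(1000, 970)); // slightly behind head: search
    assert!(!within_tolerance(1000, 1100)); // far ahead: ignore, sync instead
}
```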
|
||||
|
||||
fn synced_and_connected(&mut self, peer_id: &PeerId) -> bool {
|
||||
self.network_globals.sync_state.read().is_synced()
|
||||
&& self.network_globals.peers.read().is_connected(peer_id)
|
||||
&& self.network.is_execution_engine_online()
|
||||
}
|
||||
|
||||
fn handle_new_execution_engine_state(&mut self, engine_state: EngineState) {
|
||||
self.network.update_execution_engine_state(engine_state);
|
||||
|
||||
@ -728,50 +881,30 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
||||
}
|
||||
}
|
||||
|
||||
-    fn rpc_block_or_blob_received(
+    fn rpc_block_received(
         &mut self,
         request_id: RequestId,
         peer_id: PeerId,
-        block_or_blob: BlockOrBlob<T::EthSpec>,
+        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
         seen_timestamp: Duration,
     ) {
         match request_id {
-            RequestId::SingleBlock { id } => {
-                // TODO(diva) adjust when dealing with by root requests. This code is here to
-                // satisfy dead code analysis
-                match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => {
-                        self.block_lookups.single_block_lookup_response(
-                            id,
-                            peer_id,
-                            maybe_block.map(BlockWrapper::Block),
-                            seen_timestamp,
-                            &mut self.network,
-                        )
-                    }
-                    BlockOrBlob::Sidecar(_) => unimplemented!("Mismatch between BlockWrapper and what the network receives needs to be handled first."),
-                }
-            }
-            RequestId::ParentLookup { id } => {
-                // TODO(diva) adjust when dealing with by root requests. This code is here to
-                // satisfy dead code analysis
-                match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => self.block_lookups.parent_lookup_response(
-                        id,
-                        peer_id,
-                        maybe_block.map(BlockWrapper::Block),
-                        seen_timestamp,
-                        &mut self.network,
-                    ),
-                    BlockOrBlob::Sidecar(_) => unimplemented!("Mismatch between BlockWrapper and what the network receives needs to be handled first."),
-                }
-            }
+            RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response(
+                id,
+                peer_id,
+                block,
+                seen_timestamp,
+                &mut self.network,
+            ),
+            RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response(
+                id,
+                peer_id,
+                block,
+                seen_timestamp,
+                &mut self.network,
+            ),
             RequestId::BackFillBlocks { id } => {
-                let maybe_block = match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => maybe_block,
-                    BlockOrBlob::Sidecar(_) => todo!("I think this is unreachable"),
-                };
-                let is_stream_terminator = maybe_block.is_none();
+                let is_stream_terminator = block.is_none();
                 if let Some(batch_id) = self
                     .network
                     .backfill_sync_only_blocks_response(id, is_stream_terminator)
@@ -781,7 +914,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         batch_id,
                         &peer_id,
                         id,
-                        maybe_block.map(|block| block.into()),
+                        block.map(BlockWrapper::Block),
                     ) {
                         Ok(ProcessResult::SyncCompleted) => self.update_sync_state(),
                         Ok(ProcessResult::Successful) => {}
@@ -794,14 +927,10 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                 }
             }
             RequestId::RangeBlocks { id } => {
-                let maybe_block = match block_or_blob {
-                    BlockOrBlob::Block(maybe_block) => maybe_block,
-                    BlockOrBlob::Sidecar(_) => todo!("I think this should be unreachable, since this is a range only-blocks request, and the network should not accept this chunk at all. Needs better handling"),
-                };
-                let is_stream_terminator = maybe_block.is_none();
+                let is_stream_terminator = block.is_none();
                 if let Some((chain_id, batch_id)) = self
                     .network
-                    .range_sync_block_response(id, is_stream_terminator)
+                    .range_sync_block_only_response(id, is_stream_terminator)
                 {
                     self.range_sync.blocks_by_range_response(
                         &mut self.network,
@@ -809,17 +938,53 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         chain_id,
                         batch_id,
                         id,
-                        maybe_block.map(|block| block.into()),
+                        block.map(BlockWrapper::Block),
                     );
                     self.update_sync_state();
                 }
             }

-            RequestId::BackFillBlobs { id } => {
-                self.backfill_block_and_blobs_response(id, peer_id, block_or_blob)
+            RequestId::BackFillBlockAndBlobs { id } => {
+                self.backfill_block_and_blobs_response(id, peer_id, block.into())
             }
-            RequestId::RangeBlobs { id } => {
-                self.range_block_and_blobs_response(id, peer_id, block_or_blob)
+            RequestId::RangeBlockAndBlobs { id } => {
+                self.range_block_and_blobs_response(id, peer_id, block.into())
             }
         }
     }

    fn rpc_blob_received(
        &mut self,
        request_id: RequestId,
        peer_id: PeerId,
        blob: Option<Arc<BlobSidecar<T::EthSpec>>>,
        seen_timestamp: Duration,
    ) {
        match request_id {
            RequestId::SingleBlock { id } => self.block_lookups.single_blob_lookup_response(
                id,
                peer_id,
                blob,
                seen_timestamp,
                &mut self.network,
            ),
            RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_blob_response(
                id,
                peer_id,
                blob,
                seen_timestamp,
                &mut self.network,
            ),
            RequestId::BackFillBlocks { id: _ } => {
                crit!(self.log, "Blob received during backfill block request"; "peer_id" => %peer_id );
            }
            RequestId::RangeBlocks { id: _ } => {
                crit!(self.log, "Blob received during range block request"; "peer_id" => %peer_id );
            }
            RequestId::BackFillBlockAndBlobs { id } => {
                self.backfill_block_and_blobs_response(id, peer_id, blob.into())
            }
            RequestId::RangeBlockAndBlobs { id } => {
                self.range_block_and_blobs_response(id, peer_id, blob.into())
            }
        }
    }
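After the split, blocks and blobs arrive through separate RPC handlers, and only the coupled range/backfill request ids funnel both components into a combined path; a blob on a blocks-only request is treated as a protocol violation. A toy sketch of that dispatch shape (every type here is a stand-in, not the real sync-manager code):

```rust
/// Stand-in for the sync manager's request id enum.
#[derive(Debug, Clone, Copy)]
enum RequestId {
    SingleBlock { id: u32 },
    ParentLookup { id: u32 },
    BackFillBlocks { id: u32 },
    RangeBlocks { id: u32 },
    BackFillBlockAndBlobs { id: u32 },
    RangeBlockAndBlobs { id: u32 },
}

/// Routes an incoming blob the way `rpc_blob_received` does above.
fn on_blob_received(request_id: RequestId) -> &'static str {
    match request_id {
        // Blob for a by-root lookup: handled by the blob lookup path.
        RequestId::SingleBlock { .. } | RequestId::ParentLookup { .. } => "blob lookup path",
        // A blob on a blocks-only request should never happen.
        RequestId::BackFillBlocks { .. } | RequestId::RangeBlocks { .. } => "crit: unexpected blob",
        // Coupled requests merge blocks and blobs before processing.
        RequestId::BackFillBlockAndBlobs { .. } | RequestId::RangeBlockAndBlobs { .. } => {
            "coupled block and blob path"
        }
    }
}

fn main() {
    let ids = [
        RequestId::SingleBlock { id: 0 },
        RequestId::ParentLookup { id: 1 },
        RequestId::BackFillBlocks { id: 2 },
        RequestId::RangeBlocks { id: 3 },
        RequestId::BackFillBlockAndBlobs { id: 4 },
        RequestId::RangeBlockAndBlobs { id: 5 },
    ];
    for id in ids {
        println!("{:?} -> {}", id, on_blob_received(id));
    }
}
```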
@@ -863,7 +1028,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                        "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
                    );
                    // TODO: penalize the peer for being a bad boy
-                    let id = RequestId::RangeBlobs { id };
+                    let id = RequestId::RangeBlockAndBlobs { id };
                    self.inject_error(peer_id, id, RPCError::InvalidData(e.into()))
                }
            }
@@ -915,7 +1080,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                        "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
                    );
                    // TODO: penalize the peer for being a bad boy
-                    let id = RequestId::BackFillBlobs { id };
+                    let id = RequestId::BackFillBlockAndBlobs { id };
                    self.inject_error(peer_id, id, RPCError::InvalidData(e.into()))
                }
            }
@@ -923,17 +1088,19 @@ impl<T: BeaconChainTypes> SyncManager<T> {
    }
}

-impl<IgnoredOkVal, T: EthSpec> From<Result<IgnoredOkVal, BlockError<T>>> for BlockProcessResult<T> {
-    fn from(result: Result<IgnoredOkVal, BlockError<T>>) -> Self {
+impl<T: EthSpec> From<Result<AvailabilityProcessingStatus, BlockError<T>>>
+    for BlockProcessingResult<T>
+{
+    fn from(result: Result<AvailabilityProcessingStatus, BlockError<T>>) -> Self {
        match result {
-            Ok(_) => BlockProcessResult::Ok,
-            Err(e) => e.into(),
+            Ok(status) => BlockProcessingResult::Ok(status),
+            Err(e) => BlockProcessingResult::Err(e),
        }
    }
}

-impl<T: EthSpec> From<BlockError<T>> for BlockProcessResult<T> {
+impl<T: EthSpec> From<BlockError<T>> for BlockProcessingResult<T> {
    fn from(e: BlockError<T>) -> Self {
-        BlockProcessResult::Err(e)
+        BlockProcessingResult::Err(e)
    }
}

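The renamed `BlockProcessingResult` keeps the `AvailabilityProcessingStatus` on success instead of collapsing it to a unit `Ok`, so callers can distinguish an imported block from one still waiting on blobs. A self-contained analogue of the conversion (the unit variants are toys; the real status variants carry roots and slots):

```rust
#[derive(Debug, PartialEq)]
enum AvailabilityProcessingStatus {
    Imported,
    MissingComponents,
}

#[derive(Debug, PartialEq)]
enum BlockError {
    ParentUnknown,
}

#[derive(Debug, PartialEq)]
enum BlockProcessingResult {
    Ok(AvailabilityProcessingStatus),
    Err(BlockError),
}

impl From<Result<AvailabilityProcessingStatus, BlockError>> for BlockProcessingResult {
    fn from(result: Result<AvailabilityProcessingStatus, BlockError>) -> Self {
        match result {
            Ok(status) => BlockProcessingResult::Ok(status),
            Err(e) => BlockProcessingResult::Err(e),
        }
    }
}

fn main() {
    let pending: Result<AvailabilityProcessingStatus, BlockError> =
        Ok(AvailabilityProcessingStatus::MissingComponents);
    assert_eq!(
        BlockProcessingResult::from(pending),
        BlockProcessingResult::Ok(AvailabilityProcessingStatus::MissingComponents)
    );

    let failed: Result<AvailabilityProcessingStatus, BlockError> = Err(BlockError::ParentUnknown);
    assert_eq!(
        BlockProcessingResult::from(failed),
        BlockProcessingResult::Err(BlockError::ParentUnknown)
    );

    // `Imported` is the other success case; constructed here only to exercise the variant.
    let _ = BlockProcessingResult::from(Ok::<_, BlockError>(AvailabilityProcessingStatus::Imported));
}
```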
@@ -9,5 +9,6 @@ mod network_context;
 mod peer_sync_info;
 mod range_sync;

+pub use block_lookups::UnknownParentComponents;
 pub use manager::{BatchProcessResult, SyncMessage};
 pub use range_sync::{BatchOperationOutcome, ChainId};

@@ -7,11 +7,11 @@ use super::range_sync::{BatchId, ByRangeRequestType, ChainId};
 use crate::beacon_processor::WorkEvent;
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::ToStatusMessage;
-use crate::sync::block_lookups::ForceBlockRequest;
+use crate::sync::block_lookups::{BlobRequestId, BlockRequestId};
 use beacon_chain::blob_verification::BlockWrapper;
 use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState};
 use fnv::FnvHashMap;
-use lighthouse_network::rpc::methods::BlobsByRangeRequest;
+use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
 use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason};
 use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request};
 use slog::{debug, trace, warn};
@@ -62,7 +62,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
     /// Channel to send work to the beacon processor.
     beacon_processor_send: mpsc::Sender<WorkEvent<T>>,

-    chain: Arc<BeaconChain<T>>,
+    pub chain: Arc<BeaconChain<T>>,

     /// Logger for the `SyncNetworkContext`.
     log: slog::Logger,
@@ -71,7 +71,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
 /// Small enumeration to make dealing with block and blob requests easier.
 pub enum BlockOrBlob<T: EthSpec> {
     Block(Option<Arc<SignedBeaconBlock<T>>>),
-    Sidecar(Option<Arc<BlobSidecar<T>>>),
+    Blob(Option<Arc<BlobSidecar<T>>>),
 }

 impl<T: EthSpec> From<Option<Arc<SignedBeaconBlock<T>>>> for BlockOrBlob<T> {
@@ -82,7 +82,7 @@ impl<T: EthSpec> From<Option<Arc<SignedBeaconBlock<T>>>> for BlockOrBlob<T> {

 impl<T: EthSpec> From<Option<Arc<BlobSidecar<T>>>> for BlockOrBlob<T> {
     fn from(blob: Option<Arc<BlobSidecar<T>>>) -> Self {
-        BlockOrBlob::Sidecar(blob)
+        BlockOrBlob::Blob(blob)
     }
 }

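The two `From` impls above are what let call sites write `block.into()` and `blob.into()` when forwarding a response into the coupled handlers. On the receiving side, a coupled request buffers both streams until each has sent its terminator; a hedged sketch of that accumulation, assuming a simple buffer (the real `BlocksAndBlobsRequestInfo` differs in detail):

```rust
/// Toy accumulator for one coupled block-and-blob request. `u64` stands in
/// for blocks and sidecars; `None` models an RPC stream terminator.
#[derive(Default)]
struct BlockBlobBuffer {
    blocks: Vec<u64>,
    blobs: Vec<u64>,
    blocks_done: bool,
    blobs_done: bool,
}

impl BlockBlobBuffer {
    fn add_block_response(&mut self, block: Option<u64>) {
        match block {
            Some(b) => self.blocks.push(b),
            None => self.blocks_done = true,
        }
    }

    fn add_sidecar_response(&mut self, blob: Option<u64>) {
        match blob {
            Some(b) => self.blobs.push(b),
            None => self.blobs_done = true,
        }
    }

    /// The request is complete only once both streams have terminated.
    fn is_finished(&self) -> bool {
        self.blocks_done && self.blobs_done
    }
}

fn main() {
    let mut info = BlockBlobBuffer::default();
    info.add_block_response(Some(1));
    info.add_block_response(None);
    assert!(!info.is_finished()); // the blob stream is still open
    info.add_sidecar_response(Some(1));
    info.add_sidecar_response(None);
    assert!(info.is_finished());
    assert_eq!(info.blocks.len(), 1);
    assert_eq!(info.blobs.len(), 1);
}
```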
@@ -187,7 +187,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {

        // create the shared request id. This is fine since the rpc handles substream ids.
        let id = self.next_id();
-        let request_id = RequestId::Sync(SyncRequestId::RangeBlobs { id });
+        let request_id = RequestId::Sync(SyncRequestId::RangeBlockAndBlobs { id });

        // Create the blob request based on the blocks request.
        let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
@@ -260,7 +260,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {

        // create the shared request id. This is fine since the rpc handles substream ids.
        let id = self.next_id();
-        let request_id = RequestId::Sync(SyncRequestId::BackFillBlobs { id });
+        let request_id = RequestId::Sync(SyncRequestId::BackFillBlockAndBlobs { id });

        // Create the blob request based on the blocks request.
        let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
@@ -289,7 +289,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
    }

    /// Response for a request that is only for blocks.
-    pub fn range_sync_block_response(
+    pub fn range_sync_block_only_response(
        &mut self,
        request_id: Id,
        is_stream_terminator: bool,
@@ -313,7 +313,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
        let info = &mut req.block_blob_info;
        match block_or_blob {
            BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
-            BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
+            BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
        }
        if info.is_finished() {
            // If the request is finished, dequeue everything
@@ -390,7 +390,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
        let (_, info) = entry.get_mut();
        match block_or_blob {
            BlockOrBlob::Block(maybe_block) => info.add_block_response(maybe_block),
-            BlockOrBlob::Sidecar(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
+            BlockOrBlob::Blob(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
        }
        if info.is_finished() {
            // If the request is finished, dequeue everything
@@ -409,83 +409,101 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
        }
    }

    /// Sends a blocks by root request for a single block lookup.
    pub fn single_block_lookup_request(
        &mut self,
        peer_id: PeerId,
        request: BlocksByRootRequest,
    ) -> Result<Id, &'static str> {
-        let request = if self
-            .chain
-            .is_data_availability_check_required()
-            .map_err(|_| "Unable to read slot clock")?
-        {
-            trace!(
-                self.log,
-                "Sending BlobsByRoot Request";
-                "method" => "BlobsByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            unimplemented!("There is no longer such a thing as a single block lookup, since we need to ask for blobs and blocks separately");
-        } else {
-            trace!(
-                self.log,
-                "Sending BlocksByRoot Request";
-                "method" => "BlocksByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            Request::BlocksByRoot(request)
-        };
        let id = self.next_id();
        let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });

+        trace!(
+            self.log,
+            "Sending BlocksByRoot Request";
+            "method" => "BlocksByRoot",
+            "count" => request.block_roots.len(),
+            "peer" => %peer_id
+        );
+
        self.send_network_msg(NetworkMessage::SendRequest {
            peer_id,
-            request,
+            request: Request::BlocksByRoot(request),
            request_id,
        })?;
        Ok(id)
    }

    /// Sends a blobs by root request for a single blob lookup.
    pub fn single_blobs_lookup_request(
        &mut self,
        peer_id: PeerId,
        request: BlobsByRootRequest,
    ) -> Result<Id, &'static str> {
        let id = self.next_id();
        let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });

        trace!(
            self.log,
            "Sending BlobsByRoot Request";
            "method" => "BlobsByRoot",
            "count" => request.blob_ids.len(),
            "peer" => %peer_id
        );

        self.send_network_msg(NetworkMessage::SendRequest {
            peer_id,
            request: Request::BlobsByRoot(request),
            request_id,
        })?;
        Ok(id)
    }

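A blobs-by-root request body is a list of `(block_root, index)` identifiers, so a caller can ask a peer for exactly the blobs it is missing. An illustrative construction with stand-in types (`BlobIdentifier` and `BlobsByRootRequest` in the real code are SSZ containers, and the root type is `Hash256`):

```rust
/// Stand-in for `BlobIdentifier`.
#[derive(Debug, Clone, PartialEq)]
struct BlobId {
    block_root: [u8; 32],
    index: u64,
}

/// Stand-in for `BlobsByRootRequest`.
#[derive(Debug)]
struct BlobsByRootReq {
    blob_ids: Vec<BlobId>,
}

/// Builds a request for just the blob indices we have not yet received.
fn missing_blob_request(block_root: [u8; 32], missing_indices: &[u64]) -> BlobsByRootReq {
    BlobsByRootReq {
        blob_ids: missing_indices
            .iter()
            .map(|&index| BlobId { block_root, index })
            .collect(),
    }
}

fn main() {
    let req = missing_blob_request([0u8; 32], &[0, 2, 3]);
    assert_eq!(req.blob_ids.len(), 3);
    assert_eq!(req.blob_ids[2], BlobId { block_root: [0u8; 32], index: 3 });
}
```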
    /// Sends a blocks by root request for a parent request.
-    pub fn parent_lookup_request(
+    pub fn parent_lookup_block_request(
        &mut self,
        peer_id: PeerId,
        request: BlocksByRootRequest,
-        force_block_request: ForceBlockRequest,
-    ) -> Result<Id, &'static str> {
-        let request = if self
-            .chain
-            .is_data_availability_check_required()
-            .map_err(|_| "Unable to read slot clock")?
-            && matches!(force_block_request, ForceBlockRequest::False)
-        {
-            trace!(
-                self.log,
-                "Sending BlobsByRoot Request";
-                "method" => "BlobsByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            unimplemented!(
-                "Parent requests now need to interleave blocks and blobs or something like that."
-            )
-        } else {
-            trace!(
-                self.log,
-                "Sending BlocksByRoot Request";
-                "method" => "BlocksByRoot",
-                "count" => request.block_roots.len(),
-                "peer" => %peer_id
-            );
-            Request::BlocksByRoot(request)
-        };
+    ) -> Result<BlockRequestId, &'static str> {
        let id = self.next_id();
        let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });

+        trace!(
+            self.log,
+            "Sending parent BlocksByRoot Request";
+            "method" => "BlocksByRoot",
+            "count" => request.block_roots.len(),
+            "peer" => %peer_id
+        );
+
        self.send_network_msg(NetworkMessage::SendRequest {
            peer_id,
-            request,
+            request: Request::BlocksByRoot(request),
            request_id,
        })?;
        Ok(id)
    }

    /// Sends a blobs by root request for a parent request.
    pub fn parent_lookup_blobs_request(
        &mut self,
        peer_id: PeerId,
        request: BlobsByRootRequest,
    ) -> Result<BlobRequestId, &'static str> {
        let id = self.next_id();
        let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });

        trace!(
            self.log,
            "Sending parent BlobsByRoot Request";
            "method" => "BlobsByRoot",
            "count" => request.blob_ids.len(),
            "peer" => %peer_id
        );

        self.send_network_msg(NetworkMessage::SendRequest {
            peer_id,
            request: Request::BlobsByRoot(request),
            request_id,
        })?;
        Ok(id)
    }

@@ -685,7 +685,7 @@ mod tests {
        range.add_peer(&mut rig.cx, local_info, peer1, head_info);
        let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
            RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
            }
            other => panic!("unexpected request {:?}", other),
        };
@@ -704,7 +704,7 @@ mod tests {
        range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
        let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
            RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_only_response(id, true).unwrap(), id)
            }
            other => panic!("unexpected request {:?}", other),
        };

@@ -1952,7 +1952,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
            && last_pruned_epoch.as_u64() + self.get_config().epochs_per_blob_prune
                > end_epoch.as_u64()
        {
-            info!(self.log, "Blobs sidecars are pruned");
+            debug!(self.log, "Blobs sidecars are pruned");
            return Ok(());
        }

@@ -137,4 +137,13 @@ pub trait SlotClock: Send + Sync + Sized + Clone {
        slot_clock.set_current_time(freeze_at);
        slot_clock
    }

+    /// Returns the delay between the start of the slot and when a request for block components
+    /// missed over gossip in the current slot should be made via RPC.
+    ///
+    /// Currently set equal to 1/2 of the `unagg_attestation_production_delay`, but this may be
+    /// changed in the future.
+    fn single_lookup_delay(&self) -> Duration {
+        self.unagg_attestation_production_delay() / 2
+    }
}

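With mainnet-style 12-second slots, unaggregated attestations are produced a third of the way into the slot, so the default above works out to a 2-second wait before falling back to RPC. A small check of that arithmetic (the timing values are assumptions for the sketch, not read from a spec):

```rust
use std::time::Duration;

/// Mirrors the trait default: half of the unaggregated-attestation production delay.
fn single_lookup_delay(unagg_attestation_production_delay: Duration) -> Duration {
    unagg_attestation_production_delay / 2
}

fn main() {
    let slot_duration = Duration::from_secs(12); // assumed mainnet-style slot
    let unagg_delay = slot_duration / 3; // attestations due 4s into the slot
    assert_eq!(single_lookup_delay(unagg_delay), Duration::from_secs(2));
}
```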
@@ -352,7 +352,7 @@ where
        spec: &ChainSpec,
    ) -> Result<Self, Error<T::Error>> {
        // Sanity check: the anchor must lie on an epoch boundary.
-        if anchor_block.slot() % E::slots_per_epoch() != 0 {
+        if anchor_state.slot() % E::slots_per_epoch() != 0 {
            return Err(Error::InvalidAnchor {
                block_slot: anchor_block.slot(),
                state_slot: anchor_state.slot(),
@@ -388,6 +388,7 @@ where
        let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot());

        let proto_array = ProtoArrayForkChoice::new::<E>(
+            current_slot,
            finalized_block_slot,
            finalized_block_state_root,
            *fc_store.justified_checkpoint(),

@@ -80,6 +80,7 @@ impl ForkChoiceTestDefinition {
        let junk_shuffling_id =
            AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
        let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            self.finalized_block_slot,
            self.finalized_block_slot,
            Hash256::zero(),
            self.justified_checkpoint,

@@ -345,6 +345,7 @@ pub struct ProtoArrayForkChoice {
 impl ProtoArrayForkChoice {
     #[allow(clippy::too_many_arguments)]
     pub fn new<E: EthSpec>(
+        current_slot: Slot,
         finalized_block_slot: Slot,
         finalized_block_state_root: Hash256,
         justified_checkpoint: Checkpoint,
@@ -380,7 +381,7 @@ impl ProtoArrayForkChoice {
        };

        proto_array
-            .on_block::<E>(block, finalized_block_slot)
+            .on_block::<E>(block, current_slot)
            .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?;

        Ok(Self {
@@ -983,6 +984,7 @@ mod test_compute_deltas {
        };

        let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            genesis_slot,
            genesis_slot,
            state_root,
            genesis_checkpoint,
@@ -1108,6 +1110,7 @@ mod test_compute_deltas {
        };

        let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            genesis_slot,
            genesis_slot,
            junk_state_root,
            genesis_checkpoint,

@@ -23,7 +23,7 @@ impl From<ArithError> for Error {
 ///
 /// If the root of the supplied `state` is known, then it can be passed as `state_root`. If
 /// `state_root` is `None`, the root of `state` will be computed using a cached tree hash.
-/// Providing the `state_root` makes this function several orders of magniude faster.
+/// Providing the `state_root` makes this function several orders of magnitude faster.
 pub fn per_slot_processing<T: EthSpec>(
     state: &mut BeaconState<T>,
     state_root: Option<Hash256>,

@@ -6,13 +6,15 @@ use kzg::{KzgCommitment, KzgProof};
 use serde_derive::{Deserialize, Serialize};
 use ssz::Encode;
 use ssz_derive::{Decode, Encode};
-use ssz_types::VariableList;
+use ssz_types::{FixedVector, VariableList};
 use std::sync::Arc;
 use test_random_derive::TestRandom;
 use tree_hash_derive::TreeHash;

 /// Container of the data that identifies an individual blob.
-#[derive(Serialize, Deserialize, Encode, Decode, TreeHash, Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(
+    Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash,
+)]
 pub struct BlobIdentifier {
     pub block_root: Hash256,
     pub index: u64,
@@ -73,6 +75,8 @@ impl<T: EthSpec> Ord for BlobSidecar<T> {
 }

 pub type BlobSidecarList<T> = VariableList<Arc<BlobSidecar<T>>, <T as EthSpec>::MaxBlobsPerBlock>;
+pub type FixedBlobSidecarList<T> =
+    FixedVector<Option<Arc<BlobSidecar<T>>>, <T as EthSpec>::MaxBlobsPerBlock>;
 pub type Blobs<T> = VariableList<Blob<T>, <T as EthSpec>::MaxBlobsPerBlock>;

 impl<T: EthSpec> SignedRoot for BlobSidecar<T> {}

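A fixed-length, `Option`-slotted list is convenient for collecting sidecars that arrive out of order: each one lands at its own index and gaps stay `None` until filled. A sketch of the pattern with a plain array in place of `FixedVector` (`MAX_BLOBS_PER_BLOCK = 4` is an assumed value for the sketch):

```rust
use std::sync::Arc;

const MAX_BLOBS_PER_BLOCK: usize = 4;

/// Minimal stand-in for a blob sidecar.
#[derive(Debug)]
struct BlobSidecar {
    index: u64,
}

fn main() {
    // Every slot starts empty; sidecars may arrive in any order.
    let mut fixed: [Option<Arc<BlobSidecar>>; MAX_BLOBS_PER_BLOCK] = Default::default();
    for sidecar in [BlobSidecar { index: 2 }, BlobSidecar { index: 0 }] {
        let i = sidecar.index as usize;
        fixed[i] = Some(Arc::new(sidecar));
    }
    let received = fixed.iter().filter(|slot| slot.is_some()).count();
    assert_eq!(received, 2);
    assert!(fixed[1].is_none()); // index 1 is still missing
}
```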
@@ -105,7 +105,7 @@ pub trait EthSpec:
     /*
      * New in Deneb
      */
-    type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin;
     type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     /*
@@ -255,6 +255,11 @@ pub trait EthSpec:
     fn max_blobs_per_block() -> usize {
         Self::MaxBlobsPerBlock::to_usize()
     }

+    /// Returns the `BYTES_PER_BLOB` constant for this specification.
+    fn bytes_per_blob() -> usize {
+        Self::BytesPerBlob::to_usize()
+    }
 }

 /// Macro to inherit some type values from another EthSpec.

@@ -1,3 +1,4 @@
+use crate::blob_sidecar::BlobIdentifier;
 use crate::*;
 use bls::Signature;
 use derivative::Derivative;
@@ -248,6 +249,38 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
     pub fn canonical_root(&self) -> Hash256 {
         self.message().tree_hash_root()
     }

+    pub fn num_expected_blobs(&self) -> usize {
+        self.message()
+            .body()
+            .blob_kzg_commitments()
+            .map(|c| c.len())
+            .unwrap_or(0)
+    }
+
+    pub fn get_expected_blob_ids(&self, block_root: Option<Hash256>) -> Vec<BlobIdentifier> {
+        self.get_filtered_blob_ids(block_root, |_, _| true)
+    }
+
+    /// If the filter returns `true`, the id for the corresponding index and root will be included.
+    pub fn get_filtered_blob_ids(
+        &self,
+        block_root: Option<Hash256>,
+        filter: impl Fn(usize, Hash256) -> bool,
+    ) -> Vec<BlobIdentifier> {
+        let block_root = block_root.unwrap_or_else(|| self.canonical_root());
+        let num_blobs_expected = self.num_expected_blobs();
+        let mut blob_ids = Vec::with_capacity(num_blobs_expected);
+        for i in 0..num_blobs_expected {
+            if filter(i, block_root) {
+                blob_ids.push(BlobIdentifier {
+                    block_root,
+                    index: i as u64,
+                });
+            }
+        }
+        blob_ids
+    }
 }

 // We can convert pre-Bellatrix blocks without payloads into blocks with payloads.

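`get_filtered_blob_ids` walks the expected blob indices and keeps only those the filter accepts, which lets a caller request exactly the components it has not already seen. A standalone restatement with stand-in types (the "already received" filter below is an assumed usage, not code from this diff):

```rust
/// Stand-in for `BlobIdentifier`.
#[derive(Debug, PartialEq)]
struct BlobIdentifier {
    block_root: [u8; 32],
    index: u64,
}

/// Emit an id for every expected blob index that passes the filter.
fn filtered_blob_ids(
    block_root: [u8; 32],
    num_blobs_expected: usize,
    filter: impl Fn(usize) -> bool,
) -> Vec<BlobIdentifier> {
    let mut blob_ids = Vec::with_capacity(num_blobs_expected);
    for i in 0..num_blobs_expected {
        if filter(i) {
            blob_ids.push(BlobIdentifier { block_root, index: i as u64 });
        }
    }
    blob_ids
}

fn main() {
    // Keep only the blobs not already received (indices 1 and 3 are missing).
    let have = [true, false, true, false];
    let ids = filtered_blob_ids([0u8; 32], 4, |i| !have[i]);
    assert_eq!(ids.len(), 2);
    assert_eq!(ids[0].index, 1);
    assert!(ids.iter().all(|id| id.block_root == [0u8; 32]));
}
```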
@@ -4,8 +4,21 @@ use smallvec::smallvec;

 impl<N: Unsigned + Clone> TestRandom for BitList<N> {
     fn random_for_test(rng: &mut impl RngCore) -> Self {
-        let mut raw_bytes = smallvec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)];
+        let initial_len = std::cmp::max(1, (N::to_usize() + 7) / 8);
+        let mut raw_bytes = smallvec![0; initial_len];
         rng.fill_bytes(&mut raw_bytes);

+        let non_zero_bytes = raw_bytes
+            .iter()
+            .enumerate()
+            .rev()
+            .find_map(|(i, byte)| (*byte > 0).then_some(i + 1))
+            .unwrap_or(0);
+
+        if non_zero_bytes < initial_len {
+            raw_bytes.truncate(non_zero_bytes);
+        }
+
         Self::from_bytes(raw_bytes).expect("we generate a valid BitList")
     }
 }
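The added truncation trims trailing zero bytes so the random bytes decode as a `BitList` whose length marker is valid. The trimming step in isolation, as a runnable sketch:

```rust
/// Drop trailing zero bytes, keeping everything up to the highest non-zero byte.
fn trim_trailing_zero_bytes(mut raw_bytes: Vec<u8>) -> Vec<u8> {
    let non_zero_bytes = raw_bytes
        .iter()
        .enumerate()
        .rev()
        .find_map(|(i, byte)| (*byte > 0).then_some(i + 1))
        .unwrap_or(0);
    raw_bytes.truncate(non_zero_bytes);
    raw_bytes
}

fn main() {
    assert_eq!(trim_trailing_zero_bytes(vec![0b1010_0001, 0, 0]), vec![0b1010_0001]);
    assert_eq!(trim_trailing_zero_bytes(vec![0, 0]), Vec::<u8>::new());
    assert_eq!(trim_trailing_zero_bytes(vec![0, 7]), vec![0, 7]); // interior zeros kept
}
```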