Merge pull request #3751 from realbigsean/eip4844-devnet-v3
Eip4844 devnet v3
commit 254cad369e
Cargo.lock (generated, 35 lines changed)
@@ -412,6 +412,7 @@ dependencies = [
  "hex",
  "int_to_bytes",
  "itertools",
+ "kzg",
  "lazy_static",
  "lighthouse_metrics",
  "logging",
@@ -717,6 +718,15 @@ dependencies = [
  "pkg-config",
 ]
 
+[[package]]
+name = "c-kzg"
+version = "0.1.0"
+source = "git+https://github.com/pawanjay176/c-kzg-4844?rev=669a13800a8a0d094c5387db58e06936ef194a25#669a13800a8a0d094c5387db58e06936ef194a25"
+dependencies = [
+ "hex",
+ "libc",
+]
+
 [[package]]
 name = "cached_tree_hash"
 version = "0.1.0"
@@ -3170,6 +3180,24 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838"
 
+[[package]]
+name = "kzg"
+version = "0.1.0"
+dependencies = [
+ "c-kzg",
+ "derivative",
+ "eth2_hashing",
+ "eth2_serde_utils",
+ "eth2_ssz",
+ "eth2_ssz_derive",
+ "ethereum-types 0.12.1",
+ "hex",
+ "rand 0.7.3",
+ "serde",
+ "serde_derive",
+ "tree_hash",
+]
+
 [[package]]
 name = "lazy_static"
 version = "1.4.0"
@@ -3200,6 +3228,7 @@ dependencies = [
  "environment",
  "eth1_test_rig",
  "eth2",
+ "eth2_hashing",
  "eth2_network_config",
  "eth2_ssz",
  "eth2_wallet",
@@ -5701,12 +5730,11 @@ dependencies = [
 
 [[package]]
 name = "serde-big-array"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62"
+checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4"
 dependencies = [
  "serde",
- "serde_derive",
 ]
 
 [[package]]
@@ -7123,6 +7151,7 @@ dependencies = [
  "hex",
  "int_to_bytes",
  "itertools",
+ "kzg",
  "lazy_static",
  "log",
  "maplit",
Cargo.toml
@@ -63,10 +63,12 @@ members = [
     "consensus/tree_hash_derive",
 
     "crypto/bls",
+    "crypto/kzg",
     "crypto/eth2_hashing",
     "crypto/eth2_key_derivation",
     "crypto/eth2_keystore",
     "crypto/eth2_wallet",
+    "crypto/kzg",
 
     "lcli",
 
Dockerfile
@@ -1,5 +1,5 @@
 FROM rust:1.62.1-bullseye AS builder
-RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
+RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler
 COPY . lighthouse
 ARG FEATURES
 ENV FEATURES $FEATURES
Makefile (4 lines changed)
@@ -21,7 +21,7 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx
 CROSS_PROFILE ?= release
 
 # List of features to use when running EF tests.
-EF_TEST_FEATURES ?= beacon_chain/withdrawals,beacon_chain/withdrawals-processing
+EF_TEST_FEATURES ?= withdrawals,withdrawals-processing
 
 # Cargo profile for regular builds.
 PROFILE ?= release
@@ -38,7 +38,7 @@ install:
 
 # Builds the lcli binary in release (optimized).
 install-lcli:
-	cargo install --path lcli --force --locked --features "$(FEATURES)" --profile "$(PROFILE)"
+	cargo install --path lcli --force --locked --features "$(FEATURES),$(EF_TEST_FEATURES)" --profile "$(PROFILE)"
 
 # The following commands use `cross` to build a cross-compile.
 #
beacon_node/beacon_chain/Cargo.toml
@@ -55,6 +55,7 @@ lru = "0.7.1"
 tempfile = "3.1.0"
 bitvec = "0.20.4"
 bls = { path = "../../crypto/bls" }
+kzg = { path = "../../crypto/kzg" }
 safe_arith = { path = "../../consensus/safe_arith" }
 fork_choice = { path = "../../consensus/fork_choice" }
 task_executor = { path = "../../common/task_executor" }
beacon_node/beacon_chain/src/beacon_chain.rs
@@ -6,7 +6,7 @@ use crate::attestation_verification::{
 use crate::attester_cache::{AttesterCache, AttesterCacheKey};
 use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
 use crate::beacon_proposer_cache::BeaconProposerCache;
-use crate::blob_verification::{BlobError, VerifiedBlobsSidecar};
+use crate::blob_cache::BlobCache;
 use crate::block_times_cache::BlockTimesCache;
 use crate::block_verification::{
     check_block_is_finalized_descendant, check_block_relevancy, get_block_root,
@@ -52,13 +52,13 @@ use crate::validator_monitor::{
     HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS,
 };
 use crate::validator_pubkey_cache::ValidatorPubkeyCache;
-use crate::BeaconForkChoiceStore;
 use crate::BeaconSnapshot;
+use crate::{kzg_utils, BeaconForkChoiceStore};
 use crate::{metrics, BeaconChainError};
 use eth2::types::{EventKind, SseBlock, SyncDuty};
 use execution_layer::{
     BlockProposalContents, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition,
-    PayloadAttributes, PayloadStatus,
+    PayloadAttributes, PayloadAttributesV2, PayloadStatus,
 };
 pub use fork_choice::CountUnrealized;
 use fork_choice::{
@@ -103,12 +103,14 @@ use store::{
 use task_executor::{ShutdownReason, TaskExecutor};
 use tree_hash::TreeHash;
 use types::beacon_state::CloneConfig;
+use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
+use types::signed_block_and_blobs::BlockWrapper;
 use types::*;
 
 pub type ForkChoiceError = fork_choice::Error<crate::ForkChoiceStoreError>;
 
 /// Alias to appease clippy.
-type HashBlockTuple<E> = (Hash256, Arc<SignedBeaconBlock<E>>);
+type HashBlockTuple<E> = (Hash256, BlockWrapper<E>);
 
 /// The time-out before failure during an operation to take a read/write RwLock on the block
 /// processing cache.
@@ -397,6 +399,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub slasher: Option<Arc<Slasher<T::EthSpec>>>,
     /// Provides monitoring of a set of explicitly defined validators.
     pub validator_monitor: RwLock<ValidatorMonitor<T::EthSpec>>,
+    pub blob_cache: BlobCache<T::EthSpec>,
+    pub kzg: Option<Arc<kzg::Kzg>>,
 }
 
 type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>);
@@ -923,6 +927,28 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         Ok(self.get_block(block_root).await?.map(Arc::new))
     }
 
+    pub async fn get_block_and_blobs_checking_early_attester_cache(
+        &self,
+        block_root: &Hash256,
+    ) -> Result<
+        (
+            Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+            Option<Arc<BlobsSidecar<T::EthSpec>>>,
+        ),
+        Error,
+    > {
+        if let (Some(block), Some(blobs)) = (
+            self.early_attester_cache.get_block(*block_root),
+            self.early_attester_cache.get_blobs(*block_root),
+        ) {
+            return Ok((Some(block), Some(blobs)));
+        }
+        Ok((
+            self.get_block(block_root).await?.map(Arc::new),
+            self.get_blobs(block_root).await?.map(Arc::new),
+        ))
+    }
+
     /// Returns the block at the given root, if any.
     ///
     /// ## Errors
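
A note on the fallback above: only a complete early-attester-cache hit (block and blobs together) short-circuits; a partial hit falls through to the database for both lookups, so the two answers always come from the same source. A self-contained sketch of that pattern, with hypothetical map-backed stand-ins for the cache and the store:

use std::collections::HashMap;

/// Hypothetical stand-ins for the early-attester cache and the hot database,
/// both keyed by block root (a u64 here instead of a Hash256).
struct Node {
    early_blocks: HashMap<u64, String>,
    early_blobs: HashMap<u64, String>,
    db_blocks: HashMap<u64, String>,
    db_blobs: HashMap<u64, String>,
}

impl Node {
    /// Mirrors the guard in the diff: only a complete cache hit (block AND
    /// blobs) returns early; otherwise both lookups fall through to the DB.
    fn get_block_and_blobs(&self, root: u64) -> (Option<String>, Option<String>) {
        if let (Some(block), Some(blobs)) =
            (self.early_blocks.get(&root), self.early_blobs.get(&root))
        {
            return (Some(block.clone()), Some(blobs.clone()));
        }
        (
            self.db_blocks.get(&root).cloned(),
            self.db_blobs.get(&root).cloned(),
        )
    }
}

fn main() {
    let node = Node {
        early_blocks: HashMap::new(),
        early_blobs: HashMap::new(),
        db_blocks: HashMap::from([(1, "block".to_string())]),
        db_blobs: HashMap::new(),
    };
    // Partial data is still served: the block comes from the DB, blobs are None.
    assert_eq!(node.get_block_and_blobs(1), (Some("block".to_string()), None));
}
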
@@ -989,6 +1015,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .map(Some)
     }
 
+    /// Returns the blobs at the given root, if any.
+    ///
+    /// ## Errors
+    ///
+    /// May return a database error.
+    pub async fn get_blobs(
+        &self,
+        block_root: &Hash256,
+    ) -> Result<Option<BlobsSidecar<T::EthSpec>>, Error> {
+        Ok(self.store.get_blobs(block_root)?)
+    }
+
     pub fn get_blinded_block(
         &self,
         block_root: &Hash256,
@@ -1791,23 +1829,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         })
     }
 
-    /// Accepts some `BlobsSidecar` received over from the network and attempts to verify it,
-    /// returning `Ok(_)` if it is valid to be (re)broadcast on the gossip network.
-    pub fn verify_blobs_sidecar_for_gossip<'a>(
-        &self,
-        blobs_sidecar: &'a BlobsSidecar<T::EthSpec>,
-    ) -> Result<VerifiedBlobsSidecar<'a, T>, BlobError> {
-        metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_REQUESTS);
-        let _timer = metrics::start_timer(&metrics::BLOBS_SIDECAR_GOSSIP_VERIFICATION_TIMES);
-        VerifiedBlobsSidecar::verify(blobs_sidecar, self).map(|v| {
-            if let Some(_event_handler) = self.event_handler.as_ref() {
-                // TODO: Handle sse events
-            }
-            metrics::inc_counter(&metrics::BLOBS_SIDECAR_PROCESSING_SUCCESSES);
-            v
-        })
-    }
-
     /// Accepts some attestation-type object and attempts to verify it in the context of fork
     /// choice. If it is valid it is applied to `self.fork_choice`.
     ///
@@ -2258,7 +2279,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// This method is potentially long-running and should not run on the core executor.
     pub fn filter_chain_segment(
         self: &Arc<Self>,
-        chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        chain_segment: Vec<BlockWrapper<T::EthSpec>>,
     ) -> Result<Vec<HashBlockTuple<T::EthSpec>>, ChainSegmentResult<T::EthSpec>> {
         // This function will never import any blocks.
         let imported_blocks = 0;
@@ -2270,19 +2291,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let children = chain_segment
             .iter()
             .skip(1)
-            .map(|block| (block.parent_root(), block.slot()))
+            .map(|block| (block.block().parent_root(), block.slot()))
             .collect::<Vec<_>>();
 
         for (i, block) in chain_segment.into_iter().enumerate() {
             // Ensure the block is the correct structure for the fork at `block.slot()`.
-            if let Err(e) = block.fork_name(&self.spec) {
+            if let Err(e) = block.block().fork_name(&self.spec) {
                 return Err(ChainSegmentResult::Failed {
                     imported_blocks,
                     error: BlockError::InconsistentFork(e),
                 });
             }
 
-            let block_root = get_block_root(&block);
+            let block_root = get_block_root(block.block());
 
             if let Some((child_parent_root, child_slot)) = children.get(i) {
                 // If this block has a child in this chain segment, ensure that its parent root matches
@@ -2306,7 +2327,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 }
             }
 
-            match check_block_relevancy(&block, block_root, self) {
+            match check_block_relevancy(block.block(), block_root, self) {
                 // If the block is relevant, add it to the filtered chain segment.
                 Ok(_) => filtered_chain_segment.push((block_root, block)),
                 // If the block is already known, simply ignore this block.
@@ -2364,7 +2385,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// `Self::process_block`.
     pub async fn process_chain_segment(
         self: &Arc<Self>,
-        chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        chain_segment: Vec<BlockWrapper<T::EthSpec>>,
         count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> ChainSegmentResult<T::EthSpec> {
@@ -2465,7 +2486,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Returns an `Err` if the given block was invalid, or an error was encountered during
     pub async fn verify_block_for_gossip(
         self: &Arc<Self>,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
     ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
         let chain = self.clone();
         self.task_executor
@@ -2532,8 +2553,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // Increment the Prometheus counter for block processing requests.
         metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS);
 
-        // Clone the block so we can provide it to the event handler.
-        let block = unverified_block.block().clone();
+        let slot = unverified_block.block().slot();
 
         // A small closure to group the verification and import errors.
         let chain = self.clone();
@@ -2556,7 +2576,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     self.log,
                     "Beacon block imported";
                     "block_root" => ?block_root,
-                    "block_slot" => %block.slot(),
+                    "block_slot" => slot,
                 );
 
                 // Increment the Prometheus counter for block processing successes.
@@ -2684,7 +2704,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     #[allow(clippy::too_many_arguments)]
     fn import_block(
         &self,
-        signed_block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        signed_block: BlockWrapper<T::EthSpec>,
         block_root: Hash256,
         mut state: BeaconState<T::EthSpec>,
         confirmed_state_roots: Vec<Hash256>,
@@ -2738,7 +2758,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let mut fork_choice = self.canonical_head.fork_choice_write_lock();
 
         // Do not import a block that doesn't descend from the finalized root.
-        check_block_is_finalized_descendant(self, &fork_choice, &signed_block)?;
+        let signed_block = check_block_is_finalized_descendant(self, &fork_choice, signed_block)?;
+        let block = signed_block.message();
 
         // Register the new block with the fork choice service.
         {
@@ -2841,12 +2862,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // If the write fails, revert fork choice to the version from disk, else we can
         // end up with blocks in fork choice that are missing from disk.
         // See https://github.com/sigp/lighthouse/issues/2028
+        let (signed_block, blobs) = signed_block.deconstruct();
+        let block = signed_block.message();
         let mut ops: Vec<_> = confirmed_state_roots
             .into_iter()
             .map(StoreOp::DeleteStateTemporaryFlag)
             .collect();
         ops.push(StoreOp::PutBlock(block_root, signed_block.clone()));
         ops.push(StoreOp::PutState(block.state_root(), &state));
+
+        if let Some(blobs) = blobs {
+            ops.push(StoreOp::PutBlobs(block_root, blobs));
+        };
         let txn_lock = self.store.hot_db.begin_rw_transaction();
 
         kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?);
@@ -3604,9 +3631,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             None
         };
 
-        //FIXME(sean) waiting for the BN<>EE api for this to stabilize
-        let kzg_commitments = vec![];
-
         // Part 3/3 (blocking)
         //
         // Perform the final steps of combining all the parts and computing the state root.
@@ -3617,7 +3641,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 chain.complete_partial_beacon_block(
                     partial_beacon_block,
                     block_contents,
-                    kzg_commitments,
                     verification,
                 )
             },
@@ -3873,7 +3896,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         &self,
         partial_beacon_block: PartialBeaconBlock<T::EthSpec, Payload>,
         block_contents: Option<BlockProposalContents<T::EthSpec, Payload>>,
-        kzg_commitments: Vec<KzgCommitment>,
         verification: ProduceBlockVerification,
     ) -> Result<BeaconBlockAndState<T::EthSpec, Payload>, BlockProductionError> {
         let PartialBeaconBlock {
@@ -3898,8 +3920,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             bls_to_execution_changes,
         } = partial_beacon_block;
 
-        let inner_block = match &state {
-            BeaconState::Base(_) => BeaconBlock::Base(BeaconBlockBase {
+        let (inner_block, blobs_opt) = match &state {
+            BeaconState::Base(_) => (
+                BeaconBlock::Base(BeaconBlockBase {
                 slot,
                 proposer_index,
                 parent_root,
@@ -3916,7 +3939,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     _phantom: PhantomData,
                 },
             }),
-            BeaconState::Altair(_) => BeaconBlock::Altair(BeaconBlockAltair {
+                None,
+            ),
+            BeaconState::Altair(_) => (
+                BeaconBlock::Altair(BeaconBlockAltair {
                 slot,
                 proposer_index,
                 parent_root,
@@ -3935,7 +3961,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     _phantom: PhantomData,
                 },
             }),
-            BeaconState::Merge(_) => BeaconBlock::Merge(BeaconBlockMerge {
+                None,
+            ),
+            BeaconState::Merge(_) => {
+                let (payload, _, _) = block_contents
+                    .ok_or(BlockProductionError::MissingExecutionPayload)?
+                    .deconstruct();
+                (
+                    BeaconBlock::Merge(BeaconBlockMerge {
                 slot,
                 proposer_index,
                 parent_root,
@@ -3951,14 +3984,21 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 voluntary_exits: voluntary_exits.into(),
                 sync_aggregate: sync_aggregate
                     .ok_or(BlockProductionError::MissingSyncAggregate)?,
-                execution_payload: block_contents
-                    .ok_or(BlockProductionError::MissingExecutionPayload)?
-                    .to_payload()
+                execution_payload: payload
                     .try_into()
                     .map_err(|_| BlockProductionError::InvalidPayloadFork)?,
                 },
             }),
-            BeaconState::Capella(_) => BeaconBlock::Capella(BeaconBlockCapella {
+                    None,
+                )
+            }
+            BeaconState::Capella(_) => {
+                let (payload, _, _) = block_contents
+                    .ok_or(BlockProductionError::MissingExecutionPayload)?
+                    .deconstruct();
+
+                (
+                    BeaconBlock::Capella(BeaconBlockCapella {
                 slot,
                 proposer_index,
                 parent_root,
@@ -3974,16 +4014,23 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 voluntary_exits: voluntary_exits.into(),
                 sync_aggregate: sync_aggregate
                     .ok_or(BlockProductionError::MissingSyncAggregate)?,
-                execution_payload: block_contents
-                    .ok_or(BlockProductionError::MissingExecutionPayload)?
-                    .to_payload()
+                execution_payload: payload
                     .try_into()
                     .map_err(|_| BlockProductionError::InvalidPayloadFork)?,
                 #[cfg(feature = "withdrawals")]
                 bls_to_execution_changes: bls_to_execution_changes.into(),
                 },
             }),
-            BeaconState::Eip4844(_) => BeaconBlock::Eip4844(BeaconBlockEip4844 {
+                    None,
+                )
+            }
+            BeaconState::Eip4844(_) => {
+                let (payload, kzg_commitments, blobs) = block_contents
+                    .ok_or(BlockProductionError::MissingExecutionPayload)?
+                    .deconstruct();
+
+                (
+                    BeaconBlock::Eip4844(BeaconBlockEip4844 {
                 slot,
                 proposer_index,
                 parent_root,
@@ -3999,17 +4046,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 voluntary_exits: voluntary_exits.into(),
                 sync_aggregate: sync_aggregate
                     .ok_or(BlockProductionError::MissingSyncAggregate)?,
-                execution_payload: block_contents
-                    .ok_or(BlockProductionError::MissingExecutionPayload)?
-                    .to_payload()
+                execution_payload: payload
                     .try_into()
                     .map_err(|_| BlockProductionError::InvalidPayloadFork)?,
                 #[cfg(feature = "withdrawals")]
                 bls_to_execution_changes: bls_to_execution_changes.into(),
-                //FIXME(sean) get blobs
-                blob_kzg_commitments: VariableList::from(kzg_commitments),
+                blob_kzg_commitments: kzg_commitments
+                    .ok_or(BlockProductionError::InvalidPayloadFork)?,
                 },
             }),
+                blobs,
+            )
+            }
         };
 
         let block = SignedBeaconBlock::from_block(
@@ -4036,8 +4084,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao,
             ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification,
         };
 
         // Use a context without block root or proposer index so that both are checked.
-        let mut ctxt = ConsensusContext::new(block.slot());
+        let mut ctxt = ConsensusContext::new(block.slot())
+            //FIXME(sean) This is a hack beacuse `valdiate blobs sidecar requires the block root`
+            // which we won't have until after the state root is calculated.
+            .set_blobs_sidecar_validated(true);
 
         per_block_processing(
             &mut state,
             &block,
@@ -4055,6 +4108,40 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let (mut block, _) = block.deconstruct();
         *block.state_root_mut() = state_root;
 
+        //FIXME(sean)
+        // - add a new timer for processing here
+        if let Some(blobs) = blobs_opt {
+            let kzg = if let Some(kzg) = &self.kzg {
+                kzg
+            } else {
+                return Err(BlockProductionError::TrustedSetupNotInitialized);
+            };
+            let kzg_aggregated_proof =
+                kzg_utils::compute_aggregate_kzg_proof::<T::EthSpec>(&kzg, &blobs)
+                    .map_err(|e| BlockProductionError::KzgError(e))?;
+            let beacon_block_root = block.canonical_root();
+            let expected_kzg_commitments = block.body().blob_kzg_commitments().map_err(|_| {
+                BlockProductionError::InvalidBlockVariant(
+                    "EIP4844 block does not contain kzg commitments".to_string(),
+                )
+            })?;
+            let blobs_sidecar = BlobsSidecar {
+                beacon_block_slot: slot,
+                beacon_block_root,
+                blobs,
+                kzg_aggregated_proof,
+            };
+            kzg_utils::validate_blobs_sidecar(
+                &kzg,
+                slot,
+                beacon_block_root,
+                expected_kzg_commitments,
+                &blobs_sidecar,
+            )
+            .map_err(BlockProductionError::KzgError)?;
+            self.blob_cache.put(beacon_block_root, blobs_sidecar);
+        }
+
         metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES);
 
         trace!(
@@ -5331,6 +5418,33 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
 
         gossip_attested || block_attested || aggregated || produced_block
     }
+
+    /// The epoch at which we require a data availability check in block processing.
+    /// `None` if the `Eip4844` fork is disabled.
+    pub fn data_availability_boundary(&self) -> Option<Epoch> {
+        self.spec
+            .eip4844_fork_epoch
+            .map(|fork_epoch| {
+                self.epoch().ok().map(|current_epoch| {
+                    std::cmp::max(
+                        fork_epoch,
+                        current_epoch.saturating_sub(*MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS),
+                    )
+                })
+            })
+            .flatten()
+    }
+
+    /// Returns `true` if we are at or past the `Eip4844` fork. This will always return `false` if
+    /// the `Eip4844` fork is disabled.
+    pub fn is_data_availability_check_required(&self) -> Result<bool, Error> {
+        let current_epoch = self.epoch()?;
+        Ok(self
+            .spec
+            .eip4844_fork_epoch
+            .map(|fork_epoch| fork_epoch <= current_epoch)
+            .unwrap_or(false))
+    }
 }
 
 impl<T: BeaconChainTypes> Drop for BeaconChain<T> {
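
The boundary returned by `data_availability_boundary` is the later of the fork epoch and `current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`. A standalone sketch of that arithmetic over plain u64 epochs; the 4096 value is an assumption taken from the consensus spec, since the diff only shows the constant being imported:

/// Sketch of `data_availability_boundary` over plain u64 epochs.
/// Returns `None` when the EIP-4844 fork is disabled.
fn data_availability_boundary(eip4844_fork_epoch: Option<u64>, current_epoch: u64) -> Option<u64> {
    // Assumed spec value; the real code dereferences MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS.
    const MIN_EPOCHS: u64 = 4096;
    eip4844_fork_epoch.map(|fork_epoch| {
        // Clamp to the fork epoch so the boundary never reaches back before the fork.
        std::cmp::max(fork_epoch, current_epoch.saturating_sub(MIN_EPOCHS))
    })
}

fn main() {
    // Early in the fork's life the boundary sits at the fork epoch itself...
    assert_eq!(data_availability_boundary(Some(100), 200), Some(100));
    // ...and once enough epochs pass, it trails the head by MIN_EPOCHS.
    assert_eq!(data_availability_boundary(Some(100), 10_000), Some(5_904));
    // Fork disabled: no availability boundary at all.
    assert_eq!(data_availability_boundary(None, 10_000), None);
}
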
beacon_node/beacon_chain/src/blob_cache.rs (new file, 32 lines)
@@ -0,0 +1,32 @@
+use lru::LruCache;
+use parking_lot::Mutex;
+use tree_hash::TreeHash;
+use types::{BlobsSidecar, EthSpec, ExecutionPayload, Hash256};
+
+pub const DEFAULT_BLOB_CACHE_SIZE: usize = 10;
+
+/// A cache blobs by beacon block root.
+pub struct BlobCache<T: EthSpec> {
+    blobs: Mutex<LruCache<BlobCacheId, BlobsSidecar<T>>>,
+}
+
+#[derive(Hash, PartialEq, Eq)]
+struct BlobCacheId(Hash256);
+
+impl<T: EthSpec> Default for BlobCache<T> {
+    fn default() -> Self {
+        BlobCache {
+            blobs: Mutex::new(LruCache::new(DEFAULT_BLOB_CACHE_SIZE)),
+        }
+    }
+}
+
+impl<T: EthSpec> BlobCache<T> {
+    pub fn put(&self, beacon_block: Hash256, blobs: BlobsSidecar<T>) -> Option<BlobsSidecar<T>> {
+        self.blobs.lock().put(BlobCacheId(beacon_block), blobs)
+    }
+
+    pub fn pop(&self, root: &Hash256) -> Option<BlobsSidecar<T>> {
+        self.blobs.lock().pop(&BlobCacheId(*root))
+    }
+}
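
The new file is a thin wrapper: one LRU keyed by block root behind a Mutex, so `&self` methods can mutate it from the proposal path. A self-contained sketch of the same pattern with a generic payload, assuming the lru 0.7-style `LruCache::new(usize)` constructor that this crate's `lru = "0.7.1"` pin implies:

use lru::LruCache;
use parking_lot::Mutex;

/// Minimal stand-in for the blob-cache pattern: an LRU keyed by a 32-byte
/// root, wrapped in a mutex so shared references can still insert and remove.
struct RootCache<V> {
    inner: Mutex<LruCache<[u8; 32], V>>,
}

impl<V> RootCache<V> {
    fn new(capacity: usize) -> Self {
        Self { inner: Mutex::new(LruCache::new(capacity)) }
    }

    fn put(&self, root: [u8; 32], value: V) -> Option<V> {
        self.inner.lock().put(root, value)
    }

    fn pop(&self, root: &[u8; 32]) -> Option<V> {
        self.inner.lock().pop(root)
    }
}

fn main() {
    let cache: RootCache<Vec<u8>> = RootCache::new(10);
    let root = [0u8; 32];
    cache.put(root, vec![1, 2, 3]);
    // Popping removes the entry, mirroring the produce-then-publish flow
    // in which a sidecar is cached at proposal time and drained once.
    assert_eq!(cache.pop(&root), Some(vec![1, 2, 3]));
    assert_eq!(cache.pop(&root), None);
}
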
beacon_node/beacon_chain/src/blob_verification.rs
@@ -1,11 +1,13 @@
-use derivative::Derivative;
 use slot_clock::SlotClock;
 
 use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
-use crate::BeaconChainError;
+use crate::{kzg_utils, BeaconChainError};
 use bls::PublicKey;
-use types::{consts::eip4844::BLS_MODULUS, BeaconStateError, BlobsSidecar, Slot};
+use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
+use types::consts::eip4844::BLS_MODULUS;
+use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions};
 
+#[derive(Debug)]
 pub enum BlobError {
     /// The blob sidecar is from a slot that is later than the current slot (with respect to the
     /// gossip clock disparity).
@@ -17,15 +19,15 @@ pub enum BlobError {
         message_slot: Slot,
         latest_permissible_slot: Slot,
     },
-    /// The blob sidecar is from a slot that is prior to the earliest permissible slot (with
-    /// respect to the gossip clock disparity).
+    /// The blob sidecar has a different slot than the block.
     ///
     /// ## Peer scoring
     ///
    /// Assuming the local clock is correct, the peer has sent an invalid message.
-    PastSlot {
-        message_slot: Slot,
-        earliest_permissible_slot: Slot,
+    SlotMismatch {
+        blob_slot: Slot,
+        block_slot: Slot,
     },
 
     /// The blob sidecar contains an incorrectly formatted `BLSFieldElement` > `BLS_MODULUS`.
@@ -34,7 +36,9 @@ pub enum BlobError {
     /// ## Peer scoring
     ///
     /// The peer has sent an invalid message.
-    BlobOutOfRange { blob_index: usize },
+    BlobOutOfRange {
+        blob_index: usize,
+    },
 
     /// The blob sidecar contains a KZGCommitment that is not a valid G1 point on
     /// the bls curve.
@@ -50,13 +54,31 @@ pub enum BlobError {
     /// The signature on the blob sidecar invalid and the peer is faulty.
     ProposalSignatureInvalid,
 
+    /// No kzg ccommitment associated with blob sidecar.
+    KzgCommitmentMissing,
+
+    /// No transactions in block
+    TransactionsMissing,
+
+    /// Blob transactions in the block do not correspond to the kzg commitments.
+    TransactionCommitmentMismatch,
+
+    TrustedSetupNotInitialized,
+
+    InvalidKzgProof,
+
+    KzgError(kzg::Error),
+
     /// A blob sidecar for this proposer and slot has already been observed.
     ///
     /// ## Peer scoring
     ///
     /// The `proposer` has already proposed a sidecar at this slot. The existing sidecar may or may not
     /// be equal to the given sidecar.
-    RepeatSidecar { proposer: u64, slot: Slot },
+    RepeatSidecar {
+        proposer: u64,
+        slot: Slot,
+    },
 
     /// There was an error whilst processing the sync contribution. It is not known if it is valid or invalid.
     ///
@@ -79,19 +101,14 @@ impl From<BeaconStateError> for BlobError {
     }
 }
 
-/// A wrapper around a `BlobsSidecar` that indicates it has been verified w.r.t the corresponding
-/// `SignedBeaconBlock`.
-#[derive(Derivative)]
-#[derivative(Debug(bound = "T: BeaconChainTypes"))]
-pub struct VerifiedBlobsSidecar<'a, T: BeaconChainTypes> {
-    pub blob_sidecar: &'a BlobsSidecar<T::EthSpec>,
-}
-
-impl<'a, T: BeaconChainTypes> VerifiedBlobsSidecar<'a, T> {
-    pub fn verify(
-        blob_sidecar: &'a BlobsSidecar<T::EthSpec>,
+pub fn validate_blob_for_gossip<T: BeaconChainTypes>(
+    blob_sidecar: &BlobsSidecar<T::EthSpec>,
+    kzg_commitments: &[KzgCommitment],
+    transactions: &Transactions<T::EthSpec>,
+    block_slot: Slot,
+    block_root: Hash256,
     chain: &BeaconChain<T>,
-    ) -> Result<Self, BlobError> {
+) -> Result<(), BlobError> {
     let blob_slot = blob_sidecar.beacon_block_slot;
     // Do not gossip or process blobs from future or past slots.
     let latest_permissible_slot = chain
@@ -105,32 +122,53 @@ pub fn validate_blob_for_gossip<T: BeaconChainTypes>(
         });
     }
 
-    let earliest_permissible_slot = chain
-        .slot_clock
-        .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
-        .ok_or(BeaconChainError::UnableToReadSlot)?;
-    if blob_slot > earliest_permissible_slot {
-        return Err(BlobError::PastSlot {
-            message_slot: earliest_permissible_slot,
-            earliest_permissible_slot: blob_slot,
+    if blob_slot != block_slot {
+        return Err(BlobError::SlotMismatch {
+            blob_slot,
+            block_slot,
         });
     }
 
-    // Verify that blobs are properly formatted
-    //TODO: add the check while constructing a Blob type from bytes instead of after
-    for (i, blob) in blob_sidecar.blobs.iter().enumerate() {
-        if blob.iter().any(|b| *b >= *BLS_MODULUS) {
-            return Err(BlobError::BlobOutOfRange { blob_index: i });
-        }
-    }
-
-    // Verify that the KZG proof is a valid G1 point
-    if PublicKey::deserialize(&blob_sidecar.kzg_aggregate_proof.0).is_err() {
+    // Verify that kzg commitments in the block are valid BLS g1 points
+    for commitment in kzg_commitments {
+        if kzg::bytes_to_g1(&commitment.0).is_err() {
             return Err(BlobError::InvalidKZGCommitment);
         }
     }
 
-    // TODO: Check that we have not already received a sidecar with a valid signature for this slot.
-
-    Ok(Self { blob_sidecar })
+    // Validate commitments agains transactions in the block.
+    if verify_kzg_commitments_against_transactions::<T::EthSpec>(transactions, kzg_commitments)
+        .is_err()
+    {
+        return Err(BlobError::TransactionCommitmentMismatch);
+    }
+
+    // Check that blobs are < BLS_MODULUS
+    // TODO(pawan): Add this check after there's some resolution of this
+    // issue https://github.com/ethereum/c-kzg-4844/issues/11
+    // As of now, `bytes_to_bls_field` does not fail in the c-kzg library if blob >= BLS_MODULUS
+
+    // Validate that kzg proof is a valid g1 point
+    if kzg::bytes_to_g1(&blob_sidecar.kzg_aggregated_proof.0).is_err() {
+        return Err(BlobError::InvalidKzgProof);
+    }
+
+    // Validatate that the kzg proof is valid against the commitments and blobs
+    let kzg = chain
+        .kzg
+        .as_ref()
+        .ok_or(BlobError::TrustedSetupNotInitialized)?;
+
+    if !kzg_utils::validate_blobs_sidecar(
+        kzg,
+        block_slot,
+        block_root,
+        kzg_commitments,
+        blob_sidecar,
+    )
+    .map_err(BlobError::KzgError)?
+    {
+        return Err(BlobError::InvalidKzgProof);
+    }
+    Ok(())
 }
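
The rewrite above replaces the `VerifiedBlobsSidecar` wrapper with a flat sequence of checks. A hypothetical condensed view of the ordering, with the real signatures and error types simplified to plain Rust:

/// Hypothetical condensed view of the checks `validate_blob_for_gossip`
/// performs, in order; each returns early on failure in the real code.
fn gossip_blob_checks(
    blob_slot: u64,
    block_slot: u64,
    latest_permissible_slot: u64,
) -> Result<(), &'static str> {
    // 1. Not from a future slot (beyond the gossip clock-disparity tolerance).
    if blob_slot > latest_permissible_slot {
        return Err("future slot");
    }
    // 2. The sidecar's slot must match the block it accompanies.
    if blob_slot != block_slot {
        return Err("slot mismatch");
    }
    // 3. The block's kzg commitments and the aggregated proof must be valid
    //    g1 points, 4. the commitments must match the blob transactions, and
    // 5. the aggregated kzg proof must verify against commitments and blobs.
    //    (These need the trusted setup, so they live behind `chain.kzg`.)
    Ok(())
}
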
@ -42,14 +42,17 @@
|
|||||||
//! END
|
//! END
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
|
use crate::blob_verification::{validate_blob_for_gossip, BlobError};
|
||||||
use crate::eth1_finalization_cache::Eth1FinalizationData;
|
use crate::eth1_finalization_cache::Eth1FinalizationData;
|
||||||
use crate::execution_payload::{
|
use crate::execution_payload::{
|
||||||
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
|
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
|
||||||
AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
|
AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
|
||||||
};
|
};
|
||||||
|
use crate::kzg_utils;
|
||||||
use crate::snapshot_cache::PreProcessingSnapshot;
|
use crate::snapshot_cache::PreProcessingSnapshot;
|
||||||
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
|
use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
|
||||||
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
|
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
|
||||||
|
use crate::BlockError::BlobValidation;
|
||||||
use crate::{
|
use crate::{
|
||||||
beacon_chain::{
|
beacon_chain::{
|
||||||
BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
|
BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
|
||||||
@ -67,6 +70,7 @@ use safe_arith::ArithError;
|
|||||||
use slog::{debug, error, warn, Logger};
|
use slog::{debug, error, warn, Logger};
|
||||||
use slot_clock::SlotClock;
|
use slot_clock::SlotClock;
|
||||||
use ssz::Encode;
|
use ssz::Encode;
|
||||||
|
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
|
||||||
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
|
use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
|
||||||
use state_processing::{
|
use state_processing::{
|
||||||
block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
|
block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
|
||||||
@ -83,12 +87,13 @@ use std::time::Duration;
|
|||||||
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
|
use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp};
|
||||||
use task_executor::JoinHandle;
|
use task_executor::JoinHandle;
|
||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::ExecPayload;
|
use types::signed_block_and_blobs::BlockWrapper;
|
||||||
use types::{
|
use types::{
|
||||||
BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
|
BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch,
|
||||||
EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes,
|
EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes,
|
||||||
RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
|
RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot,
|
||||||
};
|
};
|
||||||
|
use types::{BlobsSidecar, ExecPayload};
|
||||||
|
|
||||||
pub const POS_PANDA_BANNER: &str = r#"
|
pub const POS_PANDA_BANNER: &str = r#"
|
||||||
,,, ,,, ,,, ,,,
|
,,, ,,, ,,, ,,,
|
||||||
@ -135,9 +140,12 @@ pub enum BlockError<T: EthSpec> {
|
|||||||
///
|
///
|
||||||
/// It's unclear if this block is valid, but it cannot be processed without already knowing
|
/// It's unclear if this block is valid, but it cannot be processed without already knowing
|
||||||
/// its parent.
|
/// its parent.
|
||||||
ParentUnknown(Arc<SignedBeaconBlock<T>>),
|
ParentUnknown(BlockWrapper<T>),
|
||||||
/// The block skips too many slots and is a DoS risk.
|
/// The block skips too many slots and is a DoS risk.
|
||||||
TooManySkippedSlots { parent_slot: Slot, block_slot: Slot },
|
TooManySkippedSlots {
|
||||||
|
parent_slot: Slot,
|
||||||
|
block_slot: Slot,
|
||||||
|
},
|
||||||
/// The block slot is greater than the present slot.
|
/// The block slot is greater than the present slot.
|
||||||
///
|
///
|
||||||
/// ## Peer scoring
|
/// ## Peer scoring
|
||||||
@ -152,7 +160,10 @@ pub enum BlockError<T: EthSpec> {
|
|||||||
/// ## Peer scoring
|
/// ## Peer scoring
|
||||||
///
|
///
|
||||||
/// The peer has incompatible state transition logic and is faulty.
|
/// The peer has incompatible state transition logic and is faulty.
|
||||||
StateRootMismatch { block: Hash256, local: Hash256 },
|
StateRootMismatch {
|
||||||
|
block: Hash256,
|
||||||
|
local: Hash256,
|
||||||
|
},
|
||||||
/// The block was a genesis block, these blocks cannot be re-imported.
|
/// The block was a genesis block, these blocks cannot be re-imported.
|
||||||
GenesisBlock,
|
GenesisBlock,
|
||||||
/// The slot is finalized, no need to import.
|
/// The slot is finalized, no need to import.
|
||||||
@ -171,7 +182,9 @@ pub enum BlockError<T: EthSpec> {
|
|||||||
///
|
///
|
||||||
/// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
|
/// It's unclear if this block is valid, but it conflicts with finality and shouldn't be
|
||||||
/// imported.
|
/// imported.
|
||||||
NotFinalizedDescendant { block_parent_root: Hash256 },
|
NotFinalizedDescendant {
|
||||||
|
block_parent_root: Hash256,
|
||||||
|
},
|
||||||
/// Block is already known, no need to re-import.
|
/// Block is already known, no need to re-import.
|
||||||
///
|
///
|
||||||
/// ## Peer scoring
|
/// ## Peer scoring
|
||||||
@ -184,7 +197,10 @@ pub enum BlockError<T: EthSpec> {
|
|||||||
///
|
///
|
||||||
/// The `proposer` has already proposed a block at this slot. The existing block may or may not
|
/// The `proposer` has already proposed a block at this slot. The existing block may or may not
|
||||||
/// be equal to the given block.
|
/// be equal to the given block.
|
||||||
RepeatProposal { proposer: u64, slot: Slot },
|
RepeatProposal {
|
||||||
|
proposer: u64,
|
||||||
|
slot: Slot,
|
||||||
|
},
|
||||||
/// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
|
/// The block slot exceeds the MAXIMUM_BLOCK_SLOT_NUMBER.
|
||||||
///
|
///
|
||||||
/// ## Peer scoring
|
/// ## Peer scoring
|
||||||
@ -199,7 +215,10 @@ pub enum BlockError<T: EthSpec> {
|
|||||||
/// ## Peer scoring
|
/// ## Peer scoring
|
||||||
///
|
///
|
||||||
/// The block is invalid and the peer is faulty.
|
/// The block is invalid and the peer is faulty.
|
||||||
IncorrectBlockProposer { block: u64, local_shuffling: u64 },
|
IncorrectBlockProposer {
|
||||||
|
block: u64,
|
||||||
|
local_shuffling: u64,
|
||||||
|
},
|
||||||
/// The proposal signature in invalid.
|
/// The proposal signature in invalid.
|
||||||
///
|
///
|
||||||
/// ## Peer scoring
|
/// ## Peer scoring
|
||||||
@ -223,7 +242,10 @@ pub enum BlockError<T: EthSpec> {
|
|||||||
/// ## Peer scoring
|
/// ## Peer scoring
|
||||||
///
|
///
|
||||||
/// The block is invalid and the peer is faulty.
|
/// The block is invalid and the peer is faulty.
|
||||||
BlockIsNotLaterThanParent { block_slot: Slot, parent_slot: Slot },
|
BlockIsNotLaterThanParent {
|
||||||
|
block_slot: Slot,
|
||||||
|
parent_slot: Slot,
|
||||||
|
},
|
||||||
/// At least one block in the chain segment did not have it's parent root set to the root of
|
/// At least one block in the chain segment did not have it's parent root set to the root of
|
||||||
/// the prior block.
|
/// the prior block.
|
||||||
///
|
///
|
||||||
@ -279,7 +301,10 @@ pub enum BlockError<T: EthSpec> {
|
|||||||
///
|
///
|
||||||
/// The peer sent us an invalid block, but I'm not really sure how to score this in an
|
/// The peer sent us an invalid block, but I'm not really sure how to score this in an
|
||||||
/// "optimistic" sync world.
|
/// "optimistic" sync world.
|
||||||
ParentExecutionPayloadInvalid { parent_root: Hash256 },
|
ParentExecutionPayloadInvalid {
|
||||||
|
parent_root: Hash256,
|
||||||
|
},
|
||||||
|
BlobValidation(BlobError),
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returned when block validation failed due to some issue verifying
|
/// Returned when block validation failed due to some issue verifying
|
||||||
@ -524,7 +549,7 @@ fn process_block_slash_info<T: BeaconChainTypes>(
|
|||||||
/// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error
|
/// The given `chain_segment` must contain only blocks from the same epoch, otherwise an error
|
||||||
/// will be returned.
|
/// will be returned.
|
||||||
pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
||||||
mut chain_segment: Vec<(Hash256, Arc<SignedBeaconBlock<T::EthSpec>>)>,
|
mut chain_segment: Vec<(Hash256, BlockWrapper<T::EthSpec>)>,
|
||||||
chain: &BeaconChain<T>,
|
chain: &BeaconChain<T>,
|
||||||
) -> Result<Vec<SignatureVerifiedBlock<T>>, BlockError<T::EthSpec>> {
|
) -> Result<Vec<SignatureVerifiedBlock<T>>, BlockError<T::EthSpec>> {
|
||||||
if chain_segment.is_empty() {
|
if chain_segment.is_empty() {
|
||||||
@ -554,10 +579,12 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
|||||||
let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len());
|
let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len());
|
||||||
|
|
||||||
for (block_root, block) in &chain_segment {
|
for (block_root, block) in &chain_segment {
|
||||||
let mut consensus_context =
|
let mut consensus_context = ConsensusContext::new(block.slot())
|
||||||
ConsensusContext::new(block.slot()).set_current_block_root(*block_root);
|
.set_current_block_root(*block_root)
|
||||||
|
//FIXME(sean) Consider removing this is we pass the blob wrapper everywhere
|
||||||
|
.set_blobs_sidecar(block.blobs_sidecar());
|
||||||
|
|
||||||
signature_verifier.include_all_signatures(block, &mut consensus_context)?;
|
signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?;
|
||||||
|
|
||||||
// Save the block and its consensus context. The context will have had its proposer index
|
// Save the block and its consensus context. The context will have had its proposer index
|
||||||
// and attesting indices filled in, which can be used to accelerate later block processing.
|
// and attesting indices filled in, which can be used to accelerate later block processing.
|
||||||
@ -587,7 +614,7 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
|
|||||||
#[derive(Derivative)]
|
#[derive(Derivative)]
|
||||||
#[derivative(Debug(bound = "T: BeaconChainTypes"))]
|
#[derivative(Debug(bound = "T: BeaconChainTypes"))]
|
||||||
pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
|
pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
|
||||||
pub block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
pub block: BlockWrapper<T::EthSpec>,
|
||||||
pub block_root: Hash256,
|
pub block_root: Hash256,
|
||||||
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
|
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
|
||||||
consensus_context: ConsensusContext<T::EthSpec>,
|
consensus_context: ConsensusContext<T::EthSpec>,
|
||||||
@ -596,7 +623,7 @@ pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
|
|||||||
/// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit
|
/// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit
|
||||||
/// signatures) have been verified.
|
/// signatures) have been verified.
|
||||||
pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
|
pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
|
||||||
block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
block: BlockWrapper<T::EthSpec>,
|
||||||
block_root: Hash256,
|
block_root: Hash256,
|
||||||
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
|
parent: Option<PreProcessingSnapshot<T::EthSpec>>,
|
||||||
consensus_context: ConsensusContext<T::EthSpec>,
|
consensus_context: ConsensusContext<T::EthSpec>,
|
||||||
@ -613,12 +640,13 @@ type PayloadVerificationHandle<E> =
|
|||||||
/// - Signatures
|
/// - Signatures
|
||||||
/// - State root check
|
/// - State root check
|
||||||
/// - Per block processing
|
/// - Per block processing
|
||||||
|
/// - Blobs sidecar has been validated if present
|
||||||
///
|
///
|
||||||
/// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid
|
/// Note: a `ExecutionPendingBlock` is not _forever_ valid to be imported, it may later become invalid
|
||||||
/// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the
|
/// due to finality or some other event. A `ExecutionPendingBlock` should be imported into the
|
||||||
/// `BeaconChain` immediately after it is instantiated.
|
/// `BeaconChain` immediately after it is instantiated.
|
||||||
pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
|
pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
|
||||||
pub block: Arc<SignedBeaconBlock<T::EthSpec>>,
|
pub block: BlockWrapper<T::EthSpec>,
|
||||||
pub block_root: Hash256,
|
pub block_root: Hash256,
|
||||||
pub state: BeaconState<T::EthSpec>,
|
pub state: BeaconState<T::EthSpec>,
|
||||||
pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
|
||||||
@@ -642,7 +670,8 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
             .map(|execution_pending| {
                 // Supply valid block to slasher.
                 if let Some(slasher) = chain.slasher.as_ref() {
-                    slasher.accept_block_header(execution_pending.block.signed_block_header());
+                    slasher
+                        .accept_block_header(execution_pending.block.block().signed_block_header());
                 }
                 execution_pending
             })
@@ -664,16 +693,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
     /// Instantiates `Self`, a wrapper that indicates the given `block` is safe to be re-gossiped
     /// on the p2p network.
     ///
     /// Returns an error if the block is invalid, or if the block was unable to be verified.
     pub fn new(
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         chain: &BeaconChain<T>,
     ) -> Result<Self, BlockError<T::EthSpec>> {
         // If the block is valid for gossip we don't supply it to the slasher here because
         // we assume it will be transformed into a fully verified block. We *do* need to supply
         // it to the slasher if an error occurs, because that's the end of this block's journey,
         // and it could be a repeat proposal (a likely cause for slashing!).
-        let header = block.signed_block_header();
+        let header = block.block().signed_block_header();
         Self::new_without_slasher_checks(block, chain).map_err(|e| {
             process_block_slash_info(chain, BlockSlashInfo::from_early_error(header, e))
         })
@@ -681,11 +710,12 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
 
     /// As for new, but doesn't pass the block to the slasher.
     fn new_without_slasher_checks(
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         chain: &BeaconChain<T>,
     ) -> Result<Self, BlockError<T::EthSpec>> {
         // Ensure the block is the correct structure for the fork at `block.slot()`.
         block
+            .block()
             .fork_name(&chain.spec)
             .map_err(BlockError::InconsistentFork)?;
 
@@ -701,7 +731,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
             });
         }
 
-        let block_root = get_block_root(&block);
+        let block_root = get_block_root(block.block());
 
         // Disallow blocks that conflict with the anchor (weak subjectivity checkpoint), if any.
         check_block_against_anchor_slot(block.message(), chain)?;
@@ -740,10 +770,10 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         // Do not process a block that doesn't descend from the finalized root.
        //
         // We check this *before* we load the parent so that we can return a more detailed error.
-        check_block_is_finalized_descendant(
+        let block = check_block_is_finalized_descendant(
             chain,
             &chain.canonical_head.fork_choice_write_lock(),
-            &block,
+            block,
         )?;
 
         let block_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
@@ -837,7 +867,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         let pubkey = pubkey_cache
             .get(block.message().proposer_index() as usize)
             .ok_or_else(|| BlockError::UnknownValidator(block.message().proposer_index()))?;
-        block.verify_signature(
+        block.block().verify_signature(
             Some(block_root),
             pubkey,
             &fork,
@@ -877,10 +907,37 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         // Validate the block's execution_payload (if any).
         validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?;
 
+        if let Some(blobs_sidecar) = block.blobs() {
+            let kzg_commitments = block
+                .message()
+                .body()
+                .blob_kzg_commitments()
+                .map_err(|_| BlockError::BlobValidation(BlobError::KzgCommitmentMissing))?;
+            let transactions = block
+                .message()
+                .body()
+                .execution_payload_eip4844()
+                .map(|payload| payload.transactions())
+                .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))?
+                .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?;
+            validate_blob_for_gossip(
+                blobs_sidecar,
+                kzg_commitments,
+                transactions,
+                block.slot(),
+                block_root,
+                chain,
+            )
+            .map_err(BlobValidation)?;
+        }
+
         // Having checked the proposer index and the block root we can cache them.
         let consensus_context = ConsensusContext::new(block.slot())
             .set_current_block_root(block_root)
-            .set_proposer_index(block.message().proposer_index());
+            .set_proposer_index(block.message().proposer_index())
+            .set_blobs_sidecar_validated(true) // Validated in `validate_blob_for_gossip`
+            .set_blobs_verified_vs_txs(true) // Validated in `validate_blob_for_gossip`
+            .set_blobs_sidecar(block.blobs_sidecar()); // TODO: potentially remove
 
         Ok(Self {
             block,
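The flags set on the consensus context here let the import path (see the `ExecutionPendingBlock` hunks further down) skip re-verifying work already done at gossip time. A minimal sketch of that builder-style flag pattern, with the caveat that the real `ConsensusContext` lives in `state_processing` and anything beyond the method names visible in this diff is assumed:

    /// Simplified stand-in for the blob-related flags on the consensus context.
    #[derive(Default)]
    pub struct ConsensusContextSketch {
        blobs_sidecar_validated: bool,
        blobs_verified_vs_txs: bool,
    }

    impl ConsensusContextSketch {
        pub fn set_blobs_sidecar_validated(mut self, validated: bool) -> Self {
            self.blobs_sidecar_validated = validated;
            self
        }

        pub fn set_blobs_verified_vs_txs(mut self, verified: bool) -> Self {
            self.blobs_verified_vs_txs = verified;
            self
        }

        pub fn blobs_sidecar_validated(&self) -> bool {
            self.blobs_sidecar_validated
        }

        pub fn blobs_verified_vs_txs(&self) -> bool {
            self.blobs_verified_vs_txs
        }
    }

    fn main() {
        // Gossip verification marks both checks done, so import-time processing
        // can consult the flags instead of repeating the KZG work.
        let ctx = ConsensusContextSketch::default()
            .set_blobs_sidecar_validated(true)
            .set_blobs_verified_vs_txs(true);
        assert!(ctx.blobs_sidecar_validated() && ctx.blobs_verified_vs_txs());
    }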
@@ -913,7 +970,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
     }
 
     fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
-        &self.block
+        self.block.block()
     }
 }
 
@@ -923,12 +980,13 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
     ///
     /// Returns an error if the block is invalid, or if the block was unable to be verified.
     pub fn new(
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         block_root: Hash256,
         chain: &BeaconChain<T>,
     ) -> Result<Self, BlockError<T::EthSpec>> {
         // Ensure the block is the correct structure for the fork at `block.slot()`.
         block
+            .block()
             .fork_name(&chain.spec)
             .map_err(BlockError::InconsistentFork)?;
 
@@ -951,10 +1009,11 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
 
         let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);
 
-        let mut consensus_context =
-            ConsensusContext::new(block.slot()).set_current_block_root(block_root);
+        let mut consensus_context = ConsensusContext::new(block.slot())
+            .set_current_block_root(block_root)
+            .set_blobs_sidecar(block.blobs_sidecar());
 
-        signature_verifier.include_all_signatures(&block, &mut consensus_context)?;
+        signature_verifier.include_all_signatures(block.block(), &mut consensus_context)?;
 
         if signature_verifier.verify().is_ok() {
             Ok(Self {
@@ -970,11 +1029,11 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
 
     /// As for `new` above but producing `BlockSlashInfo`.
     pub fn check_slashable(
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         block_root: Hash256,
         chain: &BeaconChain<T>,
     ) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
-        let header = block.signed_block_header();
+        let header = block.block().signed_block_header();
         Self::new(block, block_root, chain).map_err(|e| BlockSlashInfo::from_early_error(header, e))
     }
 
@@ -1005,7 +1064,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
         // signature.
         let mut consensus_context = from.consensus_context;
         signature_verifier
-            .include_all_signatures_except_proposal(&block, &mut consensus_context)?;
+            .include_all_signatures_except_proposal(block.block(), &mut consensus_context)?;
 
         if signature_verifier.verify().is_ok() {
             Ok(Self {
@@ -1024,7 +1083,7 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
         from: GossipVerifiedBlock<T>,
         chain: &BeaconChain<T>,
     ) -> Result<Self, BlockSlashInfo<BlockError<T::EthSpec>>> {
-        let header = from.block.signed_block_header();
+        let header = from.block.block().signed_block_header();
         Self::from_gossip_verified_block(from, chain)
             .map_err(|e| BlockSlashInfo::from_early_error(header, e))
     }
@@ -1042,7 +1101,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
         chain: &Arc<BeaconChain<T>>,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
-        let header = self.block.signed_block_header();
+        let header = self.block.block().signed_block_header();
         let (parent, block) = if let Some(parent) = self.parent {
             (parent, self.block)
         } else {
@@ -1062,7 +1121,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
     }
 
     fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
-        &self.block
+        &self.block.block()
     }
 }
 
@@ -1079,7 +1138,11 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
         let block_root = check_block_relevancy(&self, block_root, chain)
             .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;
 
-        SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
+        SignatureVerifiedBlock::check_slashable(
+            BlockWrapper::Block { block: self },
+            block_root,
+            chain,
+        )?
         .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
     }
 
@@ -1088,6 +1151,29 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
     }
 }
 
+impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for BlockWrapper<T::EthSpec> {
+    /// Verifies the `SignedBeaconBlock` by first transforming it into a `SignatureVerifiedBlock`
+    /// and then using that implementation of `IntoExecutionPendingBlock` to complete verification.
+    fn into_execution_pending_block_slashable(
+        self,
+        block_root: Hash256,
+        chain: &Arc<BeaconChain<T>>,
+        notify_execution_layer: NotifyExecutionLayer,
+    ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
+        // Perform an early check to prevent wasting time on irrelevant blocks.
+        let block_root = check_block_relevancy(self.block(), block_root, chain).map_err(|e| {
+            BlockSlashInfo::SignatureNotChecked(self.block().signed_block_header(), e)
+        })?;
+
+        SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
+            .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
+    }
+
+    fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
+        self.block()
+    }
+}
+
 impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
     /// Instantiates `Self`, a wrapper that indicates that the given `block` is fully valid. See
     /// the struct-level documentation for more information.
@@ -1097,7 +1183,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
     ///
     /// Returns an error if the block is invalid, or if the block was unable to be verified.
     pub fn from_signature_verified_components(
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         block_root: Hash256,
         parent: PreProcessingSnapshot<T::EthSpec>,
         mut consensus_context: ConsensusContext<T::EthSpec>,
@@ -1137,7 +1223,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
          * Perform cursory checks to see if the block is even worth processing.
          */
 
-        check_block_relevancy(&block, block_root, chain)?;
+        check_block_relevancy(block.block(), block_root, chain)?;
 
         // Define a future that will verify the execution payload with an execution engine.
         //
@@ -1145,7 +1231,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
         // with the payload verification.
         let payload_notifier = PayloadNotifier::new(
             chain.clone(),
-            block.clone(),
+            block.block_cloned(),
             &parent.pre_state,
             notify_execution_layer,
         )?;
@@ -1386,13 +1472,13 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             &state,
             &chain.log,
         );
-        write_block(&block, block_root, &chain.log);
+        write_block(block.block(), block_root, &chain.log);
 
         let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE);
 
         if let Err(err) = per_block_processing(
             &mut state,
-            &block,
+            block.block(),
             // Signatures were verified earlier in this function.
             BlockSignatureStrategy::NoVerification,
             VerifyBlockRoot::True,
@@ -1429,9 +1515,9 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
          * Check to ensure the state root on the block matches the one we have calculated.
          */
 
-        if block.state_root() != state_root {
+        if block.block().state_root() != state_root {
             return Err(BlockError::StateRootMismatch {
-                block: block.state_root(),
+                block: block.block().state_root(),
                 local: state_root,
             });
         }
@@ -1474,6 +1560,56 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
         }
         drop(fork_choice);
 
+        /*
+         * Verify kzg proofs and kzg commitments against transactions if required
+         */
+        //FIXME(sean) should this be prior to applying attestations to fork choice above? done in parallel?
+        if let Some(ref sidecar) = consensus_context.blobs_sidecar() {
+            if let Some(data_availability_boundary) = chain.data_availability_boundary() {
+                if block_slot.epoch(T::EthSpec::slots_per_epoch()) > data_availability_boundary {
+                    let kzg = chain.kzg.as_ref().ok_or(BlockError::BlobValidation(
+                        BlobError::TrustedSetupNotInitialized,
+                    ))?;
+                    let transactions = block
+                        .message()
+                        .body()
+                        .execution_payload_eip4844()
+                        .map(|payload| payload.transactions())
+                        .map_err(|_| BlockError::BlobValidation(BlobError::TransactionsMissing))?
+                        .ok_or(BlockError::BlobValidation(BlobError::TransactionsMissing))?;
+                    let kzg_commitments =
+                        block.message().body().blob_kzg_commitments().map_err(|_| {
+                            BlockError::BlobValidation(BlobError::KzgCommitmentMissing)
+                        })?;
+                    if !consensus_context.blobs_sidecar_validated() {
+                        if !kzg_utils::validate_blobs_sidecar(
+                            &kzg,
+                            block.slot(),
+                            block_root,
+                            kzg_commitments,
+                            sidecar,
+                        )
+                        .map_err(|e| BlockError::BlobValidation(BlobError::KzgError(e)))?
+                        {
+                            return Err(BlockError::BlobValidation(BlobError::InvalidKzgProof));
+                        }
+                    }
+                    if !consensus_context.blobs_verified_vs_txs()
+                        && verify_kzg_commitments_against_transactions::<T::EthSpec>(
+                            transactions,
+                            kzg_commitments,
+                        )
+                        //FIXME(sean) we should maybe just map this error so we have more info about the mismatch
+                        .is_err()
+                    {
+                        return Err(BlockError::BlobValidation(
+                            BlobError::TransactionCommitmentMismatch,
+                        ));
+                    }
+                }
+            }
+        }
+
         Ok(Self {
             block,
             block_root,
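The outer epoch comparison gates the whole blob check: sidecars only have to be available (and hence verifiable) for blocks newer than the data availability boundary. A standalone sketch of that gate, with illustrative numbers (the real boundary comes from `chain.data_availability_boundary()`):

    // Blob checks only apply to blocks whose epoch is beyond the boundary;
    // older sidecars may already have been pruned. Values are illustrative.
    fn blobs_required(block_slot: u64, data_availability_boundary: Option<u64>, slots_per_epoch: u64) -> bool {
        match data_availability_boundary {
            Some(boundary_epoch) => block_slot / slots_per_epoch > boundary_epoch,
            // No boundary configured (e.g. pre-4844): never required.
            None => false,
        }
    }

    fn main() {
        assert!(blobs_required(320, Some(5), 32)); // epoch 10 > boundary 5
        assert!(!blobs_required(64, Some(5), 32)); // epoch 2 <= boundary 5
        assert!(!blobs_required(320, None, 32));
    }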
@@ -1563,10 +1699,10 @@ fn check_block_against_finalized_slot<T: BeaconChainTypes>(
 pub fn check_block_is_finalized_descendant<T: BeaconChainTypes>(
     chain: &BeaconChain<T>,
     fork_choice: &BeaconForkChoice<T>,
-    block: &Arc<SignedBeaconBlock<T::EthSpec>>,
-) -> Result<(), BlockError<T::EthSpec>> {
+    block: BlockWrapper<T::EthSpec>,
+) -> Result<BlockWrapper<T::EthSpec>, BlockError<T::EthSpec>> {
     if fork_choice.is_descendant_of_finalized(block.parent_root()) {
-        Ok(())
+        Ok(block)
     } else {
         // If fork choice does *not* consider the parent to be a descendant of the finalized block,
         // then there are two more cases:
@@ -1585,7 +1721,7 @@ pub fn check_block_is_finalized_descendant<T: BeaconChainTypes>(
                 block_parent_root: block.parent_root(),
             })
         } else {
-            Err(BlockError::ParentUnknown(block.clone()))
+            Err(BlockError::ParentUnknown(block))
         }
     }
 }
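The signature change threads ownership through the check: the function now consumes the wrapper and returns it on success, so the failure path can move it into `ParentUnknown` instead of cloning. A minimal sketch of that pattern with simplified stand-in types:

    struct BlockWrapper {
        parent_root: u64,
    }

    enum BlockError {
        ParentUnknown(BlockWrapper),
    }

    fn check_is_finalized_descendant(
        known_parent: u64,
        block: BlockWrapper,
    ) -> Result<BlockWrapper, BlockError> {
        if block.parent_root == known_parent {
            Ok(block) // caller regains ownership; no clone needed
        } else {
            Err(BlockError::ParentUnknown(block)) // move, not clone
        }
    }

    fn main() {
        let block = BlockWrapper { parent_root: 42 };
        let block = check_is_finalized_descendant(42, block).ok().unwrap();
        assert_eq!(block.parent_root, 42);
    }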
@@ -1657,8 +1793,8 @@ pub fn get_block_root<E: EthSpec>(block: &SignedBeaconBlock<E>) -> Hash256 {
 #[allow(clippy::type_complexity)]
 fn verify_parent_block_is_known<T: BeaconChainTypes>(
     chain: &BeaconChain<T>,
-    block: Arc<SignedBeaconBlock<T::EthSpec>>,
-) -> Result<(ProtoBlock, Arc<SignedBeaconBlock<T::EthSpec>>), BlockError<T::EthSpec>> {
+    block: BlockWrapper<T::EthSpec>,
+) -> Result<(ProtoBlock, BlockWrapper<T::EthSpec>), BlockError<T::EthSpec>> {
     if let Some(proto_block) = chain
         .canonical_head
         .fork_choice_read_lock()
@@ -1677,15 +1813,9 @@ fn verify_parent_block_is_known<T: BeaconChainTypes>(
 #[allow(clippy::type_complexity)]
 fn load_parent<T: BeaconChainTypes>(
     block_root: Hash256,
-    block: Arc<SignedBeaconBlock<T::EthSpec>>,
+    block: BlockWrapper<T::EthSpec>,
     chain: &BeaconChain<T>,
-) -> Result<
-    (
-        PreProcessingSnapshot<T::EthSpec>,
-        Arc<SignedBeaconBlock<T::EthSpec>>,
-    ),
-    BlockError<T::EthSpec>,
-> {
+) -> Result<(PreProcessingSnapshot<T::EthSpec>, BlockWrapper<T::EthSpec>), BlockError<T::EthSpec>> {
     let spec = &chain.spec;
 
     // Reject any block if its parent is not known to fork choice.
@@ -1,4 +1,5 @@
 use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY};
+use crate::blob_cache::BlobCache;
 use crate::eth1_chain::{CachingEth1Backend, SszEth1};
 use crate::eth1_finalization_cache::Eth1FinalizationCache;
 use crate::fork_choice_signal::ForkChoiceSignalTx;
@@ -20,12 +21,14 @@ use eth1::Config as Eth1Config;
 use execution_layer::ExecutionLayer;
 use fork_choice::{ForkChoice, ResetPayloadStatuses};
 use futures::channel::mpsc::Sender;
+use kzg::Kzg;
 use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::RwLock;
 use slasher::Slasher;
 use slog::{crit, error, info, Logger};
 use slot_clock::{SlotClock, TestingSlotClock};
 use std::marker::PhantomData;
+use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::Duration;
 use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp};
@@ -93,6 +96,7 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
     // Pending I/O batch that is constructed during building and should be executed atomically
     // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called.
     pending_io_batch: Vec<KeyValueStoreOp>,
+    trusted_setup_path: Option<PathBuf>,
     task_executor: Option<TaskExecutor>,
 }
 
@@ -132,6 +136,7 @@ where
             slasher: None,
             validator_monitor: None,
             pending_io_batch: vec![],
+            trusted_setup_path: None,
             task_executor: None,
         }
    }
@@ -571,6 +576,11 @@ where
         self
     }
 
+    pub fn trusted_setup(mut self, trusted_setup_file_path: PathBuf) -> Self {
+        self.trusted_setup_path = Some(trusted_setup_file_path);
+        self
+    }
+
     /// Consumes `self`, returning a `BeaconChain` if all required parameters have been supplied.
     ///
     /// An error will be returned at runtime if all required parameters have not been configured.
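Taken together with the `build`-time hunk just below, the intended wiring looks roughly like this sketch; the stand-in `Kzg` type models only the constructor shape used in this diff, and the file path is a made-up example value:

    use std::path::PathBuf;
    use std::sync::Arc;

    // Stand-in for `kzg::Kzg`; only `new_from_file` is modeled here.
    struct Kzg;
    impl Kzg {
        fn new_from_file(_path: PathBuf) -> Result<Self, String> {
            Ok(Kzg)
        }
    }

    fn build(trusted_setup_path: Option<PathBuf>) -> Result<Option<Arc<Kzg>>, String> {
        // Mirrors the `build` hunk below: the KZG context is optional, and a bad
        // trusted setup file fails the whole build with a descriptive message.
        trusted_setup_path
            .map(|path| {
                Kzg::new_from_file(path)
                    .map(Arc::new)
                    .map_err(|e| format!("Failed to load trusted setup: {:?}", e))
            })
            .transpose()
    }

    fn main() {
        // Hypothetical path, as supplied via the new `trusted_setup` builder method.
        let kzg = build(Some(PathBuf::from("/path/to/trusted_setup.txt"))).unwrap();
        assert!(kzg.is_some());
        assert!(build(None).unwrap().is_none());
    }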
@ -612,6 +622,14 @@ where
|
|||||||
slot_clock.now().ok_or("Unable to read slot")?
|
slot_clock.now().ok_or("Unable to read slot")?
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let kzg = if let Some(trusted_setup_file) = self.trusted_setup_path {
|
||||||
|
let kzg = Kzg::new_from_file(trusted_setup_file)
|
||||||
|
.map_err(|e| format!("Failed to load trusted setup: {:?}", e))?;
|
||||||
|
Some(Arc::new(kzg))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
let initial_head_block_root = fork_choice
|
let initial_head_block_root = fork_choice
|
||||||
.get_head(current_slot, &self.spec)
|
.get_head(current_slot, &self.spec)
|
||||||
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
|
.map_err(|e| format!("Unable to get fork choice head: {:?}", e))?;
|
||||||
@ -812,6 +830,8 @@ where
|
|||||||
graffiti: self.graffiti,
|
graffiti: self.graffiti,
|
||||||
slasher: self.slasher.clone(),
|
slasher: self.slasher.clone(),
|
||||||
validator_monitor: RwLock::new(validator_monitor),
|
validator_monitor: RwLock::new(validator_monitor),
|
||||||
|
blob_cache: BlobCache::default(),
|
||||||
|
kzg,
|
||||||
};
|
};
|
||||||
|
|
||||||
let head = beacon_chain.head_snapshot();
|
let head = beacon_chain.head_snapshot();
|
||||||
@@ -5,6 +5,7 @@ use crate::{
 use parking_lot::RwLock;
 use proto_array::Block as ProtoBlock;
 use std::sync::Arc;
+use store::signed_block_and_blobs::BlockWrapper;
 use types::*;
 
 pub struct CacheItem<E: EthSpec> {
@@ -20,6 +21,7 @@ pub struct CacheItem<E: EthSpec> {
      * Values used to make the block available.
      */
     block: Arc<SignedBeaconBlock<E>>,
+    blobs: Option<Arc<BlobsSidecar<E>>>,
     proto_block: ProtoBlock,
 }
 
@@ -49,7 +51,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
     pub fn add_head_block(
         &self,
         beacon_block_root: Hash256,
-        block: Arc<SignedBeaconBlock<E>>,
+        block: BlockWrapper<E>,
         proto_block: ProtoBlock,
         state: &BeaconState<E>,
         spec: &ChainSpec,
@@ -67,6 +69,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
             },
         };
 
+        let (block, blobs) = block.deconstruct();
         let item = CacheItem {
             epoch,
             committee_lengths,
@@ -74,6 +77,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
             source,
             target,
             block,
+            blobs,
             proto_block,
         };
 
@@ -155,6 +159,16 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
             .map(|item| item.block.clone())
     }
 
+    /// Returns the blobs, if `block_root` matches the cached item.
+    pub fn get_blobs(&self, block_root: Hash256) -> Option<Arc<BlobsSidecar<E>>> {
+        self.item
+            .read()
+            .as_ref()
+            .filter(|item| item.beacon_block_root == block_root)
+            .map(|item| item.blobs.clone())
+            .flatten()
+    }
+
     /// Returns the proto-array block, if `block_root` matches the cached item.
     pub fn get_proto_block(&self, block_root: Hash256) -> Option<ProtoBlock> {
         self.item
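A standalone sketch of the `get_blobs` lookup with simplified types, for clarity (note that `.map(..).flatten()` on an `Option` is equivalent to `.and_then(..)`, used below):

    use std::sync::Arc;

    struct BlobsSidecar;

    struct Item {
        beacon_block_root: u64,
        blobs: Option<Arc<BlobsSidecar>>,
    }

    // Single-item cache lookup: the root must match and blobs must be present.
    fn get_blobs(item: &Option<Item>, block_root: u64) -> Option<Arc<BlobsSidecar>> {
        item.as_ref()
            .filter(|item| item.beacon_block_root == block_root)
            .and_then(|item| item.blobs.clone())
    }

    fn main() {
        let item = Some(Item { beacon_block_root: 7, blobs: Some(Arc::new(BlobsSidecar)) });
        assert!(get_blobs(&item, 7).is_some()); // hit
        assert!(get_blobs(&item, 8).is_none()); // wrong root
    }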
@@ -267,6 +267,9 @@ pub enum BlockProductionError {
     TokioJoin(tokio::task::JoinError),
     BeaconChain(BeaconChainError),
     InvalidPayloadFork,
+    TrustedSetupNotInitialized,
+    InvalidBlockVariant(String),
+    KzgError(kzg::Error),
 }
 
 easy_from_to!(BlockProcessingError, BlockProductionError);
beacon_node/beacon_chain/src/kzg_utils.rs (new file, 59 lines)
@@ -0,0 +1,59 @@
+use kzg::{Error as KzgError, Kzg};
+use types::{Blob, BlobsSidecar, EthSpec, Hash256, KzgCommitment, KzgProof, Slot};
+
+// TODO(pawan): make this generic over blob size
+fn ssz_blob_to_crypto_blob<T: EthSpec>(blob: Blob<T>) -> Option<[u8; 131072]> {
+    if blob.len() != 131072 {
+        return None;
+    }
+    let blob_vec: Vec<u8> = blob.into();
+    let mut arr = [0; 131072];
+    arr.copy_from_slice(&blob_vec);
+    Some(arr)
+}
+
+pub fn validate_blobs_sidecar<T: EthSpec>(
+    kzg: &Kzg,
+    slot: Slot,
+    beacon_block_root: Hash256,
+    expected_kzg_commitments: &[KzgCommitment],
+    blobs_sidecar: &BlobsSidecar<T>,
+) -> Result<bool, KzgError> {
+    if slot != blobs_sidecar.beacon_block_slot
+        || beacon_block_root != blobs_sidecar.beacon_block_root
+        || blobs_sidecar.blobs.len() != expected_kzg_commitments.len()
+    {
+        return Ok(false);
+    }
+
+    let blobs = blobs_sidecar
+        .blobs
+        .into_iter()
+        .map(|blob| ssz_blob_to_crypto_blob::<T>(blob.clone())) // TODO(pawan): avoid this clone
+        .collect::<Option<Vec<_>>>()
+        .ok_or_else(|| KzgError::InvalidBlob("Invalid blobs in sidecar".to_string()))?;
+
+    kzg.verify_aggregate_kzg_proof(
+        &blobs,
+        expected_kzg_commitments,
+        blobs_sidecar.kzg_aggregated_proof,
+    )
+}
+
+pub fn compute_aggregate_kzg_proof<T: EthSpec>(
+    kzg: &Kzg,
+    blobs: &[Blob<T>],
+) -> Result<KzgProof, KzgError> {
+    let blobs = blobs
+        .into_iter()
+        .map(|blob| ssz_blob_to_crypto_blob::<T>(blob.clone())) // TODO(pawan): avoid this clone
+        .collect::<Option<Vec<_>>>()
+        .ok_or_else(|| KzgError::InvalidBlob("Invalid blobs".to_string()))?;
+
+    kzg.compute_aggregate_kzg_proof(&blobs)
+}
+
+pub fn blob_to_kzg_commitment<T: EthSpec>(kzg: &Kzg, blob: Blob<T>) -> Option<KzgCommitment> {
+    let blob = ssz_blob_to_crypto_blob::<T>(blob)?;
+    Some(kzg.blob_to_kzg_commitment(blob))
+}
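The conversion helper above insists on exactly 131072 bytes, which is the EIP-4844 blob size (4096 field elements of 32 bytes each). A standalone sketch of that length-checked conversion, heap-allocating to avoid a large stack array:

    const BLOB_BYTES: usize = 131_072; // 4096 field elements x 32 bytes

    // Returns `None` rather than panicking on a short or oversized input.
    fn to_fixed_blob(bytes: Vec<u8>) -> Option<Box<[u8; BLOB_BYTES]>> {
        if bytes.len() != BLOB_BYTES {
            return None;
        }
        let mut arr = Box::new([0u8; BLOB_BYTES]);
        arr.copy_from_slice(&bytes);
        Some(arr)
    }

    fn main() {
        assert!(to_fixed_blob(vec![0u8; BLOB_BYTES]).is_some());
        assert!(to_fixed_blob(vec![0u8; 10]).is_none()); // wrong length is rejected
    }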
@@ -5,6 +5,7 @@ mod beacon_chain;
 mod beacon_fork_choice_store;
 pub mod beacon_proposer_cache;
 mod beacon_snapshot;
+pub mod blob_cache;
 pub mod blob_verification;
 pub mod block_reward;
 mod block_times_cache;
@@ -22,6 +23,7 @@ pub mod fork_choice_signal;
 pub mod fork_revert;
 mod head_tracker;
 pub mod historical_blocks;
+pub mod kzg_utils;
 pub mod merge_readiness;
 mod metrics;
 pub mod migrate;
@@ -142,6 +142,7 @@ async fn produces_attestations() {
                 .add_head_block(
                     block_root,
                     Arc::new(block.clone()),
+                    None,
                     proto_block,
                     &state,
                     &chain.spec,
@@ -198,6 +199,7 @@ async fn early_attester_cache_old_request() {
         .add_head_block(
             head.beacon_block_root,
             head.beacon_block.clone(),
+            None,
            head_proto_block,
            &head.beacon_state,
            &harness.chain.spec,
@@ -185,6 +185,12 @@ where
             builder
         };
 
+        let builder = if let Some(trusted_setup_file) = config.trusted_setup_file {
+            builder.trusted_setup(trusted_setup_file)
+        } else {
+            builder
+        };
+
         let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false);
 
         // If the client is expected to resume but there's no beacon chain in the database,
@@ -68,6 +68,7 @@ pub struct Config {
     pub chain: beacon_chain::ChainConfig,
     pub eth1: eth1::Config,
     pub execution_layer: Option<execution_layer::Config>,
+    pub trusted_setup_file: Option<PathBuf>,
     pub http_api: http_api::Config,
     pub http_metrics: http_metrics::Config,
     pub monitoring_api: Option<monitoring_api::Config>,
@@ -90,6 +91,7 @@ impl Default for Config {
             sync_eth1_chain: false,
             eth1: <_>::default(),
             execution_layer: None,
+            trusted_setup_file: None,
             graffiti: Graffiti::default(),
             http_api: <_>::default(),
             http_metrics: <_>::default(),
@@ -31,10 +31,12 @@ pub const ETH_SYNCING_TIMEOUT: Duration = Duration::from_secs(1);
 
 pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1";
 pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2";
+pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3";
 pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8);
 
 pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1";
 pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2";
+pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3";
 pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2);
 
 pub const ENGINE_GET_BLOBS_BUNDLE_V1: &str = "engine_getBlobsBundleV1";
@@ -708,6 +710,23 @@ impl HttpJsonRpc {
         Ok(response.into())
     }
 
+    pub async fn new_payload_v3<T: EthSpec>(
+        &self,
+        execution_payload: ExecutionPayload<T>,
+    ) -> Result<PayloadStatusV1, Error> {
+        let params = json!([JsonExecutionPayloadV2::try_from(execution_payload)?]);
+
+        let response: JsonPayloadStatusV1 = self
+            .rpc_request(
+                ENGINE_NEW_PAYLOAD_V3,
+                params,
+                ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
+            )
+            .await?;
+
+        Ok(response.into())
+    }
+
     pub async fn get_payload_v1<T: EthSpec>(
         &self,
         fork_name: ForkName,
@@ -744,13 +763,31 @@ impl HttpJsonRpc {
         JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name)
     }
 
+    pub async fn get_payload_v3<T: EthSpec>(
+        &self,
+        fork_name: ForkName,
+        payload_id: PayloadId,
+    ) -> Result<ExecutionPayload<T>, Error> {
+        let params = json!([JsonPayloadIdRequest::from(payload_id)]);
+
+        let payload_v2: JsonExecutionPayloadV2<T> = self
+            .rpc_request(
+                ENGINE_GET_PAYLOAD_V3,
+                params,
+                ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
+            )
+            .await?;
+
+        JsonExecutionPayload::V2(payload_v2).try_into_execution_payload(fork_name)
+    }
+
     pub async fn get_blobs_bundle_v1<T: EthSpec>(
         &self,
         payload_id: PayloadId,
-    ) -> Result<JsonBlobBundles<T>, Error> {
+    ) -> Result<JsonBlobsBundle<T>, Error> {
         let params = json!([JsonPayloadIdRequest::from(payload_id)]);
 
-        let response: JsonBlobBundles<T> = self
+        let response: JsonBlobsBundle<T> = self
             .rpc_request(
                 ENGINE_GET_BLOBS_BUNDLE_V1,
                 params,
@@ -855,13 +892,10 @@ impl HttpJsonRpc {
         &self,
         execution_payload: ExecutionPayload<T>,
     ) -> Result<PayloadStatusV1, Error> {
-        let supported_apis = self.get_cached_supported_apis().await?;
-        if supported_apis.new_payload_v2 {
-            self.new_payload_v2(execution_payload).await
-        } else if supported_apis.new_payload_v1 {
-            self.new_payload_v1(execution_payload).await
-        } else {
-            Err(Error::RequiredMethodUnsupported("engine_newPayload"))
+        match execution_payload {
+            ExecutionPayload::Eip4844(_) => self.new_payload_v3(execution_payload).await,
+            ExecutionPayload::Capella(_) => self.new_payload_v2(execution_payload).await,
+            ExecutionPayload::Merge(_) => self.new_payload_v1(execution_payload).await,
         }
     }
 
@@ -872,13 +906,11 @@ impl HttpJsonRpc {
         fork_name: ForkName,
         payload_id: PayloadId,
     ) -> Result<ExecutionPayload<T>, Error> {
-        let supported_apis = self.get_cached_supported_apis().await?;
-        if supported_apis.get_payload_v2 {
-            self.get_payload_v2(fork_name, payload_id).await
-        } else if supported_apis.new_payload_v1 {
-            self.get_payload_v1(fork_name, payload_id).await
-        } else {
-            Err(Error::RequiredMethodUnsupported("engine_getPayload"))
+        match fork_name {
+            ForkName::Eip4844 => self.get_payload_v3(fork_name, payload_id).await,
+            ForkName::Capella => self.get_payload_v2(fork_name, payload_id).await,
+            ForkName::Merge => self.get_payload_v1(fork_name, payload_id).await,
+            _ => Err(Error::RequiredMethodUnsupported("engine_getPayload")),
         }
     }
 
@@ -886,14 +918,16 @@ impl HttpJsonRpc {
     // forkchoice_updated that the execution engine supports
     pub async fn forkchoice_updated(
         &self,
+        fork_name: ForkName,
         forkchoice_state: ForkchoiceState,
         payload_attributes: Option<PayloadAttributes>,
     ) -> Result<ForkchoiceUpdatedResponse, Error> {
-        let supported_apis = self.get_cached_supported_apis().await?;
-        if supported_apis.forkchoice_updated_v2 {
-            self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
-                .await
-        } else if supported_apis.forkchoice_updated_v1 {
-            self.forkchoice_updated_v1(
-                forkchoice_state,
-                payload_attributes
+        match fork_name {
+            ForkName::Capella | ForkName::Eip4844 => {
+                self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
+                    .await
+            }
+            ForkName::Merge => {
+                self.forkchoice_updated_v1(
+                    forkchoice_state,
+                    payload_attributes
@@ -901,8 +935,8 @@ impl HttpJsonRpc {
                         .transpose()?,
                 )
                 .await
-        } else {
-            Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))
+            }
+            _ => Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")),
         }
     }
 }
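These three hunks share one idea: instead of probing at runtime which engine API versions the execution layer advertises, the method version is derived statically from the fork the payload belongs to. A simplified sketch of that dispatch (enum and method-name strings mirror the constants above; everything else is a stand-in):

    #[derive(Debug, PartialEq)]
    enum ForkName {
        Merge,
        Capella,
        Eip4844,
        Other,
    }

    // Fork decides the engine API version; unsupported forks are a hard error.
    fn engine_get_payload_method(fork_name: &ForkName) -> Result<&'static str, &'static str> {
        match fork_name {
            ForkName::Eip4844 => Ok("engine_getPayloadV3"),
            ForkName::Capella => Ok("engine_getPayloadV2"),
            ForkName::Merge => Ok("engine_getPayloadV1"),
            _ => Err("engine_getPayload unsupported at this fork"),
        }
    }

    fn main() {
        assert_eq!(engine_get_payload_method(&ForkName::Eip4844), Ok("engine_getPayloadV3"));
        assert_eq!(engine_get_payload_method(&ForkName::Merge), Ok("engine_getPayloadV1"));
        assert!(engine_get_payload_method(&ForkName::Other).is_err());
    }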
@@ -95,8 +95,6 @@ pub struct JsonExecutionPayload<T: EthSpec> {
     #[serde(with = "eth2_serde_utils::u256_hex_be")]
     pub base_fee_per_gas: Uint256,
     #[superstruct(only(V2))]
-    #[serde(skip_serializing_if = "Option::is_none")]
-    #[serde(default)]
     #[serde(with = "eth2_serde_utils::u256_hex_be_opt")]
     pub excess_data_gas: Option<Uint256>,
     pub block_hash: ExecutionBlockHash,
@@ -423,10 +421,11 @@ impl From<JsonPayloadAttributes> for PayloadAttributes {
 
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
 #[serde(bound = "T: EthSpec", rename_all = "camelCase")]
-pub struct JsonBlobBundles<T: EthSpec> {
+pub struct JsonBlobsBundle<T: EthSpec> {
     pub block_hash: ExecutionBlockHash,
-    pub kzgs: Vec<KzgCommitment>,
-    pub blobs: Vec<Blob<T>>,
+    pub kzgs: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
+    #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
+    pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
 }
 
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
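With `rename_all = "camelCase"`, the bundle's JSON keys come out as `blockHash`, `kzgs` and `blobs`. A minimal sketch of that wire shape using the standard `serde`/`serde_json` crates; the string field values are illustrative placeholders, not real engine output:

    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(rename_all = "camelCase")]
    struct JsonBlobsBundleSketch {
        block_hash: String,
        kzgs: Vec<String>,
        blobs: Vec<String>,
    }

    fn main() {
        let bundle = JsonBlobsBundleSketch {
            block_hash: "0xabc...".to_string(),
            kzgs: vec!["0xdef...".to_string()],
            blobs: vec!["0x0000...".to_string()],
        };
        // Prints: {"blockHash":"0xabc...","kzgs":["0xdef..."],"blobs":["0x0000..."]}
        println!("{}", serde_json::to_string(&bundle).unwrap());
    }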
@@ -11,7 +11,7 @@ use std::sync::Arc;
 use task_executor::TaskExecutor;
 use tokio::sync::{watch, Mutex, RwLock};
 use tokio_stream::wrappers::WatchStream;
-use types::ExecutionBlockHash;
+use types::{Address, ExecutionBlockHash, ForkName, Hash256};
 
 /// The number of payload IDs that will be stored for each `Engine`.
 ///
@@ -114,7 +114,7 @@ pub struct Engine {
     pub api: HttpJsonRpc,
     payload_id_cache: Mutex<LruCache<PayloadIdCacheKey, PayloadId>>,
     state: RwLock<State>,
-    latest_forkchoice_state: RwLock<Option<ForkchoiceState>>,
+    latest_forkchoice_state: RwLock<Option<(ForkName, ForkchoiceState)>>,
     executor: TaskExecutor,
     log: Logger,
 }
@@ -153,13 +153,15 @@ impl Engine {
 
     pub async fn notify_forkchoice_updated(
         &self,
+        fork_name: ForkName,
         forkchoice_state: ForkchoiceState,
         payload_attributes: Option<PayloadAttributes>,
         log: &Logger,
     ) -> Result<ForkchoiceUpdatedResponse, EngineApiError> {
+        info!(log, "Notifying FCU"; "fork_name" => ?fork_name);
         let response = self
             .api
-            .forkchoice_updated(forkchoice_state, payload_attributes.clone())
+            .forkchoice_updated(fork_name, forkchoice_state, payload_attributes.clone())
             .await?;
 
         if let Some(payload_id) = response.payload_id {
@@ -179,18 +181,18 @@ impl Engine {
         Ok(response)
     }
 
-    async fn get_latest_forkchoice_state(&self) -> Option<ForkchoiceState> {
+    async fn get_latest_forkchoice_state(&self) -> Option<(ForkName, ForkchoiceState)> {
         *self.latest_forkchoice_state.read().await
     }
 
-    pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) {
-        *self.latest_forkchoice_state.write().await = Some(state);
+    pub async fn set_latest_forkchoice_state(&self, fork_name: ForkName, state: ForkchoiceState) {
+        *self.latest_forkchoice_state.write().await = Some((fork_name, state));
     }
 
     async fn send_latest_forkchoice_state(&self) {
         let latest_forkchoice_state = self.get_latest_forkchoice_state().await;
 
-        if let Some(forkchoice_state) = latest_forkchoice_state {
+        if let Some((fork_name, forkchoice_state)) = latest_forkchoice_state {
             if forkchoice_state.head_block_hash == ExecutionBlockHash::zero() {
                 debug!(
                     self.log,
@@ -204,11 +206,16 @@ impl Engine {
                 self.log,
                 "Issuing forkchoiceUpdated";
                 "forkchoice_state" => ?forkchoice_state,
+                "fork_name" => ?fork_name,
             );
 
             // For simplicity, payload attributes are never included in this call. It may be
             // reasonable to include them in the future.
-            if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await {
+            if let Err(e) = self
+                .api
+                .forkchoice_updated(fork_name, forkchoice_state, None)
+                .await
+            {
                 debug!(
                     self.log,
                     "Failed to issue latest head to engine";
@@ -107,12 +107,29 @@ pub enum BlockProposalContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
     Payload(Payload),
     PayloadAndBlobs {
         payload: Payload,
-        kzg_commitments: Vec<KzgCommitment>,
-        blobs: Vec<Blob<T>>,
+        kzg_commitments: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
+        blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
     },
 }
 
 impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Payload> {
+    pub fn deconstruct(
+        self,
+    ) -> (
+        Payload,
+        Option<VariableList<KzgCommitment, T::MaxBlobsPerBlock>>,
+        Option<VariableList<Blob<T>, T::MaxBlobsPerBlock>>,
+    ) {
+        match self {
+            Self::Payload(payload) => (payload, None, None),
+            Self::PayloadAndBlobs {
+                payload,
+                kzg_commitments,
+                blobs,
+            } => (payload, Some(kzg_commitments), Some(blobs)),
+        }
+    }
+
     pub fn payload(&self) -> &Payload {
         match self {
             Self::Payload(payload) => payload,
@@ -133,26 +150,6 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
             } => payload,
         }
     }
-    pub fn kzg_commitments(&self) -> Option<&[KzgCommitment]> {
-        match self {
-            Self::Payload(_) => None,
-            Self::PayloadAndBlobs {
-                payload: _,
-                kzg_commitments,
-                blobs: _,
-            } => Some(kzg_commitments),
-        }
-    }
-    pub fn blobs(&self) -> Option<&[Blob<T>]> {
-        match self {
-            Self::Payload(_) => None,
-            Self::PayloadAndBlobs {
-                payload: _,
-                kzg_commitments: _,
-                blobs,
-            } => Some(blobs),
-        }
-    }
     pub fn default_at_fork(fork_name: ForkName) -> Self {
         match fork_name {
             ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
@@ -160,8 +157,8 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
             }
             ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs {
                 payload: Payload::default_at_fork(fork_name),
-                blobs: vec![],
-                kzg_commitments: vec![],
+                blobs: VariableList::default(),
+                kzg_commitments: VariableList::default(),
             },
         }
     }
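The borrowing accessors removed above are replaced by the by-value `deconstruct`, which suits block production: the payload, commitments and blobs are moved into the final block rather than cloned. A sketch of that consuming pattern with simplified stand-in types:

    struct Payload;
    struct Commitment;
    struct Blob;

    enum BlockProposalContents {
        Payload(Payload),
        PayloadAndBlobs {
            payload: Payload,
            kzg_commitments: Vec<Commitment>,
            blobs: Vec<Blob>,
        },
    }

    impl BlockProposalContents {
        // Consumes `self`; all three parts move out in one call.
        fn deconstruct(self) -> (Payload, Option<Vec<Commitment>>, Option<Vec<Blob>>) {
            match self {
                Self::Payload(payload) => (payload, None, None),
                Self::PayloadAndBlobs {
                    payload,
                    kzg_commitments,
                    blobs,
                } => (payload, Some(kzg_commitments), Some(blobs)),
            }
        }
    }

    fn main() {
        let contents = BlockProposalContents::PayloadAndBlobs {
            payload: Payload,
            kzg_commitments: vec![Commitment],
            blobs: vec![Blob],
        };
        let (_payload, commitments, blobs) = contents.deconstruct();
        assert!(commitments.is_some() && blobs.is_some());
    }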
@ -217,6 +214,7 @@ struct Inner<E: EthSpec> {
|
|||||||
executor: TaskExecutor,
|
executor: TaskExecutor,
|
||||||
payload_cache: PayloadCache<E>,
|
payload_cache: PayloadCache<E>,
|
||||||
builder_profit_threshold: Uint256,
|
builder_profit_threshold: Uint256,
|
||||||
|
spec: ChainSpec,
|
||||||
log: Logger,
|
log: Logger,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -240,6 +238,8 @@ pub struct Config {
|
|||||||
/// The minimum value of an external payload for it to be considered in a proposal.
|
/// The minimum value of an external payload for it to be considered in a proposal.
|
||||||
pub builder_profit_threshold: u128,
|
pub builder_profit_threshold: u128,
|
||||||
pub execution_timeout_multiplier: Option<u32>,
|
pub execution_timeout_multiplier: Option<u32>,
|
||||||
|
#[serde(skip)]
|
||||||
|
pub spec: ChainSpec,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Provides access to one execution engine and provides a neat interface for consumption by the
|
/// Provides access to one execution engine and provides a neat interface for consumption by the
|
||||||
@ -262,6 +262,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
|||||||
default_datadir,
|
default_datadir,
|
||||||
builder_profit_threshold,
|
builder_profit_threshold,
|
||||||
execution_timeout_multiplier,
|
execution_timeout_multiplier,
|
||||||
|
spec,
|
||||||
} = config;
|
} = config;
|
||||||
|
|
||||||
if urls.len() > 1 {
|
if urls.len() > 1 {
|
||||||
@ -333,6 +334,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
|||||||
executor,
|
executor,
|
||||||
payload_cache: PayloadCache::default(),
|
payload_cache: PayloadCache::default(),
|
||||||
builder_profit_threshold: Uint256::from(builder_profit_threshold),
|
builder_profit_threshold: Uint256::from(builder_profit_threshold),
|
||||||
|
spec,
|
||||||
log,
|
log,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -1008,6 +1010,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
|||||||
|
|
||||||
let response = engine
|
let response = engine
|
||||||
.notify_forkchoice_updated(
|
.notify_forkchoice_updated(
|
||||||
|
current_fork,
|
||||||
fork_choice_state,
|
fork_choice_state,
|
||||||
Some(payload_attributes.clone()),
|
Some(payload_attributes.clone()),
|
||||||
self.log(),
|
self.log(),
|
||||||
@ -1266,8 +1269,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
|||||||
finalized_block_hash,
|
finalized_block_hash,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let fork_name = self
|
||||||
|
.inner
|
||||||
|
.spec
|
||||||
|
.fork_name_at_epoch(next_slot.epoch(T::slots_per_epoch()));
|
||||||
|
|
||||||
self.engine()
|
self.engine()
|
||||||
.set_latest_forkchoice_state(forkchoice_state)
|
.set_latest_forkchoice_state(fork_name, forkchoice_state)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
let payload_attributes_ref = &payload_attributes;
|
let payload_attributes_ref = &payload_attributes;
|
||||||
@ -1276,6 +1284,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
|||||||
.request(|engine| async move {
|
.request(|engine| async move {
|
||||||
engine
|
engine
|
||||||
.notify_forkchoice_updated(
|
.notify_forkchoice_updated(
|
||||||
|
fork_name,
|
||||||
forkchoice_state,
|
forkchoice_state,
|
||||||
payload_attributes_ref.clone(),
|
payload_attributes_ref.clone(),
|
||||||
self.log(),
|
self.log(),
|
||||||
|
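The hunks above thread a `ChainSpec` into the `ExecutionLayer` so every forkchoice call can be tagged with the fork active at the relevant slot. A minimal sketch of the derivation the diff performs inline (the free function `fork_for_slot` is illustrative, not part of this commit):

    use types::{ChainSpec, EthSpec, ForkName, Slot};

    // Derive the fork that will be active at a slot, so the engine API layer
    // can select the matching JSON-RPC method version (V1 vs V2).
    fn fork_for_slot<T: EthSpec>(spec: &ChainSpec, slot: Slot) -> ForkName {
        spec.fork_name_at_epoch(slot.epoch(T::slots_per_epoch()))
    }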
@@ -74,7 +74,7 @@ pub async fn handle_rpc<T: EthSpec>(
                 .unwrap())
             }
         }
-        ENGINE_NEW_PAYLOAD_V1 => {
+        ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => {
             let request: JsonExecutionPayload<T> = get_param(params, 0)?;

             // Canned responses set by block hash take priority.
@@ -120,7 +120,7 @@ pub async fn handle_rpc<T: EthSpec>(

             Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap())
         }
-        ENGINE_FORKCHOICE_UPDATED_V1 => {
+        ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => {
             let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?;
             let payload_attributes: Option<JsonPayloadAttributes> = get_param(params, 1)?;

@@ -153,6 +153,19 @@ pub async fn handle_rpc<T: EthSpec>(

             Ok(serde_json::to_value(response).unwrap())
         }
+
+        ENGINE_GET_PAYLOAD_V2 => {
+            let request: JsonPayloadIdRequest = get_param(params, 0)?;
+            let id = request.into();
+
+            let response = ctx
+                .execution_block_generator
+                .write()
+                .get_payload(&id)
+                .ok_or_else(|| format!("no payload for id {:?}", id))?;
+
+            Ok(serde_json::to_value(JsonExecutionPayloadV2::try_from(response).unwrap()).unwrap())
+        }
         ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => {
             let block_generator = ctx.execution_block_generator.read();
             let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 {
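The mock engine now accepts the V2 engine methods by or-ing the method-name constants into one match arm. A toy sketch of the dispatch pattern (constants here are illustrative, not the real method names):

    const METHOD_V1: &str = "engine_exampleV1"; // illustrative, not the actual constants
    const METHOD_V2: &str = "engine_exampleV2";

    fn dispatch(method: &str) -> &'static str {
        match method {
            // An or-pattern lets both protocol versions share one handler body.
            METHOD_V1 | METHOD_V2 => "shared handler",
            _ => "unknown method",
        }
    }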
@@ -1107,6 +1107,8 @@ pub fn serve<T: BeaconChainTypes>(
      */

     // POST beacon/blocks
+
+    // TODO: THIS IS NOT THE RIGHT CODE
     let post_beacon_blocks = eth_v1
         .and(warp::path("beacon"))
         .and(warp::path("blocks"))
@@ -1120,14 +1122,11 @@ pub fn serve<T: BeaconChainTypes>(
          chain: Arc<BeaconChain<T>>,
          network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
          log: Logger| async move {
-            // need to have cached the blob sidecar somewhere in the beacon chain
-            // to publish
-            publish_blocks::publish_block(None, block, None, chain, &network_tx, log)
+            publish_blocks::publish_block(None, block, chain, &network_tx, log)
                 .await
                 .map(|()| warp::reply())
         },
     );

     /*
      * beacon/blocks
      */
@@ -1,9 +1,8 @@
 use crate::metrics;
 use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
-use beacon_chain::{
-    BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer,
-};
-use lighthouse_network::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar};
+use beacon_chain::NotifyExecutionLayer;
+use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
+use lighthouse_network::PubsubMessage;
 use network::NetworkMessage;
 use slog::{crit, error, info, warn, Logger};
 use slot_clock::SlotClock;
@@ -12,7 +11,8 @@ use tokio::sync::mpsc::UnboundedSender;
 use tree_hash::TreeHash;
 use types::{
     AbstractExecPayload, BlindedPayload, BlobsSidecar, EthSpec, ExecPayload, ExecutionBlockHash,
-    FullPayload, Hash256, SignedBeaconBlock,
+    FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
+    SignedBeaconBlockEip4844,
 };
 use warp::Rejection;

@@ -20,31 +20,32 @@ use warp::Rejection;
 pub async fn publish_block<T: BeaconChainTypes>(
     block_root: Option<Hash256>,
     block: Arc<SignedBeaconBlock<T::EthSpec>>,
-    blobs_sidecar: Option<Arc<BlobsSidecar<T::EthSpec>>>,
     chain: Arc<BeaconChain<T>>,
     network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
     log: Logger,
 ) -> Result<(), Rejection> {
     let seen_timestamp = timestamp_now();

+    //FIXME(sean) have to move this to prior to publishing because it's included in the blobs sidecar message.
+    //this may skew metrics
+    let block_root = block_root.unwrap_or_else(|| block.canonical_root());
+
     // Send the block, regardless of whether or not it is valid. The API
     // specification is very clear that this is the desired behaviour.
-    let message = match &*block {
-        SignedBeaconBlock::Eip4844(block) => {
-            if let Some(sidecar) = blobs_sidecar {
-                PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new(
-                    SignedBeaconBlockAndBlobsSidecar {
-                        beacon_block: block.clone(),
-                        blobs_sidecar: (*sidecar).clone(),
-                    },
-                ))
-            } else {
-                //TODO(pawan): return an empty sidecar instead
-                return Err(warp_utils::reject::broadcast_without_import(format!("")));
-            }
-        }
-        _ => PubsubMessage::BeaconBlock(block.clone()),
-    };
+    let message = if matches!(block.as_ref(), &SignedBeaconBlock::Eip4844(_)) {
+        if let Some(sidecar) = chain.blob_cache.pop(&block_root) {
+            PubsubMessage::BeaconBlockAndBlobsSidecars(SignedBeaconBlockAndBlobsSidecar {
+                beacon_block: block.clone(),
+                blobs_sidecar: Arc::new(sidecar),
+            })
+        } else {
+            //FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required
+            return Err(warp_utils::reject::broadcast_without_import(format!(
+                "no blob cached for block"
+            )));
+        }
+    } else {
+        PubsubMessage::BeaconBlock(block.clone())
+    };
     crate::publish_pubsub_message(network_tx, message)?;

@@ -52,8 +53,6 @@ pub async fn publish_block<T: BeaconChainTypes>(
     let delay = get_block_delay_ms(seen_timestamp, block.message(), &chain.slot_clock);
     metrics::observe_duration(&metrics::HTTP_API_BLOCK_BROADCAST_DELAY_TIMES, delay);

-    let block_root = block_root.unwrap_or_else(|| block.canonical_root());
-
     match chain
         .process_block(
             block_root,
@@ -160,7 +159,6 @@ pub async fn publish_blinded_block<T: BeaconChainTypes>(
     publish_block::<T>(
         Some(block_root),
         Arc::new(full_block),
-        None,
         chain,
         network_tx,
         log,
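With the sidecar argument removed from `publish_block`, the blob sidecar now travels through a cache on the `BeaconChain`, keyed by block root and popped exactly once at publish time. A self-contained sketch of that cache shape, assuming only what the diff shows (a `pop(&root)` API; the real cache type is outside this diff):

    use std::collections::HashMap;

    // Illustrative stand-in for the chain's blob cache: block production inserts
    // the sidecar under the block root; the publish path pops it exactly once.
    struct BlobCache {
        inner: HashMap<[u8; 32], Vec<u8>>, // root -> SSZ-encoded sidecar (simplified)
    }

    impl BlobCache {
        fn put(&mut self, root: [u8; 32], sidecar: Vec<u8>) {
            self.inner.insert(root, sidecar);
        }
        // `pop` removes the entry so a sidecar is only ever published once.
        fn pop(&mut self, root: &[u8; 32]) -> Option<Vec<u8>> {
            self.inner.remove(root)
        }
    }

Note that the block root must be computed before broadcasting, since it is the key under which the sidecar was cached; the FIXME in the hunk above records the resulting metrics skew.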
@@ -15,7 +15,6 @@ pub mod peer_manager;
 pub mod rpc;
 pub mod types;

-pub use crate::types::SignedBeaconBlockAndBlobsSidecar;
 pub use config::gossip_max_size;

 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};

@@ -503,6 +503,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             Protocol::BlocksByRoot => PeerAction::MidToleranceError,
             Protocol::BlobsByRange => PeerAction::MidToleranceError,
             Protocol::LightClientBootstrap => PeerAction::LowToleranceError,
+            Protocol::BlobsByRoot => PeerAction::MidToleranceError,
             Protocol::Goodbye => PeerAction::LowToleranceError,
             Protocol::MetaData => PeerAction::LowToleranceError,
             Protocol::Status => PeerAction::LowToleranceError,
@@ -519,6 +520,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             Protocol::BlocksByRange => return,
             Protocol::BlocksByRoot => return,
             Protocol::BlobsByRange => return,
+            Protocol::BlobsByRoot => return,
             Protocol::Goodbye => return,
             Protocol::LightClientBootstrap => return,
             Protocol::MetaData => PeerAction::LowToleranceError,
@@ -536,6 +538,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
             Protocol::BlocksByRange => PeerAction::MidToleranceError,
             Protocol::BlocksByRoot => PeerAction::MidToleranceError,
             Protocol::BlobsByRange => PeerAction::MidToleranceError,
+            Protocol::BlobsByRoot => PeerAction::MidToleranceError,
             Protocol::LightClientBootstrap => return,
             Protocol::Goodbye => return,
             Protocol::MetaData => return,
@@ -18,8 +18,8 @@ use tokio_util::codec::{Decoder, Encoder};
 use types::light_client_bootstrap::LightClientBootstrap;
 use types::{
     BlobsSidecar, EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock,
-    SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
-    SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
+    SignedBeaconBlockAltair, SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockBase,
+    SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
 };
 use unsigned_varint::codec::Uvi;

@@ -73,6 +73,7 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
             RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(),
             RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(),
             RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(),
+            RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(),
             RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(),
             RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
             RPCResponse::MetaData(res) =>
@@ -233,6 +234,7 @@ impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for SSZSnappyOutboundCodec<
             OutboundRequest::BlocksByRange(req) => req.as_ssz_bytes(),
             OutboundRequest::BlocksByRoot(req) => req.block_roots.as_ssz_bytes(),
             OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(),
+            OutboundRequest::BlobsByRoot(req) => req.block_roots.as_ssz_bytes(),
             OutboundRequest::Ping(req) => req.as_ssz_bytes(),
             OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode
             OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(),
@@ -316,7 +318,11 @@ impl<TSpec: EthSpec> Decoder for SSZSnappyOutboundCodec<TSpec> {
             let _read_bytes = src.split_to(n as usize);

             match self.protocol.version {
-                Version::V1 => handle_v1_response(self.protocol.message_name, &decoded_buffer),
+                Version::V1 => handle_v1_response(
+                    self.protocol.message_name,
+                    &decoded_buffer,
+                    &mut self.fork_name,
+                ),
                 Version::V2 => handle_v2_response(
                     self.protocol.message_name,
                     &decoded_buffer,
@@ -486,6 +492,9 @@ fn handle_v1_request<T: EthSpec>(
         Protocol::BlobsByRange => Ok(Some(InboundRequest::BlobsByRange(
             BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?,
         ))),
+        Protocol::BlobsByRoot => Ok(Some(InboundRequest::BlobsByRoot(BlobsByRootRequest {
+            block_roots: VariableList::from_ssz_bytes(decoded_buffer)?,
+        }))),
         Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping {
             data: u64::from_ssz_bytes(decoded_buffer)?,
         }))),
@@ -547,6 +556,7 @@ fn handle_v2_request<T: EthSpec>(
 fn handle_v1_response<T: EthSpec>(
     protocol: Protocol,
     decoded_buffer: &[u8],
+    fork_name: &mut Option<ForkName>,
 ) -> Result<Option<RPCResponse<T>>, RPCError> {
     match protocol {
         Protocol::Status => Ok(Some(RPCResponse::Status(StatusMessage::from_ssz_bytes(
@@ -562,7 +572,40 @@ fn handle_v1_response<T: EthSpec>(
         Protocol::BlocksByRoot => Ok(Some(RPCResponse::BlocksByRoot(Arc::new(
             SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?),
         )))),
-        Protocol::BlobsByRange => Err(RPCError::InvalidData("blobs by range via v1".to_string())),
+        Protocol::BlobsByRange => {
+            let fork_name = fork_name.take().ok_or_else(|| {
+                RPCError::ErrorResponse(
+                    RPCResponseErrorCode::InvalidRequest,
+                    format!("No context bytes provided for {} response", protocol),
+                )
+            })?;
+            match fork_name {
+                ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRange(Arc::new(
+                    BlobsSidecar::from_ssz_bytes(decoded_buffer)?,
+                )))),
+                _ => Err(RPCError::ErrorResponse(
+                    RPCResponseErrorCode::InvalidRequest,
+                    "Invalid forkname for blobsbyrange".to_string(),
+                )),
+            }
+        }
+        Protocol::BlobsByRoot => {
+            let fork_name = fork_name.take().ok_or_else(|| {
+                RPCError::ErrorResponse(
+                    RPCResponseErrorCode::InvalidRequest,
+                    format!("No context bytes provided for {} response", protocol),
+                )
+            })?;
+            match fork_name {
+                ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRoot(Arc::new(
+                    SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(decoded_buffer)?,
+                )))),
+                _ => Err(RPCError::ErrorResponse(
+                    RPCResponseErrorCode::InvalidRequest,
+                    "Invalid forkname for blobsbyroot".to_string(),
+                )),
+            }
+        }
         Protocol::Ping => Ok(Some(RPCResponse::Pong(Ping {
             data: u64::from_ssz_bytes(decoded_buffer)?,
         }))),
@@ -650,15 +693,12 @@ fn handle_v2_response<T: EthSpec>(
                 )?),
             )))),
         },
-        Protocol::BlobsByRange => match fork_name {
-            ForkName::Eip4844 => Ok(Some(RPCResponse::BlobsByRange(Arc::new(
-                BlobsSidecar::from_ssz_bytes(decoded_buffer)?,
-            )))),
-            _ => Err(RPCError::ErrorResponse(
-                RPCResponseErrorCode::InvalidRequest,
-                "Invalid forkname for blobsbyrange".to_string(),
-            )),
-        },
+        Protocol::BlobsByRange => {
+            Err(RPCError::InvalidData("blobs by range via v2".to_string()))
+        }
+        Protocol::BlobsByRoot => {
+            Err(RPCError::InvalidData("blobs by range via v2".to_string()))
+        }
         _ => Err(RPCError::ErrorResponse(
             RPCResponseErrorCode::InvalidRequest,
             "Invalid v2 request".to_string(),
@@ -927,6 +967,9 @@ mod tests {
             OutboundRequest::BlobsByRange(blbrange) => {
                 assert_eq!(decoded, InboundRequest::BlobsByRange(blbrange))
             }
+            OutboundRequest::BlobsByRoot(bbroot) => {
+                assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot))
+            }
             OutboundRequest::Ping(ping) => {
                 assert_eq!(decoded, InboundRequest::Ping(ping))
             }
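For V1 blob responses the decoder now consults the fork name recovered from the response's context bytes before choosing an SSZ type, and consumes it so stale context cannot be reused by a later chunk. A reduced sketch of the pattern (enum and payloads simplified; these are not the codec's real types):

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Fork { Base, Eip4844 }

    // The fork name is taken (consumed) so a second SSZ chunk on the same
    // stream cannot silently reuse stale context bytes.
    fn decode_blob_response(fork: &mut Option<Fork>, bytes: &[u8]) -> Result<Vec<u8>, String> {
        let fork = fork.take().ok_or("no context bytes provided")?;
        match fork {
            // In the real codec this would be BlobsSidecar::from_ssz_bytes(bytes).
            Fork::Eip4844 => Ok(bytes.to_vec()),
            _ => Err("invalid fork for blob response".to_string()),
        }
    }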
@@ -12,6 +12,7 @@ use std::ops::Deref;
 use std::sync::Arc;
 use strum::IntoStaticStr;
 use superstruct::superstruct;
+use types::SignedBeaconBlockAndBlobsSidecar;
 use types::{
     blobs_sidecar::BlobsSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec,
     Hash256, SignedBeaconBlock, Slot,
@@ -244,6 +245,20 @@ pub struct BlocksByRootRequest {
     pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
 }

+/// Request a number of beacon blocks and blobs from a peer.
+#[derive(Clone, Debug, PartialEq)]
+pub struct BlobsByRootRequest {
+    /// The list of beacon block roots being requested.
+    pub block_roots: VariableList<Hash256, MaxRequestBlocks>,
+}
+
+impl From<BlocksByRootRequest> for BlobsByRootRequest {
+    fn from(r: BlocksByRootRequest) -> Self {
+        let BlocksByRootRequest { block_roots } = r;
+        Self { block_roots }
+    }
+}
+
 /* RPC Handling and Grouping */
 // Collection of enums and structs used by the Codecs to encode/decode RPC messages

@@ -265,6 +280,9 @@ pub enum RPCResponse<T: EthSpec> {
     /// A response to a get LIGHTCLIENT_BOOTSTRAP request.
     LightClientBootstrap(LightClientBootstrap<T>),

+    /// A response to a get BLOBS_BY_ROOT request.
+    BlobsByRoot(Arc<SignedBeaconBlockAndBlobsSidecar<T>>),
+
     /// A PONG response to a PING request.
     Pong(Ping),

@@ -283,6 +301,9 @@ pub enum ResponseTermination {

     /// Blobs by range stream termination.
     BlobsByRange,
+
+    /// Blobs by root stream termination.
+    BlobsByRoot,
 }

 /// The structured response containing a result/code indicating success or failure
@@ -351,6 +372,7 @@ impl<T: EthSpec> RPCCodedResponse<T> {
                 RPCResponse::BlocksByRange(_) => true,
                 RPCResponse::BlocksByRoot(_) => true,
                 RPCResponse::BlobsByRange(_) => true,
+                RPCResponse::BlobsByRoot(_) => true,
                 RPCResponse::Pong(_) => false,
                 RPCResponse::MetaData(_) => false,
                 RPCResponse::LightClientBootstrap(_) => false,
@@ -387,6 +409,7 @@ impl<T: EthSpec> RPCResponse<T> {
             RPCResponse::BlocksByRange(_) => Protocol::BlocksByRange,
             RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot,
             RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange,
+            RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot,
             RPCResponse::Pong(_) => Protocol::Ping,
             RPCResponse::MetaData(_) => Protocol::MetaData,
             RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@@ -426,6 +449,13 @@ impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
             RPCResponse::BlobsByRange(blob) => {
                 write!(f, "BlobsByRange: Blob slot: {}", blob.beacon_block_slot)
             }
+            RPCResponse::BlobsByRoot(blob) => {
+                write!(
+                    f,
+                    "BlobsByRoot: Blob slot: {}",
+                    blob.blobs_sidecar.beacon_block_slot
+                )
+            }
             RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
             RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
             RPCResponse::LightClientBootstrap(bootstrap) => {
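`BlobsByRootRequest` carries the same root list as `BlocksByRootRequest`, so the diff adds a `From` impl and callers can convert with `.into()`. A small illustration of the idiom with the list type reduced to a plain `Vec` (everything here is a simplified mirror, not the crate's code):

    struct BlocksByRootRequest { block_roots: Vec<[u8; 32]> }
    struct BlobsByRootRequest { block_roots: Vec<[u8; 32]> }

    // Both request types carry the same roots, so From<> is a cheap field move.
    impl From<BlocksByRootRequest> for BlobsByRootRequest {
        fn from(r: BlocksByRootRequest) -> Self {
            Self { block_roots: r.block_roots }
        }
    }

    fn demo(blocks_req: BlocksByRootRequest) -> BlobsByRootRequest {
        // The same roots now ask for block+sidecar bundles instead of bare blocks.
        blocks_req.into()
    }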
@@ -133,6 +133,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
                 Duration::from_secs(10),
             )
             .n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10))
+            .n_every(Protocol::BlobsByRoot, 128, Duration::from_secs(10))
             .n_every(
                 Protocol::BlobsByRange,
                 MAX_REQUEST_BLOBS_SIDECARS,
@@ -308,6 +309,7 @@ where
                     ResponseTermination::BlocksByRange => Protocol::BlocksByRange,
                     ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot,
                     ResponseTermination::BlobsByRange => Protocol::BlobsByRange,
+                    ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot,
                 },
             ),
         },
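`n_every(protocol, n, period)` grants each peer a quota of `n` requests per `period` for that protocol; `BlobsByRoot` receives the same 128-per-10s budget as `BlocksByRoot`. A toy windowed-quota sketch of that semantics (not the limiter actually used by the crate):

    use std::time::{Duration, Instant};

    // Toy quota: at most `n` requests per `period`, reset as a whole window.
    struct Quota { n: u64, period: Duration, used: u64, window_start: Instant }

    impl Quota {
        fn allow(&mut self, now: Instant) -> bool {
            if now.duration_since(self.window_start) >= self.period {
                self.window_start = now; // new window, reset the count
                self.used = 0;
            }
            if self.used < self.n { self.used += 1; true } else { false }
        }
    }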
@@ -39,6 +39,7 @@ pub enum OutboundRequest<TSpec: EthSpec> {
     BlocksByRange(OldBlocksByRangeRequest),
     BlocksByRoot(BlocksByRootRequest),
     BlobsByRange(BlobsByRangeRequest),
+    BlobsByRoot(BlobsByRootRequest),
     LightClientBootstrap(LightClientBootstrapRequest),
     Ping(Ping),
     MetaData(PhantomData<TSpec>),
@@ -82,6 +83,11 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
                 Version::V1,
                 Encoding::SSZSnappy,
             )],
+            OutboundRequest::BlobsByRoot(_) => vec![ProtocolId::new(
+                Protocol::BlobsByRoot,
+                Version::V1,
+                Encoding::SSZSnappy,
+            )],
             OutboundRequest::Ping(_) => vec![ProtocolId::new(
                 Protocol::Ping,
                 Version::V1,
@@ -107,6 +113,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
             OutboundRequest::BlocksByRange(req) => req.count,
             OutboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
             OutboundRequest::BlobsByRange(req) => req.count,
+            OutboundRequest::BlobsByRoot(req) => req.block_roots.len() as u64,
             OutboundRequest::Ping(_) => 1,
             OutboundRequest::MetaData(_) => 1,
             OutboundRequest::LightClientBootstrap(_) => 1,
@@ -121,6 +128,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
             OutboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
             OutboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
             OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
+            OutboundRequest::BlobsByRoot(_) => Protocol::BlobsByRoot,
             OutboundRequest::Ping(_) => Protocol::Ping,
             OutboundRequest::MetaData(_) => Protocol::MetaData,
             OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@@ -136,6 +144,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
             OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
             OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
             OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
+            OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot,
             OutboundRequest::LightClientBootstrap(_) => unreachable!(),
             OutboundRequest::Status(_) => unreachable!(),
             OutboundRequest::Goodbye(_) => unreachable!(),
@@ -193,6 +202,7 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
             OutboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
             OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
             OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
+            OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
             OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
             OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
             OutboundRequest::LightClientBootstrap(bootstrap) => {
@@ -107,12 +107,6 @@ lazy_static! {
         .as_ssz_bytes()
         .len();

-    pub static ref BLOBS_SIDECAR_MIN: usize = BlobsSidecar::<MainnetEthSpec>::empty()
-        .as_ssz_bytes()
-        .len();
-
-    pub static ref BLOBS_SIDECAR_MAX: usize = *BLOBS_SIDECAR_MIN // Max size of variable length `blobs` field
-        + (MainnetEthSpec::max_blobs_per_block() * <Blob<MainnetEthSpec> as Encode>::ssz_fixed_len());
 }

 /// The maximum bytes that can be sent across the RPC pre-merge.
@@ -181,6 +175,8 @@ pub enum Protocol {
     BlocksByRoot,
     /// The `BlobsByRange` protocol name.
     BlobsByRange,
+    /// The `BlobsByRoot` protocol name.
+    BlobsByRoot,
     /// The `Ping` protocol name.
     Ping,
     /// The `MetaData` protocol name.
@@ -212,6 +208,7 @@ impl std::fmt::Display for Protocol {
             Protocol::BlocksByRange => "beacon_blocks_by_range",
             Protocol::BlocksByRoot => "beacon_blocks_by_root",
             Protocol::BlobsByRange => "blobs_sidecars_by_range",
+            Protocol::BlobsByRoot => "beacon_block_and_blobs_sidecar_by_root",
             Protocol::Ping => "ping",
             Protocol::MetaData => "metadata",
             Protocol::LightClientBootstrap => "light_client_bootstrap",
@@ -334,6 +331,9 @@ impl ProtocolId {
                 <BlobsByRangeRequest as Encode>::ssz_fixed_len(),
                 <BlobsByRangeRequest as Encode>::ssz_fixed_len(),
             ),
+            Protocol::BlobsByRoot => {
+                RpcLimits::new(*BLOCKS_BY_ROOT_REQUEST_MIN, *BLOCKS_BY_ROOT_REQUEST_MAX)
+            }
             Protocol::Ping => RpcLimits::new(
                 <Ping as Encode>::ssz_fixed_len(),
                 <Ping as Encode>::ssz_fixed_len(),
@@ -356,7 +356,11 @@ impl ProtocolId {
             Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
             Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
             Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
-            Protocol::BlobsByRange => RpcLimits::new(*BLOBS_SIDECAR_MIN, *BLOBS_SIDECAR_MAX),
+
+            //FIXME(sean) add blob sizes
+            Protocol::BlobsByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
+            Protocol::BlobsByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
+
             Protocol::Ping => RpcLimits::new(
                 <Ping as Encode>::ssz_fixed_len(),
                 <Ping as Encode>::ssz_fixed_len(),
@@ -475,6 +479,7 @@ pub enum InboundRequest<TSpec: EthSpec> {
     BlocksByRange(OldBlocksByRangeRequest),
     BlocksByRoot(BlocksByRootRequest),
     BlobsByRange(BlobsByRangeRequest),
+    BlobsByRoot(BlobsByRootRequest),
     LightClientBootstrap(LightClientBootstrapRequest),
     Ping(Ping),
     MetaData(PhantomData<TSpec>),
@@ -492,6 +497,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
             InboundRequest::BlocksByRange(req) => req.count,
             InboundRequest::BlocksByRoot(req) => req.block_roots.len() as u64,
             InboundRequest::BlobsByRange(req) => req.count,
+            InboundRequest::BlobsByRoot(req) => req.block_roots.len() as u64,
             InboundRequest::Ping(_) => 1,
             InboundRequest::MetaData(_) => 1,
             InboundRequest::LightClientBootstrap(_) => 1,
@@ -506,6 +512,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
             InboundRequest::BlocksByRange(_) => Protocol::BlocksByRange,
             InboundRequest::BlocksByRoot(_) => Protocol::BlocksByRoot,
             InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
+            InboundRequest::BlobsByRoot(_) => Protocol::BlobsByRoot,
             InboundRequest::Ping(_) => Protocol::Ping,
             InboundRequest::MetaData(_) => Protocol::MetaData,
             InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
@@ -521,6 +528,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
             InboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
             InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
             InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
+            InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot,
             InboundRequest::Status(_) => unreachable!(),
             InboundRequest::Goodbye(_) => unreachable!(),
             InboundRequest::Ping(_) => unreachable!(),
@@ -628,6 +636,7 @@ impl<TSpec: EthSpec> std::fmt::Display for InboundRequest<TSpec> {
             InboundRequest::BlocksByRange(req) => write!(f, "Blocks by range: {}", req),
             InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req),
             InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
+            InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req),
             InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
             InboundRequest::MetaData(_) => write!(f, "MetaData request"),
             InboundRequest::LightClientBootstrap(bootstrap) => {
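The removed `BLOBS_SIDECAR_MIN`/`BLOBS_SIDECAR_MAX` constants bounded response sizes as "empty encoding" and "empty encoding plus a full blob list"; the FIXME above notes that blob-specific limits still need to replace the block limits being reused. A sketch of that min/max computation for an SSZ container with one variable-length field (all constants here are illustrative; only the 4096 * 32 blob size follows EIP-4844):

    // min = size of the empty encoding; max = min + n_max * element size.
    const FIXED_PART: usize = 48 + 8; // e.g. a KZG commitment plus a slot (illustrative)
    const MAX_BLOBS_PER_BLOCK: usize = 4; // devnet-era figure, for illustration only
    const BLOB_SSZ_LEN: usize = 4096 * 32; // FIELD_ELEMENTS_PER_BLOB * bytes per element

    const SIDECAR_MIN: usize = FIXED_PART;
    const SIDECAR_MAX: usize = SIDECAR_MIN + MAX_BLOBS_PER_BLOCK * BLOB_SSZ_LEN;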
@@ -75,6 +75,8 @@ pub struct RPCRateLimiter {
     bbroots_rl: Limiter<PeerId>,
     /// BlobsByRange rate limiter.
     blbrange_rl: Limiter<PeerId>,
+    /// BlobsByRoot rate limiter.
+    blbroot_rl: Limiter<PeerId>,
     /// LightClientBootstrap rate limiter.
     lcbootstrap_rl: Limiter<PeerId>,
 }
@@ -104,6 +106,8 @@ pub struct RPCRateLimiterBuilder {
     bbroots_quota: Option<Quota>,
     /// Quota for the BlobsByRange protocol.
     blbrange_quota: Option<Quota>,
+    /// Quota for the BlobsByRoot protocol.
+    blbroot_quota: Option<Quota>,
     /// Quota for the LightClientBootstrap protocol.
     lcbootstrap_quota: Option<Quota>,
 }
@@ -125,6 +129,7 @@ impl RPCRateLimiterBuilder {
             Protocol::BlocksByRange => self.bbrange_quota = q,
             Protocol::BlocksByRoot => self.bbroots_quota = q,
             Protocol::BlobsByRange => self.blbrange_quota = q,
+            Protocol::BlobsByRoot => self.blbroot_quota = q,
             Protocol::LightClientBootstrap => self.lcbootstrap_quota = q,
         }
         self
@@ -173,6 +178,10 @@ impl RPCRateLimiterBuilder {
             .blbrange_quota
             .ok_or("BlobsByRange quota not specified")?;

+        let blbroots_quota = self
+            .blbroot_quota
+            .ok_or("BlobsByRoot quota not specified")?;
+
         // create the rate limiters
         let ping_rl = Limiter::from_quota(ping_quota)?;
         let metadata_rl = Limiter::from_quota(metadata_quota)?;
@@ -181,6 +190,7 @@ impl RPCRateLimiterBuilder {
         let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
         let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
         let blbrange_rl = Limiter::from_quota(blbrange_quota)?;
+        let blbroot_rl = Limiter::from_quota(blbroots_quota)?;
         let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quote)?;

         // check for peers to prune every 30 seconds, starting in 30 seconds
@@ -196,6 +206,7 @@ impl RPCRateLimiterBuilder {
             bbroots_rl,
             bbrange_rl,
             blbrange_rl,
+            blbroot_rl,
             lcbootstrap_rl,
             init_time: Instant::now(),
         })
@@ -221,6 +232,7 @@ impl RPCRateLimiter {
             Protocol::BlocksByRange => &mut self.bbrange_rl,
             Protocol::BlocksByRoot => &mut self.bbroots_rl,
             Protocol::BlobsByRange => &mut self.blbrange_rl,
+            Protocol::BlobsByRoot => &mut self.blbroot_rl,
             Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl,
         };
         check(limiter)
@@ -235,6 +247,7 @@ impl RPCRateLimiter {
         self.bbrange_rl.prune(time_since_start);
         self.bbroots_rl.prune(time_since_start);
         self.blbrange_rl.prune(time_since_start);
+        self.blbroot_rl.prune(time_since_start);
     }
 }
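Note the fail-fast shape of the builder: construction errors out unless every protocol, including the new `BlobsByRoot`, has a quota, so a forgotten registration surfaces at startup instead of becoming an unlimited RPC. A compact sketch of that pattern (field and types simplified, not the crate's builder):

    #[derive(Default)]
    struct LimiterBuilder {
        blobs_by_root_quota: Option<u64>, // illustrative field mirroring blbroot_quota
    }

    impl LimiterBuilder {
        fn build(self) -> Result<u64, &'static str> {
            // A missing quota is a build-time error, not a silently unlimited protocol.
            self.blobs_by_root_quota.ok_or("BlobsByRoot quota not specified")
        }
    }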
@@ -4,7 +4,7 @@ use libp2p::core::connection::ConnectionId;
 use types::light_client_bootstrap::LightClientBootstrap;
 use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};

-use crate::rpc::methods::BlobsByRangeRequest;
+use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
 use crate::rpc::{
     methods::{
         BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
@@ -12,6 +12,7 @@ use crate::rpc::{
     },
     OutboundRequest, SubstreamId,
 };
+use types::SignedBeaconBlockAndBlobsSidecar;

 /// Identifier of requests sent by a peer.
 pub type PeerRequestId = (ConnectionId, SubstreamId);
@@ -40,6 +41,8 @@ pub enum Request {
     BlocksByRoot(BlocksByRootRequest),
     // light client bootstrap request
     LightClientBootstrap(LightClientBootstrapRequest),
+    /// A request blobs root request.
+    BlobsByRoot(BlobsByRootRequest),
 }

 impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
@@ -55,6 +58,7 @@ impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
             }
             Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r),
             Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b),
+            Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r),
             Request::Status(s) => OutboundRequest::Status(s),
         }
     }
@@ -78,6 +82,8 @@ pub enum Response<TSpec: EthSpec> {
     BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
     /// A response to a LightClientUpdate request.
     LightClientBootstrap(LightClientBootstrap<TSpec>),
+    /// A response to a get BLOBS_BY_ROOT request.
+    BlobsByRoot(Option<Arc<SignedBeaconBlockAndBlobsSidecar<TSpec>>>),
 }

 impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TSpec> {
@@ -91,6 +97,10 @@ impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TS
                 Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)),
                 None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange),
             },
+            Response::BlobsByRoot(r) => match r {
+                Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRoot(b)),
+                None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRoot),
+            },
             Response::BlobsByRange(r) => match r {
                 Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)),
                 None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange),
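Like the other streamed RPCs, a `BlobsByRoot` response channel signals completion by mapping `None` to a stream-termination frame. A condensed sketch of that Option-to-frame mapping (types simplified):

    // Simplified mirror of the conversion above: Some(item) is a success chunk,
    // None closes the response stream for multi-response protocols.
    enum CodedResponse<T> {
        Success(T),
        StreamTermination,
    }

    fn encode_chunk<T>(chunk: Option<T>) -> CodedResponse<T> {
        match chunk {
            Some(item) => CodedResponse::Success(item),
            None => CodedResponse::StreamTermination,
        }
    }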
@@ -996,6 +996,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                     Request::BlobsByRange { .. } => {
                         metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_range"])
                     }
+                    Request::BlobsByRoot { .. } => {
+                        metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"])
+                    }
                 }
                 NetworkEvent::RequestReceived {
                     peer_id,
@@ -1267,6 +1270,11 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                         );
                         Some(event)
                     }
+                    InboundRequest::BlobsByRoot(req) => {
+                        let event =
+                            self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req));
+                        Some(event)
+                    }
                     InboundRequest::LightClientBootstrap(req) => {
                         let event = self.build_request(
                             peer_request_id,
@@ -1305,6 +1313,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                     RPCResponse::BlocksByRoot(resp) => {
                         self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp)))
                     }
+                    RPCResponse::BlobsByRoot(resp) => {
+                        self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp)))
+                    }
                     // Should never be reached
                     RPCResponse::LightClientBootstrap(bootstrap) => {
                         self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap))
@@ -1316,6 +1327,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                     ResponseTermination::BlocksByRange => Response::BlocksByRange(None),
                     ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None),
                     ResponseTermination::BlobsByRange => Response::BlobsByRange(None),
+                    ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None),
                 };
                 self.build_response(id, peer_id, response)
             }
@@ -254,6 +254,7 @@ pub(crate) fn create_whitelist_filter(
             add(AttesterSlashing);
             add(SignedContributionAndProof);
             add(BlsToExecutionChange);
+            add(BeaconBlocksAndBlobsSidecar);
             for id in 0..attestation_subnet_count {
                 add(Attestation(SubnetId::new(id)));
             }
@@ -13,7 +13,7 @@ pub type EnrSyncCommitteeBitfield<T> = BitVector<<T as EthSpec>::SyncCommitteeSu
 pub type Enr = discv5::enr::Enr<discv5::enr::CombinedKey>;

 pub use globals::NetworkGlobals;
-pub use pubsub::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar, SnappyTransform};
+pub use pubsub::{PubsubMessage, SnappyTransform};
 pub use subnet::{Subnet, SubnetDiscovery};
 pub use sync_state::{BackFillState, SyncState};
 pub use topics::{subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, CORE_TOPICS};
@@ -13,28 +13,18 @@ use std::sync::Arc;
 use tree_hash_derive::TreeHash;
 use types::{
     Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ForkContext, ForkName, ProposerSlashing,
-    SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase,
-    SignedBeaconBlockCapella, SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
-    SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
-    SyncCommitteeMessage, SyncSubnetId,
+    SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAltair,
+    SignedBeaconBlockAndBlobsSidecar, SignedBeaconBlockBase, SignedBeaconBlockCapella,
+    SignedBeaconBlockEip4844, SignedBeaconBlockMerge, SignedBlsToExecutionChange,
+    SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
 };

-/// TODO(pawan): move this to consensus/types? strictly not a consensus type
-#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)]
-#[serde(bound = "T: EthSpec")]
-pub struct SignedBeaconBlockAndBlobsSidecar<T: EthSpec> {
-    // TODO(pawan): switch to a SignedBeaconBlock and use ssz offsets for decoding to make this
-    // future proof?
-    pub beacon_block: SignedBeaconBlockEip4844<T>,
-    pub blobs_sidecar: BlobsSidecar<T>,
-}
-
 #[derive(Debug, Clone, PartialEq)]
 pub enum PubsubMessage<T: EthSpec> {
     /// Gossipsub message providing notification of a new block.
     BeaconBlock(Arc<SignedBeaconBlock<T>>),
     /// Gossipsub message providing notification of a new SignedBeaconBlock coupled with a blobs sidecar.
-    BeaconBlockAndBlobsSidecars(Arc<SignedBeaconBlockAndBlobsSidecar<T>>),
+    BeaconBlockAndBlobsSidecars(SignedBeaconBlockAndBlobsSidecar<T>),
     /// Gossipsub message providing notification of a Aggregate attestation and associated proof.
     AggregateAndProofAttestation(Box<SignedAggregateAndProof<T>>),
     /// Gossipsub message providing notification of a raw un-aggregated attestation with its shard id.
@@ -214,9 +204,9 @@ impl<T: EthSpec> PubsubMessage<T> {
                             let block_and_blobs_sidecar =
                                 SignedBeaconBlockAndBlobsSidecar::from_ssz_bytes(data)
                                     .map_err(|e| format!("{:?}", e))?;
-                            Ok(PubsubMessage::BeaconBlockAndBlobsSidecars(Arc::new(
+                            Ok(PubsubMessage::BeaconBlockAndBlobsSidecars(
                                 block_and_blobs_sidecar,
-                            )))
+                            ))
                         }
                         Some(
                             ForkName::Base
@@ -309,7 +299,7 @@ impl<T: EthSpec> std::fmt::Display for PubsubMessage<T> {
             PubsubMessage::BeaconBlockAndBlobsSidecars(block_and_blob) => write!(
                 f,
                 "Beacon block and Blobs Sidecar: slot: {}, blobs: {}",
-                block_and_blob.beacon_block.message.slot,
+                block_and_blob.beacon_block.message().slot(),
                 block_and_blob.blobs_sidecar.blobs.len(),
             ),
             PubsubMessage::AggregateAndProofAttestation(att) => write!(
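The block-and-blobs bundle type now lives in `consensus/types` as `SignedBeaconBlockAndBlobsSidecar` and is carried by value inside `PubsubMessage`, so a peer always receives the block and its sidecar as one gossip payload. A minimal SSZ round-trip sketch of a two-field container like it, in the `ssz_derive` style this tree already uses (field types reduced to placeholders):

    use ssz::{Decode as _, Encode as _};
    use ssz_derive::{Decode, Encode};

    // Reduced stand-in for the bundle: two SSZ fields decoded together.
    #[derive(Debug, PartialEq, Encode, Decode)]
    struct BlockAndSidecar {
        beacon_block: u64,  // placeholder for SignedBeaconBlockEip4844<T>
        blobs_sidecar: u64, // placeholder for BlobsSidecar<T>
    }

    fn round_trip() {
        let bundle = BlockAndSidecar { beacon_block: 1, blobs_sidecar: 2 };
        let bytes = bundle.as_ssz_bytes();
        let decoded = BlockAndSidecar::from_ssz_bytes(&bytes).unwrap();
        assert_eq!(bundle, decoded);
    }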
@ -45,9 +45,8 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExe
|
|||||||
use derivative::Derivative;
|
use derivative::Derivative;
|
||||||
use futures::stream::{Stream, StreamExt};
|
use futures::stream::{Stream, StreamExt};
|
||||||
use futures::task::Poll;
|
use futures::task::Poll;
|
||||||
use lighthouse_network::rpc::methods::BlobsByRangeRequest;
|
use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
|
||||||
use lighthouse_network::rpc::LightClientBootstrapRequest;
|
use lighthouse_network::rpc::LightClientBootstrapRequest;
|
||||||
use lighthouse_network::SignedBeaconBlockAndBlobsSidecar;
|
|
||||||
use lighthouse_network::{
|
use lighthouse_network::{
|
||||||
rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage},
|
rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage},
|
||||||
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId,
|
Client, MessageId, NetworkGlobals, PeerId, PeerRequestId,
|
||||||
@ -63,10 +62,11 @@ use std::time::Duration;
|
|||||||
use std::{cmp, collections::HashSet};
|
use std::{cmp, collections::HashSet};
|
||||||
use task_executor::TaskExecutor;
|
use task_executor::TaskExecutor;
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
use types::signed_block_and_blobs::BlockWrapper;
|
||||||
use types::{
|
use types::{
|
||||||
Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof,
|
Attestation, AttesterSlashing, Hash256, ProposerSlashing, SignedAggregateAndProof,
|
||||||
SignedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit,
|
SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, SignedBlsToExecutionChange,
|
||||||
SubnetId, SyncCommitteeMessage, SyncSubnetId,
|
SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId,
|
||||||
};
|
};
|
||||||
use work_reprocessing_queue::{
|
use work_reprocessing_queue::{
|
||||||
spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork,
|
spawn_reprocess_scheduler, QueuedAggregate, QueuedRpcBlock, QueuedUnaggregate, ReadyWork,
|
||||||
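`BlockWrapper` (imported above from `types::signed_block_and_blobs`) is what lets one processing pipeline carry either a bare block or a block coupled with its blobs sidecar. Its definition is not shown in this diff; judging only from the variants and accessors used below (`BlockWrapper::Block { block }`, `BlockWrapper::BlockAndBlob { block_sidecar_pair }`, `.block()`, `.block_cloned()`, `.slot()`, `.parent_root()`), it is presumably shaped roughly like this hypothetical sketch with simplified stand-in types:

use std::sync::Arc;

// Simplified stand-ins; the real structs carry full block and sidecar data
// and are generic over `EthSpec`.
struct SignedBeaconBlock;
struct SignedBeaconBlockAndBlobsSidecar {
    beacon_block: Arc<SignedBeaconBlock>,
}

// Hypothetical reconstruction of `types::signed_block_and_blobs::BlockWrapper`,
// inferred only from how this diff pattern-matches and calls it.
enum BlockWrapper {
    Block { block: Arc<SignedBeaconBlock> },
    BlockAndBlob { block_sidecar_pair: SignedBeaconBlockAndBlobsSidecar },
}

impl BlockWrapper {
    // Borrow the inner block regardless of variant, as in
    // `block.block().canonical_root()` further down in this diff.
    fn block(&self) -> &SignedBeaconBlock {
        match self {
            BlockWrapper::Block { block } => block.as_ref(),
            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
                block_sidecar_pair.beacon_block.as_ref()
            }
        }
    }

    // `.block_cloned()` as used when publishing a verified gossip block.
    fn block_cloned(&self) -> Arc<SignedBeaconBlock> {
        match self {
            BlockWrapper::Block { block } => Arc::clone(block),
            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
                Arc::clone(&block_sidecar_pair.beacon_block)
            }
        }
    }
}

fn main() {
    let wrapped = BlockWrapper::Block { block: Arc::new(SignedBeaconBlock) };
    let _borrowed: &SignedBeaconBlock = wrapped.block();
    let _owned: Arc<SignedBeaconBlock> = wrapped.block_cloned();
}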
@@ -164,6 +164,8 @@ const MAX_BLOBS_BY_RANGE_QUEUE_LEN: usize = 1_024;
 /// will be stored before we start dropping them.
 const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
 
+const MAX_BLOCK_AND_BLOBS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
+
 /// Maximum number of `SignedBlsToExecutionChange` messages to queue before dropping them.
 ///
 /// This value is set high to accommodate the large spike that is expected immediately after Capella
@@ -215,6 +217,7 @@ pub const STATUS_PROCESSING: &str = "status_processing";
 pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request";
 pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request";
 pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request";
+pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request";
 pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap";
 pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation";
 pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate";
@@ -427,7 +430,7 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
         message_id: MessageId,
         peer_id: PeerId,
         peer_client: Client,
-        block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
+        block_and_blobs: SignedBeaconBlockAndBlobsSidecar<T::EthSpec>,
         seen_timestamp: Duration,
     ) -> Self {
         Self {
@@ -548,7 +551,7 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
     /// sent to the other side of `result_tx`.
     pub fn rpc_beacon_block(
         block_root: Hash256,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
     ) -> Self {
@@ -567,7 +570,7 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
     /// Create a new work event to import `blocks` as a beacon chain segment.
     pub fn chain_segment(
         process_id: ChainSegmentProcessId,
-        blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        blocks: Vec<BlockWrapper<T::EthSpec>>,
     ) -> Self {
         Self {
             drop_during_sync: false,
@@ -646,6 +649,21 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
         }
     }
 
+    pub fn blobs_by_root_request(
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: BlobsByRootRequest,
+    ) -> Self {
+        Self {
+            drop_during_sync: false,
+            work: Work::BlobsByRootsRequest {
+                peer_id,
+                request_id,
+                request,
+            },
+        }
+    }
+
     /// Get a `str` representation of the type of work this `WorkEvent` contains.
     pub fn work_type(&self) -> &'static str {
         self.work.str_id()
@@ -768,7 +786,7 @@ pub enum Work<T: BeaconChainTypes> {
         message_id: MessageId,
         peer_id: PeerId,
         peer_client: Client,
-        block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
+        block_and_blobs: SignedBeaconBlockAndBlobsSidecar<T::EthSpec>,
         seen_timestamp: Duration,
     },
     DelayedImportBlock {
@@ -806,14 +824,14 @@ pub enum Work<T: BeaconChainTypes> {
     },
     RpcBlock {
         block_root: Hash256,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
         should_process: bool,
     },
     ChainSegment {
         process_id: ChainSegmentProcessId,
-        blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        blocks: Vec<BlockWrapper<T::EthSpec>>,
     },
     Status {
         peer_id: PeerId,
@@ -844,6 +862,11 @@ pub enum Work<T: BeaconChainTypes> {
         request_id: PeerRequestId,
         request: LightClientBootstrapRequest,
     },
+    BlobsByRootsRequest {
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: BlobsByRootRequest,
+    },
 }
 
 impl<T: BeaconChainTypes> Work<T> {
@@ -868,6 +891,7 @@ impl<T: BeaconChainTypes> Work<T> {
             Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST,
             Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST,
             Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST,
+            Work::BlobsByRootsRequest { .. } => BLOBS_BY_ROOTS_REQUEST,
             Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST,
             Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION,
             Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE,
@@ -1015,6 +1039,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
         let mut status_queue = FifoQueue::new(MAX_STATUS_QUEUE_LEN);
         let mut bbrange_queue = FifoQueue::new(MAX_BLOCKS_BY_RANGE_QUEUE_LEN);
         let mut bbroots_queue = FifoQueue::new(MAX_BLOCKS_BY_ROOTS_QUEUE_LEN);
+        let mut blbroots_queue = FifoQueue::new(MAX_BLOCK_AND_BLOBS_BY_ROOTS_QUEUE_LEN);
         let mut blbrange_queue = FifoQueue::new(MAX_BLOBS_BY_RANGE_QUEUE_LEN);
 
         let mut gossip_bls_to_execution_change_queue =
@@ -1110,8 +1135,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                         // blocks into the system.
                         if let Some(item) = chain_segment_queue.pop() {
                             self.spawn_worker(item, toolbox);
-                        // Check sync blocks before gossip blocks, since we've already explicitly
-                        // requested these blocks.
+                        // Sync block and blob segments have the same priority as normal chain
+                        // segments. This here might change depending on how batch processing
+                        // evolves.
                         } else if let Some(item) = rpc_block_queue.pop() {
                             self.spawn_worker(item, toolbox);
                         // Check delayed blocks before gossip blocks, the gossip blocks might rely
@@ -1246,6 +1272,10 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                             self.spawn_worker(item, toolbox);
                         } else if let Some(item) = bbroots_queue.pop() {
                             self.spawn_worker(item, toolbox);
+                        } else if let Some(item) = blbrange_queue.pop() {
+                            self.spawn_worker(item, toolbox);
+                        } else if let Some(item) = blbroots_queue.pop() {
+                            self.spawn_worker(item, toolbox);
                         // Check slashings after all other consensus messages so we prioritize
                         // following head.
                         //
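The scheduler above drains one prioritized queue at a time, so the strict ordering (chain segments first, then RPC blocks, down to the new blob queues) is encoded purely by the chain of `else if` pops. A std-only model of that pattern, using a simplified `FifoQueue` whose `push`/`pop` omit the logging arguments of the real one:

use std::collections::VecDeque;

// Bounded FIFO in the spirit of the beacon processor's `FifoQueue`:
// `push` drops new items once the queue is full, `pop` takes the oldest.
struct FifoQueue<T> {
    queue: VecDeque<T>,
    max_length: usize,
}

impl<T> FifoQueue<T> {
    fn new(max_length: usize) -> Self {
        Self { queue: VecDeque::new(), max_length }
    }
    fn push(&mut self, item: T) {
        if self.queue.len() < self.max_length {
            self.queue.push_back(item);
        }
        // The real implementation records a metric and logs when it drops.
    }
    fn pop(&mut self) -> Option<T> {
        self.queue.pop_front()
    }
}

fn main() {
    let mut chain_segment_queue = FifoQueue::new(64);
    let mut rpc_block_queue = FifoQueue::new(1_024);
    let mut blbroots_queue = FifoQueue::new(1_024);

    rpc_block_queue.push("rpc block");
    blbroots_queue.push("blobs-by-roots request");

    // One scheduling round: the first non-empty queue, in priority order, wins.
    let next = chain_segment_queue
        .pop()
        .or_else(|| rpc_block_queue.pop())
        .or_else(|| blbroots_queue.pop());
    assert_eq!(next, Some("rpc block"));
}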
@@ -1385,6 +1415,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                     Work::GossipBlsToExecutionChange { .. } => {
                         gossip_bls_to_execution_change_queue.push(work, work_id, &self.log)
                     }
+                    Work::BlobsByRootsRequest { .. } => {
+                        blbroots_queue.push(work, work_id, &self.log)
+                    }
                 }
             }
         }
@@ -1595,7 +1628,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                         message_id,
                         peer_id,
                         peer_client,
-                        block,
+                        BlockWrapper::Block { block },
                         work_reprocessing_tx,
                         duplicate_cache,
                         seen_timestamp,
@@ -1609,15 +1642,17 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 message_id,
                 peer_id,
                 peer_client,
-                block_and_blobs,
+                block_and_blobs: block_sidecar_pair,
                 seen_timestamp,
             } => task_spawner.spawn_async(async move {
                 worker
-                    .process_gossip_block_and_blobs_sidecar(
+                    .process_gossip_block(
                         message_id,
                         peer_id,
                         peer_client,
-                        block_and_blobs,
+                        BlockWrapper::BlockAndBlob { block_sidecar_pair },
+                        work_reprocessing_tx,
+                        duplicate_cache,
                         seen_timestamp,
                     )
                     .await
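Note the `block_and_blobs: block_sidecar_pair` pattern above: the `Work` field is bound under a new local name and then repackaged into the wrapper variant via struct shorthand, which is how both gossip arms converge on the one `process_gossip_block` entry point. In miniature (all types here are local stand-ins):

// Stand-in work item and wrapper for the rename-and-repackage idiom.
struct Work {
    block_and_blobs: String,
    seen_timestamp: u64,
}

enum BlockWrapper {
    Block { block: String },
    BlockAndBlob { block_sidecar_pair: String },
}

fn main() {
    let work = Work { block_and_blobs: "pair".to_string(), seen_timestamp: 7 };

    // Destructure with a field rename, as in the dispatch arm above:
    // field `block_and_blobs` is bound to the local `block_sidecar_pair`.
    let Work { block_and_blobs: block_sidecar_pair, seen_timestamp } = work;

    // Struct-variant shorthand then reuses the binding name directly.
    let wrapped = BlockWrapper::BlockAndBlob { block_sidecar_pair };
    let _ = (wrapped, seen_timestamp);
}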
@@ -1803,6 +1838,21 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                             request,
                         )
                     }),
+
+                    Work::BlobsByRootsRequest {
+                        peer_id,
+                        request_id,
+                        request,
+                    } => task_spawner.spawn_blocking_with_manual_send_idle(move |send_idle_on_drop| {
+                        worker.handle_blobs_by_root_request(
+                            sub_executor,
+                            send_idle_on_drop,
+                            peer_id,
+                            request_id,
+                            request,
+                        )
+                    }),
+
                     /*
                      * Processing of lightclient bootstrap requests from other peers.
                      */
@@ -30,6 +30,7 @@ use task_executor::TaskExecutor;
 use tokio::sync::mpsc::{self, Receiver, Sender};
 use tokio::time::error::Error as TimeError;
 use tokio_util::time::delay_queue::{DelayQueue, Key as DelayKey};
+use types::signed_block_and_blobs::BlockWrapper;
 use types::{Attestation, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, SubnetId};
 
 const TASK_NAME: &str = "beacon_processor_reprocess_queue";
@@ -110,7 +111,7 @@ pub struct QueuedGossipBlock<T: BeaconChainTypes> {
 /// It is queued for later import.
 pub struct QueuedRpcBlock<T: EthSpec> {
     pub block_root: Hash256,
-    pub block: Arc<SignedBeaconBlock<T>>,
+    pub block: BlockWrapper<T>,
     pub process_type: BlockProcessType,
     pub seen_timestamp: Duration,
     /// Indicates if the beacon chain should process this block or not.
@@ -394,7 +395,7 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
                     debug!(
                         log,
                         "Sending rpc block for reprocessing";
-                        "block_root" => %queued_rpc_block.block.canonical_root()
+                        "block_root" => %queued_rpc_block.block_root
                     );
                     if self
                         .ready_work_tx
@@ -9,10 +9,7 @@ use beacon_chain::{
     BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError,
     GossipVerifiedBlock, NotifyExecutionLayer,
 };
-use lighthouse_network::{
-    Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource,
-    SignedBeaconBlockAndBlobsSidecar,
-};
+use lighthouse_network::{Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource};
 use slog::{crit, debug, error, info, trace, warn};
 use slot_clock::SlotClock;
 use ssz::Encode;
@@ -20,11 +17,12 @@ use std::sync::Arc;
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
 use store::hot_cold_store::HotColdDBError;
 use tokio::sync::mpsc;
+use types::signed_block_and_blobs::BlockWrapper;
 use types::{
-    Attestation, AttesterSlashing, EthSpec, Hash256, IndexedAttestation, ProposerSlashing,
-    SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange,
-    SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId, SyncCommitteeMessage,
-    SyncSubnetId,
+    Attestation, AttesterSlashing, BlobsSidecar, EthSpec, Hash256, IndexedAttestation,
+    ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
+    SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, Slot, SubnetId,
+    SyncCommitteeMessage, SyncSubnetId,
 };
 
 use super::{
@@ -659,7 +657,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         message_id: MessageId,
         peer_id: PeerId,
         peer_client: Client,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
         duplicate_cache: DuplicateCache,
         seen_duration: Duration,
@@ -697,19 +695,6 @@ impl<T: BeaconChainTypes> Worker<T> {
         }
     }
 
-    #[allow(clippy::too_many_arguments)]
-    pub async fn process_gossip_block_and_blobs_sidecar(
-        self,
-        _message_id: MessageId,
-        _peer_id: PeerId,
-        _peer_client: Client,
-        _block_and_blob: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
-        _seen_timestamp: Duration,
-    ) {
-        //FIXME
-        unimplemented!()
-    }
-
     /// Process the beacon block received from the gossip network and
     /// if it passes gossip propagation criteria, tell the network thread to forward it.
     ///
@@ -719,7 +704,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         message_id: MessageId,
         peer_id: PeerId,
         peer_client: Client,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
         seen_duration: Duration,
     ) -> Option<GossipVerifiedBlock<T>> {
@@ -740,7 +725,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         let block_root = if let Ok(verified_block) = &verification_result {
             verified_block.block_root
         } else {
-            block.canonical_root()
+            block.block().canonical_root()
        };
 
         // Write the time the block was observed into delay cache.
@@ -855,6 +840,17 @@ impl<T: BeaconChainTypes> Worker<T> {
                 );
                 return None;
             }
+            Err(e @ BlockError::BlobValidation(_)) => {
+                warn!(self.log, "Could not verify blob for gossip. Rejecting the block and blob";
+                    "error" => %e);
+                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
+                self.gossip_penalize_peer(
+                    peer_id,
+                    PeerAction::LowToleranceError,
+                    "gossip_blob_low",
+                );
+                return None;
+            }
         };
 
         metrics::inc_counter(&metrics::BEACON_PROCESSOR_GOSSIP_BLOCK_VERIFIED_TOTAL);
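Invalid blobs are treated like other low-tolerance gossip failures: reject propagation and downscore the peer. A std-only sketch of that decision shape; the penalty string mirrors the diff, while the enums are trimmed stand-ins for the real `MessageAcceptance`/`BlockError` types:

#[derive(Debug, PartialEq)]
enum MessageAcceptance {
    Accept,
    Reject,
}

enum BlockError {
    BlobValidation(String),
    Other(String),
}

// Decide what to do with a gossip block-and-blobs message that failed
// verification, mirroring the new `BlobValidation` arm above.
fn gossip_verdict(result: Result<(), BlockError>) -> (MessageAcceptance, Option<&'static str>) {
    match result {
        Ok(()) => (MessageAcceptance::Accept, None),
        // Blob failure: reject the message and apply a low-tolerance penalty.
        Err(BlockError::BlobValidation(_)) => (MessageAcceptance::Reject, Some("gossip_blob_low")),
        // Placeholder for the many other error arms in the real handler.
        Err(BlockError::Other(_)) => (MessageAcceptance::Reject, None),
    }
}

fn main() {
    let (acceptance, penalty) =
        gossip_verdict(Err(BlockError::BlobValidation("bad kzg proof".into())));
    assert_eq!(acceptance, MessageAcceptance::Reject);
    assert_eq!(penalty, Some("gossip_blob_low"));
}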
@@ -947,7 +943,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         // This value is not used presently, but it might come in handy for debugging.
         _seen_duration: Duration,
     ) {
-        let block: Arc<_> = verified_block.block.clone();
+        let block = verified_block.block.block_cloned();
         let block_root = verified_block.block_root;
 
         match self
@@ -984,7 +980,7 @@ impl<T: BeaconChainTypes> Worker<T> {
 
                 self.chain.recompute_head_at_current_slot().await;
             }
-            Err(BlockError::ParentUnknown { .. }) => {
+            Err(BlockError::ParentUnknown(block)) => {
                 // Inform the sync manager to find parents for this block
                 // This should not occur. It should be checked by `should_forward_block`
                 error!(
@@ -4,15 +4,19 @@ use crate::status::ToStatusMessage;
 use crate::sync::SyncMessage;
 use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped};
 use itertools::process_results;
-use lighthouse_network::rpc::methods::{BlobsByRangeRequest, MAX_REQUEST_BLOBS_SIDECARS};
+use lighthouse_network::rpc::methods::{
+    BlobsByRangeRequest, BlobsByRootRequest, MAX_REQUEST_BLOBS_SIDECARS,
+};
 use lighthouse_network::rpc::StatusMessage;
 use lighthouse_network::rpc::*;
 use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo};
 use slog::{debug, error};
 use slot_clock::SlotClock;
+use ssz_types::VariableList;
 use std::sync::Arc;
 use task_executor::TaskExecutor;
-use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot};
+use types::light_client_bootstrap::LightClientBootstrap;
+use types::{Epoch, EthSpec, Hash256, SignedBeaconBlockAndBlobsSidecar, Slot};
 
 use super::Worker;
 
@@ -204,6 +208,106 @@ impl<T: BeaconChainTypes> Worker<T> {
             "load_blocks_by_root_blocks",
         )
     }
+
+    /// Handle a `BlobsByRoot` request from the peer.
+    pub fn handle_blobs_by_root_request(
+        self,
+        executor: TaskExecutor,
+        send_on_drop: SendOnDrop,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: BlobsByRootRequest,
+    ) {
+        // Fetching blocks is async because it may have to hit the execution layer for payloads.
+        executor.spawn(
+            async move {
+                let mut send_block_count = 0;
+                let mut send_response = true;
+                for root in request.block_roots.iter() {
+                    match self
+                        .chain
+                        .get_block_and_blobs_checking_early_attester_cache(root)
+                        .await
+                    {
+                        Ok((Some(block), Some(blobs))) => {
+                            self.send_response(
+                                peer_id,
+                                Response::BlobsByRoot(Some(Arc::new(SignedBeaconBlockAndBlobsSidecar {
+                                    beacon_block: block,
+                                    blobs_sidecar: blobs,
+                                }))),
+                                request_id,
+                            );
+                            send_block_count += 1;
+                        }
+                        Ok((None, None)) => {
+                            debug!(
+                                self.log,
+                                "Peer requested unknown block and blobs";
+                                "peer" => %peer_id,
+                                "request_root" => ?root
+                            );
+                        }
+                        Ok((Some(_), None)) => {
+                            debug!(
+                                self.log,
+                                "Peer requested block and blob, but no blob found";
+                                "peer" => %peer_id,
+                                "request_root" => ?root
+                            );
+                        }
+                        Ok((None, Some(_))) => {
+                            debug!(
+                                self.log,
+                                "Peer requested block and blob, but no block found";
+                                "peer" => %peer_id,
+                                "request_root" => ?root
+                            );
+                        }
+                        Err(BeaconChainError::BlockHashMissingFromExecutionLayer(_)) => {
+                            debug!(
+                                self.log,
+                                "Failed to fetch execution payload for block and blobs by root request";
+                                "block_root" => ?root,
+                                "reason" => "execution layer not synced",
+                            );
+                            // send the stream terminator
+                            self.send_error_response(
+                                peer_id,
+                                RPCResponseErrorCode::ResourceUnavailable,
+                                "Execution layer not synced".into(),
+                                request_id,
+                            );
+                            send_response = false;
+                            break;
+                        }
+                        Err(e) => {
+                            debug!(
+                                self.log,
+                                "Error fetching block for peer";
+                                "peer" => %peer_id,
+                                "request_root" => ?root,
+                                "error" => ?e,
+                            );
+                        }
+                    }
+                }
+                debug!(
+                    self.log,
+                    "Received BlobsByRoot Request";
+                    "peer" => %peer_id,
+                    "requested" => request.block_roots.len(),
+                    "returned" => %send_block_count
+                );
+
+                // send stream termination
+                if send_response {
+                    self.send_response(peer_id, Response::BlocksByRoot(None), request_id);
+                }
+                drop(send_on_drop);
+            },
+            "load_blobs_by_root_blocks",
+        )
+    }
 
     /// Handle a `BlocksByRoot` request from the peer.
     pub fn handle_light_client_bootstrap(
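Like the other by-root handlers, this one streams zero or more response chunks and then a terminator (`None`), unless an error response already closed the stream. A std-only sketch of that request/stream discipline, with a channel standing in for the network send path:

use std::collections::HashMap;
use std::sync::mpsc;

// Stand-in response chunk: Some(item) chunks followed by a None terminator.
type Chunk = Option<&'static str>;

// Serve a by-root request over a channel: send every item we can find,
// then exactly one stream terminator, mirroring the handler above.
fn handle_by_root(store: &HashMap<u64, &'static str>, roots: &[u64], tx: &mpsc::Sender<Chunk>) {
    for root in roots {
        if let Some(item) = store.get(root).copied() {
            tx.send(Some(item)).expect("receiver alive");
        }
        // Unknown roots are skipped (the real handler logs them at debug level).
    }
    tx.send(None).expect("receiver alive"); // stream termination
}

fn main() {
    let store = HashMap::from([(1, "block+blobs 1"), (3, "block+blobs 3")]);
    let (tx, rx) = mpsc::channel();
    handle_by_root(&store, &[1, 2, 3], &tx);

    let mut received = Vec::new();
    while let Some(chunk) = rx.recv().expect("sender alive") {
        received.push(chunk);
    }
    assert_eq!(received, vec!["block+blobs 1", "block+blobs 3"]);
}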
@@ -450,10 +554,10 @@ impl<T: BeaconChainTypes> Worker<T> {
     /// Handle a `BlobsByRange` request from the peer.
     pub fn handle_blobs_by_range_request(
         self,
-        _executor: TaskExecutor,
-        _send_on_drop: SendOnDrop,
+        executor: TaskExecutor,
+        send_on_drop: SendOnDrop,
         peer_id: PeerId,
-        _request_id: PeerRequestId,
+        request_id: PeerRequestId,
         mut req: BlobsByRangeRequest,
     ) {
         debug!(self.log, "Received BlobsByRange Request";
@@ -467,131 +571,123 @@ impl<T: BeaconChainTypes> Worker<T> {
             req.count = MAX_REQUEST_BLOBS_SIDECARS;
         }
 
-        //FIXME(sean) create the blobs iter
-
-        // let forwards_block_root_iter = match self
-        //     .chain
-        //     .forwards_iter_block_roots(Slot::from(req.start_slot))
-        // {
-        //     Ok(iter) => iter,
-        //     Err(BeaconChainError::HistoricalBlockError(
-        //         HistoricalBlockError::BlockOutOfRange {
-        //             slot,
-        //             oldest_block_slot,
-        //         },
-        //     )) => {
-        //         debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot);
-        //         return self.send_error_response(
-        //             peer_id,
-        //             RPCResponseErrorCode::ResourceUnavailable,
-        //             "Backfilling".into(),
-        //             request_id,
-        //         );
-        //     }
-        //     Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e),
-        // };
-        //
-        // // Pick out the required blocks, ignoring skip-slots.
-        // let mut last_block_root = None;
-        // let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
-        //     iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count))
-        //         // map skip slots to None
-        //         .map(|(root, _)| {
-        //             let result = if Some(root) == last_block_root {
-        //                 None
-        //             } else {
-        //                 Some(root)
-        //             };
-        //             last_block_root = Some(root);
-        //             result
-        //         })
-        //         .collect::<Vec<Option<Hash256>>>()
-        // });
-        //
-        // let block_roots = match maybe_block_roots {
-        //     Ok(block_roots) => block_roots,
-        //     Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e),
-        // };
-        //
-        // // remove all skip slots
-        // let block_roots = block_roots.into_iter().flatten().collect::<Vec<_>>();
-        //
-        // // Fetching blocks is async because it may have to hit the execution layer for payloads.
-        // executor.spawn(
-        //     async move {
-        //         let mut blocks_sent = 0;
-        //         let mut send_response = true;
-        //
-        //         for root in block_roots {
-        //             match self.chain.store.get_blobs(&root) {
-        //                 Ok(Some(blob)) => {
-        //                     blocks_sent += 1;
-        //                     self.send_network_message(NetworkMessage::SendResponse {
-        //                         peer_id,
-        //                         response: Response::BlobsByRange(Some(Arc::new(VariableList::new(vec![blob.message]).unwrap()))),
-        //                         id: request_id,
-        //                     });
-        //                 }
-        //                 Ok(None) => {
-        //                     error!(
-        //                         self.log,
-        //                         "Blob in the chain is not in the store";
-        //                         "request_root" => ?root
-        //                     );
-        //                     break;
-        //                 }
-        //                 Err(e) => {
-        //                     error!(
-        //                         self.log,
-        //                         "Error fetching block for peer";
-        //                         "block_root" => ?root,
-        //                         "error" => ?e
-        //                     );
-        //                     break;
-        //                 }
-        //             }
-        //         }
-        //
-        //         let current_slot = self
-        //             .chain
-        //             .slot()
-        //             .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot());
-        //
-        //         if blocks_sent < (req.count as usize) {
-        //             debug!(
-        //                 self.log,
-        //                 "BlocksByRange Response processed";
-        //                 "peer" => %peer_id,
-        //                 "msg" => "Failed to return all requested blocks",
-        //                 "start_slot" => req.start_slot,
-        //                 "current_slot" => current_slot,
-        //                 "requested" => req.count,
-        //                 "returned" => blocks_sent
-        //             );
-        //         } else {
-        //             debug!(
-        //                 self.log,
-        //                 "BlocksByRange Response processed";
-        //                 "peer" => %peer_id,
-        //                 "start_slot" => req.start_slot,
-        //                 "current_slot" => current_slot,
-        //                 "requested" => req.count,
-        //                 "returned" => blocks_sent
-        //             );
-        //         }
-        //
-        //         if send_response {
-        //             // send the stream terminator
-        //             self.send_network_message(NetworkMessage::SendResponse {
-        //                 peer_id,
-        //                 response: Response::BlobsByRange(None),
-        //                 id: request_id,
-        //             });
-        //         }
-        //
-        //         drop(send_on_drop);
-        //     },
-        //     "load_blocks_by_range_blocks",
-        // );
+        let forwards_block_root_iter = match self
+            .chain
+            .forwards_iter_block_roots(Slot::from(req.start_slot))
+        {
+            Ok(iter) => iter,
+            Err(BeaconChainError::HistoricalBlockError(
+                HistoricalBlockError::BlockOutOfRange {
+                    slot,
+                    oldest_block_slot,
+                },
+            )) => {
+                debug!(self.log, "Range request failed during backfill"; "requested_slot" => slot, "oldest_known_slot" => oldest_block_slot);
+                return self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Backfilling".into(),
+                    request_id,
+                );
+            }
+            Err(e) => return error!(self.log, "Unable to obtain root iter"; "error" => ?e),
+        };
+
+        // Pick out the required blocks, ignoring skip-slots.
+        let mut last_block_root = None;
+        let maybe_block_roots = process_results(forwards_block_root_iter, |iter| {
+            iter.take_while(|(_, slot)| slot.as_u64() < req.start_slot.saturating_add(req.count))
+                // map skip slots to None
+                .map(|(root, _)| {
+                    let result = if Some(root) == last_block_root {
+                        None
+                    } else {
+                        Some(root)
+                    };
+                    last_block_root = Some(root);
+                    result
+                })
+                .collect::<Vec<Option<Hash256>>>()
+        });
+
+        let block_roots = match maybe_block_roots {
+            Ok(block_roots) => block_roots,
+            Err(e) => return error!(self.log, "Error during iteration over blocks"; "error" => ?e),
+        };
+
+        // remove all skip slots
+        let block_roots = block_roots.into_iter().flatten().collect::<Vec<_>>();
+
+        let mut blobs_sent = 0;
+        let mut send_response = true;
+
+        for root in block_roots {
+            match self.chain.store.get_blobs(&root) {
+                Ok(Some(blob)) => {
+                    blobs_sent += 1;
+                    self.send_network_message(NetworkMessage::SendResponse {
+                        peer_id,
+                        response: Response::BlobsByRange(Some(Arc::new(blob))),
+                        id: request_id,
+                    });
+                }
+                Ok(None) => {
+                    error!(
+                        self.log,
+                        "Blob in the chain is not in the store";
+                        "request_root" => ?root
+                    );
+                    break;
+                }
+                Err(e) => {
+                    error!(
+                        self.log,
+                        "Error fetching blob for peer";
+                        "block_root" => ?root,
+                        "error" => ?e
+                    );
+                    break;
+                }
+            }
+        }
+
+        let current_slot = self
+            .chain
+            .slot()
+            .unwrap_or_else(|_| self.chain.slot_clock.genesis_slot());
+
+        if blobs_sent < (req.count as usize) {
+            debug!(
+                self.log,
+                "BlobsByRange Response processed";
+                "peer" => %peer_id,
+                "msg" => "Failed to return all requested blocks",
+                "start_slot" => req.start_slot,
+                "current_slot" => current_slot,
+                "requested" => req.count,
+                "returned" => blobs_sent
+            );
+        } else {
+            debug!(
+                self.log,
+                "BlobsByRange Response processed";
+                "peer" => %peer_id,
+                "start_slot" => req.start_slot,
+                "current_slot" => current_slot,
+                "requested" => req.count,
+                "returned" => blobs_sent
+            );
+        }
+
+        if send_response {
+            // send the stream terminator
+            self.send_network_message(NetworkMessage::SendResponse {
+                peer_id,
+                response: Response::BlobsByRange(None),
+                id: request_id,
+            });
+        }
+
+        drop(send_on_drop);
     }
 }
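The root iterator walks every slot, so a skipped slot repeats the previous block root; the `last_block_root` trick above maps those repeats to `None` and flattens them away. A std-only rendering of that dedup with a concrete skip-slot example (roots shrunk to bytes for brevity):

// (root, slot) pairs as yielded by a forwards block-root iterator.
// Slot 11 is a skip slot, so it repeats the root of slot 10.
fn main() {
    let iter = [(0xaau8, 10u64), (0xaa, 11), (0xbb, 12)];

    // Map repeats of the previous root to None, exactly like the
    // `last_block_root` closure in the handler above.
    let mut last_block_root = None;
    let block_roots: Vec<Option<u8>> = iter
        .iter()
        .map(|(root, _slot)| {
            let result = if Some(*root) == last_block_root {
                None
            } else {
                Some(*root)
            };
            last_block_root = Some(*root);
            result
        })
        .collect();

    // Remove all skip slots.
    let block_roots: Vec<u8> = block_roots.into_iter().flatten().collect();
    assert_eq!(block_roots, vec![0xaa, 0xbb]);
}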
@@ -16,7 +16,11 @@ use lighthouse_network::PeerAction;
 use slog::{debug, error, info, warn};
 use std::sync::Arc;
 use tokio::sync::mpsc;
-use types::{Epoch, Hash256, SignedBeaconBlock};
+use types::signed_block_and_blobs::BlockWrapper;
+use types::{
+    Epoch, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
+    SignedBeaconBlockAndBlobsSidecarDecode,
+};
 
 /// Id associated to a batch processing request, either a sync batch or a parent lookup.
 #[derive(Clone, Debug, PartialEq)]
@@ -43,7 +47,7 @@ impl<T: BeaconChainTypes> Worker<T> {
     pub async fn process_rpc_block(
         self,
         block_root: Hash256,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         seen_timestamp: Duration,
         process_type: BlockProcessType,
         reprocess_tx: mpsc::Sender<ReprocessQueueMessage<T>>,
@@ -132,7 +136,7 @@ impl<T: BeaconChainTypes> Worker<T> {
     pub async fn process_chain_segment(
         &self,
         sync_type: ChainSegmentProcessId,
-        downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        downloaded_blocks: Vec<BlockWrapper<T::EthSpec>>,
         notify_execution_layer: NotifyExecutionLayer,
     ) {
         let result = match sync_type {
@@ -187,7 +191,18 @@ impl<T: BeaconChainTypes> Worker<T> {
                 let end_slot = downloaded_blocks.last().map(|b| b.slot().as_u64());
                 let sent_blocks = downloaded_blocks.len();
 
-                match self.process_backfill_blocks(downloaded_blocks) {
+                let unwrapped = downloaded_blocks
+                    .into_iter()
+                    .map(|block| match block {
+                        BlockWrapper::Block { block } => block,
+                        //FIXME(sean) handle blobs in backfill
+                        BlockWrapper::BlockAndBlob {
+                            block_sidecar_pair: _,
+                        } => todo!(),
+                    })
+                    .collect();
+
+                match self.process_backfill_blocks(unwrapped) {
                     (_, Ok(_)) => {
                         debug!(self.log, "Backfill batch processed";
                             "batch_epoch" => epoch,
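Backfill still only understands bare blocks, so wrapped batches are unwrapped first, and the blob-carrying variant is an explicit `todo!()` (see the `FIXME(sean)` above), meaning a blob batch reaching backfill would panic. A std-only sketch of the unwrap with that hazard made visible (types are local stand-ins):

// Local stand-ins mirroring the unwrap in `process_chain_segment` above.
enum BlockWrapper {
    Block { block: &'static str },
    BlockAndBlob { block_sidecar_pair: (&'static str, Vec<u8>) },
}

// Unwrap a backfill batch down to bare blocks. As in the diff, a
// block-and-blobs entry is not handled yet and would panic via `todo!()`.
fn unwrap_backfill_batch(downloaded_blocks: Vec<BlockWrapper>) -> Vec<&'static str> {
    downloaded_blocks
        .into_iter()
        .map(|block| match block {
            BlockWrapper::Block { block } => block,
            // FIXME-style placeholder: blob batches in backfill are unimplemented.
            BlockWrapper::BlockAndBlob { block_sidecar_pair: _ } => todo!(),
        })
        .collect()
}

fn main() {
    let batch = vec![
        BlockWrapper::Block { block: "block 1" },
        BlockWrapper::Block { block: "block 2" },
    ];
    assert_eq!(unwrap_backfill_batch(batch), vec!["block 1", "block 2"]);
}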
@@ -259,11 +274,11 @@ impl<T: BeaconChainTypes> Worker<T> {
     /// Helper function to process blocks batches which only consumes the chain and blocks to process.
     async fn process_blocks<'a>(
         &self,
-        downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>,
+        downloaded_blocks: impl Iterator<Item = &'a BlockWrapper<T::EthSpec>>,
         count_unrealized: CountUnrealized,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> (usize, Result<(), ChainSegmentFailed>) {
-        let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect();
+        let blocks: Vec<_> = downloaded_blocks.cloned().collect();
         match self
             .chain
             .process_chain_segment(blocks, count_unrealized, notify_execution_layer)
@@ -171,6 +171,9 @@ impl<T: BeaconChainTypes> Router<T> {
             Request::BlobsByRange(request) => self
                 .processor
                 .on_blobs_by_range_request(peer_id, id, request),
+            Request::BlobsByRoot(request) => self
+                .processor
+                .on_blobs_by_root_request(peer_id, id, request),
             Request::LightClientBootstrap(request) => self
                 .processor
                 .on_lightclient_bootstrap(peer_id, id, request),
@@ -202,6 +205,10 @@ impl<T: BeaconChainTypes> Router<T> {
                 self.processor
                     .on_blobs_by_range_response(peer_id, request_id, beacon_blob);
             }
+            Response::BlobsByRoot(beacon_blob) => {
+                self.processor
+                    .on_blobs_by_root_response(peer_id, request_id, beacon_blob);
+            }
             Response::LightClientBootstrap(_) => unreachable!(),
         }
     }
@@ -6,8 +6,8 @@ use crate::status::status_message;
 use crate::sync::manager::RequestId as SyncId;
 use crate::sync::SyncMessage;
 use beacon_chain::{BeaconChain, BeaconChainTypes};
-use lighthouse_network::rpc::methods::BlobsByRangeRequest;
-use lighthouse_network::{rpc::*, SignedBeaconBlockAndBlobsSidecar};
+use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest};
+use lighthouse_network::rpc::*;
 use lighthouse_network::{
     Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, Request, Response,
 };
@@ -19,8 +19,9 @@ use store::SyncCommitteeMessage;
 use tokio::sync::mpsc;
 use types::{
     Attestation, AttesterSlashing, BlobsSidecar, EthSpec, ProposerSlashing,
-    SignedAggregateAndProof, SignedBeaconBlock, SignedBlsToExecutionChange,
-    SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncSubnetId,
+    SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
+    SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId,
+    SyncSubnetId,
 };
 
 /// Processes validated messages from the network. It relays necessary data to the syncing thread
@@ -173,6 +174,17 @@ impl<T: BeaconChainTypes> Processor<T> {
         ))
     }
 
+    pub fn on_blobs_by_root_request(
+        &mut self,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: BlobsByRootRequest,
+    ) {
+        self.send_beacon_processor_work(BeaconWorkEvent::blobs_by_root_request(
+            peer_id, request_id, request,
+        ))
+    }
+
     /// Handle a `LightClientBootstrap` request from the peer.
     pub fn on_lightclient_bootstrap(
         &mut self,
@@ -210,7 +222,10 @@ impl<T: BeaconChainTypes> Processor<T> {
                 SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. } => {
                     unreachable!("Block lookups do not request BBRange requests")
                 }
-                id @ (SyncId::BackFillSync { .. } | SyncId::RangeSync { .. }) => id,
+                id @ (SyncId::BackFillSync { .. }
+                | SyncId::RangeSync { .. }
+                | SyncId::BackFillSidecarPair { .. }
+                | SyncId::RangeSidecarPair { .. }) => id,
             },
             RequestId::Router => unreachable!("All BBRange requests belong to sync"),
         };
@@ -233,7 +248,7 @@ impl<T: BeaconChainTypes> Processor<T> {
         &mut self,
         peer_id: PeerId,
         request_id: RequestId,
-        blob_wrapper: Option<Arc<BlobsSidecar<T::EthSpec>>>,
+        blob_sidecar: Option<Arc<BlobsSidecar<T::EthSpec>>>,
     ) {
         trace!(
             self.log,
@@ -242,10 +257,10 @@ impl<T: BeaconChainTypes> Processor<T> {
         );
 
         if let RequestId::Sync(id) = request_id {
-            self.send_to_sync(SyncMessage::RpcBlob {
+            self.send_to_sync(SyncMessage::RpcGlob {
                 peer_id,
                 request_id: id,
-                blob_sidecar: blob_wrapper,
+                blob_sidecar,
                 seen_timestamp: timestamp_now(),
             });
         } else {
@@ -266,7 +281,10 @@ impl<T: BeaconChainTypes> Processor<T> {
         let request_id = match request_id {
             RequestId::Sync(sync_id) => match sync_id {
                 id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id,
-                SyncId::BackFillSync { .. } | SyncId::RangeSync { .. } => {
+                SyncId::BackFillSync { .. }
+                | SyncId::RangeSync { .. }
+                | SyncId::RangeSidecarPair { .. }
+                | SyncId::BackFillSidecarPair { .. } => {
                     unreachable!("Batch syncing do not request BBRoot requests")
                 }
             },
@@ -286,6 +304,39 @@ impl<T: BeaconChainTypes> Processor<T> {
         });
     }
 
+    /// Handle a `BlobsByRoot` response from the peer.
+    pub fn on_blobs_by_root_response(
+        &mut self,
+        peer_id: PeerId,
+        request_id: RequestId,
+        block_and_blobs: Option<Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>>,
+    ) {
+        let request_id = match request_id {
+            RequestId::Sync(sync_id) => match sync_id {
+                id @ (SyncId::SingleBlock { .. } | SyncId::ParentLookup { .. }) => id,
+                SyncId::BackFillSync { .. }
+                | SyncId::RangeSync { .. }
+                | SyncId::RangeSidecarPair { .. }
+                | SyncId::BackFillSidecarPair { .. } => {
+                    unreachable!("Batch syncing does not request BBRoot requests")
+                }
+            },
+            RequestId::Router => unreachable!("All BBRoot requests belong to sync"),
+        };
+
+        trace!(
+            self.log,
+            "Received BlockAndBlobssByRoot Response";
+            "peer" => %peer_id,
+        );
+        self.send_to_sync(SyncMessage::RpcBlockAndGlob {
+            peer_id,
+            request_id,
+            block_and_blobs,
+            seen_timestamp: timestamp_now(),
+        });
+    }
+
     /// Process a gossip message declaring a new block.
     ///
     /// Attempts to apply to block to the beacon chain. May queue the block for later processing.
@@ -312,7 +363,7 @@ impl<T: BeaconChainTypes> Processor<T> {
         message_id: MessageId,
         peer_id: PeerId,
         peer_client: Client,
-        block_and_blobs: Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>,
+        block_and_blobs: SignedBeaconBlockAndBlobsSidecar<T::EthSpec>,
     ) {
         self.send_beacon_processor_work(BeaconWorkEvent::gossip_block_and_blobs_sidecar(
             message_id,
@@ -24,7 +24,8 @@ use std::collections::{
     HashMap, HashSet,
 };
 use std::sync::Arc;
-use types::{Epoch, EthSpec, SignedBeaconBlock};
+use types::signed_block_and_blobs::BlockWrapper;
+use types::{Epoch, EthSpec};
 
 /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of
 /// blocks per batch are requested _at most_. A batch may request less blocks to account for
@@ -54,7 +55,7 @@ impl BatchConfig for BackFillBatchConfig {
     fn max_batch_processing_attempts() -> u8 {
         MAX_BATCH_PROCESSING_ATTEMPTS
     }
-    fn batch_attempt_hash<T: EthSpec>(blocks: &[Arc<SignedBeaconBlock<T>>]) -> u64 {
+    fn batch_attempt_hash<T: EthSpec>(blocks: &[BlockWrapper<T>]) -> u64 {
         use std::collections::hash_map::DefaultHasher;
         use std::hash::{Hash, Hasher};
         let mut hasher = DefaultHasher::new();
@@ -390,7 +391,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         batch_id: BatchId,
         peer_id: &PeerId,
         request_id: Id,
-        beacon_block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        beacon_block: Option<BlockWrapper<T::EthSpec>>,
     ) -> Result<ProcessResult, BackFillError> {
         // check if we have this batch
         let batch = match self.batches.get_mut(&batch_id) {
@@ -535,10 +536,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         let process_id = ChainSegmentProcessId::BackSyncBatchId(batch_id);
         self.current_processing_batch = Some(batch_id);
 
-        if let Err(e) = network
-            .processor_channel()
-            .try_send(BeaconWorkEvent::chain_segment(process_id, blocks))
-        {
+        let work_event = BeaconWorkEvent::chain_segment(process_id, blocks.into_wrapped_blocks());
+        if let Err(e) = network.processor_channel().try_send(work_event) {
             crit!(self.log, "Failed to send backfill segment to processor."; "msg" => "process_batch",
                 "error" => %e, "batch" => self.processing_target);
             // This is unlikely to happen but it would stall syncing since the batch now has no
@@ -953,8 +952,8 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         peer: PeerId,
     ) -> Result<(), BackFillError> {
         if let Some(batch) = self.batches.get_mut(&batch_id) {
-            let request = batch.to_blocks_by_range_request();
-            match network.backfill_blocks_by_range_request(peer, request, batch_id) {
+            let (request, is_blob_batch) = batch.to_blocks_by_range_request();
+            match network.backfill_blocks_by_range_request(peer, is_blob_batch, request, batch_id) {
                 Ok(request_id) => {
                     // inform the batch about the new request
                     if let Err(e) = batch.start_downloading_from_peer(peer, request_id) {
@@ -1054,7 +1053,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
         idle_peers.shuffle(&mut rng);
 
         while let Some(peer) = idle_peers.pop() {
-            if let Some(batch_id) = self.include_next_batch() {
+            if let Some(batch_id) = self.include_next_batch(network) {
                 // send the batch
                 self.send_batch(network, batch_id, peer)?;
             } else {
@@ -1067,7 +1066,7 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
 
     /// Creates the next required batch from the chain. If there are no more batches required,
     /// `false` is returned.
-    fn include_next_batch(&mut self) -> Option<BatchId> {
+    fn include_next_batch(&mut self, network: &mut SyncNetworkContext<T>) -> Option<BatchId> {
         // don't request batches beyond genesis;
         if self.last_batch_downloaded {
             return None;
@@ -1104,10 +1103,15 @@ impl<T: BeaconChainTypes> BackFillSync<T> {
                 self.to_be_downloaded = self
                     .to_be_downloaded
                    .saturating_sub(BACKFILL_EPOCHS_PER_BATCH);
-                self.include_next_batch()
+                self.include_next_batch(network)
             }
             Entry::Vacant(entry) => {
-                entry.insert(BatchInfo::new(&batch_id, BACKFILL_EPOCHS_PER_BATCH));
+                let batch_type = network.batch_type(batch_id);
+                entry.insert(BatchInfo::new(
+                    &batch_id,
+                    BACKFILL_EPOCHS_PER_BATCH,
+                    batch_type,
+                ));
                 if batch_id == 0 {
                     self.last_batch_downloaded = true;
                 }
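Each backfill batch now carries a type chosen by the network context. The body of `network.batch_type(batch_id)` is not shown in this diff; given the blob-batch plumbing it presumably keys off whether the batch's epoch falls at or after the EIP-4844 fork. A hedged std-only sketch of that decision, where the fork-epoch comparison and all names are assumptions:

// Assumed request flavors; the real type lives in the sync network context.
#[derive(Debug, PartialEq)]
enum ByRangeRequestType {
    Blocks,
    BlocksAndBlobs,
}

// Hypothetical reconstruction of the `batch_type` decision: batches whose
// epoch is at or past the (assumed) EIP-4844 fork epoch also need blobs.
fn batch_type(batch_epoch: u64, eip4844_fork_epoch: Option<u64>) -> ByRangeRequestType {
    match eip4844_fork_epoch {
        Some(fork_epoch) if batch_epoch >= fork_epoch => ByRangeRequestType::BlocksAndBlobs,
        _ => ByRangeRequestType::Blocks,
    }
}

fn main() {
    // Before the fork (or on a network without it), plain block batches.
    assert_eq!(batch_type(5, Some(10)), ByRangeRequestType::Blocks);
    assert_eq!(batch_type(5, None), ByRangeRequestType::Blocks);
    // From the fork epoch onwards, coupled block-and-blob batches.
    assert_eq!(batch_type(10, Some(10)), ByRangeRequestType::BlocksAndBlobs);
}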
|
@ -4,12 +4,15 @@ use std::time::Duration;
|
|||||||
|
|
||||||
use beacon_chain::{BeaconChainTypes, BlockError};
|
use beacon_chain::{BeaconChainTypes, BlockError};
|
||||||
use fnv::FnvHashMap;
|
use fnv::FnvHashMap;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use itertools::{Either, Itertools};
|
||||||
use lighthouse_network::{PeerAction, PeerId};
|
use lighthouse_network::{PeerAction, PeerId};
|
||||||
use lru_cache::LRUTimeCache;
|
use lru_cache::LRUTimeCache;
|
||||||
use slog::{debug, error, trace, warn, Logger};
|
use slog::{debug, error, trace, warn, Logger};
|
||||||
use smallvec::SmallVec;
|
use smallvec::SmallVec;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use store::{Hash256, SignedBeaconBlock};
|
use store::{Hash256, SignedBeaconBlock};
|
||||||
|
use types::signed_block_and_blobs::BlockWrapper;
|
||||||
|
|
||||||
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
|
use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
|
||||||
use crate::metrics;
|
use crate::metrics;
|
||||||
@ -32,7 +35,7 @@ mod single_block_lookup;
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests;
|
mod tests;
|
||||||
|
|
||||||
pub type RootBlockTuple<T> = (Hash256, Arc<SignedBeaconBlock<T>>);
|
pub type RootBlockTuple<T> = (Hash256, BlockWrapper<T>);
|
||||||
|
|
||||||
const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
|
const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
|
||||||
const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
|
const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;
|
||||||
@@ -110,7 +113,9 @@ impl<T: BeaconChainTypes> BlockLookups<T> {

         let mut single_block_request = SingleBlockRequest::new(hash, peer_id);

-        let (peer_id, request) = single_block_request.request_block().unwrap();
+        let (peer_id, request) = single_block_request
+            .request_block()
+            .expect("none of the possible failure cases apply for a newly created block lookup");
         if let Ok(request_id) = cx.single_block_lookup_request(peer_id, request) {
             self.single_block_lookups
                 .insert(request_id, single_block_request);
@@ -127,7 +132,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
     pub fn search_parent(
         &mut self,
         block_root: Hash256,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         peer_id: PeerId,
         cx: &mut SyncNetworkContext<T>,
     ) {
@@ -169,7 +174,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         &mut self,
         id: Id,
         peer_id: PeerId,
-        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        block: Option<BlockWrapper<T::EthSpec>>,
         seen_timestamp: Duration,
         cx: &mut SyncNetworkContext<T>,
     ) {
@@ -234,7 +239,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         &mut self,
         id: Id,
         peer_id: PeerId,
-        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        block: Option<BlockWrapper<T::EthSpec>>,
         seen_timestamp: Duration,
         cx: &mut SyncNetworkContext<T>,
     ) {
@@ -555,7 +560,9 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing();
         let process_id = ChainSegmentProcessId::ParentLookup(chain_hash);

-        match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) {
+        let work = WorkEvent::chain_segment(process_id, blocks);
+
+        match beacon_processor_send.try_send(work) {
             Ok(_) => {
                 self.processing_parent_lookups
                     .insert(chain_hash, (hashes, request));
@@ -659,7 +666,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
     fn send_block_for_processing(
         &mut self,
         block_root: Hash256,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
+        block: BlockWrapper<T::EthSpec>,
         duration: Duration,
         process_type: BlockProcessType,
         cx: &mut SyncNetworkContext<T>,
@@ -4,6 +4,7 @@ use lighthouse_network::PeerId;
 use std::sync::Arc;
 use store::{Hash256, SignedBeaconBlock};
 use strum::IntoStaticStr;
+use types::signed_block_and_blobs::BlockWrapper;

 use crate::sync::{
     manager::{Id, SLOT_IMPORT_TOLERANCE},
@@ -59,11 +60,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
             .any(|(root, _d_block)| root == block_root)
     }

-    pub fn new(
-        block_root: Hash256,
-        block: Arc<SignedBeaconBlock<T::EthSpec>>,
-        peer_id: PeerId,
-    ) -> Self {
+    pub fn new(block_root: Hash256, block: BlockWrapper<T::EthSpec>, peer_id: PeerId) -> Self {
         let current_parent_request = SingleBlockRequest::new(block.parent_root(), peer_id);

         Self {
@@ -98,7 +95,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
         self.current_parent_request.check_peer_disconnected(peer_id)
     }

-    pub fn add_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
+    pub fn add_block(&mut self, block: BlockWrapper<T::EthSpec>) {
         let next_parent = block.parent_root();
         let current_root = self.current_parent_request.hash;
         self.downloaded_blocks.push((current_root, block));
@@ -117,7 +114,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
         self,
     ) -> (
         Hash256,
-        Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        Vec<BlockWrapper<T::EthSpec>>,
         Vec<Hash256>,
         SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
     ) {
@@ -156,7 +153,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
     /// the processing result of the block.
     pub fn verify_block(
         &mut self,
-        block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        block: Option<BlockWrapper<T::EthSpec>>,
         failed_chains: &mut lru_cache::LRUTimeCache<Hash256>,
     ) -> Result<Option<RootBlockTuple<T::EthSpec>>, VerifyError> {
         let root_and_block = self.current_parent_request.verify_block(block)?;
@@ -1,13 +1,13 @@
-use std::collections::HashSet;
-use std::sync::Arc;
-
 use super::RootBlockTuple;
 use beacon_chain::get_block_root;
 use lighthouse_network::{rpc::BlocksByRootRequest, PeerId};
 use rand::seq::IteratorRandom;
 use ssz_types::VariableList;
+use std::collections::HashSet;
+use std::sync::Arc;
 use store::{EthSpec, Hash256, SignedBeaconBlock};
 use strum::IntoStaticStr;
+use types::signed_block_and_blobs::BlockWrapper;

 /// Object representing a single block lookup request.
 #[derive(PartialEq, Eq)]
@@ -105,7 +105,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
     /// Returns the block for processing if the response is what we expected.
     pub fn verify_block<T: EthSpec>(
         &mut self,
-        block: Option<Arc<SignedBeaconBlock<T>>>,
+        block: Option<BlockWrapper<T>>,
     ) -> Result<Option<RootBlockTuple<T>>, VerifyError> {
         match self.state {
             State::AwaitingDownload => {
@@ -116,7 +116,7 @@ impl<const MAX_ATTEMPTS: u8> SingleBlockRequest<MAX_ATTEMPTS> {
             Some(block) => {
                 // Compute the block root using this specific function so that we can get timing
                 // metrics.
-                let block_root = get_block_root(&block);
+                let block_root = get_block_root(block.block());
                 if block_root != self.hash {
                     // return an error and drop the block
                     // NOTE: we take this as a download failure to prevent counting the
@@ -225,7 +225,7 @@ mod tests {

         let mut sl = SingleBlockRequest::<4>::new(block.canonical_root(), peer_id);
         sl.request_block().unwrap();
-        sl.verify_block(Some(Arc::new(block))).unwrap().unwrap();
+        sl.verify_block(Some(block.into())).unwrap().unwrap();
     }

     #[test]
@@ -242,7 +242,7 @@ mod tests {

         // Now we receive the block and send it for processing
         sl.request_block().unwrap();
-        sl.verify_block(Some(Arc::new(block))).unwrap().unwrap();
+        sl.verify_block(Some(block.into())).unwrap().unwrap();

         // One processing failure maxes the available attempts
         sl.register_failure_processing();
@@ -10,11 +10,11 @@ use beacon_chain::builder::Witness;
 use beacon_chain::eth1_chain::CachingEth1Backend;
 use lighthouse_network::{NetworkGlobals, Request};
 use slog::{Drain, Level};
-use slot_clock::SystemTimeSlotClock;
+use slot_clock::{SlotClock, SystemTimeSlotClock};
 use store::MemoryStore;
 use tokio::sync::mpsc;
 use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-use types::MinimalEthSpec as E;
+use types::{EthSpec, MainnetEthSpec, MinimalEthSpec as E, Slot};

 type T = Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;

@@ -55,6 +55,7 @@ impl TestRig {
             network_tx,
             globals,
             beacon_processor_tx,
+            chain,
             log.new(slog::o!("component" => "network_context")),
         )
     };
@@ -157,7 +158,7 @@ fn test_single_block_lookup_happy_path() {

     // The peer provides the correct block, should not be penalized. Now the block should be sent
     // for processing.
-    bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx);
+    bl.single_block_lookup_response(id, peer_id, Some(block.into()), D, &mut cx);
     rig.expect_empty_network();
     rig.expect_block_process();

@@ -203,7 +204,7 @@ fn test_single_block_lookup_wrong_response() {

     // Peer sends something else. It should be penalized.
     let bad_block = rig.rand_block();
-    bl.single_block_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx);
+    bl.single_block_lookup_response(id, peer_id, Some(bad_block.into()), D, &mut cx);
     rig.expect_penalty();
    rig.expect_block_request(); // should be retried

@@ -242,7 +243,7 @@ fn test_single_block_lookup_becomes_parent_request() {

     // The peer provides the correct block, should not be penalized. Now the block should be sent
     // for processing.
-    bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx);
+    bl.single_block_lookup_response(id, peer_id, Some(block.clone().into()), D, &mut cx);
     rig.expect_empty_network();
     rig.expect_block_process();

@@ -251,11 +252,7 @@ fn test_single_block_lookup_becomes_parent_request() {

     // Send the stream termination. Peer should have not been penalized, and the request moved to a
     // parent request after processing.
-    bl.single_block_processed(
-        id,
-        BlockError::ParentUnknown(Arc::new(block)).into(),
-        &mut cx,
-    );
+    bl.single_block_processed(id, BlockError::ParentUnknown(block.into()).into(), &mut cx);
     assert_eq!(bl.single_block_lookups.len(), 0);
     rig.expect_parent_request();
     rig.expect_empty_network();
@@ -272,11 +269,11 @@ fn test_parent_lookup_happy_path() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(chain_hash, block.into(), peer_id, &mut cx);
     let id = rig.expect_parent_request();

     // Peer sends the right block, it should be sent for processing. Peer should not be penalized.
-    bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx);
+    bl.parent_lookup_response(id, peer_id, Some(parent.into()), D, &mut cx);
     rig.expect_block_process();
     rig.expect_empty_network();

@@ -300,12 +297,12 @@ fn test_parent_lookup_wrong_response() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(chain_hash, block.into(), peer_id, &mut cx);
     let id1 = rig.expect_parent_request();

     // Peer sends the wrong block, peer should be penalized and the block re-requested.
     let bad_block = rig.rand_block();
-    bl.parent_lookup_response(id1, peer_id, Some(Arc::new(bad_block)), D, &mut cx);
+    bl.parent_lookup_response(id1, peer_id, Some(bad_block.into()), D, &mut cx);
     rig.expect_penalty();
     let id2 = rig.expect_parent_request();

@@ -314,7 +311,7 @@ fn test_parent_lookup_wrong_response() {
     rig.expect_empty_network();

     // Send the right block this time.
-    bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx);
+    bl.parent_lookup_response(id2, peer_id, Some(parent.into()), D, &mut cx);
     rig.expect_block_process();

     // Processing succeeds, now the rest of the chain should be sent for processing.
@@ -337,7 +334,7 @@ fn test_parent_lookup_empty_response() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(chain_hash, block.into(), peer_id, &mut cx);
     let id1 = rig.expect_parent_request();

     // Peer sends an empty response, peer should be penalized and the block re-requested.
@@ -346,7 +343,7 @@ fn test_parent_lookup_empty_response() {
     let id2 = rig.expect_parent_request();

     // Send the right block this time.
-    bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx);
+    bl.parent_lookup_response(id2, peer_id, Some(parent.into()), D, &mut cx);
     rig.expect_block_process();

     // Processing succeeds, now the rest of the chain should be sent for processing.
@@ -369,7 +366,7 @@ fn test_parent_lookup_rpc_failure() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(chain_hash, block.into(), peer_id, &mut cx);
     let id1 = rig.expect_parent_request();

     // The request fails. It should be tried again.
@@ -377,7 +374,7 @@ fn test_parent_lookup_rpc_failure() {
     let id2 = rig.expect_parent_request();

     // Send the right block this time.
-    bl.parent_lookup_response(id2, peer_id, Some(Arc::new(parent)), D, &mut cx);
+    bl.parent_lookup_response(id2, peer_id, Some(parent.into()), D, &mut cx);
     rig.expect_block_process();

     // Processing succeeds, now the rest of the chain should be sent for processing.
@@ -400,7 +397,7 @@ fn test_parent_lookup_too_many_attempts() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(chain_hash, block.into(), peer_id, &mut cx);
     for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE {
         let id = rig.expect_parent_request();
         match i % 2 {
@@ -412,7 +409,7 @@ fn test_parent_lookup_too_many_attempts() {
             _ => {
                 // Send a bad block this time. It should be tried again.
                 let bad_block = rig.rand_block();
-                bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx);
+                bl.parent_lookup_response(id, peer_id, Some(bad_block.into()), D, &mut cx);
                 // Send the stream termination
                 bl.parent_lookup_response(id, peer_id, None, D, &mut cx);
                 rig.expect_penalty();
@@ -436,7 +433,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(block_hash, block.into(), peer_id, &mut cx);
     for i in 1..=parent_lookup::PARENT_FAIL_TOLERANCE {
         assert!(!bl.failed_chains.contains(&block_hash));
         let id = rig.expect_parent_request();
@@ -446,7 +443,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
         } else {
             // Send a bad block this time. It should be tried again.
             let bad_block = rig.rand_block();
-            bl.parent_lookup_response(id, peer_id, Some(Arc::new(bad_block)), D, &mut cx);
+            bl.parent_lookup_response(id, peer_id, Some(bad_block.into()), D, &mut cx);
             rig.expect_penalty();
         }
         if i < parent_lookup::PARENT_FAIL_TOLERANCE {
@@ -470,7 +467,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(block_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(block_hash, block.into(), peer_id, &mut cx);

     // Fail downloading the block
     for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) {
@@ -484,7 +481,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
         let id = dbg!(rig.expect_parent_request());
         assert!(!bl.failed_chains.contains(&block_hash));
         // send the right parent but fail processing
-        bl.parent_lookup_response(id, peer_id, Some(parent.clone()), D, &mut cx);
+        bl.parent_lookup_response(id, peer_id, Some(parent.clone().into()), D, &mut cx);
         bl.parent_block_processed(block_hash, BlockError::InvalidSignature.into(), &mut cx);
         bl.parent_lookup_response(id, peer_id, None, D, &mut cx);
         rig.expect_penalty();
@@ -511,12 +508,12 @@ fn test_parent_lookup_too_deep() {
     let peer_id = PeerId::random();
     let trigger_block = blocks.pop().unwrap();
     let chain_hash = trigger_block.canonical_root();
-    bl.search_parent(chain_hash, Arc::new(trigger_block), peer_id, &mut cx);
+    bl.search_parent(chain_hash, trigger_block.into(), peer_id, &mut cx);

     for block in blocks.into_iter().rev() {
         let id = rig.expect_parent_request();
         // the block
-        bl.parent_lookup_response(id, peer_id, Some(Arc::new(block.clone())), D, &mut cx);
+        bl.parent_lookup_response(id, peer_id, Some(block.clone().into()), D, &mut cx);
         // the stream termination
         bl.parent_lookup_response(id, peer_id, None, D, &mut cx);
         // the processing request
@@ -524,7 +521,7 @@ fn test_parent_lookup_too_deep() {
         // the processing result
         bl.parent_block_processed(
             chain_hash,
-            BlockError::ParentUnknown(Arc::new(block)).into(),
+            BlockError::ParentUnknown(block.into()).into(),
             &mut cx,
         )
     }
@@ -540,7 +537,7 @@ fn test_parent_lookup_disconnection() {
     let trigger_block = rig.rand_block();
     bl.search_parent(
         trigger_block.canonical_root(),
-        Arc::new(trigger_block),
+        trigger_block.into(),
         peer_id,
         &mut cx,
     );
@@ -561,7 +558,7 @@ fn test_single_block_lookup_ignored_response() {

     // The peer provides the correct block, should not be penalized. Now the block should be sent
     // for processing.
-    bl.single_block_lookup_response(id, peer_id, Some(Arc::new(block)), D, &mut cx);
+    bl.single_block_lookup_response(id, peer_id, Some(block.into()), D, &mut cx);
     rig.expect_empty_network();
     rig.expect_block_process();

@@ -587,11 +584,11 @@ fn test_parent_lookup_ignored_response() {
     let peer_id = PeerId::random();

     // Trigger the request
-    bl.search_parent(chain_hash, Arc::new(block), peer_id, &mut cx);
+    bl.search_parent(chain_hash, block.into(), peer_id, &mut cx);
     let id = rig.expect_parent_request();

     // Peer sends the right block, it should be sent for processing. Peer should not be penalized.
-    bl.parent_lookup_response(id, peer_id, Some(Arc::new(parent)), D, &mut cx);
+    bl.parent_lookup_response(id, peer_id, Some(parent.into()), D, &mut cx);
     rig.expect_block_process();
     rig.expect_empty_network();

@@ -41,6 +41,7 @@ use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH};
 use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
 use crate::service::NetworkMessage;
 use crate::status::ToStatusMessage;
+use crate::sync::range_sync::ExpectedBatchTy;
 use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, EngineState};
 use futures::StreamExt;
 use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
@@ -53,7 +54,10 @@ use std::ops::Sub;
 use std::sync::Arc;
 use std::time::Duration;
 use tokio::sync::mpsc;
-use types::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot};
+use types::signed_block_and_blobs::BlockWrapper;
+use types::{
+    BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, Slot,
+};

 /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync
 /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a
@@ -75,12 +79,16 @@ pub enum RequestId {
     ParentLookup { id: Id },
     /// Request was from the backfill sync algorithm.
     BackFillSync { id: Id },
+    /// Backfill request for blocks and sidecars.
+    BackFillSidecarPair { id: Id },
     /// The request was from a chain in the range sync algorithm.
     RangeSync { id: Id },
+    /// The request was from a chain in range, asking for ranges of blocks and sidecars.
+    RangeSidecarPair { id: Id },
 }

 #[derive(Debug)]
-/// A message than can be sent to the sync manager thread.
+/// A message that can be sent to the sync manager thread.
 pub enum SyncMessage<T: EthSpec> {
     /// A useful peer has been discovered.
     AddPeer(PeerId, SyncInfo),
@@ -93,16 +101,24 @@ pub enum SyncMessage<T: EthSpec> {
         seen_timestamp: Duration,
     },

-    /// A blob has been received from RPC.
-    RpcBlob {
-        peer_id: PeerId,
+    /// A blob has been received from the RPC.
+    RpcGlob {
         request_id: RequestId,
+        peer_id: PeerId,
         blob_sidecar: Option<Arc<BlobsSidecar<T>>>,
         seen_timestamp: Duration,
     },

+    /// A block and blobs have been received from the RPC.
+    RpcBlockAndGlob {
+        request_id: RequestId,
+        peer_id: PeerId,
+        block_and_blobs: Option<Arc<SignedBeaconBlockAndBlobsSidecar<T>>>,
+        seen_timestamp: Duration,
+    },
+
     /// A block with an unknown parent has been received.
-    UnknownBlock(PeerId, Arc<SignedBeaconBlock<T>>, Hash256),
+    UnknownBlock(PeerId, BlockWrapper<T>, Hash256),

     /// A peer has sent an object that references a block that is unknown. This triggers the
     /// manager to attempt to find the block matching the unknown hash.
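With the two coupled variants added, every handler below has to dispatch on six request families rather than four. As a compact orientation aid (a sketch, not code from this change):

// Sketch: the six request families the sync manager now routes.
fn request_family(request_id: &RequestId) -> &'static str {
    match request_id {
        RequestId::SingleBlock { .. } => "single 'by root' block lookup",
        RequestId::ParentLookup { .. } => "parent chain lookup",
        RequestId::BackFillSync { .. } => "backfill, blocks only",
        RequestId::BackFillSidecarPair { .. } => "backfill, coupled blocks and blob sidecars",
        RequestId::RangeSync { .. } => "range sync, blocks only",
        RequestId::RangeSidecarPair { .. } => "range sync, coupled blocks and blob sidecars",
    }
}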
@ -215,6 +231,7 @@ pub fn spawn<T: BeaconChainTypes>(
|
|||||||
network_send,
|
network_send,
|
||||||
network_globals.clone(),
|
network_globals.clone(),
|
||||||
beacon_processor_send,
|
beacon_processor_send,
|
||||||
|
beacon_chain.clone(),
|
||||||
log.clone(),
|
log.clone(),
|
||||||
),
|
),
|
||||||
range_sync: RangeSync::new(beacon_chain.clone(), log.clone()),
|
range_sync: RangeSync::new(beacon_chain.clone(), log.clone()),
|
||||||
@ -277,7 +294,25 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
|||||||
.parent_lookup_failed(id, peer_id, &mut self.network);
|
.parent_lookup_failed(id, peer_id, &mut self.network);
|
||||||
}
|
}
|
||||||
RequestId::BackFillSync { id } => {
|
RequestId::BackFillSync { id } => {
|
||||||
if let Some(batch_id) = self.network.backfill_sync_response(id, true) {
|
if let Some(batch_id) = self
|
||||||
|
.network
|
||||||
|
.backfill_request_failed(id, ExpectedBatchTy::OnlyBlock)
|
||||||
|
{
|
||||||
|
match self
|
||||||
|
.backfill_sync
|
||||||
|
.inject_error(&mut self.network, batch_id, &peer_id, id)
|
||||||
|
{
|
||||||
|
Ok(_) => {}
|
||||||
|
Err(_) => self.update_sync_state(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
RequestId::BackFillSidecarPair { id } => {
|
||||||
|
if let Some(batch_id) = self
|
||||||
|
.network
|
||||||
|
.backfill_request_failed(id, ExpectedBatchTy::OnlyBlockBlobs)
|
||||||
|
{
|
||||||
match self
|
match self
|
||||||
.backfill_sync
|
.backfill_sync
|
||||||
.inject_error(&mut self.network, batch_id, &peer_id, id)
|
.inject_error(&mut self.network, batch_id, &peer_id, id)
|
||||||
@ -288,7 +323,25 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
RequestId::RangeSync { id } => {
|
RequestId::RangeSync { id } => {
|
||||||
if let Some((chain_id, batch_id)) = self.network.range_sync_response(id, true) {
|
if let Some((chain_id, batch_id)) = self
|
||||||
|
.network
|
||||||
|
.range_sync_request_failed(id, ExpectedBatchTy::OnlyBlock)
|
||||||
|
{
|
||||||
|
self.range_sync.inject_error(
|
||||||
|
&mut self.network,
|
||||||
|
peer_id,
|
||||||
|
batch_id,
|
||||||
|
chain_id,
|
||||||
|
id,
|
||||||
|
);
|
||||||
|
self.update_sync_state()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
RequestId::RangeSidecarPair { id } => {
|
||||||
|
if let Some((chain_id, batch_id)) = self
|
||||||
|
.network
|
||||||
|
.range_sync_request_failed(id, ExpectedBatchTy::OnlyBlockBlobs)
|
||||||
|
{
|
||||||
self.range_sync.inject_error(
|
self.range_sync.inject_error(
|
||||||
&mut self.network,
|
&mut self.network,
|
||||||
peer_id,
|
peer_id,
|
||||||
@ -592,8 +645,23 @@ impl<T: BeaconChainTypes> SyncManager<T> {
|
|||||||
.block_lookups
|
.block_lookups
|
||||||
.parent_chain_processed(chain_hash, result, &mut self.network),
|
.parent_chain_processed(chain_hash, result, &mut self.network),
|
||||||
},
|
},
|
||||||
//FIXME(sean)
|
SyncMessage::RpcGlob {
|
||||||
SyncMessage::RpcBlob { .. } => todo!(),
|
request_id,
|
||||||
|
peer_id,
|
||||||
|
blob_sidecar,
|
||||||
|
seen_timestamp,
|
||||||
|
} => self.rpc_sidecar_received(request_id, peer_id, blob_sidecar, seen_timestamp),
|
||||||
|
SyncMessage::RpcBlockAndGlob {
|
||||||
|
request_id,
|
||||||
|
peer_id,
|
||||||
|
block_and_blobs,
|
||||||
|
seen_timestamp,
|
||||||
|
} => self.rpc_block_sidecar_pair_received(
|
||||||
|
request_id,
|
||||||
|
peer_id,
|
||||||
|
block_and_blobs,
|
||||||
|
seen_timestamp,
|
||||||
|
),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
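For context, a hedged sketch of the sending side, which is not part of these hunks: the network router that receives a `BlobsByRange` chunk would hand it to the sync manager roughly as below. The `forward_glob` name and the channel parameter are assumptions introduced here for illustration only.

use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;

// Sketch only: the real router-side plumbing is outside this diff.
fn forward_glob<T: EthSpec>(
    sync_send: &mpsc::UnboundedSender<SyncMessage<T>>,
    request_id: RequestId,
    peer_id: PeerId,
    blob_sidecar: Option<Arc<BlobsSidecar<T>>>,
    seen_timestamp: Duration,
) {
    // A `None` sidecar is the stream termination, mirroring the block path.
    let _ = sync_send.send(SyncMessage::RpcGlob {
        request_id,
        peer_id,
        blob_sidecar,
        seen_timestamp,
    });
}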
@@ -663,28 +731,29 @@ impl<T: BeaconChainTypes> SyncManager<T> {
             RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response(
                 id,
                 peer_id,
-                beacon_block,
+                beacon_block.map(|block| BlockWrapper::Block { block }),
                 seen_timestamp,
                 &mut self.network,
             ),
             RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response(
                 id,
                 peer_id,
-                beacon_block,
+                beacon_block.map(|block| BlockWrapper::Block { block }),
                 seen_timestamp,
                 &mut self.network,
             ),
             RequestId::BackFillSync { id } => {
-                if let Some(batch_id) = self
-                    .network
-                    .backfill_sync_response(id, beacon_block.is_none())
-                {
+                if let Some((batch_id, block)) = self.network.backfill_sync_block_response(
+                    id,
+                    beacon_block,
+                    ExpectedBatchTy::OnlyBlock,
+                ) {
                     match self.backfill_sync.on_block_response(
                         &mut self.network,
                         batch_id,
                         &peer_id,
                         id,
-                        beacon_block,
+                        block,
                     ) {
                         Ok(ProcessResult::SyncCompleted) => self.update_sync_state(),
                         Ok(ProcessResult::Successful) => {}
@@ -697,8 +766,108 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                 }
             }
             RequestId::RangeSync { id } => {
-                if let Some((chain_id, batch_id)) =
-                    self.network.range_sync_response(id, beacon_block.is_none())
+                if let Some((chain_id, batch_id, block)) = self.network.range_sync_block_response(
+                    id,
+                    beacon_block,
+                    ExpectedBatchTy::OnlyBlock,
+                ) {
+                    self.range_sync.blocks_by_range_response(
+                        &mut self.network,
+                        peer_id,
+                        chain_id,
+                        batch_id,
+                        id,
+                        block,
+                    );
+                    self.update_sync_state();
+                }
+            }
+
+            RequestId::BackFillSidecarPair { id } => {
+                if let Some((batch_id, block)) = self.network.backfill_sync_block_response(
+                    id,
+                    beacon_block,
+                    ExpectedBatchTy::OnlyBlockBlobs,
+                ) {
+                    match self.backfill_sync.on_block_response(
+                        &mut self.network,
+                        batch_id,
+                        &peer_id,
+                        id,
+                        block,
+                    ) {
+                        Ok(ProcessResult::SyncCompleted) => self.update_sync_state(),
+                        Ok(ProcessResult::Successful) => {}
+                        Err(_error) => {
+                            // The backfill sync has failed, errors are reported
+                            // within.
+                            self.update_sync_state();
+                        }
+                    }
+                }
+            }
+            RequestId::RangeSidecarPair { id } => {
+                if let Some((chain_id, batch_id, block)) = self.network.range_sync_block_response(
+                    id,
+                    beacon_block,
+                    ExpectedBatchTy::OnlyBlockBlobs,
+                ) {
+                    self.range_sync.blocks_by_range_response(
+                        &mut self.network,
+                        peer_id,
+                        chain_id,
+                        batch_id,
+                        id,
+                        block,
+                    );
+                    self.update_sync_state();
+                }
+            }
+        }
+    }
+
+    fn rpc_sidecar_received(
+        &mut self,
+        request_id: RequestId,
+        peer_id: PeerId,
+        maybe_sidecar: Option<Arc<BlobsSidecar<<T>::EthSpec>>>,
+        seen_timestamp: Duration,
+    ) {
+        match request_id {
+            RequestId::SingleBlock { id } | RequestId::ParentLookup { id } => {
+                unreachable!("There is no such thing as a singular 'by root' glob request that is not accompanied by the block")
+            }
+            RequestId::BackFillSync { .. } => {
+                unreachable!("An only blocks request does not receive sidecars")
+            }
+            RequestId::BackFillSidecarPair { id } => {
+                if let Some((batch_id, block)) = self
+                    .network
+                    .backfill_sync_sidecar_response(id, maybe_sidecar)
+                {
+                    match self.backfill_sync.on_block_response(
+                        &mut self.network,
+                        batch_id,
+                        &peer_id,
+                        id,
+                        block,
+                    ) {
+                        Ok(ProcessResult::SyncCompleted) => self.update_sync_state(),
+                        Ok(ProcessResult::Successful) => {}
+                        Err(_error) => {
+                            // The backfill sync has failed, errors are reported
+                            // within.
+                            self.update_sync_state();
+                        }
+                    }
+                }
+            }
+            RequestId::RangeSync { .. } => {
+                unreachable!("An only blocks range request does not receive sidecars")
+            }
+            RequestId::RangeSidecarPair { id } => {
+                if let Some((chain_id, batch_id, block)) =
+                    self.network.range_sync_sidecar_response(id, maybe_sidecar)
                 {
                     self.range_sync.blocks_by_range_response(
                         &mut self.network,
@@ -706,13 +875,50 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                         chain_id,
                         batch_id,
                         id,
-                        beacon_block,
+                        block,
                     );
                     self.update_sync_state();
                 }
             }
         }
     }
+
+    fn rpc_block_sidecar_pair_received(
+        &mut self,
+        request_id: RequestId,
+        peer_id: PeerId,
+        block_sidecar_pair: Option<Arc<SignedBeaconBlockAndBlobsSidecar<T::EthSpec>>>,
+        seen_timestamp: Duration,
+    ) {
+        match request_id {
+            RequestId::SingleBlock { id } => self.block_lookups.single_block_lookup_response(
+                id,
+                peer_id,
+                block_sidecar_pair.map(|block_sidecar_pair| BlockWrapper::BlockAndBlob {
+                    // TODO: why is this in an arc
+                    block_sidecar_pair: (*block_sidecar_pair).clone(),
+                }),
+                seen_timestamp,
+                &mut self.network,
+            ),
+            RequestId::ParentLookup { id } => self.block_lookups.parent_lookup_response(
+                id,
+                peer_id,
+                block_sidecar_pair.map(|block_sidecar_pair| BlockWrapper::BlockAndBlob {
+                    // TODO: why is this in an arc
+                    block_sidecar_pair: (*block_sidecar_pair).clone(),
+                }),
+                seen_timestamp,
+                &mut self.network,
+            ),
+            RequestId::BackFillSync { .. }
+            | RequestId::BackFillSidecarPair { .. }
+            | RequestId::RangeSync { .. }
+            | RequestId::RangeSidecarPair { .. } => unreachable!(
+                "since range requests are not block-glob coupled, this should never be reachable"
+            ),
+        }
+    }
 }

 impl<IgnoredOkVal, T: EthSpec> From<Result<IgnoredOkVal, BlockError<T>>> for BlockProcessResult<T> {
@@ -2,20 +2,71 @@
 //! channel and stores a global RPC ID to perform requests.

 use super::manager::{Id, RequestId as SyncRequestId};
-use super::range_sync::{BatchId, ChainId};
+use super::range_sync::{BatchId, ChainId, ExpectedBatchTy};
 use crate::beacon_processor::WorkEvent;
 use crate::service::{NetworkMessage, RequestId};
 use crate::status::ToStatusMessage;
-use beacon_chain::{BeaconChainTypes, EngineState};
+use beacon_chain::{BeaconChain, BeaconChainTypes, EngineState};
 use fnv::FnvHashMap;
+use lighthouse_network::rpc::methods::BlobsByRangeRequest;
 use lighthouse_network::rpc::{BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason};
 use lighthouse_network::{Client, NetworkGlobals, PeerAction, PeerId, ReportSource, Request};
 use slog::{debug, trace, warn};
+use slot_clock::SlotClock;
+use std::collections::hash_map::Entry;
+use std::collections::VecDeque;
 use std::sync::Arc;
 use tokio::sync::mpsc;
+use types::signed_block_and_blobs::BlockWrapper;
+use types::{
+    BlobsSidecar, ChainSpec, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar,
+};
+
+#[derive(Debug, Default)]
+struct BlockBlobRequestInfo<T: EthSpec> {
+    /// Blocks we have received, awaiting their corresponding sidecar.
+    accumulated_blocks: VecDeque<Arc<SignedBeaconBlock<T>>>,
+    /// Sidecars we have received, awaiting their corresponding block.
+    accumulated_sidecars: VecDeque<Arc<BlobsSidecar<T>>>,
+    /// Whether the individual RPC request for blocks is finished or not.
+    is_blocks_rpc_finished: bool,
+    /// Whether the individual RPC request for sidecars is finished or not.
+    is_sidecar_rpc_finished: bool,
+}
+
+impl<T: EthSpec> BlockBlobRequestInfo<T> {
+    pub fn add_block_response(&mut self, maybe_block: Option<Arc<SignedBeaconBlock<T>>>) {
+        match maybe_block {
+            Some(block) => self.accumulated_blocks.push_back(block),
+            None => self.is_blocks_rpc_finished = true,
+        }
+    }
+
+    pub fn add_sidecar_response(&mut self, maybe_sidecar: Option<Arc<BlobsSidecar<T>>>) {
+        match maybe_sidecar {
+            Some(sidecar) => self.accumulated_sidecars.push_back(sidecar),
+            None => self.is_sidecar_rpc_finished = true,
+        }
+    }
+
+    pub fn pop_response(&mut self) -> Option<SignedBeaconBlockAndBlobsSidecar<T>> {
+        if !self.accumulated_blocks.is_empty() && !self.accumulated_sidecars.is_empty() {
+            let beacon_block = self.accumulated_blocks.pop_front().expect("non empty");
+            let blobs_sidecar = self.accumulated_sidecars.pop_front().expect("non empty");
+            return Some(SignedBeaconBlockAndBlobsSidecar {
+                beacon_block,
+                blobs_sidecar,
+            });
+        }
+        None
+    }
+
+    pub fn is_finished(&self) -> bool {
+        self.is_blocks_rpc_finished && self.is_sidecar_rpc_finished
+    }
+}

 /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id.

 pub struct SyncNetworkContext<T: BeaconChainTypes> {
     /// The network channel to relay messages to the Network service.
     network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
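To make the pairing semantics of `BlockBlobRequestInfo` concrete, here is a small usage sketch under the definitions above; constructing real `SignedBeaconBlock` and `BlobsSidecar` values is elided by taking them as parameters, and `MainnetEthSpec` is chosen arbitrarily.

use std::sync::Arc;
use types::{BlobsSidecar, MainnetEthSpec, SignedBeaconBlock};

// Responses from the two coupled streams may interleave; `pop_response` only
// yields once one block and one sidecar are both buffered.
fn pairing_demo(
    block: Arc<SignedBeaconBlock<MainnetEthSpec>>,
    sidecar: Arc<BlobsSidecar<MainnetEthSpec>>,
) {
    let mut info = BlockBlobRequestInfo::<MainnetEthSpec>::default();

    // The block half of the coupled request arrives first.
    info.add_block_response(Some(block));
    assert!(info.pop_response().is_none()); // still waiting on the sidecar

    // The sidecar half arrives; a complete pair can now be popped.
    info.add_sidecar_response(Some(sidecar));
    assert!(info.pop_response().is_some());

    // Stream terminations on both halves mark the whole request finished.
    info.add_block_response(None);
    info.add_sidecar_response(None);
    assert!(info.is_finished());
}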
@ -32,6 +83,13 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
|
|||||||
/// BlocksByRange requests made by backfill syncing.
|
/// BlocksByRange requests made by backfill syncing.
|
||||||
backfill_requests: FnvHashMap<Id, BatchId>,
|
backfill_requests: FnvHashMap<Id, BatchId>,
|
||||||
|
|
||||||
|
/// BlocksByRange requests paired with BlobsByRange requests made by the range.
|
||||||
|
range_sidecar_pair_requests:
|
||||||
|
FnvHashMap<Id, (ChainId, BatchId, BlockBlobRequestInfo<T::EthSpec>)>,
|
||||||
|
|
||||||
|
/// BlocksByRange requests paired with BlobsByRange requests made by the backfill sync.
|
||||||
|
backfill_sidecar_pair_requests: FnvHashMap<Id, (BatchId, BlockBlobRequestInfo<T::EthSpec>)>,
|
||||||
|
|
||||||
/// Whether the ee is online. If it's not, we don't allow access to the
|
/// Whether the ee is online. If it's not, we don't allow access to the
|
||||||
/// `beacon_processor_send`.
|
/// `beacon_processor_send`.
|
||||||
execution_engine_state: EngineState,
|
execution_engine_state: EngineState,
|
||||||
@ -39,6 +97,8 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
|
|||||||
/// Channel to send work to the beacon processor.
|
/// Channel to send work to the beacon processor.
|
||||||
beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
|
beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
|
||||||
|
|
||||||
|
chain: Arc<BeaconChain<T>>,
|
||||||
|
|
||||||
/// Logger for the `SyncNetworkContext`.
|
/// Logger for the `SyncNetworkContext`.
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
}
|
}
|
||||||
@ -48,16 +108,20 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
|
network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||||
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
|
network_globals: Arc<NetworkGlobals<T::EthSpec>>,
|
||||||
beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
|
beacon_processor_send: mpsc::Sender<WorkEvent<T>>,
|
||||||
|
chain: Arc<BeaconChain<T>>,
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
SyncNetworkContext {
|
||||||
network_send,
|
network_send,
|
||||||
execution_engine_state: EngineState::Online, // always assume `Online` at the start
|
|
||||||
network_globals,
|
network_globals,
|
||||||
request_id: 1,
|
request_id: 1,
|
||||||
range_requests: FnvHashMap::default(),
|
range_requests: Default::default(),
|
||||||
backfill_requests: FnvHashMap::default(),
|
backfill_requests: Default::default(),
|
||||||
|
range_sidecar_pair_requests: Default::default(),
|
||||||
|
backfill_sidecar_pair_requests: Default::default(),
|
||||||
|
execution_engine_state: EngineState::Online, // always assume `Online` at the start
|
||||||
beacon_processor_send,
|
beacon_processor_send,
|
||||||
|
chain,
|
||||||
log,
|
log,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -104,10 +168,13 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
pub fn blocks_by_range_request(
|
pub fn blocks_by_range_request(
|
||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
|
batch_type: ExpectedBatchTy,
|
||||||
request: BlocksByRangeRequest,
|
request: BlocksByRangeRequest,
|
||||||
chain_id: ChainId,
|
chain_id: ChainId,
|
||||||
batch_id: BatchId,
|
batch_id: BatchId,
|
||||||
) -> Result<Id, &'static str> {
|
) -> Result<Id, &'static str> {
|
||||||
|
match batch_type {
|
||||||
|
ExpectedBatchTy::OnlyBlock => {
|
||||||
trace!(
|
trace!(
|
||||||
self.log,
|
self.log,
|
||||||
"Sending BlocksByRange Request";
|
"Sending BlocksByRange Request";
|
||||||
@ -126,14 +193,55 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.range_requests.insert(id, (chain_id, batch_id));
|
self.range_requests.insert(id, (chain_id, batch_id));
|
||||||
Ok(id)
|
Ok(id)
|
||||||
}
|
}
|
||||||
|
ExpectedBatchTy::OnlyBlockBlobs => {
|
||||||
|
debug!(
|
||||||
|
self.log,
|
||||||
|
"Sending BlockBlock by range request";
|
||||||
|
"method" => "Mixed by range request",
|
||||||
|
"count" => request.count,
|
||||||
|
"peer" => %peer_id,
|
||||||
|
);
|
||||||
|
|
||||||
|
// create the shared request id. This is fine since the rpc handles substream ids.
|
||||||
|
let id = self.next_id();
|
||||||
|
let request_id = RequestId::Sync(SyncRequestId::RangeSidecarPair { id });
|
||||||
|
|
||||||
|
// Create the blob request based on the blob request.
|
||||||
|
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
|
||||||
|
start_slot: request.start_slot,
|
||||||
|
count: request.count,
|
||||||
|
});
|
||||||
|
let blocks_request = Request::BlocksByRange(request);
|
||||||
|
|
||||||
|
// Send both requests. Make sure both can be sent.
|
||||||
|
self.send_network_msg(NetworkMessage::SendRequest {
|
||||||
|
peer_id,
|
||||||
|
request: blocks_request,
|
||||||
|
request_id,
|
||||||
|
})?;
|
||||||
|
self.send_network_msg(NetworkMessage::SendRequest {
|
||||||
|
peer_id,
|
||||||
|
request: blobs_request,
|
||||||
|
request_id,
|
||||||
|
})?;
|
||||||
|
let block_blob_info = BlockBlobRequestInfo::default();
|
||||||
|
self.range_sidecar_pair_requests
|
||||||
|
.insert(id, (chain_id, batch_id, block_blob_info));
|
||||||
|
Ok(id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// A blocks by range request sent by the backfill sync algorithm
|
/// A blocks by range request sent by the backfill sync algorithm
|
||||||
pub fn backfill_blocks_by_range_request(
|
pub fn backfill_blocks_by_range_request(
|
||||||
&mut self,
|
&mut self,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
|
batch_type: ExpectedBatchTy,
|
||||||
request: BlocksByRangeRequest,
|
request: BlocksByRangeRequest,
|
||||||
batch_id: BatchId,
|
batch_id: BatchId,
|
||||||
) -> Result<Id, &'static str> {
|
) -> Result<Id, &'static str> {
|
||||||
|
match batch_type {
|
||||||
|
ExpectedBatchTy::OnlyBlock => {
|
||||||
trace!(
|
trace!(
|
||||||
self.log,
|
self.log,
|
||||||
"Sending backfill BlocksByRange Request";
|
"Sending backfill BlocksByRange Request";
|
||||||
@ -152,26 +260,203 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
|
|||||||
self.backfill_requests.insert(id, batch_id);
|
self.backfill_requests.insert(id, batch_id);
|
||||||
Ok(id)
|
Ok(id)
|
||||||
}
|
}
|
||||||
|
ExpectedBatchTy::OnlyBlockBlobs => {
|
||||||
|
debug!(
|
||||||
|
self.log,
|
||||||
|
"Sending BlockBlock by range request";
|
||||||
|
"method" => "Mixed by range request",
|
||||||
|
"count" => request.count,
|
||||||
|
"peer" => %peer_id,
|
||||||
|
);
|
||||||
|
|
||||||
|
// create the shared request id. This is fine since the rpc handles substream ids.
|
||||||
|
let id = self.next_id();
|
||||||
|
let request_id = RequestId::Sync(SyncRequestId::RangeSidecarPair { id });
|
||||||
|
|
||||||
|
// Create the blob request based on the blob request.
|
||||||
|
let blobs_request = Request::BlobsByRange(BlobsByRangeRequest {
|
||||||
|
start_slot: request.start_slot,
|
||||||
|
count: request.count,
|
||||||
|
});
|
||||||
|
let blocks_request = Request::BlocksByRange(request);
|
||||||
|
|
||||||
|
// Send both requests. Make sure both can be sent.
|
||||||
|
self.send_network_msg(NetworkMessage::SendRequest {
|
||||||
|
peer_id,
|
||||||
|
request: blocks_request,
|
||||||
|
request_id,
|
||||||
|
})?;
|
||||||
|
self.send_network_msg(NetworkMessage::SendRequest {
|
||||||
|
peer_id,
|
||||||
|
request: blobs_request,
|
||||||
|
request_id,
|
||||||
|
})?;
|
||||||
|
let block_blob_info = BlockBlobRequestInfo::default();
|
||||||
|
self.backfill_sidecar_pair_requests
|
||||||
|
.insert(id, (batch_id, block_blob_info));
|
||||||
|
Ok(id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Received a blocks by range response.
|
/// Received a blocks by range response.
|
||||||
pub fn range_sync_response(
|
pub fn range_sync_block_response(
|
||||||
&mut self,
|
&mut self,
|
||||||
request_id: Id,
|
request_id: Id,
|
||||||
remove: bool,
|
maybe_block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
|
||||||
|
batch_type: ExpectedBatchTy,
|
||||||
|
) -> Option<(ChainId, BatchId, Option<BlockWrapper<T::EthSpec>>)> {
|
||||||
|
match batch_type {
|
||||||
|
ExpectedBatchTy::OnlyBlockBlobs => {
|
||||||
|
match self.range_sidecar_pair_requests.entry(request_id) {
|
||||||
|
Entry::Occupied(mut entry) => {
|
||||||
|
let (chain_id, batch_id, info) = entry.get_mut();
|
||||||
|
let chain_id = chain_id.clone();
|
||||||
|
let batch_id = batch_id.clone();
|
||||||
|
info.add_block_response(maybe_block);
|
||||||
|
let maybe_block = info.pop_response().map(|block_sidecar_pair| {
|
||||||
|
BlockWrapper::BlockAndBlob { block_sidecar_pair }
|
||||||
|
});
|
||||||
|
if info.is_finished() {
|
||||||
|
entry.remove();
|
||||||
|
}
|
||||||
|
Some((chain_id, batch_id, maybe_block))
|
||||||
|
}
|
||||||
|
Entry::Vacant(_) => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ExpectedBatchTy::OnlyBlock => {
|
||||||
|
// if the request is just for blocks then it can be removed on a stream termination
|
||||||
|
match maybe_block {
|
||||||
|
Some(block) => {
|
||||||
|
self.range_requests
|
||||||
|
.get(&request_id)
|
||||||
|
.cloned()
|
||||||
|
.map(|(chain_id, batch_id)| {
|
||||||
|
(chain_id, batch_id, Some(BlockWrapper::Block { block }))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
None => self
|
||||||
|
.range_requests
|
||||||
|
.remove(&request_id)
|
||||||
|
.map(|(chain_id, batch_id)| (chain_id, batch_id, None)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn range_sync_sidecar_response(
|
||||||
|
&mut self,
|
||||||
|
request_id: Id,
|
||||||
|
maybe_sidecar: Option<Arc<BlobsSidecar<T::EthSpec>>>,
|
||||||
|
) -> Option<(ChainId, BatchId, Option<BlockWrapper<T::EthSpec>>)> {
|
||||||
|
match self.range_sidecar_pair_requests.entry(request_id) {
|
||||||
|
Entry::Occupied(mut entry) => {
|
||||||
|
let (chain_id, batch_id, info) = entry.get_mut();
|
||||||
|
let chain_id = chain_id.clone();
|
||||||
|
let batch_id = batch_id.clone();
|
||||||
|
info.add_sidecar_response(maybe_sidecar);
|
||||||
|
let maybe_block = info
|
||||||
|
.pop_response()
|
||||||
|
.map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair });
|
||||||
|
if info.is_finished() {
|
||||||
|
entry.remove();
|
||||||
|
}
|
||||||
|
Some((chain_id, batch_id, maybe_block))
|
||||||
|
}
|
||||||
|
Entry::Vacant(_) => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn range_sync_request_failed(
|
||||||
|
&mut self,
|
||||||
|
request_id: Id,
|
||||||
|
batch_type: ExpectedBatchTy,
|
||||||
) -> Option<(ChainId, BatchId)> {
|
) -> Option<(ChainId, BatchId)> {
|
||||||
if remove {
|
match batch_type {
|
||||||
self.range_requests.remove(&request_id)
|
ExpectedBatchTy::OnlyBlockBlobs => self
|
||||||
} else {
|
.range_sidecar_pair_requests
|
||||||
self.range_requests.get(&request_id).cloned()
|
.remove(&request_id)
|
||||||
|
.map(|(chain_id, batch_id, _info)| (chain_id, batch_id)),
|
||||||
|
ExpectedBatchTy::OnlyBlock => self.range_requests.remove(&request_id),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn backfill_request_failed(
|
||||||
|
&mut self,
|
||||||
|
request_id: Id,
|
||||||
|
batch_type: ExpectedBatchTy,
|
||||||
|
) -> Option<BatchId> {
|
||||||
|
match batch_type {
|
||||||
|
ExpectedBatchTy::OnlyBlockBlobs => self
|
||||||
|
.backfill_sidecar_pair_requests
|
||||||
|
.remove(&request_id)
|
||||||
|
.map(|(batch_id, _info)| batch_id),
|
||||||
|
ExpectedBatchTy::OnlyBlock => self.backfill_requests.remove(&request_id),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Received a blocks by range response.
|
/// Received a blocks by range response.
|
||||||
pub fn backfill_sync_response(&mut self, request_id: Id, remove: bool) -> Option<BatchId> {
|
pub fn backfill_sync_block_response(
|
||||||
if remove {
|
&mut self,
|
||||||
self.backfill_requests.remove(&request_id)
|
request_id: Id,
|
||||||
} else {
|
maybe_block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
|
||||||
self.backfill_requests.get(&request_id).cloned()
|
batch_type: ExpectedBatchTy,
|
||||||
|
) -> Option<(BatchId, Option<BlockWrapper<T::EthSpec>>)> {
|
||||||
|
match batch_type {
|
||||||
|
ExpectedBatchTy::OnlyBlockBlobs => {
|
||||||
|
match self.backfill_sidecar_pair_requests.entry(request_id) {
|
||||||
|
Entry::Occupied(mut entry) => {
|
||||||
|
let (batch_id, info) = entry.get_mut();
|
||||||
|
let batch_id = batch_id.clone();
|
||||||
|
info.add_block_response(maybe_block);
|
||||||
|
let maybe_block = info.pop_response().map(|block_sidecar_pair| {
|
||||||
|
BlockWrapper::BlockAndBlob { block_sidecar_pair }
|
||||||
|
});
|
||||||
|
if info.is_finished() {
|
||||||
|
entry.remove();
|
||||||
|
}
|
||||||
|
Some((batch_id, maybe_block))
|
||||||
|
}
|
||||||
|
Entry::Vacant(_) => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ExpectedBatchTy::OnlyBlock => {
|
||||||
|
// if the request is just for blocks then it can be removed on a stream termination
|
||||||
|
match maybe_block {
|
||||||
|
Some(block) => self
|
||||||
|
.backfill_requests
|
||||||
|
.get(&request_id)
|
||||||
|
.cloned()
|
||||||
|
.map(|batch_id| (batch_id, Some(BlockWrapper::Block { block }))),
|
||||||
|
None => self
|
||||||
|
.backfill_requests
|
||||||
|
.remove(&request_id)
|
||||||
|
.map(|batch_id| (batch_id, None)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn backfill_sync_sidecar_response(
|
||||||
|
&mut self,
|
||||||
|
request_id: Id,
|
||||||
|
maybe_sidecar: Option<Arc<BlobsSidecar<T::EthSpec>>>,
|
||||||
|
) -> Option<(BatchId, Option<BlockWrapper<T::EthSpec>>)> {
|
||||||
|
match self.backfill_sidecar_pair_requests.entry(request_id) {
|
||||||
|
Entry::Occupied(mut entry) => {
|
||||||
|
let (batch_id, info) = entry.get_mut();
|
||||||
|
let batch_id = batch_id.clone();
|
||||||
|
info.add_sidecar_response(maybe_sidecar);
|
||||||
|
let maybe_block = info
|
||||||
|
.pop_response()
|
||||||
|
.map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair });
|
||||||
|
if info.is_finished() {
|
||||||
|
entry.remove();
|
||||||
|
}
|
||||||
|
Some((batch_id, maybe_block))
|
||||||
|
}
|
||||||
|
Entry::Vacant(_) => None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
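The block and sidecar handlers above both feed the same per-request info entry: blocks and sidecars arrive on two separate RPC streams, and a `BlockWrapper::BlockAndBlob` is only surfaced once both halves of a pair are available, while the entry is dropped only after both streams terminate. A minimal, self-contained sketch of that pairing discipline (the stand-in type below is illustrative and pairs by arrival order; the real `BlockBlobRequestInfo` is defined elsewhere in this PR):

use std::collections::VecDeque;

/// Toy stand-in for a block paired with its blobs sidecar.
#[derive(Debug)]
struct Pair(u64, u64);

/// Accumulates the two response streams and emits pairs in order.
#[derive(Default)]
struct PairInfo {
    blocks: VecDeque<u64>,
    sidecars: VecDeque<u64>,
    blocks_done: bool,
    sidecars_done: bool,
}

impl PairInfo {
    fn add_block_response(&mut self, block: Option<u64>) {
        match block {
            Some(b) => self.blocks.push_back(b),
            None => self.blocks_done = true, // stream termination
        }
    }
    fn add_sidecar_response(&mut self, sidecar: Option<u64>) {
        match sidecar {
            Some(s) => self.sidecars.push_back(s),
            None => self.sidecars_done = true,
        }
    }
    /// A pair is only popped once both halves have arrived.
    fn pop_response(&mut self) -> Option<Pair> {
        if self.blocks.is_empty() || self.sidecars.is_empty() {
            return None;
        }
        Some(Pair(self.blocks.pop_front()?, self.sidecars.pop_front()?))
    }
    /// The shared entry is removed only when both streams have finished.
    fn is_finished(&self) -> bool {
        self.blocks_done && self.sidecars_done && self.blocks.is_empty() && self.sidecars.is_empty()
    }
}

fn main() {
    let mut info = PairInfo::default();
    info.add_block_response(Some(1));
    assert!(info.pop_response().is_none()); // sidecar not here yet
    info.add_sidecar_response(Some(1));
    assert!(info.pop_response().is_some());
    info.add_block_response(None);
    info.add_sidecar_response(None);
    assert!(info.is_finished());
}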
@@ -181,6 +466,20 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         peer_id: PeerId,
         request: BlocksByRootRequest,
     ) -> Result<Id, &'static str> {
+        let request = if self
+            .chain
+            .is_data_availability_check_required()
+            .map_err(|_| "Unable to read slot clock")?
+        {
+            trace!(
+                self.log,
+                "Sending BlobsByRoot Request";
+                "method" => "BlobsByRoot",
+                "count" => request.block_roots.len(),
+                "peer" => %peer_id
+            );
+            Request::BlobsByRoot(request.into())
+        } else {
         trace!(
             self.log,
             "Sending BlocksByRoot Request";
@@ -188,7 +487,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
             "count" => request.block_roots.len(),
             "peer" => %peer_id
         );
-        let request = Request::BlocksByRoot(request);
+            Request::BlocksByRoot(request)
+        };
         let id = self.next_id();
         let request_id = RequestId::Sync(SyncRequestId::SingleBlock { id });
         self.send_network_msg(NetworkMessage::SendRequest {
@@ -205,6 +505,20 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         peer_id: PeerId,
         request: BlocksByRootRequest,
     ) -> Result<Id, &'static str> {
+        let request = if self
+            .chain
+            .is_data_availability_check_required()
+            .map_err(|_| "Unable to read slot clock")?
+        {
+            trace!(
+                self.log,
+                "Sending BlobsByRoot Request";
+                "method" => "BlobsByRoot",
+                "count" => request.block_roots.len(),
+                "peer" => %peer_id
+            );
+            Request::BlobsByRoot(request.into())
+        } else {
         trace!(
             self.log,
             "Sending BlocksByRoot Request";
@@ -212,7 +526,8 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
             "count" => request.block_roots.len(),
             "peer" => %peer_id
         );
-        let request = Request::BlocksByRoot(request);
+            Request::BlocksByRoot(request)
+        };
         let id = self.next_id();
         let request_id = RequestId::Sync(SyncRequestId::ParentLookup { id });
         self.send_network_msg(NetworkMessage::SendRequest {
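Both the single-block and parent-lookup paths now pick the request type with the same `if`/`else` expression: `BlobsByRoot` inside the blob retention window, `BlocksByRoot` otherwise. A self-contained sketch of the shape of that decision (the helper names below are stand-ins, not the Lighthouse API):

/// Toy request types mirroring the two RPC methods.
#[derive(Debug)]
enum Request {
    BlocksByRoot(Vec<u64>),
    BlobsByRoot(Vec<u64>),
}

/// Stand-in for `chain.is_data_availability_check_required()`: true once the
/// current epoch is inside the blob retention window (an assumption here).
fn data_availability_check_required(current_epoch: u64, da_boundary: Option<u64>) -> bool {
    da_boundary.map_or(false, |boundary| current_epoch >= boundary)
}

fn build_request(roots: Vec<u64>, current_epoch: u64, da_boundary: Option<u64>) -> Request {
    if data_availability_check_required(current_epoch, da_boundary) {
        // Post-4844: ask for the blocks coupled with their blobs.
        Request::BlobsByRoot(roots)
    } else {
        Request::BlocksByRoot(roots)
    }
}

fn main() {
    assert!(matches!(build_request(vec![1], 10, Some(5)), Request::BlobsByRoot(_)));
    assert!(matches!(build_request(vec![1], 4, Some(5)), Request::BlocksByRoot(_)));
    assert!(matches!(build_request(vec![1], 10, None), Request::BlocksByRoot(_)));
}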
@@ -292,4 +607,35 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         self.request_id += 1;
         id
     }
+
+    pub fn batch_type(&self, epoch: types::Epoch) -> ExpectedBatchTy {
+        // Keep tests only for blocks.
+        #[cfg(test)]
+        {
+            return ExpectedBatchTy::OnlyBlock;
+        }
+        #[cfg(not(test))]
+        {
+            use super::range_sync::EPOCHS_PER_BATCH;
+            assert_eq!(
+                EPOCHS_PER_BATCH, 1,
+                "If this is not one, everything will fail horribly"
+            );
+
+            // Here we need access to the beacon chain, check the fork boundary, the current epoch, the
+            // blob period to serve and check with that if the batch is a blob batch or not.
+            // NOTE: This would carelessly assume batch sizes are always 1 epoch, to avoid needing to
+            // align with the batch boundary.
+
+            if let Some(data_availability_boundary) = self.chain.data_availability_boundary() {
+                if epoch >= data_availability_boundary {
+                    ExpectedBatchTy::OnlyBlockBlobs
+                } else {
+                    ExpectedBatchTy::OnlyBlock
+                }
+            } else {
+                ExpectedBatchTy::OnlyBlock
+            }
+        }
+    }
 }
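`batch_type` is the single place where range sync decides between `OnlyBlock` and `OnlyBlockBlobs`, and the `assert_eq!` pins the assumption that makes typing a whole batch by one epoch sound. A runnable sketch of the same decision with stand-in types:

/// Typing a whole batch by its start epoch is only exact when a batch never
/// straddles the data-availability boundary, i.e. when batches are 1 epoch.
const EPOCHS_PER_BATCH: u64 = 1;

#[derive(Debug, PartialEq)]
enum ExpectedBatchTy {
    OnlyBlockBlobs,
    OnlyBlock,
}

/// Mirrors the new `SyncNetworkContext::batch_type`: every epoch at or past
/// the boundary must be requested together with its blobs.
fn batch_type(start_epoch: u64, da_boundary: Option<u64>) -> ExpectedBatchTy {
    match da_boundary {
        Some(boundary) if start_epoch >= boundary => ExpectedBatchTy::OnlyBlockBlobs,
        _ => ExpectedBatchTy::OnlyBlock,
    }
}

fn main() {
    assert_eq!(EPOCHS_PER_BATCH, 1, "one-epoch batches keep the typing exact");
    assert_eq!(batch_type(100, Some(90)), ExpectedBatchTy::OnlyBlockBlobs);
    assert_eq!(batch_type(80, Some(90)), ExpectedBatchTy::OnlyBlock);
    assert_eq!(batch_type(100, None), ExpectedBatchTy::OnlyBlock);
}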
@@ -5,7 +5,8 @@ use std::collections::HashSet;
 use std::hash::{Hash, Hasher};
 use std::ops::Sub;
 use std::sync::Arc;
-use types::{Epoch, EthSpec, SignedBeaconBlock, Slot};
+use types::signed_block_and_blobs::BlockWrapper;
+use types::{Epoch, EthSpec, SignedBeaconBlock, SignedBeaconBlockAndBlobsSidecar, Slot};

 /// The number of times to retry a batch before it is considered failed.
 const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5;
@@ -14,6 +15,37 @@ const MAX_BATCH_DOWNLOAD_ATTEMPTS: u8 = 5;
 /// after `MAX_BATCH_PROCESSING_ATTEMPTS` times, it is considered faulty.
 const MAX_BATCH_PROCESSING_ATTEMPTS: u8 = 3;
+
+pub enum BatchTy<T: EthSpec> {
+    Blocks(Vec<Arc<SignedBeaconBlock<T>>>),
+    BlocksAndBlobs(Vec<SignedBeaconBlockAndBlobsSidecar<T>>),
+}
+
+impl<T: EthSpec> BatchTy<T> {
+    pub fn into_wrapped_blocks(self) -> Vec<BlockWrapper<T>> {
+        match self {
+            BatchTy::Blocks(blocks) => blocks
+                .into_iter()
+                .map(|block| BlockWrapper::Block { block })
+                .collect(),
+            BatchTy::BlocksAndBlobs(block_sidecar_pair) => block_sidecar_pair
+                .into_iter()
+                .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair })
+                .collect(),
+        }
+    }
+}
+
+/// Error representing a batch with mixed block types.
+#[derive(Debug)]
+pub struct MixedBlockTyErr;
+
+/// Type of expected batch.
+#[derive(Debug, Clone)]
+pub enum ExpectedBatchTy {
+    OnlyBlockBlobs,
+    OnlyBlock,
+}

 /// Allows customisation of the above constants used in other sync methods such as BackFillSync.
 pub trait BatchConfig {
     /// The maximum batch download attempts.
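`BatchTy::into_wrapped_blocks` erases the per-batch typing back into the common `BlockWrapper` stream that the beacon processor consumes. A self-contained sketch, with toy types standing in for `SignedBeaconBlock` and `SignedBeaconBlockAndBlobsSidecar`:

use std::sync::Arc;

#[derive(Debug)]
struct Block(u64);
#[derive(Debug)]
struct BlockAndSidecar(u64);

#[derive(Debug)]
enum BlockWrapper {
    Block { block: Arc<Block> },
    BlockAndBlob { block_sidecar_pair: BlockAndSidecar },
}

enum BatchTy {
    Blocks(Vec<Arc<Block>>),
    BlocksAndBlobs(Vec<BlockAndSidecar>),
}

impl BatchTy {
    /// Same shape as the new `BatchTy::into_wrapped_blocks`: collapse either
    /// batch flavour into the one wrapper type downstream code consumes.
    fn into_wrapped_blocks(self) -> Vec<BlockWrapper> {
        match self {
            BatchTy::Blocks(blocks) => blocks
                .into_iter()
                .map(|block| BlockWrapper::Block { block })
                .collect(),
            BatchTy::BlocksAndBlobs(pairs) => pairs
                .into_iter()
                .map(|block_sidecar_pair| BlockWrapper::BlockAndBlob { block_sidecar_pair })
                .collect(),
        }
    }
}

fn main() {
    let batch = BatchTy::Blocks(vec![Arc::new(Block(7))]);
    let wrapped = batch.into_wrapped_blocks();
    assert!(matches!(wrapped[0], BlockWrapper::Block { .. }));
}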
@@ -47,7 +79,7 @@ pub trait BatchConfig {
     /// Note that simpler hashing functions considered in the past (hash of first block, hash of last
     /// block, number of received blocks) are not good enough to differentiate attempts. For this
     /// reason, we hash the complete set of blocks both in RangeSync and BackFillSync.
-    fn batch_attempt_hash<T: EthSpec>(blocks: &[Arc<SignedBeaconBlock<T>>]) -> u64;
+    fn batch_attempt_hash<T: EthSpec>(blocks: &[BlockWrapper<T>]) -> u64;
 }

 pub struct RangeSyncBatchConfig {}
@@ -59,7 +91,7 @@ impl BatchConfig for RangeSyncBatchConfig {
     fn max_batch_processing_attempts() -> u8 {
         MAX_BATCH_PROCESSING_ATTEMPTS
     }
-    fn batch_attempt_hash<T: EthSpec>(blocks: &[Arc<SignedBeaconBlock<T>>]) -> u64 {
+    fn batch_attempt_hash<T: EthSpec>(blocks: &[BlockWrapper<T>]) -> u64 {
         let mut hasher = std::collections::hash_map::DefaultHasher::new();
         blocks.hash(&mut hasher);
         hasher.finish()
@@ -96,6 +128,8 @@ pub struct BatchInfo<T: EthSpec, B: BatchConfig = RangeSyncBatchConfig> {
     failed_download_attempts: Vec<PeerId>,
     /// State of the batch.
     state: BatchState<T>,
+    /// Whether this batch contains all blocks or all blocks and blobs.
+    batch_type: ExpectedBatchTy,
     /// Pin the generic
     marker: std::marker::PhantomData<B>,
 }
@@ -105,9 +139,9 @@ pub enum BatchState<T: EthSpec> {
     /// The batch has failed either downloading or processing, but can be requested again.
     AwaitingDownload,
     /// The batch is being downloaded.
-    Downloading(PeerId, Vec<Arc<SignedBeaconBlock<T>>>, Id),
+    Downloading(PeerId, Vec<BlockWrapper<T>>, Id),
     /// The batch has been completely downloaded and is ready for processing.
-    AwaitingProcessing(PeerId, Vec<Arc<SignedBeaconBlock<T>>>),
+    AwaitingProcessing(PeerId, Vec<BlockWrapper<T>>),
     /// The batch is being processed.
     Processing(Attempt),
     /// The batch was successfully processed and is waiting to be validated.
@@ -139,8 +173,13 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> {
     /// Epoch boundary |                                   |
     ///  ... | 30 | 31 | 32 | 33 | 34 | ... | 61 | 62 | 63 | 64 | 65 |
     ///       Batch 1       |              Batch 2              |  Batch 3
-    pub fn new(start_epoch: &Epoch, num_of_epochs: u64) -> Self {
-        let start_slot = start_epoch.start_slot(T::slots_per_epoch()) + 1;
+    ///
+    /// NOTE: Removed the shift by one for eip4844 because otherwise the last batch before the blob
+    /// fork boundary will be of mixed type (all blocks and one last blockblob), and I don't want to
+    /// deal with this for now.
+    /// This means finalization might be slower in eip4844
+    pub fn new(start_epoch: &Epoch, num_of_epochs: u64, batch_type: ExpectedBatchTy) -> Self {
+        let start_slot = start_epoch.start_slot(T::slots_per_epoch());
         let end_slot = start_slot + num_of_epochs * T::slots_per_epoch();
         BatchInfo {
             start_slot,
@@ -149,6 +188,7 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> {
             failed_download_attempts: Vec::new(),
             non_faulty_processing_attempts: 0,
             state: BatchState::AwaitingDownload,
+            batch_type,
             marker: std::marker::PhantomData,
         }
     }
@@ -201,11 +241,14 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> {
     }

     /// Returns a BlocksByRange request associated with the batch.
-    pub fn to_blocks_by_range_request(&self) -> BlocksByRangeRequest {
+    pub fn to_blocks_by_range_request(&self) -> (BlocksByRangeRequest, ExpectedBatchTy) {
+        (
         BlocksByRangeRequest {
             start_slot: self.start_slot.into(),
             count: self.end_slot.sub(self.start_slot).into(),
-        }
+        },
+            self.batch_type.clone(),
+        )
     }

     /// After different operations over a batch, this could be in a state that allows it to
@@ -231,7 +274,7 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> {
     }

     /// Adds a block to a downloading batch.
-    pub fn add_block(&mut self, block: Arc<SignedBeaconBlock<T>>) -> Result<(), WrongState> {
+    pub fn add_block(&mut self, block: BlockWrapper<T>) -> Result<(), WrongState> {
         match self.state.poison() {
             BatchState::Downloading(peer, mut blocks, req_id) => {
                 blocks.push(block);
@@ -363,11 +406,30 @@ impl<T: EthSpec, B: BatchConfig> BatchInfo<T, B> {
         }
     }

-    pub fn start_processing(&mut self) -> Result<Vec<Arc<SignedBeaconBlock<T>>>, WrongState> {
+    pub fn start_processing(&mut self) -> Result<BatchTy<T>, WrongState> {
         match self.state.poison() {
             BatchState::AwaitingProcessing(peer, blocks) => {
                 self.state = BatchState::Processing(Attempt::new::<B, T>(peer, &blocks));
-                Ok(blocks)
+                match self.batch_type {
+                    ExpectedBatchTy::OnlyBlockBlobs => {
+                        let blocks = blocks.into_iter().map(|block| {
+                            let BlockWrapper::BlockAndBlob { block_sidecar_pair: block_and_blob } = block else {
+                                panic!("Batches should never have a mixed type. This is a bug. Contact D")
+                            };
+                            block_and_blob
+                        }).collect();
+                        Ok(BatchTy::BlocksAndBlobs(blocks))
+                    }
+                    ExpectedBatchTy::OnlyBlock => {
+                        let blocks = blocks.into_iter().map(|block| {
+                            let BlockWrapper::Block { block } = block else {
+                                panic!("Batches should never have a mixed type. This is a bug. Contact D")
+                            };
+                            block
+                        }).collect();
+                        Ok(BatchTy::Blocks(blocks))
+                    }
+                }
             }
             BatchState::Poisoned => unreachable!("Poisoned batch"),
             other => {
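The new `start_processing` relies on `let ... else` (stable since Rust 1.65) to destructure each wrapper and treat a mixed batch as a bug. A minimal sketch of the pattern outside the Lighthouse types:

// `let ... else` binds on one pattern and must diverge otherwise, which is
// exactly what rejecting a mixed batch needs.
#[derive(Debug)]
enum BlockWrapper {
    Block { block: u64 },
    BlockAndBlob { block_sidecar_pair: u64 },
}

fn unwrap_blocks_only(batch: Vec<BlockWrapper>) -> Vec<u64> {
    batch
        .into_iter()
        .map(|wrapper| {
            let BlockWrapper::Block { block } = wrapper else {
                // Mirrors the panic in the PR: a batch typed OnlyBlock must
                // never contain a block+blob pair.
                panic!("Batches should never have a mixed type. This is a bug.")
            };
            block
        })
        .collect()
}

fn main() {
    let blocks = unwrap_blocks_only(vec![BlockWrapper::Block { block: 1 }]);
    assert_eq!(blocks, vec![1]);
}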
@@ -461,10 +523,7 @@ pub struct Attempt {
 }

 impl Attempt {
-    fn new<B: BatchConfig, T: EthSpec>(
-        peer_id: PeerId,
-        blocks: &[Arc<SignedBeaconBlock<T>>],
-    ) -> Self {
+    fn new<B: BatchConfig, T: EthSpec>(peer_id: PeerId, blocks: &[BlockWrapper<T>]) -> Self {
         let hash = B::batch_attempt_hash(blocks);
         Attempt { peer_id, hash }
     }
@@ -1,4 +1,5 @@
 use super::batch::{BatchInfo, BatchProcessingResult, BatchState};
+use super::BatchTy;
 use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent as BeaconWorkEvent};
 use crate::sync::{
     manager::Id, network_context::SyncNetworkContext, BatchOperationOutcome, BatchProcessResult,
@@ -10,8 +11,8 @@ use rand::seq::SliceRandom;
 use slog::{crit, debug, o, warn};
 use std::collections::{btree_map::Entry, BTreeMap, HashSet};
 use std::hash::{Hash, Hasher};
-use std::sync::Arc;
-use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
+use types::signed_block_and_blobs::BlockWrapper;
+use types::{Epoch, EthSpec, Hash256, Slot};

 /// Blocks are downloaded in batches from peers. This constant specifies how many epochs worth of
 /// blocks per batch are requested _at most_. A batch may request less blocks to account for
@@ -19,7 +20,7 @@ use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
 /// we will negatively report peers with poor bandwidth. This can be set arbitrarily high, in which
 /// case the responder will fill the response up to the max request size, assuming they have the
 /// bandwidth to do so.
-pub const EPOCHS_PER_BATCH: u64 = 2;
+pub const EPOCHS_PER_BATCH: u64 = 1;

 /// The maximum number of batches to queue before requesting more.
 const BATCH_BUFFER_SIZE: u8 = 5;
@@ -225,7 +226,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         batch_id: BatchId,
         peer_id: &PeerId,
         request_id: Id,
-        beacon_block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        beacon_block: Option<BlockWrapper<T::EthSpec>>,
     ) -> ProcessingResult {
         // check if we have this batch
         let batch = match self.batches.get_mut(&batch_id) {
@@ -326,9 +327,9 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         let process_id = ChainSegmentProcessId::RangeBatchId(self.id, batch_id, count_unrealized);
         self.current_processing_batch = Some(batch_id);

-        if let Err(e) =
-            beacon_processor_send.try_send(BeaconWorkEvent::chain_segment(process_id, blocks))
-        {
+        let work_event = BeaconWorkEvent::chain_segment(process_id, blocks.into_wrapped_blocks());
+        if let Err(e) = beacon_processor_send.try_send(work_event) {
             crit!(self.log, "Failed to send chain segment to processor."; "msg" => "process_batch",
                 "error" => %e, "batch" => self.processing_target);
             // This is unlikely to happen but it would stall syncing since the batch now has no
@@ -897,8 +898,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         peer: PeerId,
     ) -> ProcessingResult {
         if let Some(batch) = self.batches.get_mut(&batch_id) {
-            let request = batch.to_blocks_by_range_request();
-            match network.blocks_by_range_request(peer, request, self.id, batch_id) {
+            let (request, batch_type) = batch.to_blocks_by_range_request();
+            match network.blocks_by_range_request(peer, batch_type, request, self.id, batch_id) {
                 Ok(request_id) => {
                     // inform the batch about the new request
                     batch.start_downloading_from_peer(peer, request_id)?;
@@ -1002,7 +1003,8 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         if let Some(epoch) = self.optimistic_start {
             if let Entry::Vacant(entry) = self.batches.entry(epoch) {
                 if let Some(peer) = idle_peers.pop() {
-                    let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH);
+                    let batch_type = network.batch_type(epoch);
+                    let optimistic_batch = BatchInfo::new(&epoch, EPOCHS_PER_BATCH, batch_type);
                     entry.insert(optimistic_batch);
                     self.send_batch(network, epoch, peer)?;
                 }
@@ -1011,7 +1013,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
         }

         while let Some(peer) = idle_peers.pop() {
-            if let Some(batch_id) = self.include_next_batch() {
+            if let Some(batch_id) = self.include_next_batch(network) {
                 // send the batch
                 self.send_batch(network, batch_id, peer)?;
             } else {
@@ -1025,7 +1027,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {

     /// Creates the next required batch from the chain. If there are no more batches required,
     /// `false` is returned.
-    fn include_next_batch(&mut self) -> Option<BatchId> {
+    fn include_next_batch(&mut self, network: &mut SyncNetworkContext<T>) -> Option<BatchId> {
         // don't request batches beyond the target head slot
         if self
             .to_be_downloaded
@@ -1059,10 +1061,11 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             Entry::Occupied(_) => {
                 // this batch doesn't need downloading, let this same function decide the next batch
                 self.to_be_downloaded += EPOCHS_PER_BATCH;
-                self.include_next_batch()
+                self.include_next_batch(network)
             }
             Entry::Vacant(entry) => {
-                entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH));
+                let batch_type = network.batch_type(batch_id);
+                entry.insert(BatchInfo::new(&batch_id, EPOCHS_PER_BATCH, batch_type));
                 self.to_be_downloaded += EPOCHS_PER_BATCH;
                 Some(batch_id)
             }
@@ -8,7 +8,10 @@ mod chain_collection;
 mod range;
 mod sync_type;

-pub use batch::{BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState};
+pub use batch::{
+    BatchConfig, BatchInfo, BatchOperationOutcome, BatchProcessingResult, BatchState, BatchTy,
+    ExpectedBatchTy,
+};
 pub use chain::{BatchId, ChainId, EPOCHS_PER_BATCH};
 pub use range::RangeSync;
 pub use sync_type::RangeSyncType;
@@ -55,7 +55,8 @@ use lru_cache::LRUTimeCache;
 use slog::{crit, debug, trace, warn};
 use std::collections::HashMap;
 use std::sync::Arc;
-use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
+use types::signed_block_and_blobs::BlockWrapper;
+use types::{Epoch, EthSpec, Hash256, Slot};

 /// For how long we store failed finalized chains to prevent retries.
 const FAILED_CHAINS_EXPIRY_SECONDS: u64 = 30;
@@ -202,7 +203,7 @@ where
         chain_id: ChainId,
         batch_id: BatchId,
         request_id: Id,
-        beacon_block: Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        beacon_block: Option<BlockWrapper<T::EthSpec>>,
     ) {
         // check if this chunk removes the chain
         match self.chains.call_by_id(chain_id, |chain| {
@@ -372,6 +373,7 @@ where
 #[cfg(test)]
 mod tests {
     use crate::service::RequestId;
+    use crate::sync::range_sync::ExpectedBatchTy;
     use crate::NetworkMessage;

     use super::*;
@@ -386,11 +388,12 @@ mod tests {
     use slog::{o, Drain};
     use tokio::sync::mpsc;

-    use slot_clock::SystemTimeSlotClock;
+    use slot_clock::{SlotClock, SystemTimeSlotClock};
     use std::collections::HashSet;
     use std::sync::Arc;
+    use std::time::Duration;
     use store::MemoryStore;
-    use types::{Hash256, MinimalEthSpec as E};
+    use types::{Hash256, MainnetEthSpec, MinimalEthSpec as E};

     #[derive(Debug)]
     struct FakeStorage {
@@ -604,6 +607,7 @@ mod tests {
             network_tx,
             globals.clone(),
             beacon_processor_tx,
+            chain,
             log.new(o!("component" => "network_context")),
         );
         let test_rig = TestRig {
@@ -682,10 +686,13 @@ mod tests {
         // add some peers
         let (peer1, local_info, head_info) = rig.head_peer();
         range.add_peer(&mut rig.cx, local_info, peer1, head_info);
-        let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
-            RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => {
-                (rig.cx.range_sync_response(id, true).unwrap(), id)
-            }
+        let ((chain1, batch1, _), id1) = match rig.grab_request(&peer1).0 {
+            RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => (
+                rig.cx
+                    .range_sync_block_response(id, None, ExpectedBatchTy::OnlyBlock)
+                    .unwrap(),
+                id,
+            ),
             other => panic!("unexpected request {:?}", other),
         };

@@ -701,10 +708,13 @@ mod tests {
         // while the ee is offline, more peers might arrive. Add a new finalized peer.
         let (peer2, local_info, finalized_info) = rig.finalized_peer();
         range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
-        let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
-            RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => {
-                (rig.cx.range_sync_response(id, true).unwrap(), id)
-            }
+        let ((chain2, batch2, _), id2) = match rig.grab_request(&peer2).0 {
+            RequestId::Sync(crate::sync::manager::RequestId::RangeSync { id }) => (
+                rig.cx
+                    .range_sync_block_response(id, None, ExpectedBatchTy::OnlyBlock)
+                    .unwrap(),
+                id,
+            ),
             other => panic!("unexpected request {:?}", other),
         };

@@ -511,6 +511,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .default_value("1")
                 .takes_value(true)
         )
+        /* 4844 settings */
+        .arg(
+            Arg::with_name("trusted-setup-file")
+                .long("trusted-setup-file")
+                .value_name("FILE")
+                .help("File containing the trusted setup parameters. \
+                    NOTE: This is only for the devnet, the trusted setup params \
+                    must be embedded into the ethspec once parameter loading \
+                    is supported in the ckzg library")
+                .takes_value(true)
+        )
         /*
          * Database purging and compaction.
          */
@@ -342,6 +342,7 @@ pub fn get_config<E: EthSpec>(
         let execution_timeout_multiplier =
             clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?;
         el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier);
+        el_config.spec = spec.clone();

         // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and
         // use `--execution-endpoint` instead. Also, log a deprecation warning.
@@ -364,6 +365,11 @@ pub fn get_config<E: EthSpec>(
         client_config.execution_layer = Some(el_config);
     }

+    // 4844 params
+    if let Some(trusted_setup_file) = cli_args.value_of("trusted-setup-file") {
+        client_config.trusted_setup_file = Some(PathBuf::from(trusted_setup_file));
+    }
+
     if let Some(freezer_dir) = cli_args.value_of("freezer-dir") {
         client_config.freezer_db_path = Some(PathBuf::from(freezer_dir));
     }
@@ -25,6 +25,8 @@ pub enum Error {
     SchemaMigrationError(String),
     /// The store's `anchor_info` was mutated concurrently, the latest modification wasn't applied.
    AnchorInfoConcurrentMutation,
+    /// The store's `blob_info` was mutated concurrently, the latest modification wasn't applied.
+    BlobInfoConcurrentMutation,
     /// The block or state is unavailable due to weak subjectivity sync.
     HistoryUnavailable,
     /// State reconstruction cannot commence because not all historic blocks are known.
@@ -12,9 +12,9 @@ use crate::leveldb_store::BytesKey;
 use crate::leveldb_store::LevelDB;
 use crate::memory_store::MemoryStore;
 use crate::metadata::{
-    AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY,
-    COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY,
-    SCHEMA_VERSION_KEY, SPLIT_KEY,
+    AnchorInfo, BlobInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY,
+    BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION,
+    PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY,
 };
 use crate::metrics;
 use crate::{
@@ -53,6 +53,8 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     pub(crate) split: RwLock<Split>,
     /// The starting slots for the range of blocks & states stored in the database.
     anchor_info: RwLock<Option<AnchorInfo>>,
+    /// The starting slots for the range of blobs stored in the database.
+    blob_info: RwLock<Option<BlobInfo>>,
     pub(crate) config: StoreConfig,
     /// Cold database containing compact historical data.
     pub cold_db: Cold,
@@ -128,6 +130,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
         let db = HotColdDB {
             split: RwLock::new(Split::default()),
             anchor_info: RwLock::new(None),
+            blob_info: RwLock::new(None),
             cold_db: MemoryStore::open(),
             hot_db: MemoryStore::open(),
             block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
@@ -162,6 +165,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
         let mut db = HotColdDB {
             split: RwLock::new(Split::default()),
             anchor_info: RwLock::new(None),
+            blob_info: RwLock::new(None),
             cold_db: LevelDB::open(cold_path)?,
             hot_db: LevelDB::open(hot_path)?,
             block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
@@ -1302,6 +1306,65 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
             .map(|a| a.anchor_slot)
     }

+    /// Get a clone of the store's blob info.
+    ///
+    /// To do mutations, use `compare_and_set_blob_info`.
+    pub fn get_blob_info(&self) -> Option<BlobInfo> {
+        self.blob_info.read_recursive().clone()
+    }
+
+    /// Atomically update the blob info from `prev_value` to `new_value`.
+    ///
+    /// Return a `KeyValueStoreOp` which should be written to disk, possibly atomically with other
+    /// values.
+    ///
+    /// Return an `BlobInfoConcurrentMutation` error if the `prev_value` provided
+    /// is not correct.
+    pub fn compare_and_set_blob_info(
+        &self,
+        prev_value: Option<BlobInfo>,
+        new_value: Option<BlobInfo>,
+    ) -> Result<KeyValueStoreOp, Error> {
+        let mut blob_info = self.blob_info.write();
+        if *blob_info == prev_value {
+            let kv_op = self.store_blob_info_in_batch(&new_value);
+            *blob_info = new_value;
+            Ok(kv_op)
+        } else {
+            Err(Error::AnchorInfoConcurrentMutation)
+        }
+    }
+
+    /// As for `compare_and_set_blob_info`, but also writes the blob info to disk immediately.
+    pub fn compare_and_set_blob_info_with_write(
+        &self,
+        prev_value: Option<BlobInfo>,
+        new_value: Option<BlobInfo>,
+    ) -> Result<(), Error> {
+        let kv_store_op = self.compare_and_set_blob_info(prev_value, new_value)?;
+        self.hot_db.do_atomically(vec![kv_store_op])
+    }
+
+    /// Load the blob info from disk, but do not set `self.blob_info`.
+    fn load_blob_info(&self) -> Result<Option<BlobInfo>, Error> {
+        self.hot_db.get(&BLOB_INFO_KEY)
+    }
+
+    /// Store the given `blob_info` to disk.
+    ///
+    /// The argument is intended to be `self.blob_info`, but is passed manually to avoid issues
+    /// with recursive locking.
+    fn store_blob_info_in_batch(&self, blob_info: &Option<BlobInfo>) -> KeyValueStoreOp {
+        if let Some(ref blob_info) = blob_info {
+            blob_info.as_kv_store_op(BLOB_INFO_KEY)
+        } else {
+            KeyValueStoreOp::DeleteKey(get_key_for_col(
+                DBColumn::BeaconMeta.into(),
+                BLOB_INFO_KEY.as_bytes(),
+            ))
+        }
+    }
+
     /// Return the slot-window describing the available historic states.
     ///
     /// Returns `(lower_limit, upper_limit)`.
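`compare_and_set_blob_info` follows the same optimistic-concurrency contract as the existing anchor-info methods: a writer must present the value it last read, and a stale read is rejected rather than silently overwritten. A self-contained sketch of that contract (persistence via `KeyValueStoreOp` is elided; the types are stand-ins):

use std::sync::RwLock;

#[derive(Debug, Clone, PartialEq)]
struct BlobInfo {
    oldest_blob_slot: u64,
}

struct Store {
    blob_info: RwLock<Option<BlobInfo>>,
}

#[derive(Debug)]
enum Error {
    ConcurrentMutation,
}

impl Store {
    /// The caller proves it saw the latest value before replacing it.
    fn compare_and_set_blob_info(
        &self,
        prev_value: Option<BlobInfo>,
        new_value: Option<BlobInfo>,
    ) -> Result<(), Error> {
        let mut blob_info = self.blob_info.write().unwrap();
        if *blob_info == prev_value {
            *blob_info = new_value;
            Ok(()) // the real method also returns a KeyValueStoreOp to persist
        } else {
            Err(Error::ConcurrentMutation)
        }
    }
}

fn main() {
    let store = Store { blob_info: RwLock::new(None) };
    let v1 = Some(BlobInfo { oldest_blob_slot: 10 });
    store.compare_and_set_blob_info(None, v1.clone()).unwrap();
    // A second writer still holding the stale `None` must fail.
    assert!(store.compare_and_set_blob_info(None, v1).is_err());
}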
@@ -15,6 +15,7 @@ pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2);
 pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3);
 pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4);
 pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5);
+pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6);

 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 pub struct SchemaVersion(pub u64);
@@ -117,3 +118,28 @@ impl StoreItem for AnchorInfo {
         Ok(Self::from_ssz_bytes(bytes)?)
     }
 }
+
+/// Database parameters relevant to blob sync.
+#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, Serialize, Deserialize)]
+pub struct BlobInfo {
+    /// The block root of the next blob that needs to be added to fill in the history.
+    pub oldest_blob_parent: Hash256,
+    /// The slot before which blobs are available.
+    pub oldest_blob_slot: Slot,
+    /// The slot from which blobs are available.
+    pub latest_blob_slot: Slot,
+}
+
+impl StoreItem for BlobInfo {
+    fn db_column() -> DBColumn {
+        DBColumn::BeaconMeta
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
+        Ok(Self::from_ssz_bytes(bytes)?)
+    }
+}
|
|||||||
[features]
|
[features]
|
||||||
default = ["lighthouse"]
|
default = ["lighthouse"]
|
||||||
lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"]
|
lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"]
|
||||||
withdrawals = ["store/withdrawals"]
|
withdrawals = ["store/withdrawals", "types/withdrawals"]
|
||||||
withdrawals-processing = ["store/withdrawals-processing"]
|
withdrawals-processing = ["store/withdrawals-processing"]
|
@@ -1,6 +1,6 @@
 use ethereum_types::U256;

-use serde::de::Visitor;
+use serde::de::{Error, Visitor};
 use serde::{de, Deserializer, Serialize, Serializer};
 use std::fmt;
 use std::str::FromStr;
@@ -15,12 +15,26 @@ where
 pub struct U256Visitor;

 impl<'de> Visitor<'de> for U256Visitor {
-    type Value = String;
+    type Value = Option<String>;

     fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
         formatter.write_str("a well formatted hex string")
     }

+    fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        deserializer.deserialize_string(U256Visitor)
+    }
+
+    fn visit_none<E>(self) -> Result<Self::Value, E>
+    where
+        E: Error,
+    {
+        Ok(None)
+    }
+
     fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
     where
         E: de::Error,
@@ -35,11 +49,11 @@ impl<'de> Visitor<'de> for U256Visitor {
                 stripped
             )))
         } else if stripped == "0" {
-            Ok(value.to_string())
+            Ok(Some(value.to_string()))
         } else if stripped.starts_with('0') {
            Err(de::Error::custom("cannot have leading zero"))
         } else {
-            Ok(value.to_string())
+            Ok(Some(value.to_string()))
         }
     }
 }
@@ -48,12 +62,13 @@ pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<U256>, D::Error>
 where
     D: Deserializer<'de>,
 {
-    let decoded = deserializer.deserialize_string(U256Visitor)?;
+    let decoded = deserializer.deserialize_option(U256Visitor)?;

-    Some(
-        U256::from_str(&decoded)
-            .map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e))),
-    )
+    decoded
+        .map(|decoded| {
+            U256::from_str(&decoded)
+                .map_err(|e| de::Error::custom(format!("Invalid U256 string: {}", e)))
+        })
         .transpose()
 }
@@ -161,6 +176,10 @@ mod test {
                 val: Some(U256::max_value())
             },
         );
+        assert_eq!(
+            serde_json::from_str::<Wrapper>("null").unwrap(),
+            Wrapper { val: None },
+        );
         serde_json::from_str::<Wrapper>("\"0x\"").unwrap_err();
         serde_json::from_str::<Wrapper>("\"0x0400\"").unwrap_err();
         serde_json::from_str::<Wrapper>("\"400\"").unwrap_err();
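The reworked visitor makes `Option<U256>` deserialization total over JSON `null`: `deserialize_option` dispatches `null` to `visit_none` and anything else to `visit_some`, which re-enters the string path. A simplified, runnable sketch of that dispatch (assuming `serde` and `serde_json` as dependencies; the hex validation here is reduced to a prefix check):

use serde::de::{self, Deserializer, Visitor};
use std::fmt;

struct OptHexVisitor;

impl<'de> Visitor<'de> for OptHexVisitor {
    // The visitor's value is itself an Option, so `deserialize_option` can
    // route `null` to `visit_none` and present values to `visit_some`.
    type Value = Option<String>;

    fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("a 0x-prefixed hex string or null")
    }

    fn visit_none<E: de::Error>(self) -> Result<Self::Value, E> {
        Ok(None)
    }

    fn visit_some<D: Deserializer<'de>>(self, d: D) -> Result<Self::Value, D::Error> {
        // Re-enter with the same visitor; the string path does the real work.
        d.deserialize_str(OptHexVisitor)
    }

    fn visit_str<E: de::Error>(self, value: &str) -> Result<Self::Value, E> {
        value
            .strip_prefix("0x")
            .map(|s| Some(s.to_string()))
            .ok_or_else(|| E::custom("missing 0x prefix"))
    }
}

fn main() {
    let mut d = serde_json::Deserializer::from_str("null");
    assert_eq!(d.deserialize_option(OptHexVisitor).unwrap(), None);

    let mut d = serde_json::Deserializer::from_str("\"0xff\"");
    assert_eq!(d.deserialize_option(OptHexVisitor).unwrap(), Some("ff".to_string()));
}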
consensus/ssz_types/src/serde_utils/list_of_hex_fixed_vec.rs (new file, 77 lines)
@@ -0,0 +1,77 @@
+//! Serialize `VariableList<FixedVector<u8, M>, N>` as list of 0x-prefixed hex string.
+use crate::{FixedVector, VariableList};
+use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize, Serializer};
+use std::marker::PhantomData;
+use typenum::Unsigned;
+
+#[derive(Deserialize)]
+#[serde(transparent)]
+pub struct WrappedListOwned<N: Unsigned>(
+    #[serde(with = "crate::serde_utils::hex_fixed_vec")] FixedVector<u8, N>,
+);
+
+#[derive(Serialize)]
+#[serde(transparent)]
+pub struct WrappedListRef<'a, N: Unsigned>(
+    #[serde(with = "crate::serde_utils::hex_fixed_vec")] &'a FixedVector<u8, N>,
+);
+
+pub fn serialize<S, M, N>(
+    list: &VariableList<FixedVector<u8, M>, N>,
+    serializer: S,
+) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+    M: Unsigned,
+    N: Unsigned,
+{
+    let mut seq = serializer.serialize_seq(Some(list.len()))?;
+    for bytes in list {
+        seq.serialize_element(&WrappedListRef(bytes))?;
+    }
+    seq.end()
+}
+
+#[derive(Default)]
+pub struct Visitor<M, N> {
+    _phantom_m: PhantomData<M>,
+    _phantom_n: PhantomData<N>,
+}
+
+impl<'a, M, N> serde::de::Visitor<'a> for Visitor<M, N>
+where
+    M: Unsigned,
+    N: Unsigned,
+{
+    type Value = VariableList<FixedVector<u8, M>, N>;
+
+    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(formatter, "a list of 0x-prefixed hex bytes")
+    }
+
+    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+    where
+        A: serde::de::SeqAccess<'a>,
+    {
+        let mut list: VariableList<FixedVector<u8, M>, N> = <_>::default();
+
+        while let Some(val) = seq.next_element::<WrappedListOwned<M>>()? {
+            list.push(val.0).map_err(|e| {
+                serde::de::Error::custom(format!("failed to push value to list: {:?}.", e))
+            })?;
+        }
+
+        Ok(list)
+    }
+}
+
+pub fn deserialize<'de, D, M, N>(
+    deserializer: D,
+) -> Result<VariableList<FixedVector<u8, M>, N>, D::Error>
+where
+    D: Deserializer<'de>,
+    M: Unsigned,
+    N: Unsigned,
+{
+    deserializer.deserialize_seq(Visitor::default())
+}
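The new module is meant to be used with `#[serde(with = ...)]` on a `VariableList<FixedVector<u8, M>, N>` field, rendering it as a JSON array of 0x-prefixed hex strings. A hypothetical usage sketch (the container, field name, and type parameters below are illustrative, and the module path assumes the crate re-exports `serde_utils` and `typenum`):

use serde::{Deserialize, Serialize};
use ssz_types::{typenum::{U4, U48}, FixedVector, VariableList};

#[derive(Serialize, Deserialize)]
struct Commitments {
    // Serializes as e.g. ["0x1234...", "0xabcd..."].
    #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
    kzg_commitments: VariableList<FixedVector<u8, U48>, U4>,
}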
@@ -1,5 +1,6 @@
 pub mod hex_fixed_vec;
 pub mod hex_var_list;
+pub mod list_of_hex_fixed_vec;
 pub mod list_of_hex_var_list;
 pub mod quoted_u64_fixed_vec;
 pub mod quoted_u64_var_list;
@@ -2,10 +2,12 @@ use crate::common::get_indexed_attestation;
 use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
 use std::collections::{hash_map::Entry, HashMap};
 use std::marker::PhantomData;
+use std::sync::Arc;
 use tree_hash::TreeHash;
 use types::{
     AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList,
-    ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot,
+    BlobsSidecar, ChainSpec, Epoch, EthSpec, ExecPayload, Hash256, IndexedAttestation,
+    SignedBeaconBlock, Slot,
 };

 #[derive(Debug)]
@@ -19,7 +21,12 @@ pub struct ConsensusContext<T: EthSpec> {
     /// Cache of indexed attestations constructed during block processing.
     indexed_attestations:
         HashMap<(AttestationData, BitList<T::MaxValidatorsPerCommittee>), IndexedAttestation<T>>,
-    _phantom: PhantomData<T>,
+    /// Should only be populated if the sidecar has not been validated.
+    blobs_sidecar: Option<Arc<BlobsSidecar<T>>>,
+    /// Whether `validate_blobs_sidecar` has successfully passed.
+    blobs_sidecar_validated: bool,
+    /// Whether `verify_kzg_commitments_against_transactions` has successfully passed.
+    blobs_verified_vs_txs: bool,
 }

 #[derive(Debug, PartialEq, Clone)]
@@ -42,7 +49,9 @@ impl<T: EthSpec> ConsensusContext<T> {
             proposer_index: None,
             current_block_root: None,
             indexed_attestations: HashMap::new(),
-            _phantom: PhantomData,
+            blobs_sidecar: None,
+            blobs_sidecar_validated: false,
+            blobs_verified_vs_txs: false,
         }
     }

@@ -158,4 +167,31 @@ impl<T: EthSpec> ConsensusContext<T> {
     pub fn num_cached_indexed_attestations(&self) -> usize {
         self.indexed_attestations.len()
     }
+
+    pub fn set_blobs_sidecar_validated(mut self, blobs_sidecar_validated: bool) -> Self {
+        self.blobs_sidecar_validated = blobs_sidecar_validated;
+        self
+    }
+
+    pub fn set_blobs_verified_vs_txs(mut self, blobs_verified_vs_txs: bool) -> Self {
+        self.blobs_verified_vs_txs = blobs_verified_vs_txs;
+        self
+    }
+
+    pub fn blobs_sidecar_validated(&self) -> bool {
+        self.blobs_sidecar_validated
+    }
+
+    pub fn blobs_verified_vs_txs(&self) -> bool {
+        self.blobs_verified_vs_txs
+    }
+
+    pub fn set_blobs_sidecar(mut self, blobs_sidecar: Option<Arc<BlobsSidecar<T>>>) -> Self {
+        self.blobs_sidecar = blobs_sidecar;
+        self
+    }
+
+    pub fn blobs_sidecar(&self) -> Option<Arc<BlobsSidecar<T>>> {
+        self.blobs_sidecar.clone()
+    }
 }
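The new flags thread blob-verification state through block processing so that checks already done at gossip time are not recomputed; the consuming `set_*` methods chain like a builder. A reduced sketch of the same pattern:

/// Minimal stand-in for ConsensusContext that only models the blob bookkeeping.
#[derive(Default, Debug)]
struct ConsensusContext {
    blobs_sidecar_validated: bool,
    blobs_verified_vs_txs: bool,
}

impl ConsensusContext {
    // Consuming `mut self` setters chain naturally at construction sites,
    // e.g. when gossip verification has already validated the sidecar.
    fn set_blobs_sidecar_validated(mut self, v: bool) -> Self {
        self.blobs_sidecar_validated = v;
        self
    }
    fn set_blobs_verified_vs_txs(mut self, v: bool) -> Self {
        self.blobs_verified_vs_txs = v;
        self
    }
}

fn main() {
    let ctxt = ConsensusContext::default()
        .set_blobs_sidecar_validated(true)
        .set_blobs_verified_vs_txs(true);
    // Block processing can then skip re-running those checks.
    assert!(ctxt.blobs_sidecar_validated && ctxt.blobs_verified_vs_txs);
}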
@ -19,7 +19,6 @@ pub use process_operations::process_operations;
|
|||||||
pub use verify_attestation::{
|
pub use verify_attestation::{
|
||||||
verify_attestation_for_block_inclusion, verify_attestation_for_state,
|
verify_attestation_for_block_inclusion, verify_attestation_for_state,
|
||||||
};
|
};
|
||||||
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
|
|
||||||
pub use verify_bls_to_execution_change::verify_bls_to_execution_change;
|
pub use verify_bls_to_execution_change::verify_bls_to_execution_change;
|
||||||
pub use verify_deposit::{
|
pub use verify_deposit::{
|
||||||
get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature,
|
get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature,
|
||||||
@ -36,13 +35,11 @@ pub mod signature_sets;
|
|||||||
pub mod tests;
|
pub mod tests;
|
||||||
mod verify_attestation;
|
mod verify_attestation;
|
||||||
mod verify_attester_slashing;
|
mod verify_attester_slashing;
|
||||||
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
|
|
||||||
mod verify_bls_to_execution_change;
|
mod verify_bls_to_execution_change;
|
||||||
mod verify_deposit;
|
mod verify_deposit;
|
||||||
mod verify_exit;
|
mod verify_exit;
|
||||||
mod verify_proposer_slashing;
|
mod verify_proposer_slashing;
|
||||||
|
|
||||||
#[cfg(feature = "withdrawals-processing")]
|
|
||||||
use crate::common::decrease_balance;
|
use crate::common::decrease_balance;
|
||||||
|
|
||||||
#[cfg(feature = "arbitrary-fuzz")]
|
#[cfg(feature = "arbitrary-fuzz")]
|
||||||
@ -186,6 +183,9 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
|
|||||||
|
|
||||||
process_blob_kzg_commitments(block.body())?;
|
process_blob_kzg_commitments(block.body())?;
|
||||||
|
|
||||||
|
//FIXME(sean) add `validate_blobs_sidecar` (is_data_available) and only run it if the consensus
|
||||||
|
// context tells us it wasnt already run
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -519,7 +519,7 @@ pub fn get_expected_withdrawals<T: EthSpec>(
 }

 /// FIXME: add link to this function once the spec is stable
-#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
+#[cfg(feature = "withdrawals")]
 pub fn process_withdrawals<'payload, T: EthSpec, Payload: AbstractExecPayload<T>>(
     state: &mut BeaconState<T>,
     payload: Payload::Ref<'payload>,
@@ -3,7 +3,6 @@ use eth2_hashing::hash_fixed;
 use itertools::{EitherOrBoth, Itertools};
 use safe_arith::SafeArith;
 use ssz::Decode;
-use ssz_types::VariableList;
 use types::consts::eip4844::{BLOB_TX_TYPE, VERSIONED_HASH_VERSION_KZG};
 use types::{
     AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, KzgCommitment, Transaction,
@@ -18,6 +17,7 @@ pub fn process_blob_kzg_commitments<T: EthSpec, Payload: AbstractExecPayload<T>>(
         block_body.blob_kzg_commitments(),
     ) {
         if let Some(transactions) = payload.transactions() {
+            //FIXME(sean) only run if this wasn't run in gossip (use consensus context)
             if !verify_kzg_commitments_against_transactions::<T>(transactions, kzg_commitments)? {
                 return Err(BlockProcessingError::BlobVersionHashMismatch);
             }
@@ -29,7 +29,7 @@ pub fn process_blob_kzg_commitments<T: EthSpec, Payload: AbstractExecPayload<T>>(

 pub fn verify_kzg_commitments_against_transactions<T: EthSpec>(
     transactions: &Transactions<T>,
-    kzg_commitments: &VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
+    kzg_commitments: &[KzgCommitment],
 ) -> Result<bool, BlockProcessingError> {
     let nested_iter = transactions
         .into_iter()
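Aside: the check above hinges on the EIP-4844 versioned-hash rule — each blob transaction's versioned hash must equal the SHA-256 of the corresponding KZG commitment with the first byte overwritten by `VERSIONED_HASH_VERSION_KZG`. A self-contained sketch of that mapping, using the `sha2` crate purely for illustration (the real code goes through `eth2_hashing`):

```rust
use sha2::{Digest, Sha256};

const VERSIONED_HASH_VERSION_KZG: u8 = 1;

/// sha256(commitment) with the first byte replaced by the version tag.
fn kzg_commitment_to_versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    hash
}

fn main() {
    let commitment = [0u8; 48];
    let versioned = kzg_commitment_to_versioned_hash(&commitment);
    assert_eq!(versioned[0], VERSIONED_HASH_VERSION_KZG);
}
```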
@@ -295,7 +295,6 @@ pub fn process_exits<T: EthSpec>(
 ///
 /// Returns `Ok(())` if the validation and state updates completed successfully. Otherwise returns
 /// an `Err` describing the invalid object or cause of failure.
-#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
 pub fn process_bls_to_execution_changes<T: EthSpec>(
     state: &mut BeaconState<T>,
     bls_to_execution_changes: &[SignedBlsToExecutionChange],
@@ -12,6 +12,7 @@ harness = false
 serde-big-array = {version = "0.3.2", features = ["const-generics"]}
 merkle_proof = { path = "../../consensus/merkle_proof" }
 bls = { path = "../../crypto/bls" }
+kzg = { path = "../../crypto/kzg" }
 compare_fields = { path = "../../common/compare_fields" }
 compare_fields_derive = { path = "../../common/compare_fields_derive" }
 eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" }
@@ -28,7 +29,8 @@ serde_derive = "1.0.116"
 slog = "2.5.2"
 eth2_ssz = "0.4.1"
 eth2_ssz_derive = "0.3.1"
-eth2_ssz_types = "0.2.2"
+#FIXME(sean)
+eth2_ssz_types = { path = "../ssz_types" }
 swap_or_not_shuffle = { path = "../swap_or_not_shuffle" }
 test_random_derive = { path = "../../common/test_random_derive" }
 tree_hash = "0.4.1"
@@ -1,4 +1,4 @@
-use crate::kzg_commitment::KzgCommitment;
+use super::KzgCommitment;
 use crate::test_utils::TestRandom;
 use crate::*;
 use derivative::Derivative;
@@ -1,5 +1,5 @@
-use crate::kzg_proof::KzgProof;
 use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot};
+use kzg::KzgProof;
 use serde_derive::{Deserialize, Serialize};
 use ssz::Encode;
 use ssz_derive::{Decode, Encode};
@@ -12,8 +12,9 @@ use tree_hash_derive::TreeHash;
 pub struct BlobsSidecar<T: EthSpec> {
     pub beacon_block_root: Hash256,
     pub beacon_block_slot: Slot,
+    #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
     pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
-    pub kzg_aggregate_proof: KzgProof,
+    pub kzg_aggregated_proof: KzgProof,
 }

 impl<T: EthSpec> SignedRoot for BlobsSidecar<T> {}
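Aside: `BlobsSidecar` derives `Encode`/`Decode`, so it SSZ round-trips like any other consensus type. A hedged sketch (it assumes the re-exports shown elsewhere in this diff, and builds an empty sidecar by hand):

```rust
use ssz::{Decode, Encode};
use types::blobs_sidecar::BlobsSidecar;
use types::{Hash256, KzgProof, MainnetEthSpec, Slot, VariableList};

fn main() {
    let sidecar = BlobsSidecar::<MainnetEthSpec> {
        beacon_block_root: Hash256::zero(),
        beacon_block_slot: Slot::new(1),
        blobs: VariableList::empty(),
        kzg_aggregated_proof: KzgProof::default(),
    };
    // Encode to SSZ bytes and decode back; slots should match.
    let bytes = sidecar.as_ssz_bytes();
    let decoded = BlobsSidecar::<MainnetEthSpec>::from_ssz_bytes(&bytes).unwrap();
    assert_eq!(decoded.beacon_block_slot, sidecar.beacon_block_slot);
}
```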
@@ -23,7 +23,7 @@ pub mod merge {
     pub const INTERVALS_PER_SLOT: u64 = 3;
 }
 pub mod eip4844 {
-    use crate::Uint256;
+    use crate::{Epoch, Uint256};

     use lazy_static::lazy_static;

@@ -32,6 +32,7 @@ pub mod eip4844 {
             "52435875175126190479447740508185965837690552500527637822603658699938581184513"
         )
         .expect("should initialize BLS_MODULUS");
+        pub static ref MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS: Epoch = Epoch::from(4096_u64);
     }
     pub const BLOB_TX_TYPE: u8 = 5;
     pub const VERSIONED_HASH_VERSION_KZG: u8 = 1;
@@ -3,7 +3,7 @@ use crate::*;
 use safe_arith::SafeArith;
 use serde_derive::{Deserialize, Serialize};
 use ssz_types::typenum::{
-    bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U16,
+    bit::B0, UInt, Unsigned, U0, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U16,
     U16777216, U2, U2048, U256, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192,
 };
 use std::fmt::{self, Debug};
@@ -105,6 +105,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
      */
     type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+    type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq;
     /*
      * Derived values (set these CAREFULLY)
      */
@@ -123,6 +124,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
     /// Must be set to `SyncCommitteeSize / SyncCommitteeSubnetCount`.
     type SyncSubcommitteeSize: Unsigned + Clone + Sync + Send + Debug + PartialEq;

+    /// The total length of a blob in bytes.
+    ///
+    /// Must be set to `BytesPerFieldElement * FieldElementsPerBlob`.
+    type BytesPerBlob: Unsigned + Clone + Sync + Send + Debug + PartialEq;
+
     fn default_spec() -> ChainSpec;

     fn spec_name() -> EthSpecId;
@@ -248,9 +254,9 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
         Self::MaxBlobsPerBlock::to_usize()
     }

-    /// FIXME: why is this called chunks_per_blob??
-    fn chunks_per_blob() -> usize {
-        Self::FieldElementsPerBlob::to_usize()
+    /// Returns the `BYTES_PER_BLOB` constant for the specification.
+    fn bytes_per_blob() -> usize {
+        Self::BytesPerBlob::to_usize()
     }
 }

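Aside: the derived-value rule from the trait doc is easy to sanity-check — 4096 field elements × 32 bytes each = 131072 bytes per blob, matching the `U131072` used for mainnet just below. In plain constants:

```rust
const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
const BYTES_PER_FIELD_ELEMENT: usize = 32;
const BYTES_PER_BLOB: usize = FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT;

fn main() {
    // Mirrors `type BytesPerBlob = U131072` in the mainnet preset below.
    assert_eq!(BYTES_PER_BLOB, 131_072);
}
```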
@@ -293,7 +299,9 @@ impl EthSpec for MainnetEthSpec {
     type MinGasLimit = U5000;
     type MaxExtraDataBytes = U32;
     type MaxBlobsPerBlock = U16; // 2**4 = 16
+    type BytesPerFieldElement = U32;
     type FieldElementsPerBlob = U4096;
+    type BytesPerBlob = U131072;
     type SyncSubcommitteeSize = U128; // 512 committee size / 4 sync committee subnet count
     type MaxPendingAttestations = U4096; // 128 max attestations * 32 slots per epoch
     type SlotsPerEth1VotingPeriod = U2048; // 64 epochs * 32 slots per epoch
@@ -325,6 +333,8 @@ impl EthSpec for MinimalEthSpec {
     type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch
     type SlotsPerEth1VotingPeriod = U32; // 4 epochs * 8 slots per epoch
     type MaxWithdrawalsPerPayload = U4;
+    type FieldElementsPerBlob = U4; //FIXME(sean) this is spec'd out currently but will likely change
+    type BytesPerBlob = U128; //FIXME(sean) this is spec'd out currently but will likely change

     params_from_eth_spec!(MainnetEthSpec {
         JustificationBitsLength,
@@ -347,7 +357,7 @@ impl EthSpec for MinimalEthSpec {
         MaxExtraDataBytes,
         MaxBlsToExecutionChanges,
         MaxBlobsPerBlock,
-        FieldElementsPerBlob
+        BytesPerFieldElement
     });

     fn default_spec() -> ChainSpec {
@@ -396,6 +406,8 @@ impl EthSpec for GnosisEthSpec {
     type MaxWithdrawalsPerPayload = U16;
     type MaxBlobsPerBlock = U16; // 2**4 = 16
     type FieldElementsPerBlob = U4096;
+    type BytesPerFieldElement = U32;
+    type BytesPerBlob = U131072;

     fn default_spec() -> ChainSpec {
         ChainSpec::gnosis()
@@ -10,7 +10,7 @@ use std::fmt;
 #[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)]
 #[derivative(Debug = "transparent")]
 #[serde(transparent)]
-pub struct ExecutionBlockHash(Hash256);
+pub struct ExecutionBlockHash(pub Hash256);

 impl ExecutionBlockHash {
     pub fn zero() -> Self {
consensus/types/src/kzg_commitment.rs (deleted, 43 lines)
@@ -1,43 +0,0 @@
-use crate::test_utils::TestRandom;
-use crate::*;
-use derivative::Derivative;
-use serde_derive::{Deserialize, Serialize};
-use ssz_derive::{Decode, Encode};
-use std::fmt;
-use std::fmt::{Display, Formatter};
-use tree_hash::{PackedEncoding, TreeHash};
-
-#[derive(Derivative, Debug, Clone, Encode, Decode, Serialize, Deserialize)]
-#[derivative(PartialEq, Eq, Hash)]
-#[ssz(struct_behaviour = "transparent")]
-pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]);
-
-impl Display for KzgCommitment {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
-    }
-}
-
-impl TreeHash for KzgCommitment {
-    fn tree_hash_type() -> tree_hash::TreeHashType {
-        <[u8; 48] as TreeHash>::tree_hash_type()
-    }
-
-    fn tree_hash_packed_encoding(&self) -> PackedEncoding {
-        self.0.tree_hash_packed_encoding()
-    }
-
-    fn tree_hash_packing_factor() -> usize {
-        <[u8; 48] as TreeHash>::tree_hash_packing_factor()
-    }
-
-    fn tree_hash_root(&self) -> tree_hash::Hash256 {
-        self.0.tree_hash_root()
-    }
-}
-
-impl TestRandom for KzgCommitment {
-    fn random_for_test(rng: &mut impl rand::RngCore) -> Self {
-        KzgCommitment(<[u8; 48] as TestRandom>::random_for_test(rng))
-    }
-}
consensus/types/src/kzg_proof.rs (deleted, 63 lines)
@@ -1,63 +0,0 @@
-use crate::test_utils::{RngCore, TestRandom};
-use serde::{Deserialize, Serialize};
-use serde_big_array::BigArray;
-use ssz_derive::{Decode, Encode};
-use std::fmt;
-use tree_hash::{PackedEncoding, TreeHash};
-
-const KZG_PROOF_BYTES_LEN: usize = 48;
-
-#[derive(Debug, PartialEq, Hash, Clone, Copy, Encode, Decode, Serialize, Deserialize)]
-#[serde(transparent)]
-#[ssz(struct_behaviour = "transparent")]
-pub struct KzgProof(#[serde(with = "BigArray")] pub [u8; KZG_PROOF_BYTES_LEN]);
-
-impl fmt::Display for KzgProof {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
-    }
-}
-
-impl Default for KzgProof {
-    fn default() -> Self {
-        KzgProof([0; 48])
-    }
-}
-
-impl From<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof {
-    fn from(bytes: [u8; KZG_PROOF_BYTES_LEN]) -> Self {
-        Self(bytes)
-    }
-}
-
-impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof {
-    fn into(self) -> [u8; KZG_PROOF_BYTES_LEN] {
-        self.0
-    }
-}
-
-impl TreeHash for KzgProof {
-    fn tree_hash_type() -> tree_hash::TreeHashType {
-        <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_type()
-    }
-
-    fn tree_hash_packed_encoding(&self) -> PackedEncoding {
-        self.0.tree_hash_packed_encoding()
-    }
-
-    fn tree_hash_packing_factor() -> usize {
-        <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_packing_factor()
-    }
-
-    fn tree_hash_root(&self) -> tree_hash::Hash256 {
-        self.0.tree_hash_root()
-    }
-}
-
-impl TestRandom for KzgProof {
-    fn random_for_test(rng: &mut impl RngCore) -> Self {
-        let mut bytes = [0; KZG_PROOF_BYTES_LEN];
-        rng.fill_bytes(&mut bytes);
-        Self(bytes)
-    }
-}
@@ -97,8 +97,7 @@ pub mod slot_data;
 pub mod sqlite;

 pub mod blobs_sidecar;
-pub mod kzg_commitment;
-pub mod kzg_proof;
+pub mod signed_block_and_blobs;

 use ethereum_types::{H160, H256};

@@ -150,8 +149,6 @@ pub use crate::free_attestation::FreeAttestation;
 pub use crate::graffiti::{Graffiti, GRAFFITI_BYTES_LEN};
 pub use crate::historical_batch::HistoricalBatch;
 pub use crate::indexed_attestation::IndexedAttestation;
-pub use crate::kzg_commitment::KzgCommitment;
-pub use crate::kzg_proof::KzgProof;
 pub use crate::participation_flags::ParticipationFlags;
 pub use crate::participation_list::ParticipationList;
 pub use crate::payload::{
@@ -173,6 +170,8 @@ pub use crate::signed_beacon_block::{
     SignedBlindedBeaconBlock,
 };
 pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader;
+pub use crate::signed_block_and_blobs::SignedBeaconBlockAndBlobsSidecar;
+pub use crate::signed_block_and_blobs::SignedBeaconBlockAndBlobsSidecarDecode;
 pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange;
 pub use crate::signed_contribution_and_proof::SignedContributionAndProof;
 pub use crate::signed_voluntary_exit::SignedVoluntaryExit;
@@ -193,7 +192,6 @@ pub use crate::validator_registration_data::*;
 pub use crate::validator_subscription::ValidatorSubscription;
 pub use crate::voluntary_exit::VoluntaryExit;
 pub use crate::withdrawal::Withdrawal;
-use serde_big_array::BigArray;

 pub type CommitteeIndex = u64;
 pub type Hash256 = H256;
@@ -201,12 +199,15 @@ pub type Uint256 = ethereum_types::U256;
 pub type Address = H160;
 pub type ForkVersion = [u8; 4];
 pub type BLSFieldElement = Uint256;
-pub type Blob<T> = FixedVector<BLSFieldElement, <T as EthSpec>::FieldElementsPerBlob>;
+pub type Blob<T> = FixedVector<u8, <T as EthSpec>::BytesPerBlob>;
 pub type VersionedHash = Hash256;

 pub use bls::{
     AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey,
     Signature, SignatureBytes,
 };

+pub use kzg::{KzgCommitment, KzgProof};
+
 pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList};
 pub use superstruct::superstruct;
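Aside: note the representation change above — a `Blob` is now a flat byte vector (`FixedVector<u8, BytesPerBlob>`) rather than a vector of `Uint256` field elements. A hedged construction sketch for the mainnet preset:

```rust
use types::{Blob, EthSpec, FixedVector, MainnetEthSpec};

fn main() {
    // 131072 zero bytes = 4096 field elements of 32 bytes, all canonical.
    let bytes = vec![0u8; MainnetEthSpec::bytes_per_blob()];
    let blob: Blob<MainnetEthSpec> = FixedVector::new(bytes).unwrap();
    assert_eq!(blob.len(), 131_072);
}
```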
consensus/types/src/signed_block_and_blobs.rs (new file, 139 lines)
@@ -0,0 +1,139 @@
+use crate::{BlobsSidecar, EthSpec, Hash256, SignedBeaconBlock, SignedBeaconBlockEip4844, Slot};
+use serde_derive::{Deserialize, Serialize};
+use ssz::{Decode, DecodeError};
+use ssz_derive::{Decode, Encode};
+use std::sync::Arc;
+use tree_hash_derive::TreeHash;
+
+#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, PartialEq)]
+#[serde(bound = "T: EthSpec")]
+pub struct SignedBeaconBlockAndBlobsSidecarDecode<T: EthSpec> {
+    pub beacon_block: SignedBeaconBlockEip4844<T>,
+    pub blobs_sidecar: BlobsSidecar<T>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, PartialEq)]
+#[serde(bound = "T: EthSpec")]
+pub struct SignedBeaconBlockAndBlobsSidecar<T: EthSpec> {
+    pub beacon_block: Arc<SignedBeaconBlock<T>>,
+    pub blobs_sidecar: Arc<BlobsSidecar<T>>,
+}
+
+impl<T: EthSpec> SignedBeaconBlockAndBlobsSidecar<T> {
+    pub fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        let SignedBeaconBlockAndBlobsSidecarDecode {
+            beacon_block,
+            blobs_sidecar,
+        } = SignedBeaconBlockAndBlobsSidecarDecode::from_ssz_bytes(bytes)?;
+        Ok(SignedBeaconBlockAndBlobsSidecar {
+            beacon_block: Arc::new(SignedBeaconBlock::Eip4844(beacon_block)),
+            blobs_sidecar: Arc::new(blobs_sidecar),
+        })
+    }
+}
+
+/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobsSidecar`].
+#[derive(Clone, Debug)]
+pub enum BlockWrapper<T: EthSpec> {
+    Block {
+        block: Arc<SignedBeaconBlock<T>>,
+    },
+    BlockAndBlob {
+        block_sidecar_pair: SignedBeaconBlockAndBlobsSidecar<T>,
+    },
+}
+
+impl<T: EthSpec> BlockWrapper<T> {
+    pub fn slot(&self) -> Slot {
+        match self {
+            BlockWrapper::Block { block } => block.slot(),
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
+                block_sidecar_pair.beacon_block.slot()
+            }
+        }
+    }
+    pub fn block(&self) -> &SignedBeaconBlock<T> {
+        match self {
+            BlockWrapper::Block { block } => &block,
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => &block_sidecar_pair.beacon_block,
+        }
+    }
+    pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<T>> {
+        match self {
+            BlockWrapper::Block { block } => block.clone(),
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
+                block_sidecar_pair.beacon_block.clone()
+            }
+        }
+    }
+    pub fn blobs_sidecar(&self) -> Option<Arc<BlobsSidecar<T>>> {
+        match self {
+            BlockWrapper::Block { block: _ } => None,
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
+                Some(block_sidecar_pair.blobs_sidecar.clone())
+            }
+        }
+    }
+
+    pub fn blobs(&self) -> Option<&BlobsSidecar<T>> {
+        match self {
+            BlockWrapper::Block { .. } => None,
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
+                Some(&block_sidecar_pair.blobs_sidecar)
+            }
+        }
+    }
+
+    pub fn message(&self) -> crate::BeaconBlockRef<T> {
+        match self {
+            BlockWrapper::Block { block } => block.message(),
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
+                block_sidecar_pair.beacon_block.message()
+            }
+        }
+    }
+
+    pub fn parent_root(&self) -> Hash256 {
+        self.block().parent_root()
+    }
+
+    pub fn deconstruct(self) -> (Arc<SignedBeaconBlock<T>>, Option<Arc<BlobsSidecar<T>>>) {
+        match self {
+            BlockWrapper::Block { block } => (block, None),
+            BlockWrapper::BlockAndBlob { block_sidecar_pair } => {
+                let SignedBeaconBlockAndBlobsSidecar {
+                    beacon_block,
+                    blobs_sidecar,
+                } = block_sidecar_pair;
+                (beacon_block, Some(blobs_sidecar))
+            }
+        }
+    }
+}
+
+// TODO: probably needs to be changed. This is needed because SignedBeaconBlockAndBlobsSidecar
+// does not implement Hash
+impl<T: EthSpec> std::hash::Hash for BlockWrapper<T> {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        match self {
+            BlockWrapper::Block { block } => block.hash(state),
+            BlockWrapper::BlockAndBlob {
+                block_sidecar_pair: block_and_blob,
+            } => block_and_blob.beacon_block.hash(state),
+        }
+    }
+}
+
+impl<T: EthSpec> From<SignedBeaconBlock<T>> for BlockWrapper<T> {
+    fn from(block: SignedBeaconBlock<T>) -> Self {
+        BlockWrapper::Block {
+            block: Arc::new(block),
+        }
+    }
+}
+
+impl<T: EthSpec> From<Arc<SignedBeaconBlock<T>>> for BlockWrapper<T> {
+    fn from(block: Arc<SignedBeaconBlock<T>>) -> Self {
+        BlockWrapper::Block { block }
+    }
+}
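Aside: `BlockWrapper` lets network and import paths handle blocks uniformly whether or not blobs arrived alongside. A hedged usage sketch (it assumes `BlockWrapper` is reached via the `signed_block_and_blobs` module, since this diff only re-exports the sidecar types at the crate root):

```rust
use std::sync::Arc;
use types::signed_block_and_blobs::BlockWrapper;
use types::{EthSpec, SignedBeaconBlock};

fn handle_block<T: EthSpec>(block: Arc<SignedBeaconBlock<T>>) {
    // `From<Arc<SignedBeaconBlock<T>>>` wraps a blob-less block.
    let wrapper: BlockWrapper<T> = block.into();
    let (block, maybe_sidecar) = wrapper.deconstruct();
    if let Some(sidecar) = maybe_sidecar {
        assert_eq!(sidecar.beacon_block_slot, block.slot());
    }
}
```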
@@ -10,6 +10,8 @@ mod address;
 mod aggregate_signature;
 mod bitfield;
 mod hash256;
+mod kzg_commitment;
+mod kzg_proof;
 mod public_key;
 mod public_key_bytes;
 mod secret_key;
consensus/types/src/test_utils/test_random/kzg_commitment.rs (new file, 8 lines)
@@ -0,0 +1,8 @@
+use super::*;
+use crate::KzgCommitment;
+
+impl TestRandom for KzgCommitment {
+    fn random_for_test(rng: &mut impl rand::RngCore) -> Self {
+        KzgCommitment(<[u8; 48] as TestRandom>::random_for_test(rng))
+    }
+}
consensus/types/src/test_utils/test_random/kzg_proof.rs (new file, 11 lines)
@@ -0,0 +1,11 @@
+use super::*;
+use kzg::KzgProof;
+
+impl TestRandom for KzgProof {
+    fn random_for_test(rng: &mut impl RngCore) -> Self {
+        // TODO(pawan): use the length constant here
+        let mut bytes = [0; 48];
+        rng.fill_bytes(&mut bytes);
+        Self(bytes)
+    }
+}
crypto/kzg/Cargo.toml (new file, 22 lines)
@@ -0,0 +1,22 @@
+[package]
+name = "kzg"
+version = "0.1.0"
+authors = ["Pawan Dhananjay <pawandhananjay@gmail.com>"]
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+eth2_ssz = "0.4.1"
+eth2_ssz_derive = "0.3.1"
+tree_hash = "0.4.1"
+derivative = "2.1.1"
+rand = "0.7.3"
+serde = "1.0.116"
+serde_derive = "1.0.116"
+eth2_serde_utils = "0.1.1"
+hex = "0.4.2"
+eth2_hashing = "0.3.0"
+ethereum-types = "0.12.1"
+c-kzg = {git = "https://github.com/pawanjay176/c-kzg-4844", rev = "669a13800a8a0d094c5387db58e06936ef194a25" }
+
crypto/kzg/src/kzg_commitment.rs (new file, 104 lines)
@@ -0,0 +1,104 @@
+use derivative::Derivative;
+use serde::de::{Deserialize, Deserializer};
+use serde::ser::{Serialize, Serializer};
+use ssz_derive::{Decode, Encode};
+use std::fmt;
+use std::fmt::{Debug, Display, Formatter};
+use std::str::FromStr;
+use tree_hash::{PackedEncoding, TreeHash};
+
+const KZG_COMMITMENT_BYTES_LEN: usize = 48;
+
+#[derive(Derivative, Clone, Encode, Decode)]
+#[derivative(PartialEq, Eq, Hash)]
+#[ssz(struct_behaviour = "transparent")]
+pub struct KzgCommitment(pub [u8; KZG_COMMITMENT_BYTES_LEN]);
+
+impl Display for KzgCommitment {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
+    }
+}
+
+impl TreeHash for KzgCommitment {
+    fn tree_hash_type() -> tree_hash::TreeHashType {
+        <[u8; KZG_COMMITMENT_BYTES_LEN] as TreeHash>::tree_hash_type()
+    }
+
+    fn tree_hash_packed_encoding(&self) -> PackedEncoding {
+        self.0.tree_hash_packed_encoding()
+    }
+
+    fn tree_hash_packing_factor() -> usize {
+        <[u8; KZG_COMMITMENT_BYTES_LEN] as TreeHash>::tree_hash_packing_factor()
+    }
+
+    fn tree_hash_root(&self) -> tree_hash::Hash256 {
+        self.0.tree_hash_root()
+    }
+}
+
+impl Serialize for KzgCommitment {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+impl<'de> Deserialize<'de> for KzgCommitment {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        pub struct StringVisitor;
+
+        impl<'de> serde::de::Visitor<'de> for StringVisitor {
+            type Value = String;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("a hex string with 0x prefix")
+            }
+
+            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+            where
+                E: serde::de::Error,
+            {
+                Ok(value.to_string())
+            }
+        }
+
+        let string = deserializer.deserialize_str(StringVisitor)?;
+        <Self as std::str::FromStr>::from_str(&string).map_err(serde::de::Error::custom)
+    }
+}
+
+impl FromStr for KzgCommitment {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if let Some(stripped) = s.strip_prefix("0x") {
+            let bytes = hex::decode(stripped).map_err(|e| e.to_string())?;
+            if bytes.len() == KZG_COMMITMENT_BYTES_LEN {
+                let mut kzg_commitment_bytes = [0; KZG_COMMITMENT_BYTES_LEN];
+                kzg_commitment_bytes[..].copy_from_slice(&bytes);
+                Ok(Self(kzg_commitment_bytes))
+            } else {
+                Err(format!(
+                    "InvalidByteLength: got {}, expected {}",
+                    bytes.len(),
+                    KZG_COMMITMENT_BYTES_LEN
+                ))
+            }
+        } else {
+            Err("must start with 0x".to_string())
+        }
+    }
+}
+
+impl Debug for KzgCommitment {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", eth2_serde_utils::hex::encode(&self.0))
+    }
+}
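Aside: the hand-written `Serialize`/`Deserialize`/`FromStr` plumbing above exists so commitments travel as 0x-prefixed hex strings over JSON. A quick round-trip sketch:

```rust
use kzg::KzgCommitment;
use std::str::FromStr;

fn main() {
    let commitment = KzgCommitment([0u8; 48]);
    // `Display` produces 0x-prefixed hex; `FromStr` parses it back.
    let hex = commitment.to_string();
    assert!(hex.starts_with("0x"));
    let parsed = KzgCommitment::from_str(&hex).unwrap();
    assert_eq!(parsed, commitment);
}
```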
crypto/kzg/src/kzg_proof.rs (new file, 120 lines)
@@ -0,0 +1,120 @@
+use serde::de::{Deserialize, Deserializer};
+use serde::ser::{Serialize, Serializer};
+use ssz_derive::{Decode, Encode};
+use std::fmt;
+use std::fmt::Debug;
+use std::str::FromStr;
+use tree_hash::{PackedEncoding, TreeHash};
+
+const KZG_PROOF_BYTES_LEN: usize = 48;
+
+#[derive(PartialEq, Hash, Clone, Copy, Encode, Decode)]
+#[ssz(struct_behaviour = "transparent")]
+pub struct KzgProof(pub [u8; KZG_PROOF_BYTES_LEN]);
+
+impl fmt::Display for KzgProof {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
+    }
+}
+
+impl Default for KzgProof {
+    fn default() -> Self {
+        KzgProof([0; KZG_PROOF_BYTES_LEN])
+    }
+}
+
+impl From<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof {
+    fn from(bytes: [u8; KZG_PROOF_BYTES_LEN]) -> Self {
+        Self(bytes)
+    }
+}
+
+impl Into<[u8; KZG_PROOF_BYTES_LEN]> for KzgProof {
+    fn into(self) -> [u8; KZG_PROOF_BYTES_LEN] {
+        self.0
+    }
+}
+
+impl TreeHash for KzgProof {
+    fn tree_hash_type() -> tree_hash::TreeHashType {
+        <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_type()
+    }
+
+    fn tree_hash_packed_encoding(&self) -> PackedEncoding {
+        self.0.tree_hash_packed_encoding()
+    }
+
+    fn tree_hash_packing_factor() -> usize {
+        <[u8; KZG_PROOF_BYTES_LEN]>::tree_hash_packing_factor()
+    }
+
+    fn tree_hash_root(&self) -> tree_hash::Hash256 {
+        self.0.tree_hash_root()
+    }
+}
+
+impl Serialize for KzgProof {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+
+impl<'de> Deserialize<'de> for KzgProof {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        pub struct StringVisitor;
+
+        impl<'de> serde::de::Visitor<'de> for StringVisitor {
+            type Value = String;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("a hex string with 0x prefix")
+            }
+
+            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+            where
+                E: serde::de::Error,
+            {
+                Ok(value.to_string())
+            }
+        }
+
+        let string = deserializer.deserialize_str(StringVisitor)?;
+        <Self as std::str::FromStr>::from_str(&string).map_err(serde::de::Error::custom)
+    }
+}
+
+impl FromStr for KzgProof {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        if let Some(stripped) = s.strip_prefix("0x") {
+            let bytes = hex::decode(stripped).map_err(|e| e.to_string())?;
+            if bytes.len() == KZG_PROOF_BYTES_LEN {
+                let mut kzg_proof_bytes = [0; KZG_PROOF_BYTES_LEN];
+                kzg_proof_bytes[..].copy_from_slice(&bytes);
+                Ok(Self(kzg_proof_bytes))
+            } else {
+                Err(format!(
+                    "InvalidByteLength: got {}, expected {}",
+                    bytes.len(),
+                    KZG_PROOF_BYTES_LEN
+                ))
+            }
+        } else {
+            Err("must start with 0x".to_string())
+        }
+    }
+}
+
+impl Debug for KzgProof {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}", eth2_serde_utils::hex::encode(&self.0))
+    }
+}
crypto/kzg/src/lib.rs (new file, 75 lines)
@@ -0,0 +1,75 @@
+mod kzg_commitment;
+mod kzg_proof;
+
+pub use crate::{kzg_commitment::KzgCommitment, kzg_proof::KzgProof};
+pub use c_kzg::bytes_to_g1;
+use c_kzg::{Error as CKzgError, KZGSettings, BYTES_PER_FIELD_ELEMENT, FIELD_ELEMENTS_PER_BLOB};
+use std::path::PathBuf;
+
+const BYTES_PER_BLOB: usize = FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT;
+
+/// The consensus type `Blob` is generic over EthSpec, so it cannot be imported
+/// in this crate without creating a cyclic dependency between the kzg and consensus/types crates.
+/// So we need to use a `Vec` here unless we think of a smarter way of doing this.
+type Blob = [u8; BYTES_PER_BLOB];
+
+#[derive(Debug)]
+pub enum Error {
+    InvalidTrustedSetup(CKzgError),
+    InvalidKzgCommitment(CKzgError),
+    InvalidKzgProof(CKzgError),
+    KzgVerificationFailed(CKzgError),
+    InvalidLength(String),
+    KzgProofComputationFailed(CKzgError),
+    InvalidBlob(String),
+}
+
+/// A wrapper over a kzg library that holds the trusted setup parameters.
+pub struct Kzg {
+    trusted_setup: KZGSettings,
+}
+
+impl Kzg {
+    pub fn new_from_file(file_path: PathBuf) -> Result<Self, Error> {
+        Ok(Self {
+            trusted_setup: KZGSettings::load_trusted_setup(file_path)
+                .map_err(Error::InvalidTrustedSetup)?,
+        })
+    }
+
+    pub fn compute_aggregate_kzg_proof(&self, blobs: &[Blob]) -> Result<KzgProof, Error> {
+        c_kzg::KZGProof::compute_aggregate_kzg_proof(blobs, &self.trusted_setup)
+            .map_err(Error::KzgProofComputationFailed)
+            .map(|proof| KzgProof(proof.to_bytes()))
+    }
+
+    pub fn verify_aggregate_kzg_proof(
+        &self,
+        blobs: &[Blob],
+        expected_kzg_commitments: &[KzgCommitment],
+        kzg_aggregated_proof: KzgProof,
+    ) -> Result<bool, Error> {
+        if blobs.len() != expected_kzg_commitments.len() {
+            return Err(Error::InvalidLength(
+                "blobs and expected_kzg_commitments should be of same size".to_string(),
+            ));
+        }
+        let commitments = expected_kzg_commitments
+            .into_iter()
+            .map(|comm| {
+                c_kzg::KZGCommitment::from_bytes(&comm.0).map_err(Error::InvalidKzgCommitment)
+            })
+            .collect::<Result<Vec<c_kzg::KZGCommitment>, Error>>()?;
+        let proof =
+            c_kzg::KZGProof::from_bytes(&kzg_aggregated_proof.0).map_err(Error::InvalidKzgProof)?;
+        proof
+            .verify_aggregate_kzg_proof(blobs, &commitments, &self.trusted_setup)
+            .map_err(Error::InvalidKzgProof)
+    }
+
+    pub fn blob_to_kzg_commitment(&self, blob: Blob) -> KzgCommitment {
+        KzgCommitment(
+            c_kzg::KZGCommitment::blob_to_kzg_commitment(blob, &self.trusted_setup).to_bytes(),
+        )
+    }
+}
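Aside: end to end, the wrapper loads the trusted setup once and then serves commitment/proof operations. A hedged sketch — the setup path is illustrative, and the blob size assumes the 4096 × 32 constants baked into this c-kzg revision:

```rust
use kzg::{Kzg, KzgCommitment, KzgProof};
use std::path::PathBuf;

fn main() -> Result<(), kzg::Error> {
    let kzg = Kzg::new_from_file(PathBuf::from("./trusted_setup.txt"))?;

    // A single zeroed blob (every 32-byte chunk is a canonical field element).
    let blob = [0u8; 4096 * 32];
    let commitment: KzgCommitment = kzg.blob_to_kzg_commitment(blob);

    let proof: KzgProof = kzg.compute_aggregate_kzg_proof(&[blob])?;
    assert!(kzg.verify_aggregate_kzg_proof(&[blob], &[commitment], proof)?);
    Ok(())
}
```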
@@ -8,6 +8,8 @@ edition = "2021"
 [features]
 portable = ["bls/supranational-portable"]
 fake_crypto = ['bls/fake_crypto']
+withdrawals = ["types/withdrawals", "beacon_chain/withdrawals", "store/withdrawals", "state_processing/withdrawals"]
+withdrawals-processing = ["beacon_chain/withdrawals-processing", "store/withdrawals-processing", "state_processing/withdrawals-processing"]

 [dependencies]
 bls = { path = "../crypto/bls" }
@@ -21,6 +23,7 @@ types = { path = "../consensus/types" }
 state_processing = { path = "../consensus/state_processing" }
 int_to_bytes = { path = "../consensus/int_to_bytes" }
 eth2_ssz = "0.4.1"
+eth2_hashing = "0.3.0"
 environment = { path = "../lighthouse/environment" }
 eth2_network_config = { path = "../common/eth2_network_config" }
 genesis = { path = "../beacon_node/genesis" }
@@ -559,14 +559,41 @@ fn main() {
                 ),
         )
         .arg(
-            Arg::with_name("merge-fork-epoch")
-                .long("merge-fork-epoch")
+            Arg::with_name("bellatrix-fork-epoch")
+                .long("bellatrix-fork-epoch")
                 .value_name("EPOCH")
                 .takes_value(true)
                 .help(
                     "The epoch at which to enable the Merge hard fork",
                 ),
         )
+        .arg(
+            Arg::with_name("capella-fork-epoch")
+                .long("capella-fork-epoch")
+                .value_name("EPOCH")
+                .takes_value(true)
+                .help(
+                    "The epoch at which to enable the Capella hard fork",
+                ),
+        )
+        .arg(
+            Arg::with_name("eip4844-fork-epoch")
+                .long("eip4844-fork-epoch")
+                .value_name("EPOCH")
+                .takes_value(true)
+                .help(
+                    "The epoch at which to enable the eip4844 hard fork",
+                ),
+        )
+        .arg(
+            Arg::with_name("ttd")
+                .long("ttd")
+                .value_name("TTD")
+                .takes_value(true)
+                .help(
+                    "The terminal total difficulty",
+                ),
+        )
         .arg(
             Arg::with_name("eth1-block-hash")
                 .long("eth1-block-hash")
@@ -1,17 +1,25 @@
 use clap::ArgMatches;
 use clap_utils::{parse_optional, parse_required, parse_ssz_optional};
+use eth2_hashing::hash;
 use eth2_network_config::Eth2NetworkConfig;
 use genesis::interop_genesis_state;
 use ssz::Decode;
 use ssz::Encode;
+use state_processing::process_activations;
+use state_processing::upgrade::{
+    upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_eip4844,
+};
 use std::fs::File;
 use std::io::Read;
 use std::path::PathBuf;
+use std::str::FromStr;
 use std::time::{SystemTime, UNIX_EPOCH};
 use types::{
-    test_utils::generate_deterministic_keypairs, Address, Config, EthSpec, ExecutionPayloadHeader,
-    ExecutionPayloadHeaderMerge,
+    test_utils::generate_deterministic_keypairs, Address, BeaconState, ChainSpec, Config, Eth1Data,
+    EthSpec, ExecutionPayloadHeader, ExecutionPayloadHeaderMerge, Hash256, Keypair, PublicKey,
+    Validator,
 };
+use types::{BeaconStateMerge, ExecutionBlockHash};

 pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {
     let deposit_contract_address: Address = parse_required(matches, "deposit-contract-address")?;
@@ -63,10 +71,22 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Result<(), String> {
         spec.altair_fork_epoch = Some(fork_epoch);
     }

-    if let Some(fork_epoch) = parse_optional(matches, "merge-fork-epoch")? {
+    if let Some(fork_epoch) = parse_optional(matches, "bellatrix-fork-epoch")? {
         spec.bellatrix_fork_epoch = Some(fork_epoch);
     }

+    if let Some(fork_epoch) = parse_optional(matches, "capella-fork-epoch")? {
+        spec.capella_fork_epoch = Some(fork_epoch);
+    }
+
+    if let Some(fork_epoch) = parse_optional(matches, "eip4844-fork-epoch")? {
+        spec.eip4844_fork_epoch = Some(fork_epoch);
+    }
+
+    if let Some(ttd) = parse_optional(matches, "ttd")? {
+        spec.terminal_total_difficulty = ttd;
+    }
+
     let genesis_state_bytes = if matches.is_present("interop-genesis-state") {
         let execution_payload_header: Option<ExecutionPayloadHeader<T>> =
             parse_optional(matches, "execution-payload-header")?
|
|||||||
|
|
||||||
let keypairs = generate_deterministic_keypairs(validator_count);
|
let keypairs = generate_deterministic_keypairs(validator_count);
|
||||||
|
|
||||||
let genesis_state = interop_genesis_state::<T>(
|
let genesis_state = initialize_state_with_validators::<T>(
|
||||||
&keypairs,
|
&keypairs,
|
||||||
genesis_time,
|
genesis_time,
|
||||||
eth1_block_hash.into_root(),
|
eth1_block_hash.into_root(),
|
||||||
@ -130,3 +150,103 @@ pub fn run<T: EthSpec>(testnet_dir_path: PathBuf, matches: &ArgMatches) -> Resul
|
|||||||
|
|
||||||
testnet.write_to_file(testnet_dir_path, overwrite_files)
|
testnet.write_to_file(testnet_dir_path, overwrite_files)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn initialize_state_with_validators<T: EthSpec>(
|
||||||
|
keypairs: &[Keypair],
|
||||||
|
genesis_time: u64,
|
||||||
|
eth1_block_hash: Hash256,
|
||||||
|
execution_payload_header: Option<ExecutionPayloadHeader<T>>,
|
||||||
|
spec: &ChainSpec,
|
||||||
|
) -> Result<BeaconState<T>, String> {
|
||||||
|
let default_header = ExecutionPayloadHeaderMerge {
|
||||||
|
gas_limit: 10,
|
||||||
|
base_fee_per_gas: 10.into(),
|
||||||
|
timestamp: genesis_time,
|
||||||
|
block_hash: ExecutionBlockHash(eth1_block_hash),
|
||||||
|
prev_randao: Hash256::random(),
|
||||||
|
parent_hash: ExecutionBlockHash::zero(),
|
||||||
|
transactions_root: Hash256::random(),
|
||||||
|
..ExecutionPayloadHeaderMerge::default()
|
||||||
|
};
|
||||||
|
let execution_payload_header =
|
||||||
|
execution_payload_header.or(Some(ExecutionPayloadHeader::Merge(default_header)));
|
||||||
|
// Empty eth1 data
|
||||||
|
let eth1_data = Eth1Data {
|
||||||
|
block_hash: eth1_block_hash,
|
||||||
|
deposit_count: 0,
|
||||||
|
deposit_root: Hash256::from_str(
|
||||||
|
"0xd70a234731285c6804c2a4f56711ddb8c82c99740f207854891028af34e27e5e",
|
||||||
|
)
|
||||||
|
.unwrap(), // empty deposit tree root
|
||||||
|
};
|
||||||
|
let mut state = BeaconState::new(genesis_time, eth1_data, spec);
|
||||||
|
|
||||||
|
// Seed RANDAO with Eth1 entropy
|
||||||
|
state.fill_randao_mixes_with(eth1_block_hash);
|
||||||
|
|
||||||
|
for keypair in keypairs.into_iter() {
|
||||||
|
let withdrawal_credentials = |pubkey: &PublicKey| {
|
||||||
|
let mut credentials = hash(&pubkey.as_ssz_bytes());
|
||||||
|
credentials[0] = spec.bls_withdrawal_prefix_byte;
|
||||||
|
Hash256::from_slice(&credentials)
|
||||||
|
};
|
||||||
|
let amount = spec.max_effective_balance;
|
||||||
|
// Create a new validator.
|
||||||
|
let validator = Validator {
|
||||||
|
pubkey: keypair.pk.clone().into(),
|
||||||
|
withdrawal_credentials: withdrawal_credentials(&keypair.pk),
|
||||||
|
activation_eligibility_epoch: spec.far_future_epoch,
|
||||||
|
activation_epoch: spec.far_future_epoch,
|
||||||
|
exit_epoch: spec.far_future_epoch,
|
||||||
|
withdrawable_epoch: spec.far_future_epoch,
|
||||||
|
effective_balance: std::cmp::min(
|
||||||
|
amount - amount % (spec.effective_balance_increment),
|
||||||
|
spec.max_effective_balance,
|
||||||
|
),
|
||||||
|
slashed: false,
|
||||||
|
};
|
||||||
|
state.validators_mut().push(validator).unwrap();
|
||||||
|
state.balances_mut().push(amount).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
process_activations(&mut state, spec).unwrap();
|
||||||
|
|
||||||
|
if spec
|
||||||
|
.altair_fork_epoch
|
||||||
|
.map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch())
|
||||||
|
{
|
||||||
|
upgrade_to_altair(&mut state, spec).unwrap();
|
||||||
|
|
||||||
|
state.fork_mut().previous_version = spec.altair_fork_version;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Similarly, perform an upgrade to the merge if configured from genesis.
|
||||||
|
if spec
|
||||||
|
.bellatrix_fork_epoch
|
||||||
|
.map_or(false, |fork_epoch| fork_epoch == T::genesis_epoch())
|
||||||
|
{
|
||||||
|
upgrade_to_bellatrix(&mut state, spec).unwrap();
|
||||||
|
|
||||||
|
// Remove intermediate Altair fork from `state.fork`.
|
||||||
|
state.fork_mut().previous_version = spec.bellatrix_fork_version;
|
||||||
|
|
||||||
|
// Override latest execution payload header.
|
||||||
|
// See https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/merge/beacon-chain.md#testing
|
||||||
|
|
||||||
|
if let Some(ExecutionPayloadHeader::Merge(ref header)) = execution_payload_header {
|
||||||
|
*state
|
||||||
|
.latest_execution_payload_header_merge_mut()
|
||||||
|
.map_err(|_| {
|
||||||
|
"State must contain bellatrix execution payload header".to_string()
|
||||||
|
})? = header.clone();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now that we have our validators, initialize the caches (including the committees)
|
||||||
|
state.build_all_caches(spec).unwrap();
|
||||||
|
|
||||||
|
// Set genesis validators root for domain separation and chain versioning
|
||||||
|
*state.genesis_validators_root_mut() = state.update_validators_tree_hash_cache().unwrap();
|
||||||
|
|
||||||
|
Ok(state)
|
||||||
|
}
|
||||||
|
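Aside: the validator-seeding loop derives BLS withdrawal credentials the standard way — SHA-256 of the SSZ-encoded pubkey with byte 0 replaced by the BLS withdrawal prefix (0x00). The byte-level step in isolation, on a dummy 48-byte pubkey and using `sha2` for illustration:

```rust
use sha2::{Digest, Sha256};

const BLS_WITHDRAWAL_PREFIX_BYTE: u8 = 0x00;

fn withdrawal_credentials(pubkey_ssz: &[u8]) -> [u8; 32] {
    let mut credentials: [u8; 32] = Sha256::digest(pubkey_ssz).into();
    credentials[0] = BLS_WITHDRAWAL_PREFIX_BYTE;
    credentials
}

fn main() {
    let dummy_pubkey = [0xabu8; 48];
    let creds = withdrawal_credentials(&dummy_pubkey);
    assert_eq!(creds[0], BLS_WITHDRAWAL_PREFIX_BYTE);
}
```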
@@ -51,12 +51,18 @@ pub fn run_parse_ssz<T: EthSpec>(matches: &ArgMatches) -> Result<(), String> {
         "signed_block_base" => decode_and_print::<SignedBeaconBlockBase<T>>(&bytes, format)?,
         "signed_block_altair" => decode_and_print::<SignedBeaconBlockAltair<T>>(&bytes, format)?,
         "signed_block_merge" => decode_and_print::<SignedBeaconBlockMerge<T>>(&bytes, format)?,
+        "signed_block_capella" => decode_and_print::<SignedBeaconBlockCapella<T>>(&bytes, format)?,
+        "signed_block_eip4844" => decode_and_print::<SignedBeaconBlockEip4844<T>>(&bytes, format)?,
         "block_base" => decode_and_print::<BeaconBlockBase<T>>(&bytes, format)?,
         "block_altair" => decode_and_print::<BeaconBlockAltair<T>>(&bytes, format)?,
         "block_merge" => decode_and_print::<BeaconBlockMerge<T>>(&bytes, format)?,
+        "block_capella" => decode_and_print::<BeaconBlockCapella<T>>(&bytes, format)?,
+        "block_eip4844" => decode_and_print::<BeaconBlockEip4844<T>>(&bytes, format)?,
         "state_base" => decode_and_print::<BeaconStateBase<T>>(&bytes, format)?,
         "state_altair" => decode_and_print::<BeaconStateAltair<T>>(&bytes, format)?,
         "state_merge" => decode_and_print::<BeaconStateMerge<T>>(&bytes, format)?,
+        "state_capella" => decode_and_print::<BeaconStateCapella<T>>(&bytes, format)?,
+        "state_eip4844" => decode_and_print::<BeaconStateEip4844<T>>(&bytes, format)?,
         other => return Err(format!("Unknown type: {}", other)),
     };

@@ -30,6 +30,8 @@ while getopts "d:sh" flag; do
      echo "   DATADIR Value for --datadir parameter"
      echo "   NETWORK-PORT Value for --enr-udp-port, --enr-tcp-port and --port"
      echo "   HTTP-PORT Value for --http-port"
+     echo "   EXECUTION-ENDPOINT Value for --execution-endpoint"
+     echo "   EXECUTION-JWT Value for --execution-jwt"
      exit
      ;;
  esac
@@ -39,8 +41,12 @@ done
data_dir=${@:$OPTIND+0:1}
network_port=${@:$OPTIND+1:1}
http_port=${@:$OPTIND+2:1}
+execution_endpoint=${@:$OPTIND+3:1}
+execution_jwt=${@:$OPTIND+4:1}

-exec lighthouse \
+lighthouse_binary=lighthouse
+
+exec $lighthouse_binary \
    --debug-level $DEBUG_LEVEL \
    bn \
    $SUBSCRIBE_ALL_SUBNETS \
@@ -54,4 +60,7 @@ exec lighthouse \
    --port $network_port \
    --http-port $http_port \
    --disable-packet-filter \
-    --target-peers $((BN_COUNT - 1))
+    --target-peers $((BN_COUNT - 1)) \
+    --execution-endpoint $execution_endpoint \
+    --trusted-setup-file ./trusted_setup.txt \
+    --execution-jwt $execution_jwt
scripts/local_testnet/el_bootnode.sh (new executable file, 4 lines)
@@ -0,0 +1,4 @@
+priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91"
+
+
+/home/sean/CLionProjects/eip4844-interop/geth/go-ethereum/build/bin/bootnode --nodekeyhex $priv_key
scripts/local_testnet/genesis.json (new file, 852 lines; diff suppressed because one or more lines are too long)
52
scripts/local_testnet/geth.sh
Executable file
52
scripts/local_testnet/geth.sh
Executable file
@@ -0,0 +1,52 @@
+set -Eeuo pipefail
+
+source ./vars.env
+
+# Get options
+while getopts "d:sh" flag; do
+  case "${flag}" in
+    d) DEBUG_LEVEL=${OPTARG};;
+    s) SUBSCRIBE_ALL_SUBNETS="--subscribe-all-subnets";;
+    h)
+      echo "Start a geth node"
+      echo
+      echo "usage: $0 <Options> <DATADIR> <NETWORK-PORT> <HTTP-PORT>"
+      echo
+      echo "Options:"
+      echo "   -h: this help"
+      echo
+      echo "Positional arguments:"
+      echo "  DATADIR       Value for --datadir parameter"
+      echo "  NETWORK-PORT  Value for --port"
+      echo "  HTTP-PORT     Value for --http.port"
+      echo "  AUTH-PORT     Value for --authrpc.port"
+      echo "  GENESIS_FILE  Value for geth init"
+      exit
+      ;;
+  esac
+done
+
+# Get positional arguments
+data_dir=${@:$OPTIND+0:1}
+network_port=${@:$OPTIND+1:1}
+http_port=${@:$OPTIND+2:1}
+auth_port=${@:$OPTIND+3:1}
+genesis_file=${@:$OPTIND+4:1}
+
+# Init
+$GETH_BINARY init \
+  --datadir $data_dir \
+  $genesis_file
+
+echo "Completed init"
+
+exec $GETH_BINARY \
+  --datadir $data_dir \
+  --ipcdisable \
+  --http \
+  --http.api="engine,eth,web3,net,debug" \
+  --networkid=$CHAIN_ID \
+  --syncmode=full \
+  --bootnodes $EL_BOOTNODE_ENODE \
+  --port $network_port \
+  --http.port $auth_port
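Note that the exec line passes $auth_port to --http.port rather than $http_port, so the engine API is served on the plain HTTP endpoint; presumably the interop geth build used here does not yet split authenticated RPC onto --authrpc.port the way the help text suggests. Given the port bases set in start_local_testnet.sh, node 1 would be launched roughly as (genesis path illustrative):

./geth.sh ~/.lighthouse/local-testnet/geth_datadir1 7001 6001 5011 ./genesis.json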
@@ -12,7 +12,7 @@ if [ -f "$1" ]; then
 [[ -n "$pid" ]] || continue
 
 echo killing $pid
-kill $pid
+kill $pid || true
 done < $1
 fi
 
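The appended `|| true` keeps the cleanup loop alive when a recorded PID has already exited; without it, a shell running with errexit would abort on the first stale entry. A minimal illustration, assuming PID 999999 is unused:

set -e
kill 999999 2>/dev/null || true   # dead PID: kill fails, but the failure is masked
echo "cleanup continues"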
@@ -13,11 +13,6 @@ set -o nounset -o errexit -o pipefail
 
 source ./vars.env
 
-lcli \
-  deploy-deposit-contract \
-  --eth1-http http://localhost:8545 \
-  --confirmations 1 \
-  --validator-count $VALIDATOR_COUNT
 
 NOW=`date +%s`
 GENESIS_TIME=`expr $NOW + $GENESIS_DELAY`
@@ -32,13 +27,20 @@ lcli \
 --genesis-delay $GENESIS_DELAY \
 --genesis-fork-version $GENESIS_FORK_VERSION \
 --altair-fork-epoch $ALTAIR_FORK_EPOCH \
+--bellatrix-fork-epoch $BELLATRIX_FORK_EPOCH \
+--capella-fork-epoch $CAPELLA_FORK_EPOCH \
+--eip4844-fork-epoch $EIP4844_FORK_EPOCH \
+--ttd $TTD \
+--eth1-block-hash $ETH1_BLOCK_HASH \
 --eth1-id $CHAIN_ID \
 --eth1-follow-distance 1 \
 --seconds-per-slot $SECONDS_PER_SLOT \
 --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \
+--validator-count $GENESIS_VALIDATOR_COUNT \
+--interop-genesis-state \
 --force
 
-echo Specification generated at $TESTNET_DIR.
+echo Specification and genesis.ssz generated at $TESTNET_DIR.
 echo "Generating $VALIDATOR_COUNT validators concurrently... (this may take a while)"
 
 lcli \
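With `--interop-genesis-state`, `lcli new-testnet` now writes `genesis.ssz` alongside the spec (hence the updated echo line), which is why the separate `interop-genesis` invocation is deleted in the next hunk.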
@@ -48,13 +50,10 @@ lcli \
 --node-count $BN_COUNT
 
 echo Validators generated with keystore passwords at $DATADIR.
-echo "Building genesis state... (this might take a while)"
 
-lcli \
-  interop-genesis \
-  --spec $SPEC_PRESET \
-  --genesis-time $GENESIS_TIME \
-  --testnet-dir $TESTNET_DIR \
-  $GENESIS_VALIDATOR_COUNT
+GENESIS_TIME=$(lcli pretty-ssz state_merge ~/.lighthouse/local-testnet/testnet/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d')
+CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT)))
+EIP4844_TIME=$((GENESIS_TIME + (EIP4844_FORK_EPOCH * 32 * SECONDS_PER_SLOT)))
 
-echo Created genesis state in $TESTNET_DIR
+sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' genesis.json
+sed -i 's/"shardingForkTime".*$/"shardingForkTime": '"$EIP4844_TIME"',/g' genesis.json
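The execution-layer fork times are derived as genesis time plus epochs × 32 slots × seconds per slot (32 slots per epoch on the mainnet preset). A worked example, assuming an illustrative SECONDS_PER_SLOT=3 and genesis timestamp:

GENESIS_TIME=1700000000
SECONDS_PER_SLOT=3
CAPELLA_FORK_EPOCH=1
EIP4844_FORK_EPOCH=2
echo $((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT)))   # 1700000096
echo $((GENESIS_TIME + (EIP4844_FORK_EPOCH * 32 * SECONDS_PER_SLOT)))   # 1700000192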
@@ -40,6 +40,8 @@ if (( $VC_COUNT > $BN_COUNT )); then
 exit
 fi
 
+genesis_file=${@:$OPTIND+0:1}
+
 # Init some constants
 PID_FILE=$TESTNET_DIR/PIDS.pid
 LOG_DIR=$TESTNET_DIR
@@ -55,6 +57,9 @@ mkdir -p $LOG_DIR
 for (( bn=1; bn<=$BN_COUNT; bn++ )); do
   touch $LOG_DIR/beacon_node_$bn.log
 done
+for (( el=1; el<=$BN_COUNT; el++ )); do
+  touch $LOG_DIR/geth_$el.log
+done
 for (( vc=1; vc<=$VC_COUNT; vc++ )); do
   touch $LOG_DIR/validator_node_$vc.log
 done
@@ -92,12 +97,6 @@ execute_command_add_PID() {
 echo "$!" >> $PID_FILE
 }
 
-# Start ganache, setup things up and start the bootnode.
-# The delays are necessary, hopefully there is a better way :(
-
-# Delay to let ganache to get started
-execute_command_add_PID ganache_test_node.log ./ganache_test_node.sh
-sleeping 10
 
 # Setup data
 echo "executing: ./setup.sh >> $LOG_DIR/setup.log"
@@ -105,16 +104,37 @@ echo "executing: ./setup.sh >> $LOG_DIR/setup.log"
 
 # Delay to let boot_enr.yaml to be created
 execute_command_add_PID bootnode.log ./bootnode.sh
-sleeping 1
+sleeping 3
+
+execute_command_add_PID el_bootnode.log ./el_bootnode.sh
+sleeping 3
 
 # Start beacon nodes
 BN_udp_tcp_base=9000
 BN_http_port_base=8000
 
+EL_base_network=7000
+EL_base_http=6000
+EL_base_auth_http=5000
+
 (( $VC_COUNT < $BN_COUNT )) && SAS=-s || SAS=
 
+for (( el=1; el<=$BN_COUNT; el++ )); do
+  execute_command_add_PID geth_$el.log ./geth.sh $DATADIR/geth_datadir$el $((EL_base_network + $el)) $((EL_base_http + $el)) $((EL_base_auth_http + $el + 10)) $genesis_file
+done
+
+sleeping 20
+
+# Reset the `genesis.json` config file fork times.
+sed -i 's/"shanghaiTime".*$/"shanghaiTime": 0,/g' genesis.json
+sed -i 's/"shardingForkTime".*$/"shardingForkTime": 0,/g' genesis.json
+
 for (( bn=1; bn<=$BN_COUNT; bn++ )); do
-  execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_http_port_base + $bn))
+  execute_command_add_PID json_snoop_$bn.log json_rpc_snoop -p $((EL_base_auth_http + $bn)) -b 0.0.0.0 http://localhost:$((EL_base_auth_http + $bn + 10))
+  secret=$DATADIR/geth_datadir$bn/geth/jwtsecret
+  echo $secret
+  execute_command_add_PID beacon_node_$bn.log ./beacon_node.sh $SAS -d $DEBUG_LEVEL $DATADIR/node_$bn $((BN_udp_tcp_base + $bn)) $((BN_http_port_base + $bn)) http://localhost:$((EL_base_auth_http + $bn)) $secret
 done
 
 # Start requested number of validator clients
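The port bases make the per-node wiring deterministic. A sketch of the effective layout for node n, following the arithmetic in the loops above:

n=1
echo "geth p2p:           $((7000 + n))"       # EL_base_network + n
echo "geth http rpc:      $((6000 + n))"       # EL_base_http + n
echo "geth engine (http): $((5000 + n + 10))"  # EL_base_auth_http + n + 10, snoop's upstream
echo "json_rpc_snoop:     $((5000 + n))"       # EL_base_auth_http + n, the beacon node's endpoint
echo "beacon p2p:         $((9000 + n))"       # BN_udp_tcp_base + n
echo "beacon http api:    $((8000 + n))"       # BN_http_port_base + n

Each beacon node therefore talks to its geth through json_rpc_snoop, which records the engine API traffic while proxying to the real port ten above.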
scripts/local_testnet/trusted_setup.txt (new file, 4163 lines)
File diff suppressed because it is too large
@@ -30,4 +30,5 @@ exec lighthouse \
 --testnet-dir $TESTNET_DIR \
 --init-slashing-protection \
 --beacon-nodes ${@:$OPTIND+1:1} \
+--suggested-fee-recipient 0x690B9A9E9aa1C9dB991C7721a92d351Db4FaC990 \
 $VC_ARGS
@@ -1,17 +1,21 @@
+GETH_BINARY=geth
+
 # Base directories for the validator keys and secrets
 DATADIR=~/.lighthouse/local-testnet
 
 # Directory for the eth2 config
 TESTNET_DIR=$DATADIR/testnet
 
-# Mnemonic for the ganache test network
-ETH1_NETWORK_MNEMONIC="vast thought differ pull jewel broom cook wrist tribe word before omit"
+EL_BOOTNODE_ENODE="enode://51ea9bb34d31efc3491a842ed13b8cab70e753af108526b57916d716978b380ed713f4336a80cdb85ec2a115d5a8c0ae9f3247bed3c84d3cb025c6bab311062c@127.0.0.1:0?discport=30301"
 
-# Hardcoded deposit contract based on ETH1_NETWORK_MNEMONIC
-DEPOSIT_CONTRACT_ADDRESS=8c594691c0e592ffa21f153a16ae41db5befcaaa
+# Hardcoded deposit contract
+DEPOSIT_CONTRACT_ADDRESS=4242424242424242424242424242424242424242
 
 GENESIS_FORK_VERSION=0x42424242
 
+# Block hash generated from genesis.json in directory
+ETH1_BLOCK_HASH=16ef16304456fdacdeb272bd70207021031db355ed6c5e44ebd34c1ab757e221
+
 VALIDATOR_COUNT=80
 GENESIS_VALIDATOR_COUNT=80
@@ -33,7 +37,12 @@ BOOTNODE_PORT=4242
 CHAIN_ID=4242
 
 # Hard fork configuration
-ALTAIR_FORK_EPOCH=18446744073709551615
+ALTAIR_FORK_EPOCH=0
+BELLATRIX_FORK_EPOCH=0
+CAPELLA_FORK_EPOCH=1
+EIP4844_FORK_EPOCH=2
+
+TTD=0
 
 # Spec version (mainnet or minimal)
 SPEC_PRESET=mainnet
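For context, 18446744073709551615 is 2^64 − 1, the consensus spec's FAR_FUTURE_EPOCH sentinel meaning "never activates". The new schedule activates Altair and Bellatrix at genesis, Capella one epoch later, and EIP-4844 one epoch after that, while TTD=0 makes the chain post-merge from the first block.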
@@ -9,6 +9,8 @@ edition = "2021"
 ef_tests = []
 milagro = ["bls/milagro"]
 fake_crypto = ["bls/fake_crypto"]
+withdrawals = ["state_processing/withdrawals", "store/withdrawals", "beacon_chain/withdrawals", "types/withdrawals", "execution_layer/withdrawals"]
+withdrawals-processing = ["state_processing/withdrawals-processing", "store/withdrawals-processing", "beacon_chain/withdrawals-processing", "execution_layer/withdrawals-processing"]
 
 [dependencies]
 bls = { path = "../../crypto/bls", default-features = false }
Some files were not shown because too many files have changed in this diff.