Merge pull request #3905 from emhane/beacon_chain_tests

Debug CI
realbigsean 2023-02-15 11:53:41 -05:00 committed by GitHub
commit 87d1fbeb21
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
59 changed files with 785 additions and 606 deletions

.gitignore (vendored, 1 line changed)
View File

@@ -9,6 +9,7 @@ perf.data*
 /bin
 genesis.ssz
 /clippy.toml
+/.cargo
 # IntelliJ
 /*.iml

Cargo.lock (generated, 648 lines changed)

File diff suppressed because it is too large.

View File

@@ -94,7 +94,6 @@ resolver = "2"
 [patch]
 [patch.crates-io]
-fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" }
 warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
 eth2_ssz = { path = "consensus/ssz" }
 eth2_ssz_derive = { path = "consensus/ssz_derive" }

View File

@@ -36,7 +36,7 @@ PROFILE ?= release
 # List of all hard forks. This list is used to set env variables for several tests so that
 # they run for different forks.
-FORKS=phase0 altair merge capella
+FORKS=phase0 altair merge capella eip4844
 # Builds the Lighthouse binary in release (optimized).
 #
@@ -163,6 +163,7 @@ lint:
 	cargo clippy --workspace --tests -- \
 	-D clippy::fn_to_numeric_cast_any \
 	-D warnings \
+	-A clippy::uninlined-format-args \
 	-A clippy::derive_partial_eq_without_eq \
 	-A clippy::from-over-into \
 	-A clippy::upper-case-acronyms \
@@ -193,7 +194,7 @@ arbitrary-fuzz:
 # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
 audit:
 	cargo install --force cargo-audit
-	cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159
+	cargo audit --ignore RUSTSEC-2020-0071
 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
 vendor:
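For reference, clippy::uninlined-format-args is the lint that asks for inlined format arguments; the Makefile now allows the older positional style. A standalone sketch (not from this repository) of the two spellings:

// What the lint flags vs. what it prefers; both compile on Rust >= 1.58.
fn main() {
    let name = "lighthouse";
    println!("starting {}", name); // tolerated again via -A clippy::uninlined-format-args
    println!("starting {name}"); // the inlined style the lint would demand
}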

View File

@@ -68,6 +68,8 @@ hex = "0.4.2"
 exit-future = "0.2.0"
 unused_port = {path = "../../common/unused_port"}
 oneshot_broadcast = { path = "../../common/oneshot_broadcast" }
+slog-term = "2.6.0"
+slog-async = "2.5.0"
 [[test]]
 name = "beacon_chain_tests"

View File

@@ -42,7 +42,6 @@ pub enum Error {
     // Boxed to avoid an infinite-size recursion issue.
     BeaconChain(Box<BeaconChainError>),
     MissingBeaconState(Hash256),
-    MissingBlobs,
     FailedToTransitionState(StateAdvanceError),
     CannotAttestToFutureState {
         state_slot: Slot,

View File

@@ -4579,7 +4579,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 let (payload, _, _) = block_contents
                     .ok_or(BlockProductionError::MissingExecutionPayload)?
                     .deconstruct();
-
                 (
                     BeaconBlock::Capella(BeaconBlockCapella {
                         slot,
@@ -4610,7 +4609,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 let (payload, kzg_commitments, blobs) = block_contents
                     .ok_or(BlockProductionError::MissingExecutionPayload)?
                     .deconstruct();
-
                 (
                     BeaconBlock::Eip4844(BeaconBlockEip4844 {
                         slot,
@@ -4694,8 +4692,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .as_ref()
                 .ok_or(BlockProductionError::TrustedSetupNotInitialized)?;
             let kzg_aggregated_proof =
-                kzg_utils::compute_aggregate_kzg_proof::<T::EthSpec>(&kzg, &blobs)
-                    .map_err(|e| BlockProductionError::KzgError(e))?;
+                kzg_utils::compute_aggregate_kzg_proof::<T::EthSpec>(kzg, &blobs)
+                    .map_err(BlockProductionError::KzgError)?;
             let beacon_block_root = block.canonical_root();
             let expected_kzg_commitments = block.body().blob_kzg_commitments().map_err(|_| {
                 BlockProductionError::InvalidBlockVariant(
@@ -4709,7 +4707,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 kzg_aggregated_proof,
             };
             kzg_utils::validate_blobs_sidecar(
-                &kzg,
+                kzg,
                 slot,
                 beacon_block_root,
                 expected_kzg_commitments,
@@ -5941,9 +5939,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// The epoch at which we require a data availability check in block processing.
     /// `None` if the `Eip4844` fork is disabled.
     pub fn data_availability_boundary(&self) -> Option<Epoch> {
-        self.spec
-            .eip4844_fork_epoch
-            .map(|fork_epoch| {
+        self.spec.eip4844_fork_epoch.and_then(|fork_epoch| {
            self.epoch().ok().map(|current_epoch| {
                std::cmp::max(
                    fork_epoch,
@@ -5951,7 +5947,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                )
            })
        })
-            .flatten()
    }
    /// The epoch that is a data availability boundary, or the latest finalized epoch.
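Background on the data_availability_boundary() rewrite: for Option, map(..) followed by flatten() is equivalent to and_then(..), which is the form clippy's map_flatten lint suggests. A minimal runnable sketch with hypothetical names:

// Both forms return None unless fork epoch and current epoch are both known.
fn boundary(fork_epoch: Option<u64>, current_epoch: Option<u64>) -> Option<u64> {
    fork_epoch.and_then(|fork| current_epoch.map(|cur| std::cmp::max(fork, cur)))
}

fn main() {
    assert_eq!(boundary(Some(3), Some(7)), Some(7)); // max of the two epochs
    assert_eq!(boundary(None, Some(7)), None); // fork disabled
}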

View File

@@ -292,12 +292,12 @@ impl<E: EthSpec> AvailableBlock<E> {
                 let blobs_sidecar = beacon_block
                     .reconstruct_empty_blobs(Some(block_root))
                     .map(Arc::new)?;
-                return Ok(AvailableBlock(AvailableBlockInner::BlockAndBlob(
+                Ok(AvailableBlock(AvailableBlockInner::BlockAndBlob(
                     SignedBeaconBlockAndBlobsSidecar {
                         beacon_block,
                         blobs_sidecar,
                     },
-                )));
+                )))
             }
             DataAvailabilityCheckRequired::No => {
                 Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block)))
@@ -391,6 +391,7 @@ pub trait AsBlock<E: EthSpec> {
     fn message(&self) -> BeaconBlockRef<E>;
     fn as_block(&self) -> &SignedBeaconBlock<E>;
     fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>>;
+    fn canonical_root(&self) -> Hash256;
 }
 impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
@@ -432,8 +433,8 @@ impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
     }
     fn as_block(&self) -> &SignedBeaconBlock<E> {
         match &self {
-            BlockWrapper::Block(block) => &block,
-            BlockWrapper::BlockAndBlob(block, _) => &block,
+            BlockWrapper::Block(block) => block,
+            BlockWrapper::BlockAndBlob(block, _) => block,
         }
     }
     fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
@@ -442,6 +443,12 @@ impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
             BlockWrapper::BlockAndBlob(block, _) => block.clone(),
         }
     }
+    fn canonical_root(&self) -> Hash256 {
+        match &self {
+            BlockWrapper::Block(block) => block.canonical_root(),
+            BlockWrapper::BlockAndBlob(block, _) => block.canonical_root(),
+        }
+    }
 }
 impl<E: EthSpec> AsBlock<E> for &BlockWrapper<E> {
@@ -483,8 +490,8 @@ impl<E: EthSpec> AsBlock<E> for &BlockWrapper<E> {
     }
     fn as_block(&self) -> &SignedBeaconBlock<E> {
         match &self {
-            BlockWrapper::Block(block) => &block,
-            BlockWrapper::BlockAndBlob(block, _) => &block,
+            BlockWrapper::Block(block) => block,
+            BlockWrapper::BlockAndBlob(block, _) => block,
         }
     }
     fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
@@ -493,6 +500,12 @@ impl<E: EthSpec> AsBlock<E> for &BlockWrapper<E> {
             BlockWrapper::BlockAndBlob(block, _) => block.clone(),
         }
     }
+    fn canonical_root(&self) -> Hash256 {
+        match &self {
+            BlockWrapper::Block(block) => block.canonical_root(),
+            BlockWrapper::BlockAndBlob(block, _) => block.canonical_root(),
+        }
+    }
 }
 impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
@@ -546,7 +559,7 @@ impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
     }
     fn as_block(&self) -> &SignedBeaconBlock<E> {
         match &self.0 {
-            AvailableBlockInner::Block(block) => &block,
+            AvailableBlockInner::Block(block) => block,
             AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
                 &block_sidecar_pair.beacon_block
             }
@@ -560,4 +573,12 @@ impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
             }
         }
     }
+    fn canonical_root(&self) -> Hash256 {
+        match &self.0 {
+            AvailableBlockInner::Block(block) => block.canonical_root(),
+            AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
+                block_sidecar_pair.beacon_block.canonical_root()
+            }
+        }
+    }
 }
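The new canonical_root() trait method follows one pattern throughout: each wrapper variant delegates to the block it holds. A self-contained sketch with hypothetical stand-in types (the real trait also exposes slot, parent root, and more):

use std::sync::Arc;

struct Block(u64);
impl Block {
    fn canonical_root(&self) -> u64 {
        self.0
    }
}

// Stand-in for BlockWrapper: a block, optionally paired with blob bytes.
enum BlockWrapper {
    Block(Arc<Block>),
    BlockAndBlob(Arc<Block>, Vec<u8>),
}

trait AsBlock {
    fn canonical_root(&self) -> u64;
}

impl AsBlock for BlockWrapper {
    fn canonical_root(&self) -> u64 {
        // Every variant answers with the root of the wrapped block.
        match self {
            BlockWrapper::Block(block) => block.canonical_root(),
            BlockWrapper::BlockAndBlob(block, _) => block.canonical_root(),
        }
    }
}

fn main() {
    let plain = BlockWrapper::Block(Arc::new(Block(42)));
    let with_blob = BlockWrapper::BlockAndBlob(Arc::new(Block(7)), vec![0u8; 4]);
    assert_eq!(plain.canonical_root(), 42);
    assert_eq!(with_blob.canonical_root(), 7);
}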

View File

@@ -1120,7 +1120,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBlock<T> {
     }
     fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
-        &self.block.as_block()
+        self.block.as_block()
     }
 }

View File

@@ -1016,6 +1016,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String {
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::test_utils::EphemeralHarnessType;
     use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
     use eth2_hashing::hash;
     use genesis::{
@@ -1030,6 +1031,7 @@ mod test {
     use types::{EthSpec, MinimalEthSpec, Slot};
     type TestEthSpec = MinimalEthSpec;
+    type Builder = BeaconChainBuilder<EphemeralHarnessType<TestEthSpec>>;
     fn get_logger() -> Logger {
         let builder = NullLoggerBuilder;
@@ -1062,7 +1064,7 @@ mod test {
         let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
         let runtime = TestRuntime::default();
-        let chain = BeaconChainBuilder::new(MinimalEthSpec)
+        let chain = Builder::new(MinimalEthSpec)
             .logger(log.clone())
             .store(Arc::new(store))
             .task_executor(runtime.task_executor.clone())

View File

@@ -165,8 +165,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
             .read()
             .as_ref()
             .filter(|item| item.beacon_block_root == block_root)
-            .map(|item| item.blobs.clone())
-            .flatten()
+            .and_then(|item| item.blobs.clone())
     }
     /// Returns the proto-array block, if `block_root` matches the cached item.

View File

@@ -40,7 +40,7 @@ pub fn compute_aggregate_kzg_proof<T: EthSpec>(
     blobs: &[Blob<T>],
 ) -> Result<KzgProof, KzgError> {
     let blobs = blobs
-        .into_iter()
+        .iter()
         .map(|blob| ssz_blob_to_crypto_blob::<T>(blob.clone())) // TODO(pawan): avoid this clone
         .collect::<Vec<_>>();
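Background on the .into_iter() to .iter() swaps in this commit: on a slice, both yield references, so clippy (into_iter_on_ref) prefers the explicit iter(). A runnable sketch:

// Same iterator either way; iter() just says so plainly.
fn main() {
    let blobs: &[u32] = &[1, 2, 3];
    let a: Vec<u32> = blobs.iter().map(|b| b + 1).collect();
    let b: Vec<u32> = blobs.into_iter().map(|b| b + 1).collect(); // what clippy flags
    assert_eq!(a, b);
}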

View File

@@ -34,7 +34,9 @@ use rand::Rng;
 use rand::SeedableRng;
 use rayon::prelude::*;
 use sensitive_url::SensitiveUrl;
-use slog::Logger;
+use slog::{o, Drain, Logger};
+use slog_async::Async;
+use slog_term::{FullFormat, TermDecorator};
 use slot_clock::{SlotClock, TestingSlotClock};
 use state_processing::per_block_processing::compute_timestamp_at_slot;
 use state_processing::{
@@ -1924,8 +1926,9 @@ where
     chain_dump
         .iter()
         .cloned()
-        .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
-        .filter(|block_hash| *block_hash != Hash256::zero().into())
+        .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root)
+        .filter(|block_hash| *block_hash != Hash256::zero())
+        .map(|hash| hash.into())
         .collect()
 }
@@ -2125,3 +2128,15 @@ impl<T: BeaconChainTypes> fmt::Debug for BeaconChainHarness<T> {
         write!(f, "BeaconChainHarness")
     }
 }
+
+pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
+    let decorator = TermDecorator::new().build();
+    let drain = FullFormat::new(decorator).build().fuse();
+    let drain = Async::new(drain).build().fuse();
+    if enabled {
+        Logger::root(drain.filter_level(level).fuse(), o!())
+    } else {
+        Logger::root(drain.filter(|_| false).fuse(), o!())
+    }
+}
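A usage sketch for the new helper; the body of build_log is restated verbatim from the hunk above so the snippet stands alone (crates: slog, slog-term, slog-async):

use slog::{o, Drain, Logger};

pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
    let decorator = slog_term::TermDecorator::new().build();
    let drain = slog_term::FullFormat::new(decorator).build().fuse();
    let drain = slog_async::Async::new(drain).build().fuse();
    if enabled {
        Logger::root(drain.filter_level(level).fuse(), o!())
    } else {
        Logger::root(drain.filter(|_| false).fuse(), o!())
    }
}

fn main() {
    // A visible Debug-level terminal logger for chasing a failing test...
    let log = build_log(slog::Level::Debug, true);
    slog::debug!(log, "harness ready"; "validators" => 8);

    // ...or a fully silenced one for the normal CI run.
    let silent = build_log(slog::Level::Debug, false);
    slog::debug!(silent, "dropped before reaching the terminal");
}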

View File

@@ -299,7 +299,7 @@ mod test {
         let ops = cache
             .import_new_pubkeys(&state)
             .expect("should import pubkeys");
-        store.do_atomically(ops).unwrap();
+        store.do_atomically_with_block_and_blobs_cache(ops).unwrap();
         check_cache_get(&cache, &keypairs[..]);
         drop(cache);

View File

@@ -1,6 +1,9 @@
 #![cfg(not(debug_assertions))]
-use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
+use beacon_chain::{
+    blob_verification::{BlockWrapper, IntoAvailableBlock},
+    test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
+};
 use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
 use lazy_static::lazy_static;
 use std::sync::Arc;
@@ -131,6 +134,8 @@ async fn produces_attestations() {
     assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
     assert_eq!(data.target.root, target_root, "bad target root");
+    let block_wrapper: BlockWrapper<MainnetEthSpec> = Arc::new(block.clone()).into();
+
     let early_attestation = {
         let proto_block = chain
             .canonical_head
@@ -141,8 +146,9 @@ async fn produces_attestations() {
             .early_attester_cache
             .add_head_block(
                 block_root,
-                Arc::new(block.clone()),
-                None,
+                block_wrapper
+                    .into_available_block(block_root, chain)
+                    .expect("should wrap into available block"),
                 proto_block,
                 &state,
                 &chain.spec,
@@ -193,13 +199,18 @@ async fn early_attester_cache_old_request() {
         .get_block(&head.beacon_block_root)
         .unwrap();
+    let block: BlockWrapper<MainnetEthSpec> = head.beacon_block.clone().into();
+    let chain = &harness.chain;
+
     harness
         .chain
         .early_attester_cache
         .add_head_block(
             head.beacon_block_root,
-            head.beacon_block.clone(),
-            None,
+            block
+                .clone()
+                .into_available_block(head.beacon_block_root, &chain)
+                .expect("should wrap into available block"),
             head_proto_block,
             &head.beacon_state,
             &harness.chain.spec,
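The repeated .into() calls in these tests lean on a From impl that wraps an Arc'd block into the wrapper type. A minimal sketch with hypothetical stand-ins showing why every call site then gets Into for free:

use std::sync::Arc;

struct SignedBeaconBlock;

enum BlockWrapper {
    Block(Arc<SignedBeaconBlock>),
}

// One From impl on the wrapper...
impl From<Arc<SignedBeaconBlock>> for BlockWrapper {
    fn from(block: Arc<SignedBeaconBlock>) -> Self {
        BlockWrapper::Block(block)
    }
}

fn main() {
    // ...lets callers write `.into()` wherever a BlockWrapper is expected.
    let wrapped: BlockWrapper = Arc::new(SignedBeaconBlock).into();
    match wrapped {
        BlockWrapper::Block(inner) => assert_eq!(Arc::strong_count(&inner), 1),
    }
}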

View File

@@ -1,7 +1,8 @@
 #![cfg(not(debug_assertions))]
-use beacon_chain::test_utils::{
-    AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
+use beacon_chain::{
+    blob_verification::{AsBlock, BlockWrapper},
+    test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
 };
 use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer};
 use fork_choice::CountUnrealized;
@@ -80,7 +81,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
 fn chain_segment_blocks(chain_segment: &[BeaconSnapshot<E>]) -> Vec<Arc<SignedBeaconBlock<E>>> {
     chain_segment
         .iter()
-        .map(|snapshot| snapshot.beacon_block.clone())
+        .map(|snapshot| snapshot.beacon_block.clone().into())
         .collect()
 }
@@ -137,7 +138,10 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
 async fn chain_segment_full_segment() {
     let harness = get_harness(VALIDATOR_COUNT);
     let chain_segment = get_chain_segment().await;
-    let blocks = chain_segment_blocks(&chain_segment);
+    let blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
+        .into_iter()
+        .map(|block| block.into())
+        .collect();
     harness
         .chain
@@ -177,7 +181,10 @@ async fn chain_segment_varying_chunk_size() {
     for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
         let harness = get_harness(VALIDATOR_COUNT);
         let chain_segment = get_chain_segment().await;
-        let blocks = chain_segment_blocks(&chain_segment);
+        let blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
+            .into_iter()
+            .map(|block| block.into())
+            .collect();
         harness
             .chain
@@ -220,7 +227,10 @@ async fn chain_segment_non_linear_parent_roots() {
     /*
      * Test with a block removed.
      */
-    let mut blocks = chain_segment_blocks(&chain_segment);
+    let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
+        .into_iter()
+        .map(|block| block.into())
+        .collect();
     blocks.remove(2);
     assert!(
@@ -238,10 +248,14 @@
     /*
      * Test with a modified parent root.
      */
-    let mut blocks = chain_segment_blocks(&chain_segment);
-    let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
+    let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
+        .into_iter()
+        .map(|block| block.into())
+        .collect();
+    let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
     *block.parent_root_mut() = Hash256::zero();
-    blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
+    blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
     assert!(
         matches!(
@@ -269,10 +283,13 @@ async fn chain_segment_non_linear_slots() {
     /*
      * Test where a child is lower than the parent.
     */
-    let mut blocks = chain_segment_blocks(&chain_segment);
-    let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
+    let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
+        .into_iter()
+        .map(|block| block.into())
+        .collect();
+    let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
     *block.slot_mut() = Slot::new(0);
-    blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
+    blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
     assert!(
         matches!(
@@ -290,10 +307,13 @@
     /*
      * Test where a child is equal to the parent.
     */
-    let mut blocks = chain_segment_blocks(&chain_segment);
-    let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
+    let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
+        .into_iter()
+        .map(|block| block.into())
+        .collect();
+    let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
     *block.slot_mut() = blocks[2].slot();
-    blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
+    blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
     assert!(
         matches!(
@@ -315,9 +335,9 @@ async fn assert_invalid_signature(
     snapshots: &[BeaconSnapshot<E>],
     item: &str,
 ) {
-    let blocks = snapshots
+    let blocks: Vec<BlockWrapper<E>> = snapshots
         .iter()
-        .map(|snapshot| snapshot.beacon_block.clone())
+        .map(|snapshot| snapshot.beacon_block.clone().into())
         .collect();
     // Ensure the block will be rejected if imported in a chain segment.
@@ -341,7 +361,7 @@
     let ancestor_blocks = chain_segment
         .iter()
         .take(block_index)
-        .map(|snapshot| snapshot.beacon_block.clone())
+        .map(|snapshot| snapshot.beacon_block.clone().into())
         .collect();
     // We don't care if this fails, we just call this to ensure that all prior blocks have been
     // imported prior to this test.
@@ -409,7 +429,7 @@ async fn invalid_signature_gossip_block() {
     let ancestor_blocks = chain_segment
         .iter()
         .take(block_index)
-        .map(|snapshot| snapshot.beacon_block.clone())
+        .map(|snapshot| snapshot.beacon_block.clone().into())
         .collect();
     harness
         .chain
@@ -455,9 +475,9 @@ async fn invalid_signature_block_proposal() {
         block.clone(),
         junk_signature(),
     ));
-    let blocks = snapshots
+    let blocks: Vec<BlockWrapper<E>> = snapshots
         .iter()
-        .map(|snapshot| snapshot.beacon_block.clone())
+        .map(|snapshot| snapshot.beacon_block.clone().into())
         .collect::<Vec<_>>();
     // Ensure the block will be rejected if imported in a chain segment.
     assert!(
@@ -654,9 +674,9 @@ async fn invalid_signature_deposit() {
         Arc::new(SignedBeaconBlock::from_block(block, signature));
     update_parent_roots(&mut snapshots);
     update_proposal_signatures(&mut snapshots, &harness);
-    let blocks = snapshots
+    let blocks: Vec<BlockWrapper<E>> = snapshots
         .iter()
-        .map(|snapshot| snapshot.beacon_block.clone())
+        .map(|snapshot| snapshot.beacon_block.clone().into())
         .collect();
     assert!(
         !matches!(
@@ -733,7 +753,7 @@ async fn block_gossip_verification() {
     for snapshot in &chain_segment[0..block_index] {
         let gossip_verified = harness
             .chain
-            .verify_block_for_gossip(snapshot.beacon_block.clone())
+            .verify_block_for_gossip(snapshot.beacon_block.clone().into())
             .await
             .expect("should obtain gossip verified block");
@@ -771,7 +791,7 @@
     *block.slot_mut() = expected_block_slot;
     assert!(
         matches!(
-            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
+            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
             BlockError::FutureSlot {
                 present_slot,
                 block_slot,
@@ -805,7 +825,7 @@
     *block.slot_mut() = expected_finalized_slot;
     assert!(
         matches!(
-            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
+            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
             BlockError::WouldRevertFinalizedSlot {
                 block_slot,
                 finalized_slot,
@@ -835,10 +855,9 @@
         unwrap_err(
             harness
                 .chain
-                .verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(
-                    block,
-                    junk_signature()
-                )))
+                .verify_block_for_gossip(
+                    Arc::new(SignedBeaconBlock::from_block(block, junk_signature())).into()
+                )
                 .await
         ),
         BlockError::ProposalSignatureInvalid
@@ -863,7 +882,7 @@
     *block.parent_root_mut() = parent_root;
     assert!(
         matches!(
-            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
+            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
             BlockError::ParentUnknown(block)
             if block.parent_root() == parent_root
         ),
@@ -889,7 +908,7 @@
     *block.parent_root_mut() = parent_root;
     assert!(
         matches!(
-            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
+            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
             BlockError::NotFinalizedDescendant { block_parent_root }
             if block_parent_root == parent_root
         ),
@@ -927,7 +946,7 @@
     );
     assert!(
         matches!(
-            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
+            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
             BlockError::IncorrectBlockProposer {
                 block,
                 local_shuffling,
@@ -939,7 +958,7 @@
     // Check to ensure that we registered this is a valid block from this proposer.
     assert!(
         matches!(
-            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
+            unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
             BlockError::RepeatProposal {
                 proposer,
                 slot,
@@ -951,7 +970,11 @@
     let block = chain_segment[block_index].beacon_block.clone();
     assert!(
-        harness.chain.verify_block_for_gossip(block).await.is_ok(),
+        harness
+            .chain
+            .verify_block_for_gossip(block.into())
+            .await
+            .is_ok(),
         "the valid block should be processed"
     );
@@ -969,7 +992,7 @@
         matches!(
             harness
                 .chain
-                .verify_block_for_gossip(block.clone())
+                .verify_block_for_gossip(block.clone().into())
                 .await
                 .err()
                 .expect("should error when processing known block"),
@@ -1006,7 +1029,7 @@ async fn verify_block_for_gossip_slashing_detection() {
     let verified_block = harness
         .chain
-        .verify_block_for_gossip(Arc::new(block1))
+        .verify_block_for_gossip(Arc::new(block1).into())
         .await
         .unwrap();
     harness
@@ -1022,7 +1045,7 @@
     unwrap_err(
         harness
             .chain
-            .verify_block_for_gossip(Arc::new(block2))
+            .verify_block_for_gossip(Arc::new(block2).into())
            .await,
    );
@@ -1045,7 +1068,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
     let verified_block = harness
         .chain
-        .verify_block_for_gossip(Arc::new(block))
+        .verify_block_for_gossip(Arc::new(block).into())
         .await
         .unwrap();
     let attestations = verified_block.block.message().body().attestations().clone();
@@ -1184,7 +1207,7 @@ async fn add_base_block_to_altair_chain() {
     assert!(matches!(
         harness
             .chain
-            .verify_block_for_gossip(Arc::new(base_block.clone()))
+            .verify_block_for_gossip(Arc::new(base_block.clone()).into())
             .await
             .err()
             .expect("should error when processing base block"),
@@ -1218,7 +1241,7 @@
     harness
         .chain
         .process_chain_segment(
-            vec![Arc::new(base_block)],
+            vec![Arc::new(base_block).into()],
             CountUnrealized::True,
             NotifyExecutionLayer::Yes,
         )
@@ -1322,7 +1345,7 @@ async fn add_altair_block_to_base_chain() {
     assert!(matches!(
         harness
             .chain
-            .verify_block_for_gossip(Arc::new(altair_block.clone()))
+            .verify_block_for_gossip(Arc::new(altair_block.clone()).into())
             .await
             .err()
             .expect("should error when processing altair block"),
@@ -1356,7 +1379,7 @@
     harness
         .chain
         .process_chain_segment(
-            vec![Arc::new(altair_block)],
+            vec![Arc::new(altair_block).into()],
             CountUnrealized::True,
             NotifyExecutionLayer::Yes
         )

View File

@@ -31,7 +31,15 @@ fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
     let cold_path = db_path.path().join("cold_db");
     let config = StoreConfig::default();
     let log = NullLoggerBuilder.build().expect("logger should build");
-    HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
+    HotColdDB::open(
+        &hot_path,
+        &cold_path,
+        None,
+        |_, _, _| Ok(()),
+        config,
+        spec,
+        log,
+    )
     .expect("disk store should initialize")
 }
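The extra None threaded into HotColdDB::open is presumably the new optional blobs-database input added elsewhere in this PR; the parameter's name is not visible in this hunk, so treat that reading as an assumption. A generic sketch of growing an API with an Option parameter so existing callers can simply opt out:

use std::path::PathBuf;

// Hypothetical: `blobs` is the newly added optional input.
fn open_store(hot: &PathBuf, cold: &PathBuf, blobs: Option<PathBuf>) -> String {
    match blobs {
        Some(path) => format!("hot={hot:?} cold={cold:?} blobs={path:?}"),
        None => format!("hot={hot:?} cold={cold:?} (no blobs db)"),
    }
}

fn main() {
    let (hot, cold) = (PathBuf::from("hot_db"), PathBuf::from("cold_db"));
    println!("{}", open_store(&hot, &cold, None)); // existing callers pass None
}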

View File

@@ -1040,7 +1040,7 @@ async fn invalid_parent() {
     // Ensure the block built atop an invalid payload is invalid for gossip.
     assert!(matches!(
-        rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await,
+        rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await,
         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
         if invalid_root == parent_root
     ));

View File

@@ -11,7 +11,9 @@ use beacon_chain::{
     BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer,
     ServerSentEventHandler, WhenSlotSkipped,
 };
+use eth2_network_config::TRUSTED_SETUP;
 use fork_choice::CountUnrealized;
+use kzg::TrustedSetup;
 use lazy_static::lazy_static;
 use logging::test_logger;
 use maplit::hashset;
@@ -57,7 +59,15 @@ fn get_store_with_spec(
     let config = StoreConfig::default();
     let log = test_logger();
-    HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
+    HotColdDB::open(
+        &hot_path,
+        &cold_path,
+        None,
+        |_, _, _| Ok(()),
+        config,
+        spec,
+        log,
+    )
     .expect("disk store should initialize")
 }
@@ -65,7 +75,7 @@ fn get_harness(
     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
     validator_count: usize,
 ) -> TestHarness {
-    let harness = BeaconChainHarness::builder(MinimalEthSpec)
+    let harness = TestHarness::builder(MinimalEthSpec)
         .default_spec()
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .fresh_disk_store(store)
@@ -565,7 +575,7 @@ async fn delete_blocks_and_states() {
     let store = get_store(&db_path);
     let validators_keypairs =
         types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
-    let harness = BeaconChainHarness::builder(MinimalEthSpec)
+    let harness = TestHarness::builder(MinimalEthSpec)
         .default_spec()
         .keypairs(validators_keypairs)
         .fresh_disk_store(store.clone())
@@ -695,7 +705,7 @@ async fn multi_epoch_fork_valid_blocks_test(
     let store = get_store(&db_path);
     let validators_keypairs =
         types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
-    let harness = BeaconChainHarness::builder(MinimalEthSpec)
+    let harness = TestHarness::builder(MinimalEthSpec)
         .default_spec()
         .keypairs(validators_keypairs)
         .fresh_disk_store(store)
@@ -2101,10 +2111,13 @@ async fn weak_subjectivity_sync() {
     let store = get_store(&temp2);
     let spec = test_spec::<E>();
     let seconds_per_slot = spec.seconds_per_slot;
+    let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP)
+        .map_err(|e| println!("Unable to read trusted setup file: {}", e))
+        .unwrap();
     // Initialise a new beacon chain from the finalized checkpoint
     let beacon_chain = Arc::new(
-        BeaconChainBuilder::new(MinimalEthSpec)
+        BeaconChainBuilder::<DiskHarnessType<E>>::new(MinimalEthSpec)
             .store(store.clone())
             .custom_spec(test_spec::<E>())
             .task_executor(harness.chain.task_executor.clone())
@@ -2123,6 +2136,7 @@ async fn weak_subjectivity_sync() {
                 1,
             )))
             .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
+            .trusted_setup(trusted_setup)
             .build()
             .expect("should build"),
     );
@@ -2300,7 +2314,7 @@ async fn finalizes_after_resuming_from_db() {
     let original_chain = harness.chain;
-    let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec)
+    let resumed_harness = BeaconChainHarness::<DiskHarnessType<E>>::builder(MinimalEthSpec)
         .default_spec()
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .resumed_disk_store(store)
@@ -2476,7 +2490,7 @@ async fn revert_minority_fork_on_resume() {
     drop(harness1);
     let resume_store = get_store_with_spec(&db_path1, spec2.clone());
-    let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec)
+    let resumed_harness = TestHarness::builder(MinimalEthSpec)
         .spec(spec2)
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .resumed_disk_store(resume_store)

View File

@@ -133,6 +133,7 @@ mod test {
             nonce: Hash64::zero(),
             base_fee_per_gas: 0x036b_u64.into(),
             withdrawals_root: None,
+            excess_data_gas: None,
         };
         let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b";
         let expected_hash =
@@ -161,6 +162,7 @@ mod test {
             nonce: Hash64::zero(),
             base_fee_per_gas: 0x036b_u64.into(),
             withdrawals_root: None,
+            excess_data_gas: None,
         };
         let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b";
         let expected_hash =
@@ -190,6 +192,7 @@ mod test {
             nonce: Hash64::zero(),
             base_fee_per_gas: 0x34187b238_u64.into(),
             withdrawals_root: None,
+            excess_data_gas: None,
         };
         let expected_hash =
             Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351")
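These fixtures predate EIP-4844, so the new optional excess_data_gas field is None and the expected RLP strings and hashes stay byte-for-byte identical. A hypothetical sketch (not the real RLP encoder) of why an absent Option field can leave legacy encodings untouched:

// Only append the new field when present; pre-4844 headers encode as before.
struct Header {
    base_fee_per_gas: u64,
    excess_data_gas: Option<u64>,
}

fn encode(h: &Header) -> Vec<u64> {
    let mut fields = vec![h.base_fee_per_gas];
    if let Some(excess) = h.excess_data_gas {
        fields.push(excess);
    }
    fields
}

fn main() {
    let legacy = Header { base_fee_per_gas: 0x036b, excess_data_gas: None };
    assert_eq!(encode(&legacy), vec![0x036b]); // unchanged legacy encoding
}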

View File

@@ -2,7 +2,7 @@ use crate::engines::ForkchoiceState;
 use crate::http::{
     ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1,
     ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2,
-    ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2,
+    ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3,
 };
 use crate::BlobTxConversionError;
 pub use ethers_core::types::Transaction;
@@ -414,6 +414,9 @@ impl EngineCapabilities {
         if self.new_payload_v2 {
             response.push(ENGINE_NEW_PAYLOAD_V2);
         }
+        if self.new_payload_v3 {
+            response.push(ENGINE_NEW_PAYLOAD_V3);
+        }
         if self.forkchoice_updated_v1 {
             response.push(ENGINE_FORKCHOICE_UPDATED_V1);
         }
@@ -426,6 +429,9 @@ impl EngineCapabilities {
         if self.get_payload_v2 {
             response.push(ENGINE_GET_PAYLOAD_V2);
         }
+        if self.get_payload_v3 {
+            response.push(ENGINE_GET_PAYLOAD_V3);
+        }
         if self.exchange_transition_configuration_v1 {
             response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1);
         }
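The capabilities response is assembled by pushing a method name for each supported flag. A standalone sketch mirroring the hunk above (engine_newPayloadV3 and engine_getPayloadV3 are the real Engine API method names; the struct here is a stand-in):

struct EngineCapabilities {
    new_payload_v3: bool,
    get_payload_v3: bool,
}

impl EngineCapabilities {
    fn to_response(&self) -> Vec<&'static str> {
        let mut response = Vec::new();
        if self.new_payload_v3 {
            response.push("engine_newPayloadV3");
        }
        if self.get_payload_v3 {
            response.push("engine_getPayloadV3");
        }
        response
    }
}

fn main() {
    let caps = EngineCapabilities { new_payload_v3: true, get_payload_v3: false };
    assert_eq!(caps.to_response(), vec!["engine_newPayloadV3"]);
}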

View File

@@ -63,8 +63,10 @@ pub const METHOD_NOT_FOUND_CODE: i64 = -32601;
 pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
     ENGINE_NEW_PAYLOAD_V1,
     ENGINE_NEW_PAYLOAD_V2,
+    ENGINE_NEW_PAYLOAD_V3,
     ENGINE_GET_PAYLOAD_V1,
     ENGINE_GET_PAYLOAD_V2,
+    ENGINE_GET_PAYLOAD_V3,
     ENGINE_FORKCHOICE_UPDATED_V1,
     ENGINE_FORKCHOICE_UPDATED_V2,
     ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1,

View File

@@ -2,13 +2,12 @@ use super::*;
 use serde::{Deserialize, Serialize};
 use strum::EnumString;
 use superstruct::superstruct;
+use types::blobs_sidecar::KzgCommitments;
 use types::{
-    Blob, EthSpec, ExecutionBlockHash, FixedVector, KzgCommitment, Transaction, Unsigned,
+    Blobs, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
+    ExecutionPayloadEip4844, ExecutionPayloadMerge, FixedVector, Transaction, Unsigned,
     VariableList, Withdrawal,
 };
-use types::{
-    ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
-};
@@ -418,9 +417,9 @@ impl From<JsonPayloadAttributes> for PayloadAttributes {
 #[serde(bound = "T: EthSpec", rename_all = "camelCase")]
 pub struct JsonBlobsBundle<T: EthSpec> {
     pub block_hash: ExecutionBlockHash,
-    pub kzgs: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
+    pub kzgs: KzgCommitments<T>,
     #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
-    pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
+    pub blobs: Blobs<T>,
 }
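The Blobs<T> and KzgCommitments<T> aliases collapse a repeated generic spelling into one reusable name. A sketch with hypothetical stand-ins for the SSZ types:

struct KzgCommitment([u8; 48]);
struct Blob(Vec<u8>);

// Really VariableList<KzgCommitment, T::MaxBlobsPerBlock> and
// VariableList<Blob<T>, T::MaxBlobsPerBlock> in the actual code.
type KzgCommitments = Vec<KzgCommitment>;
type Blobs = Vec<Blob>;

struct BlobsBundle {
    kzgs: KzgCommitments,
    blobs: Blobs,
}

fn main() {
    let bundle = BlobsBundle { kzgs: KzgCommitments::new(), blobs: Blobs::new() };
    assert!(bundle.kzgs.is_empty() && bundle.blobs.is_empty());
}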
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]

View File

@@ -43,16 +43,15 @@ use tokio_stream::wrappers::WatchStream;
 use types::consts::eip4844::BLOB_TX_TYPE;
 use types::transaction::{AccessTuple, BlobTransaction, EcdsaSignature, SignedBlobTransaction};
 use types::{
-    AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment, VersionedHash,
+    blobs_sidecar::{Blobs, KzgCommitments},
+    ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
 };
+use types::{AbstractExecPayload, BeaconStateError, ExecPayload, VersionedHash};
 use types::{
     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
     ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Transaction,
     Uint256,
 };
-use types::{
-    ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
-};
 mod block_hash;
 mod engine_api;
@@ -135,19 +134,13 @@ pub enum BlockProposalContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
     PayloadAndBlobs {
         payload: Payload,
         block_value: Uint256,
-        kzg_commitments: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
-        blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
+        kzg_commitments: KzgCommitments<T>,
+        blobs: Blobs<T>,
     },
 }
 impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Payload> {
-    pub fn deconstruct(
-        self,
-    ) -> (
-        Payload,
-        Option<VariableList<KzgCommitment, T::MaxBlobsPerBlock>>,
-        Option<VariableList<Blob<T>, T::MaxBlobsPerBlock>>,
-    ) {
+    pub fn deconstruct(self) -> (Payload, Option<KzgCommitments<T>>, Option<Blobs<T>>) {
         match self {
             Self::Payload {
                 payload,
@@ -2167,7 +2160,7 @@ fn ethers_tx_to_bytes<T: EthSpec>(
         .ok_or(BlobTxConversionError::BlobVersionedHashesMissing)?
         .as_array()
         .ok_or(BlobTxConversionError::BlobVersionedHashesMissing)?
-        .into_iter()
+        .iter()
         .map(|versioned_hash| {
             let hash_bytes = eth2_serde_utils::hex::decode(
                 versioned_hash

View File

@@ -152,7 +152,7 @@ pub async fn handle_rpc<T: EthSpec>(
         ForkName::Eip4844 => {
             if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 {
                 return Err((
-                    format!("{} called after capella fork!", method),
+                    format!("{} called after eip4844 fork!", method),
                     GENERIC_ERROR_CODE,
                 ));
             }

View File

@@ -227,7 +227,7 @@ impl BlockId {
             "Blob with block root {} is not in the store",
             root
         ))),
-        Err(e) => Err(warp_utils::reject::beacon_chain_error(e.into())),
+        Err(e) => Err(warp_utils::reject::beacon_chain_error(e)),
     }
 }
} }

View File

@@ -46,9 +46,9 @@ pub async fn publish_block<T: BeaconChainTypes>(
             block_and_blobs.into()
         } else {
             //FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required
-            return Err(warp_utils::reject::broadcast_without_import(format!(
-                "no blob cached for block"
-            )));
+            return Err(warp_utils::reject::broadcast_without_import(
+                "no blob cached for block".into(),
+            ));
         }
     } else {
         crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?;
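The rewrite above drops a format! call with no interpolation (clippy's useless_format); a string literal plus .into() builds the same String without the macro. Runnable sketch:

fn main() {
    let a: String = format!("no blob cached for block"); // what the lint flags
    let b: String = "no blob cached for block".into(); // the replacement
    assert_eq!(a, b);
}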

View File

@@ -1,7 +1,5 @@
 use beacon_chain::{
-    test_utils::{
-        BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType,
-    },
+    test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType},
     BeaconChain, BeaconChainTypes,
 };
 use directory::DEFAULT_ROOT_DIR;
@@ -57,9 +55,8 @@ pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
     pub external_peer_id: PeerId,
 }
-type Initializer<E> = Box<
-    dyn FnOnce(HarnessBuilder<EphemeralHarnessType<E>>) -> HarnessBuilder<EphemeralHarnessType<E>>,
->;
+type HarnessBuilder<E> = Builder<EphemeralHarnessType<E>>;
+type Initializer<E> = Box<dyn FnOnce(HarnessBuilder<E>) -> HarnessBuilder<E>>;
 type Mutator<E> = BoxedMutator<E, MemoryStore<E>, MemoryStore<E>>;
 impl<E: EthSpec> InteractiveTester<E> {

View File

@@ -408,15 +408,12 @@ impl ProtocolId {
     /// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the
     /// beginning of the stream, else returns `false`.
     pub fn has_context_bytes(&self) -> bool {
-        match self.version {
-            Version::V2 => match self.message_name {
-                Protocol::BlocksByRange | Protocol::BlocksByRoot => return true,
-                _ => return false,
-            },
-            Version::V1 => match self.message_name {
-                Protocol::BlobsByRange | Protocol::BlobsByRoot => return true,
-                _ => return false,
-            },
+        match self.message_name {
+            Protocol::BlocksByRange | Protocol::BlocksByRoot => {
+                !matches!(self.version, Version::V1)
+            }
+            Protocol::BlobsByRange | Protocol::BlobsByRoot | Protocol::LightClientBootstrap => true,
+            Protocol::Goodbye | Protocol::Ping | Protocol::Status | Protocol::MetaData => false,
         }
     }
 }
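The has_context_bytes() rewrite matches on the protocol first and folds the version check into a matches! expression, so every protocol is handled exhaustively in one place. A sketch with hypothetical enums:

enum Version { V1, V2 }
enum Protocol { BlocksByRange, Ping }

fn has_context_bytes(protocol: &Protocol, version: &Version) -> bool {
    match protocol {
        // Context bytes only on the newer protocol version.
        Protocol::BlocksByRange => !matches!(version, Version::V1),
        // Never any context bytes for this one.
        Protocol::Ping => false,
    }
}

fn main() {
    assert!(has_context_bytes(&Protocol::BlocksByRange, &Version::V2));
    assert!(!has_context_bytes(&Protocol::BlocksByRange, &Version::V1));
    assert!(!has_context_bytes(&Protocol::Ping, &Version::V2));
}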

View File

@@ -243,7 +243,7 @@ impl TestRig {
     pub fn enqueue_rpc_block(&self) {
         let event = WorkEvent::rpc_beacon_block(
             self.next_block.canonical_root(),
-            self.next_block.clone(),
+            self.next_block.clone().into(),
             std::time::Duration::default(),
             BlockProcessType::ParentLookup {
                 chain_hash: Hash256::random(),
@@ -255,7 +255,7 @@ impl TestRig {
     pub fn enqueue_single_lookup_rpc_block(&self) {
         let event = WorkEvent::rpc_beacon_block(
             self.next_block.canonical_root(),
-            self.next_block.clone(),
+            self.next_block.clone().into(),
             std::time::Duration::default(),
             BlockProcessType::SingleBlock { id: 1 },
         );

View File

@@ -3,14 +3,16 @@
 mod tests {
     use crate::persisted_dht::load_dht;
     use crate::{NetworkConfig, NetworkService};
-    use beacon_chain::test_utils::BeaconChainHarness;
+    use beacon_chain::test_utils::EphemeralHarnessType;
     use lighthouse_network::Enr;
     use slog::{o, Drain, Level, Logger};
     use sloggers::{null::NullLoggerBuilder, Build};
     use std::str::FromStr;
     use std::sync::Arc;
     use tokio::runtime::Runtime;
-    use types::MinimalEthSpec;
+    use types::MinimalEthSpec as E;
+
+    type BeaconChainHarness = beacon_chain::test_utils::BeaconChainHarness<EphemeralHarnessType<E>>;
     fn get_logger(actual_log: bool) -> Logger {
         if actual_log {
@@ -34,7 +36,7 @@ mod tests {
     fn test_dht_persistence() {
         let log = get_logger(false);
-        let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec)
+        let beacon_chain = BeaconChainHarness::builder(E)
             .default_spec()
             .deterministic_keypairs(8)
             .fresh_ephemeral_store()

View File

@@ -204,8 +204,10 @@ impl<const MAX_ATTEMPTS: u8> slog::Value for SingleBlockRequest<MAX_ATTEMPTS> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
-    use types::{MinimalEthSpec as E, SignedBeaconBlock};
+    use types::{
+        test_utils::{SeedableRng, TestRandom, XorShiftRng},
+        MinimalEthSpec as E, SignedBeaconBlock,
+    };
     fn rand_block() -> SignedBeaconBlock<E> {
         let mut rng = XorShiftRng::from_seed([42; 16]);

View File

@ -6,17 +6,23 @@ use crate::NetworkMessage;
use super::*; use super::*;
use beacon_chain::builder::Witness; use beacon_chain::{
use beacon_chain::eth1_chain::CachingEth1Backend; builder::Witness,
eth1_chain::CachingEth1Backend,
test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType},
};
pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
use lighthouse_network::{NetworkGlobals, Request}; use lighthouse_network::{NetworkGlobals, Request};
use slog::{Drain, Level}; use slot_clock::TestingSlotClock;
use slot_clock::SystemTimeSlotClock; use std::time::Duration;
use store::MemoryStore; use store::MemoryStore;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use types::{
use types::MinimalEthSpec as E; test_utils::{SeedableRng, TestRandom, XorShiftRng},
MinimalEthSpec as E, SignedBeaconBlock,
};
type T = Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>; type T = Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
struct TestRig { struct TestRig {
beacon_processor_rx: mpsc::Receiver<WorkEvent<T>>, beacon_processor_rx: mpsc::Receiver<WorkEvent<T>>,
@@ -27,18 +33,18 @@ struct TestRig {
 const D: Duration = Duration::new(0, 0);

 impl TestRig {
-    fn test_setup(log_level: Option<Level>) -> (BlockLookups<T>, SyncNetworkContext<T>, Self) {
-        let log = {
-            let decorator = slog_term::TermDecorator::new().build();
-            let drain = slog_term::FullFormat::new(decorator).build().fuse();
-            let drain = slog_async::Async::new(drain).build().fuse();
-            if let Some(log_level) = log_level {
-                slog::Logger::root(drain.filter_level(log_level).fuse(), slog::o!())
-            } else {
-                slog::Logger::root(drain.filter(|_| false).fuse(), slog::o!())
-            }
-        };
+    fn test_setup(enable_log: bool) -> (BlockLookups<T>, SyncNetworkContext<T>, Self) {
+        let log = build_log(slog::Level::Debug, enable_log);
+
+        // Initialise a new beacon chain
+        let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
+            .default_spec()
+            .logger(log.clone())
+            .deterministic_keypairs(1)
+            .fresh_ephemeral_store()
+            .build();
+        let chain = harness.chain;

         let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(100);
         let (network_tx, network_rx) = mpsc::unbounded_channel();
@@ -147,7 +153,7 @@ impl TestRig {
 #[test]
 fn test_single_block_lookup_happy_path() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let block = rig.rand_block();
     let peer_id = PeerId::random();

@@ -175,7 +181,7 @@ fn test_single_block_lookup_happy_path() {
 #[test]
 fn test_single_block_lookup_empty_response() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let block_hash = Hash256::random();
     let peer_id = PeerId::random();

@@ -193,7 +199,7 @@ fn test_single_block_lookup_empty_response() {
 #[test]
 fn test_single_block_lookup_wrong_response() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let block_hash = Hash256::random();
     let peer_id = PeerId::random();

@@ -215,7 +221,7 @@ fn test_single_block_lookup_wrong_response() {
 #[test]
 fn test_single_block_lookup_failure() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let block_hash = Hash256::random();
     let peer_id = PeerId::random();

@@ -232,7 +238,7 @@ fn test_single_block_lookup_failure() {
 #[test]
 fn test_single_block_lookup_becomes_parent_request() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let block = rig.rand_block();
     let peer_id = PeerId::random();

@@ -261,7 +267,7 @@ fn test_single_block_lookup_becomes_parent_request() {
 #[test]
 fn test_parent_lookup_happy_path() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let parent = rig.rand_block();
     let block = rig.block_with_parent(parent.canonical_root());

@@ -289,7 +295,7 @@ fn test_parent_lookup_happy_path() {
 #[test]
 fn test_parent_lookup_wrong_response() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let parent = rig.rand_block();
     let block = rig.block_with_parent(parent.canonical_root());

@@ -326,7 +332,7 @@ fn test_parent_lookup_wrong_response() {
 #[test]
 fn test_parent_lookup_empty_response() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let parent = rig.rand_block();
     let block = rig.block_with_parent(parent.canonical_root());

@@ -358,7 +364,7 @@ fn test_parent_lookup_empty_response() {
 #[test]
 fn test_parent_lookup_rpc_failure() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let parent = rig.rand_block();
     let block = rig.block_with_parent(parent.canonical_root());
@@ -370,7 +376,15 @@ fn test_parent_lookup_rpc_failure() {
     let id1 = rig.expect_parent_request();

     // The request fails. It should be tried again.
-    bl.parent_lookup_failed(id1, peer_id, &mut cx);
+    bl.parent_lookup_failed(
+        id1,
+        peer_id,
+        &mut cx,
+        RPCError::ErrorResponse(
+            RPCResponseErrorCode::ResourceUnavailable,
+            "older than eip4844".into(),
+        ),
+    );
     let id2 = rig.expect_parent_request();

     // Send the right block this time.

@@ -389,7 +403,7 @@ fn test_parent_lookup_rpc_failure() {
 #[test]
 fn test_parent_lookup_too_many_attempts() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let parent = rig.rand_block();
     let block = rig.block_with_parent(parent.canonical_root());
@ -404,7 +418,15 @@ fn test_parent_lookup_too_many_attempts() {
// make sure every error is accounted for // make sure every error is accounted for
0 => { 0 => {
// The request fails. It should be tried again. // The request fails. It should be tried again.
bl.parent_lookup_failed(id, peer_id, &mut cx); bl.parent_lookup_failed(
id,
peer_id,
&mut cx,
RPCError::ErrorResponse(
RPCResponseErrorCode::ResourceUnavailable,
"older than eip4844".into(),
),
);
} }
_ => { _ => {
// Send a bad block this time. It should be tried again. // Send a bad block this time. It should be tried again.
@ -425,7 +447,7 @@ fn test_parent_lookup_too_many_attempts() {
#[test] #[test]
fn test_parent_lookup_too_many_download_attempts_no_blacklist() { fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block(); let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root()); let block = rig.block_with_parent(parent.canonical_root());
@ -439,7 +461,15 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
let id = rig.expect_parent_request(); let id = rig.expect_parent_request();
if i % 2 != 0 { if i % 2 != 0 {
// The request fails. It should be tried again. // The request fails. It should be tried again.
bl.parent_lookup_failed(id, peer_id, &mut cx); bl.parent_lookup_failed(
id,
peer_id,
&mut cx,
RPCError::ErrorResponse(
RPCResponseErrorCode::ResourceUnavailable,
"older than eip4844".into(),
),
);
} else { } else {
// Send a bad block this time. It should be tried again. // Send a bad block this time. It should be tried again.
let bad_block = rig.rand_block(); let bad_block = rig.rand_block();
@ -459,7 +489,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
#[test] #[test]
fn test_parent_lookup_too_many_processing_attempts_must_blacklist() { fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1; const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1;
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = Arc::new(rig.rand_block()); let parent = Arc::new(rig.rand_block());
let block = rig.block_with_parent(parent.canonical_root()); let block = rig.block_with_parent(parent.canonical_root());
@ -473,7 +503,15 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) { for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) {
let id = rig.expect_parent_request(); let id = rig.expect_parent_request();
// The request fails. It should be tried again. // The request fails. It should be tried again.
bl.parent_lookup_failed(id, peer_id, &mut cx); bl.parent_lookup_failed(
id,
peer_id,
&mut cx,
RPCError::ErrorResponse(
RPCResponseErrorCode::ResourceUnavailable,
"older than eip4844".into(),
),
);
} }
// Now fail processing a block in the parent request // Now fail processing a block in the parent request
@ -493,7 +531,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
#[test] #[test]
fn test_parent_lookup_too_deep() { fn test_parent_lookup_too_deep() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None); let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let mut blocks = let mut blocks =
Vec::<SignedBeaconBlock<E>>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE); Vec::<SignedBeaconBlock<E>>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE);
while blocks.len() < parent_lookup::PARENT_DEPTH_TOLERANCE { while blocks.len() < parent_lookup::PARENT_DEPTH_TOLERANCE {
@@ -532,7 +570,7 @@ fn test_parent_lookup_too_deep() {
 #[test]
 fn test_parent_lookup_disconnection() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let peer_id = PeerId::random();
     let trigger_block = rig.rand_block();
     bl.search_parent(

@@ -547,7 +585,7 @@ fn test_parent_lookup_disconnection() {
 #[test]
 fn test_single_block_lookup_ignored_response() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let block = rig.rand_block();
     let peer_id = PeerId::random();

@@ -576,7 +614,7 @@ fn test_single_block_lookup_ignored_response() {
 #[test]
 fn test_parent_lookup_ignored_response() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
     let parent = rig.rand_block();
     let block = rig.block_with_parent(parent.canonical_root());

@@ -601,7 +639,7 @@ fn test_parent_lookup_ignored_response() {
 /// This is a regression test.
 #[test]
 fn test_same_chain_race_condition() {
-    let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug));
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(true);

     #[track_caller]
     fn parent_lookups_consistency(bl: &BlockLookups<T>) {
@@ -636,12 +674,12 @@ fn test_same_chain_race_condition() {
     let peer_id = PeerId::random();
     let trigger_block = blocks.pop().unwrap();
     let chain_hash = trigger_block.canonical_root();
-    bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx);
+    bl.search_parent(chain_hash, trigger_block.clone().into(), peer_id, &mut cx);

     for (i, block) in blocks.into_iter().rev().enumerate() {
         let id = rig.expect_parent_request();
         // the block
-        bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx);
+        bl.parent_lookup_response(id, peer_id, Some(block.clone().into()), D, &mut cx);
         // the stream termination
         bl.parent_lookup_response(id, peer_id, None, D, &mut cx);
         // the processing request

@@ -651,7 +689,11 @@ fn test_same_chain_race_condition() {
             // one block was removed
             bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx)
         } else {
-            bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx)
+            bl.parent_block_processed(
+                chain_hash,
+                BlockError::ParentUnknown(block.into()).into(),
+                &mut cx,
+            )
         }
         parent_lookups_consistency(&bl)
     }

@@ -661,7 +703,7 @@ fn test_same_chain_race_condition() {
     // Try to get this block again while the chain is being processed. We should not request it again.
     let peer_id = PeerId::random();
-    bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx);
+    bl.search_parent(chain_hash, trigger_block.into(), peer_id, &mut cx);
     parent_lookups_consistency(&bl);

     let process_result = BatchProcessResult::Success {


@@ -803,15 +803,15 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         peer_id: PeerId,
         block_or_blob: BlockOrBlobs<T::EthSpec>,
     ) {
-        if let Some((chain_id, batch_id, block_responses)) = self
+        if let Some((chain_id, resp)) = self
             .network
             .range_sync_block_and_blob_response(id, block_or_blob)
         {
-            match block_responses {
+            match resp.responses {
                 Ok(blocks) => {
                     for block in blocks
                         .into_iter()
-                        .map(|block| Some(block))
+                        .map(Some)
                         // chain the stream terminator
                         .chain(vec![None])
                     {

@@ -819,7 +819,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                             &mut self.network,
                             peer_id,
                             chain_id,
-                            batch_id,
+                            resp.batch_id,
                             id,
                             block,
                         );

@@ -831,7 +831,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
                     // With time we will want to downgrade this log
                     warn!(
                         self.log, "Blocks and blobs request for range received invalid data";
-                        "peer_id" => %peer_id, "batch_id" => batch_id, "error" => e
+                        "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
                     );
                     // TODO: penalize the peer for being a bad boy
                     let id = RequestId::RangeBlobs { id };
@ -849,21 +849,21 @@ impl<T: BeaconChainTypes> SyncManager<T> {
peer_id: PeerId, peer_id: PeerId,
block_or_blob: BlockOrBlobs<T::EthSpec>, block_or_blob: BlockOrBlobs<T::EthSpec>,
) { ) {
if let Some((batch_id, block_responses)) = self if let Some(resp) = self
.network .network
.backfill_sync_block_and_blob_response(id, block_or_blob) .backfill_sync_block_and_blob_response(id, block_or_blob)
{ {
match block_responses { match resp.responses {
Ok(blocks) => { Ok(blocks) => {
for block in blocks for block in blocks
.into_iter() .into_iter()
.map(|block| Some(block)) .map(Some)
// chain the stream terminator // chain the stream terminator
.chain(vec![None]) .chain(vec![None])
{ {
match self.backfill_sync.on_block_response( match self.backfill_sync.on_block_response(
&mut self.network, &mut self.network,
batch_id, resp.batch_id,
&peer_id, &peer_id,
id, id,
block, block,
@ -883,7 +883,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// With time we will want to downgrade this log // With time we will want to downgrade this log
warn!( warn!(
self.log, "Blocks and blobs request for backfill received invalid data"; self.log, "Blocks and blobs request for backfill received invalid data";
"peer_id" => %peer_id, "batch_id" => batch_id, "error" => e "peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
); );
// TODO: penalize the peer for being a bad boy // TODO: penalize the peer for being a bad boy
let id = RequestId::BackFillBlobs { id }; let id = RequestId::BackFillBlobs { id };


@@ -20,6 +20,17 @@ use std::sync::Arc;
 use tokio::sync::mpsc;
 use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};

+pub struct BlocksAndBlobsByRangeResponse<T: EthSpec> {
+    pub batch_id: BatchId,
+    pub responses: Result<Vec<BlockWrapper<T>>, &'static str>,
+}
+
+pub struct BlocksAndBlobsByRangeRequest<T: EthSpec> {
+    pub chain_id: ChainId,
+    pub batch_id: BatchId,
+    pub block_blob_info: BlocksAndBlobsRequestInfo<T>,
+}
+
 /// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id.
 pub struct SyncNetworkContext<T: BeaconChainTypes> {
     /// The network channel to relay messages to the Network service.
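The two structs added in the hunk above replace the anonymous tuples previously threaded through the sync manager, so call sites can name fields instead of destructuring by position. A sketch of a consumer, assuming the handler function itself is illustrative (the real handlers are the manager methods shown earlier):

    fn handle_range_response<T: EthSpec>(resp: BlocksAndBlobsByRangeResponse<T>) {
        match resp.responses {
            Ok(blocks) => {
                // Mirror the manager: chain `None` back on as the stream terminator.
                for _block in blocks.into_iter().map(Some).chain(vec![None]) {
                    // ... forward each item to the batch `resp.batch_id`
                }
            }
            Err(_reason) => {
                // ... fail the batch `resp.batch_id` and penalize the peer
            }
        }
    }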
@@ -38,8 +49,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
     backfill_requests: FnvHashMap<Id, BatchId>,

     /// BlocksByRange requests paired with BlobsByRange requests made by the range.
-    range_blocks_and_blobs_requests:
-        FnvHashMap<Id, (ChainId, BatchId, BlocksAndBlobsRequestInfo<T::EthSpec>)>,
+    range_blocks_and_blobs_requests: FnvHashMap<Id, BlocksAndBlobsByRangeRequest<T::EthSpec>>,

     /// BlocksByRange requests paired with BlobsByRange requests made by the backfill sync.
     backfill_blocks_and_blobs_requests:

@@ -198,8 +208,14 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
                 request_id,
             })?;
             let block_blob_info = BlocksAndBlobsRequestInfo::default();
-            self.range_blocks_and_blobs_requests
-                .insert(id, (chain_id, batch_id, block_blob_info));
+            self.range_blocks_and_blobs_requests.insert(
+                id,
+                BlocksAndBlobsByRangeRequest {
+                    chain_id,
+                    batch_id,
+                    block_blob_info,
+                },
+            );
             Ok(id)
         }
     }
@ -290,22 +306,30 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
&mut self, &mut self,
request_id: Id, request_id: Id,
block_or_blob: BlockOrBlobs<T::EthSpec>, block_or_blob: BlockOrBlobs<T::EthSpec>,
) -> Option<( ) -> Option<(ChainId, BlocksAndBlobsByRangeResponse<T::EthSpec>)> {
ChainId,
BatchId,
Result<Vec<BlockWrapper<T::EthSpec>>, &'static str>,
)> {
match self.range_blocks_and_blobs_requests.entry(request_id) { match self.range_blocks_and_blobs_requests.entry(request_id) {
Entry::Occupied(mut entry) => { Entry::Occupied(mut entry) => {
let (_, _, info) = entry.get_mut(); let req = entry.get_mut();
let info = &mut req.block_blob_info;
match block_or_blob { match block_or_blob {
BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block), BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block),
BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar), BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
} }
if info.is_finished() { if info.is_finished() {
// If the request is finished, dequeue everything // If the request is finished, dequeue everything
let (chain_id, batch_id, info) = entry.remove(); let BlocksAndBlobsByRangeRequest {
Some((chain_id, batch_id, info.into_responses())) chain_id,
batch_id,
block_blob_info,
} = entry.remove();
Some((
chain_id,
BlocksAndBlobsByRangeResponse {
batch_id,
responses: block_blob_info.into_responses(),
},
))
} else { } else {
None None
} }
@@ -323,7 +347,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
             ByRangeRequestType::BlocksAndBlobs => self
                 .range_blocks_and_blobs_requests
                 .remove(&request_id)
-                .map(|(chain_id, batch_id, _info)| (chain_id, batch_id)),
+                .map(|req| (req.chain_id, req.batch_id)),
             ByRangeRequestType::Blocks => self.range_requests.remove(&request_id),
         }
     }
@ -349,20 +373,19 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
is_stream_terminator: bool, is_stream_terminator: bool,
) -> Option<BatchId> { ) -> Option<BatchId> {
if is_stream_terminator { if is_stream_terminator {
self.backfill_requests self.backfill_requests.remove(&request_id)
.remove(&request_id)
.map(|batch_id| batch_id)
} else { } else {
self.backfill_requests.get(&request_id).copied() self.backfill_requests.get(&request_id).copied()
} }
} }
/// Received a blocks by range response for a request that couples blocks and blobs. /// Received a blocks by range or blobs by range response for a request that couples blocks '
/// and blobs.
pub fn backfill_sync_block_and_blob_response( pub fn backfill_sync_block_and_blob_response(
&mut self, &mut self,
request_id: Id, request_id: Id,
block_or_blob: BlockOrBlobs<T::EthSpec>, block_or_blob: BlockOrBlobs<T::EthSpec>,
) -> Option<(BatchId, Result<Vec<BlockWrapper<T::EthSpec>>, &'static str>)> { ) -> Option<BlocksAndBlobsByRangeResponse<T::EthSpec>> {
match self.backfill_blocks_and_blobs_requests.entry(request_id) { match self.backfill_blocks_and_blobs_requests.entry(request_id) {
Entry::Occupied(mut entry) => { Entry::Occupied(mut entry) => {
let (_, info) = entry.get_mut(); let (_, info) = entry.get_mut();
@ -373,7 +396,10 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
if info.is_finished() { if info.is_finished() {
// If the request is finished, dequeue everything // If the request is finished, dequeue everything
let (batch_id, info) = entry.remove(); let (batch_id, info) = entry.remove();
Some((batch_id, info.into_responses())) Some(BlocksAndBlobsByRangeResponse {
batch_id,
responses: info.into_responses(),
})
} else { } else {
None None
} }
@@ -534,16 +560,20 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
     /// Check whether a batch for this epoch (and only this epoch) should request just blocks or
     /// blocks and blobs.
-    #[allow(unused)]
     pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType {
+        // Induces a compile time panic if this doesn't hold true.
+        #[allow(clippy::assertions_on_constants)]
         const _: () = assert!(
             super::backfill_sync::BACKFILL_EPOCHS_PER_BATCH == 1
                 && super::range_sync::EPOCHS_PER_BATCH == 1,
             "To deal with alignment with 4844 boundaries, batches need to be of just one epoch"
         );
         #[cfg(test)]
         {
             // Keep tests only for blocks.
-            return ByRangeRequestType::Blocks;
+            ByRangeRequestType::Blocks
         }
         #[cfg(not(test))]
         {
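The `const _: () = assert!(...)` above is a compile-time guard: constants are evaluated during compilation, so a false assertion fails the build rather than panicking at runtime. A self-contained demonstration of the pattern (the constant name and message here are illustrative):

    const EPOCHS_PER_BATCH: u64 = 1;
    // Changing the constant to 2 turns this into a build error, not a runtime panic.
    const _: () = assert!(
        EPOCHS_PER_BATCH == 1,
        "batches must span exactly one epoch to align with 4844 boundaries"
    );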


@@ -615,6 +615,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
     ///
     /// If a previous batch has been validated and it had been re-processed, penalize the original
     /// peer.
+    #[allow(clippy::modulo_one)]
     fn advance_chain(&mut self, network: &mut SyncNetworkContext<T>, validating_epoch: Epoch) {
         // make sure this epoch produces an advancement
         if validating_epoch <= self.start_epoch {


@@ -372,26 +372,27 @@ where
 #[cfg(test)]
 mod tests {
-    use crate::service::RequestId;
-    use crate::sync::range_sync::ByRangeRequestType;
-    use crate::NetworkMessage;
-
     use super::*;
-    use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
-    use beacon_chain::builder::Witness;
-    use beacon_chain::eth1_chain::CachingEth1Backend;
-    use beacon_chain::parking_lot::RwLock;
-    use beacon_chain::EngineState;
-    use lighthouse_network::rpc::BlocksByRangeRequest;
-    use lighthouse_network::Request;
-    use lighthouse_network::{rpc::StatusMessage, NetworkGlobals};
-    use slog::{o, Drain};
-    use tokio::sync::mpsc;
-    use slot_clock::SystemTimeSlotClock;
-    use std::collections::HashSet;
-    use std::sync::Arc;
+    use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
+    use crate::service::RequestId;
+    use crate::NetworkMessage;
+    use beacon_chain::{
+        builder::Witness,
+        eth1_chain::CachingEth1Backend,
+        parking_lot::RwLock,
+        test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType},
+        EngineState,
+    };
+    use lighthouse_network::{
+        rpc::{BlocksByRangeRequest, StatusMessage},
+        NetworkGlobals, Request,
+    };
+    use slog::o;
+    use slot_clock::TestingSlotClock;
+    use std::{collections::HashSet, sync::Arc};
     use store::MemoryStore;
+    use tokio::sync::mpsc;
     use types::{Hash256, MinimalEthSpec as E};

     #[derive(Debug)]
@@ -439,19 +440,7 @@ mod tests {
     }

     type TestBeaconChainType =
-        Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
-
-    fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
-        let decorator = slog_term::TermDecorator::new().build();
-        let drain = slog_term::FullFormat::new(decorator).build().fuse();
-        let drain = slog_async::Async::new(drain).build().fuse();
-
-        if enabled {
-            slog::Logger::root(drain.filter_level(level).fuse(), o!())
-        } else {
-            slog::Logger::root(drain.filter(|_| false).fuse(), o!())
-        }
-    }
+        Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;

     #[allow(unused)]
     struct TestRig {
@@ -593,11 +582,20 @@ mod tests {
     }

     fn range(log_enabled: bool) -> (TestRig, RangeSync<TestBeaconChainType, FakeStorage>) {
-        let chain = Arc::new(FakeStorage::default());
         let log = build_log(slog::Level::Trace, log_enabled);
+        // Initialise a new beacon chain
+        let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
+            .default_spec()
+            .logger(log.clone())
+            .deterministic_keypairs(1)
+            .fresh_ephemeral_store()
+            .build();
+        let chain = harness.chain;
+
+        let fake_store = Arc::new(FakeStorage::default());
         let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10);
         let range_sync = RangeSync::<TestBeaconChainType, FakeStorage>::new(
-            chain.clone(),
+            fake_store.clone(),
             log.new(o!("component" => "range")),
         );
         let (network_tx, network_rx) = mpsc::unbounded_channel();
@@ -612,7 +610,7 @@ mod tests {
         let test_rig = TestRig {
             log,
             beacon_processor_rx,
-            chain,
+            chain: fake_store,
             cx,
             network_rx,
             globals,

@@ -687,7 +685,7 @@ mod tests {
         range.add_peer(&mut rig.cx, local_info, peer1, head_info);
         let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
             RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
             }
             other => panic!("unexpected request {:?}", other),
         };

@@ -706,7 +704,7 @@ mod tests {
         range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
         let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
             RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
-                (rig.cx.range_sync_response(id, true).unwrap(), id)
+                (rig.cx.range_sync_block_response(id, true).unwrap(), id)
             }
             other => panic!("unexpected request {:?}", other),
         };


@@ -1907,14 +1907,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         let margin_epochs = self.get_config().blob_prune_margin_epochs;
         let end_epoch = earliest_prunable_epoch - margin_epochs;

-        if !force {
-            if last_pruned_epoch.as_u64() + self.get_config().epochs_per_blob_prune
-                > end_epoch.as_u64()
-            {
-                info!(self.log, "Blobs sidecars are pruned");
-                return Ok(());
-            }
-        }
+        if !force
+            && last_pruned_epoch.as_u64() + self.get_config().epochs_per_blob_prune
+                > end_epoch.as_u64()
+        {
+            info!(self.log, "Blobs sidecars are pruned");
+            return Ok(());
+        }

         // Iterate block roots forwards from the oldest blob slot.
         debug!(

@@ -1,6 +1,6 @@
 # Extends the mainnet preset
 PRESET_BASE: 'mainnet'
-CONFIG_NAME: testnet # needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis
+CONFIG_NAME: 'eip4844' # needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis and needs to match configuration in common_eth2_config/src/lib.rs to pass lh ci.

 # Genesis
 # ---------------------------------------------------------------


@@ -362,11 +362,12 @@ mod tests {
         let base_dir = temp_dir.path().join("my_testnet");
         let deposit_contract_deploy_block = 42;

-        let testnet: Eth2NetworkConfig = Eth2NetworkConfig {
+        let testnet = Eth2NetworkConfig {
             deposit_contract_deploy_block,
             boot_enr,
             genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
             config,
+            kzg_trusted_setup: None,
         };

         testnet


@@ -7,8 +7,8 @@ mod system_time_slot_clock;

 use std::time::Duration;

-pub use crate::manual_slot_clock::ManualSlotClock;
 pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock;
+pub use crate::manual_slot_clock::ManualSlotClock;
 pub use crate::system_time_slot_clock::SystemTimeSlotClock;
 pub use metrics::scrape_for_metrics;
 use types::consts::merge::INTERVALS_PER_SLOT;


@@ -479,7 +479,7 @@ pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>
 ) -> Result<Container, DecodeError> {
     if bytes.is_empty() {
         return Container::try_from_iter(iter::empty()).map_err(|e| {
-            DecodeError::BytesInvalid(format!("Error trying to collect empty list: {:?}", e))
+            DecodeError::BytesInvalid(format!("Error trying to collect empty list: {e:?}"))
         });
     }

@@ -494,8 +494,7 @@ pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>
     if max_len.map_or(false, |max| num_items > max) {
         return Err(DecodeError::BytesInvalid(format!(
-            "Variable length list of {} items exceeds maximum of {:?}",
-            num_items, max_len
+            "Variable length list of {num_items} items exceeds maximum of {max_len:?}",
         )));
     }

@@ -519,7 +518,7 @@ pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>
         }),
         |iter| iter.try_collect(),
     )?
-    .map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {:?}", e)))
+    .map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {e:?}")))
 }

 #[cfg(test)]
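These format-string changes adopt Rust 2021 inline format arguments, which capture variables by name inside the braces. The rendered output is identical, as this standalone check shows:

    let num_items = 5;
    let max_len: Option<usize> = Some(4);
    assert_eq!(
        format!("Variable length list of {} items exceeds maximum of {:?}", num_items, max_len),
        format!("Variable length list of {num_items} items exceeds maximum of {max_len:?}"),
    );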


@@ -49,7 +49,7 @@ pub fn verify_kzg_commitments_against_transactions<T: EthSpec>(
         .flatten()
         // Need to use `itertools::zip_longest` here because just zipping hides if one iter is shorter
         // and `itertools::zip_eq` panics.
-        .zip_longest(kzg_commitments.into_iter())
+        .zip_longest(kzg_commitments.iter())
         .enumerate()
         .map(|(index, next)| match next {
             EitherOrBoth::Both(hash, commitment) => Ok((hash?, commitment)),
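For readers unfamiliar with the itertools APIs named in the comment: plain `zip` silently truncates to the shorter iterator and `zip_eq` panics on a length mismatch, while `zip_longest` surfaces the mismatch as `EitherOrBoth::Left`/`Right` values that the code above can map to an error. A standalone example:

    use itertools::{EitherOrBoth, Itertools};

    let hashes = [1, 2, 3];
    let commitments = [10, 20];
    let pairs: Vec<_> = hashes.iter().zip_longest(commitments.iter()).collect();
    // The third element is `Left(&3)`: a hash with no matching commitment.
    assert!(matches!(pairs[2], EitherOrBoth::Left(&3)));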


@@ -34,7 +34,7 @@ async fn get_harness<E: EthSpec>(
     // Set the state and block to be in the last slot of the `epoch_offset`th epoch.
     let last_slot_of_epoch =
         (MainnetEthSpec::genesis_epoch() + epoch_offset).end_slot(E::slots_per_epoch());
-    let harness = BeaconChainHarness::builder(E::default())
+    let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
         .default_spec()
         .keypairs(KEYPAIRS[0..num_validators].to_vec())
         .fresh_ephemeral_store()


@@ -97,7 +97,7 @@ fn get_zero_hash(height: usize) -> &'static [u8] {
     if height <= ZERO_HASHES_MAX_INDEX {
         &ZERO_HASHES[height]
     } else {
-        panic!("Tree exceeds MAX_TREE_DEPTH of {}", ZERO_HASHES_MAX_INDEX)
+        panic!("Tree exceeds MAX_TREE_DEPTH of {ZERO_HASHES_MAX_INDEX}")
     }
 }


@@ -9,7 +9,6 @@ name = "benches"
 harness = false

 [dependencies]
-serde-big-array = {version = "0.3.2", features = ["const-generics"]}
 merkle_proof = { path = "../../consensus/merkle_proof" }
 bls = { path = "../../crypto/bls", features = ["arbitrary"] }
 kzg = { path = "../../crypto/kzg", features = ["arbitrary"] }

@@ -39,10 +38,10 @@ cached_tree_hash = { path = "../cached_tree_hash" }
 serde_yaml = "0.8.13"
 tempfile = "3.1.0"
 derivative = "2.1.1"
-rusqlite = { version = "0.25.3", features = ["bundled"], optional = true }
 # The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by
 # `AbstractExecPayload`
 arbitrary = { version = "1.0", features = ["derive"] }
+rusqlite = { version = "0.28.0", features = ["bundled"], optional = true }
 eth2_serde_utils = "0.1.1"
 regex = "1.5.5"
 lazy_static = "1.4.0"


@@ -1,5 +1,5 @@
-use crate::test_utils::TestRandom;
 use crate::*;
+use crate::{blobs_sidecar::KzgCommitments, test_utils::TestRandom};
 use derivative::Derivative;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};

@@ -69,7 +69,7 @@ pub struct BeaconBlockBody<T: EthSpec, Payload: AbstractExecPayload<T> = FullPay
     pub bls_to_execution_changes:
         VariableList<SignedBlsToExecutionChange, T::MaxBlsToExecutionChanges>,
     #[superstruct(only(Eip4844))]
-    pub blob_kzg_commitments: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
+    pub blob_kzg_commitments: KzgCommitments<T>,
     #[superstruct(only(Base, Altair))]
     #[ssz(skip_serializing, skip_deserializing)]
     #[tree_hash(skip_hashing)]

@@ -1,5 +1,5 @@
 use crate::test_utils::TestRandom;
-use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot};
+use crate::{Blob, EthSpec, Hash256, KzgCommitment, SignedRoot, Slot};
 use derivative::Derivative;
 use kzg::KzgProof;
 use serde_derive::{Deserialize, Serialize};

@@ -9,6 +9,9 @@ use ssz_types::VariableList;
 use test_random_derive::TestRandom;
 use tree_hash_derive::TreeHash;

+pub type KzgCommitments<T> = VariableList<KzgCommitment, <T as EthSpec>::MaxBlobsPerBlock>;
+pub type Blobs<T> = VariableList<Blob<T>, <T as EthSpec>::MaxBlobsPerBlock>;
+
 #[derive(
     Debug,
     Clone,

@@ -29,7 +32,7 @@ pub struct BlobsSidecar<T: EthSpec> {
     pub beacon_block_root: Hash256,
     pub beacon_block_slot: Slot,
     #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
-    pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
+    pub blobs: Blobs<T>,
     pub kzg_aggregated_proof: KzgProof,
 }
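The two aliases introduced above shorten signatures that previously spelled out the `VariableList` bounds at every use. A small sketch of downstream usage (function names are illustrative; `VariableList` dereferences to a slice, so the usual slice methods apply):

    fn count_commitments<T: EthSpec>(commitments: &KzgCommitments<T>) -> usize {
        commitments.len()
    }

    fn first_blob<T: EthSpec>(blobs: &Blobs<T>) -> Option<&Blob<T>> {
        blobs.first()
    }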


@@ -121,7 +121,7 @@ pub use crate::beacon_block_body::{
 pub use crate::beacon_block_header::BeaconBlockHeader;
 pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee};
 pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *};
-pub use crate::blobs_sidecar::BlobsSidecar;
+pub use crate::blobs_sidecar::{Blobs, BlobsSidecar, KzgCommitments};
 pub use crate::bls_to_execution_change::BlsToExecutionChange;
 pub use crate::chain_spec::{ChainSpec, Config, Domain};
 pub use crate::checkpoint::Checkpoint;


@@ -265,7 +265,7 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
             .map_err(|_| BlobReconstructionError::InconsistentFork)?;
         if kzg_commitments.is_empty() {
             Ok(BlobsSidecar::empty_from_parts(
-                block_root_opt.unwrap_or(self.canonical_root()),
+                block_root_opt.unwrap_or_else(|| self.canonical_root()),
                 self.slot(),
             ))
         } else {


@@ -11,13 +11,11 @@ eth2_ssz = "0.4.1"
 eth2_ssz_derive = "0.3.1"
 tree_hash = "0.4.1"
 derivative = "2.1.1"
-rand = "0.7.3"
 serde = "1.0.116"
 serde_derive = "1.0.116"
 eth2_serde_utils = "0.1.1"
 hex = "0.4.2"
 eth2_hashing = "0.3.0"
-ethereum-types = "0.12.1"
 c-kzg = {git = "https://github.com/ethereum/c-kzg-4844", rev = "69f6155d7524247be9d3f54ab3bfbe33a0345622" }

 arbitrary = { version = "1.0", features = ["derive"], optional = true }


@@ -99,7 +99,7 @@ impl FromStr for KzgCommitment {
 impl Debug for KzgCommitment {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", eth2_serde_utils::hex::encode(&self.0))
+        write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
     }
 }


@@ -123,7 +123,7 @@ impl FromStr for KzgProof {
 impl Debug for KzgProof {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "{}", eth2_serde_utils::hex::encode(&self.0))
+        write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
     }
 }


@@ -72,7 +72,7 @@ impl Kzg {
             ));
         }
         let commitments = expected_kzg_commitments
-            .into_iter()
+            .iter()
             .map(|comm| comm.0.into())
             .collect::<Vec<c_kzg::KZGCommitment>>();
         let proof: c_kzg::KZGProof = kzg_aggregated_proof.0.into();


@@ -215,7 +215,7 @@ fn initialize_state_with_validators<T: EthSpec>(
     // Seed RANDAO with Eth1 entropy
     state.fill_randao_mixes_with(eth1_block_hash);

-    for keypair in keypairs.into_iter() {
+    for keypair in keypairs.iter() {
         let withdrawal_credentials = |pubkey: &PublicKey| {
             let mut credentials = hash(&pubkey.as_ssz_bytes());
             credentials[0] = spec.bls_withdrawal_prefix_byte;


@@ -1357,27 +1357,27 @@ fn prune_blobs_on_startup_false() {
 fn epochs_per_blob_prune_default() {
     CommandLineTest::new()
         .run_with_zero_port()
-        .with_config(|config| assert!(config.epochs_per_blob_prune == 1));
+        .with_config(|config| assert!(config.store.epochs_per_blob_prune == 1));
 }

 #[test]
 fn epochs_per_blob_prune_on_startup_five() {
     CommandLineTest::new()
-        .flag("epochs-per-blob-prune", Some(5))
+        .flag("epochs-per-blob-prune", Some("5"))
         .run_with_zero_port()
-        .with_config(|config| assert!(!config.epochs_per_blob_prune == 5));
+        .with_config(|config| assert!(config.store.epochs_per_blob_prune == 5));
 }

 #[test]
 fn blob_prune_margin_epochs_default() {
     CommandLineTest::new()
         .run_with_zero_port()
-        .with_config(|config| assert!(config.blob_prune_margin_epochs == 0));
+        .with_config(|config| assert!(config.store.blob_prune_margin_epochs == 0));
 }

 #[test]
 fn blob_prune_margin_epochs_on_startup_ten() {
     CommandLineTest::new()
-        .flag("blob-prune-margin-epochs", Some(10))
+        .flag("blob-prune-margin-epochs", Some("10"))
         .run_with_zero_port()
-        .with_config(|config| assert!(!config.blob_prune_margin_epochs == Some(10)));
+        .with_config(|config| assert!(config.store.blob_prune_margin_epochs == 10));
 }

 #[test]
 fn reconstruct_historic_states_flag() {
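The old assertions here were wrong beyond the missing `store.` prefix: on an integer, prefix `!` is bitwise NOT rather than logical negation, so `!config.epochs_per_blob_prune == 5` compares the complement of the value with 5. A standalone illustration:

    let x: u64 = 5;
    assert_eq!(!x, u64::MAX - 5); // `!` on an integer flips every bit
    assert!(!x != 5); // so `!x == 5` is almost never the intended check
    assert!(!(x == 4)); // logical negation needs the comparison parenthesized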


@@ -306,7 +306,7 @@ impl<E: EthSpec> Tester<E> {
             ));
         }

-        let harness = BeaconChainHarness::builder(E::default())
+        let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
             .spec(spec.clone())
             .keypairs(vec![])
             .genesis_state_ephemeral_store(case.anchor_state.clone())


@@ -12,9 +12,9 @@ path = "tests/main.rs"
 [dependencies]
 tempfile = "3.1.0"
 types = { path = "../../consensus/types" }
-rusqlite = { version = "0.25.3", features = ["bundled"] }
+rusqlite = { version = "0.28.0", features = ["bundled"] }
 r2d2 = "0.8.9"
-r2d2_sqlite = "0.18.0"
+r2d2_sqlite = "0.21.0"
 serde = "1.0.116"
 serde_derive = "1.0.116"
 serde_json = "1.0.58"


@@ -162,8 +162,8 @@ impl SlashingDatabase {
     /// The exclusive locking mode also has the benefit of applying to other processes, so multiple
     /// Lighthouse processes trying to access the same database will also be blocked.
     fn apply_pragmas(conn: &mut rusqlite::Connection) -> Result<(), rusqlite::Error> {
-        conn.pragma_update(None, "foreign_keys", &true)?;
-        conn.pragma_update(None, "locking_mode", &"EXCLUSIVE")?;
+        conn.pragma_update(None, "foreign_keys", true)?;
+        conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?;
         Ok(())
     }
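The dropped `&`s track the rusqlite 0.28 API, where `pragma_update` takes the new pragma value as a generic `ToSql` parameter by value. A minimal usage sketch against an in-memory database, assuming rusqlite 0.28 as pinned in the Cargo.toml changes above:

    fn open_with_pragmas() -> Result<rusqlite::Connection, rusqlite::Error> {
        let conn = rusqlite::Connection::open_in_memory()?;
        conn.pragma_update(None, "foreign_keys", true)?;
        conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?;
        Ok(conn)
    }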