Merge pull request #3905 from emhane/beacon_chain_tests

Debug CI
realbigsean 2023-02-15 11:53:41 -05:00 committed by GitHub
commit 87d1fbeb21
59 changed files with 785 additions and 606 deletions

.gitignore (vendored): 1 line changed

@ -9,6 +9,7 @@ perf.data*
/bin
genesis.ssz
/clippy.toml
/.cargo
# IntelliJ
/*.iml

Cargo.lock (generated): 648 lines changed

File diff suppressed because it is too large.

@ -94,7 +94,6 @@ resolver = "2"
[patch]
[patch.crates-io]
fixed-hash = { git = "https://github.com/paritytech/parity-common", rev="df638ab0885293d21d656dc300d39236b69ce57d" }
warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
eth2_ssz = { path = "consensus/ssz" }
eth2_ssz_derive = { path = "consensus/ssz_derive" }


@ -36,7 +36,7 @@ PROFILE ?= release
# List of all hard forks. This list is used to set env variables for several tests so that
# they run for different forks.
FORKS=phase0 altair merge capella
FORKS=phase0 altair merge capella eip4844
# Builds the Lighthouse binary in release (optimized).
#
@ -163,6 +163,7 @@ lint:
cargo clippy --workspace --tests -- \
-D clippy::fn_to_numeric_cast_any \
-D warnings \
-A clippy::uninlined-format-args \
-A clippy::derive_partial_eq_without_eq \
-A clippy::from-over-into \
-A clippy::upper-case-acronyms \
@ -193,7 +194,7 @@ arbitrary-fuzz:
# Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
audit:
cargo install --force cargo-audit
cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159
cargo audit --ignore RUSTSEC-2020-0071
# Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
vendor:


@ -68,6 +68,8 @@ hex = "0.4.2"
exit-future = "0.2.0"
unused_port = {path = "../../common/unused_port"}
oneshot_broadcast = { path = "../../common/oneshot_broadcast" }
slog-term = "2.6.0"
slog-async = "2.5.0"
[[test]]
name = "beacon_chain_tests"


@ -42,7 +42,6 @@ pub enum Error {
// Boxed to avoid an infinite-size recursion issue.
BeaconChain(Box<BeaconChainError>),
MissingBeaconState(Hash256),
MissingBlobs,
FailedToTransitionState(StateAdvanceError),
CannotAttestToFutureState {
state_slot: Slot,


@ -4579,7 +4579,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let (payload, _, _) = block_contents
.ok_or(BlockProductionError::MissingExecutionPayload)?
.deconstruct();
(
BeaconBlock::Capella(BeaconBlockCapella {
slot,
@ -4610,7 +4609,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let (payload, kzg_commitments, blobs) = block_contents
.ok_or(BlockProductionError::MissingExecutionPayload)?
.deconstruct();
(
BeaconBlock::Eip4844(BeaconBlockEip4844 {
slot,
@ -4694,8 +4692,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
.as_ref()
.ok_or(BlockProductionError::TrustedSetupNotInitialized)?;
let kzg_aggregated_proof =
kzg_utils::compute_aggregate_kzg_proof::<T::EthSpec>(&kzg, &blobs)
.map_err(|e| BlockProductionError::KzgError(e))?;
kzg_utils::compute_aggregate_kzg_proof::<T::EthSpec>(kzg, &blobs)
.map_err(BlockProductionError::KzgError)?;
let beacon_block_root = block.canonical_root();
let expected_kzg_commitments = block.body().blob_kzg_commitments().map_err(|_| {
BlockProductionError::InvalidBlockVariant(
@ -4709,7 +4707,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
kzg_aggregated_proof,
};
kzg_utils::validate_blobs_sidecar(
&kzg,
kzg,
slot,
beacon_block_root,
expected_kzg_commitments,
@ -5941,17 +5939,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// The epoch at which we require a data availability check in block processing.
/// `None` if the `Eip4844` fork is disabled.
pub fn data_availability_boundary(&self) -> Option<Epoch> {
self.spec
.eip4844_fork_epoch
.map(|fork_epoch| {
self.epoch().ok().map(|current_epoch| {
std::cmp::max(
fork_epoch,
current_epoch.saturating_sub(*MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS),
)
})
self.spec.eip4844_fork_epoch.and_then(|fork_epoch| {
self.epoch().ok().map(|current_epoch| {
std::cmp::max(
fork_epoch,
current_epoch.saturating_sub(*MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS),
)
})
.flatten()
})
}
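The `data_availability_boundary` change above collapses an outer `map` followed by `.flatten()` into a single `and_then`; for `Option` the two forms are equivalent, and `and_then` avoids the extra nesting. A minimal sketch of the same shape, using a hypothetical `boundary` helper with plain `u64` epochs instead of the Lighthouse `Epoch` type:

fn boundary(fork_epoch: Option<u64>, current_epoch: Option<u64>, min_epochs: u64) -> Option<u64> {
    // `and_then` already returns the inner Option, so no trailing `.flatten()` is needed.
    fork_epoch.and_then(|fork| {
        current_epoch.map(|current| std::cmp::max(fork, current.saturating_sub(min_epochs)))
    })
}

fn main() {
    assert_eq!(boundary(Some(10), Some(100), 20), Some(80)); // max(10, 100 - 20)
    assert_eq!(boundary(None, Some(100), 20), None); // fork disabled, so no boundary
}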
/// The epoch that is a data availability boundary, or the latest finalized epoch.


@ -292,12 +292,12 @@ impl<E: EthSpec> AvailableBlock<E> {
let blobs_sidecar = beacon_block
.reconstruct_empty_blobs(Some(block_root))
.map(Arc::new)?;
return Ok(AvailableBlock(AvailableBlockInner::BlockAndBlob(
Ok(AvailableBlock(AvailableBlockInner::BlockAndBlob(
SignedBeaconBlockAndBlobsSidecar {
beacon_block,
blobs_sidecar,
},
)));
)))
}
DataAvailabilityCheckRequired::No => {
Ok(AvailableBlock(AvailableBlockInner::Block(beacon_block)))
@ -391,6 +391,7 @@ pub trait AsBlock<E: EthSpec> {
fn message(&self) -> BeaconBlockRef<E>;
fn as_block(&self) -> &SignedBeaconBlock<E>;
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>>;
fn canonical_root(&self) -> Hash256;
}
impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
@ -432,8 +433,8 @@ impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self {
BlockWrapper::Block(block) => &block,
BlockWrapper::BlockAndBlob(block, _) => &block,
BlockWrapper::Block(block) => block,
BlockWrapper::BlockAndBlob(block, _) => block,
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
@ -442,6 +443,12 @@ impl<E: EthSpec> AsBlock<E> for BlockWrapper<E> {
BlockWrapper::BlockAndBlob(block, _) => block.clone(),
}
}
fn canonical_root(&self) -> Hash256 {
match &self {
BlockWrapper::Block(block) => block.canonical_root(),
BlockWrapper::BlockAndBlob(block, _) => block.canonical_root(),
}
}
}
impl<E: EthSpec> AsBlock<E> for &BlockWrapper<E> {
@ -483,8 +490,8 @@ impl<E: EthSpec> AsBlock<E> for &BlockWrapper<E> {
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self {
BlockWrapper::Block(block) => &block,
BlockWrapper::BlockAndBlob(block, _) => &block,
BlockWrapper::Block(block) => block,
BlockWrapper::BlockAndBlob(block, _) => block,
}
}
fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
@ -493,6 +500,12 @@ impl<E: EthSpec> AsBlock<E> for &BlockWrapper<E> {
BlockWrapper::BlockAndBlob(block, _) => block.clone(),
}
}
fn canonical_root(&self) -> Hash256 {
match &self {
BlockWrapper::Block(block) => block.canonical_root(),
BlockWrapper::BlockAndBlob(block, _) => block.canonical_root(),
}
}
}
impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
@ -546,7 +559,7 @@ impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
}
fn as_block(&self) -> &SignedBeaconBlock<E> {
match &self.0 {
AvailableBlockInner::Block(block) => &block,
AvailableBlockInner::Block(block) => block,
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
&block_sidecar_pair.beacon_block
}
@ -560,4 +573,12 @@ impl<E: EthSpec> AsBlock<E> for AvailableBlock<E> {
}
}
}
fn canonical_root(&self) -> Hash256 {
match &self.0 {
AvailableBlockInner::Block(block) => block.canonical_root(),
AvailableBlockInner::BlockAndBlob(block_sidecar_pair) => {
block_sidecar_pair.beacon_block.canonical_root()
}
}
}
}


@ -1120,7 +1120,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
}
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
&self.block.as_block()
self.block.as_block()
}
}


@ -1016,6 +1016,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String {
#[cfg(test)]
mod test {
use super::*;
use crate::test_utils::EphemeralHarnessType;
use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
use eth2_hashing::hash;
use genesis::{
@ -1030,6 +1031,7 @@ mod test {
use types::{EthSpec, MinimalEthSpec, Slot};
type TestEthSpec = MinimalEthSpec;
type Builder = BeaconChainBuilder<EphemeralHarnessType<TestEthSpec>>;
fn get_logger() -> Logger {
let builder = NullLoggerBuilder;
@ -1062,7 +1064,7 @@ mod test {
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
let runtime = TestRuntime::default();
let chain = BeaconChainBuilder::new(MinimalEthSpec)
let chain = Builder::new(MinimalEthSpec)
.logger(log.clone())
.store(Arc::new(store))
.task_executor(runtime.task_executor.clone())


@ -165,8 +165,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
.read()
.as_ref()
.filter(|item| item.beacon_block_root == block_root)
.map(|item| item.blobs.clone())
.flatten()
.and_then(|item| item.blobs.clone())
}
/// Returns the proto-array block, if `block_root` matches the cached item.


@ -40,7 +40,7 @@ pub fn compute_aggregate_kzg_proof<T: EthSpec>(
blobs: &[Blob<T>],
) -> Result<KzgProof, KzgError> {
let blobs = blobs
.into_iter()
.iter()
.map(|blob| ssz_blob_to_crypto_blob::<T>(blob.clone())) // TODO(pawan): avoid this clone
.collect::<Vec<_>>();


@ -34,7 +34,9 @@ use rand::Rng;
use rand::SeedableRng;
use rayon::prelude::*;
use sensitive_url::SensitiveUrl;
use slog::Logger;
use slog::{o, Drain, Logger};
use slog_async::Async;
use slog_term::{FullFormat, TermDecorator};
use slot_clock::{SlotClock, TestingSlotClock};
use state_processing::per_block_processing::compute_timestamp_at_slot;
use state_processing::{
@ -1924,8 +1926,9 @@ where
chain_dump
.iter()
.cloned()
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into())
.filter(|block_hash| *block_hash != Hash256::zero().into())
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root)
.filter(|block_hash| *block_hash != Hash256::zero())
.map(|hash| hash.into())
.collect()
}
@ -2125,3 +2128,15 @@ impl<T: BeaconChainTypes> fmt::Debug for BeaconChainHarness<T> {
write!(f, "BeaconChainHarness")
}
}
pub fn build_log(level: slog::Level, enabled: bool) -> Logger {
let decorator = TermDecorator::new().build();
let drain = FullFormat::new(decorator).build().fuse();
let drain = Async::new(drain).build().fuse();
if enabled {
Logger::root(drain.filter_level(level).fuse(), o!())
} else {
Logger::root(drain.filter(|_| false).fuse(), o!())
}
}
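`build_log` gives the beacon chain, sync, and range-sync tests later in this diff a shared way to either print to the terminal or stay silent. A short usage sketch (the log message and key/value pair are hypothetical; `build_log` and its signature come from this diff):

use beacon_chain::test_utils::build_log;

fn main() {
    // Verbose terminal output while debugging a test locally.
    let log = build_log(slog::Level::Debug, true);
    slog::info!(log, "harness ready"; "validators" => 8);

    // Disabled logger for the normal CI run.
    let _quiet = build_log(slog::Level::Debug, false);
}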


@ -299,7 +299,7 @@ mod test {
let ops = cache
.import_new_pubkeys(&state)
.expect("should import pubkeys");
store.do_atomically(ops).unwrap();
store.do_atomically_with_block_and_blobs_cache(ops).unwrap();
check_cache_get(&cache, &keypairs[..]);
drop(cache);


@ -1,6 +1,9 @@
#![cfg(not(debug_assertions))]
use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
use beacon_chain::{
blob_verification::{BlockWrapper, IntoAvailableBlock},
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
};
use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
use lazy_static::lazy_static;
use std::sync::Arc;
@ -131,6 +134,8 @@ async fn produces_attestations() {
assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
assert_eq!(data.target.root, target_root, "bad target root");
let block_wrapper: BlockWrapper<MainnetEthSpec> = Arc::new(block.clone()).into();
let early_attestation = {
let proto_block = chain
.canonical_head
@ -141,8 +146,9 @@ async fn produces_attestations() {
.early_attester_cache
.add_head_block(
block_root,
Arc::new(block.clone()),
None,
block_wrapper
.into_available_block(block_root, chain)
.expect("should wrap into available block"),
proto_block,
&state,
&chain.spec,
@ -193,13 +199,18 @@ async fn early_attester_cache_old_request() {
.get_block(&head.beacon_block_root)
.unwrap();
let block: BlockWrapper<MainnetEthSpec> = head.beacon_block.clone().into();
let chain = &harness.chain;
harness
.chain
.early_attester_cache
.add_head_block(
head.beacon_block_root,
head.beacon_block.clone(),
None,
block
.clone()
.into_available_block(head.beacon_block_root, &chain)
.expect("should wrap into available block"),
head_proto_block,
&head.beacon_state,
&harness.chain.spec,


@ -1,7 +1,8 @@
#![cfg(not(debug_assertions))]
use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
use beacon_chain::{
blob_verification::{AsBlock, BlockWrapper},
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
};
use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer};
use fork_choice::CountUnrealized;
@ -80,7 +81,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp
fn chain_segment_blocks(chain_segment: &[BeaconSnapshot<E>]) -> Vec<Arc<SignedBeaconBlock<E>>> {
chain_segment
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.map(|snapshot| snapshot.beacon_block.clone().into())
.collect()
}
@ -137,7 +138,10 @@ fn update_parent_roots(snapshots: &mut [BeaconSnapshot<E>]) {
async fn chain_segment_full_segment() {
let harness = get_harness(VALIDATOR_COUNT);
let chain_segment = get_chain_segment().await;
let blocks = chain_segment_blocks(&chain_segment);
let blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
.into_iter()
.map(|block| block.into())
.collect();
harness
.chain
@ -177,7 +181,10 @@ async fn chain_segment_varying_chunk_size() {
for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] {
let harness = get_harness(VALIDATOR_COUNT);
let chain_segment = get_chain_segment().await;
let blocks = chain_segment_blocks(&chain_segment);
let blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
.into_iter()
.map(|block| block.into())
.collect();
harness
.chain
@ -220,7 +227,10 @@ async fn chain_segment_non_linear_parent_roots() {
/*
* Test with a block removed.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
.into_iter()
.map(|block| block.into())
.collect();
blocks.remove(2);
assert!(
@ -238,10 +248,14 @@ async fn chain_segment_non_linear_parent_roots() {
/*
* Test with a modified parent root.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
.into_iter()
.map(|block| block.into())
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.parent_root_mut() = Hash256::zero();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
assert!(
matches!(
@ -269,10 +283,13 @@ async fn chain_segment_non_linear_slots() {
* Test where a child is lower than the parent.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
.into_iter()
.map(|block| block.into())
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = Slot::new(0);
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
assert!(
matches!(
@ -290,10 +307,13 @@ async fn chain_segment_non_linear_slots() {
* Test where a child is equal to the parent.
*/
let mut blocks = chain_segment_blocks(&chain_segment);
let (mut block, signature) = blocks[3].as_ref().clone().deconstruct();
let mut blocks: Vec<BlockWrapper<E>> = chain_segment_blocks(&chain_segment)
.into_iter()
.map(|block| block.into())
.collect();
let (mut block, signature) = blocks[3].as_block().clone().deconstruct();
*block.slot_mut() = blocks[2].slot();
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature));
blocks[3] = Arc::new(SignedBeaconBlock::from_block(block, signature)).into();
assert!(
matches!(
@ -315,9 +335,9 @@ async fn assert_invalid_signature(
snapshots: &[BeaconSnapshot<E>],
item: &str,
) {
let blocks = snapshots
let blocks: Vec<BlockWrapper<E>> = snapshots
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.map(|snapshot| snapshot.beacon_block.clone().into())
.collect();
// Ensure the block will be rejected if imported in a chain segment.
@ -341,7 +361,7 @@ async fn assert_invalid_signature(
let ancestor_blocks = chain_segment
.iter()
.take(block_index)
.map(|snapshot| snapshot.beacon_block.clone())
.map(|snapshot| snapshot.beacon_block.clone().into())
.collect();
// We don't care if this fails, we just call this to ensure that all prior blocks have been
// imported prior to this test.
@ -409,7 +429,7 @@ async fn invalid_signature_gossip_block() {
let ancestor_blocks = chain_segment
.iter()
.take(block_index)
.map(|snapshot| snapshot.beacon_block.clone())
.map(|snapshot| snapshot.beacon_block.clone().into())
.collect();
harness
.chain
@ -455,9 +475,9 @@ async fn invalid_signature_block_proposal() {
block.clone(),
junk_signature(),
));
let blocks = snapshots
let blocks: Vec<BlockWrapper<E>> = snapshots
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.map(|snapshot| snapshot.beacon_block.clone().into())
.collect::<Vec<_>>();
// Ensure the block will be rejected if imported in a chain segment.
assert!(
@ -654,9 +674,9 @@ async fn invalid_signature_deposit() {
Arc::new(SignedBeaconBlock::from_block(block, signature));
update_parent_roots(&mut snapshots);
update_proposal_signatures(&mut snapshots, &harness);
let blocks = snapshots
let blocks: Vec<BlockWrapper<E>> = snapshots
.iter()
.map(|snapshot| snapshot.beacon_block.clone())
.map(|snapshot| snapshot.beacon_block.clone().into())
.collect();
assert!(
!matches!(
@ -733,7 +753,7 @@ async fn block_gossip_verification() {
for snapshot in &chain_segment[0..block_index] {
let gossip_verified = harness
.chain
.verify_block_for_gossip(snapshot.beacon_block.clone())
.verify_block_for_gossip(snapshot.beacon_block.clone().into())
.await
.expect("should obtain gossip verified block");
@ -771,7 +791,7 @@ async fn block_gossip_verification() {
*block.slot_mut() = expected_block_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
BlockError::FutureSlot {
present_slot,
block_slot,
@ -805,7 +825,7 @@ async fn block_gossip_verification() {
*block.slot_mut() = expected_finalized_slot;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
BlockError::WouldRevertFinalizedSlot {
block_slot,
finalized_slot,
@ -835,10 +855,9 @@ async fn block_gossip_verification() {
unwrap_err(
harness
.chain
.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(
block,
junk_signature()
)))
.verify_block_for_gossip(
Arc::new(SignedBeaconBlock::from_block(block, junk_signature())).into()
)
.await
),
BlockError::ProposalSignatureInvalid
@ -863,7 +882,7 @@ async fn block_gossip_verification() {
*block.parent_root_mut() = parent_root;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
BlockError::ParentUnknown(block)
if block.parent_root() == parent_root
),
@ -889,7 +908,7 @@ async fn block_gossip_verification() {
*block.parent_root_mut() = parent_root;
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature)).into()).await),
BlockError::NotFinalizedDescendant { block_parent_root }
if block_parent_root == parent_root
),
@ -927,7 +946,7 @@ async fn block_gossip_verification() {
);
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
BlockError::IncorrectBlockProposer {
block,
local_shuffling,
@ -939,7 +958,7 @@ async fn block_gossip_verification() {
// Check to ensure that we registered this is a valid block from this proposer.
assert!(
matches!(
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone())).await),
unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(block.clone()).into()).await),
BlockError::RepeatProposal {
proposer,
slot,
@ -951,7 +970,11 @@ async fn block_gossip_verification() {
let block = chain_segment[block_index].beacon_block.clone();
assert!(
harness.chain.verify_block_for_gossip(block).await.is_ok(),
harness
.chain
.verify_block_for_gossip(block.into())
.await
.is_ok(),
"the valid block should be processed"
);
@ -969,7 +992,7 @@ async fn block_gossip_verification() {
matches!(
harness
.chain
.verify_block_for_gossip(block.clone())
.verify_block_for_gossip(block.clone().into())
.await
.err()
.expect("should error when processing known block"),
@ -1006,7 +1029,7 @@ async fn verify_block_for_gossip_slashing_detection() {
let verified_block = harness
.chain
.verify_block_for_gossip(Arc::new(block1))
.verify_block_for_gossip(Arc::new(block1).into())
.await
.unwrap();
harness
@ -1022,7 +1045,7 @@ async fn verify_block_for_gossip_slashing_detection() {
unwrap_err(
harness
.chain
.verify_block_for_gossip(Arc::new(block2))
.verify_block_for_gossip(Arc::new(block2).into())
.await,
);
@ -1045,7 +1068,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
let verified_block = harness
.chain
.verify_block_for_gossip(Arc::new(block))
.verify_block_for_gossip(Arc::new(block).into())
.await
.unwrap();
let attestations = verified_block.block.message().body().attestations().clone();
@ -1184,7 +1207,7 @@ async fn add_base_block_to_altair_chain() {
assert!(matches!(
harness
.chain
.verify_block_for_gossip(Arc::new(base_block.clone()))
.verify_block_for_gossip(Arc::new(base_block.clone()).into())
.await
.err()
.expect("should error when processing base block"),
@ -1218,7 +1241,7 @@ async fn add_base_block_to_altair_chain() {
harness
.chain
.process_chain_segment(
vec![Arc::new(base_block)],
vec![Arc::new(base_block).into()],
CountUnrealized::True,
NotifyExecutionLayer::Yes,
)
@ -1322,7 +1345,7 @@ async fn add_altair_block_to_base_chain() {
assert!(matches!(
harness
.chain
.verify_block_for_gossip(Arc::new(altair_block.clone()))
.verify_block_for_gossip(Arc::new(altair_block.clone()).into())
.await
.err()
.expect("should error when processing altair block"),
@ -1356,7 +1379,7 @@ async fn add_altair_block_to_base_chain() {
harness
.chain
.process_chain_segment(
vec![Arc::new(altair_block)],
vec![Arc::new(altair_block).into()],
CountUnrealized::True,
NotifyExecutionLayer::Yes
)


@ -31,8 +31,16 @@ fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
let cold_path = db_path.path().join("cold_db");
let config = StoreConfig::default();
let log = NullLoggerBuilder.build().expect("logger should build");
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
.expect("disk store should initialize")
HotColdDB::open(
&hot_path,
&cold_path,
None,
|_, _, _| Ok(()),
config,
spec,
log,
)
.expect("disk store should initialize")
}
fn get_harness(store: Arc<HotColdDB>, validator_count: usize) -> TestHarness {


@ -1040,7 +1040,7 @@ async fn invalid_parent() {
// Ensure the block built atop an invalid payload is invalid for gossip.
assert!(matches!(
rig.harness.chain.clone().verify_block_for_gossip(block.clone()).await,
rig.harness.chain.clone().verify_block_for_gossip(block.clone().into()).await,
Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
if invalid_root == parent_root
));


@ -11,7 +11,9 @@ use beacon_chain::{
BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer,
ServerSentEventHandler, WhenSlotSkipped,
};
use eth2_network_config::TRUSTED_SETUP;
use fork_choice::CountUnrealized;
use kzg::TrustedSetup;
use lazy_static::lazy_static;
use logging::test_logger;
use maplit::hashset;
@ -57,15 +59,23 @@ fn get_store_with_spec(
let config = StoreConfig::default();
let log = test_logger();
HotColdDB::open(&hot_path, &cold_path, |_, _, _| Ok(()), config, spec, log)
.expect("disk store should initialize")
HotColdDB::open(
&hot_path,
&cold_path,
None,
|_, _, _| Ok(()),
config,
spec,
log,
)
.expect("disk store should initialize")
}
fn get_harness(
store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
validator_count: usize,
) -> TestHarness {
let harness = BeaconChainHarness::builder(MinimalEthSpec)
let harness = TestHarness::builder(MinimalEthSpec)
.default_spec()
.keypairs(KEYPAIRS[0..validator_count].to_vec())
.fresh_disk_store(store)
@ -565,7 +575,7 @@ async fn delete_blocks_and_states() {
let store = get_store(&db_path);
let validators_keypairs =
types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
let harness = BeaconChainHarness::builder(MinimalEthSpec)
let harness = TestHarness::builder(MinimalEthSpec)
.default_spec()
.keypairs(validators_keypairs)
.fresh_disk_store(store.clone())
@ -695,7 +705,7 @@ async fn multi_epoch_fork_valid_blocks_test(
let store = get_store(&db_path);
let validators_keypairs =
types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
let harness = BeaconChainHarness::builder(MinimalEthSpec)
let harness = TestHarness::builder(MinimalEthSpec)
.default_spec()
.keypairs(validators_keypairs)
.fresh_disk_store(store)
@ -2101,10 +2111,13 @@ async fn weak_subjectivity_sync() {
let store = get_store(&temp2);
let spec = test_spec::<E>();
let seconds_per_slot = spec.seconds_per_slot;
let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP)
.map_err(|e| println!("Unable to read trusted setup file: {}", e))
.unwrap();
// Initialise a new beacon chain from the finalized checkpoint
let beacon_chain = Arc::new(
BeaconChainBuilder::new(MinimalEthSpec)
BeaconChainBuilder::<DiskHarnessType<E>>::new(MinimalEthSpec)
.store(store.clone())
.custom_spec(test_spec::<E>())
.task_executor(harness.chain.task_executor.clone())
@ -2123,6 +2136,7 @@ async fn weak_subjectivity_sync() {
1,
)))
.monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
.trusted_setup(trusted_setup)
.build()
.expect("should build"),
);
@ -2300,7 +2314,7 @@ async fn finalizes_after_resuming_from_db() {
let original_chain = harness.chain;
let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec)
let resumed_harness = BeaconChainHarness::<DiskHarnessType<E>>::builder(MinimalEthSpec)
.default_spec()
.keypairs(KEYPAIRS[0..validator_count].to_vec())
.resumed_disk_store(store)
@ -2476,7 +2490,7 @@ async fn revert_minority_fork_on_resume() {
drop(harness1);
let resume_store = get_store_with_spec(&db_path1, spec2.clone());
let resumed_harness = BeaconChainHarness::builder(MinimalEthSpec)
let resumed_harness = TestHarness::builder(MinimalEthSpec)
.spec(spec2)
.keypairs(KEYPAIRS[0..validator_count].to_vec())
.resumed_disk_store(resume_store)


@ -133,6 +133,7 @@ mod test {
nonce: Hash64::zero(),
base_fee_per_gas: 0x036b_u64.into(),
withdrawals_root: None,
excess_data_gas: None,
};
let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b";
let expected_hash =
@ -161,6 +162,7 @@ mod test {
nonce: Hash64::zero(),
base_fee_per_gas: 0x036b_u64.into(),
withdrawals_root: None,
excess_data_gas: None,
};
let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b";
let expected_hash =
@ -190,6 +192,7 @@ mod test {
nonce: Hash64::zero(),
base_fee_per_gas: 0x34187b238_u64.into(),
withdrawals_root: None,
excess_data_gas: None,
};
let expected_hash =
Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351")


@ -2,7 +2,7 @@ use crate::engines::ForkchoiceState;
use crate::http::{
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1,
ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2,
ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2,
ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3,
};
use crate::BlobTxConversionError;
pub use ethers_core::types::Transaction;
@ -414,6 +414,9 @@ impl EngineCapabilities {
if self.new_payload_v2 {
response.push(ENGINE_NEW_PAYLOAD_V2);
}
if self.new_payload_v3 {
response.push(ENGINE_NEW_PAYLOAD_V3);
}
if self.forkchoice_updated_v1 {
response.push(ENGINE_FORKCHOICE_UPDATED_V1);
}
@ -426,6 +429,9 @@ impl EngineCapabilities {
if self.get_payload_v2 {
response.push(ENGINE_GET_PAYLOAD_V2);
}
if self.get_payload_v3 {
response.push(ENGINE_GET_PAYLOAD_V3);
}
if self.exchange_transition_configuration_v1 {
response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1);
}


@ -63,8 +63,10 @@ pub const METHOD_NOT_FOUND_CODE: i64 = -32601;
pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
ENGINE_NEW_PAYLOAD_V1,
ENGINE_NEW_PAYLOAD_V2,
ENGINE_NEW_PAYLOAD_V3,
ENGINE_GET_PAYLOAD_V1,
ENGINE_GET_PAYLOAD_V2,
ENGINE_GET_PAYLOAD_V3,
ENGINE_FORKCHOICE_UPDATED_V1,
ENGINE_FORKCHOICE_UPDATED_V2,
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1,


@ -2,13 +2,12 @@ use super::*;
use serde::{Deserialize, Serialize};
use strum::EnumString;
use superstruct::superstruct;
use types::blobs_sidecar::KzgCommitments;
use types::{
Blob, EthSpec, ExecutionBlockHash, FixedVector, KzgCommitment, Transaction, Unsigned,
Blobs, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
ExecutionPayloadEip4844, ExecutionPayloadMerge, FixedVector, Transaction, Unsigned,
VariableList, Withdrawal,
};
use types::{
ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
};
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -418,9 +417,9 @@ impl From<JsonPayloadAttributes> for PayloadAttributes {
#[serde(bound = "T: EthSpec", rename_all = "camelCase")]
pub struct JsonBlobsBundle<T: EthSpec> {
pub block_hash: ExecutionBlockHash,
pub kzgs: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
pub kzgs: KzgCommitments<T>,
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
pub blobs: Blobs<T>,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]


@ -43,16 +43,15 @@ use tokio_stream::wrappers::WatchStream;
use types::consts::eip4844::BLOB_TX_TYPE;
use types::transaction::{AccessTuple, BlobTransaction, EcdsaSignature, SignedBlobTransaction};
use types::{
AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment, VersionedHash,
blobs_sidecar::{Blobs, KzgCommitments},
ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
};
use types::{AbstractExecPayload, BeaconStateError, ExecPayload, VersionedHash};
use types::{
BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Transaction,
Uint256,
};
use types::{
ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
};
mod block_hash;
mod engine_api;
@ -135,19 +134,13 @@ pub enum BlockProposalContents<T: EthSpec, Payload: AbstractExecPayload<T>> {
PayloadAndBlobs {
payload: Payload,
block_value: Uint256,
kzg_commitments: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
kzg_commitments: KzgCommitments<T>,
blobs: Blobs<T>,
},
}
impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Payload> {
pub fn deconstruct(
self,
) -> (
Payload,
Option<VariableList<KzgCommitment, T::MaxBlobsPerBlock>>,
Option<VariableList<Blob<T>, T::MaxBlobsPerBlock>>,
) {
pub fn deconstruct(self) -> (Payload, Option<KzgCommitments<T>>, Option<Blobs<T>>) {
match self {
Self::Payload {
payload,
@ -2167,7 +2160,7 @@ fn ethers_tx_to_bytes<T: EthSpec>(
.ok_or(BlobTxConversionError::BlobVersionedHashesMissing)?
.as_array()
.ok_or(BlobTxConversionError::BlobVersionedHashesMissing)?
.into_iter()
.iter()
.map(|versioned_hash| {
let hash_bytes = eth2_serde_utils::hex::decode(
versioned_hash


@ -152,7 +152,7 @@ pub async fn handle_rpc<T: EthSpec>(
ForkName::Eip4844 => {
if method == ENGINE_NEW_PAYLOAD_V1 || method == ENGINE_NEW_PAYLOAD_V2 {
return Err((
format!("{} called after capella fork!", method),
format!("{} called after eip4844 fork!", method),
GENERIC_ERROR_CODE,
));
}


@ -227,7 +227,7 @@ impl BlockId {
"Blob with block root {} is not in the store",
root
))),
Err(e) => Err(warp_utils::reject::beacon_chain_error(e.into())),
Err(e) => Err(warp_utils::reject::beacon_chain_error(e)),
}
}
}


@ -46,9 +46,9 @@ pub async fn publish_block<T: BeaconChainTypes>(
block_and_blobs.into()
} else {
//FIXME(sean): This should probably return a specific no-blob-cached error code, beacon API coordination required
return Err(warp_utils::reject::broadcast_without_import(format!(
"no blob cached for block"
)));
return Err(warp_utils::reject::broadcast_without_import(
"no blob cached for block".into(),
));
}
} else {
crate::publish_pubsub_message(network_tx, PubsubMessage::BeaconBlock(block.clone()))?;


@ -1,7 +1,5 @@
use beacon_chain::{
test_utils::{
BeaconChainHarness, BoxedMutator, Builder as HarnessBuilder, EphemeralHarnessType,
},
test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType},
BeaconChain, BeaconChainTypes,
};
use directory::DEFAULT_ROOT_DIR;
@ -57,9 +55,8 @@ pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
pub external_peer_id: PeerId,
}
type Initializer<E> = Box<
dyn FnOnce(HarnessBuilder<EphemeralHarnessType<E>>) -> HarnessBuilder<EphemeralHarnessType<E>>,
>;
type HarnessBuilder<E> = Builder<EphemeralHarnessType<E>>;
type Initializer<E> = Box<dyn FnOnce(HarnessBuilder<E>) -> HarnessBuilder<E>>;
type Mutator<E> = BoxedMutator<E, MemoryStore<E>, MemoryStore<E>>;
impl<E: EthSpec> InteractiveTester<E> {


@ -408,15 +408,12 @@ impl ProtocolId {
/// Returns `true` if the given `ProtocolId` should expect `context_bytes` in the
/// beginning of the stream, else returns `false`.
pub fn has_context_bytes(&self) -> bool {
match self.version {
Version::V2 => match self.message_name {
Protocol::BlocksByRange | Protocol::BlocksByRoot => return true,
_ => return false,
},
Version::V1 => match self.message_name {
Protocol::BlobsByRange | Protocol::BlobsByRoot => return true,
_ => return false,
},
match self.message_name {
Protocol::BlocksByRange | Protocol::BlocksByRoot => {
!matches!(self.version, Version::V1)
}
Protocol::BlobsByRange | Protocol::BlobsByRoot | Protocol::LightClientBootstrap => true,
Protocol::Goodbye | Protocol::Ping | Protocol::Status | Protocol::MetaData => false,
}
}
}
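The rewritten `has_context_bytes` matches on the protocol first and only consults the version for the block responses, via `!matches!(self.version, Version::V1)`. A minimal sketch of that idiom with a stand-in `Version` enum:

enum Version {
    V1,
    V2,
}

fn blocks_have_context_bytes(version: Version) -> bool {
    // Same shape as the refactor: context bytes are expected for every version except V1.
    !matches!(version, Version::V1)
}

fn main() {
    assert!(!blocks_have_context_bytes(Version::V1));
    assert!(blocks_have_context_bytes(Version::V2));
}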


@ -243,7 +243,7 @@ impl TestRig {
pub fn enqueue_rpc_block(&self) {
let event = WorkEvent::rpc_beacon_block(
self.next_block.canonical_root(),
self.next_block.clone(),
self.next_block.clone().into(),
std::time::Duration::default(),
BlockProcessType::ParentLookup {
chain_hash: Hash256::random(),
@ -255,7 +255,7 @@ impl TestRig {
pub fn enqueue_single_lookup_rpc_block(&self) {
let event = WorkEvent::rpc_beacon_block(
self.next_block.canonical_root(),
self.next_block.clone(),
self.next_block.clone().into(),
std::time::Duration::default(),
BlockProcessType::SingleBlock { id: 1 },
);


@ -3,14 +3,16 @@
mod tests {
use crate::persisted_dht::load_dht;
use crate::{NetworkConfig, NetworkService};
use beacon_chain::test_utils::BeaconChainHarness;
use beacon_chain::test_utils::EphemeralHarnessType;
use lighthouse_network::Enr;
use slog::{o, Drain, Level, Logger};
use sloggers::{null::NullLoggerBuilder, Build};
use std::str::FromStr;
use std::sync::Arc;
use tokio::runtime::Runtime;
use types::MinimalEthSpec;
use types::MinimalEthSpec as E;
type BeaconChainHarness = beacon_chain::test_utils::BeaconChainHarness<EphemeralHarnessType<E>>;
fn get_logger(actual_log: bool) -> Logger {
if actual_log {
@ -34,7 +36,7 @@ mod tests {
fn test_dht_persistence() {
let log = get_logger(false);
let beacon_chain = BeaconChainHarness::builder(MinimalEthSpec)
let beacon_chain = BeaconChainHarness::builder(E)
.default_spec()
.deterministic_keypairs(8)
.fresh_ephemeral_store()


@ -204,8 +204,10 @@ impl<const MAX_ATTEMPTS: u8> slog::Value for SingleBlockRequest<MAX_ATTEMPTS> {
#[cfg(test)]
mod tests {
use super::*;
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use types::{MinimalEthSpec as E, SignedBeaconBlock};
use types::{
test_utils::{SeedableRng, TestRandom, XorShiftRng},
MinimalEthSpec as E, SignedBeaconBlock,
};
fn rand_block() -> SignedBeaconBlock<E> {
let mut rng = XorShiftRng::from_seed([42; 16]);


@ -6,17 +6,23 @@ use crate::NetworkMessage;
use super::*;
use beacon_chain::builder::Witness;
use beacon_chain::eth1_chain::CachingEth1Backend;
use beacon_chain::{
builder::Witness,
eth1_chain::CachingEth1Backend,
test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType},
};
pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
use lighthouse_network::{NetworkGlobals, Request};
use slog::{Drain, Level};
use slot_clock::SystemTimeSlotClock;
use slot_clock::TestingSlotClock;
use std::time::Duration;
use store::MemoryStore;
use tokio::sync::mpsc;
use types::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use types::MinimalEthSpec as E;
use types::{
test_utils::{SeedableRng, TestRandom, XorShiftRng},
MinimalEthSpec as E, SignedBeaconBlock,
};
type T = Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
type T = Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
struct TestRig {
beacon_processor_rx: mpsc::Receiver<WorkEvent<T>>,
@ -27,18 +33,18 @@ struct TestRig {
const D: Duration = Duration::new(0, 0);
impl TestRig {
fn test_setup(log_level: Option<Level>) -> (BlockLookups<T>, SyncNetworkContext<T>, Self) {
let log = {
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
fn test_setup(enable_log: bool) -> (BlockLookups<T>, SyncNetworkContext<T>, Self) {
let log = build_log(slog::Level::Debug, enable_log);
if let Some(log_level) = log_level {
slog::Logger::root(drain.filter_level(log_level).fuse(), slog::o!())
} else {
slog::Logger::root(drain.filter(|_| false).fuse(), slog::o!())
}
};
// Initialise a new beacon chain
let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
.default_spec()
.logger(log.clone())
.deterministic_keypairs(1)
.fresh_ephemeral_store()
.build();
let chain = harness.chain;
let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(100);
let (network_tx, network_rx) = mpsc::unbounded_channel();
@ -147,7 +153,7 @@ impl TestRig {
#[test]
fn test_single_block_lookup_happy_path() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let block = rig.rand_block();
let peer_id = PeerId::random();
@ -175,7 +181,7 @@ fn test_single_block_lookup_happy_path() {
#[test]
fn test_single_block_lookup_empty_response() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let block_hash = Hash256::random();
let peer_id = PeerId::random();
@ -193,7 +199,7 @@ fn test_single_block_lookup_empty_response() {
#[test]
fn test_single_block_lookup_wrong_response() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let block_hash = Hash256::random();
let peer_id = PeerId::random();
@ -215,7 +221,7 @@ fn test_single_block_lookup_wrong_response() {
#[test]
fn test_single_block_lookup_failure() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let block_hash = Hash256::random();
let peer_id = PeerId::random();
@ -232,7 +238,7 @@ fn test_single_block_lookup_failure() {
#[test]
fn test_single_block_lookup_becomes_parent_request() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let block = rig.rand_block();
let peer_id = PeerId::random();
@ -261,7 +267,7 @@ fn test_single_block_lookup_becomes_parent_request() {
#[test]
fn test_parent_lookup_happy_path() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root());
@ -289,7 +295,7 @@ fn test_parent_lookup_happy_path() {
#[test]
fn test_parent_lookup_wrong_response() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root());
@ -326,7 +332,7 @@ fn test_parent_lookup_wrong_response() {
#[test]
fn test_parent_lookup_empty_response() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root());
@ -358,7 +364,7 @@ fn test_parent_lookup_empty_response() {
#[test]
fn test_parent_lookup_rpc_failure() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root());
@ -370,7 +376,15 @@ fn test_parent_lookup_rpc_failure() {
let id1 = rig.expect_parent_request();
// The request fails. It should be tried again.
bl.parent_lookup_failed(id1, peer_id, &mut cx);
bl.parent_lookup_failed(
id1,
peer_id,
&mut cx,
RPCError::ErrorResponse(
RPCResponseErrorCode::ResourceUnavailable,
"older than eip4844".into(),
),
);
let id2 = rig.expect_parent_request();
// Send the right block this time.
@ -389,7 +403,7 @@ fn test_parent_lookup_rpc_failure() {
#[test]
fn test_parent_lookup_too_many_attempts() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root());
@ -404,7 +418,15 @@ fn test_parent_lookup_too_many_attempts() {
// make sure every error is accounted for
0 => {
// The request fails. It should be tried again.
bl.parent_lookup_failed(id, peer_id, &mut cx);
bl.parent_lookup_failed(
id,
peer_id,
&mut cx,
RPCError::ErrorResponse(
RPCResponseErrorCode::ResourceUnavailable,
"older than eip4844".into(),
),
);
}
_ => {
// Send a bad block this time. It should be tried again.
@ -425,7 +447,7 @@ fn test_parent_lookup_too_many_attempts() {
#[test]
fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root());
@ -439,7 +461,15 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
let id = rig.expect_parent_request();
if i % 2 != 0 {
// The request fails. It should be tried again.
bl.parent_lookup_failed(id, peer_id, &mut cx);
bl.parent_lookup_failed(
id,
peer_id,
&mut cx,
RPCError::ErrorResponse(
RPCResponseErrorCode::ResourceUnavailable,
"older than eip4844".into(),
),
);
} else {
// Send a bad block this time. It should be tried again.
let bad_block = rig.rand_block();
@ -459,7 +489,7 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
#[test]
fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
const PROCESSING_FAILURES: u8 = parent_lookup::PARENT_FAIL_TOLERANCE / 2 + 1;
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = Arc::new(rig.rand_block());
let block = rig.block_with_parent(parent.canonical_root());
@ -473,7 +503,15 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
for _ in 0..(parent_lookup::PARENT_FAIL_TOLERANCE - PROCESSING_FAILURES) {
let id = rig.expect_parent_request();
// The request fails. It should be tried again.
bl.parent_lookup_failed(id, peer_id, &mut cx);
bl.parent_lookup_failed(
id,
peer_id,
&mut cx,
RPCError::ErrorResponse(
RPCResponseErrorCode::ResourceUnavailable,
"older than eip4844".into(),
),
);
}
// Now fail processing a block in the parent request
@ -493,7 +531,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
#[test]
fn test_parent_lookup_too_deep() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let mut blocks =
Vec::<SignedBeaconBlock<E>>::with_capacity(parent_lookup::PARENT_DEPTH_TOLERANCE);
while blocks.len() < parent_lookup::PARENT_DEPTH_TOLERANCE {
@ -532,7 +570,7 @@ fn test_parent_lookup_too_deep() {
#[test]
fn test_parent_lookup_disconnection() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let peer_id = PeerId::random();
let trigger_block = rig.rand_block();
bl.search_parent(
@ -547,7 +585,7 @@ fn test_parent_lookup_disconnection() {
#[test]
fn test_single_block_lookup_ignored_response() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let block = rig.rand_block();
let peer_id = PeerId::random();
@ -576,7 +614,7 @@ fn test_single_block_lookup_ignored_response() {
#[test]
fn test_parent_lookup_ignored_response() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(None);
let (mut bl, mut cx, mut rig) = TestRig::test_setup(false);
let parent = rig.rand_block();
let block = rig.block_with_parent(parent.canonical_root());
@ -601,7 +639,7 @@ fn test_parent_lookup_ignored_response() {
/// This is a regression test.
#[test]
fn test_same_chain_race_condition() {
let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug));
let (mut bl, mut cx, mut rig) = TestRig::test_setup(true);
#[track_caller]
fn parent_lookups_consistency(bl: &BlockLookups<T>) {
@ -636,12 +674,12 @@ fn test_same_chain_race_condition() {
let peer_id = PeerId::random();
let trigger_block = blocks.pop().unwrap();
let chain_hash = trigger_block.canonical_root();
bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx);
bl.search_parent(chain_hash, trigger_block.clone().into(), peer_id, &mut cx);
for (i, block) in blocks.into_iter().rev().enumerate() {
let id = rig.expect_parent_request();
// the block
bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx);
bl.parent_lookup_response(id, peer_id, Some(block.clone().into()), D, &mut cx);
// the stream termination
bl.parent_lookup_response(id, peer_id, None, D, &mut cx);
// the processing request
@ -651,7 +689,11 @@ fn test_same_chain_race_condition() {
// one block was removed
bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx)
} else {
bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx)
bl.parent_block_processed(
chain_hash,
BlockError::ParentUnknown(block.into()).into(),
&mut cx,
)
}
parent_lookups_consistency(&bl)
}
@ -661,7 +703,7 @@ fn test_same_chain_race_condition() {
// Try to get this block again while the chain is being processed. We should not request it again.
let peer_id = PeerId::random();
bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx);
bl.search_parent(chain_hash, trigger_block.into(), peer_id, &mut cx);
parent_lookups_consistency(&bl);
let process_result = BatchProcessResult::Success {


@ -803,15 +803,15 @@ impl<T: BeaconChainTypes> SyncManager<T> {
peer_id: PeerId,
block_or_blob: BlockOrBlobs<T::EthSpec>,
) {
if let Some((chain_id, batch_id, block_responses)) = self
if let Some((chain_id, resp)) = self
.network
.range_sync_block_and_blob_response(id, block_or_blob)
{
match block_responses {
match resp.responses {
Ok(blocks) => {
for block in blocks
.into_iter()
.map(|block| Some(block))
.map(Some)
// chain the stream terminator
.chain(vec![None])
{
@ -819,7 +819,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
&mut self.network,
peer_id,
chain_id,
batch_id,
resp.batch_id,
id,
block,
);
@ -831,7 +831,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// With time we will want to downgrade this log
warn!(
self.log, "Blocks and blobs request for range received invalid data";
"peer_id" => %peer_id, "batch_id" => batch_id, "error" => e
"peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
);
// TODO: penalize the peer for being a bad boy
let id = RequestId::RangeBlobs { id };
@ -849,21 +849,21 @@ impl<T: BeaconChainTypes> SyncManager<T> {
peer_id: PeerId,
block_or_blob: BlockOrBlobs<T::EthSpec>,
) {
if let Some((batch_id, block_responses)) = self
if let Some(resp) = self
.network
.backfill_sync_block_and_blob_response(id, block_or_blob)
{
match block_responses {
match resp.responses {
Ok(blocks) => {
for block in blocks
.into_iter()
.map(|block| Some(block))
.map(Some)
// chain the stream terminator
.chain(vec![None])
{
match self.backfill_sync.on_block_response(
&mut self.network,
batch_id,
resp.batch_id,
&peer_id,
id,
block,
@ -883,7 +883,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// With time we will want to downgrade this log
warn!(
self.log, "Blocks and blobs request for backfill received invalid data";
"peer_id" => %peer_id, "batch_id" => batch_id, "error" => e
"peer_id" => %peer_id, "batch_id" => resp.batch_id, "error" => e
);
// TODO: penalize the peer for being a bad boy
let id = RequestId::BackFillBlobs { id };


@ -20,6 +20,17 @@ use std::sync::Arc;
use tokio::sync::mpsc;
use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};
pub struct BlocksAndBlobsByRangeResponse<T: EthSpec> {
pub batch_id: BatchId,
pub responses: Result<Vec<BlockWrapper<T>>, &'static str>,
}
pub struct BlocksAndBlobsByRangeRequest<T: EthSpec> {
pub chain_id: ChainId,
pub batch_id: BatchId,
pub block_blob_info: BlocksAndBlobsRequestInfo<T>,
}
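These two structs replace the tuples that the sync network context previously stored and returned for coupled blocks-and-blobs requests, so the call sites further down can destructure by field name instead of by position. A stand-in sketch of the access pattern, with hypothetical simplified field types (the real fields are `ChainId`, `BatchId`, and `BlocksAndBlobsRequestInfo<T>`):

struct BlocksAndBlobsByRangeRequest {
    chain_id: u64,
    batch_id: u64,
    block_blob_info: Vec<u8>,
}

fn main() {
    let req = BlocksAndBlobsByRangeRequest {
        chain_id: 1,
        batch_id: 2,
        block_blob_info: vec![],
    };
    // Named destructuring replaces positional access (`req.0`, `req.1`, ...) on the old tuple.
    let BlocksAndBlobsByRangeRequest { chain_id, batch_id, block_blob_info } = req;
    assert_eq!((chain_id, batch_id, block_blob_info.len()), (1, 2, 0));
}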
/// Wraps a Network channel to employ various RPC related network functionality for the Sync manager. This includes management of a global RPC request Id.
pub struct SyncNetworkContext<T: BeaconChainTypes> {
/// The network channel to relay messages to the Network service.
@ -38,8 +49,7 @@ pub struct SyncNetworkContext<T: BeaconChainTypes> {
backfill_requests: FnvHashMap<Id, BatchId>,
/// BlocksByRange requests paired with BlobsByRange requests made by the range.
range_blocks_and_blobs_requests:
FnvHashMap<Id, (ChainId, BatchId, BlocksAndBlobsRequestInfo<T::EthSpec>)>,
range_blocks_and_blobs_requests: FnvHashMap<Id, BlocksAndBlobsByRangeRequest<T::EthSpec>>,
/// BlocksByRange requests paired with BlobsByRange requests made by the backfill sync.
backfill_blocks_and_blobs_requests:
@ -198,8 +208,14 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
request_id,
})?;
let block_blob_info = BlocksAndBlobsRequestInfo::default();
self.range_blocks_and_blobs_requests
.insert(id, (chain_id, batch_id, block_blob_info));
self.range_blocks_and_blobs_requests.insert(
id,
BlocksAndBlobsByRangeRequest {
chain_id,
batch_id,
block_blob_info,
},
);
Ok(id)
}
}
@ -290,22 +306,30 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
&mut self,
request_id: Id,
block_or_blob: BlockOrBlobs<T::EthSpec>,
) -> Option<(
ChainId,
BatchId,
Result<Vec<BlockWrapper<T::EthSpec>>, &'static str>,
)> {
) -> Option<(ChainId, BlocksAndBlobsByRangeResponse<T::EthSpec>)> {
match self.range_blocks_and_blobs_requests.entry(request_id) {
Entry::Occupied(mut entry) => {
let (_, _, info) = entry.get_mut();
let req = entry.get_mut();
let info = &mut req.block_blob_info;
match block_or_blob {
BlockOrBlobs::Block(maybe_block) => info.add_block_response(maybe_block),
BlockOrBlobs::Blobs(maybe_sidecar) => info.add_sidecar_response(maybe_sidecar),
}
if info.is_finished() {
// If the request is finished, dequeue everything
let (chain_id, batch_id, info) = entry.remove();
Some((chain_id, batch_id, info.into_responses()))
let BlocksAndBlobsByRangeRequest {
chain_id,
batch_id,
block_blob_info,
} = entry.remove();
Some((
chain_id,
BlocksAndBlobsByRangeResponse {
batch_id,
responses: block_blob_info.into_responses(),
},
))
} else {
None
}
@ -323,7 +347,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
ByRangeRequestType::BlocksAndBlobs => self
.range_blocks_and_blobs_requests
.remove(&request_id)
.map(|(chain_id, batch_id, _info)| (chain_id, batch_id)),
.map(|req| (req.chain_id, req.batch_id)),
ByRangeRequestType::Blocks => self.range_requests.remove(&request_id),
}
}
@ -349,20 +373,19 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
is_stream_terminator: bool,
) -> Option<BatchId> {
if is_stream_terminator {
self.backfill_requests
.remove(&request_id)
.map(|batch_id| batch_id)
self.backfill_requests.remove(&request_id)
} else {
self.backfill_requests.get(&request_id).copied()
}
}
/// Received a blocks by range response for a request that couples blocks and blobs.
/// Received a blocks by range or blobs by range response for a request that couples blocks '
/// and blobs.
pub fn backfill_sync_block_and_blob_response(
&mut self,
request_id: Id,
block_or_blob: BlockOrBlobs<T::EthSpec>,
) -> Option<(BatchId, Result<Vec<BlockWrapper<T::EthSpec>>, &'static str>)> {
) -> Option<BlocksAndBlobsByRangeResponse<T::EthSpec>> {
match self.backfill_blocks_and_blobs_requests.entry(request_id) {
Entry::Occupied(mut entry) => {
let (_, info) = entry.get_mut();
@ -373,7 +396,10 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
if info.is_finished() {
// If the request is finished, dequeue everything
let (batch_id, info) = entry.remove();
Some((batch_id, info.into_responses()))
Some(BlocksAndBlobsByRangeResponse {
batch_id,
responses: info.into_responses(),
})
} else {
None
}
@ -534,16 +560,20 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
/// Check whether a batch for this epoch (and only this epoch) should request just blocks or
/// blocks and blobs.
#[allow(unused)]
pub fn batch_type(&self, epoch: types::Epoch) -> ByRangeRequestType {
// Induces a compile time panic if this doesn't hold true.
#[allow(clippy::assertions_on_constants)]
const _: () = assert!(
super::backfill_sync::BACKFILL_EPOCHS_PER_BATCH == 1
&& super::range_sync::EPOCHS_PER_BATCH == 1,
"To deal with alignment with 4844 boundaries, batches need to be of just one epoch"
);
#[cfg(test)]
{
// Keep tests only for blocks.
return ByRangeRequestType::Blocks;
ByRangeRequestType::Blocks
}
#[cfg(not(test))]
{

View File

@ -615,6 +615,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
///
/// If a previous batch has been validated and it had been re-processed, penalize the original
/// peer.
#[allow(clippy::modulo_one)]
fn advance_chain(&mut self, network: &mut SyncNetworkContext<T>, validating_epoch: Epoch) {
// make sure this epoch produces an advancement
if validating_epoch <= self.start_epoch {

View File

@ -372,26 +372,27 @@ where
#[cfg(test)]
mod tests {
use crate::service::RequestId;
use crate::sync::range_sync::ByRangeRequestType;
use crate::NetworkMessage;
use super::*;
use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
use beacon_chain::builder::Witness;
use beacon_chain::eth1_chain::CachingEth1Backend;
use beacon_chain::parking_lot::RwLock;
use beacon_chain::EngineState;
use lighthouse_network::rpc::BlocksByRangeRequest;
use lighthouse_network::Request;
use lighthouse_network::{rpc::StatusMessage, NetworkGlobals};
use slog::{o, Drain};
use tokio::sync::mpsc;
use slot_clock::SystemTimeSlotClock;
use std::collections::HashSet;
use std::sync::Arc;
use crate::beacon_processor::WorkEvent as BeaconWorkEvent;
use crate::service::RequestId;
use crate::NetworkMessage;
use beacon_chain::{
builder::Witness,
eth1_chain::CachingEth1Backend,
parking_lot::RwLock,
test_utils::{build_log, BeaconChainHarness, EphemeralHarnessType},
EngineState,
};
use lighthouse_network::{
rpc::{BlocksByRangeRequest, StatusMessage},
NetworkGlobals, Request,
};
use slog::o;
use slot_clock::TestingSlotClock;
use std::{collections::HashSet, sync::Arc};
use store::MemoryStore;
use tokio::sync::mpsc;
use types::{Hash256, MinimalEthSpec as E};
#[derive(Debug)]
@ -439,19 +440,7 @@ mod tests {
}
type TestBeaconChainType =
Witness<SystemTimeSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
fn build_log(level: slog::Level, enabled: bool) -> slog::Logger {
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
if enabled {
slog::Logger::root(drain.filter_level(level).fuse(), o!())
} else {
slog::Logger::root(drain.filter(|_| false).fuse(), o!())
}
}
Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;
#[allow(unused)]
struct TestRig {
@ -593,11 +582,20 @@ mod tests {
}
fn range(log_enabled: bool) -> (TestRig, RangeSync<TestBeaconChainType, FakeStorage>) {
let chain = Arc::new(FakeStorage::default());
let log = build_log(slog::Level::Trace, log_enabled);
// Initialise a new beacon chain
let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
.default_spec()
.logger(log.clone())
.deterministic_keypairs(1)
.fresh_ephemeral_store()
.build();
let chain = harness.chain;
let fake_store = Arc::new(FakeStorage::default());
let (beacon_processor_tx, beacon_processor_rx) = mpsc::channel(10);
let range_sync = RangeSync::<TestBeaconChainType, FakeStorage>::new(
chain.clone(),
fake_store.clone(),
log.new(o!("component" => "range")),
);
let (network_tx, network_rx) = mpsc::unbounded_channel();
@ -612,7 +610,7 @@ mod tests {
let test_rig = TestRig {
log,
beacon_processor_rx,
chain,
chain: fake_store,
cx,
network_rx,
globals,
@ -687,7 +685,7 @@ mod tests {
range.add_peer(&mut rig.cx, local_info, peer1, head_info);
let ((chain1, batch1), id1) = match rig.grab_request(&peer1).0 {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
(rig.cx.range_sync_response(id, true).unwrap(), id)
(rig.cx.range_sync_block_response(id, true).unwrap(), id)
}
other => panic!("unexpected request {:?}", other),
};
@ -706,7 +704,7 @@ mod tests {
range.add_peer(&mut rig.cx, local_info, peer2, finalized_info);
let ((chain2, batch2), id2) = match rig.grab_request(&peer2).0 {
RequestId::Sync(crate::sync::manager::RequestId::RangeBlocks { id }) => {
(rig.cx.range_sync_response(id, true).unwrap(), id)
(rig.cx.range_sync_block_response(id, true).unwrap(), id)
}
other => panic!("unexpected request {:?}", other),
};

View File

@ -1907,13 +1907,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
let margin_epochs = self.get_config().blob_prune_margin_epochs;
let end_epoch = earliest_prunable_epoch - margin_epochs;
if !force {
if last_pruned_epoch.as_u64() + self.get_config().epochs_per_blob_prune
if !force
&& last_pruned_epoch.as_u64() + self.get_config().epochs_per_blob_prune
> end_epoch.as_u64()
{
info!(self.log, "Blobs sidecars are pruned");
return Ok(());
}
{
info!(self.log, "Blobs sidecars are pruned");
return Ok(());
}
// Iterate block roots forwards from the oldest blob slot.

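The refactor above folds the nested `if !force { if .. }` into a single guard. A sketch of that guard pulled out as a pure function, which makes the skip condition easy to unit-test; the parameter names are illustrative, and the real check lives inside `HotColdDB` using the store config directly:

/// Returns `true` when blob pruning should be skipped: it is not forced and fewer than
/// `epochs_per_blob_prune` epochs have passed since the last prune reached `end_epoch`.
fn should_skip_blob_prune(
    force: bool,
    last_pruned_epoch: u64,
    end_epoch: u64,
    epochs_per_blob_prune: u64,
) -> bool {
    !force && last_pruned_epoch + epochs_per_blob_prune > end_epoch
}

fn main() {
    // With the default of one epoch per prune, pruning proceeds as soon as a newer epoch
    // becomes prunable, and is skipped otherwise.
    assert!(!should_skip_blob_prune(false, 9, 10, 1));
    assert!(should_skip_blob_prune(false, 10, 10, 1));
    // `force` always lets pruning proceed.
    assert!(!should_skip_blob_prune(true, 10, 10, 1));
}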
View File

@ -1,6 +1,6 @@
# Extends the mainnet preset
PRESET_BASE: 'mainnet'
CONFIG_NAME: testnet # needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis
CONFIG_NAME: 'eip4844' # must exist because of Prysm, otherwise it conflicts with mainnet genesis; it must also match the configuration in common_eth2_config/src/lib.rs to pass Lighthouse CI.
# Genesis
# ---------------------------------------------------------------

View File

@ -362,11 +362,12 @@ mod tests {
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_deploy_block = 42;
let testnet: Eth2NetworkConfig = Eth2NetworkConfig {
let testnet = Eth2NetworkConfig {
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
config,
kzg_trusted_setup: None,
};
testnet

View File

@ -7,8 +7,8 @@ mod system_time_slot_clock;
use std::time::Duration;
pub use crate::manual_slot_clock::ManualSlotClock;
pub use crate::manual_slot_clock::ManualSlotClock as TestingSlotClock;
pub use crate::manual_slot_clock::ManualSlotClock;
pub use crate::system_time_slot_clock::SystemTimeSlotClock;
pub use metrics::scrape_for_metrics;
use types::consts::merge::INTERVALS_PER_SLOT;

View File

@ -479,7 +479,7 @@ pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>
) -> Result<Container, DecodeError> {
if bytes.is_empty() {
return Container::try_from_iter(iter::empty()).map_err(|e| {
DecodeError::BytesInvalid(format!("Error trying to collect empty list: {:?}", e))
DecodeError::BytesInvalid(format!("Error trying to collect empty list: {e:?}"))
});
}
@ -494,8 +494,7 @@ pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>
if max_len.map_or(false, |max| num_items > max) {
return Err(DecodeError::BytesInvalid(format!(
"Variable length list of {} items exceeds maximum of {:?}",
num_items, max_len
"Variable length list of {num_items} items exceeds maximum of {max_len:?}",
)));
}
@ -519,7 +518,7 @@ pub fn decode_list_of_variable_length_items<T: Decode, Container: TryFromIter<T>
}),
|iter| iter.try_collect(),
)?
.map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {:?}", e)))
.map_err(|e| DecodeError::BytesInvalid(format!("Error collecting into container: {e:?}")))
}
#[cfg(test)]

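The `{e:?}` and `{num_items}` changes above switch to inline format arguments (stabilized in Rust 1.58), which capture a variable named inside the braces directly so the positional argument can be dropped. A tiny sketch showing the two forms produce identical output:

fn main() {
    let num_items = 7usize;
    let max_len: Option<usize> = Some(4);
    // Positional arguments (old style) and inline capture build the same string.
    let old = format!(
        "Variable length list of {} items exceeds maximum of {:?}",
        num_items, max_len
    );
    let new = format!("Variable length list of {num_items} items exceeds maximum of {max_len:?}");
    assert_eq!(old, new);
}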
View File

@ -49,7 +49,7 @@ pub fn verify_kzg_commitments_against_transactions<T: EthSpec>(
.flatten()
        // Need to use `itertools::zip_longest` here because a plain `zip` silently stops at the
        // shorter iterator and `itertools::zip_eq` panics on a length mismatch.
.zip_longest(kzg_commitments.into_iter())
.zip_longest(kzg_commitments.iter())
.enumerate()
.map(|(index, next)| match next {
EitherOrBoth::Both(hash, commitment) => Ok((hash?, commitment)),

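As the comment above notes, a plain `zip` would silently truncate and `zip_eq` would panic, while `zip_longest` lets a length mismatch surface as an error. A minimal sketch of that choice, assuming only the `itertools` crate and plain integers standing in for transaction hashes and KZG commitments:

use itertools::{EitherOrBoth, Itertools};

/// Checks that every "transaction hash" has a matching "commitment" and vice versa,
/// reporting the index of the first mismatch instead of panicking or truncating.
fn check_lengths(hashes: &[u64], commitments: &[u64]) -> Result<(), String> {
    hashes
        .iter()
        .zip_longest(commitments.iter())
        .enumerate()
        .try_for_each(|(index, pair)| match pair {
            EitherOrBoth::Both(_, _) => Ok(()),
            EitherOrBoth::Left(_) => Err(format!("missing commitment at index {index}")),
            EitherOrBoth::Right(_) => Err(format!("extra commitment at index {index}")),
        })
}

fn main() {
    assert!(check_lengths(&[1, 2], &[10, 20]).is_ok());
    assert!(check_lengths(&[1, 2, 3], &[10, 20]).is_err());
}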
View File

@ -34,7 +34,7 @@ async fn get_harness<E: EthSpec>(
// Set the state and block to be in the last slot of the `epoch_offset`th epoch.
let last_slot_of_epoch =
(MainnetEthSpec::genesis_epoch() + epoch_offset).end_slot(E::slots_per_epoch());
let harness = BeaconChainHarness::builder(E::default())
let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
.default_spec()
.keypairs(KEYPAIRS[0..num_validators].to_vec())
.fresh_ephemeral_store()

View File

@ -97,7 +97,7 @@ fn get_zero_hash(height: usize) -> &'static [u8] {
if height <= ZERO_HASHES_MAX_INDEX {
&ZERO_HASHES[height]
} else {
panic!("Tree exceeds MAX_TREE_DEPTH of {}", ZERO_HASHES_MAX_INDEX)
panic!("Tree exceeds MAX_TREE_DEPTH of {ZERO_HASHES_MAX_INDEX}")
}
}

View File

@ -9,7 +9,6 @@ name = "benches"
harness = false
[dependencies]
serde-big-array = {version = "0.3.2", features = ["const-generics"]}
merkle_proof = { path = "../../consensus/merkle_proof" }
bls = { path = "../../crypto/bls", features = ["arbitrary"] }
kzg = { path = "../../crypto/kzg", features = ["arbitrary"] }
@ -39,10 +38,10 @@ cached_tree_hash = { path = "../cached_tree_hash" }
serde_yaml = "0.8.13"
tempfile = "3.1.0"
derivative = "2.1.1"
rusqlite = { version = "0.25.3", features = ["bundled"], optional = true }
# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by
# `AbstractExecPayload`
arbitrary = { version = "1.0", features = ["derive"] }
rusqlite = { version = "0.28.0", features = ["bundled"], optional = true }
eth2_serde_utils = "0.1.1"
regex = "1.5.5"
lazy_static = "1.4.0"

View File

@ -1,5 +1,5 @@
use crate::test_utils::TestRandom;
use crate::*;
use crate::{blobs_sidecar::KzgCommitments, test_utils::TestRandom};
use derivative::Derivative;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
@ -69,7 +69,7 @@ pub struct BeaconBlockBody<T: EthSpec, Payload: AbstractExecPayload<T> = FullPay
pub bls_to_execution_changes:
VariableList<SignedBlsToExecutionChange, T::MaxBlsToExecutionChanges>,
#[superstruct(only(Eip4844))]
pub blob_kzg_commitments: VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
pub blob_kzg_commitments: KzgCommitments<T>,
#[superstruct(only(Base, Altair))]
#[ssz(skip_serializing, skip_deserializing)]
#[tree_hash(skip_hashing)]

View File

@ -1,5 +1,5 @@
use crate::test_utils::TestRandom;
use crate::{Blob, EthSpec, Hash256, SignedRoot, Slot};
use crate::{Blob, EthSpec, Hash256, KzgCommitment, SignedRoot, Slot};
use derivative::Derivative;
use kzg::KzgProof;
use serde_derive::{Deserialize, Serialize};
@ -9,6 +9,9 @@ use ssz_types::VariableList;
use test_random_derive::TestRandom;
use tree_hash_derive::TreeHash;
pub type KzgCommitments<T> = VariableList<KzgCommitment, <T as EthSpec>::MaxBlobsPerBlock>;
pub type Blobs<T> = VariableList<Blob<T>, <T as EthSpec>::MaxBlobsPerBlock>;
#[derive(
Debug,
Clone,
@ -29,7 +32,7 @@ pub struct BlobsSidecar<T: EthSpec> {
pub beacon_block_root: Hash256,
pub beacon_block_slot: Slot,
#[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")]
pub blobs: VariableList<Blob<T>, T::MaxBlobsPerBlock>,
pub blobs: Blobs<T>,
pub kzg_aggregated_proof: KzgProof,
}

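The new `KzgCommitments<T>` and `Blobs<T>` aliases exist so call sites no longer spell out `VariableList<_, <T as EthSpec>::MaxBlobsPerBlock>` each time. A simplified sketch of this alias-over-associated-type pattern; the trait, list type, and length marker below are stand-ins (the real code uses `ssz_types::VariableList` with typenum lengths):

use std::marker::PhantomData;

trait Spec {
    type MaxBlobsPerBlock;
}

/// Stand-in for `ssz_types::VariableList<T, N>`: the maximum length is carried in the type.
struct VariableList<T, N> {
    items: Vec<T>,
    _max_len: PhantomData<N>,
}

struct Four; // stand-in for a typenum length such as `U4`

struct Mainnet;
impl Spec for Mainnet {
    type MaxBlobsPerBlock = Four;
}

type Blob = [u8; 32]; // stand-in for the real fixed-size blob type
// The alias spells out the bound once; every other use site just writes `Blobs<Mainnet>`.
type Blobs<S> = VariableList<Blob, <S as Spec>::MaxBlobsPerBlock>;

fn main() {
    let blobs: Blobs<Mainnet> = VariableList {
        items: Vec::new(),
        _max_len: PhantomData,
    };
    assert!(blobs.items.is_empty());
}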
View File

@ -121,7 +121,7 @@ pub use crate::beacon_block_body::{
pub use crate::beacon_block_header::BeaconBlockHeader;
pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee};
pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *};
pub use crate::blobs_sidecar::BlobsSidecar;
pub use crate::blobs_sidecar::{Blobs, BlobsSidecar, KzgCommitments};
pub use crate::bls_to_execution_change::BlsToExecutionChange;
pub use crate::chain_spec::{ChainSpec, Config, Domain};
pub use crate::checkpoint::Checkpoint;

View File

@ -265,7 +265,7 @@ impl<E: EthSpec, Payload: AbstractExecPayload<E>> SignedBeaconBlock<E, Payload>
.map_err(|_| BlobReconstructionError::InconsistentFork)?;
if kzg_commitments.is_empty() {
Ok(BlobsSidecar::empty_from_parts(
block_root_opt.unwrap_or(self.canonical_root()),
block_root_opt.unwrap_or_else(|| self.canonical_root()),
self.slot(),
))
} else {

View File

@ -11,13 +11,11 @@ eth2_ssz = "0.4.1"
eth2_ssz_derive = "0.3.1"
tree_hash = "0.4.1"
derivative = "2.1.1"
rand = "0.7.3"
serde = "1.0.116"
serde_derive = "1.0.116"
eth2_serde_utils = "0.1.1"
hex = "0.4.2"
eth2_hashing = "0.3.0"
ethereum-types = "0.12.1"
c-kzg = {git = "https://github.com/ethereum/c-kzg-4844", rev = "69f6155d7524247be9d3f54ab3bfbe33a0345622" }
arbitrary = { version = "1.0", features = ["derive"], optional = true }

View File

@ -99,7 +99,7 @@ impl FromStr for KzgCommitment {
impl Debug for KzgCommitment {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", eth2_serde_utils::hex::encode(&self.0))
write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
}
}

View File

@ -123,7 +123,7 @@ impl FromStr for KzgProof {
impl Debug for KzgProof {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", eth2_serde_utils::hex::encode(&self.0))
write!(f, "{}", eth2_serde_utils::hex::encode(self.0))
}
}

View File

@ -72,7 +72,7 @@ impl Kzg {
));
}
let commitments = expected_kzg_commitments
.into_iter()
.iter()
.map(|comm| comm.0.into())
.collect::<Vec<c_kzg::KZGCommitment>>();
let proof: c_kzg::KZGProof = kzg_aggregated_proof.0.into();

View File

@ -215,7 +215,7 @@ fn initialize_state_with_validators<T: EthSpec>(
// Seed RANDAO with Eth1 entropy
state.fill_randao_mixes_with(eth1_block_hash);
for keypair in keypairs.into_iter() {
for keypair in keypairs.iter() {
let withdrawal_credentials = |pubkey: &PublicKey| {
let mut credentials = hash(&pubkey.as_ssz_bytes());
credentials[0] = spec.bls_withdrawal_prefix_byte;

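The closure above derives BLS withdrawal credentials in the usual way: hash the SSZ-encoded pubkey and overwrite byte 0 with the withdrawal prefix (0x00 for BLS credentials). A hedged sketch using the `sha2` crate as a stand-in for Lighthouse's `hash` helper, with dummy pubkey bytes:

use sha2::{Digest, Sha256};

const BLS_WITHDRAWAL_PREFIX_BYTE: u8 = 0x00;

fn bls_withdrawal_credentials(pubkey_ssz_bytes: &[u8]) -> [u8; 32] {
    // SHA-256 of the serialized pubkey, with byte 0 replaced by the withdrawal prefix.
    let mut credentials = [0u8; 32];
    credentials.copy_from_slice(&Sha256::digest(pubkey_ssz_bytes));
    credentials[0] = BLS_WITHDRAWAL_PREFIX_BYTE;
    credentials
}

fn main() {
    let dummy_pubkey = [0u8; 48]; // a serialized BLS pubkey is 48 bytes
    let credentials = bls_withdrawal_credentials(&dummy_pubkey);
    assert_eq!(credentials[0], BLS_WITHDRAWAL_PREFIX_BYTE);
}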
View File

@ -1357,27 +1357,27 @@ fn prune_blobs_on_startup_false() {
fn epochs_per_blob_prune_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| assert!(config.epochs_per_blob_prune == 1));
.with_config(|config| assert!(config.store.epochs_per_blob_prune == 1));
}
#[test]
fn epochs_per_blob_prune_on_startup_five() {
CommandLineTest::new()
.flag("epochs-per-blob-prune", Some(5))
.flag("epochs-per-blob-prune", Some("5"))
.run_with_zero_port()
.with_config(|config| assert!(!config.epochs_per_blob_prune == 5));
.with_config(|config| assert!(config.store.epochs_per_blob_prune == 5));
}
#[test]
fn blob_prune_margin_epochs_default() {
CommandLineTest::new()
.run_with_zero_port()
.with_config(|config| assert!(config.blob_prune_margin_epochs == 0));
.with_config(|config| assert!(config.store.blob_prune_margin_epochs == 0));
}
#[test]
fn blob_prune_margin_epochs_on_startup_ten() {
CommandLineTest::new()
.flag("blob-prune-margin-epochs", Some(10))
.flag("blob-prune-margin-epochs", Some("10"))
.run_with_zero_port()
.with_config(|config| assert!(!config.blob_prune_margin_epochs == Some(10)));
.with_config(|config| assert!(config.store.blob_prune_margin_epochs == 10));
}
#[test]
fn reconstruct_historic_states_flag() {

View File

@ -306,7 +306,7 @@ impl<E: EthSpec> Tester<E> {
));
}
let harness = BeaconChainHarness::builder(E::default())
let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
.spec(spec.clone())
.keypairs(vec![])
.genesis_state_ephemeral_store(case.anchor_state.clone())

View File

@ -12,9 +12,9 @@ path = "tests/main.rs"
[dependencies]
tempfile = "3.1.0"
types = { path = "../../consensus/types" }
rusqlite = { version = "0.25.3", features = ["bundled"] }
rusqlite = { version = "0.28.0", features = ["bundled"] }
r2d2 = "0.8.9"
r2d2_sqlite = "0.18.0"
r2d2_sqlite = "0.21.0"
serde = "1.0.116"
serde_derive = "1.0.116"
serde_json = "1.0.58"

View File

@ -162,8 +162,8 @@ impl SlashingDatabase {
/// The exclusive locking mode also has the benefit of applying to other processes, so multiple
/// Lighthouse processes trying to access the same database will also be blocked.
fn apply_pragmas(conn: &mut rusqlite::Connection) -> Result<(), rusqlite::Error> {
conn.pragma_update(None, "foreign_keys", &true)?;
conn.pragma_update(None, "locking_mode", &"EXCLUSIVE")?;
conn.pragma_update(None, "foreign_keys", true)?;
conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?;
Ok(())
}
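The `&true` to `true` change above reflects the rusqlite 0.28 API, where `pragma_update` takes the pragma value by value via `ToSql` rather than by reference. A small self-contained sketch against an in-memory database, assuming rusqlite 0.28 as pinned in this diff:

use rusqlite::Connection;

fn apply_pragmas(conn: &Connection) -> Result<(), rusqlite::Error> {
    // Values are passed directly; rusqlite converts them through `ToSql`.
    conn.pragma_update(None, "foreign_keys", true)?;
    conn.pragma_update(None, "locking_mode", "EXCLUSIVE")?;
    Ok(())
}

fn main() -> Result<(), rusqlite::Error> {
    let conn = Connection::open_in_memory()?;
    apply_pragmas(&conn)
}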