Merge branch 'unstable' into merge-unstable-to-deneb-20230822

# Conflicts:
#	beacon_node/beacon_chain/src/builder.rs
#	beacon_node/beacon_chain/tests/store_tests.rs
#	beacon_node/client/src/builder.rs
#	beacon_node/src/config.rs
#	beacon_node/store/src/hot_cold_store.rs
#	lighthouse/tests/beacon_node.rs

commit 8a6f171b2a

.github/workflows/docker-antithesis.yml (vendored): 35 deletions
@@ -1,35 +0,0 @@
-name: docker antithesis
-
-on:
-    push:
-        branches:
-            - unstable
-
-concurrency:
-    group: ${{ github.workflow }}-${{ github.ref }}
-    cancel-in-progress: true
-
-env:
-    ANTITHESIS_PASSWORD: ${{ secrets.ANTITHESIS_PASSWORD }}
-    ANTITHESIS_USERNAME: ${{ secrets.ANTITHESIS_USERNAME }}
-    ANTITHESIS_SERVER: ${{ secrets.ANTITHESIS_SERVER }}
-    REPOSITORY: ${{ secrets.ANTITHESIS_REPOSITORY }}
-    IMAGE_NAME: lighthouse
-    TAG: libvoidstar
-
-jobs:
-    build-docker:
-        runs-on: ubuntu-22.04
-        steps:
-            - uses: actions/checkout@v3
-            - name: Update Rust
-              run: rustup update stable
-            - name: Dockerhub login
-              run: |
-                  echo "${ANTITHESIS_PASSWORD}" | docker login --username ${ANTITHESIS_USERNAME} https://${ANTITHESIS_SERVER} --password-stdin
-            - name: Build AMD64 dockerfile (with push)
-              run: |
-                  docker build \
-                      --tag ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG} \
-                      --file ./testing/antithesis/Dockerfile.libvoidstar .
-                  docker push ${ANTITHESIS_SERVER}/${REPOSITORY}/${IMAGE_NAME}:${TAG}
Cargo.lock (generated): 7 changes

@@ -808,14 +808,13 @@ dependencies = [

 [[package]]
 name = "blst"
-version = "0.3.10"
+version = "0.3.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a30d0edd9dd1c60ddb42b80341c7852f6f985279a5c1a83659dcb65899dec99"
+checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b"
 dependencies = [
  "cc",
  "glob",
  "threadpool",
- "which",
  "zeroize",
 ]

@@ -1197,7 +1196,6 @@ dependencies = [
 "logging",
 "monitoring_api",
 "network",
-"num_cpus",
 "operation_pool",
 "parking_lot 0.12.1",
 "sensitive_url",

@@ -5256,7 +5254,6 @@ dependencies = [
 "logging",
 "lru_cache",
 "matches",
-"num_cpus",
 "operation_pool",
 "parking_lot 0.12.1",
 "rand 0.8.5",
Makefile: 2 changes

@@ -215,7 +215,7 @@ arbitrary-fuzz:
 # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database)
 audit:
 	cargo install --force cargo-audit
-	cargo audit --ignore RUSTSEC-2020-0071
+	cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2022-0093

 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
 vendor:
@@ -4943,6 +4943,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             self.log,
             "Produced block on state";
             "block_size" => block_size,
+            "slot" => block.slot(),
         );

         metrics::observe(&metrics::BLOCK_SIZE, block_size as f64);
@@ -5921,14 +5922,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt {
             (state, state_root)
         } else {
-            let state_root = head_block.state_root;
-            let state = self
+            let block_state_root = head_block.state_root;
+            let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch());
+            let (state_root, state) = self
                 .store
                 .get_inconsistent_state_for_attestation_verification_only(
-                    &state_root,
-                    Some(head_block.slot),
+                    &head_block_root,
+                    max_slot,
+                    block_state_root,
                 )?
-                .ok_or(Error::MissingBeaconState(head_block.state_root))?;
+                .ok_or(Error::MissingBeaconState(block_state_root))?;
             (state, state_root)
         };
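[Aside — illustrative sketch, not part of the commit] The new `max_slot` bound above is simply the first slot of the shuffling epoch. The arithmetic, with plain `u64` stand-ins for `Epoch`/`Slot`:

    // Sketch of Epoch::start_slot with plain integers: the first slot of an
    // epoch, used above to cap how far the returned state may be advanced.
    fn epoch_start_slot(epoch: u64, slots_per_epoch: u64) -> u64 {
        epoch * slots_per_epoch
    }

    fn main() {
        // With 32-slot epochs, shuffling epoch 9 starts at slot 288.
        assert_eq!(epoch_start_slot(9, 32), 288);
    }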
@@ -321,9 +321,17 @@ where
             .deconstruct()
             .0;

-        let state = self
+        let max_slot = self
+            .justified_checkpoint
+            .epoch
+            .start_slot(E::slots_per_epoch());
+        let (_, state) = self
             .store
-            .get_state(&justified_block.state_root(), Some(justified_block.slot()))
+            .get_advanced_hot_state(
+                self.justified_checkpoint.root,
+                max_slot,
+                justified_block.state_root(),
+            )
             .map_err(Error::FailedToReadState)?
             .ok_or_else(|| Error::MissingState(justified_block.state_root()))?;

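[Aside — illustrative sketch, not part of the commit] This hunk is the first of several that swap `get_state` for `get_advanced_hot_state`. Judging from the call sites, it returns the state root together with the state, preferring a skip-slot-advanced state capped at `max_slot` and falling back to the exact post-state. A toy model of that assumed lookup order (all types are simplified stand-ins, not Lighthouse's real API, which lives in `beacon_node/store/src/hot_cold_store.rs`, one of the conflicted files):

    use std::collections::HashMap;

    struct ToyStore {
        // block_root -> (state_root, slot, state) for a skip-slot-advanced state
        advanced: HashMap<u64, (u64, u64, String)>,
        // state_root -> state for exact (unadvanced) post-states
        exact: HashMap<u64, String>,
    }

    impl ToyStore {
        fn get_advanced_hot_state(
            &self,
            block_root: u64,
            max_slot: u64,
            state_root: u64,
        ) -> Option<(u64, String)> {
            // Prefer an advanced state, but never one beyond `max_slot`.
            if let Some((root, slot, state)) = self.advanced.get(&block_root) {
                if *slot <= max_slot {
                    return Some((*root, state.clone()));
                }
            }
            // Fall back to the exact post-state of the block.
            self.exact.get(&state_root).map(|s| (state_root, s.clone()))
        }
    }

    fn main() {
        let mut store = ToyStore { advanced: HashMap::new(), exact: HashMap::new() };
        store.exact.insert(7, "state@32".to_string());
        store.advanced.insert(1, (8, 35, "state@35".to_string()));
        // Advanced state is within the cap, so it wins over the exact one.
        assert_eq!(store.get_advanced_hot_state(1, 36, 7).unwrap().0, 8);
        // A cap below the advanced slot falls back to the exact post-state.
        assert_eq!(store.get_advanced_hot_state(1, 34, 7).unwrap().0, 7);
    }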
@@ -1358,7 +1358,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {

     // Perform a sanity check on the pre-state.
     let parent_slot = parent.beacon_block.slot();
-    if state.slot() < parent_slot || state.slot() > parent_slot + 1 {
+    if state.slot() < parent_slot || state.slot() > block.slot() {
         return Err(BeaconChainError::BadPreState {
             parent_root: parent.beacon_block_root,
             parent_slot,
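[Aside — illustrative sketch, not part of the commit] The relaxed pre-state bound as standalone arithmetic: previously the pre-state could sit at most one slot past the parent; with advanced states it may now sit anywhere up to the new block's slot:

    // Old check: state_slot <= parent_slot + 1. New check: state_slot <= block_slot.
    fn pre_state_ok(state_slot: u64, parent_slot: u64, block_slot: u64) -> bool {
        state_slot >= parent_slot && state_slot <= block_slot
    }

    fn main() {
        // A pre-state advanced 3 slots past its parent is now accepted for a
        // block at slot 36; the old `parent_slot + 1` bound would reject it.
        assert!(pre_state_ok(35, 32, 36));
        assert!(!pre_state_ok(31, 32, 36)); // still can't precede the parent
    }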
@@ -1860,13 +1860,18 @@ fn load_parent<T: BeaconChainTypes, B: AsBlock<T::EthSpec>>(
         BlockError::from(BeaconChainError::MissingBeaconBlock(block.parent_root()))
     })?;

-    // Load the parent blocks state from the database, returning an error if it is not found.
+    // Load the parent block's state from the database, returning an error if it is not found.
     // It is an error because if we know the parent block we should also know the parent state.
-    let parent_state_root = parent_block.state_root();
-    let parent_state = chain
-        .get_state(&parent_state_root, Some(parent_block.slot()))?
+    // Retrieve any state that is advanced through to at most `block.slot()`: this is
+    // particularly important if `block` descends from the finalized/split block, but at a slot
+    // prior to the finalized slot (which is invalid and inaccessible in our DB schema).
+    let (parent_state_root, parent_state) = chain
+        .store
+        .get_advanced_hot_state(root, block.slot(), parent_block.state_root())?
         .ok_or_else(|| {
-            BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root))
+            BeaconChainError::DBInconsistent(
+                format!("Missing state for parent block {root:?}",),
+            )
         })?;

     metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES);
@@ -26,8 +26,9 @@ use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::RwLock;
 use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
 use slasher::Slasher;
-use slog::{crit, error, info, Logger};
+use slog::{crit, debug, error, info, Logger};
 use slot_clock::{SlotClock, TestingSlotClock};
+use state_processing::per_slot_processing;
 use std::marker::PhantomData;
 use std::sync::Arc;
 use std::time::Duration;
@@ -291,7 +292,7 @@ where
         let genesis_state = store
             .get_state(&genesis_block.state_root(), Some(genesis_block.slot()))
             .map_err(|e| descriptive_db_error("genesis state", &e))?
-            .ok_or("Genesis block not found in store")?;
+            .ok_or("Genesis state not found in store")?;

         self.genesis_time = Some(genesis_state.genesis_time());

@@ -386,6 +387,16 @@ where
         let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?;
         self = updated_builder;

+        // Stage the database's metadata fields for atomic storage when `build` is called.
+        // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent
+        // historic states from being retained (unless `--reconstruct-historic-states` is set).
+        let retain_historic_states = self.chain_config.reconstruct_historic_states;
+        self.pending_io_batch.push(
+            store
+                .init_anchor_info(genesis.beacon_block.message(), retain_historic_states)
+                .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?,
+        );
+
         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis)
             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
         let current_slot = None;
@@ -412,21 +423,28 @@ where
         weak_subj_block: SignedBeaconBlock<TEthSpec>,
         genesis_state: BeaconState<TEthSpec>,
     ) -> Result<Self, String> {
-        let store = self.store.clone().ok_or("genesis_state requires a store")?;
+        let store = self
+            .store
+            .clone()
+            .ok_or("weak_subjectivity_state requires a store")?;
+        let log = self
+            .log
+            .as_ref()
+            .ok_or("weak_subjectivity_state requires a log")?;

-        let weak_subj_slot = weak_subj_state.slot();
-        let weak_subj_block_root = weak_subj_block.canonical_root();
-        let weak_subj_state_root = weak_subj_block.state_root();
-
-        // Check that the given state lies on an epoch boundary. Due to the database only storing
-        // full states on epoch boundaries and at restore points it would be difficult to support
-        // starting from a mid-epoch state.
-        if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 {
-            return Err(format!(
-                "Checkpoint state at slot {} is not aligned to epoch start. \
-                Please supply an aligned checkpoint with state.slot % 32 == 0",
-                weak_subj_slot,
-            ));
+        // Ensure the state is advanced to an epoch boundary.
+        let slots_per_epoch = TEthSpec::slots_per_epoch();
+        if weak_subj_state.slot() % slots_per_epoch != 0 {
+            debug!(
+                log,
+                "Advancing checkpoint state to boundary";
+                "state_slot" => weak_subj_state.slot(),
+                "block_slot" => weak_subj_block.slot(),
+            );
+            while weak_subj_state.slot() % slots_per_epoch != 0 {
+                per_slot_processing(&mut weak_subj_state, None, &self.spec)
+                    .map_err(|e| format!("Error advancing state: {e:?}"))?;
+            }
         }

         // Prime all caches before storing the state in the database and computing the tree hash
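[Aside — illustrative sketch, not part of the commit] The advancement loop above calls `per_slot_processing` once per missing slot; the iteration count is just the distance to the next epoch boundary:

    fn slots_until_epoch_boundary(slot: u64, slots_per_epoch: u64) -> u64 {
        (slots_per_epoch - slot % slots_per_epoch) % slots_per_epoch
    }

    fn main() {
        // A checkpoint state at slot 285 with 32-slot epochs needs 3 empty-slot
        // transitions to reach the boundary at 288; an aligned state needs none.
        assert_eq!(slots_until_epoch_boundary(285, 32), 3);
        assert_eq!(slots_until_epoch_boundary(288, 32), 0);
    }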
@@ -434,21 +452,20 @@ where
         weak_subj_state
             .build_caches(&self.spec)
             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;
-        weak_subj_state
+        let weak_subj_state_root = weak_subj_state
             .update_tree_hash_cache()
             .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?;

-        let latest_block_slot = weak_subj_state.latest_block_header().slot;
+        let weak_subj_slot = weak_subj_state.slot();
+        let weak_subj_block_root = weak_subj_block.canonical_root();

-        // We can only validate the block root if it exists in the state. We can't calculated it
-        // from the `latest_block_header` because the state root might be set to the zero hash.
-        if let Ok(state_slot_block_root) = weak_subj_state.get_block_root(latest_block_slot) {
-            if weak_subj_block_root != *state_slot_block_root {
-                return Err(format!(
-                    "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}",
-                    weak_subj_block_root, state_slot_block_root
-                ));
-            }
+        // Validate the state's `latest_block_header` against the checkpoint block.
+        let state_latest_block_root = weak_subj_state.get_latest_block_root(weak_subj_state_root);
+        if weak_subj_block_root != state_latest_block_root {
+            return Err(format!(
+                "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}",
+                weak_subj_block_root, state_latest_block_root
+            ));
         }

         // Check that the checkpoint state is for the same network as the genesis state.
@@ -464,7 +481,7 @@ where

         // Set the store's split point *before* storing genesis so that genesis is stored
         // immediately in the freezer DB.
-        store.set_split(weak_subj_slot, weak_subj_state_root);
+        store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root);
         let (_, updated_builder) = self.set_genesis_state(genesis_state)?;
         self = updated_builder;

@@ -480,10 +497,11 @@ where
         // Stage the database's metadata fields for atomic storage when `build` is called.
         // This prevents the database from restarting in an inconsistent state if the anchor
         // info or split point is written before the `PersistedBeaconChain`.
+        let retain_historic_states = self.chain_config.reconstruct_historic_states;
         self.pending_io_batch.push(store.store_split_in_batch());
         self.pending_io_batch.push(
             store
-                .init_anchor_info(weak_subj_block.message())
+                .init_anchor_info(weak_subj_block.message(), retain_historic_states)
                 .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?,
         );

@@ -685,9 +703,8 @@ where
             Err(e) => return Err(descriptive_db_error("head block", &e)),
         };

-        let head_state_root = head_block.state_root();
-        let head_state = store
-            .get_state(&head_state_root, Some(head_block.slot()))
+        let (_head_state_root, head_state) = store
+            .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root())
             .map_err(|e| descriptive_db_error("head state", &e))?
             .ok_or("Head state not found in store")?;

@@ -47,7 +47,8 @@ use crate::{
 };
 use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead};
 use fork_choice::{
-    ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses,
+    ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock,
+    ResetPayloadStatuses,
 };
 use itertools::process_results;
 use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
@@ -298,10 +299,10 @@ impl<T: BeaconChainTypes> CanonicalHead<T> {
         let beacon_block = store
             .get_full_block(&beacon_block_root)?
             .ok_or(Error::MissingBeaconBlock(beacon_block_root))?;
-        let beacon_state_root = beacon_block.state_root();
-        let beacon_state = store
-            .get_state(&beacon_state_root, Some(beacon_block.slot()))?
-            .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+        let current_slot = fork_choice.fc_store().get_current_slot();
+        let (_, beacon_state) = store
+            .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())?
+            .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?;

         let snapshot = BeaconSnapshot {
             beacon_block_root,
@@ -669,10 +670,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .get_full_block(&new_view.head_block_root)?
             .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?;

-        let beacon_state_root = beacon_block.state_root();
-        let beacon_state: BeaconState<T::EthSpec> = self
+        let (_, beacon_state) = self
             .store
-            .get_state(&beacon_state_root, Some(beacon_block.slot()))?
-            .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+            .get_advanced_hot_state(
+                new_view.head_block_root,
+                current_slot,
+                beacon_block.state_root(),
+            )?
+            .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?;

         Ok(BeaconSnapshot {
             beacon_block: Arc::new(beacon_block),
@@ -21,8 +21,11 @@ pub struct ServerSentEventHandler<T: EthSpec> {
 }

 impl<T: EthSpec> ServerSentEventHandler<T> {
-    pub fn new(log: Logger) -> Self {
-        Self::new_with_capacity(log, DEFAULT_CHANNEL_CAPACITY)
+    pub fn new(log: Logger, capacity_multiplier: usize) -> Self {
+        Self::new_with_capacity(
+            log,
+            capacity_multiplier.saturating_mul(DEFAULT_CHANNEL_CAPACITY),
+        )
     }

     pub fn new_with_capacity(log: Logger, capacity: usize) -> Self {
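[Aside — illustrative sketch, not part of the commit] `saturating_mul` clamps at `usize::MAX` rather than overflowing (which would panic in debug builds), so an absurd multiplier degrades gracefully. A standalone check; the capacity constant here is illustrative, not the real value:

    fn main() {
        const DEFAULT_CHANNEL_CAPACITY: usize = 2048; // illustrative value only
        let capacity = 4usize.saturating_mul(DEFAULT_CHANNEL_CAPACITY);
        assert_eq!(capacity, 8192);
        assert_eq!(usize::MAX.saturating_mul(2), usize::MAX); // clamped, no panic
    }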
|
@ -289,6 +289,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
|
|||||||
debug!(log, "Database consolidation started");
|
debug!(log, "Database consolidation started");
|
||||||
|
|
||||||
let finalized_state_root = notif.finalized_state_root;
|
let finalized_state_root = notif.finalized_state_root;
|
||||||
|
let finalized_block_root = notif.finalized_checkpoint.root;
|
||||||
|
|
||||||
let finalized_state = match db.get_state(&finalized_state_root.into(), None) {
|
let finalized_state = match db.get_state(&finalized_state_root.into(), None) {
|
||||||
Ok(Some(state)) => state,
|
Ok(Some(state)) => state,
|
||||||
@@ -342,7 +343,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
             }
         };

-        match migrate_database(db.clone(), finalized_state_root.into(), &finalized_state) {
+        match migrate_database(
+            db.clone(),
+            finalized_state_root.into(),
+            finalized_block_root,
+            &finalized_state,
+        ) {
             Ok(()) => {}
             Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                 debug!(
@@ -9,7 +9,7 @@ use beacon_chain::{
     test_utils::{
         test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
     },
-    BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped,
+    BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped,
 };
 use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
 use int_to_bytes::int_to_bytes32;
@@ -47,6 +47,10 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp

     let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .spec(spec)
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .fresh_ephemeral_store()
         .mock_execution_layer()
@@ -79,6 +83,10 @@ fn get_harness_capella_spec(

     let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .spec(spec.clone())
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .keypairs(validator_keypairs)
         .withdrawal_keypairs(
             KEYPAIRS[0..validator_count]
@@ -7,7 +7,8 @@ use beacon_chain::{
     AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock,
 };
 use beacon_chain::{
-    BeaconSnapshot, BlockError, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer,
+    BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock,
+    NotifyExecutionLayer,
 };
 use lazy_static::lazy_static;
 use logging::test_logger;
@@ -138,6 +139,10 @@ async fn get_chain_segment_with_signed_blobs() -> (
 fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
     let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .default_spec()
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .fresh_ephemeral_store()
         .mock_execution_layer()
@@ -7,7 +7,7 @@ use beacon_chain::otb_verification_service::{
 use beacon_chain::{
     canonical_head::{CachedHead, CanonicalHead},
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
-    BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer,
+    BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer,
     OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped,
     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
@@ -59,6 +59,10 @@ impl InvalidPayloadRig {

     let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .spec(spec)
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .logger(test_logger())
         .deterministic_keypairs(VALIDATOR_COUNT)
         .mock_execution_layer()
@@ -12,7 +12,7 @@ use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
 use beacon_chain::{
     data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError,
     migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot,
-    ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
+    BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
 };
 use eth2_network_config::get_trusted_setup;
 use kzg::TrustedSetup;
@@ -20,7 +20,8 @@ use lazy_static::lazy_static;
 use logging::test_logger;
 use maplit::hashset;
 use rand::Rng;
-use state_processing::BlockReplayer;
+use slot_clock::{SlotClock, TestingSlotClock};
+use state_processing::{state_advance::complete_state_advance, BlockReplayer};
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::convert::TryInto;
@@ -77,6 +78,19 @@ fn get_store_with_spec(
 fn get_harness(
     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
     validator_count: usize,
+) -> TestHarness {
+    // Most tests expect to retain historic states, so we use this as the default.
+    let chain_config = ChainConfig {
+        reconstruct_historic_states: true,
+        ..ChainConfig::default()
+    };
+    get_harness_generic(store, validator_count, chain_config)
+}
+
+fn get_harness_generic(
+    store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
+    validator_count: usize,
+    chain_config: ChainConfig,
 ) -> TestHarness {
     let harness = TestHarness::builder(MinimalEthSpec)
         .default_spec()
@@ -84,6 +98,7 @@ fn get_harness(
         .logger(store.logger().clone())
         .fresh_disk_store(store)
         .mock_execution_layer()
+        .chain_config(chain_config)
         .build();
     harness.advance_slot();
     harness
@@ -472,13 +487,15 @@ async fn block_replay_with_inaccurate_state_roots() {
         .await;

     // Slot must not be 0 mod 32 or else no blocks will be replayed.
-    let (mut head_state, head_root) = harness.get_current_state_and_root();
+    let (mut head_state, head_state_root) = harness.get_current_state_and_root();
+    let head_block_root = harness.head_block_root();
     assert_ne!(head_state.slot() % 32, 0);

-    let mut fast_head_state = store
+    let (_, mut fast_head_state) = store
         .get_inconsistent_state_for_attestation_verification_only(
-            &head_root,
-            Some(head_state.slot()),
+            &head_block_root,
+            head_state.slot(),
+            head_state_root,
         )
         .unwrap()
         .unwrap();
@@ -577,14 +594,7 @@ async fn block_replayer_hooks() {
 async fn delete_blocks_and_states() {
     let db_path = tempdir().unwrap();
     let store = get_store(&db_path);
-    let validators_keypairs =
-        types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
-    let harness = TestHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_disk_store(store.clone())
-        .mock_execution_layer()
-        .build();
+    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

     let unforked_blocks: u64 = 4 * E::slots_per_epoch();

@@ -1027,18 +1037,14 @@ fn check_shuffling_compatible(
 // Ensure blocks from abandoned forks are pruned from the Hot DB
 #[tokio::test]
 async fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let slots_per_epoch = rig.slots_per_epoch();
     let (mut state, state_root) = rig.get_current_state_and_root();

@@ -1137,18 +1143,14 @@

 #[tokio::test]
 async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let slots_per_epoch = rig.slots_per_epoch();
     let (state, state_root) = rig.get_current_state_and_root();

@@ -1272,15 +1274,11 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() {
     const HONEST_VALIDATOR_COUNT: usize = 32;
     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let slots_per_epoch = rig.slots_per_epoch();
     let (mut state, state_root) = rig.get_current_state_and_root();

@@ -1364,18 +1362,14 @@

 #[tokio::test]
 async fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let (state, state_root) = rig.get_current_state_and_root();

     // Fill up 0th epoch with canonical chain blocks
@@ -1509,18 +1503,14 @@
 // This is to check if state outside of normal block processing are pruned correctly.
 #[tokio::test]
 async fn prunes_skipped_slots_states() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let (state, state_root) = rig.get_current_state_and_root();

     let canonical_slots_zeroth_epoch: Vec<Slot> =
@@ -1638,18 +1628,14 @@ async fn prunes_skipped_slots_states() {
 // This is to check if state outside of normal block processing are pruned correctly.
 #[tokio::test]
 async fn finalizes_non_epoch_start_slot() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let (state, state_root) = rig.get_current_state_and_root();

     let canonical_slots_zeroth_epoch: Vec<Slot> =
@@ -2068,39 +2054,82 @@ async fn garbage_collect_temp_states_from_failed_block() {
 }

 #[tokio::test]
-async fn weak_subjectivity_sync() {
+async fn weak_subjectivity_sync_easy() {
+    let num_initial_slots = E::slots_per_epoch() * 11;
+    let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9);
+    let slots = (1..num_initial_slots).map(Slot::new).collect();
+    weak_subjectivity_sync_test(slots, checkpoint_slot).await
+}
+
+#[tokio::test]
+async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() {
+    let num_initial_slots = E::slots_per_epoch() * 11;
+    let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9);
+    let slots = (1..num_initial_slots)
+        .map(Slot::new)
+        .filter(|&slot| {
+            // Skip 3 slots leading up to the checkpoint slot.
+            slot <= checkpoint_slot - 3 || slot > checkpoint_slot
+        })
+        .collect();
+    weak_subjectivity_sync_test(slots, checkpoint_slot).await
+}
+
+#[tokio::test]
+async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() {
+    let num_initial_slots = E::slots_per_epoch() * 11;
+    let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3);
+    let slots = (1..num_initial_slots)
+        .map(Slot::new)
+        .filter(|&slot| {
+            // Skip 3 slots after the checkpoint slot.
+            slot <= checkpoint_slot || slot > checkpoint_slot + 3
+        })
+        .collect();
+    weak_subjectivity_sync_test(slots, checkpoint_slot).await
+}
+
+async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
     // Build an initial chain on one harness, representing a synced node with full history.
-    let num_initial_blocks = E::slots_per_epoch() * 11;
     let num_final_blocks = E::slots_per_epoch() * 2;

     let temp1 = tempdir().unwrap();
     let full_store = get_store(&temp1);
     let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT);

+    let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
+
+    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
     harness
-        .extend_chain(
-            num_initial_blocks as usize,
-            BlockStrategy::OnCanonicalHead,
-            AttestationStrategy::AllValidators,
+        .add_attested_blocks_at_slots(
+            genesis_state.clone(),
+            genesis_state_root,
+            &slots,
+            &all_validators,
         )
         .await;

-    let genesis_state = full_store
-        .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0)))
+    let wss_block_root = harness
+        .chain
+        .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev)
         .unwrap()
         .unwrap();
-    let wss_checkpoint = harness.finalized_checkpoint();
+    let wss_state_root = harness
+        .chain
+        .state_root_at_slot(checkpoint_slot)
+        .unwrap()
+        .unwrap();

     let wss_block = harness
         .chain
         .store
-        .get_full_block(&wss_checkpoint.root)
+        .get_full_block(&wss_block_root)
         .unwrap()
         .unwrap();
     let wss_state = full_store
-        .get_state(&wss_block.state_root(), None)
+        .get_state(&wss_state_root, Some(checkpoint_slot))
        .unwrap()
         .unwrap();
-    let wss_slot = wss_block.slot();

     // Add more blocks that advance finalization further.
     harness.advance_slot();
@@ -2130,19 +2159,25 @@
         None,
     );

-    // Initialise a new beacon chain from the finalized checkpoint
+    // Initialise a new beacon chain from the finalized checkpoint.
+    // The slot clock must be set to a time ahead of the checkpoint state.
+    let slot_clock = TestingSlotClock::new(
+        Slot::new(0),
+        Duration::from_secs(harness.chain.genesis_time),
+        Duration::from_secs(seconds_per_slot),
+    );
+    slot_clock.set_slot(harness.get_current_slot().as_u64());
     let beacon_chain = BeaconChainBuilder::<DiskHarnessType<E>>::new(MinimalEthSpec)
         .store(store.clone())
         .custom_spec(test_spec::<E>())
         .task_executor(harness.chain.task_executor.clone())
+        .logger(log.clone())
         .weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state)
         .unwrap()
-        .logger(log.clone())
         .store_migrator_config(MigratorConfig::default().blocking())
         .dummy_eth1_backend()
         .expect("should build dummy backend")
-        .testing_slot_clock(Duration::from_secs(seconds_per_slot))
-        .expect("should configure testing slot clock")
+        .slot_clock(slot_clock)
         .shutdown_sender(shutdown_tx)
         .chain_config(ChainConfig::default())
         .event_handler(Some(ServerSentEventHandler::new_with_capacity(
@@ -2159,9 +2194,9 @@

     // Apply blocks forward to reach head.
     let chain_dump = harness.chain.chain_dump().unwrap();
-    let new_blocks = &chain_dump[wss_slot.as_usize() + 1..];
-
-    assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1);
+    let new_blocks = chain_dump
+        .iter()
+        .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot);

     for snapshot in new_blocks {
         let block_root = snapshot.beacon_block_root;
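[Aside — illustrative sketch, not part of the commit] The switch from slicing to filtering matters because the refactored harness can skip slots, so the chain dump is no longer dense and slot-based indexing would misalign:

    fn main() {
        // Snapshot slots with slot 5 skipped: slicing by slot number (the old
        // `chain_dump[wss_slot + 1..]`) misses blocks, while filtering on each
        // snapshot's own slot stays correct.
        let snapshot_slots = vec![0u64, 1, 2, 3, 4, 6, 7];
        let checkpoint_slot = 6u64;
        let by_filter: Vec<u64> = snapshot_slots
            .iter()
            .copied()
            .filter(|&slot| slot > checkpoint_slot)
            .collect();
        assert_eq!(by_filter, vec![7]); // the block after the checkpoint
        let by_index = &snapshot_slots[(checkpoint_slot as usize + 1)..];
        assert!(by_index.is_empty()); // index-based slicing skips past it
    }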
@@ -2271,13 +2306,17 @@
     assert_eq!(forwards, expected);

     // All blocks can be loaded.
+    let mut prev_block_root = Hash256::zero();
     for (block_root, slot) in beacon_chain
         .forwards_iter_block_roots(Slot::new(0))
         .unwrap()
         .map(Result::unwrap)
     {
         let block = store.get_blinded_block(&block_root).unwrap().unwrap();
-        assert_eq!(block.slot(), slot);
+        if block_root != prev_block_root {
+            assert_eq!(block.slot(), slot);
+        }
+        prev_block_root = block_root;
     }

     // All states from the oldest state slot can be loaded.
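[Aside — illustrative sketch, not part of the commit] A forwards block-roots iterator repeats the previous root for skipped slots, so `block.slot() == slot` only holds the first time a root appears; hence the `prev_block_root` guard. The same shape with toy data:

    fn main() {
        // (slot, block_root) pairs as a forwards iterator yields them: the
        // root 0xb repeats at slot 12 because slot 12 was skipped.
        let iterated = [(10u64, 0xa_u64), (11, 0xb), (12, 0xb), (13, 0xc)];
        let actual_slot = |root: u64| match root {
            0xa => 10,
            0xb => 11,
            0xc => 13,
            _ => unreachable!(),
        };
        let mut prev_block_root = 0;
        for (slot, block_root) in iterated {
            if block_root != prev_block_root {
                // Only the first occurrence of a root is at the block's own slot.
                assert_eq!(actual_slot(block_root), slot);
            }
            prev_block_root = block_root;
        }
    }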
@@ -2292,14 +2331,141 @@
         assert_eq!(state.canonical_root(), state_root);
     }

-    // Anchor slot is still set to the starting slot.
-    assert_eq!(store.get_anchor_slot(), Some(wss_slot));
+    // Anchor slot is still set to the slot of the checkpoint block.
+    assert_eq!(store.get_anchor_slot(), Some(wss_block.slot()));

     // Reconstruct states.
     store.clone().reconstruct_historic_states().unwrap();
     assert_eq!(store.get_anchor_slot(), None);
 }

+/// Test that blocks and attestations that refer to states around an unaligned split state are
+/// processed correctly.
+#[tokio::test]
+async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
+    let temp = tempdir().unwrap();
+    let store = get_store(&temp);
+    let chain_config = ChainConfig {
+        reconstruct_historic_states: false,
+        ..ChainConfig::default()
+    };
+    let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config);
+
+    let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
+
+    let split_slot = Slot::new(E::slots_per_epoch() * 4);
+    let pre_skips = 1;
+    let post_skips = 1;
+
+    // Build the chain up to the intended split slot, with 3 skips before the split.
+    let slots = (1..=split_slot.as_u64() - pre_skips)
+        .map(Slot::new)
+        .collect::<Vec<_>>();
+
+    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
+    harness
+        .add_attested_blocks_at_slots(
+            genesis_state.clone(),
+            genesis_state_root,
+            &slots,
+            &all_validators,
+        )
+        .await;
+
+    // Before the split slot becomes finalized, create two forking blocks that build on the split
+    // block:
+    //
+    // - one that is invalid because it conflicts with finalization (slot <= finalized_slot)
+    // - one that is valid because its slot is not finalized (slot > finalized_slot)
+    let (unadvanced_split_state, unadvanced_split_state_root) =
+        harness.get_current_state_and_root();
+
+    let (invalid_fork_block, _) = harness
+        .make_block(unadvanced_split_state.clone(), split_slot)
+        .await;
+    let (valid_fork_block, _) = harness
+        .make_block(unadvanced_split_state.clone(), split_slot + 1)
+        .await;
+
+    // Advance the chain so that the intended split slot is finalized.
+    // Do not attest in the epoch boundary slot, to make attestation production later easier (no
+    // equivocations).
+    let finalizing_slot = split_slot + 2 * E::slots_per_epoch();
+    for _ in 0..pre_skips + post_skips {
+        harness.advance_slot();
+    }
+    harness.extend_to_slot(finalizing_slot - 1).await;
+    harness
+        .add_block_at_slot(finalizing_slot, harness.get_current_state())
+        .await
+        .unwrap();
+
+    // Check that the split slot is as intended.
+    let split = store.get_split_info();
+    assert_eq!(split.slot, split_slot);
+    assert_eq!(split.block_root, valid_fork_block.parent_root());
+    assert_ne!(split.state_root, unadvanced_split_state_root);
+
+    // Applying the invalid block should fail.
+    let err = harness
+        .chain
+        .process_block(
+            invalid_fork_block.canonical_root(),
+            Arc::new(invalid_fork_block.clone()),
+            NotifyExecutionLayer::Yes,
+            || Ok(()),
+        )
+        .await
+        .unwrap_err();
+    assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. }));
+
+    // Applying the valid block should succeed, but it should not become head.
+    harness
+        .chain
+        .process_block(
+            valid_fork_block.canonical_root(),
+            Arc::new(valid_fork_block.clone()),
+            NotifyExecutionLayer::Yes,
+            || Ok(()),
+        )
+        .await
+        .unwrap();
+    harness.chain.recompute_head_at_current_slot().await;
+    assert_ne!(harness.head_block_root(), valid_fork_block.canonical_root());
+
+    // Attestations to the split block in the next 2 epochs should be processed successfully.
+    let attestation_start_slot = harness.get_current_slot();
+    let attestation_end_slot = attestation_start_slot + 2 * E::slots_per_epoch();
+    let (split_state_root, mut advanced_split_state) = harness
+        .chain
+        .store
+        .get_advanced_hot_state(split.block_root, split.slot, split.state_root)
+        .unwrap()
+        .unwrap();
+    complete_state_advance(
+        &mut advanced_split_state,
+        Some(split_state_root),
+        attestation_start_slot,
+        &harness.chain.spec,
+    )
+    .unwrap();
+    advanced_split_state
+        .build_caches(&harness.chain.spec)
+        .unwrap();
+    let advanced_split_state_root = advanced_split_state.update_tree_hash_cache().unwrap();
+    for slot in (attestation_start_slot.as_u64()..attestation_end_slot.as_u64()).map(Slot::new) {
+        let attestations = harness.make_attestations(
+            &all_validators,
+            &advanced_split_state,
+            advanced_split_state_root,
+            split.block_root.into(),
+            slot,
+        );
+        harness.advance_slot();
+        harness.process_attestations(attestations);
+    }
+}
+
 #[tokio::test]
 async fn finalizes_after_resuming_from_db() {
     let validator_count = 16;
@ -2358,6 +2524,7 @@ async fn finalizes_after_resuming_from_db() {
|
|||||||
.default_spec()
|
.default_spec()
|
||||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||||
.resumed_disk_store(store)
|
.resumed_disk_store(store)
|
||||||
|
.testing_slot_clock(original_chain.slot_clock.clone())
|
||||||
.execution_layer(original_chain.execution_layer.clone())
|
.execution_layer(original_chain.execution_layer.clone())
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
@ -2611,6 +2778,9 @@ async fn schema_downgrade_to_min_version() {
|
|||||||
SchemaVersion(11)
|
SchemaVersion(11)
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Save the slot clock so that the new harness doesn't revert in time.
|
||||||
|
let slot_clock = harness.chain.slot_clock.clone();
|
||||||
|
|
||||||
// Close the database to ensure everything is written to disk.
|
// Close the database to ensure everything is written to disk.
|
||||||
drop(store);
|
drop(store);
|
||||||
drop(harness);
|
drop(harness);
|
||||||
@ -2641,11 +2811,21 @@ async fn schema_downgrade_to_min_version() {
|
|||||||
)
|
)
|
||||||
.expect("schema upgrade from minimum version should work");
|
.expect("schema upgrade from minimum version should work");
|
||||||
|
|
||||||
// Rescreate the harness.
|
// Recreate the harness.
|
||||||
|
/*
|
||||||
|
let slot_clock = TestingSlotClock::new(
|
||||||
|
Slot::new(0),
|
||||||
|
Duration::from_secs(harness.chain.genesis_time),
|
||||||
|
Duration::from_secs(spec.seconds_per_slot),
|
||||||
|
);
|
||||||
|
slot_clock.set_slot(harness.get_current_slot().as_u64());
|
||||||
|
*/
|
||||||
|
|
||||||
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||||
.default_spec()
|
.default_spec()
|
||||||
.keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
|
.keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
|
||||||
.logger(store.logger().clone())
|
.logger(store.logger().clone())
|
||||||
|
.testing_slot_clock(slot_clock)
|
||||||
.resumed_disk_store(store.clone())
|
.resumed_disk_store(store.clone())
|
||||||
.mock_execution_layer()
|
.mock_execution_layer()
|
||||||
.build();
|
.build();
|
||||||
|
@ -6,7 +6,7 @@ use beacon_chain::{
|
|||||||
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
|
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
|
||||||
OP_POOL_DB_KEY,
|
OP_POOL_DB_KEY,
|
||||||
},
|
},
|
||||||
BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
|
BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
|
||||||
};
|
};
|
||||||
use lazy_static::lazy_static;
|
use lazy_static::lazy_static;
|
||||||
use operation_pool::PersistedOperationPool;
|
use operation_pool::PersistedOperationPool;
|
||||||
@ -28,6 +28,10 @@ lazy_static! {
|
|||||||
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
|
fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
|
||||||
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
let harness = BeaconChainHarness::builder(MinimalEthSpec)
|
||||||
.default_spec()
|
.default_spec()
|
||||||
|
.chain_config(ChainConfig {
|
||||||
|
reconstruct_historic_states: true,
|
||||||
|
..ChainConfig::default()
|
||||||
|
})
|
||||||
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
.keypairs(KEYPAIRS[0..validator_count].to_vec())
|
||||||
.fresh_ephemeral_store()
|
.fresh_ephemeral_store()
|
||||||
.mock_execution_layer()
|
.mock_execution_layer()
|
||||||
|
@ -743,7 +743,6 @@ impl<E: EthSpec> Stream for InboundEvents<E> {
|
|||||||
pub struct BeaconProcessor<E: EthSpec> {
|
pub struct BeaconProcessor<E: EthSpec> {
|
||||||
pub network_globals: Arc<NetworkGlobals<E>>,
|
pub network_globals: Arc<NetworkGlobals<E>>,
|
||||||
pub executor: TaskExecutor,
|
pub executor: TaskExecutor,
|
||||||
pub max_workers: usize,
|
|
||||||
pub current_workers: usize,
|
pub current_workers: usize,
|
||||||
pub config: BeaconProcessorConfig,
|
pub config: BeaconProcessorConfig,
|
||||||
pub log: Logger,
|
pub log: Logger,
|
||||||
@ -756,7 +755,7 @@ impl<E: EthSpec> BeaconProcessor<E> {
|
|||||||
/// - Performed immediately, if a worker is available.
|
/// - Performed immediately, if a worker is available.
|
||||||
/// - Queued for later processing, if no worker is currently available.
|
/// - Queued for later processing, if no worker is currently available.
|
||||||
///
|
///
|
||||||
/// Only `self.max_workers` will ever be spawned at one time. Each worker is a `tokio` task
|
/// Only `self.config.max_workers` will ever be spawned at one time. Each worker is a `tokio` task
|
||||||
/// started with `spawn_blocking`.
|
/// started with `spawn_blocking`.
|
||||||
///
|
///
|
||||||
/// The optional `work_journal_tx` allows for an outside process to receive a log of all work
|
/// The optional `work_journal_tx` allows for an outside process to receive a log of all work
|
||||||
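
Note: the hunks above (together with the `num_cpus` removals further down) fold the worker cap into `BeaconProcessorConfig`. A minimal sketch of the resulting spawn check follows. The types here are simplified stand-ins, not the real lighthouse ones, and `std::thread::available_parallelism` is an assumed replacement for the dropped `num_cpus::get()`.

```rust
// Simplified model of the refactor: the worker cap now lives on the config.
struct BeaconProcessorConfig {
    max_workers: usize,
}

struct BeaconProcessor {
    current_workers: usize,
    config: BeaconProcessorConfig,
}

impl BeaconProcessor {
    // Mirrors `self.current_workers < self.config.max_workers` in the diff.
    fn can_spawn(&self) -> bool {
        self.current_workers < self.config.max_workers
    }
}

fn main() {
    // Assumed default in the spirit of the old `cmp::max(1, num_cpus::get())`.
    let max_workers = std::thread::available_parallelism().map_or(1, |n| n.get());
    let p = BeaconProcessor {
        current_workers: 0,
        config: BeaconProcessorConfig { max_workers },
    };
    assert!(p.can_spawn());
}
```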
@@ -935,7 +934,7 @@ impl<E: EthSpec> BeaconProcessor<E> {
             let _ = work_journal_tx.try_send(id);
         }
 
-        let can_spawn = self.current_workers < self.max_workers;
+        let can_spawn = self.current_workers < self.config.max_workers;
         let drop_during_sync = work_event
             .as_ref()
             .map_or(false, |event| event.drop_during_sync);
@@ -44,4 +44,3 @@ slasher_service = { path = "../../slasher/service" }
 monitoring_api = {path = "../../common/monitoring_api"}
 execution_layer = { path = "../execution_layer" }
 beacon_processor = { path = "../beacon_processor" }
-num_cpus = "1.13.0"
@@ -30,8 +30,6 @@ use network::{NetworkConfig, NetworkSenders, NetworkService};
 use slasher::Slasher;
 use slasher_service::SlasherService;
 use slog::{debug, info, warn, Logger};
-use state_processing::per_slot_processing;
-use std::cmp;
 use std::net::TcpListener;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
@@ -161,7 +159,10 @@ where
         let context = runtime_context.service_context("beacon".into());
         let spec = chain_spec.ok_or("beacon_chain_start_method requires a chain spec")?;
         let event_handler = if self.http_api_config.enabled {
-            Some(ServerSentEventHandler::new(context.log().clone()))
+            Some(ServerSentEventHandler::new(
+                context.log().clone(),
+                self.http_api_config.sse_capacity_multiplier,
+            ))
         } else {
             None
         };
@@ -316,7 +317,6 @@ where
                 config.chain.checkpoint_sync_url_timeout,
             )),
         );
-        let slots_per_epoch = TEthSpec::slots_per_epoch();
 
         let deposit_snapshot = if config.sync_eth1_chain {
             // We want to fetch deposit snapshot before fetching the finalized beacon state to
@@ -367,7 +367,7 @@ where
             context.log(),
             "Downloading finalized state";
         );
-        let mut state = remote
+        let state = remote
             .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Finalized, &spec)
             .await
             .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))?
@@ -393,16 +393,6 @@ where
 
         debug!(context.log(), "Downloaded finalized block");
 
-        let epoch_boundary_slot = state.slot() % slots_per_epoch;
-        if epoch_boundary_slot != 0 {
-            debug!(context.log(), "Advancing state to epoch boundary"; "state_slot" => state.slot(), "epoch_boundary_slot" => epoch_boundary_slot);
-        }
-
-        while state.slot() % slots_per_epoch != 0 {
-            per_slot_processing(&mut state, None, &spec)
-                .map_err(|e| format!("Error advancing state: {:?}", e))?;
-        }
-
         let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
             .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
 
@@ -775,7 +765,6 @@ where
         BeaconProcessor {
             network_globals: network_globals.clone(),
             executor: beacon_processor_context.executor.clone(),
-            max_workers: cmp::max(1, num_cpus::get()),
             current_workers: 0,
             config: beacon_processor_config,
             log: beacon_processor_context.log().clone(),
@@ -84,7 +84,6 @@ pub struct Config {
     pub monitoring_api: Option<monitoring_api::Config>,
     pub slasher: Option<slasher::Config>,
     pub logger_config: LoggerConfig,
-    pub always_prefer_builder_payload: bool,
     pub beacon_processor: BeaconProcessorConfig,
 }
 
@@ -114,7 +113,6 @@ impl Default for Config {
             validator_monitor_pubkeys: vec![],
             validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
             logger_config: LoggerConfig::default(),
-            always_prefer_builder_payload: false,
             beacon_processor: <_>::default(),
         }
     }
@@ -35,7 +35,6 @@ pub fn attester_duties<T: BeaconChainTypes>(
         .epoch(T::EthSpec::slots_per_epoch());
 
     if request_epoch == current_epoch
-        || request_epoch == tolerant_current_epoch
        || request_epoch == current_epoch + 1
        || request_epoch == tolerant_current_epoch + 1
    {
@@ -46,7 +45,7 @@ pub fn attester_duties<T: BeaconChainTypes>(
             request_epoch, current_epoch
         )))
     } else {
-        // request_epoch < current_epoch
+        // request_epoch < current_epoch, in fact we only allow `request_epoch == current_epoch-1` in this case
         compute_historic_attester_duties(request_epoch, request_indices, chain)
     }
 }
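
Note: the attester-duties hunks above tighten the serviceable epoch window. A hedged sketch of the resulting control flow, with epochs as plain `u64` values rather than the consensus `Epoch` type:

```rust
// Which path a duties request takes after this change. The historic branch is
// reached for past epochs; per the updated comment, in practice only
// `request == current - 1` lands there.
fn attester_duties_path(request: u64, current: u64, tolerant_current: u64) -> &'static str {
    if request == current || request == current + 1 || request == tolerant_current + 1 {
        "serve from the shuffling cache"
    } else if request > current + 1 {
        "reject: epoch too far in the future"
    } else {
        "compute_historic_attester_duties"
    }
}

fn main() {
    assert_eq!(attester_duties_path(10, 10, 9), "serve from the shuffling cache");
    assert_eq!(attester_duties_path(12, 10, 9), "reject: epoch too far in the future");
    assert_eq!(attester_duties_path(9, 10, 9), "compute_historic_attester_duties");
}
```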
@@ -66,7 +66,10 @@ use tokio::sync::{
     mpsc::{Sender, UnboundedSender},
     oneshot,
 };
-use tokio_stream::{wrappers::BroadcastStream, StreamExt};
+use tokio_stream::{
+    wrappers::{errors::BroadcastStreamRecvError, BroadcastStream},
+    StreamExt,
+};
 use types::{
     Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError,
     BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload,
@@ -132,6 +135,7 @@ pub struct Config {
     pub allow_sync_stalled: bool,
     pub spec_fork_name: Option<ForkName>,
     pub data_dir: PathBuf,
+    pub sse_capacity_multiplier: usize,
     pub enable_beacon_processor: bool,
 }
 
@@ -146,6 +150,7 @@ impl Default for Config {
             allow_sync_stalled: false,
             spec_fork_name: None,
             data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
+            sse_capacity_multiplier: 1,
             enable_beacon_processor: true,
         }
     }
@@ -4373,22 +4378,29 @@ pub fn serve<T: BeaconChainTypes>(
                     }
                 };
 
-                receivers.push(BroadcastStream::new(receiver).map(|msg| {
-                    match msg {
-                        Ok(data) => Event::default()
-                            .event(data.topic_name())
-                            .json_data(data)
-                            .map_err(|e| {
-                                warp_utils::reject::server_sent_event_error(format!(
-                                    "{:?}",
-                                    e
-                                ))
-                            }),
-                        Err(e) => Err(warp_utils::reject::server_sent_event_error(
-                            format!("{:?}", e),
-                        )),
-                    }
-                }));
+                receivers.push(
+                    BroadcastStream::new(receiver)
+                        .map(|msg| {
+                            match msg {
+                                Ok(data) => Event::default()
+                                    .event(data.topic_name())
+                                    .json_data(data)
+                                    .unwrap_or_else(|e| {
+                                        Event::default()
+                                            .comment(format!("error - bad json: {e:?}"))
+                                    }),
+                                // Do not terminate the stream if the channel fills
+                                // up. Just drop some messages and send a comment to
+                                // the client.
+                                Err(BroadcastStreamRecvError::Lagged(n)) => {
+                                    Event::default().comment(format!(
+                                        "error - dropped {n} messages"
+                                    ))
+                                }
+                            }
+                        })
+                        .map(Ok::<_, std::convert::Infallible>),
+                );
             }
         } else {
             return Err(warp_utils::reject::custom_server_error(
@@ -4398,7 +4410,7 @@ pub fn serve<T: BeaconChainTypes>(
 
                 let s = futures::stream::select_all(receivers);
 
-                Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
+                Ok(warp::sse::reply(warp::sse::keep_alive().stream(s)))
             })
         },
     );
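
Note: the SSE hunk above stops treating a lagged broadcast receiver as fatal; lag becomes an SSE comment instead of an error that tears down the stream. A self-contained sketch of the same pattern using the `tokio-stream` API the diff imports (assumed crate setup; the channel size of 2 is deliberately tiny to force the `Lagged` case):

```rust
use tokio_stream::{
    wrappers::{errors::BroadcastStreamRecvError, BroadcastStream},
    StreamExt,
};

#[tokio::main]
async fn main() {
    let (tx, rx) = tokio::sync::broadcast::channel::<u32>(2);
    // Overflow the channel before the receiver gets a chance to drain it.
    for i in 0..5 {
        let _ = tx.send(i);
    }
    drop(tx);

    // As in the new handler: a lagged receiver yields an item describing the
    // loss rather than terminating the stream.
    let mut stream = BroadcastStream::new(rx).map(|msg| match msg {
        Ok(data) => format!("event: {data}"),
        Err(BroadcastStreamRecvError::Lagged(n)) => format!(": error - dropped {n} messages"),
    });
    while let Some(line) = stream.next().await {
        println!("{line}");
    }
}
```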
@@ -181,7 +181,14 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
     let eth1_service =
         eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap();
 
-    let beacon_processor_config = BeaconProcessorConfig::default();
+    let beacon_processor_config = BeaconProcessorConfig {
+        // The number of workers must be greater than one. Tests which use the
+        // builder workflow sometimes require an internal HTTP request in order
+        // to fulfill an already in-flight HTTP request, therefore having only
+        // one worker will result in a deadlock.
+        max_workers: 2,
+        ..BeaconProcessorConfig::default()
+    };
     let BeaconProcessorChannels {
         beacon_processor_tx,
         beacon_processor_rx,
@@ -193,11 +200,6 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
     BeaconProcessor {
         network_globals: network_globals.clone(),
         executor: test_runtime.task_executor.clone(),
-        // The number of workers must be greater than one. Tests which use the
-        // builder workflow sometimes require an internal HTTP request in order
-        // to fulfill an already in-flight HTTP request, therefore having only
-        // one worker will result in a deadlock.
-        max_workers: 2,
         current_workers: 0,
         config: beacon_processor_config,
         log: log.clone(),
@@ -222,6 +224,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
         allow_sync_stalled: false,
         data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
         spec_fork_name: None,
+        sse_capacity_multiplier: 1,
         enable_beacon_processor: true,
     },
     chain: Some(chain),
@@ -1,7 +1,7 @@
 use beacon_chain::test_utils::RelativeSyncCommittee;
 use beacon_chain::{
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
-    BeaconChain, StateSkipConfig, WhenSlotSkipped,
+    BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped,
 };
 use environment::null_logger;
 use eth2::{
@@ -77,6 +77,7 @@ struct ApiTester {
 
 struct ApiTesterConfig {
     spec: ChainSpec,
+    retain_historic_states: bool,
     builder_threshold: Option<u128>,
 }
 
@@ -86,11 +87,19 @@ impl Default for ApiTesterConfig {
         spec.shard_committee_period = 2;
         Self {
             spec,
+            retain_historic_states: false,
             builder_threshold: None,
         }
     }
 }
 
+impl ApiTesterConfig {
+    fn retain_historic_states(mut self) -> Self {
+        self.retain_historic_states = true;
+        self
+    }
+}
+
 impl ApiTester {
     pub async fn new() -> Self {
         // This allows for testing voluntary exits without building out a massive chain.
@@ -118,6 +127,10 @@ impl ApiTester {
         let harness = Arc::new(
             BeaconChainHarness::builder(MainnetEthSpec)
                 .spec(spec.clone())
+                .chain_config(ChainConfig {
+                    reconstruct_historic_states: config.retain_historic_states,
+                    ..ChainConfig::default()
+                })
                 .logger(logging::test_logger())
                 .deterministic_keypairs(VALIDATOR_COUNT)
                 .fresh_ephemeral_store()
@@ -379,6 +392,7 @@ impl ApiTester {
     pub async fn new_mev_tester_no_builder_threshold() -> Self {
         let mut config = ApiTesterConfig {
             builder_threshold: Some(0),
+            retain_historic_states: false,
             spec: E::default_spec(),
         };
         config.spec.altair_fork_epoch = Some(Epoch::new(0));
@@ -4807,7 +4821,7 @@ async fn get_validator_duties_attester_with_skip_slots() {
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_validator_duties_proposer() {
-    ApiTester::new()
+    ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states())
         .await
         .test_get_validator_duties_proposer()
         .await;
@@ -4815,7 +4829,7 @@ async fn get_validator_duties_proposer() {
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_validator_duties_proposer_with_skip_slots() {
-    ApiTester::new()
+    ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states())
         .await
         .skip_slots(E::slots_per_epoch() * 2)
         .test_get_validator_duties_proposer()
@@ -5147,6 +5161,7 @@ async fn builder_payload_chosen_by_profit() {
 async fn builder_works_post_capella() {
     let mut config = ApiTesterConfig {
         builder_threshold: Some(0),
+        retain_historic_states: false,
         spec: E::default_spec(),
     };
     config.spec.altair_fork_epoch = Some(Epoch::new(0));
@@ -37,7 +37,6 @@ logging = { path = "../../common/logging" }
 task_executor = { path = "../../common/task_executor" }
 igd = "0.12.1"
 itertools = "0.10.0"
-num_cpus = "1.13.0"
 lru_cache = { path = "../../common/lru_cache" }
 if-addrs = "0.6.4"
 strum = "0.24.0"
@@ -2302,7 +2302,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                 );
             }
             AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError(
-                HotColdDBError::AttestationStateIsFinalized { .. },
+                HotColdDBError::FinalizedStateNotInHotDatabase { .. },
             ))) => {
                 debug!(self.log, "Attestation for finalized state"; "peer_id" => %peer_id);
                 self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
@@ -23,7 +23,6 @@ use lighthouse_network::{
     Client, MessageId, NetworkGlobals, PeerId, Response,
 };
 use slot_clock::SlotClock;
-use std::cmp;
 use std::iter::Iterator;
 use std::sync::Arc;
 use std::time::Duration;
@@ -235,7 +234,6 @@ impl TestRig {
         let beacon_processor = BeaconProcessor {
             network_globals,
             executor,
-            max_workers: cmp::max(1, num_cpus::get()),
             current_workers: 0,
             config: beacon_processor_config,
             log: log.clone(),
@@ -389,6 +389,15 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 stalled. This is useful for very small testnets. TESTING ONLY. DO NOT USE ON \
                 MAINNET.")
         )
+        .arg(
+            Arg::with_name("http-sse-capacity-multiplier")
+                .long("http-sse-capacity-multiplier")
+                .takes_value(true)
+                .default_value("1")
+                .value_name("N")
+                .help("Multiplier to apply to the length of HTTP server-sent-event (SSE) channels. \
+                    Increasing this value can prevent messages from being dropped.")
+        )
         .arg(
             Arg::with_name("http-enable-beacon-processor")
                 .long("http-enable-beacon-processor")
@@ -150,6 +150,9 @@ pub fn get_config<E: EthSpec>(
         client_config.http_api.allow_sync_stalled = true;
     }
 
+    client_config.http_api.sse_capacity_multiplier =
+        parse_required(cli_args, "http-sse-capacity-multiplier")?;
+
     client_config.http_api.enable_beacon_processor =
         parse_required(cli_args, "http-enable-beacon-processor")?;
 
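
Note: the two hunks above wire the new `--http-sse-capacity-multiplier` flag through to `http_api::Config`. A hedged sketch of its effect on channel sizing; `DEFAULT_SSE_CAPACITY` and the helper are illustrative names, not the real constants:

```rust
struct HttpApiConfig {
    sse_capacity_multiplier: usize,
}

// Illustrative base capacity; the real value lives with the event handler.
const DEFAULT_SSE_CAPACITY: usize = 2048;

// The multiplier scales every SSE broadcast channel, letting a busy node
// buffer more events before receivers start lagging (and dropping messages).
fn sse_channel_capacity(config: &HttpApiConfig) -> usize {
    DEFAULT_SSE_CAPACITY * config.sse_capacity_multiplier
}

fn main() {
    let config = HttpApiConfig { sse_capacity_multiplier: 4 };
    assert_eq!(sse_channel_capacity(&config), 8192);
}
```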
@@ -349,6 +352,8 @@ pub fn get_config<E: EthSpec>(
         el_config.default_datadir = client_config.data_dir().clone();
         el_config.builder_profit_threshold =
             clap_utils::parse_required(cli_args, "builder-profit-threshold")?;
+        el_config.always_prefer_builder_payload =
+            cli_args.is_present("always-prefer-builder-payload");
         el_config.ignore_builder_override_suggestion_threshold =
             clap_utils::parse_required(cli_args, "ignore-builder-override-suggestion-threshold")?;
         let execution_timeout_multiplier =
@@ -837,10 +842,6 @@ pub fn get_config<E: EthSpec>(
     if cli_args.is_present("genesis-backfill") {
         client_config.chain.genesis_backfill = true;
     }
-    // Payload selection configs
-    if cli_args.is_present("always-prefer-builder-payload") {
-        client_config.always_prefer_builder_payload = true;
-    }
 
     // Backfill sync rate-limiting
     client_config.beacon_processor.enable_backfill_rate_limiting =
@@ -14,7 +14,7 @@ use crate::memory_store::MemoryStore;
 use crate::metadata::{
     AnchorInfo, BlobInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY,
     BLOB_INFO_KEY, COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION,
-    PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY,
+    PRUNING_CHECKPOINT_KEY, SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN,
 };
 use crate::metrics;
 use crate::{
@@ -159,10 +159,10 @@ pub enum HotColdDBError {
     IterationError {
         unexpected_key: BytesKey,
     },
-    AttestationStateIsFinalized {
+    FinalizedStateNotInHotDatabase {
         split_slot: Slot,
-        request_slot: Option<Slot>,
-        state_root: Hash256,
+        request_slot: Slot,
+        block_root: Hash256,
     },
     Rollback,
 }
@@ -688,7 +688,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     /// upon that state (e.g., state roots). Additionally, only states from the hot store are
     /// returned.
     ///
-    /// See `Self::get_state` for information about `slot`.
+    /// See `Self::get_advanced_hot_state` for information about `max_slot`.
     ///
     /// ## Warning
     ///
@@ -700,23 +700,78 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     /// - `state.block_roots`
     pub fn get_inconsistent_state_for_attestation_verification_only(
         &self,
-        state_root: &Hash256,
-        slot: Option<Slot>,
-    ) -> Result<Option<BeaconState<E>>, Error> {
+        block_root: &Hash256,
+        max_slot: Slot,
+        state_root: Hash256,
+    ) -> Result<Option<(Hash256, BeaconState<E>)>, Error> {
         metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT);
+        self.get_advanced_hot_state_with_strategy(
+            *block_root,
+            max_slot,
+            state_root,
+            StateProcessingStrategy::Inconsistent,
+        )
+    }
 
-        let split_slot = self.get_split_slot();
-
-        if slot.map_or(false, |slot| slot < split_slot) {
-            Err(HotColdDBError::AttestationStateIsFinalized {
-                split_slot,
-                request_slot: slot,
-                state_root: *state_root,
-            }
-            .into())
-        } else {
-            self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent)
-        }
-    }
+    /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`.
+    ///
+    /// The `state_root` argument is used to look up the block's un-advanced state in case an
+    /// advanced state is not found.
+    ///
+    /// Return the `(result_state_root, state)` satisfying:
+    ///
+    /// - `result_state_root == state.canonical_root()`
+    /// - `state.slot() <= max_slot`
+    /// - `state.get_latest_block_root(result_state_root) == block_root`
+    ///
+    /// Presently this is only used to avoid loading the un-advanced split state, but in future will
+    /// be expanded to return states from an in-memory cache.
+    pub fn get_advanced_hot_state(
+        &self,
+        block_root: Hash256,
+        max_slot: Slot,
+        state_root: Hash256,
+    ) -> Result<Option<(Hash256, BeaconState<E>)>, Error> {
+        self.get_advanced_hot_state_with_strategy(
+            block_root,
+            max_slot,
+            state_root,
+            StateProcessingStrategy::Accurate,
+        )
+    }
+
+    /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`.
+    pub fn get_advanced_hot_state_with_strategy(
+        &self,
+        block_root: Hash256,
+        max_slot: Slot,
+        state_root: Hash256,
+        state_processing_strategy: StateProcessingStrategy,
+    ) -> Result<Option<(Hash256, BeaconState<E>)>, Error> {
+        // Hold a read lock on the split point so it can't move while we're trying to load the
+        // state.
+        let split = self.split.read_recursive();
+
+        // Sanity check max-slot against the split slot.
+        if max_slot < split.slot {
+            return Err(HotColdDBError::FinalizedStateNotInHotDatabase {
+                split_slot: split.slot,
+                request_slot: max_slot,
+                block_root,
+            }
+            .into());
+        }
+
+        let state_root = if block_root == split.block_root && split.slot <= max_slot {
+            split.state_root
+        } else {
+            state_root
+        };
+        let state = self
+            .load_hot_state(&state_root, state_processing_strategy)?
+            .map(|state| (state_root, state));
+        drop(split);
+        Ok(state)
+    }
 
     /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk.
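
Note: the doc comments above pin down the `get_advanced_hot_state` contract, and the split-state special case is the subtle part. Here is a toy model of the root-selection step only. Plain aliases stand in for `Hash256`/`Slot`; this is a sketch of the behaviour, not the real `HotColdDB` method:

```rust
type Hash256 = [u8; 32];
type Slot = u64;

struct Split {
    slot: Slot,
    state_root: Hash256,
    block_root: Hash256,
}

// If the caller asks for the split block and the split slot fits under
// `max_slot`, prefer the *advanced* split state root: the unaligned
// `block.state_root()` may already have been pruned from the hot DB.
fn choose_state_root(split: &Split, block_root: Hash256, max_slot: Slot, state_root: Hash256) -> Hash256 {
    if block_root == split.block_root && split.slot <= max_slot {
        split.state_root
    } else {
        state_root
    }
}

fn main() {
    let split = Split { slot: 64, state_root: [1; 32], block_root: [2; 32] };
    // The split block resolves to the advanced split state root.
    assert_eq!(choose_state_root(&split, [2; 32], 64, [3; 32]), [1; 32]);
    // Any other block keeps the caller-supplied root.
    assert_eq!(choose_state_root(&split, [9; 32], 64, [3; 32]), [3; 32]);
}
```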
@@ -1434,8 +1489,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         *self.split.read_recursive()
     }
 
-    pub fn set_split(&self, slot: Slot, state_root: Hash256) {
-        *self.split.write() = Split { slot, state_root };
+    pub fn set_split(&self, slot: Slot, state_root: Hash256, block_root: Hash256) {
+        *self.split.write() = Split {
+            slot,
+            state_root,
+            block_root,
+        };
     }
 
     /// Fetch the slot of the most recently stored restore point.
@@ -1470,25 +1529,36 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     }
 
     /// Initialise the anchor info for checkpoint sync starting from `block`.
-    pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result<KeyValueStoreOp, Error> {
+    pub fn init_anchor_info(
+        &self,
+        block: BeaconBlockRef<'_, E>,
+        retain_historic_states: bool,
+    ) -> Result<KeyValueStoreOp, Error> {
         let anchor_slot = block.slot();
         let slots_per_restore_point = self.config.slots_per_restore_point;
 
-        // Set the `state_upper_limit` to the slot of the *next* restore point.
-        // See `get_state_upper_limit` for rationale.
-        let next_restore_point_slot = if anchor_slot % slots_per_restore_point == 0 {
+        let state_upper_limit = if !retain_historic_states {
+            STATE_UPPER_LIMIT_NO_RETAIN
+        } else if anchor_slot % slots_per_restore_point == 0 {
             anchor_slot
         } else {
+            // Set the `state_upper_limit` to the slot of the *next* restore point.
+            // See `get_state_upper_limit` for rationale.
             (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point
         };
-        let anchor_info = AnchorInfo {
-            anchor_slot,
-            oldest_block_slot: anchor_slot,
-            oldest_block_parent: block.parent_root(),
-            state_upper_limit: next_restore_point_slot,
-            state_lower_limit: self.spec.genesis_slot,
+        let anchor_info = if state_upper_limit == 0 && anchor_slot == 0 {
+            // Genesis archive node: no anchor because we *will* store all states.
+            None
+        } else {
+            Some(AnchorInfo {
+                anchor_slot,
+                oldest_block_slot: anchor_slot,
+                oldest_block_parent: block.parent_root(),
+                state_upper_limit,
+                state_lower_limit: self.spec.genesis_slot,
+            })
         };
-        self.compare_and_set_anchor_info(None, Some(anchor_info))
+        self.compare_and_set_anchor_info(None, anchor_info)
     }
 
     /// Get a clone of the store's anchor info.
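
Note: the `init_anchor_info` hunk above selects `state_upper_limit` from three cases. A worked model of that selection with plain `u64` slots (the genesis archive-node case, where no anchor is stored at all, is handled separately in the diff):

```rust
const STATE_UPPER_LIMIT_NO_RETAIN: u64 = u64::MAX;

fn state_upper_limit(anchor_slot: u64, slots_per_restore_point: u64, retain: bool) -> u64 {
    if !retain {
        // Not storing historic states at all.
        STATE_UPPER_LIMIT_NO_RETAIN
    } else if anchor_slot % slots_per_restore_point == 0 {
        // Anchor already aligned to a restore point.
        anchor_slot
    } else {
        // Round up to the *next* restore point.
        (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point
    }
}

fn main() {
    assert_eq!(state_upper_limit(8192, 2048, true), 8192); // aligned anchor
    assert_eq!(state_upper_limit(8200, 2048, true), 10240); // next restore point
    assert_eq!(state_upper_limit(8200, 2048, false), u64::MAX); // no retention
}
```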
@@ -1667,11 +1737,26 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         self.hot_db.put(&CONFIG_KEY, &self.config.as_disk_config())
     }
 
-    /// Load the split point from disk.
-    fn load_split(&self) -> Result<Option<Split>, Error> {
+    /// Load the split point from disk, sans block root.
+    fn load_split_partial(&self) -> Result<Option<Split>, Error> {
         self.hot_db.get(&SPLIT_KEY)
     }
 
+    /// Load the split point from disk, including block root.
+    fn load_split(&self) -> Result<Option<Split>, Error> {
+        match self.load_split_partial()? {
+            Some(mut split) => {
+                // Load the hot state summary to get the block root.
+                let summary = self.load_hot_state_summary(&split.state_root)?.ok_or(
+                    HotColdDBError::MissingSplitState(split.state_root, split.slot),
+                )?;
+                split.block_root = summary.latest_block_root;
+                Ok(Some(split))
+            }
+            None => Ok(None),
+        }
+    }
+
     /// Stage the split for storage to disk.
     pub fn store_split_in_batch(&self) -> KeyValueStoreOp {
         self.split.read_recursive().as_kv_store_op(SPLIT_KEY)
@@ -2089,43 +2174,40 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
 /// Advance the split point of the store, moving new finalized states to the freezer.
 pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     store: Arc<HotColdDB<E, Hot, Cold>>,
-    frozen_head_root: Hash256,
-    frozen_head: &BeaconState<E>,
+    finalized_state_root: Hash256,
+    finalized_block_root: Hash256,
+    finalized_state: &BeaconState<E>,
 ) -> Result<(), Error> {
     debug!(
         store.log,
         "Freezer migration started";
-        "slot" => frozen_head.slot()
+        "slot" => finalized_state.slot()
     );
 
     // 0. Check that the migration is sensible.
-    // The new frozen head must increase the current split slot, and lie on an epoch
+    // The new finalized state must increase the current split slot, and lie on an epoch
     // boundary (in order for the hot state summary scheme to work).
     let current_split_slot = store.split.read_recursive().slot;
-    let anchor_slot = store
-        .anchor_info
-        .read_recursive()
-        .as_ref()
-        .map(|a| a.anchor_slot);
+    let anchor_info = store.anchor_info.read_recursive().clone();
+    let anchor_slot = anchor_info.as_ref().map(|a| a.anchor_slot);
 
-    if frozen_head.slot() < current_split_slot {
+    if finalized_state.slot() < current_split_slot {
         return Err(HotColdDBError::FreezeSlotError {
             current_split_slot,
-            proposed_split_slot: frozen_head.slot(),
+            proposed_split_slot: finalized_state.slot(),
         }
         .into());
     }
 
-    if frozen_head.slot() % E::slots_per_epoch() != 0 {
-        return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into());
+    if finalized_state.slot() % E::slots_per_epoch() != 0 {
+        return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into());
     }
 
     let mut hot_db_ops: Vec<StoreOp<E>> = Vec::new();
 
-    // 1. Copy all of the states between the head and the split slot, from the hot DB
+    // 1. Copy all of the states between the new finalized state and the split slot, from the hot DB
     // to the cold DB. Delete the execution payloads of these now-finalized blocks.
-    let state_root_iter = RootsIterator::new(&store, frozen_head);
+    let state_root_iter = RootsIterator::new(&store, finalized_state);
 
     for maybe_tuple in state_root_iter.take_while(|result| match result {
         Ok((_, _, slot)) => {
            slot >= &current_split_slot
@@ -2135,6 +2217,29 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     }) {
         let (block_root, state_root, slot) = maybe_tuple?;
 
+        // Delete the execution payload if payload pruning is enabled. At a skipped slot we may
+        // delete the payload for the finalized block itself, but that's OK as we only guarantee
+        // that payloads are present for slots >= the split slot. The payload fetching code is also
+        // forgiving of missing payloads.
+        if store.config.prune_payloads {
+            hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root));
+        }
+
+        // Delete the old summary, and the full state if we lie on an epoch boundary.
+        hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot)));
+
+        // Do not try to store states if a restore point is yet to be stored, or will never be
+        // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state
+        // which always needs to be copied from the hot DB to the freezer and should not be deleted.
+        if slot != 0
+            && anchor_info
+                .as_ref()
+                .map_or(false, |anchor| slot < anchor.state_upper_limit)
+        {
+            debug!(store.log, "Pruning finalized state"; "slot" => slot);
+            continue;
+        }
+
         let mut cold_db_ops: Vec<KeyValueStoreOp> = Vec::new();
 
         if slot % store.config.slots_per_restore_point == 0 {
@@ -2153,17 +2258,6 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
         // There are data dependencies between calls to `store_cold_state()` that prevent us from
         // doing one big call to `store.cold_db.do_atomically()` at end of the loop.
         store.cold_db.do_atomically(cold_db_ops)?;
-
-        // Delete the old summary, and the full state if we lie on an epoch boundary.
-        hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot)));
-
-        // Delete the execution payload if payload pruning is enabled. At a skipped slot we may
-        // delete the payload for the finalized block itself, but that's OK as we only guarantee
-        // that payloads are present for slots >= the split slot. The payload fetching code is also
-        // forgiving of missing payloads.
-        if store.config.prune_payloads {
-            hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root));
-        }
     }
 
     // Warning: Critical section. We have to take care not to put any of the two databases in an
@@ -2203,8 +2297,9 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     // Before updating the in-memory split value, we flush it to disk first, so that should the
     // OS process die at this point, we pick up from the right place after a restart.
     let split = Split {
-        slot: frozen_head.slot(),
-        state_root: frozen_head_root,
+        slot: finalized_state.slot(),
+        state_root: finalized_state_root,
+        block_root: finalized_block_root,
     };
     store.hot_db.put_sync(&SPLIT_KEY, &split)?;
 
@@ -2220,7 +2315,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     debug!(
         store.log,
         "Freezer migration complete";
-        "slot" => frozen_head.slot()
+        "slot" => finalized_state.slot()
     );
 
     Ok(())
@@ -2229,8 +2324,16 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
 /// Struct for storing the split slot and state root in the database.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Encode, Decode, Deserialize, Serialize)]
 pub struct Split {
-    pub(crate) slot: Slot,
-    pub(crate) state_root: Hash256,
+    pub slot: Slot,
+    pub state_root: Hash256,
+    /// The block root of the split state.
+    ///
+    /// This is used to provide special handling for the split state in the case where there are
+    /// skipped slots. The split state will *always* be the advanced state, so callers
+    /// who only have the finalized block root should use `get_advanced_hot_state` to get this state,
+    /// rather than fetching `block.state_root()` (the unaligned state) which will have been pruned.
+    #[ssz(skip_serializing, skip_deserializing)]
+    pub block_root: Hash256,
 }
 
 impl StoreItem for Split {
@@ -17,6 +17,9 @@ pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4);
 pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5);
 pub const BLOB_INFO_KEY: Hash256 = Hash256::repeat_byte(6);
 
+/// State upper limit value used to indicate that a node is not storing historic states.
+pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX);
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 pub struct SchemaVersion(pub u64);
 
@@ -75,7 +75,7 @@ Once backfill is complete, a `INFO Historical block download complete` log will
 
 > Note: Since [v4.1.0](https://github.com/sigp/lighthouse/releases/tag/v4.1.0), Lighthouse implements rate-limited backfilling to mitigate validator performance issues after a recent checkpoint sync. This means that the speed at which historical blocks are downloaded is limited, typically to less than 20 slots/sec. This will not affect validator performance. However, if you would still prefer to sync the chain as fast as possible, you can add the flag `--disable-backfill-rate-limiting` to the beacon node.
 
 > Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node.
 
 ## FAQ
 
@@ -116,8 +116,9 @@ states:
   database. Additionally, the genesis block is always available.
 * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in
   the database. The minimum value is 0, indicating that the genesis state is always available.
-* `state_upper_limit`: All states with slots _greater than or equal to_ this value are available
-  in the database.
+* `state_upper_limit`: All states with slots _greater than or equal to_ `min(split.slot,
+  state_upper_limit)` are available in the database. In the case where the `state_upper_limit` is
+  higher than the `split.slot`, this means states are not being written to the freezer database.
 
 Reconstruction runs from the state lower limit to the upper limit, narrowing the window of
 unavailable states as it goes. It will log messages like the following to show its progress:
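
Note: the book hunk above redefines availability in terms of `min(split.slot, state_upper_limit)`. A hedged sketch of the predicate it describes, with plain `u64` slots:

```rust
// A historic state is available if it is at or below the lower limit, or at or
// above `min(split_slot, state_upper_limit)`. When `state_upper_limit` exceeds
// `split_slot`, no states are being written to the freezer at all.
fn state_available(slot: u64, split_slot: u64, state_upper_limit: u64, state_lower_limit: u64) -> bool {
    slot <= state_lower_limit || slot >= split_slot.min(state_upper_limit)
}

fn main() {
    // Archive node mid-reconstruction: lower limit 0, upper limit 8192, split at 10240.
    assert!(state_available(0, 10240, 8192, 0));
    assert!(state_available(9000, 10240, 8192, 0));
    assert!(!state_available(4096, 10240, 8192, 0));
}
```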
@@ -153,18 +154,8 @@ To manually specify a checkpoint use the following two flags:
 * `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob
 * `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob
 
-_Both_ the state and block must be provided and **must** adhere to the [Alignment
-Requirements](#alignment-requirements) described below.
-
-### Alignment Requirements
-
-* The block must be a finalized block from an epoch boundary, i.e. `block.slot() % 32 == 0`.
-* The state must be the state corresponding to `block` with `state.slot() == block.slot()`
-  and `state.hash_tree_root() == block.state_root()`.
-
-These requirements are imposed to align with Lighthouse's database schema, and notably exclude
-finalized blocks from skipped slots. You can avoid alignment issues by using
-[Automatic Checkpoint Sync](#automatic-checkpoint-sync), which will search for a suitable block
-and state pair.
+_Both_ the state and block must be provided and the state **must** match the block. The
+state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary,
+in which case it will be assumed to be finalized at that epoch.
 
 [weak-subj]: https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/
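
Note: the prose above relaxes the old alignment rule: the checkpoint state may either be the block's own post-state or an advanced epoch-boundary state built on it. A hedged sketch of that acceptance rule; the types and the `latest_block_state_root` field are hypothetical stand-ins, not the real `BeaconState`/`SignedBeaconBlock` API:

```rust
struct CheckpointBlock {
    slot: u64,
    state_root: [u8; 32],
}

struct CheckpointState {
    slot: u64,
    root: [u8; 32],
    // Hypothetical: root of the post-state of the latest block applied to this state.
    latest_block_state_root: [u8; 32],
}

fn checkpoint_pair_acceptable(block: &CheckpointBlock, state: &CheckpointState, slots_per_epoch: u64) -> bool {
    // Unadvanced: the state is exactly the block's post-state.
    let unadvanced = state.slot == block.slot && state.root == block.state_root;
    // Advanced: an epoch-boundary state reached from the block via skipped slots only.
    let advanced = state.slot > block.slot
        && state.slot % slots_per_epoch == 0
        && state.latest_block_state_root == block.state_root;
    unadvanced || advanced
}

fn main() {
    let block = CheckpointBlock { slot: 96, state_root: [7; 32] };
    let advanced = CheckpointState { slot: 128, root: [8; 32], latest_block_state_root: [7; 32] };
    assert!(checkpoint_pair_acceptable(&block, &advanced, 32));
}
```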
|
@ -9,7 +9,7 @@ particularly useful for development but still a good way to ensure you have the
|
|||||||
base dependencies.
|
base dependencies.
|
||||||
|
|
||||||
The additional requirements for developers are:
|
The additional requirements for developers are:
|
||||||
- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/anvil). This is used to
|
- [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil). This is used to
|
||||||
simulate the execution chain during tests. You'll get failures during tests if you
|
simulate the execution chain during tests. You'll get failures during tests if you
|
||||||
don't have `anvil` available on your `PATH`.
|
don't have `anvil` available on your `PATH`.
|
||||||
- [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by
|
- [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by
|
||||||
|
@ -751,7 +751,7 @@ where
|
|||||||
.unrealized_justified_checkpoint
|
.unrealized_justified_checkpoint
|
||||||
.zip(parent_block.unrealized_finalized_checkpoint)
|
.zip(parent_block.unrealized_finalized_checkpoint)
|
||||||
.filter(|(parent_justified, parent_finalized)| {
|
.filter(|(parent_justified, parent_finalized)| {
|
||||||
parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 >= block_epoch
|
parent_justified.epoch == block_epoch && parent_finalized.epoch + 1 == block_epoch
|
||||||
});
|
});
|
||||||
|
|
||||||
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some((
|
let (unrealized_justified_checkpoint, unrealized_finalized_checkpoint) = if let Some((
|
||||||
|
@ -366,21 +366,6 @@ fn genesis_backfill_with_historic_flag() {
|
|||||||
.with_config(|config| assert_eq!(config.chain.genesis_backfill, true));
|
.with_config(|config| assert_eq!(config.chain.genesis_backfill, true));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn always_prefer_builder_payload_flag() {
|
|
||||||
CommandLineTest::new()
|
|
||||||
.flag("always-prefer-builder-payload", None)
|
|
||||||
.run_with_zero_port()
|
|
||||||
.with_config(|config| assert!(config.always_prefer_builder_payload));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn no_flag_sets_always_prefer_builder_payload_to_false() {
|
|
||||||
CommandLineTest::new()
|
|
||||||
.run_with_zero_port()
|
|
||||||
.with_config(|config| assert!(!config.always_prefer_builder_payload));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tests for Eth1 flags.
|
// Tests for Eth1 flags.
|
||||||
#[test]
|
#[test]
|
||||||
fn dummy_eth1_flag() {
|
fn dummy_eth1_flag() {
|
||||||
@ -735,6 +720,38 @@ fn builder_fallback_flags() {
             );
         },
     );
+    run_payload_builder_flag_test_with_config(
+        "builder",
+        "http://meow.cats",
+        Some("always-prefer-builder-payload"),
+        None,
+        |config| {
+            assert_eq!(
+                config
+                    .execution_layer
+                    .as_ref()
+                    .unwrap()
+                    .always_prefer_builder_payload,
+                true
+            );
+        },
+    );
+    run_payload_builder_flag_test_with_config(
+        "builder",
+        "http://meow.cats",
+        None,
+        None,
+        |config| {
+            assert_eq!(
+                config
+                    .execution_layer
+                    .as_ref()
+                    .unwrap()
+                    .always_prefer_builder_payload,
+                false
+            );
+        },
+    );
     run_payload_builder_flag_test_with_config(
         "builder",
         "http://meow.cats",
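
These two calls replace the standalone tests deleted above: `always_prefer_builder_payload` now lives on the execution layer's config and is asserted through the shared helper. A self-contained sketch of the call pattern with stand-in types; the real `Config` comes from the beacon node and the real helper lives in this test file, so the names and signature here are illustrative assumptions:

// Stand-in config types; Lighthouse's real ones differ.
#[derive(Default)]
struct ExecutionLayerConfig {
    always_prefer_builder_payload: bool,
}

#[derive(Default)]
struct Config {
    execution_layer: Option<ExecutionLayerConfig>,
}

// A hypothetical mock of the helper: pretend the CLI parser produced the
// config, then hand it to the supplied assertion closure.
fn run_payload_builder_flag_test_with_config(
    _flag: &str,
    _value: &str,
    extra_flag: Option<&str>,
    _extra_value: Option<&str>,
    check: impl Fn(&Config),
) {
    let config = Config {
        execution_layer: Some(ExecutionLayerConfig {
            always_prefer_builder_payload: extra_flag
                == Some("always-prefer-builder-payload"),
        }),
    };
    check(&config);
}

fn main() {
    run_payload_builder_flag_test_with_config(
        "builder",
        "http://meow.cats",
        Some("always-prefer-builder-payload"),
        None,
        |config| {
            assert!(config
                .execution_layer
                .as_ref()
                .unwrap()
                .always_prefer_builder_payload)
        },
    );
}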
@ -2420,3 +2437,18 @@ fn beacon_processor_zero_workers() {
         .flag("beacon-processor-max-workers", Some("0"))
         .run_with_zero_port();
 }
+
+#[test]
+fn http_sse_capacity_multiplier_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 1));
+}
+
+#[test]
+fn http_sse_capacity_multiplier_override() {
+    CommandLineTest::new()
+        .flag("http-sse-capacity-multiplier", Some("10"))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.http_api.sse_capacity_multiplier, 10));
+}
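
The new `--http-sse-capacity-multiplier` flag scales the capacity of the HTTP API's server-sent-events channels, with a default multiplier of 1. A minimal sketch of how such a multiplier might size a channel; `BASE_CAPACITY` and `sse_channel_capacity` are illustrative stand-ins, not Lighthouse's actual names:

// Assumed base size for illustration only.
const BASE_CAPACITY: usize = 16;

// Hypothetical sizing function: the flag simply multiplies the base capacity.
fn sse_channel_capacity(multiplier: usize) -> usize {
    BASE_CAPACITY * multiplier
}

fn main() {
    assert_eq!(sse_channel_capacity(1), 16); // default multiplier
    assert_eq!(sse_channel_capacity(10), 160); // --http-sse-capacity-multiplier 10
}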
@ -1,25 +0,0 @@
-FROM rust:1.68.2-bullseye AS builder
-RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
-COPY . lighthouse
-
-# Build lighthouse directly with a cargo build command, bypassing the Makefile.
-RUN cd lighthouse && LD_LIBRARY_PATH=/lighthouse/testing/antithesis/libvoidstar/ RUSTFLAGS="-Cpasses=sancov-module -Cllvm-args=-sanitizer-coverage-level=3 -Cllvm-args=-sanitizer-coverage-trace-pc-guard -Ccodegen-units=1 -Cdebuginfo=2 -L/lighthouse/testing/antithesis/libvoidstar/ -lvoidstar" cargo build --release --manifest-path lighthouse/Cargo.toml --target x86_64-unknown-linux-gnu --features modern --verbose --bin lighthouse
-# build lcli binary directly with cargo install command, bypassing the makefile
-RUN cargo install --path /lighthouse/lcli --force --locked
-
-FROM ubuntu:latest
-RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \
-    libssl-dev \
-    ca-certificates \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# create and move the libvoidstar file
-RUN mkdir libvoidstar
-COPY --from=builder /lighthouse/testing/antithesis/libvoidstar/libvoidstar.so /usr/lib/libvoidstar.so
-
-# set the env variable to avoid having to always set it
-ENV LD_LIBRARY_PATH=/usr/lib
-# move the lighthouse binary and lcli binary
-COPY --from=builder /lighthouse/target/x86_64-unknown-linux-gnu/release/lighthouse /usr/local/bin/lighthouse
-COPY --from=builder /lighthouse/target/release/lcli /usr/local/bin/lcli
Binary file not shown.
@ -7,7 +7,7 @@ use beacon_chain::{
         obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation,
     },
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
-    BeaconChainTypes, CachedHead, NotifyExecutionLayer,
+    BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer,
 };
 use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1};
 use serde::Deserialize;
@ -303,6 +303,10 @@ impl<E: EthSpec> Tester<E> {
         let harness = BeaconChainHarness::<EphemeralHarnessType<E>>::builder(E::default())
             .spec(spec.clone())
             .keypairs(vec![])
+            .chain_config(ChainConfig {
+                reconstruct_historic_states: true,
+                ..ChainConfig::default()
+            })
             .genesis_state_ephemeral_store(case.anchor_state.clone())
             .mock_execution_layer()
             .recalculate_fork_times_with_genesis(0)
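
The inserted builder call enables historic state reconstruction for the fork choice test harness using Rust's struct-update syntax: one field is overridden and the rest take their defaults. A minimal illustration with a hypothetical `DemoChainConfig` standing in for `beacon_chain::ChainConfig`:

// Stand-in for `ChainConfig`; the field names here are illustrative.
#[derive(Default)]
struct DemoChainConfig {
    reconstruct_historic_states: bool,
    genesis_backfill: bool,
}

fn main() {
    // Override one field, take defaults for everything else.
    let config = DemoChainConfig {
        reconstruct_historic_states: true,
        ..DemoChainConfig::default()
    };
    assert!(config.reconstruct_historic_states);
    assert!(!config.genesis_backfill);
}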
@ -115,6 +115,9 @@ pub fn testing_client_config() -> ClientConfig {
         genesis_time: now,
     };
 
+    // Simulator tests expect historic states to be available for post-run checks.
+    client_config.chain.reconstruct_historic_states = true;
+
     // Specify a constant count of beacon processor workers. Having this number
     // too low can cause annoying HTTP timeouts, especially on Github runners
     // with 2 logical CPUs.
@ -2,8 +2,10 @@ use crate::beacon_node_fallback::{OfflineOnFailure, RequireSynced};
 use crate::{
     doppelganger_service::DoppelgangerStatus,
     duties_service::{DutiesService, Error},
+    http_metrics::metrics,
     validator_store::Error as ValidatorStoreError,
 };
 
 use futures::future::join_all;
 use itertools::Itertools;
 use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};
@ -426,6 +428,10 @@ pub async fn poll_sync_committee_duties_for_period<T: SlotClock + 'static, E: Et
             RequireSynced::No,
             OfflineOnFailure::Yes,
             |beacon_node| async move {
+                let _timer = metrics::start_timer_vec(
+                    &metrics::DUTIES_SERVICE_TIMES,
+                    &[metrics::VALIDATOR_DUTIES_SYNC_HTTP_POST],
+                );
                 beacon_node
                     .post_validator_duties_sync(period_start_epoch, local_indices)
                     .await
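
The added `_timer` binding is an RAII guard: it keeps the timer alive for the rest of the async closure, and the elapsed time is recorded when the guard drops after the HTTP POST completes. A sketch of the pattern with stand-in types; the real `start_timer_vec` records into a Prometheus histogram vector rather than printing:

use std::time::Instant;

// Hypothetical guard type illustrating the pattern.
struct TimerGuard {
    label: &'static str,
    start: Instant,
}

impl Drop for TimerGuard {
    fn drop(&mut self) {
        // The real guard observes the elapsed time into a histogram here.
        println!("{} took {:?}", self.label, self.start.elapsed());
    }
}

fn start_timer(label: &'static str) -> TimerGuard {
    TimerGuard { label, start: Instant::now() }
}

fn main() {
    let _timer = start_timer("validator_duties_sync_http_post");
    // ... perform the HTTP POST; the duration is recorded when `_timer`
    // drops at the end of this scope.
}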
@ -29,6 +29,7 @@ pub const UPDATE_ATTESTERS_FETCH: &str = "update_attesters_fetch";
 pub const UPDATE_ATTESTERS_STORE: &str = "update_attesters_store";
 pub const ATTESTER_DUTIES_HTTP_POST: &str = "attester_duties_http_post";
 pub const PROPOSER_DUTIES_HTTP_GET: &str = "proposer_duties_http_get";
+pub const VALIDATOR_DUTIES_SYNC_HTTP_POST: &str = "validator_duties_sync_http_post";
 pub const VALIDATOR_ID_HTTP_GET: &str = "validator_id_http_get";
 pub const SUBSCRIPTIONS_HTTP_POST: &str = "subscriptions_http_post";
 pub const UPDATE_PROPOSERS: &str = "update_proposers";
@ -1,8 +1,9 @@
 #![recursion_limit = "256"]
 #![cfg(unix)]
 
-use beacon_chain::test_utils::{
-    AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
+use beacon_chain::{
+    test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
+    ChainConfig,
 };
 use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts};
 use http_api::test_utils::{create_api_server, ApiServer};
@ -91,6 +92,10 @@ impl TesterBuilder {
     pub async fn new() -> TesterBuilder {
         let harness = BeaconChainHarness::builder(E::default())
             .default_spec()
+            .chain_config(ChainConfig {
+                reconstruct_historic_states: true,
+                ..ChainConfig::default()
+            })
             .deterministic_keypairs(VALIDATOR_COUNT)
             .fresh_ephemeral_store()
             .build();