# Remove checkpoint alignment requirements and enable historic state pruning (#4610)
## Issue Addressed

Closes #3210
Closes #3211

## Proposed Changes

- Checkpoint sync from the latest finalized state regardless of its alignment.
- Add the `block_root` to the database's split point. This is _only_ added to the in-memory split in order to avoid a schema migration. See `load_split`.
- Add a new method to the DB called `get_advanced_state`, which looks up a state _by block root_, with a `state_root` as fallback (a toy sketch of this lookup order follows this summary). Using this method prevents accidental accesses of the split's unadvanced state, which does not exist in the hot DB and is not guaranteed to exist in the freezer DB at all. Previously Lighthouse would look up this state _from the freezer DB_, even if it was required for block/attestation processing, which was suboptimal.
- Replace several state look-ups in block and attestation processing with `get_advanced_state` so that they can't hit the split block's unadvanced state.
- Do not store any states in the freezer database by default. All states will be deleted upon being evicted from the hot database unless `--reconstruct-historic-states` is set. The anchor info which was previously used for checkpoint sync is used to implement this, including when syncing from genesis.

## Additional Info

Needs further testing. I want to stress-test the pruned database under Hydra.

The `get_advanced_state` method is intended to become more relevant over time: `tree-states` includes an identically named method that returns advanced states from its in-memory cache.

Co-authored-by: realbigsean <seananderson33@gmail.com>
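To make the lookup order described above concrete, here is a minimal sketch of a `get_advanced_state`-style accessor. It deliberately uses toy types rather than the real Lighthouse store API: the map-based `HotDb`, its field names, and the `String` state stand-in are all illustrative assumptions, not the actual implementation.

```rust
use std::collections::HashMap;

type BlockRoot = [u8; 32];
type StateRoot = [u8; 32];
type Slot = u64;

/// Toy stand-in for the hot database. Real Lighthouse state storage is far
/// more involved; only the lookup *order* matters here.
struct HotDb {
    /// Advanced states indexed by the block root they descend from.
    advanced: HashMap<BlockRoot, (StateRoot, Slot, String)>,
    /// Plain state-root index (the fallback path).
    by_state_root: HashMap<StateRoot, (Slot, String)>,
}

impl HotDb {
    /// Look up a state *by block root*, preferring the advanced state (as
    /// long as it has not been advanced past `max_slot`), and falling back to
    /// the unadvanced state under `fallback_state_root` only if necessary.
    fn get_advanced_state(
        &self,
        block_root: BlockRoot,
        max_slot: Slot,
        fallback_state_root: StateRoot,
    ) -> Option<(StateRoot, String)> {
        if let Some((state_root, slot, state)) = self.advanced.get(&block_root) {
            if *slot <= max_slot {
                return Some((*state_root, state.clone()));
            }
        }
        self.by_state_root
            .get(&fallback_state_root)
            .map(|(_, state)| (fallback_state_root, state.clone()))
    }
}
```

Keying the preferred path on the block root is what lets callers avoid the split block's unadvanced state entirely, as the diffs below show.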
Parent: 687c58fde0 · Commit: 20067b9465
```diff
@@ -4656,6 +4656,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             self.log,
             "Produced block on state";
             "block_size" => block_size,
+            "slot" => block.slot(),
         );

         metrics::observe(&metrics::BLOCK_SIZE, block_size as f64);
```
```diff
@@ -5571,14 +5572,16 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt {
             (state, state_root)
         } else {
-            let state_root = head_block.state_root;
-            let state = self
+            let block_state_root = head_block.state_root;
+            let max_slot = shuffling_epoch.start_slot(T::EthSpec::slots_per_epoch());
+            let (state_root, state) = self
                 .store
                 .get_inconsistent_state_for_attestation_verification_only(
-                    &state_root,
-                    Some(head_block.slot),
+                    &head_block_root,
+                    max_slot,
+                    block_state_root,
                 )?
-                .ok_or(Error::MissingBeaconState(head_block.state_root))?;
+                .ok_or(Error::MissingBeaconState(block_state_root))?;
             (state, state_root)
         };

```
```diff
@@ -321,9 +321,17 @@ where
             .deconstruct()
             .0;

-        let state = self
+        let max_slot = self
+            .justified_checkpoint
+            .epoch
+            .start_slot(E::slots_per_epoch());
+        let (_, state) = self
             .store
-            .get_state(&justified_block.state_root(), Some(justified_block.slot()))
+            .get_advanced_hot_state(
+                self.justified_checkpoint.root,
+                max_slot,
+                justified_block.state_root(),
+            )
             .map_err(Error::FailedToReadState)?
             .ok_or_else(|| Error::MissingState(justified_block.state_root()))?;

```
```diff
@@ -1261,7 +1261,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {

         // Perform a sanity check on the pre-state.
         let parent_slot = parent.beacon_block.slot();
-        if state.slot() < parent_slot || state.slot() > parent_slot + 1 {
+        if state.slot() < parent_slot || state.slot() > block.slot() {
             return Err(BeaconChainError::BadPreState {
                 parent_root: parent.beacon_block_root,
                 parent_slot,
```
```diff
@@ -1760,13 +1760,18 @@ fn load_parent<T: BeaconChainTypes>(
             BlockError::from(BeaconChainError::MissingBeaconBlock(block.parent_root()))
         })?;

-    // Load the parent blocks state from the database, returning an error if it is not found.
+    // Load the parent block's state from the database, returning an error if it is not found.
     // It is an error because if we know the parent block we should also know the parent state.
-    let parent_state_root = parent_block.state_root();
-    let parent_state = chain
-        .get_state(&parent_state_root, Some(parent_block.slot()))?
+    // Retrieve any state that is advanced through to at most `block.slot()`: this is
+    // particularly important if `block` descends from the finalized/split block, but at a slot
+    // prior to the finalized slot (which is invalid and inaccessible in our DB schema).
+    let (parent_state_root, parent_state) = chain
+        .store
+        .get_advanced_hot_state(root, block.slot(), parent_block.state_root())?
         .ok_or_else(|| {
-            BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root))
+            BeaconChainError::DBInconsistent(
+                format!("Missing state for parent block {root:?}",),
+            )
         })?;

     metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES);
```
```diff
@@ -24,8 +24,9 @@ use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::RwLock;
 use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold};
 use slasher::Slasher;
-use slog::{crit, error, info, Logger};
+use slog::{crit, debug, error, info, Logger};
 use slot_clock::{SlotClock, TestingSlotClock};
+use state_processing::per_slot_processing;
 use std::marker::PhantomData;
 use std::sync::Arc;
 use std::time::Duration;
```
```diff
@@ -287,7 +288,7 @@ where
         let genesis_state = store
             .get_state(&genesis_block.state_root(), Some(genesis_block.slot()))
             .map_err(|e| descriptive_db_error("genesis state", &e))?
-            .ok_or("Genesis block not found in store")?;
+            .ok_or("Genesis state not found in store")?;

         self.genesis_time = Some(genesis_state.genesis_time());

```
```diff
@@ -382,6 +383,16 @@ where
         let (genesis, updated_builder) = self.set_genesis_state(beacon_state)?;
         self = updated_builder;

+        // Stage the database's metadata fields for atomic storage when `build` is called.
+        // Since v4.4.0 we will set the anchor with a dummy state upper limit in order to prevent
+        // historic states from being retained (unless `--reconstruct-historic-states` is set).
+        let retain_historic_states = self.chain_config.reconstruct_historic_states;
+        self.pending_io_batch.push(
+            store
+                .init_anchor_info(genesis.beacon_block.message(), retain_historic_states)
+                .map_err(|e| format!("Failed to initialize genesis anchor: {:?}", e))?,
+        );
+
         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis)
             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;
         let current_slot = None;
```
```diff
@@ -408,30 +419,28 @@ where
         weak_subj_block: SignedBeaconBlock<TEthSpec>,
         genesis_state: BeaconState<TEthSpec>,
     ) -> Result<Self, String> {
-        let store = self.store.clone().ok_or("genesis_state requires a store")?;
+        let store = self
+            .store
+            .clone()
+            .ok_or("weak_subjectivity_state requires a store")?;
+        let log = self
+            .log
+            .as_ref()
+            .ok_or("weak_subjectivity_state requires a log")?;

-        let weak_subj_slot = weak_subj_state.slot();
-        let weak_subj_block_root = weak_subj_block.canonical_root();
-        let weak_subj_state_root = weak_subj_block.state_root();
-
-        // Check that the given block lies on an epoch boundary. Due to the database only storing
-        // full states on epoch boundaries and at restore points it would be difficult to support
-        // starting from a mid-epoch state.
-        if weak_subj_slot % TEthSpec::slots_per_epoch() != 0 {
-            return Err(format!(
-                "Checkpoint block at slot {} is not aligned to epoch start. \
-                 Please supply an aligned checkpoint with block.slot % 32 == 0",
-                weak_subj_block.slot(),
-            ));
-        }
-
-        // Check that the block and state have consistent slots and state roots.
-        if weak_subj_state.slot() != weak_subj_block.slot() {
-            return Err(format!(
-                "Slot of snapshot block ({}) does not match snapshot state ({})",
-                weak_subj_block.slot(),
-                weak_subj_state.slot(),
-            ));
+        // Ensure the state is advanced to an epoch boundary.
+        let slots_per_epoch = TEthSpec::slots_per_epoch();
+        if weak_subj_state.slot() % slots_per_epoch != 0 {
+            debug!(
+                log,
+                "Advancing checkpoint state to boundary";
+                "state_slot" => weak_subj_state.slot(),
+                "block_slot" => weak_subj_block.slot(),
+            );
+            while weak_subj_state.slot() % slots_per_epoch != 0 {
+                per_slot_processing(&mut weak_subj_state, None, &self.spec)
+                    .map_err(|e| format!("Error advancing state: {e:?}"))?;
+            }
         }

         // Prime all caches before storing the state in the database and computing the tree hash
```
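The hunk above is the heart of the alignment change: instead of rejecting a mid-epoch checkpoint state, the builder now advances it with empty-slot processing until it sits on an epoch boundary. The number of `per_slot_processing` calls this implies is plain slot arithmetic; a self-contained sketch (assuming mainnet's 32-slot epochs):

```rust
/// How many empty-slot transitions are needed to advance `slot` to the next
/// epoch boundary (zero if it is already aligned).
fn slots_to_epoch_boundary(slot: u64, slots_per_epoch: u64) -> u64 {
    (slots_per_epoch - slot % slots_per_epoch) % slots_per_epoch
}

fn main() {
    let slots_per_epoch = 32;
    // A checkpoint state at slot 285 is advanced 3 slots, to slot 288.
    assert_eq!(slots_to_epoch_boundary(285, slots_per_epoch), 3);
    // An already-aligned state is left untouched.
    assert_eq!(slots_to_epoch_boundary(288, slots_per_epoch), 0);
    println!("ok");
}
```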
```diff
@@ -439,15 +448,19 @@ where
         weak_subj_state
             .build_caches(&self.spec)
             .map_err(|e| format!("Error building caches on checkpoint state: {e:?}"))?;
-        let computed_state_root = weak_subj_state
+        let weak_subj_state_root = weak_subj_state
             .update_tree_hash_cache()
             .map_err(|e| format!("Error computing checkpoint state root: {:?}", e))?;

-        if weak_subj_state_root != computed_state_root {
+        let weak_subj_slot = weak_subj_state.slot();
+        let weak_subj_block_root = weak_subj_block.canonical_root();
+
+        // Validate the state's `latest_block_header` against the checkpoint block.
+        let state_latest_block_root = weak_subj_state.get_latest_block_root(weak_subj_state_root);
+        if weak_subj_block_root != state_latest_block_root {
             return Err(format!(
-                "Snapshot state root does not match block, expected: {:?}, got: {:?}",
-                weak_subj_state_root, computed_state_root
+                "Snapshot state's most recent block root does not match block, expected: {:?}, got: {:?}",
+                weak_subj_block_root, state_latest_block_root
             ));
         }

```
```diff
@@ -464,7 +477,7 @@ where

         // Set the store's split point *before* storing genesis so that genesis is stored
         // immediately in the freezer DB.
-        store.set_split(weak_subj_slot, weak_subj_state_root);
+        store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root);
         let (_, updated_builder) = self.set_genesis_state(genesis_state)?;
         self = updated_builder;

```
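The extra argument to `set_split` carries the split block root. As the summary notes, this field lives only in the in-memory split so the on-disk schema is untouched, and `load_split` reconstructs it on startup. A hedged sketch of that shape — the field names and byte layout here are illustrative, not the real Lighthouse schema:

```rust
/// Sketch of a split point whose `block_root` is in-memory only.
#[derive(Debug, Default, Clone, Copy)]
struct Split {
    slot: u64,
    state_root: [u8; 32],
    /// Not persisted: reconstructed on startup (cf. `load_split`) by looking
    /// up the block at the split point, avoiding a schema migration.
    block_root: [u8; 32],
}

impl Split {
    /// Only `slot` and `state_root` are written to disk, so old and new
    /// databases serialize identically.
    fn to_disk_bytes(&self) -> Vec<u8> {
        let mut bytes = self.slot.to_le_bytes().to_vec();
        bytes.extend_from_slice(&self.state_root);
        bytes
    }
}
```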
```diff
@@ -480,10 +493,11 @@ where
         // Stage the database's metadata fields for atomic storage when `build` is called.
         // This prevents the database from restarting in an inconsistent state if the anchor
         // info or split point is written before the `PersistedBeaconChain`.
+        let retain_historic_states = self.chain_config.reconstruct_historic_states;
         self.pending_io_batch.push(store.store_split_in_batch());
         self.pending_io_batch.push(
             store
-                .init_anchor_info(weak_subj_block.message())
+                .init_anchor_info(weak_subj_block.message(), retain_historic_states)
                 .map_err(|e| format!("Failed to initialize anchor info: {:?}", e))?,
         );

```
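Threading `retain_historic_states` into `init_anchor_info` is what makes pruning the default: per the earlier comment, the anchor is given a "dummy" state upper limit when reconstruction is not requested, so no evicted state is ever migrated into the freezer. A hedged sketch of that decision — the `state_upper_limit` field mirrors Lighthouse's anchor info, but the sentinel value and helper are illustrative:

```rust
type Slot = u64;

/// Sentinel meaning "never store historic states" (illustrative value).
const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::MAX;

struct AnchorInfo {
    anchor_slot: Slot,
    /// Historic states below this slot are not kept in the freezer.
    state_upper_limit: Slot,
}

fn init_anchor(anchor_slot: Slot, retain_historic_states: bool) -> AnchorInfo {
    AnchorInfo {
        anchor_slot,
        state_upper_limit: if retain_historic_states {
            // Reconstruction requested: states back to the anchor are kept.
            anchor_slot
        } else {
            // Default: prune all historic states as they leave the hot DB.
            STATE_UPPER_LIMIT_NO_RETAIN
        },
    }
}
```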
```diff
@@ -503,13 +517,12 @@ where
         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &snapshot)
             .map_err(|e| format!("Unable to initialize fork choice store: {e:?}"))?;

-        let current_slot = Some(snapshot.beacon_block.slot());
         let fork_choice = ForkChoice::from_anchor(
             fc_store,
             snapshot.beacon_block_root,
             &snapshot.beacon_block,
             &snapshot.beacon_state,
-            current_slot,
+            Some(weak_subj_slot),
             &self.spec,
         )
         .map_err(|e| format!("Unable to initialize ForkChoice: {:?}", e))?;
```
```diff
@@ -672,9 +685,8 @@ where
             Err(e) => return Err(descriptive_db_error("head block", &e)),
         };

-        let head_state_root = head_block.state_root();
-        let head_state = store
-            .get_state(&head_state_root, Some(head_block.slot()))
+        let (_head_state_root, head_state) = store
+            .get_advanced_hot_state(head_block_root, current_slot, head_block.state_root())
             .map_err(|e| descriptive_db_error("head state", &e))?
             .ok_or("Head state not found in store")?;

```
```diff
@@ -47,7 +47,8 @@ use crate::{
 };
 use eth2::types::{EventKind, SseChainReorg, SseFinalizedCheckpoint, SseHead, SseLateHead};
 use fork_choice::{
-    ExecutionStatus, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock, ResetPayloadStatuses,
+    ExecutionStatus, ForkChoiceStore, ForkChoiceView, ForkchoiceUpdateParameters, ProtoBlock,
+    ResetPayloadStatuses,
 };
 use itertools::process_results;
 use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
```
```diff
@@ -298,10 +299,10 @@ impl<T: BeaconChainTypes> CanonicalHead<T> {
         let beacon_block = store
             .get_full_block(&beacon_block_root)?
             .ok_or(Error::MissingBeaconBlock(beacon_block_root))?;
-        let beacon_state_root = beacon_block.state_root();
-        let beacon_state = store
-            .get_state(&beacon_state_root, Some(beacon_block.slot()))?
-            .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+        let current_slot = fork_choice.fc_store().get_current_slot();
+        let (_, beacon_state) = store
+            .get_advanced_hot_state(beacon_block_root, current_slot, beacon_block.state_root())?
+            .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?;

         let snapshot = BeaconSnapshot {
             beacon_block_root,
```
```diff
@@ -669,10 +670,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .get_full_block(&new_view.head_block_root)?
             .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?;

-        let beacon_state_root = beacon_block.state_root();
-        let beacon_state: BeaconState<T::EthSpec> = self
-            .get_state(&beacon_state_root, Some(beacon_block.slot()))?
-            .ok_or(Error::MissingBeaconState(beacon_state_root))?;
+        let (_, beacon_state) = self
+            .store
+            .get_advanced_hot_state(
+                new_view.head_block_root,
+                current_slot,
+                beacon_block.state_root(),
+            )?
+            .ok_or(Error::MissingBeaconState(beacon_block.state_root()))?;

         Ok(BeaconSnapshot {
             beacon_block: Arc::new(beacon_block),
```
```diff
@@ -266,6 +266,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
         debug!(log, "Database consolidation started");

         let finalized_state_root = notif.finalized_state_root;
+        let finalized_block_root = notif.finalized_checkpoint.root;

         let finalized_state = match db.get_state(&finalized_state_root.into(), None) {
             Ok(Some(state)) => state,
```
```diff
@@ -319,7 +320,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Ho
             }
         };

-        match migrate_database(db.clone(), finalized_state_root.into(), &finalized_state) {
+        match migrate_database(
+            db.clone(),
+            finalized_state_root.into(),
+            finalized_block_root,
+            &finalized_state,
+        ) {
             Ok(()) => {}
             Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                 debug!(
```
```diff
@@ -9,7 +9,7 @@ use beacon_chain::{
     test_utils::{
         test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
     },
-    BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped,
+    BeaconChain, BeaconChainError, BeaconChainTypes, ChainConfig, WhenSlotSkipped,
 };
 use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
 use int_to_bytes::int_to_bytes32;
```
```diff
@@ -47,6 +47,10 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessTyp

     let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .spec(spec)
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .fresh_ephemeral_store()
         .mock_execution_layer()
```
```diff
@@ -79,6 +83,10 @@ fn get_harness_capella_spec(

     let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .spec(spec.clone())
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .keypairs(validator_keypairs)
         .withdrawal_keypairs(
             KEYPAIRS[0..validator_count]
```
```diff
@@ -4,7 +4,8 @@ use beacon_chain::test_utils::{
     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
 };
 use beacon_chain::{
-    BeaconSnapshot, BlockError, ChainSegmentResult, IntoExecutionPendingBlock, NotifyExecutionLayer,
+    BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock,
+    NotifyExecutionLayer,
 };
 use lazy_static::lazy_static;
 use logging::test_logger;
```
```diff
@@ -69,6 +70,10 @@ async fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
 fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<E>> {
     let harness = BeaconChainHarness::builder(MainnetEthSpec)
         .default_spec()
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .fresh_ephemeral_store()
         .mock_execution_layer()
```
```diff
@@ -7,7 +7,7 @@ use beacon_chain::otb_verification_service::{
 use beacon_chain::{
     canonical_head::{CachedHead, CanonicalHead},
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
-    BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer,
+    BeaconChainError, BlockError, ChainConfig, ExecutionPayloadError, NotifyExecutionLayer,
     OverrideForkchoiceUpdate, StateSkipConfig, WhenSlotSkipped,
     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
```
```diff
@@ -59,6 +59,10 @@ impl InvalidPayloadRig {

         let harness = BeaconChainHarness::builder(MainnetEthSpec)
             .spec(spec)
+            .chain_config(ChainConfig {
+                reconstruct_historic_states: true,
+                ..ChainConfig::default()
+            })
             .logger(test_logger())
             .deterministic_keypairs(VALIDATOR_COUNT)
             .mock_execution_layer()
```
```diff
@@ -9,14 +9,15 @@ use beacon_chain::test_utils::{
 use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
 use beacon_chain::{
     historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain,
-    BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer,
-    ServerSentEventHandler, WhenSlotSkipped,
+    BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig,
+    NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped,
 };
 use lazy_static::lazy_static;
 use logging::test_logger;
 use maplit::hashset;
 use rand::Rng;
-use state_processing::BlockReplayer;
+use slot_clock::{SlotClock, TestingSlotClock};
+use state_processing::{state_advance::complete_state_advance, BlockReplayer};
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::convert::TryInto;
```
```diff
@@ -65,6 +66,19 @@ fn get_store_with_spec(
 fn get_harness(
     store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
     validator_count: usize,
+) -> TestHarness {
+    // Most tests expect to retain historic states, so we use this as the default.
+    let chain_config = ChainConfig {
+        reconstruct_historic_states: true,
+        ..ChainConfig::default()
+    };
+    get_harness_generic(store, validator_count, chain_config)
+}
+
+fn get_harness_generic(
+    store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
+    validator_count: usize,
+    chain_config: ChainConfig,
 ) -> TestHarness {
     let harness = BeaconChainHarness::builder(MinimalEthSpec)
         .default_spec()
```
```diff
@@ -72,6 +86,7 @@ fn get_harness(
         .logger(store.logger().clone())
         .fresh_disk_store(store)
         .mock_execution_layer()
+        .chain_config(chain_config)
         .build();
     harness.advance_slot();
     harness
```
```diff
@@ -460,13 +475,15 @@ async fn block_replay_with_inaccurate_state_roots() {
         .await;

     // Slot must not be 0 mod 32 or else no blocks will be replayed.
-    let (mut head_state, head_root) = harness.get_current_state_and_root();
+    let (mut head_state, head_state_root) = harness.get_current_state_and_root();
+    let head_block_root = harness.head_block_root();
     assert_ne!(head_state.slot() % 32, 0);

-    let mut fast_head_state = store
+    let (_, mut fast_head_state) = store
         .get_inconsistent_state_for_attestation_verification_only(
-            &head_root,
-            Some(head_state.slot()),
+            &head_block_root,
+            head_state.slot(),
+            head_state_root,
         )
         .unwrap()
         .unwrap();
```
```diff
@@ -565,14 +582,7 @@ async fn block_replayer_hooks() {
 async fn delete_blocks_and_states() {
     let db_path = tempdir().unwrap();
     let store = get_store(&db_path);
-    let validators_keypairs =
-        types::test_utils::generate_deterministic_keypairs(LOW_VALIDATOR_COUNT);
-    let harness = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_disk_store(store.clone())
-        .mock_execution_layer()
-        .build();
+    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

     let unforked_blocks: u64 = 4 * E::slots_per_epoch();

```
```diff
@@ -1015,18 +1025,14 @@ fn check_shuffling_compatible(
 // Ensure blocks from abandoned forks are pruned from the Hot DB
 #[tokio::test]
 async fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let slots_per_epoch = rig.slots_per_epoch();
     let (mut state, state_root) = rig.get_current_state_and_root();

```
```diff
@@ -1125,18 +1131,14 @@ async fn prunes_abandoned_fork_between_two_finalized_checkpoints() {

 #[tokio::test]
 async fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let slots_per_epoch = rig.slots_per_epoch();
     let (state, state_root) = rig.get_current_state_and_root();

```
```diff
@@ -1260,15 +1262,11 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() {
     const HONEST_VALIDATOR_COUNT: usize = 32;
     const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let slots_per_epoch = rig.slots_per_epoch();
     let (mut state, state_root) = rig.get_current_state_and_root();

```
```diff
@@ -1352,18 +1350,14 @@ async fn pruning_does_not_touch_blocks_prior_to_finalization() {

 #[tokio::test]
 async fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let (state, state_root) = rig.get_current_state_and_root();

     // Fill up 0th epoch with canonical chain blocks
```
```diff
@@ -1497,18 +1491,14 @@ async fn prunes_fork_growing_past_youngest_finalized_checkpoint() {
 // This is to check if state outside of normal block processing are pruned correctly.
 #[tokio::test]
 async fn prunes_skipped_slots_states() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let (state, state_root) = rig.get_current_state_and_root();

     let canonical_slots_zeroth_epoch: Vec<Slot> =
```
```diff
@@ -1626,18 +1616,14 @@ async fn prunes_skipped_slots_states() {
 // This is to check if state outside of normal block processing are pruned correctly.
 #[tokio::test]
 async fn finalizes_non_epoch_start_slot() {
-    const HONEST_VALIDATOR_COUNT: usize = 32 + 0;
-    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16 - 0;
+    const HONEST_VALIDATOR_COUNT: usize = 32;
+    const ADVERSARIAL_VALIDATOR_COUNT: usize = 16;
     const VALIDATOR_COUNT: usize = HONEST_VALIDATOR_COUNT + ADVERSARIAL_VALIDATOR_COUNT;
-    let validators_keypairs = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
     let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
     let adversarial_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
-    let rig = BeaconChainHarness::builder(MinimalEthSpec)
-        .default_spec()
-        .keypairs(validators_keypairs)
-        .fresh_ephemeral_store()
-        .mock_execution_layer()
-        .build();
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let rig = get_harness(store.clone(), VALIDATOR_COUNT);
     let (state, state_root) = rig.get_current_state_and_root();

     let canonical_slots_zeroth_epoch: Vec<Slot> =
```
```diff
@@ -2053,39 +2039,82 @@ async fn garbage_collect_temp_states_from_failed_block() {
 }

 #[tokio::test]
-async fn weak_subjectivity_sync() {
+async fn weak_subjectivity_sync_easy() {
+    let num_initial_slots = E::slots_per_epoch() * 11;
+    let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9);
+    let slots = (1..num_initial_slots).map(Slot::new).collect();
+    weak_subjectivity_sync_test(slots, checkpoint_slot).await
+}
+
+#[tokio::test]
+async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() {
+    let num_initial_slots = E::slots_per_epoch() * 11;
+    let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9);
+    let slots = (1..num_initial_slots)
+        .map(Slot::new)
+        .filter(|&slot| {
+            // Skip 3 slots leading up to the checkpoint slot.
+            slot <= checkpoint_slot - 3 || slot > checkpoint_slot
+        })
+        .collect();
+    weak_subjectivity_sync_test(slots, checkpoint_slot).await
+}
+
+#[tokio::test]
+async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() {
+    let num_initial_slots = E::slots_per_epoch() * 11;
+    let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3);
+    let slots = (1..num_initial_slots)
+        .map(Slot::new)
+        .filter(|&slot| {
+            // Skip 3 slots after the checkpoint slot.
+            slot <= checkpoint_slot || slot > checkpoint_slot + 3
+        })
+        .collect();
+    weak_subjectivity_sync_test(slots, checkpoint_slot).await
+}
+
+async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
     // Build an initial chain on one harness, representing a synced node with full history.
-    let num_initial_blocks = E::slots_per_epoch() * 11;
     let num_final_blocks = E::slots_per_epoch() * 2;

     let temp1 = tempdir().unwrap();
     let full_store = get_store(&temp1);
     let harness = get_harness(full_store.clone(), LOW_VALIDATOR_COUNT);

+    let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
+
+    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
     harness
-        .extend_chain(
-            num_initial_blocks as usize,
-            BlockStrategy::OnCanonicalHead,
-            AttestationStrategy::AllValidators,
+        .add_attested_blocks_at_slots(
+            genesis_state.clone(),
+            genesis_state_root,
+            &slots,
+            &all_validators,
         )
         .await;

-    let genesis_state = full_store
-        .get_state(&harness.chain.genesis_state_root, Some(Slot::new(0)))
+    let wss_block_root = harness
+        .chain
+        .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev)
         .unwrap()
         .unwrap();
-    let wss_checkpoint = harness.finalized_checkpoint();
+    let wss_state_root = harness
+        .chain
+        .state_root_at_slot(checkpoint_slot)
+        .unwrap()
+        .unwrap();
+
     let wss_block = harness
         .chain
         .store
-        .get_full_block(&wss_checkpoint.root)
+        .get_full_block(&wss_block_root)
         .unwrap()
         .unwrap();
     let wss_state = full_store
-        .get_state(&wss_block.state_root(), None)
+        .get_state(&wss_state_root, Some(checkpoint_slot))
         .unwrap()
         .unwrap();
-    let wss_slot = wss_block.slot();

     // Add more blocks that advance finalization further.
     harness.advance_slot();
```
|
|||||||
let spec = test_spec::<E>();
|
let spec = test_spec::<E>();
|
||||||
let seconds_per_slot = spec.seconds_per_slot;
|
let seconds_per_slot = spec.seconds_per_slot;
|
||||||
|
|
||||||
// Initialise a new beacon chain from the finalized checkpoint
|
// Initialise a new beacon chain from the finalized checkpoint.
|
||||||
|
// The slot clock must be set to a time ahead of the checkpoint state.
|
||||||
|
let slot_clock = TestingSlotClock::new(
|
||||||
|
Slot::new(0),
|
||||||
|
Duration::from_secs(harness.chain.genesis_time),
|
||||||
|
Duration::from_secs(seconds_per_slot),
|
||||||
|
);
|
||||||
|
slot_clock.set_slot(harness.get_current_slot().as_u64());
|
||||||
let beacon_chain = Arc::new(
|
let beacon_chain = Arc::new(
|
||||||
BeaconChainBuilder::new(MinimalEthSpec)
|
BeaconChainBuilder::new(MinimalEthSpec)
|
||||||
.store(store.clone())
|
.store(store.clone())
|
||||||
.custom_spec(test_spec::<E>())
|
.custom_spec(test_spec::<E>())
|
||||||
.task_executor(harness.chain.task_executor.clone())
|
.task_executor(harness.chain.task_executor.clone())
|
||||||
|
.logger(log.clone())
|
||||||
.weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state)
|
.weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.logger(log.clone())
|
|
||||||
.store_migrator_config(MigratorConfig::default().blocking())
|
.store_migrator_config(MigratorConfig::default().blocking())
|
||||||
.dummy_eth1_backend()
|
.dummy_eth1_backend()
|
||||||
.expect("should build dummy backend")
|
.expect("should build dummy backend")
|
||||||
.testing_slot_clock(Duration::from_secs(seconds_per_slot))
|
.slot_clock(slot_clock)
|
||||||
.expect("should configure testing slot clock")
|
|
||||||
.shutdown_sender(shutdown_tx)
|
.shutdown_sender(shutdown_tx)
|
||||||
.chain_config(ChainConfig::default())
|
.chain_config(ChainConfig::default())
|
||||||
.event_handler(Some(ServerSentEventHandler::new_with_capacity(
|
.event_handler(Some(ServerSentEventHandler::new_with_capacity(
|
||||||
```diff
@@ -2131,9 +2166,9 @@ async fn weak_subjectivity_sync() {

     // Apply blocks forward to reach head.
     let chain_dump = harness.chain.chain_dump().unwrap();
-    let new_blocks = &chain_dump[wss_slot.as_usize() + 1..];
-    assert_eq!(new_blocks[0].beacon_block.slot(), wss_slot + 1);
+    let new_blocks = chain_dump
+        .iter()
+        .filter(|snapshot| snapshot.beacon_block.slot() > checkpoint_slot);

     for snapshot in new_blocks {
         let full_block = harness
```
```diff
@@ -2219,13 +2254,17 @@ async fn weak_subjectivity_sync() {
     assert_eq!(forwards, expected);

     // All blocks can be loaded.
+    let mut prev_block_root = Hash256::zero();
     for (block_root, slot) in beacon_chain
         .forwards_iter_block_roots(Slot::new(0))
         .unwrap()
         .map(Result::unwrap)
     {
         let block = store.get_blinded_block(&block_root).unwrap().unwrap();
-        assert_eq!(block.slot(), slot);
+        if block_root != prev_block_root {
+            assert_eq!(block.slot(), slot);
+        }
+        prev_block_root = block_root;
     }

     // All states from the oldest state slot can be loaded.
```
```diff
@@ -2240,14 +2279,141 @@ async fn weak_subjectivity_sync() {
         assert_eq!(state.canonical_root(), state_root);
     }

-    // Anchor slot is still set to the starting slot.
-    assert_eq!(store.get_anchor_slot(), Some(wss_slot));
+    // Anchor slot is still set to the slot of the checkpoint block.
+    assert_eq!(store.get_anchor_slot(), Some(wss_block.slot()));

     // Reconstruct states.
     store.clone().reconstruct_historic_states().unwrap();
     assert_eq!(store.get_anchor_slot(), None);
 }

+/// Test that blocks and attestations that refer to states around an unaligned split state are
+/// processed correctly.
+#[tokio::test]
+async fn process_blocks_and_attestations_for_unaligned_checkpoint() {
+    let temp = tempdir().unwrap();
+    let store = get_store(&temp);
+    let chain_config = ChainConfig {
+        reconstruct_historic_states: false,
+        ..ChainConfig::default()
+    };
+    let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config);
+
+    let all_validators = (0..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
+
+    let split_slot = Slot::new(E::slots_per_epoch() * 4);
+    let pre_skips = 1;
+    let post_skips = 1;
+
+    // Build the chain up to the intended split slot, with 3 skips before the split.
+    let slots = (1..=split_slot.as_u64() - pre_skips)
+        .map(Slot::new)
+        .collect::<Vec<_>>();
+
+    let (genesis_state, genesis_state_root) = harness.get_current_state_and_root();
+    harness
+        .add_attested_blocks_at_slots(
+            genesis_state.clone(),
+            genesis_state_root,
+            &slots,
+            &all_validators,
+        )
+        .await;
+
+    // Before the split slot becomes finalized, create two forking blocks that build on the split
+    // block:
+    //
+    // - one that is invalid because it conflicts with finalization (slot <= finalized_slot)
+    // - one that is valid because its slot is not finalized (slot > finalized_slot)
+    let (unadvanced_split_state, unadvanced_split_state_root) =
+        harness.get_current_state_and_root();
+
+    let (invalid_fork_block, _) = harness
+        .make_block(unadvanced_split_state.clone(), split_slot)
+        .await;
+    let (valid_fork_block, _) = harness
+        .make_block(unadvanced_split_state.clone(), split_slot + 1)
+        .await;
+
+    // Advance the chain so that the intended split slot is finalized.
+    // Do not attest in the epoch boundary slot, to make attestation production later easier (no
+    // equivocations).
+    let finalizing_slot = split_slot + 2 * E::slots_per_epoch();
+    for _ in 0..pre_skips + post_skips {
+        harness.advance_slot();
+    }
+    harness.extend_to_slot(finalizing_slot - 1).await;
+    harness
+        .add_block_at_slot(finalizing_slot, harness.get_current_state())
+        .await
+        .unwrap();
+
+    // Check that the split slot is as intended.
+    let split = store.get_split_info();
+    assert_eq!(split.slot, split_slot);
+    assert_eq!(split.block_root, valid_fork_block.parent_root());
+    assert_ne!(split.state_root, unadvanced_split_state_root);
+
+    // Applying the invalid block should fail.
+    let err = harness
+        .chain
+        .process_block(
+            invalid_fork_block.canonical_root(),
+            Arc::new(invalid_fork_block.clone()),
+            NotifyExecutionLayer::Yes,
+            || Ok(()),
+        )
+        .await
+        .unwrap_err();
+    assert!(matches!(err, BlockError::WouldRevertFinalizedSlot { .. }));
+
+    // Applying the valid block should succeed, but it should not become head.
+    harness
+        .chain
+        .process_block(
+            valid_fork_block.canonical_root(),
+            Arc::new(valid_fork_block.clone()),
+            NotifyExecutionLayer::Yes,
+            || Ok(()),
+        )
+        .await
+        .unwrap();
+    harness.chain.recompute_head_at_current_slot().await;
+    assert_ne!(harness.head_block_root(), valid_fork_block.canonical_root());
+
+    // Attestations to the split block in the next 2 epochs should be processed successfully.
+    let attestation_start_slot = harness.get_current_slot();
+    let attestation_end_slot = attestation_start_slot + 2 * E::slots_per_epoch();
+    let (split_state_root, mut advanced_split_state) = harness
+        .chain
+        .store
+        .get_advanced_hot_state(split.block_root, split.slot, split.state_root)
+        .unwrap()
+        .unwrap();
+    complete_state_advance(
+        &mut advanced_split_state,
+        Some(split_state_root),
+        attestation_start_slot,
+        &harness.chain.spec,
+    )
+    .unwrap();
+    advanced_split_state
+        .build_caches(&harness.chain.spec)
+        .unwrap();
+    let advanced_split_state_root = advanced_split_state.update_tree_hash_cache().unwrap();
+    for slot in (attestation_start_slot.as_u64()..attestation_end_slot.as_u64()).map(Slot::new) {
+        let attestations = harness.make_attestations(
+            &all_validators,
+            &advanced_split_state,
+            advanced_split_state_root,
+            split.block_root.into(),
+            slot,
+        );
+        harness.advance_slot();
+        harness.process_attestations(attestations);
+    }
+}
+
 #[tokio::test]
 async fn finalizes_after_resuming_from_db() {
     let validator_count = 16;
```
```diff
@@ -2306,6 +2472,7 @@ async fn finalizes_after_resuming_from_db() {
         .default_spec()
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .resumed_disk_store(store)
+        .testing_slot_clock(original_chain.slot_clock.clone())
         .mock_execution_layer()
         .build();

```
```diff
@@ -2559,6 +2726,9 @@ async fn schema_downgrade_to_min_version() {
         SchemaVersion(11)
     };

+    // Save the slot clock so that the new harness doesn't revert in time.
+    let slot_clock = harness.chain.slot_clock.clone();
+
     // Close the database to ensure everything is written to disk.
     drop(store);
     drop(harness);
```
```diff
@@ -2589,11 +2759,21 @@ async fn schema_downgrade_to_min_version() {
     )
     .expect("schema upgrade from minimum version should work");

-    // Rescreate the harness.
+    // Recreate the harness.
+    /*
+    let slot_clock = TestingSlotClock::new(
+        Slot::new(0),
+        Duration::from_secs(harness.chain.genesis_time),
+        Duration::from_secs(spec.seconds_per_slot),
+    );
+    slot_clock.set_slot(harness.get_current_slot().as_u64());
+    */
+
     let harness = BeaconChainHarness::builder(MinimalEthSpec)
         .default_spec()
         .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
         .logger(store.logger().clone())
+        .testing_slot_clock(slot_clock)
         .resumed_disk_store(store.clone())
         .mock_execution_layer()
         .build();
```
@@ -6,7 +6,7 @@ use beacon_chain::{
         AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
         OP_POOL_DB_KEY,
     },
-    BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
+    BeaconChain, ChainConfig, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
 };
 use lazy_static::lazy_static;
 use operation_pool::PersistedOperationPool;
@@ -28,6 +28,10 @@ lazy_static! {
 fn get_harness(validator_count: usize) -> BeaconChainHarness<EphemeralHarnessType<MinimalEthSpec>> {
     let harness = BeaconChainHarness::builder(MinimalEthSpec)
         .default_spec()
+        .chain_config(ChainConfig {
+            reconstruct_historic_states: true,
+            ..ChainConfig::default()
+        })
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
         .fresh_ephemeral_store()
         .mock_execution_layer()
@@ -6,11 +6,11 @@ edition = "2021"
 
 [dev-dependencies]
 serde_yaml = "0.8.13"
-state_processing = { path = "../../consensus/state_processing" }
 operation_pool = { path = "../operation_pool" }
 tokio = "1.14.0"
 
 [dependencies]
+state_processing = { path = "../../consensus/state_processing" }
 beacon_chain = { path = "../beacon_chain" }
 store = { path = "../store" }
 network = { path = "../network" }
@@ -309,7 +309,6 @@ where
                 config.chain.checkpoint_sync_url_timeout,
             )),
         );
-        let slots_per_epoch = TEthSpec::slots_per_epoch();
 
         let deposit_snapshot = if config.sync_eth1_chain {
             // We want to fetch deposit snapshot before fetching the finalized beacon state to
@@ -356,10 +355,23 @@ where
             None
         };
 
-        debug!(context.log(), "Downloading finalized block");
-        // Find a suitable finalized block on an epoch boundary.
-        let mut block = remote
-            .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Finalized, &spec)
+        debug!(
+            context.log(),
+            "Downloading finalized state";
+        );
+        let state = remote
+            .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Finalized, &spec)
+            .await
+            .map_err(|e| format!("Error loading checkpoint state from remote: {:?}", e))?
+            .ok_or_else(|| "Checkpoint state missing from remote".to_string())?;
+
+        debug!(context.log(), "Downloaded finalized state"; "slot" => ?state.slot());
+
+        let finalized_block_slot = state.latest_block_header().slot;
+
+        debug!(context.log(), "Downloading finalized block"; "block_slot" => ?finalized_block_slot);
+        let block = remote
+            .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(finalized_block_slot), &spec)
             .await
             .map_err(|e| match e {
                 ApiError::InvalidSsz(e) => format!(
@@ -373,65 +385,15 @@ where
 
         debug!(context.log(), "Downloaded finalized block");
 
-        let mut block_slot = block.slot();
-
-        while block.slot() % slots_per_epoch != 0 {
-            block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch;
-
-            debug!(
-                context.log(),
-                "Searching for aligned checkpoint block";
-                "block_slot" => block_slot
-            );
-
-            if let Some(found_block) = remote
-                .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Slot(block_slot), &spec)
-                .await
-                .map_err(|e| {
-                    format!("Error fetching block at slot {}: {:?}", block_slot, e)
-                })?
-            {
-                block = found_block;
-            }
-        }
-
-        debug!(
-            context.log(),
-            "Downloaded aligned finalized block";
-            "block_root" => ?block.canonical_root(),
-            "block_slot" => block.slot(),
-        );
-
-        let state_root = block.state_root();
-        debug!(
-            context.log(),
-            "Downloading finalized state";
-            "state_root" => ?state_root
-        );
-        let state = remote
-            .get_debug_beacon_states_ssz::<TEthSpec>(StateId::Root(state_root), &spec)
-            .await
-            .map_err(|e| {
-                format!(
-                    "Error loading checkpoint state from remote {:?}: {:?}",
-                    state_root, e
-                )
-            })?
-            .ok_or_else(|| {
-                format!("Checkpoint state missing from remote: {:?}", state_root)
-            })?;
-
-        debug!(context.log(), "Downloaded finalized state");
-
         let genesis_state = BeaconState::from_ssz_bytes(&genesis_state_bytes, &spec)
             .map_err(|e| format!("Unable to parse genesis state SSZ: {:?}", e))?;
 
         info!(
             context.log(),
             "Loaded checkpoint block and state";
-            "slot" => block.slot(),
+            "block_slot" => block.slot(),
+            "state_slot" => state.slot(),
             "block_root" => ?block.canonical_root(),
-            "state_root" => ?state_root,
         );
 
         let service =
@@ -1,7 +1,7 @@
 use beacon_chain::test_utils::RelativeSyncCommittee;
 use beacon_chain::{
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
-    BeaconChain, StateSkipConfig, WhenSlotSkipped,
+    BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped,
 };
 use environment::null_logger;
 use eth2::{
@@ -77,6 +77,7 @@ struct ApiTester {
 
 struct ApiTesterConfig {
     spec: ChainSpec,
+    retain_historic_states: bool,
     builder_threshold: Option<u128>,
 }
 
@@ -86,11 +87,19 @@ impl Default for ApiTesterConfig {
         spec.shard_committee_period = 2;
         Self {
             spec,
+            retain_historic_states: false,
             builder_threshold: None,
         }
     }
 }
 
+impl ApiTesterConfig {
+    fn retain_historic_states(mut self) -> Self {
+        self.retain_historic_states = true;
+        self
+    }
+}
+
 impl ApiTester {
     pub async fn new() -> Self {
         // This allows for testing voluntary exits without building out a massive chain.
@@ -118,6 +127,10 @@ impl ApiTester {
         let harness = Arc::new(
             BeaconChainHarness::builder(MainnetEthSpec)
                 .spec(spec.clone())
+                .chain_config(ChainConfig {
+                    reconstruct_historic_states: config.retain_historic_states,
+                    ..ChainConfig::default()
+                })
                 .logger(logging::test_logger())
                 .deterministic_keypairs(VALIDATOR_COUNT)
                 .fresh_ephemeral_store()
@@ -375,6 +388,7 @@ impl ApiTester {
     pub async fn new_mev_tester_no_builder_threshold() -> Self {
         let mut config = ApiTesterConfig {
             builder_threshold: Some(0),
+            retain_historic_states: false,
             spec: E::default_spec(),
         };
         config.spec.altair_fork_epoch = Some(Epoch::new(0));
@@ -4705,7 +4719,7 @@ async fn get_validator_duties_attester_with_skip_slots() {
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_validator_duties_proposer() {
-    ApiTester::new()
+    ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states())
         .await
         .test_get_validator_duties_proposer()
         .await;
@@ -4713,7 +4727,7 @@ async fn get_validator_duties_proposer() {
 
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn get_validator_duties_proposer_with_skip_slots() {
-    ApiTester::new()
+    ApiTester::new_from_config(ApiTesterConfig::default().retain_historic_states())
         .await
         .skip_slots(E::slots_per_epoch() * 2)
         .test_get_validator_duties_proposer()
@@ -5045,6 +5059,7 @@ async fn builder_payload_chosen_by_profit() {
 async fn builder_works_post_capella() {
     let mut config = ApiTesterConfig {
         builder_threshold: Some(0),
+        retain_historic_states: false,
         spec: E::default_spec(),
     };
     config.spec.altair_fork_epoch = Some(Epoch::new(0));
@@ -2062,7 +2062,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
                 );
             }
             AttnError::BeaconChainError(BeaconChainError::DBError(Error::HotColdDBError(
-                HotColdDBError::AttestationStateIsFinalized { .. },
+                HotColdDBError::FinalizedStateNotInHotDatabase { .. },
             ))) => {
                 debug!(self.log, "Attestation for finalized state"; "peer_id" => %peer_id);
                 self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
@@ -14,7 +14,7 @@ use crate::memory_store::MemoryStore;
 use crate::metadata::{
     AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY,
     COMPACTION_TIMESTAMP_KEY, CONFIG_KEY, CURRENT_SCHEMA_VERSION, PRUNING_CHECKPOINT_KEY,
-    SCHEMA_VERSION_KEY, SPLIT_KEY,
+    SCHEMA_VERSION_KEY, SPLIT_KEY, STATE_UPPER_LIMIT_NO_RETAIN,
 };
 use crate::metrics;
 use crate::{
@@ -110,10 +110,10 @@ pub enum HotColdDBError {
     IterationError {
         unexpected_key: BytesKey,
     },
-    AttestationStateIsFinalized {
+    FinalizedStateNotInHotDatabase {
         split_slot: Slot,
-        request_slot: Option<Slot>,
-        state_root: Hash256,
+        request_slot: Slot,
+        block_root: Hash256,
     },
 }
 
@@ -545,7 +545,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     /// upon that state (e.g., state roots). Additionally, only states from the hot store are
     /// returned.
     ///
-    /// See `Self::get_state` for information about `slot`.
+    /// See `Self::get_advanced_hot_state` for information about `max_slot`.
     ///
     /// ## Warning
     ///
@@ -557,23 +557,78 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     /// - `state.block_roots`
     pub fn get_inconsistent_state_for_attestation_verification_only(
         &self,
-        state_root: &Hash256,
-        slot: Option<Slot>,
-    ) -> Result<Option<BeaconState<E>>, Error> {
+        block_root: &Hash256,
+        max_slot: Slot,
+        state_root: Hash256,
+    ) -> Result<Option<(Hash256, BeaconState<E>)>, Error> {
         metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT);
+        self.get_advanced_hot_state_with_strategy(
+            *block_root,
+            max_slot,
+            state_root,
+            StateProcessingStrategy::Inconsistent,
+        )
+    }
 
-        let split_slot = self.get_split_slot();
-
-        if slot.map_or(false, |slot| slot < split_slot) {
-            Err(HotColdDBError::AttestationStateIsFinalized {
-                split_slot,
-                request_slot: slot,
-                state_root: *state_root,
+    /// Get a state with `latest_block_root == block_root` advanced through to at most `max_slot`.
+    ///
+    /// The `state_root` argument is used to look up the block's un-advanced state in case an
+    /// advanced state is not found.
+    ///
+    /// Return the `(result_state_root, state)` satisfying:
+    ///
+    /// - `result_state_root == state.canonical_root()`
+    /// - `state.slot() <= max_slot`
+    /// - `state.get_latest_block_root(result_state_root) == block_root`
+    ///
+    /// Presently this is only used to avoid loading the un-advanced split state, but in future will
+    /// be expanded to return states from an in-memory cache.
+    pub fn get_advanced_hot_state(
+        &self,
+        block_root: Hash256,
+        max_slot: Slot,
+        state_root: Hash256,
+    ) -> Result<Option<(Hash256, BeaconState<E>)>, Error> {
+        self.get_advanced_hot_state_with_strategy(
+            block_root,
+            max_slot,
+            state_root,
+            StateProcessingStrategy::Accurate,
+        )
+    }
+
+    /// Same as `get_advanced_hot_state` but taking a `StateProcessingStrategy`.
+    pub fn get_advanced_hot_state_with_strategy(
+        &self,
+        block_root: Hash256,
+        max_slot: Slot,
+        state_root: Hash256,
+        state_processing_strategy: StateProcessingStrategy,
+    ) -> Result<Option<(Hash256, BeaconState<E>)>, Error> {
+        // Hold a read lock on the split point so it can't move while we're trying to load the
+        // state.
+        let split = self.split.read_recursive();
+
+        // Sanity check max-slot against the split slot.
+        if max_slot < split.slot {
+            return Err(HotColdDBError::FinalizedStateNotInHotDatabase {
+                split_slot: split.slot,
+                request_slot: max_slot,
+                block_root,
             }
-            .into())
-        } else {
-            self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent)
+            .into());
         }
+
+        let state_root = if block_root == split.block_root && split.slot <= max_slot {
+            split.state_root
+        } else {
+            state_root
+        };
+        let state = self
+            .load_hot_state(&state_root, state_processing_strategy)?
+            .map(|state| (state_root, state));
+        drop(split);
+        Ok(state)
     }
 
     /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk.
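
The split-state fallback at the heart of `get_advanced_hot_state_with_strategy` is small enough to sketch in isolation. A minimal, hedged illustration with roots flattened to `[u8; 32]` and slots to `u64` (the tuple layout and the name `advanced_state_root` are illustrative, not types from this diff):

```rust
/// Sketch only: when the requested block is the split block, substitute the
/// split's (advanced) state root for the caller's fallback state root, since
/// the block's own unadvanced state may have been pruned from the hot DB.
fn advanced_state_root(
    block_root: [u8; 32],
    max_slot: u64,
    fallback_state_root: [u8; 32],
    split: &(u64, [u8; 32], [u8; 32]), // (slot, state_root, block_root)
) -> [u8; 32] {
    let (split_slot, split_state_root, split_block_root) = *split;
    if block_root == split_block_root && split_slot <= max_slot {
        split_state_root
    } else {
        fallback_state_root
    }
}
```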
@@ -1180,8 +1235,12 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         *self.split.read_recursive()
     }
 
-    pub fn set_split(&self, slot: Slot, state_root: Hash256) {
-        *self.split.write() = Split { slot, state_root };
+    pub fn set_split(&self, slot: Slot, state_root: Hash256, block_root: Hash256) {
+        *self.split.write() = Split {
+            slot,
+            state_root,
+            block_root,
+        };
     }
 
     /// Fetch the slot of the most recently stored restore point.
@@ -1216,25 +1275,36 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     }
 
     /// Initialise the anchor info for checkpoint sync starting from `block`.
-    pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result<KeyValueStoreOp, Error> {
+    pub fn init_anchor_info(
+        &self,
+        block: BeaconBlockRef<'_, E>,
+        retain_historic_states: bool,
+    ) -> Result<KeyValueStoreOp, Error> {
         let anchor_slot = block.slot();
         let slots_per_restore_point = self.config.slots_per_restore_point;
 
-        // Set the `state_upper_limit` to the slot of the *next* restore point.
-        // See `get_state_upper_limit` for rationale.
-        let next_restore_point_slot = if anchor_slot % slots_per_restore_point == 0 {
+        let state_upper_limit = if !retain_historic_states {
+            STATE_UPPER_LIMIT_NO_RETAIN
+        } else if anchor_slot % slots_per_restore_point == 0 {
             anchor_slot
         } else {
+            // Set the `state_upper_limit` to the slot of the *next* restore point.
+            // See `get_state_upper_limit` for rationale.
            (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point
         };
-        let anchor_info = AnchorInfo {
-            anchor_slot,
-            oldest_block_slot: anchor_slot,
-            oldest_block_parent: block.parent_root(),
-            state_upper_limit: next_restore_point_slot,
-            state_lower_limit: self.spec.genesis_slot,
+        let anchor_info = if state_upper_limit == 0 && anchor_slot == 0 {
+            // Genesis archive node: no anchor because we *will* store all states.
+            None
+        } else {
+            Some(AnchorInfo {
+                anchor_slot,
+                oldest_block_slot: anchor_slot,
+                oldest_block_parent: block.parent_root(),
+                state_upper_limit,
+                state_lower_limit: self.spec.genesis_slot,
+            })
         };
-        self.compare_and_set_anchor_info(None, Some(anchor_info))
+        self.compare_and_set_anchor_info(None, anchor_info)
     }
 
     /// Get a clone of the store's anchor info.
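
The `state_upper_limit` selection in `init_anchor_info` reduces to a small pure function. A sketch with `Slot` flattened to `u64`, `u64::MAX` standing in for `STATE_UPPER_LIMIT_NO_RETAIN`, and an arbitrary restore-point spacing chosen for the example:

```rust
fn state_upper_limit(anchor_slot: u64, slots_per_restore_point: u64, retain: bool) -> u64 {
    if !retain {
        // No historic states will ever be stored.
        u64::MAX
    } else if anchor_slot % slots_per_restore_point == 0 {
        anchor_slot
    } else {
        // Round up to the slot of the *next* restore point.
        (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point
    }
}

fn main() {
    assert_eq!(state_upper_limit(8192, 2048, true), 8192);
    assert_eq!(state_upper_limit(9000, 2048, true), 10240);
    assert_eq!(state_upper_limit(9000, 2048, false), u64::MAX);
}
```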
@@ -1361,11 +1431,26 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         self.hot_db.put(&CONFIG_KEY, &self.config.as_disk_config())
     }
 
-    /// Load the split point from disk.
-    fn load_split(&self) -> Result<Option<Split>, Error> {
+    /// Load the split point from disk, sans block root.
+    fn load_split_partial(&self) -> Result<Option<Split>, Error> {
         self.hot_db.get(&SPLIT_KEY)
     }
 
+    /// Load the split point from disk, including block root.
+    fn load_split(&self) -> Result<Option<Split>, Error> {
+        match self.load_split_partial()? {
+            Some(mut split) => {
+                // Load the hot state summary to get the block root.
+                let summary = self.load_hot_state_summary(&split.state_root)?.ok_or(
+                    HotColdDBError::MissingSplitState(split.state_root, split.slot),
+                )?;
+                split.block_root = summary.latest_block_root;
+                Ok(Some(split))
+            }
+            None => Ok(None),
+        }
+    }
+
     /// Stage the split for storage to disk.
     pub fn store_split_in_batch(&self) -> KeyValueStoreOp {
         self.split.read_recursive().as_kv_store_op(SPLIT_KEY)
@@ -1611,42 +1696,40 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
 /// Advance the split point of the store, moving new finalized states to the freezer.
 pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     store: Arc<HotColdDB<E, Hot, Cold>>,
-    frozen_head_root: Hash256,
-    frozen_head: &BeaconState<E>,
+    finalized_state_root: Hash256,
+    finalized_block_root: Hash256,
+    finalized_state: &BeaconState<E>,
 ) -> Result<(), Error> {
     debug!(
         store.log,
         "Freezer migration started";
-        "slot" => frozen_head.slot()
+        "slot" => finalized_state.slot()
     );
 
     // 0. Check that the migration is sensible.
-    // The new frozen head must increase the current split slot, and lie on an epoch
+    // The new finalized state must increase the current split slot, and lie on an epoch
     // boundary (in order for the hot state summary scheme to work).
     let current_split_slot = store.split.read_recursive().slot;
-    let anchor_slot = store
-        .anchor_info
-        .read_recursive()
-        .as_ref()
-        .map(|a| a.anchor_slot);
+    let anchor_info = store.anchor_info.read_recursive().clone();
+    let anchor_slot = anchor_info.as_ref().map(|a| a.anchor_slot);
 
-    if frozen_head.slot() < current_split_slot {
+    if finalized_state.slot() < current_split_slot {
         return Err(HotColdDBError::FreezeSlotError {
             current_split_slot,
-            proposed_split_slot: frozen_head.slot(),
+            proposed_split_slot: finalized_state.slot(),
         }
         .into());
     }
 
-    if frozen_head.slot() % E::slots_per_epoch() != 0 {
-        return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into());
+    if finalized_state.slot() % E::slots_per_epoch() != 0 {
+        return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into());
     }
 
     let mut hot_db_ops: Vec<StoreOp<E>> = Vec::new();
 
-    // 1. Copy all of the states between the head and the split slot, from the hot DB
+    // 1. Copy all of the states between the new finalized state and the split slot, from the hot DB
     // to the cold DB. Delete the execution payloads of these now-finalized blocks.
-    let state_root_iter = RootsIterator::new(&store, frozen_head);
+    let state_root_iter = RootsIterator::new(&store, finalized_state);
     for maybe_tuple in state_root_iter.take_while(|result| match result {
         Ok((_, _, slot)) => {
             slot >= &current_split_slot
@@ -1656,6 +1739,29 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     }) {
         let (block_root, state_root, slot) = maybe_tuple?;
 
+        // Delete the execution payload if payload pruning is enabled. At a skipped slot we may
+        // delete the payload for the finalized block itself, but that's OK as we only guarantee
+        // that payloads are present for slots >= the split slot. The payload fetching code is also
+        // forgiving of missing payloads.
+        if store.config.prune_payloads {
+            hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root));
+        }
+
+        // Delete the old summary, and the full state if we lie on an epoch boundary.
+        hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot)));
+
+        // Do not try to store states if a restore point is yet to be stored, or will never be
+        // stored (see `STATE_UPPER_LIMIT_NO_RETAIN`). Make an exception for the genesis state
+        // which always needs to be copied from the hot DB to the freezer and should not be deleted.
+        if slot != 0
+            && anchor_info
+                .as_ref()
+                .map_or(false, |anchor| slot < anchor.state_upper_limit)
+        {
+            debug!(store.log, "Pruning finalized state"; "slot" => slot);
+            continue;
+        }
+
         let mut cold_db_ops: Vec<KeyValueStoreOp> = Vec::new();
 
         if slot % store.config.slots_per_restore_point == 0 {
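
The per-state decision added to the migration loop above can be summarised as a predicate. A hedged sketch (the helper name is hypothetical; `state_upper_limit` is `None` when there is no anchor, i.e. a genesis archive node that stores everything):

```rust
/// Sketch only: every migrated state is deleted from the hot DB, but it is
/// copied into the freezer first unless it falls below the anchor's state
/// upper limit. The genesis state (slot 0) is always copied, never pruned.
fn keep_in_freezer(slot: u64, state_upper_limit: Option<u64>) -> bool {
    slot == 0 || state_upper_limit.map_or(true, |limit| slot >= limit)
}
```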
@@ -1674,17 +1780,6 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
         // There are data dependencies between calls to `store_cold_state()` that prevent us from
         // doing one big call to `store.cold_db.do_atomically()` at end of the loop.
         store.cold_db.do_atomically(cold_db_ops)?;
-
-        // Delete the old summary, and the full state if we lie on an epoch boundary.
-        hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot)));
-
-        // Delete the execution payload if payload pruning is enabled. At a skipped slot we may
-        // delete the payload for the finalized block itself, but that's OK as we only guarantee
-        // that payloads are present for slots >= the split slot. The payload fetching code is also
-        // forgiving of missing payloads.
-        if store.config.prune_payloads {
-            hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root));
-        }
     }
 
     // Warning: Critical section. We have to take care not to put any of the two databases in an
@@ -1724,8 +1819,9 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     // Before updating the in-memory split value, we flush it to disk first, so that should the
     // OS process die at this point, we pick up from the right place after a restart.
     let split = Split {
-        slot: frozen_head.slot(),
-        state_root: frozen_head_root,
+        slot: finalized_state.slot(),
+        state_root: finalized_state_root,
+        block_root: finalized_block_root,
     };
     store.hot_db.put_sync(&SPLIT_KEY, &split)?;
 
@@ -1741,7 +1837,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
     debug!(
         store.log,
         "Freezer migration complete";
-        "slot" => frozen_head.slot()
+        "slot" => finalized_state.slot()
     );
 
     Ok(())
@@ -1750,8 +1846,16 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
 /// Struct for storing the split slot and state root in the database.
 #[derive(Debug, Clone, Copy, PartialEq, Default, Encode, Decode, Deserialize, Serialize)]
 pub struct Split {
-    pub(crate) slot: Slot,
-    pub(crate) state_root: Hash256,
+    pub slot: Slot,
+    pub state_root: Hash256,
+    /// The block root of the split state.
+    ///
+    /// This is used to provide special handling for the split state in the case where there are
+    /// skipped slots. The split state will *always* be the advanced state, so callers
+    /// who only have the finalized block root should use `get_advanced_hot_state` to get this state,
+    /// rather than fetching `block.state_root()` (the unaligned state) which will have been pruned.
+    #[ssz(skip_serializing, skip_deserializing)]
+    pub block_root: Hash256,
 }
 
 impl StoreItem for Split {
@@ -16,6 +16,9 @@ pub const PRUNING_CHECKPOINT_KEY: Hash256 = Hash256::repeat_byte(3);
 pub const COMPACTION_TIMESTAMP_KEY: Hash256 = Hash256::repeat_byte(4);
 pub const ANCHOR_INFO_KEY: Hash256 = Hash256::repeat_byte(5);
 
+/// State upper limit value used to indicate that a node is not storing historic states.
+pub const STATE_UPPER_LIMIT_NO_RETAIN: Slot = Slot::new(u64::MAX);
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 pub struct SchemaVersion(pub u64);
 
@@ -75,7 +75,7 @@ Once backfill is complete, a `INFO Historical block download complete` log will
 
 > Note: Since [v4.1.0](https://github.com/sigp/lighthouse/releases/tag/v4.1.0), Lighthouse implements rate-limited backfilling to mitigate validator performance issues after a recent checkpoint sync. This means that the speed at which historical blocks are downloaded is limited, typically to less than 20 slots/sec. This will not affect validator performance. However, if you would still prefer to sync the chain as fast as possible, you can add the flag `--disable-backfill-rate-limiting` to the beacon node.
 
 > Note: Since [v4.2.0](https://github.com/sigp/lighthouse/releases/tag/v4.2.0), Lighthouse limits the backfill sync to only sync backwards to the weak subjectivity point (approximately 5 months). This will help to save disk space. However, if you would like to sync back to the genesis, you can add the flag `--genesis-backfill` to the beacon node.
 
 ## FAQ
 
@@ -116,8 +116,9 @@ states:
   database. Additionally, the genesis block is always available.
 * `state_lower_limit`: All states with slots _less than or equal to_ this value are available in
   the database. The minimum value is 0, indicating that the genesis state is always available.
-* `state_upper_limit`: All states with slots _greater than or equal to_ this value are available
-  in the database.
+* `state_upper_limit`: All states with slots _greater than or equal to_ `min(split.slot,
+  state_upper_limit)` are available in the database. In the case where the `state_upper_limit` is
+  higher than the `split.slot`, this means states are not being written to the freezer database.
 
 Reconstruction runs from the state lower limit to the upper limit, narrowing the window of
 unavailable states as it goes. It will log messages like the following to show its progress:
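
The availability rule stated above is worth spelling out. A sketch with slots as plain `u64` (illustrative only; it ignores the hot database and the `state_lower_limit` band):

```rust
/// Sketch only: a historic state at `slot` is on disk iff
/// `slot >= min(split_slot, state_upper_limit)`. With pruning enabled the
/// upper limit is effectively infinite, so no states reach the freezer.
fn historic_state_available(slot: u64, split_slot: u64, state_upper_limit: u64) -> bool {
    slot >= split_slot.min(state_upper_limit)
}
```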
@@ -153,18 +154,8 @@ To manually specify a checkpoint use the following two flags:
 * `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob
 * `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob
 
-_Both_ the state and block must be provided and **must** adhere to the [Alignment
-Requirements](#alignment-requirements) described below.
-
-### Alignment Requirements
-
-* The block must be a finalized block from an epoch boundary, i.e. `block.slot() % 32 == 0`.
-* The state must be the state corresponding to `block` with `state.slot() == block.slot()`
-  and `state.hash_tree_root() == block.state_root()`.
-
-These requirements are imposed to align with Lighthouse's database schema, and notably exclude
-finalized blocks from skipped slots. You can avoid alignment issues by using
-[Automatic Checkpoint Sync](#automatic-checkpoint-sync), which will search for a suitable block
-and state pair.
+_Both_ the state and block must be provided and the state **must** match the block. The
+state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary,
+in which case it will be assumed to be finalized at that epoch.
 
 [weak-subj]: https://blog.ethereum.org/2014/11/25/proof-stake-learned-love-weak-subjectivity/
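
The relaxed pairing rule in the new docs can be sketched as a predicate. Everything below is hypothetical scaffolding (flattened roots, 32 slots per epoch as on mainnet), not code from this commit:

```rust
struct Block { slot: u64, root: [u8; 32], state_root: [u8; 32] }
struct State { slot: u64, canonical_root: [u8; 32], latest_block_root: [u8; 32] }

/// Sketch only: accept either the block's own (unadvanced) state, or a state
/// advanced through skipped slots to an epoch boundary whose latest block
/// header still resolves to the checkpoint block.
fn state_matches_block(block: &Block, state: &State) -> bool {
    let unadvanced = state.slot == block.slot && state.canonical_root == block.state_root;
    let advanced =
        state.slot > block.slot && state.slot % 32 == 0 && state.latest_block_root == block.root;
    unadvanced || advanced
}
```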
@@ -355,7 +355,7 @@ where
         spec: &ChainSpec,
     ) -> Result<Self, Error<T::Error>> {
         // Sanity check: the anchor must lie on an epoch boundary.
-        if anchor_block.slot() % E::slots_per_epoch() != 0 {
+        if anchor_state.slot() % E::slots_per_epoch() != 0 {
             return Err(Error::InvalidAnchor {
                 block_slot: anchor_block.slot(),
                 state_slot: anchor_state.slot(),
@@ -391,6 +391,7 @@ where
         let current_slot = current_slot.unwrap_or_else(|| fc_store.get_current_slot());
 
         let proto_array = ProtoArrayForkChoice::new::<E>(
+            current_slot,
             finalized_block_slot,
             finalized_block_state_root,
             *fc_store.justified_checkpoint(),
@@ -80,6 +80,7 @@ impl ForkChoiceTestDefinition {
         let junk_shuffling_id =
             AttestationShufflingId::from_components(Epoch::new(0), Hash256::zero());
         let mut fork_choice = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            self.finalized_block_slot,
             self.finalized_block_slot,
             Hash256::zero(),
             self.justified_checkpoint,
@@ -345,6 +345,7 @@ pub struct ProtoArrayForkChoice {
 impl ProtoArrayForkChoice {
     #[allow(clippy::too_many_arguments)]
     pub fn new<E: EthSpec>(
+        current_slot: Slot,
         finalized_block_slot: Slot,
         finalized_block_state_root: Hash256,
         justified_checkpoint: Checkpoint,
@@ -380,7 +381,7 @@ impl ProtoArrayForkChoice {
         };
 
         proto_array
-            .on_block::<E>(block, finalized_block_slot)
+            .on_block::<E>(block, current_slot)
             .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?;
 
         Ok(Self {
@@ -983,6 +984,7 @@ mod test_compute_deltas {
         };
 
         let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            genesis_slot,
             genesis_slot,
             state_root,
             genesis_checkpoint,
@@ -1108,6 +1110,7 @@ mod test_compute_deltas {
         };
 
         let mut fc = ProtoArrayForkChoice::new::<MainnetEthSpec>(
+            genesis_slot,
             genesis_slot,
             junk_state_root,
             genesis_checkpoint,
|
@ -21,7 +21,7 @@ impl From<ArithError> for Error {
|
|||||||
///
|
///
|
||||||
/// If the root of the supplied `state` is known, then it can be passed as `state_root`. If
|
/// If the root of the supplied `state` is known, then it can be passed as `state_root`. If
|
||||||
/// `state_root` is `None`, the root of `state` will be computed using a cached tree hash.
|
/// `state_root` is `None`, the root of `state` will be computed using a cached tree hash.
|
||||||
/// Providing the `state_root` makes this function several orders of magniude faster.
|
/// Providing the `state_root` makes this function several orders of magnitude faster.
|
||||||
pub fn per_slot_processing<T: EthSpec>(
|
pub fn per_slot_processing<T: EthSpec>(
|
||||||
state: &mut BeaconState<T>,
|
state: &mut BeaconState<T>,
|
||||||
state_root: Option<Hash256>,
|
state_root: Option<Hash256>,
|
||||||
|
@@ -7,7 +7,7 @@ use beacon_chain::{
         obtain_indexed_attestation_and_committees_per_slot, VerifiedAttestation,
     },
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
-    BeaconChainTypes, CachedHead, NotifyExecutionLayer,
+    BeaconChainTypes, CachedHead, ChainConfig, NotifyExecutionLayer,
 };
 use execution_layer::{json_structures::JsonPayloadStatusV1Status, PayloadStatusV1};
 use serde::Deserialize;
@@ -303,6 +303,10 @@ impl<E: EthSpec> Tester<E> {
         let harness = BeaconChainHarness::builder(E::default())
             .spec(spec.clone())
             .keypairs(vec![])
+            .chain_config(ChainConfig {
+                reconstruct_historic_states: true,
+                ..ChainConfig::default()
+            })
             .genesis_state_ephemeral_store(case.anchor_state.clone())
             .mock_execution_layer()
             .recalculate_fork_times_with_genesis(0)
@@ -115,6 +115,9 @@ pub fn testing_client_config() -> ClientConfig {
         genesis_time: now,
     };
 
+    // Simulator tests expect historic states to be available for post-run checks.
+    client_config.chain.reconstruct_historic_states = true;
+
     // Specify a constant count of beacon processor workers. Having this number
     // too low can cause annoying HTTP timeouts, especially on Github runners
     // with 2 logical CPUs.
@@ -1,8 +1,9 @@
 #![recursion_limit = "256"]
 #![cfg(unix)]
 
-use beacon_chain::test_utils::{
-    AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
+use beacon_chain::{
+    test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
+    ChainConfig,
 };
 use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts};
 use http_api::test_utils::{create_api_server, ApiServer};
@@ -91,6 +92,10 @@ impl TesterBuilder {
     pub async fn new() -> TesterBuilder {
         let harness = BeaconChainHarness::builder(E::default())
             .default_spec()
+            .chain_config(ChainConfig {
+                reconstruct_historic_states: true,
+                ..ChainConfig::default()
+            })
             .deterministic_keypairs(VALIDATOR_COUNT)
             .fresh_ephemeral_store()
             .build();