Prune abandoned forks (#916)
* Address compiler warning
* Prune abandoned fork choice forks
* New approach to pruning
* Wrap some block hashes in a newtype pattern, for increased type safety
* Add Graphviz chain dump emitter for debugging
* Fix broken test case
* Make prunes_abandoned_forks use real DiskStore
* Mark finalized blocks in the GraphViz output
* Refine debug stringification of Slot and Epoch. Before this commit: print!("{:?}", Slot(123)) == "Slot(\n123\n)". After this commit: print!("{:?}", Slot(123)) == "Slot(123)".
* Simplify build_block()
* Rewrite test case using more composable test primitives
* Working rewritten test case
* Tighten fork pruning test checks
* Add another pruning test case
* Bugfix: Finalized blocks weren't always properly detected
* Pruning: Add pruning_does_not_touch_blocks_prior_to_finalization test case
* Tighten pruning tests: check if heads are tracked properly
* Add a failing test case for a buggy scenario
* Change name of function to a more accurate one
* Fix failing test case
* Test case: Were skipped slots' states pruned?
* Style fix: Simplify dereferencing
* Tighten pruning tests: check if abandoned states are deleted
* Towards atomicity of db ops
* Correct typo
* Prune also skipped slots' states
* New logic for handling skipped states
* Make skipped slots test pass
* Post conflict resolution fixes
* Formatting fixes
* Tests passing
* Block hashes in Graphviz node labels
* Removed unused changes
* Fix bug with states having < SlotsPerHistoricalRoot roots
* Consolidate State/BlockRootsIterator for pruning
* Address review feedback
* Fix a bug in pruning tests
* Detach prune_abandoned_forks() from its object
* Move migrate.rs from store to beacon_chain
* Move forks pruning onto a background thread
* Bugfix: Heads weren't pruned when prune set contained only the head
* Rename: freeze_to_state() -> process_finalization()
* Eliminate redundant function parameter

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
Parent: b374ead24b
Commit: 9c3f76a33b
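The "newtype pattern" and the `Debug` refinement called out in the commit message look roughly like the following. This is a minimal, self-contained sketch: the derive lists and the stand-in `Hash256` are assumptions for illustration; only the type names and the `Slot(123)` formatting behaviour come from the commit message and the diff below.

use std::fmt;

// Stand-in for types::Hash256, only so this sketch compiles on its own.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Hash256([u8; 32]);

// Newtype wrapper: a root known to refer to a signed beacon block,
// as opposed to an arbitrary Hash256.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SignedBeaconBlockHash(Hash256);

impl From<Hash256> for SignedBeaconBlockHash {
    fn from(hash: Hash256) -> Self {
        SignedBeaconBlockHash(hash)
    }
}

impl From<SignedBeaconBlockHash> for Hash256 {
    fn from(hash: SignedBeaconBlockHash) -> Self {
        hash.0
    }
}

// The Debug refinement: print tuple structs on one line, so that
// format!("{:?}", Slot(123)) == "Slot(123)" instead of "Slot(\n123\n)".
pub struct Slot(u64);

impl fmt::Debug for Slot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Slot({})", self.0)
    }
}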
@@ -4,6 +4,7 @@ use crate::events::{EventHandler, EventKind};
 use crate::fork_choice::{Error as ForkChoiceError, ForkChoice};
 use crate::head_tracker::HeadTracker;
 use crate::metrics;
+use crate::migrate::Migrate;
 use crate::persisted_beacon_chain::PersistedBeaconChain;
 use crate::shuffling_cache::ShufflingCache;
 use crate::snapshot_cache::SnapshotCache;
@@ -26,14 +27,16 @@ use state_processing::{
 use std::borrow::Cow;
 use std::cmp::Ordering;
 use std::collections::HashMap;
+use std::collections::HashSet;
 use std::fs;
 use std::io::prelude::*;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use store::iter::{
-    BlockRootsIterator, ReverseBlockRootIterator, ReverseStateRootIterator, StateRootsIterator,
+    BlockRootsIterator, ParentRootBlockIterator, ReverseBlockRootIterator,
+    ReverseStateRootIterator, StateRootsIterator,
 };
-use store::{Error as DBError, Migrate, StateBatch, Store};
+use store::{Error as DBError, StateBatch, Store};
 use tree_hash::TreeHash;
 use types::*;

@@ -170,7 +173,7 @@ pub struct HeadInfo {

 pub trait BeaconChainTypes: Send + Sync + 'static {
     type Store: store::Store<Self::EthSpec>;
-    type StoreMigrator: store::Migrate<Self::Store, Self::EthSpec>;
+    type StoreMigrator: Migrate<Self::Store, Self::EthSpec>;
     type SlotClock: slot_clock::SlotClock;
     type Eth1Chain: Eth1ChainBackend<Self::EthSpec, Self::Store>;
     type EthSpec: types::EthSpec;
@@ -202,7 +205,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     /// A handler for events generated by the beacon chain.
     pub event_handler: T::EventHandler,
     /// Used to track the heads of the beacon chain.
-    pub(crate) head_tracker: HeadTracker,
+    pub(crate) head_tracker: Arc<HeadTracker>,
     /// A cache dedicated to block processing.
     pub(crate) block_processing_cache: TimeoutRwLock<SnapshotCache<T::EthSpec>>,
     /// Caches the shuffling for a given epoch and state root.
@@ -498,6 +501,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self.head_tracker.heads()
     }

+    pub fn knows_head(&self, block_hash: &SignedBeaconBlockHash) -> bool {
+        self.head_tracker.contains_head((*block_hash).into())
+    }
+
     /// Returns the `BeaconState` at the given slot.
     ///
     /// Returns `None` when the state is not found in the database or there is an error skipping
@@ -1796,6 +1803,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let beacon_block_root = self.fork_choice.find_head(&self)?;

         let current_head = self.head_info()?;
+        let old_finalized_root = current_head.finalized_checkpoint.root;

         if beacon_block_root == current_head.block_root {
             return Ok(());
@@ -1923,7 +1931,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         });

         if new_finalized_epoch != old_finalized_epoch {
-            self.after_finalization(old_finalized_epoch, finalized_root)?;
+            self.after_finalization(
+                old_finalized_epoch,
+                finalized_root,
+                old_finalized_root.into(),
+            )?;
         }

         let _ = self.event_handler.register(EventKind::BeaconHeadChanged {
@@ -1942,6 +1954,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         &self,
         old_finalized_epoch: Epoch,
         finalized_block_root: Hash256,
+        old_finalized_root: SignedBeaconBlockHash,
     ) -> Result<(), Error> {
         let finalized_block = self
             .store
@@ -1981,10 +1994,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         // TODO: configurable max finality distance
         let max_finality_distance = 0;
-        self.store_migrator.freeze_to_state(
+        self.store_migrator.process_finalization(
             finalized_block.state_root,
             finalized_state,
             max_finality_distance,
+            Arc::clone(&self.head_tracker),
+            old_finalized_root,
+            finalized_block_root.into(),
         );

         let _ = self.event_handler.register(EventKind::BeaconFinalization {
@@ -2052,6 +2068,100 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         Ok(dump)
     }

+    pub fn dump_as_dot<W: Write>(&self, output: &mut W) {
+        let canonical_head_hash = self
+            .canonical_head
+            .try_read_for(HEAD_LOCK_TIMEOUT)
+            .ok_or_else(|| Error::CanonicalHeadLockTimeout)
+            .unwrap()
+            .beacon_block_root;
+        let mut visited: HashSet<Hash256> = HashSet::new();
+        let mut finalized_blocks: HashSet<Hash256> = HashSet::new();
+
+        let genesis_block_hash = Hash256::zero();
+        write!(output, "digraph beacon {{\n").unwrap();
+        write!(output, "\t_{:?}[label=\"genesis\"];\n", genesis_block_hash).unwrap();
+
+        // Canonical head needs to be processed first as otherwise finalized blocks aren't detected
+        // properly.
+        let heads = {
+            let mut heads = self.heads();
+            let canonical_head_index = heads
+                .iter()
+                .position(|(block_hash, _)| *block_hash == canonical_head_hash)
+                .unwrap();
+            let (canonical_head_hash, canonical_head_slot) =
+                heads.swap_remove(canonical_head_index);
+            heads.insert(0, (canonical_head_hash, canonical_head_slot));
+            heads
+        };
+
+        for (head_hash, _head_slot) in heads {
+            for (block_hash, signed_beacon_block) in
+                ParentRootBlockIterator::new(&*self.store, head_hash)
+            {
+                if visited.contains(&block_hash) {
+                    break;
+                }
+                visited.insert(block_hash);
+
+                if signed_beacon_block.slot() % T::EthSpec::slots_per_epoch() == 0 {
+                    let block = self.get_block(&block_hash).unwrap().unwrap();
+                    let state = self
+                        .get_state(&block.state_root(), Some(block.slot()))
+                        .unwrap()
+                        .unwrap();
+                    finalized_blocks.insert(state.finalized_checkpoint.root);
+                }
+
+                if block_hash == canonical_head_hash {
+                    write!(
+                        output,
+                        "\t_{:?}[label=\"{} ({})\" shape=box3d];\n",
+                        block_hash,
+                        block_hash,
+                        signed_beacon_block.slot()
+                    )
+                    .unwrap();
+                } else if finalized_blocks.contains(&block_hash) {
+                    write!(
+                        output,
+                        "\t_{:?}[label=\"{} ({})\" shape=Msquare];\n",
+                        block_hash,
+                        block_hash,
+                        signed_beacon_block.slot()
+                    )
+                    .unwrap();
+                } else {
+                    write!(
+                        output,
+                        "\t_{:?}[label=\"{} ({})\" shape=box];\n",
+                        block_hash,
+                        block_hash,
+                        signed_beacon_block.slot()
+                    )
+                    .unwrap();
+                }
+                write!(
+                    output,
+                    "\t_{:?} -> _{:?};\n",
+                    block_hash,
+                    signed_beacon_block.parent_root()
+                )
+                .unwrap();
+            }
+        }
+
+        write!(output, "}}\n").unwrap();
+    }
+
+    // Used for debugging
+    #[allow(dead_code)]
+    pub fn dump_dot_file(&self, file_name: &str) {
+        let mut file = std::fs::File::create(file_name).unwrap();
+        self.dump_as_dot(&mut file);
+    }
 }

 impl<T: BeaconChainTypes> Drop for BeaconChain<T> {
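The Graphviz emitter added above encodes chain topology in node shapes: the canonical head is drawn as box3d, finalized blocks as Msquare, and all other blocks as plain boxes. A sketch of how it might be used from a debugging session (the file path is an example, and `chain` stands for any `BeaconChain` handle):

// Write the block DAG to disk...
chain.dump_dot_file("/tmp/chain.dot");
// ...then render it with Graphviz from a shell:
//     dot -Tsvg /tmp/chain.dot -o /tmp/chain.svg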
@@ -5,6 +5,7 @@ use crate::eth1_chain::{CachingEth1Backend, SszEth1};
 use crate::events::NullEventHandler;
 use crate::fork_choice::SszForkChoice;
 use crate::head_tracker::HeadTracker;
+use crate::migrate::Migrate;
 use crate::persisted_beacon_chain::PersistedBeaconChain;
 use crate::shuffling_cache::ShufflingCache;
 use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
@@ -47,7 +48,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     for Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -96,7 +97,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -431,7 +432,7 @@ where
             event_handler: self
                 .event_handler
                 .ok_or_else(|| "Cannot build without an event handler".to_string())?,
-            head_tracker: self.head_tracker.unwrap_or_default(),
+            head_tracker: Arc::new(self.head_tracker.unwrap_or_default()),
             block_processing_cache: TimeoutRwLock::new(SnapshotCache::new(
                 DEFAULT_SNAPSHOT_CACHE_SIZE,
                 canonical_head,
@@ -463,7 +464,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -533,7 +534,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -571,7 +572,7 @@ impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -610,7 +611,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -642,12 +643,12 @@ fn genesis_block<T: EthSpec>(
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::migrate::{MemoryStore, NullMigrator};
     use eth2_hashing::hash;
     use genesis::{generate_deterministic_keypairs, interop_genesis_state};
     use sloggers::{null::NullLoggerBuilder, Build};
     use ssz::Encode;
     use std::time::Duration;
-    use store::{migrate::NullMigrator, MemoryStore};
     use tempfile::tempdir;
     use types::{EthSpec, MinimalEthSpec, Slot};
@@ -25,11 +25,22 @@ impl HeadTracker {
     /// the upstream user.
     pub fn register_block<E: EthSpec>(&self, block_root: Hash256, block: &BeaconBlock<E>) {
         let mut map = self.0.write();

         map.remove(&block.parent_root);
         map.insert(block_root, block.slot);
     }

+    /// Removes abandoned head.
+    pub fn remove_head(&self, block_root: Hash256) {
+        let mut map = self.0.write();
+        debug_assert!(map.contains_key(&block_root));
+        map.remove(&block_root);
+    }
+
+    /// Returns true iff `block_root` is a recognized head.
+    pub fn contains_head(&self, block_root: Hash256) -> bool {
+        self.0.read().contains_key(&block_root)
+    }
+
     /// Returns the list of heads in the chain.
     pub fn heads(&self) -> Vec<(Hash256, Slot)> {
         self.0
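Taken together, the new methods let pruning code query and retire heads. A small sketch under the assumption that `HeadTracker` wraps the `RwLock<HashMap<Hash256, Slot>>` implied by the `self.0` accesses above; `abandoned_root` is a hypothetical `Hash256`:

let tracker = HeadTracker::default();
// register_block() is called for every imported block; it removes the
// parent entry, so the map only ever contains chain tips.
if tracker.contains_head(abandoned_root) {
    tracker.remove_head(abandoned_root); // debug_asserts that the root is known
}
assert!(!tracker.contains_head(abandoned_root));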
@@ -11,6 +11,7 @@ pub mod events;
 mod fork_choice;
 mod head_tracker;
 mod metrics;
+pub mod migrate;
 mod persisted_beacon_chain;
 mod shuffling_cache;
 mod snapshot_cache;
beacon_node/beacon_chain/src/migrate.rs (new file, 349 lines)
@@ -0,0 +1,349 @@
use crate::errors::BeaconChainError;
use crate::head_tracker::HeadTracker;
use parking_lot::Mutex;
use slog::{debug, warn, Logger};
use std::collections::{HashMap, HashSet};
use std::iter::FromIterator;
use std::mem;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use store::iter::{ParentRootBlockIterator, RootsIterator};
use store::{hot_cold_store::HotColdDBError, Error, SimpleDiskStore, Store};
pub use store::{DiskStore, MemoryStore};
use types::*;
use types::{BeaconState, EthSpec, Hash256, Slot};

/// Trait for migration processes that update the database upon finalization.
pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
    fn new(db: Arc<S>, log: Logger) -> Self;

    fn process_finalization(
        &self,
        _state_root: Hash256,
        _new_finalized_state: BeaconState<E>,
        _max_finality_distance: u64,
        _head_tracker: Arc<HeadTracker>,
        _old_finalized_block_hash: SignedBeaconBlockHash,
        _new_finalized_block_hash: SignedBeaconBlockHash,
    ) {
    }

    /// Traverses live heads and prunes blocks and states of chains that we know can't be built
    /// upon because finalization would prohibit it. This is an optimisation intended to save disk
    /// space.
    ///
    /// Assumptions:
    ///  * It is called after every finalization.
    fn prune_abandoned_forks(
        store: Arc<S>,
        head_tracker: Arc<HeadTracker>,
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_slot: Slot,
    ) -> Result<(), BeaconChainError> {
        let old_finalized_slot = store
            .get_block(&old_finalized_block_hash.into())?
            .ok_or_else(|| BeaconChainError::MissingBeaconBlock(old_finalized_block_hash.into()))?
            .slot();

        // Collect hashes from new_finalized_block back to old_finalized_block (inclusive)
        let mut found_block = false; // hack for `take_until`
        let newly_finalized_blocks: HashMap<SignedBeaconBlockHash, Slot> = HashMap::from_iter(
            ParentRootBlockIterator::new(&*store, new_finalized_block_hash.into())
                .take_while(|(block_hash, _)| {
                    if found_block {
                        false
                    } else {
                        found_block |= *block_hash == old_finalized_block_hash.into();
                        true
                    }
                })
                .map(|(block_hash, block)| (block_hash.into(), block.slot())),
        );
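The `found_block` flag above emulates an inclusive `take_until`: a plain `take_while` stops before yielding the first element that fails the predicate, but the traversal needs the old finalized block itself included. The same trick in isolation, on plain integers:

// Inclusive take_while ("take until"): yield items up to AND including
// the first item matching the stop condition.
let mut found = false;
let taken: Vec<u32> = (0..10)
    .take_while(|&n| {
        if found {
            false
        } else {
            found |= n == 3; // stop condition
            true
        }
    })
    .collect();
assert_eq!(taken, vec![0, 1, 2, 3]);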

        // We don't know which blocks are shared among abandoned chains, so we buffer and delete
        // everything in one fell swoop.
        let mut abandoned_blocks: HashSet<SignedBeaconBlockHash> = HashSet::new();
        let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new();
        let mut abandoned_heads: HashSet<Hash256> = HashSet::new();

        for (head_hash, head_slot) in head_tracker.heads() {
            let mut potentially_abandoned_head: Option<Hash256> = Some(head_hash);
            let mut potentially_abandoned_blocks: Vec<(
                Slot,
                Option<SignedBeaconBlockHash>,
                Option<BeaconStateHash>,
            )> = Vec::new();

            let head_state_hash = store
                .get_block(&head_hash)?
                .ok_or_else(|| BeaconStateError::MissingBeaconBlock(head_hash.into()))?
                .state_root();

            let iterator = std::iter::once((head_hash, head_state_hash, head_slot))
                .chain(RootsIterator::from_block(Arc::clone(&store), head_hash)?);
            for (block_hash, state_hash, slot) in iterator {
                if slot < old_finalized_slot {
                    // We must assume here any candidate chains include old_finalized_block_hash,
                    // i.e. there aren't any forks starting at a block that is a strict ancestor of
                    // old_finalized_block_hash.
                    break;
                }
                match newly_finalized_blocks.get(&block_hash.into()).copied() {
                    // Block is not finalized, mark it and its state for deletion
                    None => {
                        potentially_abandoned_blocks.push((
                            slot,
                            Some(block_hash.into()),
                            Some(state_hash.into()),
                        ));
                    }
                    Some(finalized_slot) => {
                        // Block root is finalized, and we have reached the slot it was finalized
                        // at: we've hit a shared part of the chain.
                        if finalized_slot == slot {
                            // The first finalized block of a candidate chain lies after (in terms
                            // of slots order) the newly finalized block. It's not a candidate for
                            // pruning.
                            if finalized_slot == new_finalized_slot {
                                potentially_abandoned_blocks.clear();
                                potentially_abandoned_head.take();
                            }

                            break;
                        }
                        // Block root is finalized, but we're at a skip slot: delete the state only.
                        else {
                            potentially_abandoned_blocks.push((
                                slot,
                                None,
                                Some(state_hash.into()),
                            ));
                        }
                    }
                }
            }

            abandoned_heads.extend(potentially_abandoned_head.into_iter());
            if !potentially_abandoned_blocks.is_empty() {
                abandoned_blocks.extend(
                    potentially_abandoned_blocks
                        .iter()
                        .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash),
                );
                abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map(
                    |(slot, _, maybe_state_hash)| match maybe_state_hash {
                        None => None,
                        Some(state_hash) => Some((*slot, *state_hash)),
                    },
                ));
            }
        }

        // XXX Should be performed atomically, see
        // https://github.com/sigp/lighthouse/issues/692
        for block_hash in abandoned_blocks.into_iter() {
            store.delete_block(&block_hash.into())?;
        }
        for (slot, state_hash) in abandoned_states.into_iter() {
            store.delete_state(&state_hash.into(), slot)?;
        }
        for head_hash in abandoned_heads.into_iter() {
            head_tracker.remove_head(head_hash);
        }

        Ok(())
    }
}

/// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator;

impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
    fn new(_: Arc<SimpleDiskStore<E>>, _: Logger) -> Self {
        NullMigrator
    }
}

impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
    fn new(_: Arc<MemoryStore<E>>, _: Logger) -> Self {
        NullMigrator
    }
}

/// Migrator that immediately calls the store's migration function, blocking the current execution.
///
/// Mostly useful for tests.
pub struct BlockingMigrator<S> {
    db: Arc<S>,
}

impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
    fn new(db: Arc<S>, _: Logger) -> Self {
        BlockingMigrator { db }
    }

    fn process_finalization(
        &self,
        state_root: Hash256,
        new_finalized_state: BeaconState<E>,
        _max_finality_distance: u64,
        head_tracker: Arc<HeadTracker>,
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
    ) {
        if let Err(e) = S::process_finalization(self.db.clone(), state_root, &new_finalized_state) {
            // This migrator is only used for testing, so we just log to stderr without a logger.
            eprintln!("Migration error: {:?}", e);
        }

        if let Err(e) = Self::prune_abandoned_forks(
            self.db.clone(),
            head_tracker,
            old_finalized_block_hash,
            new_finalized_block_hash,
            new_finalized_state.slot,
        ) {
            eprintln!("Pruning error: {:?}", e);
        }
    }
}

type MpscSender<E> = mpsc::Sender<(
    Hash256,
    BeaconState<E>,
    Arc<HeadTracker>,
    SignedBeaconBlockHash,
    SignedBeaconBlockHash,
    Slot,
)>;

/// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec> {
    db: Arc<DiskStore<E>>,
    tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
    log: Logger,
}

impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
    fn new(db: Arc<DiskStore<E>>, log: Logger) -> Self {
        let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
        Self { db, tx_thread, log }
    }

    /// Perform the freezing operation on the database.
    fn process_finalization(
        &self,
        finalized_state_root: Hash256,
        new_finalized_state: BeaconState<E>,
        max_finality_distance: u64,
        head_tracker: Arc<HeadTracker>,
        old_finalized_block_hash: SignedBeaconBlockHash,
        new_finalized_block_hash: SignedBeaconBlockHash,
    ) {
        if !self.needs_migration(new_finalized_state.slot, max_finality_distance) {
            return;
        }

        let (ref mut tx, ref mut thread) = *self.tx_thread.lock();

        let new_finalized_slot = new_finalized_state.slot;
        if let Err(tx_err) = tx.send((
            finalized_state_root,
            new_finalized_state,
            head_tracker,
            old_finalized_block_hash,
            new_finalized_block_hash,
            new_finalized_slot,
        )) {
            let (new_tx, new_thread) = Self::spawn_thread(self.db.clone(), self.log.clone());

            drop(mem::replace(tx, new_tx));
            let old_thread = mem::replace(thread, new_thread);

            // Join the old thread, which will probably have panicked, or may have
            // halted normally just now as a result of us dropping the old `mpsc::Sender`.
            if let Err(thread_err) = old_thread.join() {
                warn!(
                    self.log,
                    "Migration thread died, so it was restarted";
                    "reason" => format!("{:?}", thread_err)
                );
            }

            // Retry at most once, we could recurse but that would risk overflowing the stack.
            let _ = tx.send(tx_err.0);
        }
    }
}

impl<E: EthSpec> BackgroundMigrator<E> {
    /// Return true if a migration needs to be performed, given a new `finalized_slot`.
    fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
        let finality_distance = finalized_slot - self.db.get_split_slot();
        finality_distance > max_finality_distance
    }
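Since `max_finality_distance` is hard-coded to 0 at the call site in `beacon_chain.rs`, this check fires whenever finalization has advanced past the store's split slot at all. Illustrative numbers (not taken from the diff):

// split slot = 64, newly finalized slot = 96:
// finality_distance = 96 - 64 = 32, and 32 > 0, so a migration is queued.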

    /// Spawn a new child thread to run the migration process.
    ///
    /// Return a channel handle for sending new finalized states to the thread.
    fn spawn_thread(
        db: Arc<DiskStore<E>>,
        log: Logger,
    ) -> (
        mpsc::Sender<(
            Hash256,
            BeaconState<E>,
            Arc<HeadTracker>,
            SignedBeaconBlockHash,
            SignedBeaconBlockHash,
            Slot,
        )>,
        thread::JoinHandle<()>,
    ) {
        let (tx, rx) = mpsc::channel();
        let thread = thread::spawn(move || {
            while let Ok((
                state_root,
                state,
                head_tracker,
                old_finalized_block_hash,
                new_finalized_block_hash,
                new_finalized_slot,
            )) = rx.recv()
            {
                match DiskStore::process_finalization(db.clone(), state_root, &state) {
                    Ok(()) => {}
                    Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                        debug!(
                            log,
                            "Database migration postponed, unaligned finalized block";
                            "slot" => slot.as_u64()
                        );
                    }
                    Err(e) => {
                        warn!(
                            log,
                            "Database migration failed";
                            "error" => format!("{:?}", e)
                        );
                    }
                };

                match Self::prune_abandoned_forks(
                    db.clone(),
                    head_tracker,
                    old_finalized_block_hash,
                    new_finalized_block_hash,
                    new_finalized_slot,
                ) {
                    Ok(()) => {}
                    Err(e) => warn!(log, "Block pruning failed: {:?}", e),
                }
            }
        });

        (tx, thread)
    }
}
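The send-failure handling in `process_finalization` above is a small self-healing-worker pattern: a failed `send` means the receiver thread is gone, so spawn a replacement, reap the old thread, and retry once. Distilled into a self-contained sketch with a `u64` payload standing in for the migration tuple (the names here are invented for illustration):

use std::sync::mpsc;
use std::thread;

fn spawn_worker() -> (mpsc::Sender<u64>, thread::JoinHandle<()>) {
    let (tx, rx) = mpsc::channel();
    let handle = thread::spawn(move || {
        // Worker loop: exits when all senders are dropped.
        while let Ok(job) = rx.recv() {
            let _ = job; // process the job here
        }
    });
    (tx, handle)
}

fn send_or_respawn(tx: &mut mpsc::Sender<u64>, handle: &mut thread::JoinHandle<()>, job: u64) {
    if let Err(err) = tx.send(job) {
        let (new_tx, new_handle) = spawn_worker();
        *tx = new_tx;
        let old_thread = std::mem::replace(handle, new_handle);
        let _ = old_thread.join(); // reap the dead worker
        let _ = tx.send(err.0); // retry at most once; no recursion
    }
}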
@@ -1,6 +1,7 @@
 pub use crate::beacon_chain::{
     BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY,
 };
+use crate::migrate::{BlockingMigrator, Migrate, NullMigrator};
 pub use crate::persisted_beacon_chain::PersistedBeaconChain;
 use crate::{
     builder::{BeaconChainBuilder, Witness},
@@ -15,16 +16,16 @@ use sloggers::{null::NullLoggerBuilder, Build};
 use slot_clock::TestingSlotClock;
 use state_processing::per_slot_processing;
 use std::borrow::Cow;
 use std::collections::HashMap;
 use std::sync::Arc;
 use std::time::Duration;
-use store::{
-    migrate::{BlockingMigrator, NullMigrator},
-    DiskStore, MemoryStore, Migrate, Store,
-};
+use store::{DiskStore, MemoryStore, Store};
 use tempfile::{tempdir, TempDir};
 use tree_hash::TreeHash;
 use types::{
-    AggregateSignature, Attestation, BeaconState, ChainSpec, Domain, EthSpec, Hash256, Keypair,
-    SecretKey, Signature, SignedBeaconBlock, SignedRoot, Slot,
+    AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, EthSpec,
+    Hash256, Keypair, SecretKey, Signature, SignedBeaconBlock, SignedBeaconBlockHash, SignedRoot,
+    Slot,
 };

 pub use types::test_utils::generate_deterministic_keypairs;
@@ -136,7 +137,10 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .logger(log.clone())
             .custom_spec(spec.clone())
             .store(store.clone())
-            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(store))
+            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
+                store,
+                log.clone(),
+            ))
             .data_dir(data_dir.path().to_path_buf())
             .genesis_state(
                 interop_genesis_state::<E>(&keypairs, HARNESS_GENESIS_TIME, &spec)
@@ -176,7 +180,10 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .logger(log.clone())
             .custom_spec(spec)
             .store(store.clone())
-            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(store))
+            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
+                store,
+                log.clone(),
+            ))
             .data_dir(data_dir.path().to_path_buf())
             .resume_from_db()
             .expect("should resume beacon chain from db")
@@ -278,6 +285,127 @@ where
         head_block_root.expect("did not produce any blocks")
     }

+    /// Returns current canonical head slot
+    pub fn get_chain_slot(&self) -> Slot {
+        self.chain.slot().unwrap()
+    }
+
+    /// Returns current canonical head state
+    pub fn get_head_state(&self) -> BeaconState<E> {
+        self.chain.head().unwrap().beacon_state
+    }
+
+    /// Adds a single block (synchronously) onto either the canonical chain (block_strategy ==
+    /// OnCanonicalHead) or a fork (block_strategy == ForkCanonicalChainAt).
+    pub fn add_block(
+        &self,
+        state: &BeaconState<E>,
+        block_strategy: BlockStrategy,
+        slot: Slot,
+        validators: &[usize],
+    ) -> (SignedBeaconBlockHash, BeaconState<E>) {
+        while self.chain.slot().expect("should have a slot") < slot {
+            self.advance_slot();
+        }
+
+        let (block, new_state) = self.build_block(state.clone(), slot, block_strategy);
+
+        let outcome = self
+            .chain
+            .process_block(block)
+            .expect("should not error during block processing");
+
+        self.chain.fork_choice().expect("should find head");
+
+        if let BlockProcessingOutcome::Processed { block_root } = outcome {
+            let attestation_strategy = AttestationStrategy::SomeValidators(validators.to_vec());
+            self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot);
+            (block_root.into(), new_state)
+        } else {
+            panic!("block should be successfully processed: {:?}", outcome);
+        }
+    }
+
+    /// `add_block()` repeated `num_blocks` times.
+    pub fn add_blocks(
+        &self,
+        mut state: BeaconState<E>,
+        mut slot: Slot,
+        num_blocks: usize,
+        attesting_validators: &[usize],
+        block_strategy: BlockStrategy,
+    ) -> (
+        HashMap<Slot, SignedBeaconBlockHash>,
+        HashMap<Slot, BeaconStateHash>,
+        Slot,
+        SignedBeaconBlockHash,
+        BeaconState<E>,
+    ) {
+        let mut blocks: HashMap<Slot, SignedBeaconBlockHash> = HashMap::with_capacity(num_blocks);
+        let mut states: HashMap<Slot, BeaconStateHash> = HashMap::with_capacity(num_blocks);
+        for _ in 0..num_blocks {
+            let (new_root_hash, new_state) =
+                self.add_block(&state, block_strategy, slot, attesting_validators);
+            blocks.insert(slot, new_root_hash);
+            states.insert(slot, new_state.tree_hash_root().into());
+            state = new_state;
+            slot += 1;
+        }
+        let head_hash = blocks[&(slot - 1)];
+        (blocks, states, slot, head_hash, state)
+    }
+
+    /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
+    pub fn add_canonical_chain_blocks(
+        &self,
+        state: BeaconState<E>,
+        slot: Slot,
+        num_blocks: usize,
+        attesting_validators: &[usize],
+    ) -> (
+        HashMap<Slot, SignedBeaconBlockHash>,
+        HashMap<Slot, BeaconStateHash>,
+        Slot,
+        SignedBeaconBlockHash,
+        BeaconState<E>,
+    ) {
+        let block_strategy = BlockStrategy::OnCanonicalHead;
+        self.add_blocks(
+            state,
+            slot,
+            num_blocks,
+            attesting_validators,
+            block_strategy,
+        )
+    }
+
+    /// A wrapper on `add_blocks()` to avoid passing enums explicitly.
+    pub fn add_stray_blocks(
+        &self,
+        state: BeaconState<E>,
+        slot: Slot,
+        num_blocks: usize,
+        attesting_validators: &[usize],
+    ) -> (
+        HashMap<Slot, SignedBeaconBlockHash>,
+        HashMap<Slot, BeaconStateHash>,
+        Slot,
+        SignedBeaconBlockHash,
+        BeaconState<E>,
+    ) {
+        let block_strategy = BlockStrategy::ForkCanonicalChainAt {
+            previous_slot: slot,
+            first_slot: slot + 2,
+        };
+        self.add_blocks(
+            state,
+            slot + 2,
+            num_blocks,
+            attesting_validators,
+            block_strategy,
+        )
+    }
+
     /// Returns a newly created block, signed by the proposer for the given slot.
     fn build_block(
         &self,
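These helpers give the pruning tests a compact vocabulary: each returns the produced block roots and state roots keyed by slot, plus the final slot, head root, and post-state. A typical call shape (mirroring, not replacing, the tests in the next file; `honest_validators`, `faulty_validators`, and `slots_per_epoch` are defined there):

let slot = harness.get_chain_slot();
let state = harness.get_head_state();
// One epoch of canonical blocks attested by the honest supermajority...
let (canonical_blocks, _, slot, _, state) =
    harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
// ...then a shorter stray fork attested only by the faulty minority.
let (stray_blocks, stray_states, _, stray_head, _) =
    harness.add_stray_blocks(state.clone(), slot, slots_per_epoch - 1, &faulty_validators);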
@@ -6,9 +6,12 @@ extern crate lazy_static;
 use beacon_chain::test_utils::{
     AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
 };
-use beacon_chain::AttestationProcessingOutcome;
 use beacon_chain::BeaconSnapshot;
+use beacon_chain::{AttestationProcessingOutcome, StateSkipConfig};
 use rand::Rng;
 use sloggers::{null::NullLoggerBuilder, Build};
+use std::collections::HashMap;
 use std::collections::HashSet;
 use std::sync::Arc;
 use store::{
     iter::{BlockRootsIterator, StateRootsIterator},
@@ -700,6 +703,551 @@ fn check_shuffling_compatible(
     }
 }

+// Ensure blocks from abandoned forks are pruned from the Hot DB
+#[test]
+fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
+    const VALIDATOR_COUNT: usize = 24;
+    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
+    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
+    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
+    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
+    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;
+
+    let slot = harness.get_chain_slot();
+    let state = harness.get_head_state();
+    let (canonical_blocks_pre_finalization, _, slot, _, state) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
+    let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks(
+        harness.get_head_state(),
+        slot,
+        slots_per_epoch - 1,
+        &faulty_validators,
+    );
+
+    // Precondition: Ensure all stray_blocks blocks are still known
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness
+            .chain
+            .get_state(&state_hash.into(), Some(slot))
+            .unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    // Precondition: Only genesis is finalized
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    assert_eq!(
+        get_finalized_epoch_boundary_blocks(&chain_dump),
+        vec![Hash256::zero().into()].into_iter().collect(),
+    );
+
+    assert!(harness.chain.knows_head(&stray_head));
+
+    // Trigger finalization
+    let (canonical_blocks_post_finalization, _, _, _, _) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 5, &honest_validators);
+
+    // Postcondition: New blocks got finalized
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
+    assert_eq!(
+        finalized_blocks,
+        vec![
+            Hash256::zero().into(),
+            canonical_blocks_pre_finalization[&Slot::new(slots_per_epoch as u64)],
+            canonical_blocks_post_finalization[&Slot::new((slots_per_epoch * 2) as u64)],
+        ]
+        .into_iter()
+        .collect()
+    );
+
+    // Postcondition: Ensure all stray_blocks blocks have been pruned
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_none(),
+            "abandoned block {} should have been pruned",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_none(),
+            "stray state {} at slot {} should have been deleted",
+            state_hash,
+            slot
+        );
+    }
+
+    assert!(!harness.chain.knows_head(&stray_head));
+}
+
+#[test]
+fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
+    const VALIDATOR_COUNT: usize = 24;
+    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
+    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
+    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
+    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
+    let all_validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
+    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;
+
+    // Fill up 0th epoch
+    let slot = harness.get_chain_slot();
+    let state = harness.get_head_state();
+    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
+
+    // Fill up 1st epoch
+    let (_, _, canonical_slot, shared_head, canonical_state) =
+        harness.add_canonical_chain_blocks(state, slot, 1, &all_validators);
+    let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks(
+        canonical_state.clone(),
+        canonical_slot,
+        1,
+        &faulty_validators,
+    );
+
+    // Preconditions
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    assert_eq!(
+        get_finalized_epoch_boundary_blocks(&chain_dump),
+        vec![Hash256::zero().into()].into_iter().collect(),
+    );
+
+    assert!(get_blocks(&chain_dump).contains(&shared_head));
+
+    // Trigger finalization
+    let (canonical_blocks, _, _, _, _) = harness.add_canonical_chain_blocks(
+        canonical_state,
+        canonical_slot,
+        slots_per_epoch * 5,
+        &honest_validators,
+    );
+
+    // Postconditions
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
+    assert_eq!(
+        finalized_blocks,
+        vec![
+            Hash256::zero().into(),
+            canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)],
+            canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)],
+        ]
+        .into_iter()
+        .collect()
+    );
+
+    for &block_hash in stray_blocks.values() {
+        assert!(
+            harness
+                .chain
+                .get_block(&block_hash.into())
+                .unwrap()
+                .is_none(),
+            "stray block {} should have been pruned",
+            block_hash,
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_none(),
+            "stray state {} at slot {} should have been deleted",
+            state_hash,
+            slot
+        );
+    }
+
+    assert!(!harness.chain.knows_head(&stray_head));
+    assert!(get_blocks(&chain_dump).contains(&shared_head));
+}
+
+#[test]
+fn pruning_does_not_touch_blocks_prior_to_finalization() {
+    const VALIDATOR_COUNT: usize = 24;
+    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
+    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
+    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
+    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
+    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;
+
+    // Fill up 0th epoch with canonical chain blocks
+    let slot = harness.get_chain_slot();
+    let state = harness.get_head_state();
+    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
+
+    // Fill up 1st epoch. Contains a fork.
+    let (stray_blocks, stray_states, _, stray_head, _) =
+        harness.add_stray_blocks(state.clone(), slot, slots_per_epoch - 1, &faulty_validators);
+
+    // Preconditions
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    assert_eq!(
+        get_finalized_epoch_boundary_blocks(&chain_dump),
+        vec![Hash256::zero().into()].into_iter().collect(),
+    );
+
+    // Trigger finalization
+    let (_, _, _, _, _) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 4, &honest_validators);
+
+    // Postconditions
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
+    assert_eq!(
+        finalized_blocks,
+        vec![
+            Hash256::zero().into(),
+            canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)],
+        ]
+        .into_iter()
+        .collect()
+    );
+
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    assert!(harness.chain.knows_head(&stray_head));
+}
+
+#[test]
+fn prunes_fork_running_past_finalized_checkpoint() {
+    const VALIDATOR_COUNT: usize = 24;
+    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
+    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
+    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
+    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
+    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;
+
+    // Fill up 0th epoch with canonical chain blocks
+    let slot = harness.get_chain_slot();
+    let state = harness.get_head_state();
+    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
+
+    // Fill up 1st epoch. Contains a fork.
+    let (stray_blocks_first_epoch, stray_states_first_epoch, stray_slot, _, stray_state) =
+        harness.add_stray_blocks(state.clone(), slot, slots_per_epoch, &faulty_validators);
+
+    let (canonical_blocks_first_epoch, _, canonical_slot, _, canonical_state) =
+        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
+
+    // Fill up 2nd epoch. Extends both the canonical chain and the fork.
+    let (stray_blocks_second_epoch, stray_states_second_epoch, _, stray_head, _) = harness
+        .add_stray_blocks(
+            stray_state,
+            stray_slot,
+            slots_per_epoch - 1,
+            &faulty_validators,
+        );
+
+    // Precondition: Ensure all stray_blocks blocks are still known
+    let stray_blocks: HashMap<Slot, SignedBeaconBlockHash> = stray_blocks_first_epoch
+        .into_iter()
+        .chain(stray_blocks_second_epoch.into_iter())
+        .collect();
+
+    let stray_states: HashMap<Slot, BeaconStateHash> = stray_states_first_epoch
+        .into_iter()
+        .chain(stray_states_second_epoch.into_iter())
+        .collect();
+
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    // Precondition: Only genesis is finalized
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    assert_eq!(
+        get_finalized_epoch_boundary_blocks(&chain_dump),
+        vec![Hash256::zero().into()].into_iter().collect(),
+    );
+
+    assert!(harness.chain.knows_head(&stray_head));
+
+    // Trigger finalization
+    let (canonical_blocks_second_epoch, _, _, _, _) = harness.add_canonical_chain_blocks(
+        canonical_state,
+        canonical_slot,
+        slots_per_epoch * 4,
+        &honest_validators,
+    );
+
+    // Postconditions
+    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
+        .into_iter()
+        .chain(canonical_blocks_first_epoch.into_iter())
+        .chain(canonical_blocks_second_epoch.into_iter())
+        .collect();
+
+    // Postcondition: New blocks got finalized
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
+    assert_eq!(
+        finalized_blocks,
+        vec![
+            Hash256::zero().into(),
+            canonical_blocks[&Slot::new(slots_per_epoch as u64)],
+            canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)],
+        ]
+        .into_iter()
+        .collect()
+    );
+
+    // Postcondition: Ensure all stray_blocks blocks have been pruned
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_none(),
+            "abandoned block {} should have been pruned",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_none(),
+            "stray state {} at slot {} should have been deleted",
+            state_hash,
+            slot
+        );
+    }
+
+    assert!(!harness.chain.knows_head(&stray_head));
+}
+
+// This is to check if states outside of normal block processing are pruned correctly.
+#[test]
+fn prunes_skipped_slots_states() {
+    const VALIDATOR_COUNT: usize = 24;
+    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
+    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
+    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
+    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
+    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;
+
+    // Arrange skipped slots so as to cross the epoch boundary. That way, we exercise the code
+    // responsible for storing state outside of normal block processing.
+
+    let canonical_slot = harness.get_chain_slot();
+    let canonical_state = harness.get_head_state();
+    let (canonical_blocks_zeroth_epoch, _, canonical_slot, _, canonical_state) = harness
+        .add_canonical_chain_blocks(
+            canonical_state,
+            canonical_slot,
+            slots_per_epoch - 1,
+            &honest_validators,
+        );
+
+    let (stray_blocks, stray_states, stray_slot, _, _) = harness.add_stray_blocks(
+        canonical_state.clone(),
+        canonical_slot,
+        slots_per_epoch,
+        &faulty_validators,
+    );
+
+    // Preconditions
+    for &block_hash in stray_blocks.values() {
+        let block = harness.chain.get_block(&block_hash.into()).unwrap();
+        assert!(
+            block.is_some(),
+            "stray block {} should be still present",
+            block_hash
+        );
+    }
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_some(),
+            "stray state {} at slot {} should be still present",
+            state_hash,
+            slot
+        );
+    }
+
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    assert_eq!(
+        get_finalized_epoch_boundary_blocks(&chain_dump),
+        vec![Hash256::zero().into()].into_iter().collect(),
+    );
+
+    // Make sure slots were skipped
+    let stray_state = harness
+        .chain
+        .state_at_slot(stray_slot, StateSkipConfig::WithoutStateRoots)
+        .unwrap();
+    let block_root = stray_state.get_block_root(canonical_slot - 1);
+    assert_eq!(stray_state.get_block_root(canonical_slot), block_root);
+    assert_eq!(stray_state.get_block_root(canonical_slot + 1), block_root);
+
+    let skipped_slots = vec![canonical_slot, canonical_slot + 1];
+    for &slot in &skipped_slots {
+        assert_eq!(stray_state.get_block_root(slot), block_root);
+        let state_hash = stray_state.get_state_root(slot).unwrap();
+        assert!(
+            harness
+                .chain
+                .get_state(&state_hash, Some(slot))
+                .unwrap()
+                .is_some(),
+            "skipped slots state should be still present"
+        );
+    }
+
+    // Trigger finalization
+    let (canonical_blocks_post_finalization, _, _, _, _) = harness.add_canonical_chain_blocks(
+        canonical_state,
+        canonical_slot,
+        slots_per_epoch * 5,
+        &honest_validators,
+    );
+
+    // Postconditions
+    let chain_dump = harness.chain.chain_dump().unwrap();
+    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
+    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
+        .into_iter()
+        .chain(canonical_blocks_post_finalization.into_iter())
+        .collect();
+    assert_eq!(
+        finalized_blocks,
+        vec![
+            Hash256::zero().into(),
+            canonical_blocks[&Slot::new(slots_per_epoch as u64)],
+        ]
+        .into_iter()
+        .collect()
+    );
+
+    for (&slot, &state_hash) in &stray_states {
+        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
+        assert!(
+            state.is_none(),
+            "stray state {} at slot {} should have been deleted",
+            state_hash,
+            slot
+        );
+    }
+
+    for &slot in &skipped_slots {
+        assert_eq!(stray_state.get_block_root(slot), block_root);
+        let state_hash = stray_state.get_state_root(slot).unwrap();
+        assert!(
+            harness
+                .chain
+                .get_state(&state_hash, Some(slot))
+                .unwrap()
+                .is_none(),
+            "skipped slot states should have been pruned"
+        );
+    }
+}
+
 /// Check that the head state's slot matches `expected_slot`.
 fn check_slot(harness: &TestHarness, expected_slot: u64) {
     let state = &harness.chain.head().expect("should get head").beacon_state;
@@ -823,3 +1371,19 @@ fn check_iterators(harness: &TestHarness) {
         Some(Slot::new(0))
     );
 }

+fn get_finalized_epoch_boundary_blocks(
+    dump: &[BeaconSnapshot<MinimalEthSpec>],
+) -> HashSet<SignedBeaconBlockHash> {
+    dump.iter()
+        .cloned()
+        .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into())
+        .collect()
+}
+
+fn get_blocks(dump: &[BeaconSnapshot<MinimalEthSpec>]) -> HashSet<SignedBeaconBlockHash> {
+    dump.iter()
+        .cloned()
+        .map(|checkpoint| checkpoint.beacon_block_root.into())
+        .collect()
+}
@ -4,11 +4,9 @@ use crate::Client;
use beacon_chain::{
    builder::{BeaconChainBuilder, Witness},
    eth1_chain::{CachingEth1Backend, Eth1Chain},
    migrate::{BackgroundMigrator, Migrate, NullMigrator},
    slot_clock::{SlotClock, SystemTimeSlotClock},
    store::{
        migrate::{BackgroundMigrator, Migrate, NullMigrator},
        DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig,
    },
    store::{DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig},
    BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler,
};
use environment::RuntimeContext;
@ -68,7 +66,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + Clone + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
@ -365,7 +363,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + Clone + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
@ -411,7 +409,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
@ -459,7 +457,7 @@ impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TSlotClock: SlotClock + 'static,
    TStoreMigrator: store::Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
    TStoreMigrator: Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@ -501,7 +499,7 @@ impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TSlotClock: SlotClock + 'static,
    TStoreMigrator: store::Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
    TStoreMigrator: Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
    TEth1Backend: Eth1ChainBackend<TEthSpec, SimpleDiskStore<TEthSpec>> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@ -561,10 +559,15 @@ where
    TEventHandler: EventHandler<TEthSpec> + 'static,
{
    pub fn background_migrator(mut self) -> Result<Self, String> {
        let context = self
            .runtime_context
            .as_ref()
            .ok_or_else(|| "disk_store requires a log".to_string())?
            .service_context("freezer_db".into());
        let store = self.store.clone().ok_or_else(|| {
            "background_migrator requires the store to be initialized".to_string()
        })?;
        self.store_migrator = Some(BackgroundMigrator::new(store));
        self.store_migrator = Some(BackgroundMigrator::new(store, context.log.clone()));
        Ok(self)
    }
}
@ -582,7 +585,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TSlotClock: SlotClock + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
@ -688,7 +691,7 @@ impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
>
where
    TStore: Store<TEthSpec> + 'static,
    TStoreMigrator: store::Migrate<TStore, TEthSpec>,
    TStoreMigrator: Migrate<TStore, TEthSpec>,
    TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
    TEthSpec: EthSpec + 'static,
    TEventHandler: EventHandler<TEthSpec> + 'static,
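The builder now pulls the migrator from `beacon_chain::migrate`, and the background variant takes a logger for its worker thread. A minimal sketch of selecting it when assembling a client (assumes the store and runtime context were initialized earlier in the chain; not part of the diff):

    // Production disk-backed clients migrate finalized states off the hot DB on a
    // background thread; test configurations can use NullMigrator instead.
    let builder = builder.background_migrator()?;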
@ -408,7 +408,7 @@ impl Service {
            .map(|vec| {
                let first = vec.first().cloned().unwrap_or_else(|| 0);
                let last = vec.last().map(|n| n + 1).unwrap_or_else(|| 0);
                (first..last)
                first..last
            })
            .collect::<Vec<Range<u64>>>()
    })
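Dropping the parentheses silences the `unused_parens` warning mentioned in the commit message: a bare `first..last` already parses as the closure's tail expression. For a sorted run of block numbers the closure yields the half-open range covering them, e.g. (illustrative values):

    let vec: Vec<u64> = vec![5, 6, 7];
    let first = vec.first().cloned().unwrap_or(0);
    let last = vec.last().map(|n| n + 1).unwrap_or(0);
    assert_eq!(first..last, 5..8); // an empty input would give 0..0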
@ -10,6 +10,7 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis};
pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir};
pub use eth2_config::Eth2Config;

use beacon_chain::migrate::{BackgroundMigrator, DiskStore};
use beacon_chain::{
    builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender,
    slot_clock::SystemTimeSlotClock,
@ -20,7 +21,6 @@ use environment::RuntimeContext;
use futures::{Future, IntoFuture};
use slog::{info, warn};
use std::ops::{Deref, DerefMut};
use store::{migrate::BackgroundMigrator, DiskStore};
use types::EthSpec;

/// A type-alias to the tighten the definition of a production-intended `Client`.
@ -204,7 +204,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
    }

    /// Advance the split point of the store, moving new finalized states to the freezer.
    fn freeze_to_state(
    fn process_finalization(
        store: Arc<Self>,
        frozen_head_root: Hash256,
        frozen_head: &BeaconState<E>,
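The rename makes the trait hook describe the event it reacts to rather than one particular action. A sketch of how a migrator might drive it (the call site is hypothetical, though the parameters match the signature above; the return type is not shown in this hunk):

    // On a new finalized checkpoint, hand the finalized state to the store so it
    // can advance its hot/cold split point.
    HotColdDB::process_finalization(store.clone(), finalized_root, &finalized_state)?;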
@ -1,4 +1,4 @@
use crate::Store;
use crate::{Error, Store};
use std::borrow::Cow;
use std::marker::PhantomData;
use std::sync::Arc;
@ -43,12 +43,95 @@ impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, StateRootsIterator<'a, E, U
}

pub struct StateRootsIterator<'a, T: EthSpec, U> {
    inner: RootsIterator<'a, T, U>,
}

impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> {
    pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::new(store, beacon_state),
        }
    }

    pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::owned(store, beacon_state),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
    type Item = (Hash256, Slot);

    fn next(&mut self) -> Option<Self::Item> {
        self.inner
            .next()
            .map(|(_, state_root, slot)| (state_root, slot))
    }
}

/// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the
/// iterator returns `None` indefinitely.
///
/// Uses the `block_roots` field of `BeaconState` as the source of block roots and will
/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been
/// exhausted.
///
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
pub struct BlockRootsIterator<'a, T: EthSpec, U> {
    inner: RootsIterator<'a, T, U>,
}

impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> BlockRootsIterator<'a, T, U> {
    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::new(store, beacon_state),
        }
    }

    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::owned(store, beacon_state),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> {
    type Item = (Hash256, Slot);

    fn next(&mut self) -> Option<Self::Item> {
        self.inner
            .next()
            .map(|(block_root, _, slot)| (block_root, slot))
    }
}

/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`.
pub struct RootsIterator<'a, T: EthSpec, U> {
    store: Arc<U>,
    beacon_state: Cow<'a, BeaconState<T>>,
    slot: Slot,
}

impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
impl<'a, T: EthSpec, U> Clone for RootsIterator<'a, T, U> {
    fn clone(&self) -> Self {
        Self {
            store: self.store.clone(),
@ -58,7 +141,7 @@ impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
    }
}

impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> {
impl<'a, T: EthSpec, U: Store<T>> RootsIterator<'a, T, U> {
    pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            store,
@ -74,10 +157,21 @@ impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> {
            beacon_state: Cow::Owned(beacon_state),
        }
    }

    pub fn from_block(store: Arc<U>, block_hash: Hash256) -> Result<Self, Error> {
        let block = store
            .get_block(&block_hash)?
            .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?;
        let state = store
            .get_state(&block.state_root(), Some(block.slot()))?
            .ok_or_else(|| BeaconStateError::MissingBeaconState(block.state_root().into()))?;
        Ok(Self::owned(store, state))
    }
}

impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
    type Item = (Hash256, Slot);
impl<'a, T: EthSpec, U: Store<T>> Iterator for RootsIterator<'a, T, U> {
    /// (block_root, state_root, slot)
    type Item = (Hash256, Hash256, Slot);

    fn next(&mut self) -> Option<Self::Item> {
        if self.slot == 0 || self.slot > self.beacon_state.slot {
@ -86,18 +180,22 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {

        self.slot -= 1;

        match self.beacon_state.get_state_root(self.slot) {
            Ok(root) => Some((*root, self.slot)),
            Err(BeaconStateError::SlotOutOfBounds) => {
        match (
            self.beacon_state.get_block_root(self.slot),
            self.beacon_state.get_state_root(self.slot),
        ) {
            (Ok(block_root), Ok(state_root)) => Some((*block_root, *state_root, self.slot)),
            (Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => {
                // Read a `BeaconState` from the store that has access to prior historical roots.
                let beacon_state =
                    next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?;

                self.beacon_state = Cow::Owned(beacon_state);

                let root = self.beacon_state.get_state_root(self.slot).ok()?;
                let block_root = *self.beacon_state.get_block_root(self.slot).ok()?;
                let state_root = *self.beacon_state.get_state_root(self.slot).ok()?;

                Some((*root, self.slot))
                Some((block_root, state_root, self.slot))
            }
            _ => None,
        }
@ -165,79 +263,7 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockIterator<'a, T, U> {

    fn next(&mut self) -> Option<Self::Item> {
        let (root, _slot) = self.roots.next()?;
        self.roots.store.get_block(&root).ok()?
    }
}

/// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the
/// iterator returns `None` indefinitely.
///
/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will
/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been
/// exhausted.
///
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
pub struct BlockRootsIterator<'a, T: EthSpec, U> {
    store: Arc<U>,
    beacon_state: Cow<'a, BeaconState<T>>,
    slot: Slot,
}

impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> {
    fn clone(&self) -> Self {
        Self {
            store: self.store.clone(),
            beacon_state: self.beacon_state.clone(),
            slot: self.slot,
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> BlockRootsIterator<'a, T, U> {
    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            store,
            slot: beacon_state.slot,
            beacon_state: Cow::Borrowed(beacon_state),
        }
    }

    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self {
        Self {
            store,
            slot: beacon_state.slot,
            beacon_state: Cow::Owned(beacon_state),
        }
    }
}

impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> {
    type Item = (Hash256, Slot);

    fn next(&mut self) -> Option<Self::Item> {
        if self.slot == 0 || self.slot > self.beacon_state.slot {
            return None;
        }

        self.slot -= 1;

        match self.beacon_state.get_block_root(self.slot) {
            Ok(root) => Some((*root, self.slot)),
            Err(BeaconStateError::SlotOutOfBounds) => {
                // Read a `BeaconState` from the store that has access to prior historical roots.
                let beacon_state =
                    next_historical_root_backtrack_state(&*self.store, &self.beacon_state)?;

                self.beacon_state = Cow::Owned(beacon_state);

                let root = self.beacon_state.get_block_root(self.slot).ok()?;

                Some((*root, self.slot))
            }
            _ => None,
        }
        self.roots.inner.store.get_block(&root).ok()?
    }
}
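With both public iterators now wrapping a single backtracking core, block and state roots are always read from the same slot lookup and cannot drift apart. A rough usage sketch (hypothetical `store`, `head_state` and `abandoned_head_hash`; not part of the diff):

    // Walk (block_root, state_root, slot) tuples back from the head.
    for (block_root, state_root, slot) in RootsIterator::new(store.clone(), &head_state) {
        // Both roots belong to `slot`, which is what fork pruning relies on.
    }

    // Or start the walk from an arbitrary block hash, e.g. an abandoned head:
    let iter = RootsIterator::from_block(store.clone(), abandoned_head_hash)?;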
@ -15,7 +15,7 @@ pub mod chunked_vector;
pub mod config;
mod errors;
mod forwards_iter;
mod hot_cold_store;
pub mod hot_cold_store;
mod impls;
mod leveldb_store;
mod memory_store;
@ -24,7 +24,6 @@ mod partial_beacon_state;
mod state_batch;

pub mod iter;
pub mod migrate;

use std::sync::Arc;

@ -32,7 +31,6 @@ pub use self::config::StoreConfig;
pub use self::hot_cold_store::{HotColdDB as DiskStore, HotStateSummary};
pub use self::leveldb_store::LevelDB as SimpleDiskStore;
pub use self::memory_store::MemoryStore;
pub use self::migrate::Migrate;
pub use self::partial_beacon_state::PartialBeaconState;
pub use errors::Error;
pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer;
@ -132,7 +130,7 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
    }

    /// (Optionally) Move all data before the frozen slot to the freezer database.
    fn freeze_to_state(
    fn process_finalization(
        _store: Arc<Self>,
        _frozen_head_root: Hash256,
        _frozen_head: &BeaconState<E>,
@ -1,153 +0,0 @@
use crate::{
    hot_cold_store::HotColdDBError, DiskStore, Error, MemoryStore, SimpleDiskStore, Store,
};
use parking_lot::Mutex;
use slog::{debug, warn};
use std::mem;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
use types::{BeaconState, EthSpec, Hash256, Slot};

/// Trait for migration processes that update the database upon finalization.
pub trait Migrate<S, E: EthSpec>: Send + Sync + 'static {
    fn new(db: Arc<S>) -> Self;

    fn freeze_to_state(
        &self,
        _state_root: Hash256,
        _state: BeaconState<E>,
        _max_finality_distance: u64,
    ) {
    }
}

/// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator;

impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
    fn new(_: Arc<SimpleDiskStore<E>>) -> Self {
        NullMigrator
    }
}

impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
    fn new(_: Arc<MemoryStore<E>>) -> Self {
        NullMigrator
    }
}

/// Migrator that immediately calls the store's migration function, blocking the current execution.
///
/// Mostly useful for tests.
pub struct BlockingMigrator<S>(Arc<S>);

impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
    fn new(db: Arc<S>) -> Self {
        BlockingMigrator(db)
    }

    fn freeze_to_state(
        &self,
        state_root: Hash256,
        state: BeaconState<E>,
        _max_finality_distance: u64,
    ) {
        if let Err(e) = S::freeze_to_state(self.0.clone(), state_root, &state) {
            // This migrator is only used for testing, so we just log to stderr without a logger.
            eprintln!("Migration error: {:?}", e);
        }
    }
}

type MpscSender<E> = mpsc::Sender<(Hash256, BeaconState<E>)>;

/// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec> {
    db: Arc<DiskStore<E>>,
    tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
}

impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
    fn new(db: Arc<DiskStore<E>>) -> Self {
        let tx_thread = Mutex::new(Self::spawn_thread(db.clone()));
        Self { db, tx_thread }
    }

    /// Perform the freezing operation on the database,
    fn freeze_to_state(
        &self,
        finalized_state_root: Hash256,
        finalized_state: BeaconState<E>,
        max_finality_distance: u64,
    ) {
        if !self.needs_migration(finalized_state.slot, max_finality_distance) {
            return;
        }

        let (ref mut tx, ref mut thread) = *self.tx_thread.lock();

        if let Err(tx_err) = tx.send((finalized_state_root, finalized_state)) {
            let (new_tx, new_thread) = Self::spawn_thread(self.db.clone());

            drop(mem::replace(tx, new_tx));
            let old_thread = mem::replace(thread, new_thread);

            // Join the old thread, which will probably have panicked, or may have
            // halted normally just now as a result of us dropping the old `mpsc::Sender`.
            if let Err(thread_err) = old_thread.join() {
                warn!(
                    self.db.log,
                    "Migration thread died, so it was restarted";
                    "reason" => format!("{:?}", thread_err)
                );
            }

            // Retry at most once, we could recurse but that would risk overflowing the stack.
            let _ = tx.send(tx_err.0);
        }
    }
}

impl<E: EthSpec> BackgroundMigrator<E> {
    /// Return true if a migration needs to be performed, given a new `finalized_slot`.
    fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
        let finality_distance = finalized_slot - self.db.get_split_slot();
        finality_distance > max_finality_distance
    }

    /// Spawn a new child thread to run the migration process.
    ///
    /// Return a channel handle for sending new finalized states to the thread.
    fn spawn_thread(
        db: Arc<DiskStore<E>>,
    ) -> (
        mpsc::Sender<(Hash256, BeaconState<E>)>,
        thread::JoinHandle<()>,
    ) {
        let (tx, rx) = mpsc::channel();
        let thread = thread::spawn(move || {
            while let Ok((state_root, state)) = rx.recv() {
                match DiskStore::freeze_to_state(db.clone(), state_root, &state) {
                    Ok(()) => {}
                    Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                        debug!(
                            db.log,
                            "Database migration postponed, unaligned finalized block";
                            "slot" => slot.as_u64()
                        );
                    }
                    Err(e) => {
                        warn!(
                            db.log,
                            "Database migration failed";
                            "error" => format!("{:?}", e)
                        );
                    }
                }
            }
        });

        (tx, thread)
    }
}
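The restart logic in the deleted (and relocated) background migrator generalizes to any worker fed by an mpsc channel: a failed `send` means the receiver is gone, so respawn the worker and retry once. A standalone sketch of the pattern in plain std Rust (illustrative; not part of the diff):

    use std::sync::mpsc;
    use std::{mem, thread};

    // Send `job` to a worker thread; if the worker has died, replace it and retry once.
    fn send_or_respawn<T: Send + 'static>(
        tx: &mut mpsc::Sender<T>,
        handle: &mut thread::JoinHandle<()>,
        spawn: impl Fn() -> (mpsc::Sender<T>, thread::JoinHandle<()>),
        job: T,
    ) {
        if let Err(err) = tx.send(job) {
            let (new_tx, new_handle) = spawn();
            drop(mem::replace(tx, new_tx));
            // Joining reaps the dead worker; it has either panicked or exited after
            // the old sender was dropped.
            let _ = mem::replace(handle, new_handle).join();
            // Retry once rather than recursing, so a persistent failure can't blow the stack.
            let _ = tx.send(err.0);
        }
    }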
@ -12,6 +12,7 @@ use serde_derive::{Deserialize, Serialize};
use ssz::ssz_encode;
use ssz_derive::{Decode, Encode};
use ssz_types::{typenum::Unsigned, BitVector, FixedVector};
use std::fmt;
use swap_or_not_shuffle::compute_shuffled_index;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;
@ -80,6 +81,8 @@ pub enum Error {
    ///
    /// This represents a serious bug in either the spec or Lighthouse!
    ArithError(ArithError),
    MissingBeaconBlock(SignedBeaconBlockHash),
    MissingBeaconState(BeaconStateHash),
}

/// Control whether an epoch-indexed field can be indexed at the next epoch or not.
@ -98,6 +101,33 @@ impl AllowNextEpoch {
    }
}

#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct BeaconStateHash(Hash256);

impl fmt::Debug for BeaconStateHash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "BeaconStateHash({:?})", self.0)
    }
}

impl fmt::Display for BeaconStateHash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<Hash256> for BeaconStateHash {
    fn from(hash: Hash256) -> BeaconStateHash {
        BeaconStateHash(hash)
    }
}

impl From<BeaconStateHash> for Hash256 {
    fn from(beacon_state_hash: BeaconStateHash) -> Hash256 {
        beacon_state_hash.0
    }
}

/// The state of the `BeaconChain` at some slot.
///
/// Spec v0.11.1
@ -614,6 +644,14 @@ impl<T: EthSpec> BeaconState<T> {
        Ok(&self.block_roots[i])
    }

    pub fn get_block_state_roots(
        &self,
        slot: Slot,
    ) -> Result<(SignedBeaconBlockHash, BeaconStateHash), Error> {
        let i = self.get_latest_block_roots_index(slot)?;
        Ok((self.block_roots[i].into(), self.state_roots[i].into()))
    }

    /// Sets the latest state root for slot.
    ///
    /// Spec v0.11.1
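The hash newtypes buy compile-time separation between block roots and state roots, which the pruning code leans on. A minimal sketch of the safety this provides (hypothetical `delete_abandoned` signature; not part of the diff):

    // With raw Hash256 the two arguments could be swapped silently;
    // with the newtypes, mixing them up is a type error.
    fn delete_abandoned(
        blocks: &HashSet<SignedBeaconBlockHash>,
        states: &HashSet<BeaconStateHash>,
    ) { /* ... */ }

    let (block_root, state_root) = state.get_block_state_roots(slot)?;
    let raw: Hash256 = block_root.into(); // explicit opt-out when a raw hash is needed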
@ -69,7 +69,7 @@ pub use crate::indexed_attestation::IndexedAttestation;
pub use crate::pending_attestation::PendingAttestation;
pub use crate::proposer_slashing::ProposerSlashing;
pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch};
pub use crate::signed_beacon_block::SignedBeaconBlock;
pub use crate::signed_beacon_block::{SignedBeaconBlock, SignedBeaconBlockHash};
pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader;
pub use crate::signed_voluntary_exit::SignedVoluntaryExit;
pub use crate::signing_root::{SignedRoot, SigningRoot};
@ -1,11 +1,40 @@
use crate::{test_utils::TestRandom, BeaconBlock, EthSpec, Hash256, Slot};
use bls::Signature;

use std::fmt;

use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use test_random_derive::TestRandom;
use tree_hash::TreeHash;

#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct SignedBeaconBlockHash(Hash256);

impl fmt::Debug for SignedBeaconBlockHash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "SignedBeaconBlockHash({:?})", self.0)
    }
}

impl fmt::Display for SignedBeaconBlockHash {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<Hash256> for SignedBeaconBlockHash {
    fn from(hash: Hash256) -> SignedBeaconBlockHash {
        SignedBeaconBlockHash(hash)
    }
}

impl From<SignedBeaconBlockHash> for Hash256 {
    fn from(signed_beacon_block_hash: SignedBeaconBlockHash) -> Hash256 {
        signed_beacon_block_hash.0
    }
}

/// A `BeaconBlock` and a signature from its proposer.
///
/// Spec v0.11.1
@ -21,11 +21,11 @@ use std::hash::{Hash, Hasher};
use std::iter::Iterator;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign};

#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Slot(u64);

#[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
#[derive(Eq, Clone, Copy, Default, Serialize, Deserialize)]
pub struct Epoch(u64);

impl_common!(Slot);
@ -195,6 +195,16 @@ macro_rules! impl_display {
    };
}

macro_rules! impl_debug {
    ($type: ident) => {
        impl fmt::Debug for $type {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                write!(f, "{}({:?})", stringify!($type), self.0)
            }
        }
    };
}

macro_rules! impl_ssz {
    ($type: ident) => {
        impl Encode for $type {
@ -275,6 +285,7 @@ macro_rules! impl_common {
        impl_math_between!($type, u64);
        impl_math!($type);
        impl_display!($type);
        impl_debug!($type);
        impl_ssz!($type);
        impl_hash!($type);
    };
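Swapping the derived `Debug` for this macro-generated impl yields the single-line form the commit message describes: `Slot(123)` rather than the multi-line `Slot(\n123\n)`. A doctest-style sketch, assuming the `new` constructor that `impl_common!` provides (illustrative; not part of the diff):

    assert_eq!(format!("{:?}", Slot::new(123)), "Slot(123)");
    assert_eq!(format!("{:?}", Epoch::new(7)), "Epoch(7)");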