Forwards block root iterators (#672)

* Implement forwards block root iterators

* Clean up errors and docs
Michael Sproul 2019-12-06 18:52:11 +11:00 committed by Paul Hauner
parent 779873680b
commit bd1b61a5b1
23 changed files with 573 additions and 187 deletions


@@ -94,7 +94,7 @@ pub enum AttestationProcessingOutcome {
 }
 pub trait BeaconChainTypes: Send + Sync + 'static {
-    type Store: store::Store;
+    type Store: store::Store<Self::EthSpec>;
     type StoreMigrator: store::Migrate<Self::Store, Self::EthSpec>;
     type SlotClock: slot_clock::SlotClock;
    type LmdGhost: LmdGhost<Self::Store, Self::EthSpec>;
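The hunk above is the heart of this diff: the `Store` trait now takes an `EthSpec` type parameter. Code that was previously generic over `S: Store` must also name the spec. A minimal sketch of the new bound style (the helper and its body are hypothetical; the shape of the generics mirrors signatures that appear later in this diff, e.g. `store_full_state<S: Store<E>, E: EthSpec>`):

use std::sync::Arc;
use store::Store;
use types::{EthSpec, Hash256, Slot};

// Hypothetical helper: code generic over a store now carries E: EthSpec alongside S: Store<E>.
fn latest_root<E: EthSpec, S: Store<E>>(store: Arc<S>, roots: &[(Hash256, Slot)]) -> Option<Hash256> {
    let _ = store; // the store itself is unused in this sketch; only the bound matters
    roots.iter().max_by_key(|(_, slot)| *slot).map(|(root, _)| *root)
}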


@ -52,7 +52,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEve
TEventHandler, TEventHandler,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
@ -108,7 +108,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEve
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
@ -402,7 +402,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
@ -462,7 +462,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEthSpec, TEventHandler>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
@ -514,7 +514,7 @@ impl<TStore, TStoreMigrator, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
@ -555,7 +555,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<TStore, TEthSpec> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,


@@ -184,7 +184,7 @@ pub struct CachingEth1Backend<T: EthSpec, S> {
     _phantom: PhantomData<T>,
 }
-impl<T: EthSpec, S: Store> CachingEth1Backend<T, S> {
+impl<T: EthSpec, S: Store<T>> CachingEth1Backend<T, S> {
     /// Instantiates `self` with empty caches.
     ///
     /// Does not connect to the eth1 node or start any tasks to keep the cache updated.
@@ -213,7 +213,7 @@ impl<T: EthSpec, S: Store> CachingEth1Backend<T, S> {
     }
 }
-impl<T: EthSpec, S: Store> Eth1ChainBackend<T> for CachingEth1Backend<T, S> {
+impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T> for CachingEth1Backend<T, S> {
     fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> {
         // Note: we do not return random junk if this function call fails as it would be caused by
         // an internal error.
@@ -372,7 +372,7 @@ fn random_eth1_data() -> Eth1Data {
 /// Returns `state.eth1_data.block_hash` at the start of eth1 voting period defined by
 /// `state.slot`.
-fn eth1_block_hash_at_start_of_voting_period<T: EthSpec, S: Store>(
+fn eth1_block_hash_at_start_of_voting_period<T: EthSpec, S: Store<T>>(
     store: Arc<S>,
     state: &BeaconState<T>,
 ) -> Result<Hash256, Error> {
@@ -392,7 +392,7 @@ fn eth1_block_hash_at_start_of_voting_period<T: EthSpec, S: Store>(
         .map_err(Error::UnableToGetPreviousStateRoot)?;
     store
-        .get_state::<T>(&prev_state_root, Some(slot))
+        .get_state(&prev_state_root, Some(slot))
         .map_err(Error::StoreError)?
         .map(|state| state.eth1_data.block_hash)
         .ok_or_else(|| Error::PreviousStateNotInDB(*prev_state_root))
@@ -581,7 +581,7 @@ mod test {
     use store::MemoryStore;
     use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder};
-    fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E, MemoryStore>, E> {
+    fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E, MemoryStore<E>>, E> {
         let eth1_config = Eth1Config {
             ..Eth1Config::default()
         };


@@ -40,8 +40,8 @@ pub type BaseHarnessType<TStore, TStoreMigrator, TEthSpec> = Witness<
     NullEventHandler<TEthSpec>,
 >;
-pub type HarnessType<E> = BaseHarnessType<MemoryStore, NullMigrator, E>;
-pub type DiskHarnessType<E> = BaseHarnessType<DiskStore, BlockingMigrator<DiskStore>, E>;
+pub type HarnessType<E> = BaseHarnessType<MemoryStore<E>, NullMigrator, E>;
+pub type DiskHarnessType<E> = BaseHarnessType<DiskStore<E>, BlockingMigrator<DiskStore<E>>, E>;
 /// Indicates how the `BeaconChainHarness` should produce blocks.
 #[derive(Clone, Copy, Debug)]
@@ -120,7 +120,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
     /// Instantiate a new harness with `validator_count` initial validators.
     pub fn new_with_disk_store(
         eth_spec_instance: E,
-        store: Arc<DiskStore>,
+        store: Arc<DiskStore<E>>,
         keypairs: Vec<Keypair>,
     ) -> Self {
         let spec = E::default_spec();
@@ -160,7 +160,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
     /// Instantiate a new harness with `validator_count` initial validators.
     pub fn resume_from_disk_store(
         eth_spec_instance: E,
-        store: Arc<DiskStore>,
+        store: Arc<DiskStore<E>>,
         keypairs: Vec<Keypair>,
     ) -> Self {
         let spec = E::default_spec();
@@ -197,7 +197,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
 impl<S, M, E> BeaconChainHarness<BaseHarnessType<S, M, E>>
 where
-    S: Store,
+    S: Store<E>,
     M: Migrate<S, E>,
     E: EthSpec,
 {


@@ -23,14 +23,14 @@ lazy_static! {
     static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
 }
-fn get_store(db_path: &TempDir) -> Arc<DiskStore> {
+fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
     let spec = E::default_spec();
     let hot_path = db_path.path().join("hot_db");
     let cold_path = db_path.path().join("cold_db");
     let slots_per_restore_point = MinimalEthSpec::slots_per_historical_root() as u64;
     let log = NullLoggerBuilder.build().expect("logger should build");
     Arc::new(
-        DiskStore::open::<E>(&hot_path, &cold_path, slots_per_restore_point, spec, log)
+        DiskStore::open(&hot_path, &cold_path, slots_per_restore_point, spec, log)
             .expect("disk store should initialize"),
     )
 }


@@ -26,19 +26,19 @@ lazy_static! {
 type E = MinimalEthSpec;
 type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
-fn get_store(db_path: &TempDir) -> Arc<DiskStore> {
+fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
     let spec = MinimalEthSpec::default_spec();
     let hot_path = db_path.path().join("hot_db");
     let cold_path = db_path.path().join("cold_db");
     let slots_per_restore_point = MinimalEthSpec::slots_per_historical_root() as u64;
     let log = NullLoggerBuilder.build().expect("logger should build");
     Arc::new(
-        DiskStore::open::<E>(&hot_path, &cold_path, slots_per_restore_point, spec, log)
+        DiskStore::open(&hot_path, &cold_path, slots_per_restore_point, spec, log)
             .expect("disk store should initialize"),
     )
 }
-fn get_harness(store: Arc<DiskStore>, validator_count: usize) -> TestHarness {
+fn get_harness(store: Arc<DiskStore<E>>, validator_count: usize) -> TestHarness {
     let harness = BeaconChainHarness::new_with_disk_store(
         MinimalEthSpec,
         store,
@@ -265,7 +265,7 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) {
 }
 /// Check that the DiskStore's split_slot is equal to the start slot of the last finalized epoch.
-fn check_split_slot(harness: &TestHarness, store: Arc<DiskStore>) {
+fn check_split_slot(harness: &TestHarness, store: Arc<DiskStore<E>>) {
     let split_slot = store.get_split_slot();
     assert_eq!(
         harness
@@ -286,7 +286,7 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
     assert_eq!(chain_dump.len() as u64, expected_len);
-    for checkpoint in chain_dump {
+    for checkpoint in &chain_dump {
         // Check that the tree hash of the stored state is as expected
         assert_eq!(
             checkpoint.beacon_state_root,
@@ -300,13 +300,41 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
             harness
                 .chain
                 .store
-                .get_state::<E>(&checkpoint.beacon_state_root, None)
+                .get_state(&checkpoint.beacon_state_root, None)
                 .expect("no error")
                 .expect("state exists")
                 .slot,
             checkpoint.beacon_state.slot
         );
     }
+    // Check the forwards block roots iterator against the chain dump
+    let chain_dump_block_roots = chain_dump
+        .iter()
+        .map(|checkpoint| (checkpoint.beacon_block_root, checkpoint.beacon_block.slot))
+        .collect::<Vec<_>>();
+    let head = harness.chain.head();
+    let mut forward_block_roots = Store::forwards_block_roots_iterator(
+        harness.chain.store.clone(),
+        Slot::new(0),
+        head.beacon_state,
+        head.beacon_block_root,
+        &harness.spec,
+    )
+    .collect::<Vec<_>>();
+    // Drop the block roots for skipped slots.
+    forward_block_roots.dedup_by_key(|(block_root, _)| *block_root);
+    for i in 0..std::cmp::max(chain_dump_block_roots.len(), forward_block_roots.len()) {
+        assert_eq!(
+            chain_dump_block_roots[i],
+            forward_block_roots[i],
+            "split slot is {}",
+            harness.chain.store.get_split_slot()
+        );
+    }
 }
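A toy illustration (standalone, made-up values) of why the new check dedups by block root before comparing: the forwards iterator yields one (block_root, slot) entry per slot, so a skipped slot repeats the previous block root, while the chain dump has one entry per block. `Vec::dedup_by_key` collapses the consecutive repeats:

fn main() {
    let a = [0u8; 32]; // stand-in for the block root at slot 0
    let b = [1u8; 32]; // stand-in for the block root at slot 1
    // Slots 2 and 3 were skipped, so a per-slot iterator repeats `b` for them.
    let mut roots = vec![(a, 0u64), (b, 1), (b, 2), (b, 3)];
    roots.dedup_by_key(|(root, _)| *root);
    assert_eq!(roots, vec![(a, 0), (b, 1)]);
}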
 /// Check that state and block root iterators can reach genesis


@ -80,7 +80,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEve
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>, TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TSlotClock: SlotClock + Clone + 'static, TSlotClock: SlotClock + Clone + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
@ -395,7 +395,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>, TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TSlotClock: SlotClock + Clone + 'static, TSlotClock: SlotClock + Clone + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
@ -442,7 +442,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>, TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
@ -482,7 +482,7 @@ where
impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder< ClientBuilder<
Witness< Witness<
DiskStore, DiskStore<TEthSpec>,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
TLmdGhost, TLmdGhost,
@ -493,8 +493,8 @@ impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandle
> >
where where
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TStoreMigrator: store::Migrate<DiskStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
TLmdGhost: LmdGhost<DiskStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<DiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
@ -516,7 +516,7 @@ where
.clone() .clone()
.ok_or_else(|| "disk_store requires a chain spec".to_string())?; .ok_or_else(|| "disk_store requires a chain spec".to_string())?;
let store = DiskStore::open::<TEthSpec>( let store = DiskStore::open(
hot_path, hot_path,
cold_path, cold_path,
slots_per_restore_point, slots_per_restore_point,
@ -532,7 +532,7 @@ where
impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder< ClientBuilder<
Witness< Witness<
SimpleDiskStore, SimpleDiskStore<TEthSpec>,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
TLmdGhost, TLmdGhost,
@ -543,8 +543,8 @@ impl<TStoreMigrator, TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandle
> >
where where
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TStoreMigrator: store::Migrate<SimpleDiskStore, TEthSpec> + 'static, TStoreMigrator: store::Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
TLmdGhost: LmdGhost<SimpleDiskStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
@ -561,7 +561,7 @@ where
impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler> impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder< ClientBuilder<
Witness< Witness<
MemoryStore, MemoryStore<TEthSpec>,
NullMigrator, NullMigrator,
TSlotClock, TSlotClock,
TLmdGhost, TLmdGhost,
@ -572,7 +572,7 @@ impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
> >
where where
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<MemoryStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<MemoryStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
@ -591,7 +591,7 @@ where
impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler> impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder< ClientBuilder<
Witness< Witness<
DiskStore, DiskStore<TEthSpec>,
BackgroundMigrator<TEthSpec>, BackgroundMigrator<TEthSpec>,
TSlotClock, TSlotClock,
TLmdGhost, TLmdGhost,
@ -602,7 +602,7 @@ impl<TSlotClock, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
> >
where where
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<DiskStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<DiskStore<TEthSpec>, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
@ -629,7 +629,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TLmdGhost, TEthSpec, TEventHandler>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>, TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
@ -731,7 +731,7 @@ impl<TStore, TStoreMigrator, TLmdGhost, TEth1Backend, TEthSpec, TEventHandler>
>, >,
> >
where where
TStore: Store + 'static, TStore: Store<TEthSpec> + 'static,
TStoreMigrator: store::Migrate<TStore, TEthSpec>, TStoreMigrator: store::Migrate<TStore, TEthSpec>,
TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static, TLmdGhost: LmdGhost<TStore, TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,


@@ -25,11 +25,11 @@ use types::EthSpec;
 /// A type-alias to the tighten the definition of a production-intended `Client`.
 pub type ProductionClient<E> = Client<
     Witness<
-        DiskStore,
+        DiskStore<E>,
         BackgroundMigrator<E>,
         SystemTimeSlotClock,
-        ThreadSafeReducedTree<DiskStore, E>,
-        CachingEth1Backend<E, DiskStore>,
+        ThreadSafeReducedTree<DiskStore<E>, E>,
+        CachingEth1Backend<E, DiskStore<E>>,
         E,
         WebSocketSender<E>,
     >,


@ -1,7 +1,7 @@
use super::*; use super::*;
use ssz::{Decode, DecodeError}; use ssz::{Decode, DecodeError};
fn get_block_bytes<T: Store, E: EthSpec>( fn get_block_bytes<T: Store<E>, E: EthSpec>(
store: &T, store: &T,
root: Hash256, root: Hash256,
) -> Result<Option<Vec<u8>>, Error> { ) -> Result<Option<Vec<u8>>, Error> {
@ -23,7 +23,7 @@ fn read_parent_root_from_block_bytes(bytes: &[u8]) -> Result<Hash256, DecodeErro
Hash256::from_ssz_bytes(slice) Hash256::from_ssz_bytes(slice)
} }
pub fn get_block_at_preceeding_slot<T: Store, E: EthSpec>( pub fn get_block_at_preceeding_slot<T: Store<E>, E: EthSpec>(
store: &T, store: &T,
slot: Slot, slot: Slot,
start_root: Hash256, start_root: Hash256,
@ -36,7 +36,7 @@ pub fn get_block_at_preceeding_slot<T: Store, E: EthSpec>(
) )
} }
fn get_at_preceeding_slot<T: Store, E: EthSpec>( fn get_at_preceeding_slot<T: Store<E>, E: EthSpec>(
store: &T, store: &T,
slot: Slot, slot: Slot,
mut root: Hash256, mut root: Hash256,
@ -107,7 +107,7 @@ mod tests {
} }
fn build_chain( fn build_chain(
store: &impl Store, store: &impl Store<MinimalEthSpec>,
slots: &[usize], slots: &[usize],
spec: &ChainSpec, spec: &ChainSpec,
) -> Vec<(Hash256, BeaconBlock)> { ) -> Vec<(Hash256, BeaconBlock)> {
@ -157,7 +157,7 @@ mod tests {
#[test] #[test]
fn chain_with_skips() { fn chain_with_skips() {
let store = MemoryStore::open(); let store = MemoryStore::<MinimalEthSpec>::open();
let spec = MinimalEthSpec::default_spec(); let spec = MinimalEthSpec::default_spec();
let slots = vec![0, 1, 2, 5]; let slots = vec![0, 1, 2, 5];
@ -181,14 +181,14 @@ mod tests {
// Slot that doesn't exist // Slot that doesn't exist
let (source_root, _source_block) = &blocks_and_roots[3]; let (source_root, _source_block) = &blocks_and_roots[3];
assert!(store assert!(store
.get_block_at_preceeding_slot::<MinimalEthSpec>(*source_root, Slot::new(3)) .get_block_at_preceeding_slot(*source_root, Slot::new(3))
.unwrap() .unwrap()
.is_none()); .is_none());
// Slot too high // Slot too high
let (source_root, _source_block) = &blocks_and_roots[3]; let (source_root, _source_block) = &blocks_and_roots[3];
assert!(store assert!(store
.get_block_at_preceeding_slot::<MinimalEthSpec>(*source_root, Slot::new(3)) .get_block_at_preceeding_slot(*source_root, Slot::new(3))
.unwrap() .unwrap()
.is_none()); .is_none());
} }


@@ -0,0 +1,118 @@
use crate::chunked_vector::{chunk_key, Chunk, Field};
use crate::DiskStore;
use slog::error;
use std::sync::Arc;
use types::{ChainSpec, EthSpec, Slot};
/// Iterator over the values of a `BeaconState` vector field (like `block_roots`).
///
/// Uses the freezer DB's separate table to load the values.
pub struct ChunkedVectorIter<F, E>
where
F: Field<E>,
E: EthSpec,
{
pub(crate) store: Arc<DiskStore<E>>,
current_vindex: usize,
pub(crate) end_vindex: usize,
next_cindex: usize,
current_chunk: Chunk<F::Value>,
}
impl<F, E> ChunkedVectorIter<F, E>
where
F: Field<E>,
E: EthSpec,
{
/// Create a new iterator which can yield elements from `start_vindex` up to the last
/// index stored by the restore point at `last_restore_point_slot`.
///
/// The `last_restore_point` slot should be the slot of a recent restore point as obtained from
/// `DiskStore::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can
/// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`).
pub fn new(
store: Arc<DiskStore<E>>,
start_vindex: usize,
last_restore_point_slot: Slot,
spec: &ChainSpec,
) -> Self {
let (_, end_vindex) = F::start_and_end_vindex(last_restore_point_slot, spec);
// Set the next chunk to the one containing `start_vindex`.
let next_cindex = start_vindex / F::chunk_size();
// Set the current chunk to the empty chunk, it will never be read.
let current_chunk = Chunk::default();
Self {
store,
current_vindex: start_vindex,
end_vindex,
next_cindex,
current_chunk,
}
}
}
impl<F, E> Iterator for ChunkedVectorIter<F, E>
where
F: Field<E>,
E: EthSpec,
{
type Item = (usize, F::Value);
fn next(&mut self) -> Option<Self::Item> {
let chunk_size = F::chunk_size();
// Range exhausted, return `None` forever.
if self.current_vindex >= self.end_vindex {
None
}
// Value lies in the current chunk, return it.
else if self.current_vindex < self.next_cindex * chunk_size {
let vindex = self.current_vindex;
let val = self
.current_chunk
.values
.get(vindex % chunk_size)
.cloned()
.or_else(|| {
error!(
self.store.log,
"Missing chunk value in forwards iterator";
"vector index" => vindex
);
None
})?;
self.current_vindex += 1;
Some((vindex, val))
}
// Need to load the next chunk, load it and recurse back into the in-range case.
else {
self.current_chunk = Chunk::load(
&self.store.cold_db,
F::column(),
&chunk_key(self.next_cindex as u64),
)
.map_err(|e| {
error!(
self.store.log,
"Database error in forwards iterator";
"chunk index" => self.next_cindex,
"error" => format!("{:?}", e)
);
e
})
.ok()?
.or_else(|| {
error!(
self.store.log,
"Missing chunk in forwards iterator";
"chunk index" => self.next_cindex
);
None
})?;
self.next_cindex += 1;
self.next()
}
}
}
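For readers following the index arithmetic in `new` and `next` above, a small standalone illustration (the chunk size of 8 is made up; the real value comes from `F::chunk_size()`):

fn main() {
    let chunk_size = 8usize;
    let start_vindex = 20usize;
    // The first chunk the iterator loads is the one containing `start_vindex`.
    let next_cindex = start_vindex / chunk_size;
    assert_eq!(next_cindex, 2); // vector indices 16..24 live in chunk 2
    // Within that chunk, the value for vindex 20 sits at offset 20 % 8 == 4.
    assert_eq!(start_vindex % chunk_size, 4);
}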


@@ -34,7 +34,7 @@ pub enum UpdatePattern {
 /// Map a chunk index to bytes that can be used to key the NoSQL database.
 ///
 /// We shift chunks up by 1 to make room for a genesis chunk that is handled separately.
-fn chunk_key(cindex: u64) -> [u8; 8] {
+pub fn chunk_key(cindex: u64) -> [u8; 8] {
     (cindex + 1).to_be_bytes()
 }
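Given the body above, a couple of assertions that illustrate the +1 shift (purely illustrative):

// Chunk 0 is keyed as 1, leaving key 0 free for the separately-handled genesis value.
assert_eq!(chunk_key(0), 1u64.to_be_bytes());
assert_eq!(chunk_key(5), [0, 0, 0, 0, 0, 0, 0, 6]);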
@ -177,7 +177,7 @@ pub trait Field<E: EthSpec>: Copy {
/// Load the genesis value for a fixed length field from the store. /// Load the genesis value for a fixed length field from the store.
/// ///
/// This genesis value should be used to fill the initial state of the vector. /// This genesis value should be used to fill the initial state of the vector.
fn load_genesis_value<S: Store>(store: &S) -> Result<Self::Value, Error> { fn load_genesis_value<S: Store<E>>(store: &S) -> Result<Self::Value, Error> {
let key = &genesis_value_key()[..]; let key = &genesis_value_key()[..];
let chunk = let chunk =
Chunk::load(store, Self::column(), key)?.ok_or(ChunkError::MissingGenesisValue)?; Chunk::load(store, Self::column(), key)?.ok_or(ChunkError::MissingGenesisValue)?;
@ -192,7 +192,10 @@ pub trait Field<E: EthSpec>: Copy {
/// ///
/// Check the existing value (if any) for consistency with the value we intend to store, and /// Check the existing value (if any) for consistency with the value we intend to store, and
/// return an error if they are inconsistent. /// return an error if they are inconsistent.
fn check_and_store_genesis_value<S: Store>(store: &S, value: Self::Value) -> Result<(), Error> { fn check_and_store_genesis_value<S: Store<E>>(
store: &S,
value: Self::Value,
) -> Result<(), Error> {
let key = &genesis_value_key()[..]; let key = &genesis_value_key()[..];
if let Some(existing_chunk) = Chunk::<Self::Value>::load(store, Self::column(), key)? { if let Some(existing_chunk) = Chunk::<Self::Value>::load(store, Self::column(), key)? {
@ -324,7 +327,7 @@ field!(
|state: &BeaconState<_>, index, _| safe_modulo_index(&state.randao_mixes, index) |state: &BeaconState<_>, index, _| safe_modulo_index(&state.randao_mixes, index)
); );
pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: Store>( pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: Store<E>>(
field: F, field: F,
store: &S, store: &S,
state: &BeaconState<E>, state: &BeaconState<E>,
@ -384,7 +387,7 @@ fn store_range<F, E, S, I>(
where where
F: Field<E>, F: Field<E>,
E: EthSpec, E: EthSpec,
S: Store, S: Store<E>,
I: Iterator<Item = usize>, I: Iterator<Item = usize>,
{ {
for chunk_index in range { for chunk_index in range {
@ -414,7 +417,7 @@ where
// Chunks at the end index are included. // Chunks at the end index are included.
// TODO: could be more efficient with a real range query (perhaps RocksDB) // TODO: could be more efficient with a real range query (perhaps RocksDB)
fn range_query<S: Store, T: Decode + Encode>( fn range_query<S: Store<E>, E: EthSpec, T: Decode + Encode>(
store: &S, store: &S,
column: DBColumn, column: DBColumn,
start_index: usize, start_index: usize,
@ -479,7 +482,7 @@ fn stitch<T: Default + Clone>(
Ok(result) Ok(result)
} }
pub fn load_vector_from_db<F: FixedLengthField<E>, E: EthSpec, S: Store>( pub fn load_vector_from_db<F: FixedLengthField<E>, E: EthSpec, S: Store<E>>(
store: &S, store: &S,
slot: Slot, slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
@ -511,7 +514,7 @@ pub fn load_vector_from_db<F: FixedLengthField<E>, E: EthSpec, S: Store>(
} }
/// The historical roots are stored in vector chunks, despite not actually being a vector. /// The historical roots are stored in vector chunks, despite not actually being a vector.
pub fn load_variable_list_from_db<F: VariableLengthField<E>, E: EthSpec, S: Store>( pub fn load_variable_list_from_db<F: VariableLengthField<E>, E: EthSpec, S: Store<E>>(
store: &S, store: &S,
slot: Slot, slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
@ -571,14 +574,23 @@ where
Chunk { values } Chunk { values }
} }
pub fn load<S: Store>(store: &S, column: DBColumn, key: &[u8]) -> Result<Option<Self>, Error> { pub fn load<S: Store<E>, E: EthSpec>(
store: &S,
column: DBColumn,
key: &[u8],
) -> Result<Option<Self>, Error> {
store store
.get_bytes(column.into(), key)? .get_bytes(column.into(), key)?
.map(|bytes| Self::decode(&bytes)) .map(|bytes| Self::decode(&bytes))
.transpose() .transpose()
} }
pub fn store<S: Store>(&self, store: &S, column: DBColumn, key: &[u8]) -> Result<(), Error> { pub fn store<S: Store<E>, E: EthSpec>(
&self,
store: &S,
column: DBColumn,
key: &[u8],
) -> Result<(), Error> {
store.put_bytes(column.into(), key, &self.encode()?)?; store.put_bytes(column.into(), key, &self.encode()?)?;
Ok(()) Ok(())
} }


@@ -0,0 +1,164 @@
use crate::chunked_iter::ChunkedVectorIter;
use crate::chunked_vector::BlockRoots;
use crate::iter::{BlockRootsIterator, ReverseBlockRootIterator};
use crate::{DiskStore, Store};
use slog::error;
use std::sync::Arc;
use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot};
/// Forwards block roots iterator that makes use of the `block_roots` table in the freezer DB.
pub struct FrozenForwardsBlockRootsIterator<E: EthSpec> {
inner: ChunkedVectorIter<BlockRoots, E>,
}
/// Forwards block roots iterator that reverses a backwards iterator (only good for short ranges).
pub struct SimpleForwardsBlockRootsIterator {
// Values from the backwards iterator (in slot descending order)
values: Vec<(Hash256, Slot)>,
}
/// Fusion of the above two approaches to forwards iteration. Fast and efficient.
pub enum HybridForwardsBlockRootsIterator<E: EthSpec> {
PreFinalization {
iter: FrozenForwardsBlockRootsIterator<E>,
/// Data required by the `PostFinalization` iterator when we get to it.
continuation_data: Option<(BeaconState<E>, Hash256)>,
},
PostFinalization {
iter: SimpleForwardsBlockRootsIterator,
},
}
impl<E: EthSpec> FrozenForwardsBlockRootsIterator<E> {
pub fn new(
store: Arc<DiskStore<E>>,
start_slot: Slot,
last_restore_point_slot: Slot,
spec: &ChainSpec,
) -> Self {
Self {
inner: ChunkedVectorIter::new(
store,
start_slot.as_usize(),
last_restore_point_slot,
spec,
),
}
}
}
impl<E: EthSpec> Iterator for FrozenForwardsBlockRootsIterator<E> {
type Item = (Hash256, Slot);
fn next(&mut self) -> Option<Self::Item> {
self.inner
.next()
.map(|(slot, block_hash)| (block_hash, Slot::from(slot)))
}
}
impl SimpleForwardsBlockRootsIterator {
pub fn new<S: Store<E>, E: EthSpec>(
store: Arc<S>,
start_slot: Slot,
end_state: BeaconState<E>,
end_block_root: Hash256,
) -> Self {
// Iterate backwards from the end state, stopping at the start slot.
Self {
values: ReverseBlockRootIterator::new(
(end_block_root, end_state.slot),
BlockRootsIterator::owned(store, end_state),
)
.take_while(|(_, slot)| *slot >= start_slot)
.collect(),
}
}
}
impl Iterator for SimpleForwardsBlockRootsIterator {
type Item = (Hash256, Slot);
fn next(&mut self) -> Option<Self::Item> {
// Pop from the end of the vector to get the block roots in slot-ascending order.
self.values.pop()
}
}
impl<E: EthSpec> HybridForwardsBlockRootsIterator<E> {
pub fn new(
store: Arc<DiskStore<E>>,
start_slot: Slot,
end_state: BeaconState<E>,
end_block_root: Hash256,
spec: &ChainSpec,
) -> Self {
use HybridForwardsBlockRootsIterator::*;
let latest_restore_point_slot = store.get_latest_restore_point_slot();
if start_slot < latest_restore_point_slot {
PreFinalization {
iter: FrozenForwardsBlockRootsIterator::new(
store,
start_slot,
latest_restore_point_slot,
spec,
),
continuation_data: Some((end_state, end_block_root)),
}
} else {
PostFinalization {
iter: SimpleForwardsBlockRootsIterator::new(
store,
start_slot,
end_state,
end_block_root,
),
}
}
}
}
impl<E: EthSpec> Iterator for HybridForwardsBlockRootsIterator<E> {
type Item = (Hash256, Slot);
fn next(&mut self) -> Option<Self::Item> {
use HybridForwardsBlockRootsIterator::*;
match self {
PreFinalization {
iter,
continuation_data,
} => {
match iter.next() {
Some(x) => Some(x),
// Once the pre-finalization iterator is consumed, transition
// to a post-finalization iterator beginning from the last slot
// of the pre iterator.
None => {
let (end_state, end_block_root) =
continuation_data.take().or_else(|| {
error!(
iter.inner.store.log,
"HybridForwardsBlockRootsIterator: logic error"
);
None
})?;
*self = PostFinalization {
iter: SimpleForwardsBlockRootsIterator::new(
iter.inner.store.clone(),
Slot::from(iter.inner.end_vindex),
end_state,
end_block_root,
),
};
self.next()
}
}
}
PostFinalization { iter } => iter.next(),
}
}
}
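A sketch of how a caller reaches this iterator, mirroring the new check in the store tests earlier in this diff; dispatch through the `Store` trait selects `HybridForwardsBlockRootsIterator` for `DiskStore` (and `SimpleForwardsBlockRootsIterator` for the stores that use it). The bindings `store`, `head_state`, `head_block_root` and `spec` are assumed to be in scope:

// Assumed bindings: store: Arc<DiskStore<E>>, head_state: BeaconState<E>,
// head_block_root: Hash256, spec: &ChainSpec.
let block_roots: Vec<(Hash256, Slot)> = Store::forwards_block_roots_iterator(
    store.clone(),
    Slot::new(0),
    head_state,
    head_block_root,
    spec,
)
.collect();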


@@ -1,6 +1,7 @@
 use crate::chunked_vector::{
     store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots,
 };
+use crate::forwards_iter::HybridForwardsBlockRootsIterator;
 use crate::iter::{ParentRootBlockIterator, StateRootsIterator};
 use crate::{
     leveldb_store::LevelDB, DBColumn, Error, PartialBeaconState, SimpleStoreItem, Store, StoreItem,
@@ -14,6 +15,7 @@ use state_processing::{
     SlotProcessingError,
 };
 use std::convert::TryInto;
+use std::marker::PhantomData;
 use std::path::Path;
 use std::sync::Arc;
 use types::*;
@@ -25,7 +27,7 @@ pub const SPLIT_DB_KEY: &str = "FREEZERDBSPLITFREEZERDBSPLITFREE";
 ///
 /// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores
 /// intermittent "restore point" states pre-finalization.
-pub struct HotColdDB {
+pub struct HotColdDB<E: EthSpec> {
     /// The slot and state root at the point where the database is split between hot and cold.
     ///
     /// States with slots less than `split.slot` are in the cold DB, while states with slots
@@ -34,15 +36,17 @@ pub struct HotColdDB {
     /// Number of slots per restore point state in the freezer database.
     slots_per_restore_point: u64,
     /// Cold database containing compact historical data.
-    cold_db: LevelDB,
+    pub(crate) cold_db: LevelDB<E>,
     /// Hot database containing duplicated but quick-to-access recent data.
     ///
     /// The hot database also contains all blocks.
-    hot_db: LevelDB,
+    pub(crate) hot_db: LevelDB<E>,
     /// Chain spec.
     spec: ChainSpec,
     /// Logger.
     pub(crate) log: Logger,
+    /// Mere vessel for E.
+    _phantom: PhantomData<E>,
 }
 #[derive(Debug, PartialEq)]
@@ -71,7 +75,9 @@ pub enum HotColdDbError {
     RestorePointBlockHashError(BeaconStateError),
 }
-impl Store for HotColdDB {
+impl<E: EthSpec> Store<E> for HotColdDB<E> {
+    type ForwardsBlockRootsIterator = HybridForwardsBlockRootsIterator<E>;
     // Defer to the hot database for basic operations (including blocks for now)
     fn get_bytes(&self, column: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
         self.hot_db.get_bytes(column, key)
@@ -90,11 +96,7 @@ impl Store for HotColdDB {
     }
     /// Store a state in the store.
-    fn put_state<E: EthSpec>(
-        &self,
-        state_root: &Hash256,
-        state: &BeaconState<E>,
-    ) -> Result<(), Error> {
+    fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
         if state.slot < self.get_split_slot() {
             self.store_archive_state(state_root, state)
         } else {
@@ -103,7 +105,7 @@ impl Store for HotColdDB {
     }
     /// Fetch a state from the store.
-    fn get_state<E: EthSpec>(
+    fn get_state(
         &self,
         state_root: &Hash256,
         slot: Option<Slot>,
@@ -129,7 +131,7 @@ impl Store for HotColdDB {
     }
     /// Advance the split point of the store, moving new finalized states to the freezer.
-    fn freeze_to_state<E: EthSpec>(
+    fn freeze_to_state(
         store: Arc<Self>,
         frozen_head_root: Hash256,
         frozen_head: &BeaconState<E>,
@@ -157,7 +159,7 @@ impl Store for HotColdDB {
         for (state_root, slot) in
             state_root_iter.take_while(|&(_, slot)| slot >= current_split_slot)
         {
-            if slot % store.slots_per_restore_point == 0 {
+            if slot % dbg!(store.slots_per_restore_point) == 0 {
                 let state: BeaconState<E> = store
                     .hot_db
                     .get_state(&state_root, None)?
@@ -195,20 +197,30 @@ impl Store for HotColdDB {
         Ok(())
     }
+    fn forwards_block_roots_iterator(
+        store: Arc<Self>,
+        start_slot: Slot,
+        end_state: BeaconState<E>,
+        end_block_root: Hash256,
+        spec: &ChainSpec,
+    ) -> Self::ForwardsBlockRootsIterator {
+        HybridForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root, spec)
+    }
 }
-impl HotColdDB {
+impl<E: EthSpec> HotColdDB<E> {
     /// Open a new or existing database, with the given paths to the hot and cold DBs.
     ///
     /// The `slots_per_restore_point` parameter must be a divisor of `SLOTS_PER_HISTORICAL_ROOT`.
-    pub fn open<E: EthSpec>(
+    pub fn open(
         hot_path: &Path,
         cold_path: &Path,
         slots_per_restore_point: u64,
         spec: ChainSpec,
         log: Logger,
     ) -> Result<Self, Error> {
-        Self::verify_slots_per_restore_point::<E>(slots_per_restore_point)?;
+        Self::verify_slots_per_restore_point(slots_per_restore_point)?;
         let db = HotColdDB {
             split: RwLock::new(Split::default()),
@@ -217,6 +229,7 @@ impl HotColdDB {
             hot_db: LevelDB::open(hot_path)?,
             spec,
             log,
+            _phantom: PhantomData,
         };
// Load the previous split slot from the database (if any). This ensures we can // Load the previous split slot from the database (if any). This ensures we can
@ -230,7 +243,7 @@ impl HotColdDB {
/// Store a pre-finalization state in the freezer database. /// Store a pre-finalization state in the freezer database.
/// ///
/// Will return an error if the state does not lie on a restore point boundary. /// Will return an error if the state does not lie on a restore point boundary.
pub fn store_archive_state<E: EthSpec>( pub fn store_archive_state(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
state: &BeaconState<E>, state: &BeaconState<E>,
@ -251,6 +264,7 @@ impl HotColdDB {
"slot" => state.slot, "slot" => state.slot,
"state_root" => format!("{:?}", state_root) "state_root" => format!("{:?}", state_root)
); );
println!("Creating restore point {}", state.slot);
// 1. Convert to PartialBeaconState and store that in the DB. // 1. Convert to PartialBeaconState and store that in the DB.
let partial_state = PartialBeaconState::from_state_forgetful(state); let partial_state = PartialBeaconState::from_state_forgetful(state);
@ -273,7 +287,7 @@ impl HotColdDB {
/// Load a pre-finalization state from the freezer database. /// Load a pre-finalization state from the freezer database.
/// ///
/// Will reconstruct the state if it lies between restore points. /// Will reconstruct the state if it lies between restore points.
pub fn load_archive_state<E: EthSpec>( pub fn load_archive_state(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
slot: Slot, slot: Slot,
@ -286,10 +300,7 @@ impl HotColdDB {
} }
/// Load a restore point state by its `state_root`. /// Load a restore point state by its `state_root`.
fn load_restore_point<E: EthSpec>( fn load_restore_point(&self, state_root: &Hash256) -> Result<BeaconState<E>, Error> {
&self,
state_root: &Hash256,
) -> Result<BeaconState<E>, Error> {
let mut partial_state = PartialBeaconState::db_get(&self.cold_db, state_root)? let mut partial_state = PartialBeaconState::db_get(&self.cold_db, state_root)?
.ok_or_else(|| HotColdDbError::MissingRestorePoint(*state_root))?; .ok_or_else(|| HotColdDbError::MissingRestorePoint(*state_root))?;
@ -303,7 +314,7 @@ impl HotColdDB {
} }
/// Load a restore point state by its `restore_point_index`. /// Load a restore point state by its `restore_point_index`.
fn load_restore_point_by_index<E: EthSpec>( fn load_restore_point_by_index(
&self, &self,
restore_point_index: u64, restore_point_index: u64,
) -> Result<BeaconState<E>, Error> { ) -> Result<BeaconState<E>, Error> {
@ -312,7 +323,7 @@ impl HotColdDB {
} }
/// Load a state that lies between restore points. /// Load a state that lies between restore points.
fn load_intermediate_state<E: EthSpec>( fn load_intermediate_state(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
slot: Slot, slot: Slot,
@ -330,7 +341,7 @@ impl HotColdDB {
let high_restore_point = if high_restore_point_idx * self.slots_per_restore_point let high_restore_point = if high_restore_point_idx * self.slots_per_restore_point
>= split.slot.as_u64() >= split.slot.as_u64()
{ {
self.get_state::<E>(&split.state_root, Some(split.slot))? self.get_state(&split.state_root, Some(split.slot))?
.ok_or_else(|| HotColdDbError::MissingSplitState(split.state_root, split.slot))? .ok_or_else(|| HotColdDbError::MissingSplitState(split.state_root, split.slot))?
} else { } else {
self.load_restore_point_by_index(high_restore_point_idx)? self.load_restore_point_by_index(high_restore_point_idx)?
@ -365,7 +376,7 @@ impl HotColdDB {
/// Get a suitable block root for backtracking from `high_restore_point` to the state at `slot`. /// Get a suitable block root for backtracking from `high_restore_point` to the state at `slot`.
/// ///
/// Defaults to the block root for `slot`, which *should* be in range. /// Defaults to the block root for `slot`, which *should* be in range.
fn get_high_restore_point_block_root<E: EthSpec>( fn get_high_restore_point_block_root(
&self, &self,
high_restore_point: &BeaconState<E>, high_restore_point: &BeaconState<E>,
slot: Slot, slot: Slot,
@ -381,7 +392,7 @@ impl HotColdDB {
/// ///
/// Blocks are returned in slot-ascending order, suitable for replaying on a state with slot /// Blocks are returned in slot-ascending order, suitable for replaying on a state with slot
/// equal to `start_slot`, to reach a state with slot equal to `end_slot`. /// equal to `start_slot`, to reach a state with slot equal to `end_slot`.
fn load_blocks_to_replay<E: EthSpec>( fn load_blocks_to_replay(
&self, &self,
start_slot: Slot, start_slot: Slot,
end_slot: Slot, end_slot: Slot,
@ -402,7 +413,7 @@ impl HotColdDB {
/// Replay `blocks` on top of `state` until `target_slot` is reached. /// Replay `blocks` on top of `state` until `target_slot` is reached.
/// ///
/// Will skip slots as necessary. /// Will skip slots as necessary.
fn replay_blocks<E: EthSpec>( fn replay_blocks(
&self, &self,
mut state: BeaconState<E>, mut state: BeaconState<E>,
blocks: Vec<BeaconBlock<E>>, blocks: Vec<BeaconBlock<E>>,
@@ -440,6 +451,11 @@ impl HotColdDB {
         self.split.read().slot
     }
+    /// Fetch the slot of the most recently stored restore point.
+    pub fn get_latest_restore_point_slot(&self) -> Slot {
+        self.get_split_slot() / self.slots_per_restore_point * self.slots_per_restore_point
+    }
     /// Load the split point from disk.
     fn load_split(&self) -> Result<Option<Split>, Error> {
         let key = Hash256::from_slice(SPLIT_DB_KEY.as_bytes());
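A quick illustration of the rounding in `get_latest_restore_point_slot` above (numbers made up): with `slots_per_restore_point = 64` and a split slot of 200, integer division rounds down to the most recent restore point at or below the split.

// Same arithmetic on plain u64 values: 200 / 64 * 64 == 192.
assert_eq!(200u64 / 64 * 64, 192);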
@ -498,9 +514,7 @@ impl HotColdDB {
/// This ensures that we have at least one restore point within range of our state /// This ensures that we have at least one restore point within range of our state
/// root history when iterating backwards (and allows for more frequent restore points if /// root history when iterating backwards (and allows for more frequent restore points if
/// desired). /// desired).
fn verify_slots_per_restore_point<E: EthSpec>( fn verify_slots_per_restore_point(slots_per_restore_point: u64) -> Result<(), HotColdDbError> {
slots_per_restore_point: u64,
) -> Result<(), HotColdDbError> {
let slots_per_historical_root = E::SlotsPerHistoricalRoot::to_u64(); let slots_per_historical_root = E::SlotsPerHistoricalRoot::to_u64();
if slots_per_restore_point > 0 && slots_per_historical_root % slots_per_restore_point == 0 { if slots_per_restore_point > 0 && slots_per_historical_root % slots_per_restore_point == 0 {
Ok(()) Ok(())


@@ -4,7 +4,7 @@ use ssz_derive::{Decode, Encode};
 use std::convert::TryInto;
 use types::beacon_state::{BeaconTreeHashCache, CommitteeCache, CACHED_EPOCHS};
-pub fn store_full_state<S: Store, E: EthSpec>(
+pub fn store_full_state<S: Store<E>, E: EthSpec>(
     store: &S,
     state_root: &Hash256,
     state: &BeaconState<E>,
@@ -21,7 +21,7 @@ pub fn store_full_state<S: Store, E: EthSpec>(
     result
 }
-pub fn get_full_state<S: Store, E: EthSpec>(
+pub fn get_full_state<S: Store<E>, E: EthSpec>(
     store: &S,
     state_root: &Hash256,
 ) -> Result<Option<BeaconState<E>>, Error> {


@ -12,12 +12,14 @@ use types::{
/// ///
/// It is assumed that all ancestors for this object are stored in the database. If this is not the /// It is assumed that all ancestors for this object are stored in the database. If this is not the
/// case, the iterator will start returning `None` prior to genesis. /// case, the iterator will start returning `None` prior to genesis.
pub trait AncestorIter<U: Store, I: Iterator> { pub trait AncestorIter<U: Store<E>, E: EthSpec, I: Iterator> {
/// Returns an iterator over the roots of the ancestors of `self`. /// Returns an iterator over the roots of the ancestors of `self`.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<I>; fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<I>;
} }
impl<'a, U: Store, E: EthSpec> AncestorIter<U, BlockRootsIterator<'a, E, U>> for BeaconBlock<E> { impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, BlockRootsIterator<'a, E, U>>
for BeaconBlock<E>
{
/// Iterates across all available prior block roots of `self`, starting at the most recent and ending /// Iterates across all available prior block roots of `self`, starting at the most recent and ending
/// at genesis. /// at genesis.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<BlockRootsIterator<'a, E, U>> { fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<BlockRootsIterator<'a, E, U>> {
@ -27,7 +29,9 @@ impl<'a, U: Store, E: EthSpec> AncestorIter<U, BlockRootsIterator<'a, E, U>> for
} }
} }
impl<'a, U: Store, E: EthSpec> AncestorIter<U, StateRootsIterator<'a, E, U>> for BeaconState<E> { impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, StateRootsIterator<'a, E, U>>
for BeaconState<E>
{
/// Iterates across all available prior state roots of `self`, starting at the most recent and ending /// Iterates across all available prior state roots of `self`, starting at the most recent and ending
/// at genesis. /// at genesis.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<StateRootsIterator<'a, E, U>> { fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<StateRootsIterator<'a, E, U>> {
@ -52,7 +56,7 @@ impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> {
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self { pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
Self { Self {
store, store,
@ -70,7 +74,7 @@ impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
type Item = (Hash256, Slot); type Item = (Hash256, Slot);
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -99,13 +103,13 @@ impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> {
} }
/// Block iterator that uses the `parent_root` of each block to backtrack. /// Block iterator that uses the `parent_root` of each block to backtrack.
pub struct ParentRootBlockIterator<'a, E: EthSpec, S: Store> { pub struct ParentRootBlockIterator<'a, E: EthSpec, S: Store<E>> {
store: &'a S, store: &'a S,
next_block_root: Hash256, next_block_root: Hash256,
_phantom: PhantomData<E>, _phantom: PhantomData<E>,
} }
impl<'a, E: EthSpec, S: Store> ParentRootBlockIterator<'a, E, S> { impl<'a, E: EthSpec, S: Store<E>> ParentRootBlockIterator<'a, E, S> {
pub fn new(store: &'a S, start_block_root: Hash256) -> Self { pub fn new(store: &'a S, start_block_root: Hash256) -> Self {
Self { Self {
store, store,
@ -115,7 +119,7 @@ impl<'a, E: EthSpec, S: Store> ParentRootBlockIterator<'a, E, S> {
} }
} }
impl<'a, E: EthSpec, S: Store> Iterator for ParentRootBlockIterator<'a, E, S> { impl<'a, E: EthSpec, S: Store<E>> Iterator for ParentRootBlockIterator<'a, E, S> {
type Item = BeaconBlock<E>; type Item = BeaconBlock<E>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -137,7 +141,7 @@ pub struct BlockIterator<'a, T: EthSpec, U> {
roots: BlockRootsIterator<'a, T, U>, roots: BlockRootsIterator<'a, T, U>,
} }
impl<'a, T: EthSpec, U: Store> BlockIterator<'a, T, U> { impl<'a, T: EthSpec, U: Store<T>> BlockIterator<'a, T, U> {
/// Create a new iterator over all blocks in the given `beacon_state` and prior states. /// Create a new iterator over all blocks in the given `beacon_state` and prior states.
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self { pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
Self { Self {
@ -153,7 +157,7 @@ impl<'a, T: EthSpec, U: Store> BlockIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockIterator<'a, T, U> {
type Item = BeaconBlock<T>; type Item = BeaconBlock<T>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -186,7 +190,7 @@ impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> { impl<'a, T: EthSpec, U: Store<T>> BlockRootsIterator<'a, T, U> {
/// Create a new iterator over all block roots in the given `beacon_state` and prior states. /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self { pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self {
Self { Self {
@ -206,7 +210,7 @@ impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> {
type Item = (Hash256, Slot); type Item = (Hash256, Slot);
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -235,7 +239,7 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> {
} }
/// Fetch the next state to use whilst backtracking in `*RootsIterator`. /// Fetch the next state to use whilst backtracking in `*RootsIterator`.
fn next_historical_root_backtrack_state<E: EthSpec, S: Store>( fn next_historical_root_backtrack_state<E: EthSpec, S: Store<E>>(
store: &S, store: &S,
current_state: &BeaconState<E>, current_state: &BeaconState<E>,
) -> Option<BeaconState<E>> { ) -> Option<BeaconState<E>> {


@@ -1,4 +1,5 @@
 use super::*;
+use crate::forwards_iter::SimpleForwardsBlockRootsIterator;
 use crate::impls::beacon_state::{get_full_state, store_full_state};
 use crate::metrics;
 use db_key::Key;
@@ -6,14 +7,16 @@ use leveldb::database::kv::KV;
 use leveldb::database::Database;
 use leveldb::error::Error as LevelDBError;
 use leveldb::options::{Options, ReadOptions, WriteOptions};
+use std::marker::PhantomData;
 use std::path::Path;
 /// A wrapped leveldb database.
-pub struct LevelDB {
+pub struct LevelDB<E: EthSpec> {
     db: Database<BytesKey>,
+    _phantom: PhantomData<E>,
 }
-impl LevelDB {
+impl<E: EthSpec> LevelDB<E> {
     /// Open a database at `path`, creating a new database if one does not already exist.
     pub fn open(path: &Path) -> Result<Self, Error> {
         let mut options = Options::new();
@@ -22,7 +25,10 @@ impl LevelDB {
         let db = Database::open(path, options)?;
-        Ok(Self { db })
+        Ok(Self {
+            db,
+            _phantom: PhantomData,
+        })
     }
     fn read_options(&self) -> ReadOptions<BytesKey> {
@@ -55,7 +61,9 @@ impl Key for BytesKey {
     }
 }
-impl Store for LevelDB {
+impl<E: EthSpec> Store<E> for LevelDB<E> {
+    type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator;
     /// Retrieve some bytes in `column` with `key`.
     fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
         let column_key = Self::get_key_for_col(col, key);
@@ -110,22 +118,28 @@ impl Store for LevelDB {
     }
     /// Store a state in the store.
-    fn put_state<E: EthSpec>(
-        &self,
-        state_root: &Hash256,
-        state: &BeaconState<E>,
-    ) -> Result<(), Error> {
+    fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
         store_full_state(self, state_root, state)
     }
     /// Fetch a state from the store.
-    fn get_state<E: EthSpec>(
+    fn get_state(
         &self,
         state_root: &Hash256,
         _: Option<Slot>,
     ) -> Result<Option<BeaconState<E>>, Error> {
         get_full_state(self, state_root)
     }
+    fn forwards_block_roots_iterator(
+        store: Arc<Self>,
+        start_slot: Slot,
+        end_state: BeaconState<E>,
+        end_block_root: Hash256,
+        _: &ChainSpec,
+    ) -> Self::ForwardsBlockRootsIterator {
+        SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root)
+    }
 }
 impl From<LevelDBError> for Error {

View File

@ -11,9 +11,11 @@
extern crate lazy_static; extern crate lazy_static;
mod block_at_slot; mod block_at_slot;
pub mod chunked_iter;
pub mod chunked_vector; pub mod chunked_vector;
pub mod config; pub mod config;
mod errors; mod errors;
mod forwards_iter;
mod hot_cold_store; mod hot_cold_store;
mod impls; mod impls;
mod leveldb_store; mod leveldb_store;
@ -42,7 +44,9 @@ pub use types::*;
/// A `Store` is fundamentally backed by a key-value database, however it provides support for /// A `Store` is fundamentally backed by a key-value database, however it provides support for
/// columns. A simple column implementation might involve prefixing a key with some bytes unique to /// columns. A simple column implementation might involve prefixing a key with some bytes unique to
/// each column. /// each column.
pub trait Store: Sync + Send + Sized + 'static { pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
type ForwardsBlockRootsIterator: Iterator<Item = (Hash256, Slot)>;
/// Retrieve some bytes in `column` with `key`. /// Retrieve some bytes in `column` with `key`.
fn get_bytes(&self, column: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error>; fn get_bytes(&self, column: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error>;
@ -76,14 +80,10 @@ pub trait Store: Sync + Send + Sized + 'static {
} }
/// Store a state in the store. /// Store a state in the store.
fn put_state<E: EthSpec>( fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error>;
&self,
state_root: &Hash256,
state: &BeaconState<E>,
) -> Result<(), Error>;
/// Fetch a state from the store. /// Fetch a state from the store.
fn get_state<E: EthSpec>( fn get_state(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
slot: Option<Slot>, slot: Option<Slot>,
@ -94,7 +94,7 @@ pub trait Store: Sync + Send + Sized + 'static {
/// ///
/// Returns `None` if no parent block exists at that slot, or if `slot` is greater than the /// Returns `None` if no parent block exists at that slot, or if `slot` is greater than the
/// slot of `start_block_root`. /// slot of `start_block_root`.
fn get_block_at_preceeding_slot<E: EthSpec>( fn get_block_at_preceeding_slot(
&self, &self,
start_block_root: Hash256, start_block_root: Hash256,
slot: Slot, slot: Slot,
@ -103,13 +103,31 @@ pub trait Store: Sync + Send + Sized + 'static {
} }
/// (Optionally) Move all data before the frozen slot to the freezer database. /// (Optionally) Move all data before the frozen slot to the freezer database.
fn freeze_to_state<E: EthSpec>( fn freeze_to_state(
_store: Arc<Self>, _store: Arc<Self>,
_frozen_head_root: Hash256, _frozen_head_root: Hash256,
_frozen_head: &BeaconState<E>, _frozen_head: &BeaconState<E>,
) -> Result<(), Error> { ) -> Result<(), Error> {
Ok(()) Ok(())
} }
/// Get a forwards (slot-ascending) iterator over the beacon block roots since `start_slot`.
///
/// Will be efficient for frozen portions of the database if using `DiskStore`.
///
/// The `end_state` and `end_block_root` are required for backtracking in the post-finalization
/// part of the chain, and should usually be set to the current head. Importantly, the
/// `end_state` must be a state that has had a block applied to it, and the hash of that
/// block must be `end_block_root`.
// NOTE: could maybe optimise by getting the `BeaconState` and end block root from a closure, as
// it's not always required.
fn forwards_block_roots_iterator(
store: Arc<Self>,
start_slot: Slot,
end_state: BeaconState<E>,
end_block_root: Hash256,
spec: &ChainSpec,
) -> Self::ForwardsBlockRootsIterator;
} }
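As a usage sketch for the new trait method (the function name and the way the head state is obtained are illustrative; the argument order follows the signature above):

fn print_roots_since<E: EthSpec, S: Store<E>>(
    store: Arc<S>,
    start_slot: Slot,
    head_state: BeaconState<E>,
    head_block_root: Hash256,
    spec: &ChainSpec,
) {
    // Yields (block_root, slot) pairs in slot-ascending order, from `start_slot` up to the
    // slot of `head_block_root`. `head_state` must be the post-state of `head_block_root`.
    for (root, slot) in S::forwards_block_roots_iterator(
        store,
        start_slot,
        head_state,
        head_block_root,
        spec,
    ) {
        println!("slot {}: block root {:?}", slot, root);
    }
}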
/// A unique column identifier. /// A unique column identifier.
@ -165,16 +183,16 @@ pub trait SimpleStoreItem: Sized {
/// An item that may be stored in a `Store`. /// An item that may be stored in a `Store`.
pub trait StoreItem: Sized { pub trait StoreItem: Sized {
/// Store `self`. /// Store `self`.
fn db_put<S: Store>(&self, store: &S, key: &Hash256) -> Result<(), Error>; fn db_put<S: Store<E>, E: EthSpec>(&self, store: &S, key: &Hash256) -> Result<(), Error>;
/// Retrieve an instance of `Self` from `store`. /// Retrieve an instance of `Self` from `store`.
fn db_get<S: Store>(store: &S, key: &Hash256) -> Result<Option<Self>, Error>; fn db_get<S: Store<E>, E: EthSpec>(store: &S, key: &Hash256) -> Result<Option<Self>, Error>;
/// Return `true` if an instance of `Self` exists in `store`. /// Return `true` if an instance of `Self` exists in `store`.
fn db_exists<S: Store>(store: &S, key: &Hash256) -> Result<bool, Error>; fn db_exists<S: Store<E>, E: EthSpec>(store: &S, key: &Hash256) -> Result<bool, Error>;
/// Delete an instance of `Self` from `store`. /// Delete an instance of `Self` from `store`.
fn db_delete<S: Store>(store: &S, key: &Hash256) -> Result<(), Error>; fn db_delete<S: Store<E>, E: EthSpec>(store: &S, key: &Hash256) -> Result<(), Error>;
} }
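The methods above now thread the `EthSpec` parameter through a second generic rather than through `Store` alone. A hedged round-trip sketch using the `StorableThing` test type defined in the (elided) test module further down, assuming it derives `Debug` and `PartialEq` as the existing tests imply:

fn round_trip<E: EthSpec, S: Store<E>>(store: &S) -> Result<(), Error> {
    let key = Hash256::random();
    let item = StorableThing { a: 1, b: 42 };

    // The blanket impl below gives any `SimpleStoreItem` these methods for free.
    item.db_put(store, &key)?;
    let fetched = StorableThing::db_get(store, &key)?;
    assert_eq!(fetched, Some(item));

    StorableThing::db_delete(store, &key)?;
    assert_eq!(StorableThing::db_exists(store, &key)?, false);
    Ok(())
}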
impl<T> StoreItem for T impl<T> StoreItem for T
@ -182,7 +200,7 @@ where
T: SimpleStoreItem, T: SimpleStoreItem,
{ {
/// Store `self`. /// Store `self`.
fn db_put<S: Store>(&self, store: &S, key: &Hash256) -> Result<(), Error> { fn db_put<S: Store<E>, E: EthSpec>(&self, store: &S, key: &Hash256) -> Result<(), Error> {
let column = Self::db_column().into(); let column = Self::db_column().into();
let key = key.as_bytes(); let key = key.as_bytes();
@ -192,7 +210,7 @@ where
} }
/// Retrieve an instance of `Self`. /// Retrieve an instance of `Self`.
fn db_get<S: Store>(store: &S, key: &Hash256) -> Result<Option<Self>, Error> { fn db_get<S: Store<E>, E: EthSpec>(store: &S, key: &Hash256) -> Result<Option<Self>, Error> {
let column = Self::db_column().into(); let column = Self::db_column().into();
let key = key.as_bytes(); let key = key.as_bytes();
@ -203,7 +221,7 @@ where
} }
/// Return `true` if an instance of `Self` exists in `Store`. /// Return `true` if an instance of `Self` exists in `Store`.
fn db_exists<S: Store>(store: &S, key: &Hash256) -> Result<bool, Error> { fn db_exists<S: Store<E>, E: EthSpec>(store: &S, key: &Hash256) -> Result<bool, Error> {
let column = Self::db_column().into(); let column = Self::db_column().into();
let key = key.as_bytes(); let key = key.as_bytes();
@ -211,7 +229,7 @@ where
} }
/// Delete `self` from the `Store`. /// Delete `self` from the `Store`.
fn db_delete<S: Store>(store: &S, key: &Hash256) -> Result<(), Error> { fn db_delete<S: Store<E>, E: EthSpec>(store: &S, key: &Hash256) -> Result<(), Error> {
let column = Self::db_column().into(); let column = Self::db_column().into();
let key = key.as_bytes(); let key = key.as_bytes();
@ -246,7 +264,7 @@ mod tests {
} }
} }
fn test_impl(store: impl Store) { fn test_impl(store: impl Store<MinimalEthSpec>) {
let key = Hash256::random(); let key = Hash256::random();
let item = StorableThing { a: 1, b: 42 }; let item = StorableThing { a: 1, b: 42 };
@ -275,7 +293,7 @@ mod tests {
let slots_per_restore_point = MinimalEthSpec::slots_per_historical_root() as u64; let slots_per_restore_point = MinimalEthSpec::slots_per_historical_root() as u64;
let spec = MinimalEthSpec::default_spec(); let spec = MinimalEthSpec::default_spec();
let log = NullLoggerBuilder.build().unwrap(); let log = NullLoggerBuilder.build().unwrap();
let store = DiskStore::open::<MinimalEthSpec>( let store = DiskStore::open(
&hot_dir.path(), &hot_dir.path(),
&cold_dir.path(), &cold_dir.path(),
slots_per_restore_point, slots_per_restore_point,
@ -305,7 +323,7 @@ mod tests {
#[test] #[test]
fn exists() { fn exists() {
let store = MemoryStore::open(); let store = MemoryStore::<MinimalEthSpec>::open();
let key = Hash256::random(); let key = Hash256::random();
let item = StorableThing { a: 1, b: 42 }; let item = StorableThing { a: 1, b: 42 };

View File

@ -1,29 +1,35 @@
use super::{Error, Store}; use super::{Error, Store};
use crate::forwards_iter::SimpleForwardsBlockRootsIterator;
use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::impls::beacon_state::{get_full_state, store_full_state};
use parking_lot::RwLock; use parking_lot::RwLock;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::Arc;
use types::*; use types::*;
type DBHashMap = HashMap<Vec<u8>, Vec<u8>>; type DBHashMap = HashMap<Vec<u8>, Vec<u8>>;
/// A thread-safe `HashMap` wrapper. /// A thread-safe `HashMap` wrapper.
pub struct MemoryStore { pub struct MemoryStore<E: EthSpec> {
db: RwLock<DBHashMap>, db: RwLock<DBHashMap>,
_phantom: PhantomData<E>,
} }
impl Clone for MemoryStore { impl<E: EthSpec> Clone for MemoryStore<E> {
fn clone(&self) -> Self { fn clone(&self) -> Self {
Self { Self {
db: RwLock::new(self.db.read().clone()), db: RwLock::new(self.db.read().clone()),
_phantom: PhantomData,
} }
} }
} }
impl MemoryStore { impl<E: EthSpec> MemoryStore<E> {
/// Create a new, empty database. /// Create a new, empty database.
pub fn open() -> Self { pub fn open() -> Self {
Self { Self {
db: RwLock::new(HashMap::new()), db: RwLock::new(HashMap::new()),
_phantom: PhantomData,
} }
} }
@ -34,10 +40,12 @@ impl MemoryStore {
} }
} }
impl Store for MemoryStore { impl<E: EthSpec> Store<E> for MemoryStore<E> {
type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator;
/// Get the value of some key from the database. Returns `None` if the key does not exist. /// Get the value of some key from the database. Returns `None` if the key does not exist.
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> { fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
let column_key = MemoryStore::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
Ok(self Ok(self
.db .db
@ -48,7 +56,7 @@ impl Store for MemoryStore {
/// Puts a key in the database. /// Puts a key in the database.
fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> {
let column_key = MemoryStore::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
self.db.write().insert(column_key, val.to_vec()); self.db.write().insert(column_key, val.to_vec());
@ -57,14 +65,14 @@ impl Store for MemoryStore {
/// Return true if some key exists in some column. /// Return true if some key exists in some column.
fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> { fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> {
let column_key = MemoryStore::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
Ok(self.db.read().contains_key(&column_key)) Ok(self.db.read().contains_key(&column_key))
} }
/// Delete some key from the database. /// Delete some key from the database.
fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> {
let column_key = MemoryStore::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
self.db.write().remove(&column_key); self.db.write().remove(&column_key);
@ -72,20 +80,26 @@ impl Store for MemoryStore {
} }
/// Store a state in the store. /// Store a state in the store.
fn put_state<E: EthSpec>( fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
&self,
state_root: &Hash256,
state: &BeaconState<E>,
) -> Result<(), Error> {
store_full_state(self, state_root, state) store_full_state(self, state_root, state)
} }
/// Fetch a state from the store. /// Fetch a state from the store.
fn get_state<E: EthSpec>( fn get_state(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
_: Option<Slot>, _: Option<Slot>,
) -> Result<Option<BeaconState<E>>, Error> { ) -> Result<Option<BeaconState<E>>, Error> {
get_full_state(self, state_root) get_full_state(self, state_root)
} }
fn forwards_block_roots_iterator(
store: Arc<Self>,
start_slot: Slot,
end_state: BeaconState<E>,
end_block_root: Hash256,
_: &ChainSpec,
) -> Self::ForwardsBlockRootsIterator {
SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root)
}
} }
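The `get_key_for_col` helper used throughout this impl sits in the elided part of the hunk; it is the key-prefixing scheme described in the `Store` trait docs, where a column is just some bytes prepended to the key. A plausible reconstruction, not taken verbatim from the diff:

// Hypothetical sketch: namespace a key by prepending the column name.
fn get_key_for_col(col: &str, key: &[u8]) -> Vec<u8> {
    let mut column_key = col.as_bytes().to_vec();
    column_key.extend_from_slice(key);
    column_key
}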

View File

@ -23,14 +23,14 @@ pub trait Migrate<S, E: EthSpec>: Send + Sync + 'static {
/// Migrator that does nothing, for stores that don't need migration. /// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator; pub struct NullMigrator;
impl<E: EthSpec> Migrate<SimpleDiskStore, E> for NullMigrator { impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
fn new(_: Arc<SimpleDiskStore>) -> Self { fn new(_: Arc<SimpleDiskStore<E>>) -> Self {
NullMigrator NullMigrator
} }
} }
impl<E: EthSpec> Migrate<MemoryStore, E> for NullMigrator { impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
fn new(_: Arc<MemoryStore>) -> Self { fn new(_: Arc<MemoryStore<E>>) -> Self {
NullMigrator NullMigrator
} }
} }
@ -40,7 +40,7 @@ impl<E: EthSpec> Migrate<MemoryStore, E> for NullMigrator {
/// Mostly useful for tests. /// Mostly useful for tests.
pub struct BlockingMigrator<S>(Arc<S>); pub struct BlockingMigrator<S>(Arc<S>);
impl<E: EthSpec, S: Store> Migrate<S, E> for BlockingMigrator<S> { impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
fn new(db: Arc<S>) -> Self { fn new(db: Arc<S>) -> Self {
BlockingMigrator(db) BlockingMigrator(db)
} }
@ -60,15 +60,15 @@ impl<E: EthSpec, S: Store> Migrate<S, E> for BlockingMigrator<S> {
/// Migrator that runs a background thread to migrate state from the hot to the cold database. /// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec> { pub struct BackgroundMigrator<E: EthSpec> {
db: Arc<DiskStore>, db: Arc<DiskStore<E>>,
tx_thread: Mutex<( tx_thread: Mutex<(
mpsc::Sender<(Hash256, BeaconState<E>)>, mpsc::Sender<(Hash256, BeaconState<E>)>,
thread::JoinHandle<()>, thread::JoinHandle<()>,
)>, )>,
} }
impl<E: EthSpec> Migrate<DiskStore, E> for BackgroundMigrator<E> { impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
fn new(db: Arc<DiskStore>) -> Self { fn new(db: Arc<DiskStore<E>>) -> Self {
let tx_thread = Mutex::new(Self::spawn_thread(db.clone())); let tx_thread = Mutex::new(Self::spawn_thread(db.clone()));
Self { db, tx_thread } Self { db, tx_thread }
} }
@ -119,7 +119,7 @@ impl<E: EthSpec> BackgroundMigrator<E> {
/// ///
/// Return a channel handle for sending new finalized states to the thread. /// Return a channel handle for sending new finalized states to the thread.
fn spawn_thread( fn spawn_thread(
db: Arc<DiskStore>, db: Arc<DiskStore<E>>,
) -> ( ) -> (
mpsc::Sender<(Hash256, BeaconState<E>)>, mpsc::Sender<(Hash256, BeaconState<E>)>,
thread::JoinHandle<()>, thread::JoinHandle<()>,
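The `BackgroundMigrator` hand-off implied by `spawn_thread`'s return type is a plain mpsc channel: the migrator holds the sender, and pushing a finalized `(root, state)` pair wakes the worker thread that performs the hot-to-cold move. A hedged sketch of that hand-off (the function name is invented; the channel types are the ones shown above, and `std::sync::mpsc` is assumed to be in scope as in this file):

fn send_to_worker<E: EthSpec>(
    tx: &mpsc::Sender<(Hash256, BeaconState<E>)>,
    finalized_root: Hash256,
    finalized_state: BeaconState<E>,
) {
    if tx.send((finalized_root, finalized_state)).is_err() {
        // The worker thread has exited; a real implementation would respawn it
        // (the `tx_thread: Mutex<(Sender, JoinHandle)>` pairing above exists for that).
    }
}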

View File

@ -111,7 +111,11 @@ impl<T: EthSpec> PartialBeaconState<T> {
} }
} }
pub fn load_block_roots<S: Store>(&mut self, store: &S, spec: &ChainSpec) -> Result<(), Error> { pub fn load_block_roots<S: Store<T>>(
&mut self,
store: &S,
spec: &ChainSpec,
) -> Result<(), Error> {
if self.block_roots.is_none() { if self.block_roots.is_none() {
self.block_roots = Some(load_vector_from_db::<BlockRoots, T, _>( self.block_roots = Some(load_vector_from_db::<BlockRoots, T, _>(
store, self.slot, spec, store, self.slot, spec,
@ -120,7 +124,11 @@ impl<T: EthSpec> PartialBeaconState<T> {
Ok(()) Ok(())
} }
pub fn load_state_roots<S: Store>(&mut self, store: &S, spec: &ChainSpec) -> Result<(), Error> { pub fn load_state_roots<S: Store<T>>(
&mut self,
store: &S,
spec: &ChainSpec,
) -> Result<(), Error> {
if self.state_roots.is_none() { if self.state_roots.is_none() {
self.state_roots = Some(load_vector_from_db::<StateRoots, T, _>( self.state_roots = Some(load_vector_from_db::<StateRoots, T, _>(
store, self.slot, spec, store, self.slot, spec,
@ -129,7 +137,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
Ok(()) Ok(())
} }
pub fn load_historical_roots<S: Store>( pub fn load_historical_roots<S: Store<T>>(
&mut self, &mut self,
store: &S, store: &S,
spec: &ChainSpec, spec: &ChainSpec,
@ -142,7 +150,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
Ok(()) Ok(())
} }
pub fn load_randao_mixes<S: Store>( pub fn load_randao_mixes<S: Store<T>>(
&mut self, &mut self,
store: &S, store: &S,
spec: &ChainSpec, spec: &ChainSpec,

View File

@ -10,7 +10,7 @@ pub type Result<T> = std::result::Result<T, String>;
// Note: the `PartialEq` bound is only required for testing. If it becomes a serious annoyance we // Note: the `PartialEq` bound is only required for testing. If it becomes a serious annoyance we
// can remove it. // can remove it.
pub trait LmdGhost<S: Store, E: EthSpec>: PartialEq + Send + Sync + Sized { pub trait LmdGhost<S: Store<E>, E: EthSpec>: PartialEq + Send + Sync + Sized {
/// Create a new instance, with the given `store` and `finalized_root`. /// Create a new instance, with the given `store` and `finalized_root`.
fn new(store: Arc<S>, finalized_block: &BeaconBlock<E>, finalized_root: Hash256) -> Self; fn new(store: Arc<S>, finalized_block: &BeaconBlock<E>, finalized_root: Hash256) -> Self;

View File

@ -64,7 +64,7 @@ impl<T, E> PartialEq for ThreadSafeReducedTree<T, E> {
impl<T, E> LmdGhost<T, E> for ThreadSafeReducedTree<T, E> impl<T, E> LmdGhost<T, E> for ThreadSafeReducedTree<T, E>
where where
T: Store, T: Store<E>,
E: EthSpec, E: EthSpec,
{ {
fn new(store: Arc<T>, genesis_block: &BeaconBlock<E>, genesis_root: Hash256) -> Self { fn new(store: Arc<T>, genesis_block: &BeaconBlock<E>, genesis_root: Hash256) -> Self {
@ -218,7 +218,7 @@ impl<T, E> PartialEq for ReducedTree<T, E> {
impl<T, E> ReducedTree<T, E> impl<T, E> ReducedTree<T, E>
where where
T: Store, T: Store<E>,
E: EthSpec, E: EthSpec,
{ {
pub fn new(store: Arc<T>, genesis_block: &BeaconBlock<E>, genesis_root: Hash256) -> Self { pub fn new(store: Arc<T>, genesis_block: &BeaconBlock<E>, genesis_root: Hash256) -> Self {
@ -976,16 +976,15 @@ mod tests {
#[test] #[test]
fn test_reduced_tree_ssz() { fn test_reduced_tree_ssz() {
let store = Arc::new(MemoryStore::open()); let store = Arc::new(MemoryStore::<MinimalEthSpec>::open());
let tree = ReducedTree::<MemoryStore, MinimalEthSpec>::new( let tree = ReducedTree::new(
store.clone(), store.clone(),
&BeaconBlock::empty(&MinimalEthSpec::default_spec()), &BeaconBlock::empty(&MinimalEthSpec::default_spec()),
Hash256::zero(), Hash256::zero(),
); );
let ssz_tree = ReducedTreeSsz::from_reduced_tree(&tree); let ssz_tree = ReducedTreeSsz::from_reduced_tree(&tree);
let bytes = tree.as_bytes(); let bytes = tree.as_bytes();
let recovered_tree = let recovered_tree = ReducedTree::from_bytes(&bytes, store.clone()).unwrap();
ReducedTree::<MemoryStore, MinimalEthSpec>::from_bytes(&bytes, store.clone()).unwrap();
let recovered_ssz = ReducedTreeSsz::from_reduced_tree(&recovered_tree); let recovered_ssz = ReducedTreeSsz::from_reduced_tree(&recovered_tree);
assert_eq!(ssz_tree, recovered_ssz); assert_eq!(ssz_tree, recovered_ssz);

View File

@ -10,17 +10,14 @@ use beacon_chain::test_utils::{
use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree};
use rand::{prelude::*, rngs::StdRng}; use rand::{prelude::*, rngs::StdRng};
use std::sync::Arc; use std::sync::Arc;
use store::{ use store::{iter::AncestorIter, MemoryStore, Store};
iter::{AncestorIter, BlockRootsIterator},
MemoryStore, Store,
};
use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot}; use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot};
// Should ideally be divisible by 3. // Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 3 * 8; pub const VALIDATOR_COUNT: usize = 3 * 8;
type TestEthSpec = MinimalEthSpec; type TestEthSpec = MinimalEthSpec;
type ThreadSafeReducedTree = BaseThreadSafeReducedTree<MemoryStore, TestEthSpec>; type ThreadSafeReducedTree = BaseThreadSafeReducedTree<MemoryStore<TestEthSpec>, TestEthSpec>;
type BeaconChainHarness = BaseBeaconChainHarness<HarnessType<TestEthSpec>>; type BeaconChainHarness = BaseBeaconChainHarness<HarnessType<TestEthSpec>>;
type RootAndSlot = (Hash256, Slot); type RootAndSlot = (Hash256, Slot);
@ -86,16 +83,14 @@ impl ForkedHarness {
faulty_fork_blocks, faulty_fork_blocks,
); );
let mut honest_roots = let mut honest_roots = get_ancestor_roots(harness.chain.store.clone(), honest_head);
get_ancestor_roots::<TestEthSpec, _>(harness.chain.store.clone(), honest_head);
honest_roots.insert( honest_roots.insert(
0, 0,
(honest_head, get_slot_for_block_root(&harness, honest_head)), (honest_head, get_slot_for_block_root(&harness, honest_head)),
); );
let mut faulty_roots = let mut faulty_roots = get_ancestor_roots(harness.chain.store.clone(), faulty_head);
get_ancestor_roots::<TestEthSpec, _>(harness.chain.store.clone(), faulty_head);
faulty_roots.insert( faulty_roots.insert(
0, 0,
@ -121,7 +116,7 @@ impl ForkedHarness {
} }
} }
pub fn store_clone(&self) -> MemoryStore { pub fn store_clone(&self) -> MemoryStore<TestEthSpec> {
(*self.harness.chain.store).clone() (*self.harness.chain.store).clone()
} }
@ -131,7 +126,7 @@ impl ForkedHarness {
// //
// Taking a clone here ensures that each fork choice gets its own store so there is no // Taking a clone here ensures that each fork choice gets its own store so there is no
// cross-contamination between tests. // cross-contamination between tests.
let store: MemoryStore = self.store_clone(); let store: MemoryStore<TestEthSpec> = self.store_clone();
ThreadSafeReducedTree::new( ThreadSafeReducedTree::new(
Arc::new(store), Arc::new(store),
@ -155,7 +150,7 @@ impl ForkedHarness {
} }
/// Helper: returns all the ancestor roots and slots for a given block_root. /// Helper: returns all the ancestor roots and slots for a given block_root.
fn get_ancestor_roots<E: EthSpec, U: Store>( fn get_ancestor_roots<U: Store<TestEthSpec>>(
store: Arc<U>, store: Arc<U>,
block_root: Hash256, block_root: Hash256,
) -> Vec<(Hash256, Slot)> { ) -> Vec<(Hash256, Slot)> {
@ -164,9 +159,7 @@ fn get_ancestor_roots<E: EthSpec, U: Store>(
.expect("block should exist") .expect("block should exist")
.expect("store should not error"); .expect("store should not error");
<BeaconBlock<TestEthSpec> as AncestorIter<_, BlockRootsIterator<TestEthSpec, _>>>::try_iter_ancestor_roots( <BeaconBlock<TestEthSpec> as AncestorIter<_, _, _>>::try_iter_ancestor_roots(&block, store)
&block, store,
)
.expect("should be able to create ancestor iter") .expect("should be able to create ancestor iter")
.collect() .collect()
} }