Make key value storage abstractions more accurate (#1267)

* Layer do_atomically() abstractions properly

* Reduce allocs and DRY get_key_for_col()

* Parameterize HotColdDB with hot and cold item stores

* -impl Store for MemoryStore

* Replace Store uses with HotColdDB

* Ditch Store trait

* cargo fmt

* Style fix

* Re-add missing dep that broke the build
This commit is contained in:
Adam Szkoda 2020-06-16 03:34:04 +02:00 committed by GitHub
parent 6b8c96662f
commit 9db0c28051
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
30 changed files with 589 additions and 575 deletions

3
Cargo.lock generated
View File

@ -4312,7 +4312,7 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
name = "slashing_protection" name = "slashing_protection"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"parking_lot 0.9.0", "parking_lot 0.10.2",
"r2d2", "r2d2",
"r2d2_sqlite", "r2d2_sqlite",
"rayon", "rayon",
@ -4643,6 +4643,7 @@ dependencies = [
"serde", "serde",
"serde_derive", "serde_derive",
"slog", "slog",
"sloggers",
"state_processing", "state_processing",
"tempfile", "tempfile",
"tree_hash", "tree_hash",

View File

@ -40,7 +40,7 @@ use std::io::prelude::*;
use std::sync::Arc; use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator};
use store::{Error as DBError, Store}; use store::{Error as DBError, HotColdDB};
use types::*; use types::*;
// Text included in blocks. // Text included in blocks.
@ -148,10 +148,11 @@ pub struct HeadInfo {
} }
pub trait BeaconChainTypes: Send + Sync + 'static { pub trait BeaconChainTypes: Send + Sync + 'static {
type Store: store::Store<Self::EthSpec>; type HotStore: store::ItemStore<Self::EthSpec>;
type StoreMigrator: Migrate<Self::EthSpec>; type ColdStore: store::ItemStore<Self::EthSpec>;
type StoreMigrator: Migrate<Self::EthSpec, Self::HotStore, Self::ColdStore>;
type SlotClock: slot_clock::SlotClock; type SlotClock: slot_clock::SlotClock;
type Eth1Chain: Eth1ChainBackend<Self::EthSpec, Self::Store>; type Eth1Chain: Eth1ChainBackend<Self::EthSpec>;
type EthSpec: types::EthSpec; type EthSpec: types::EthSpec;
type EventHandler: EventHandler<Self::EthSpec>; type EventHandler: EventHandler<Self::EthSpec>;
} }
@ -161,7 +162,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static {
pub struct BeaconChain<T: BeaconChainTypes> { pub struct BeaconChain<T: BeaconChainTypes> {
pub spec: ChainSpec, pub spec: ChainSpec,
/// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB. /// Persistent storage for blocks, states, etc. Typically an on-disk store, such as LevelDB.
pub store: Arc<T::Store>, pub store: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
/// Database migrator for running background maintenance on the store. /// Database migrator for running background maintenance on the store.
pub store_migrator: T::StoreMigrator, pub store_migrator: T::StoreMigrator,
/// Reports the current slot, typically based upon the system clock. /// Reports the current slot, typically based upon the system clock.
@ -185,7 +186,7 @@ pub struct BeaconChain<T: BeaconChainTypes> {
/// Maintains a record of which validators have proposed blocks for each slot. /// Maintains a record of which validators have proposed blocks for each slot.
pub observed_block_producers: ObservedBlockProducers<T::EthSpec>, pub observed_block_producers: ObservedBlockProducers<T::EthSpec>,
/// Provides information from the Ethereum 1 (PoW) chain. /// Provides information from the Ethereum 1 (PoW) chain.
pub eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec, T::Store>>, pub eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
/// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received.
pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>, pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>,
/// The root of the genesis block. /// The root of the genesis block.
@ -335,16 +336,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
pub fn forwards_iter_block_roots( pub fn forwards_iter_block_roots(
&self, &self,
start_slot: Slot, start_slot: Slot,
) -> Result<<T::Store as Store<T::EthSpec>>::ForwardsBlockRootsIterator, Error> { ) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>>, Error> {
let local_head = self.head()?; let local_head = self.head()?;
Ok(T::Store::forwards_block_roots_iterator( let iter = HotColdDB::forwards_block_roots_iterator(
self.store.clone(), self.store.clone(),
start_slot, start_slot,
local_head.beacon_state, local_head.beacon_state,
local_head.beacon_block_root, local_head.beacon_block_root,
&self.spec, &self.spec,
)?) )?;
Ok(iter.map(|result| result.map_err(Into::into)))
} }
/// Traverse backwards from `block_root` to find the block roots of its ancestors. /// Traverse backwards from `block_root` to find the block roots of its ancestors.

View File

@ -24,7 +24,7 @@ use std::marker::PhantomData;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use store::Store; use store::{HotColdDB, ItemStore};
use types::{ use types::{
BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Signature, SignedBeaconBlock, Slot, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Signature, SignedBeaconBlock, Slot,
}; };
@ -33,28 +33,48 @@ pub const PUBKEY_CACHE_FILENAME: &str = "pubkey_cache.ssz";
/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing
/// functionality and only exists to satisfy the type system. /// functionality and only exists to satisfy the type system.
pub struct Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>( pub struct Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>(
PhantomData<( PhantomData<(
TStore,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
TEth1Backend, TEth1Backend,
TEthSpec, TEthSpec,
TEventHandler, TEventHandler,
THotStore,
TColdStore,
)>, )>,
); );
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> BeaconChainTypes impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
for Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> BeaconChainTypes
for Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>
where where
TStore: Store<TEthSpec> + 'static, THotStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec> + 'static, TColdStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
{ {
type Store = TStore; type HotStore = THotStore;
type ColdStore = TColdStore;
type StoreMigrator = TStoreMigrator; type StoreMigrator = TStoreMigrator;
type SlotClock = TSlotClock; type SlotClock = TSlotClock;
type Eth1Chain = TEth1Backend; type Eth1Chain = TEth1Backend;
@ -71,7 +91,7 @@ where
/// ///
/// See the tests for an example of a complete working example. /// See the tests for an example of a complete working example.
pub struct BeaconChainBuilder<T: BeaconChainTypes> { pub struct BeaconChainBuilder<T: BeaconChainTypes> {
store: Option<Arc<T::Store>>, store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
store_migrator: Option<T::StoreMigrator>, store_migrator: Option<T::StoreMigrator>,
canonical_head: Option<BeaconSnapshot<T::EthSpec>>, canonical_head: Option<BeaconSnapshot<T::EthSpec>>,
/// The finalized checkpoint to anchor the chain. May be genesis or a higher /// The finalized checkpoint to anchor the chain. May be genesis or a higher
@ -80,7 +100,7 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
genesis_block_root: Option<Hash256>, genesis_block_root: Option<Hash256>,
op_pool: Option<OperationPool<T::EthSpec>>, op_pool: Option<OperationPool<T::EthSpec>>,
fork_choice: Option<ForkChoice<T>>, fork_choice: Option<ForkChoice<T>>,
eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec, T::Store>>, eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
event_handler: Option<T::EventHandler>, event_handler: Option<T::EventHandler>,
slot_clock: Option<T::SlotClock>, slot_clock: Option<T::SlotClock>,
head_tracker: Option<HeadTracker>, head_tracker: Option<HeadTracker>,
@ -92,15 +112,24 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
log: Option<Logger>, log: Option<Logger>,
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
BeaconChainBuilder< BeaconChainBuilder<
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>, Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> >
where where
TStore: Store<TEthSpec> + 'static, THotStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec> + 'static, TColdStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
{ {
@ -142,7 +171,7 @@ where
/// Sets the store (database). /// Sets the store (database).
/// ///
/// Should generally be called early in the build chain. /// Should generally be called early in the build chain.
pub fn store(mut self, store: Arc<TStore>) -> Self { pub fn store(mut self, store: Arc<HotColdDB<TEthSpec, THotStore, TColdStore>>) -> Self {
self.store = Some(store); self.store = Some(store);
self self
} }
@ -379,7 +408,15 @@ where
self, self,
) -> Result< ) -> Result<
BeaconChain< BeaconChain<
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>, Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
>, >,
String, String,
> { > {
@ -480,17 +517,26 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
BeaconChainBuilder< BeaconChainBuilder<
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>, Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> >
where where
TStore: Store<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
TStoreMigrator: Migrate<TEthSpec> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
/// Initializes a fork choice with the `ThreadSafeReducedTree` backend. /// Initializes a fork choice with the `ThreadSafeReducedTree` backend.
/// ///
@ -543,20 +589,22 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEthSpec, TEventHandler, THotStore, TColdStore>
BeaconChainBuilder< BeaconChainBuilder<
Witness< Witness<
TStore,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
CachingEth1Backend<TEthSpec, TStore>, CachingEth1Backend<TEthSpec>,
TEthSpec, TEthSpec,
TEventHandler, TEventHandler,
THotStore,
TColdStore,
>, >,
> >
where where
TStore: Store<TEthSpec> + 'static, THotStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec> + 'static, TColdStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
@ -572,12 +620,8 @@ where
.log .log
.as_ref() .as_ref()
.ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?; .ok_or_else(|| "dummy_eth1_backend requires a log".to_string())?;
let store = self
.store
.clone()
.ok_or_else(|| "dummy_eth1_backend requires a store.".to_string())?;
let backend = CachingEth1Backend::new(Eth1Config::default(), log.clone(), store); let backend = CachingEth1Backend::new(Eth1Config::default(), log.clone());
let mut eth1_chain = Eth1Chain::new(backend); let mut eth1_chain = Eth1Chain::new(backend);
eth1_chain.use_dummy_backend = true; eth1_chain.use_dummy_backend = true;
@ -588,14 +632,23 @@ where
} }
} }
impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
BeaconChainBuilder< BeaconChainBuilder<
Witness<TStore, TStoreMigrator, TestingSlotClock, TEth1Backend, TEthSpec, TEventHandler>, Witness<
TStoreMigrator,
TestingSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> >
where where
TStore: Store<TEthSpec> + 'static, THotStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec> + 'static, TColdStore: ItemStore<TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
{ {
@ -620,22 +673,24 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
BeaconChainBuilder< BeaconChainBuilder<
Witness< Witness<
TStore,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
TEth1Backend, TEth1Backend,
TEthSpec, TEthSpec,
NullEventHandler<TEthSpec>, NullEventHandler<TEthSpec>,
THotStore,
TColdStore,
>, >,
> >
where where
TStore: Store<TEthSpec> + 'static, THotStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec> + 'static, TColdStore: ItemStore<TEthSpec> + 'static,
TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore> + 'static,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
{ {
/// Sets the `BeaconChain` event handler to `NullEventHandler`. /// Sets the `BeaconChain` event handler to `NullEventHandler`.
@ -665,12 +720,14 @@ fn genesis_block<T: EthSpec>(
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use crate::migrate::{MemoryStore, NullMigrator}; use crate::migrate::NullMigrator;
use eth2_hashing::hash; use eth2_hashing::hash;
use genesis::{generate_deterministic_keypairs, interop_genesis_state}; use genesis::{generate_deterministic_keypairs, interop_genesis_state};
use sloggers::{null::NullLoggerBuilder, Build}; use sloggers::{null::NullLoggerBuilder, Build};
use ssz::Encode; use ssz::Encode;
use std::time::Duration; use std::time::Duration;
use store::config::StoreConfig;
use store::{HotColdDB, MemoryStore};
use tempfile::tempdir; use tempfile::tempdir;
use types::{EthSpec, MinimalEthSpec, Slot}; use types::{EthSpec, MinimalEthSpec, Slot};
@ -687,7 +744,12 @@ mod test {
let genesis_time = 13_371_337; let genesis_time = 13_371_337;
let log = get_logger(); let log = get_logger();
let store = Arc::new(MemoryStore::open()); let store: HotColdDB<
MinimalEthSpec,
MemoryStore<MinimalEthSpec>,
MemoryStore<MinimalEthSpec>,
> = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone())
.unwrap();
let spec = MinimalEthSpec::default_spec(); let spec = MinimalEthSpec::default_spec();
let data_dir = tempdir().expect("should create temporary data_dir"); let data_dir = tempdir().expect("should create temporary data_dir");
@ -700,7 +762,7 @@ mod test {
let chain = BeaconChainBuilder::new(MinimalEthSpec) let chain = BeaconChainBuilder::new(MinimalEthSpec)
.logger(log.clone()) .logger(log.clone())
.store(store) .store(Arc::new(store))
.store_migrator(NullMigrator) .store_migrator(NullMigrator)
.data_dir(data_dir.path().to_path_buf()) .data_dir(data_dir.path().to_path_buf())
.genesis_state(genesis_state) .genesis_state(genesis_state)

View File

@ -10,8 +10,7 @@ use std::cmp::Ordering;
use std::collections::HashMap; use std::collections::HashMap;
use std::iter::DoubleEndedIterator; use std::iter::DoubleEndedIterator;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::sync::Arc; use store::{DBColumn, Error as StoreError, StoreItem};
use store::{DBColumn, Error as StoreError, Store, StoreItem};
use types::{ use types::{
BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
DEPOSIT_TREE_DEPTH, DEPOSIT_TREE_DEPTH,
@ -75,24 +74,22 @@ impl StoreItem for SszEth1 {
} }
/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. /// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`.
pub struct Eth1Chain<T, E, S> pub struct Eth1Chain<T, E>
where where
T: Eth1ChainBackend<E, S>, T: Eth1ChainBackend<E>,
E: EthSpec, E: EthSpec,
S: Store<E>,
{ {
backend: T, backend: T,
/// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method /// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method
/// will be used instead. /// will be used instead.
pub use_dummy_backend: bool, pub use_dummy_backend: bool,
_phantom: PhantomData<(E, S)>, _phantom: PhantomData<E>,
} }
impl<T, E, S> Eth1Chain<T, E, S> impl<T, E> Eth1Chain<T, E>
where where
T: Eth1ChainBackend<E, S>, T: Eth1ChainBackend<E>,
E: EthSpec, E: EthSpec,
S: Store<E>,
{ {
pub fn new(backend: T) -> Self { pub fn new(backend: T) -> Self {
Self { Self {
@ -110,7 +107,7 @@ where
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Eth1Data, Error> { ) -> Result<Eth1Data, Error> {
if self.use_dummy_backend { if self.use_dummy_backend {
let dummy_backend: DummyEth1ChainBackend<E, S> = DummyEth1ChainBackend::default(); let dummy_backend: DummyEth1ChainBackend<E> = DummyEth1ChainBackend::default();
dummy_backend.eth1_data(state, spec) dummy_backend.eth1_data(state, spec)
} else { } else {
self.backend.eth1_data(state, spec) self.backend.eth1_data(state, spec)
@ -132,7 +129,7 @@ where
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Vec<Deposit>, Error> { ) -> Result<Vec<Deposit>, Error> {
if self.use_dummy_backend { if self.use_dummy_backend {
let dummy_backend: DummyEth1ChainBackend<E, S> = DummyEth1ChainBackend::default(); let dummy_backend: DummyEth1ChainBackend<E> = DummyEth1ChainBackend::default();
dummy_backend.queued_deposits(state, eth1_data_vote, spec) dummy_backend.queued_deposits(state, eth1_data_vote, spec)
} else { } else {
self.backend.queued_deposits(state, eth1_data_vote, spec) self.backend.queued_deposits(state, eth1_data_vote, spec)
@ -145,11 +142,10 @@ where
pub fn from_ssz_container( pub fn from_ssz_container(
ssz_container: &SszEth1, ssz_container: &SszEth1,
config: Eth1Config, config: Eth1Config,
store: Arc<S>,
log: &Logger, log: &Logger,
) -> Result<Self, String> { ) -> Result<Self, String> {
let backend = let backend =
Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, store, log.clone())?; Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, log.clone())?;
Ok(Self { Ok(Self {
use_dummy_backend: ssz_container.use_dummy_backend, use_dummy_backend: ssz_container.use_dummy_backend,
backend, backend,
@ -171,7 +167,7 @@ where
} }
} }
pub trait Eth1ChainBackend<T: EthSpec, S: Store<T>>: Sized + Send + Sync { pub trait Eth1ChainBackend<T: EthSpec>: Sized + Send + Sync {
/// Returns the `Eth1Data` that should be included in a block being produced for the given /// Returns the `Eth1Data` that should be included in a block being produced for the given
/// `state`. /// `state`.
fn eth1_data(&self, beacon_state: &BeaconState<T>, spec: &ChainSpec) fn eth1_data(&self, beacon_state: &BeaconState<T>, spec: &ChainSpec)
@ -195,12 +191,7 @@ pub trait Eth1ChainBackend<T: EthSpec, S: Store<T>>: Sized + Send + Sync {
fn as_bytes(&self) -> Vec<u8>; fn as_bytes(&self) -> Vec<u8>;
/// Create a `Eth1ChainBackend` instance given encoded bytes. /// Create a `Eth1ChainBackend` instance given encoded bytes.
fn from_bytes( fn from_bytes(bytes: &[u8], config: Eth1Config, log: Logger) -> Result<Self, String>;
bytes: &[u8],
config: Eth1Config,
store: Arc<S>,
log: Logger,
) -> Result<Self, String>;
} }
/// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data. /// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data.
@ -208,9 +199,9 @@ pub trait Eth1ChainBackend<T: EthSpec, S: Store<T>>: Sized + Send + Sync {
/// Never creates deposits, therefore the validator set is static. /// Never creates deposits, therefore the validator set is static.
/// ///
/// This was used in the 2019 Canada interop workshops. /// This was used in the 2019 Canada interop workshops.
pub struct DummyEth1ChainBackend<T: EthSpec, S: Store<T>>(PhantomData<(T, S)>); pub struct DummyEth1ChainBackend<T: EthSpec>(PhantomData<T>);
impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for DummyEth1ChainBackend<T, S> { impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
/// Produce some deterministic junk based upon the current epoch. /// Produce some deterministic junk based upon the current epoch.
fn eth1_data(&self, state: &BeaconState<T>, _spec: &ChainSpec) -> Result<Eth1Data, Error> { fn eth1_data(&self, state: &BeaconState<T>, _spec: &ChainSpec) -> Result<Eth1Data, Error> {
let current_epoch = state.current_epoch(); let current_epoch = state.current_epoch();
@ -243,17 +234,12 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for DummyEth1ChainBackend<T
} }
/// Create dummy eth1 backend. /// Create dummy eth1 backend.
fn from_bytes( fn from_bytes(_bytes: &[u8], _config: Eth1Config, _log: Logger) -> Result<Self, String> {
_bytes: &[u8],
_config: Eth1Config,
_store: Arc<S>,
_log: Logger,
) -> Result<Self, String> {
Ok(Self(PhantomData)) Ok(Self(PhantomData))
} }
} }
impl<T: EthSpec, S: Store<T>> Default for DummyEth1ChainBackend<T, S> { impl<T: EthSpec> Default for DummyEth1ChainBackend<T> {
fn default() -> Self { fn default() -> Self {
Self(PhantomData) Self(PhantomData)
} }
@ -265,21 +251,19 @@ impl<T: EthSpec, S: Store<T>> Default for DummyEth1ChainBackend<T, S> {
/// The `core` connects to some external eth1 client (e.g., Parity/Geth) and polls it for /// The `core` connects to some external eth1 client (e.g., Parity/Geth) and polls it for
/// information. /// information.
#[derive(Clone)] #[derive(Clone)]
pub struct CachingEth1Backend<T: EthSpec, S> { pub struct CachingEth1Backend<T: EthSpec> {
pub core: HttpService, pub core: HttpService,
store: Arc<S>,
log: Logger, log: Logger,
_phantom: PhantomData<T>, _phantom: PhantomData<T>,
} }
impl<T: EthSpec, S: Store<T>> CachingEth1Backend<T, S> { impl<T: EthSpec> CachingEth1Backend<T> {
/// Instantiates `self` with empty caches. /// Instantiates `self` with empty caches.
/// ///
/// Does not connect to the eth1 node or start any tasks to keep the cache updated. /// Does not connect to the eth1 node or start any tasks to keep the cache updated.
pub fn new(config: Eth1Config, log: Logger, store: Arc<S>) -> Self { pub fn new(config: Eth1Config, log: Logger) -> Self {
Self { Self {
core: HttpService::new(config, log.clone()), core: HttpService::new(config, log.clone()),
store,
log, log,
_phantom: PhantomData, _phantom: PhantomData,
} }
@ -291,17 +275,16 @@ impl<T: EthSpec, S: Store<T>> CachingEth1Backend<T, S> {
} }
/// Instantiates `self` from an existing service. /// Instantiates `self` from an existing service.
pub fn from_service(service: HttpService, store: Arc<S>) -> Self { pub fn from_service(service: HttpService) -> Self {
Self { Self {
log: service.log.clone(), log: service.log.clone(),
core: service, core: service,
store,
_phantom: PhantomData, _phantom: PhantomData,
} }
} }
} }
impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S> { impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> { fn eth1_data(&self, state: &BeaconState<T>, spec: &ChainSpec) -> Result<Eth1Data, Error> {
let period = T::SlotsPerEth1VotingPeriod::to_u64(); let period = T::SlotsPerEth1VotingPeriod::to_u64();
let voting_period_start_slot = (state.slot / period) * period; let voting_period_start_slot = (state.slot / period) * period;
@ -406,16 +389,10 @@ impl<T: EthSpec, S: Store<T>> Eth1ChainBackend<T, S> for CachingEth1Backend<T, S
} }
/// Recover the cached backend from encoded bytes. /// Recover the cached backend from encoded bytes.
fn from_bytes( fn from_bytes(bytes: &[u8], config: Eth1Config, log: Logger) -> Result<Self, String> {
bytes: &[u8],
config: Eth1Config,
store: Arc<S>,
log: Logger,
) -> Result<Self, String> {
let inner = HttpService::from_bytes(bytes, config, log.clone())?; let inner = HttpService::from_bytes(bytes, config, log.clone())?;
Ok(Self { Ok(Self {
core: inner, core: inner,
store,
log, log,
_phantom: PhantomData, _phantom: PhantomData,
}) })
@ -572,17 +549,15 @@ mod test {
mod eth1_chain_json_backend { mod eth1_chain_json_backend {
use super::*; use super::*;
use eth1::DepositLog; use eth1::DepositLog;
use store::MemoryStore;
use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder}; use types::test_utils::{generate_deterministic_keypair, TestingDepositBuilder};
fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E, MemoryStore<E>>, E, MemoryStore<E>> { fn get_eth1_chain() -> Eth1Chain<CachingEth1Backend<E>, E> {
let eth1_config = Eth1Config { let eth1_config = Eth1Config {
..Eth1Config::default() ..Eth1Config::default()
}; };
let log = null_logger().unwrap(); let log = null_logger().unwrap();
let store = Arc::new(MemoryStore::open()); Eth1Chain::new(CachingEth1Backend::new(eth1_config, log))
Eth1Chain::new(CachingEth1Backend::new(eth1_config, log, store))
} }
fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog { fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog {

View File

@ -9,14 +9,16 @@ use std::sync::Arc;
use std::thread; use std::thread;
use store::hot_cold_store::{process_finalization, HotColdDBError}; use store::hot_cold_store::{process_finalization, HotColdDBError};
use store::iter::{ParentRootBlockIterator, RootsIterator}; use store::iter::{ParentRootBlockIterator, RootsIterator};
use store::{Error, Store, StoreOp}; use store::{Error, ItemStore, StoreOp};
pub use store::{HotColdDB, MemoryStore}; pub use store::{HotColdDB, MemoryStore};
use types::*; use types::*;
use types::{BeaconState, EthSpec, Hash256, Slot}; use types::{BeaconState, EthSpec, Hash256, Slot};
/// Trait for migration processes that update the database upon finalization. /// Trait for migration processes that update the database upon finalization.
pub trait Migrate<E: EthSpec>: Send + Sync + 'static { pub trait Migrate<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>:
fn new(db: Arc<HotColdDB<E>>, log: Logger) -> Self; Send + Sync + 'static
{
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self;
fn process_finalization( fn process_finalization(
&self, &self,
@ -30,13 +32,13 @@ pub trait Migrate<E: EthSpec>: Send + Sync + 'static {
} }
/// Traverses live heads and prunes blocks and states of chains that we know can't be built /// Traverses live heads and prunes blocks and states of chains that we know can't be built
/// upon because finalization would prohibit it. This is a optimisation intended to save disk /// upon because finalization would prohibit it. This is an optimisation intended to save disk
/// space. /// space.
/// ///
/// Assumptions: /// Assumptions:
/// * It is called after every finalization. /// * It is called after every finalization.
fn prune_abandoned_forks( fn prune_abandoned_forks(
store: Arc<HotColdDB<E>>, store: Arc<HotColdDB<E, Hot, Cold>>,
head_tracker: Arc<HeadTracker>, head_tracker: Arc<HeadTracker>,
old_finalized_block_hash: SignedBeaconBlockHash, old_finalized_block_hash: SignedBeaconBlockHash,
new_finalized_block_hash: SignedBeaconBlockHash, new_finalized_block_hash: SignedBeaconBlockHash,
@ -171,8 +173,8 @@ pub trait Migrate<E: EthSpec>: Send + Sync + 'static {
/// Migrator that does nothing, for stores that don't need migration. /// Migrator that does nothing, for stores that don't need migration.
pub struct NullMigrator; pub struct NullMigrator;
impl<E: EthSpec> Migrate<E> for NullMigrator { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold> for NullMigrator {
fn new(_: Arc<HotColdDB<E>>, _: Logger) -> Self { fn new(_: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
NullMigrator NullMigrator
} }
} }
@ -180,12 +182,14 @@ impl<E: EthSpec> Migrate<E> for NullMigrator {
/// Migrator that immediately calls the store's migration function, blocking the current execution. /// Migrator that immediately calls the store's migration function, blocking the current execution.
/// ///
/// Mostly useful for tests. /// Mostly useful for tests.
pub struct BlockingMigrator<E: EthSpec> { pub struct BlockingMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
db: Arc<HotColdDB<E>>, db: Arc<HotColdDB<E, Hot, Cold>>,
} }
impl<E: EthSpec> Migrate<E> for BlockingMigrator<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
fn new(db: Arc<HotColdDB<E>>, _: Logger) -> Self { for BlockingMigrator<E, Hot, Cold>
{
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, _: Logger) -> Self {
BlockingMigrator { db } BlockingMigrator { db }
} }
@ -225,14 +229,16 @@ type MpscSender<E> = mpsc::Sender<(
)>; )>;
/// Migrator that runs a background thread to migrate state from the hot to the cold database. /// Migrator that runs a background thread to migrate state from the hot to the cold database.
pub struct BackgroundMigrator<E: EthSpec> { pub struct BackgroundMigrator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
db: Arc<HotColdDB<E>>, db: Arc<HotColdDB<E, Hot, Cold>>,
tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>, tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
log: Logger, log: Logger,
} }
impl<E: EthSpec> Migrate<E> for BackgroundMigrator<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Migrate<E, Hot, Cold>
fn new(db: Arc<HotColdDB<E>>, log: Logger) -> Self { for BackgroundMigrator<E, Hot, Cold>
{
fn new(db: Arc<HotColdDB<E, Hot, Cold>>, log: Logger) -> Self {
let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone())); let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
Self { db, tx_thread, log } Self { db, tx_thread, log }
} }
@ -283,7 +289,7 @@ impl<E: EthSpec> Migrate<E> for BackgroundMigrator<E> {
} }
} }
impl<E: EthSpec> BackgroundMigrator<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> BackgroundMigrator<E, Hot, Cold> {
/// Return true if a migration needs to be performed, given a new `finalized_slot`. /// Return true if a migration needs to be performed, given a new `finalized_slot`.
fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool { fn needs_migration(&self, finalized_slot: Slot, max_finality_distance: u64) -> bool {
let finality_distance = finalized_slot - self.db.get_split_slot(); let finality_distance = finalized_slot - self.db.get_split_slot();
@ -294,7 +300,7 @@ impl<E: EthSpec> BackgroundMigrator<E> {
/// ///
/// Return a channel handle for sending new finalized states to the thread. /// Return a channel handle for sending new finalized states to the thread.
fn spawn_thread( fn spawn_thread(
db: Arc<HotColdDB<E>>, db: Arc<HotColdDB<E, Hot, Cold>>,
log: Logger, log: Logger,
) -> ( ) -> (
mpsc::Sender<( mpsc::Sender<(

View File

@ -18,7 +18,7 @@ use std::borrow::Cow;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use store::{HotColdDB, MemoryStore, Store}; use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore};
use tempfile::{tempdir, TempDir}; use tempfile::{tempdir, TempDir};
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
@ -34,17 +34,19 @@ pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690;
// This parameter is required by a builder but not used because we use the `TestingSlotClock`. // This parameter is required by a builder but not used because we use the `TestingSlotClock`.
pub const HARNESS_SLOT_TIME: Duration = Duration::from_secs(1); pub const HARNESS_SLOT_TIME: Duration = Duration::from_secs(1);
pub type BaseHarnessType<TStore, TStoreMigrator, TEthSpec> = Witness< pub type BaseHarnessType<TStoreMigrator, TEthSpec, THotStore, TColdStore> = Witness<
TStore,
TStoreMigrator, TStoreMigrator,
TestingSlotClock, TestingSlotClock,
CachingEth1Backend<TEthSpec, TStore>, CachingEth1Backend<TEthSpec>,
TEthSpec, TEthSpec,
NullEventHandler<TEthSpec>, NullEventHandler<TEthSpec>,
THotStore,
TColdStore,
>; >;
pub type HarnessType<E> = BaseHarnessType<MemoryStore<E>, NullMigrator, E>; pub type HarnessType<E> = BaseHarnessType<NullMigrator, E, MemoryStore<E>, MemoryStore<E>>;
pub type DiskHarnessType<E> = BaseHarnessType<HotColdDB<E>, BlockingMigrator<E>, E>; pub type DiskHarnessType<E> =
BaseHarnessType<BlockingMigrator<E, LevelDB<E>, LevelDB<E>>, E, LevelDB<E>, LevelDB<E>>;
/// Indicates how the `BeaconChainHarness` should produce blocks. /// Indicates how the `BeaconChainHarness` should produce blocks.
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
@ -84,12 +86,12 @@ pub struct BeaconChainHarness<T: BeaconChainTypes> {
impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> { impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
/// Instantiate a new harness with `validator_count` initial validators. /// Instantiate a new harness with `validator_count` initial validators.
pub fn new(eth_spec_instance: E, keypairs: Vec<Keypair>) -> Self { pub fn new(eth_spec_instance: E, keypairs: Vec<Keypair>, config: StoreConfig) -> Self {
// Setting the target aggregators to really high means that _all_ validators in the // Setting the target aggregators to really high means that _all_ validators in the
// committee are required to produce an aggregate. This is overkill, however with small // committee are required to produce an aggregate. This is overkill, however with small
// validator counts it's the only way to be certain there is _at least one_ aggregator per // validator counts it's the only way to be certain there is _at least one_ aggregator per
// committee. // committee.
Self::new_with_target_aggregators(eth_spec_instance, keypairs, 1 << 32) Self::new_with_target_aggregators(eth_spec_instance, keypairs, 1 << 32, config)
} }
/// Instantiate a new harness with `validator_count` initial validators and a custom /// Instantiate a new harness with `validator_count` initial validators and a custom
@ -98,6 +100,7 @@ impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
eth_spec_instance: E, eth_spec_instance: E,
keypairs: Vec<Keypair>, keypairs: Vec<Keypair>,
target_aggregators_per_committee: u64, target_aggregators_per_committee: u64,
config: StoreConfig,
) -> Self { ) -> Self {
let data_dir = tempdir().expect("should create temporary data_dir"); let data_dir = tempdir().expect("should create temporary data_dir");
let mut spec = E::default_spec(); let mut spec = E::default_spec();
@ -105,11 +108,11 @@ impl<E: EthSpec> BeaconChainHarness<HarnessType<E>> {
spec.target_aggregators_per_committee = target_aggregators_per_committee; spec.target_aggregators_per_committee = target_aggregators_per_committee;
let log = NullLoggerBuilder.build().expect("logger should build"); let log = NullLoggerBuilder.build().expect("logger should build");
let store = HotColdDB::open_ephemeral(config, spec.clone(), log.clone()).unwrap();
let chain = BeaconChainBuilder::new(eth_spec_instance) let chain = BeaconChainBuilder::new(eth_spec_instance)
.logger(log.clone()) .logger(log)
.custom_spec(spec.clone()) .custom_spec(spec.clone())
.store(Arc::new(MemoryStore::open())) .store(Arc::new(store))
.store_migrator(NullMigrator) .store_migrator(NullMigrator)
.data_dir(data_dir.path().to_path_buf()) .data_dir(data_dir.path().to_path_buf())
.genesis_state( .genesis_state(
@ -140,7 +143,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
/// Instantiate a new harness with `validator_count` initial validators. /// Instantiate a new harness with `validator_count` initial validators.
pub fn new_with_disk_store( pub fn new_with_disk_store(
eth_spec_instance: E, eth_spec_instance: E,
store: Arc<HotColdDB<E>>, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
keypairs: Vec<Keypair>, keypairs: Vec<Keypair>,
) -> Self { ) -> Self {
let data_dir = tempdir().expect("should create temporary data_dir"); let data_dir = tempdir().expect("should create temporary data_dir");
@ -180,7 +183,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
/// Instantiate a new harness with `validator_count` initial validators. /// Instantiate a new harness with `validator_count` initial validators.
pub fn resume_from_disk_store( pub fn resume_from_disk_store(
eth_spec_instance: E, eth_spec_instance: E,
store: Arc<HotColdDB<E>>, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
keypairs: Vec<Keypair>, keypairs: Vec<Keypair>,
data_dir: TempDir, data_dir: TempDir,
) -> Self { ) -> Self {
@ -192,7 +195,10 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
.logger(log.clone()) .logger(log.clone())
.custom_spec(spec) .custom_spec(spec)
.store(store.clone()) .store(store.clone())
.store_migrator(<BlockingMigrator<_> as Migrate<E>>::new(store, log.clone())) .store_migrator(<BlockingMigrator<_, _, _> as Migrate<E, _, _>>::new(
store,
log.clone(),
))
.data_dir(data_dir.path().to_path_buf()) .data_dir(data_dir.path().to_path_buf())
.resume_from_db() .resume_from_db()
.expect("should resume beacon chain from db") .expect("should resume beacon chain from db")
@ -215,11 +221,12 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
} }
} }
impl<S, M, E> BeaconChainHarness<BaseHarnessType<S, M, E>> impl<M, E, Hot, Cold> BeaconChainHarness<BaseHarnessType<M, E, Hot, Cold>>
where where
S: Store<E>, M: Migrate<E, Hot, Cold>,
M: Migrate<E>,
E: EthSpec, E: EthSpec,
Hot: ItemStore<E>,
Cold: ItemStore<E>,
{ {
/// Advance the slot of the `BeaconChain`. /// Advance the slot of the `BeaconChain`.
/// ///

View File

@ -7,6 +7,7 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}, test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
StateSkipConfig, StateSkipConfig,
}; };
use store::config::StoreConfig;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot}; use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};
@ -25,7 +26,11 @@ lazy_static! {
fn produces_attestations() { fn produces_attestations() {
let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4; let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
let harness = BeaconChainHarness::new(MainnetEthSpec, KEYPAIRS[..].to_vec()); let harness = BeaconChainHarness::new(
MainnetEthSpec,
KEYPAIRS[..].to_vec(),
StoreConfig::default(),
);
// Skip past the genesis slot. // Skip past the genesis slot.
harness.advance_slot(); harness.advance_slot();

View File

@ -9,7 +9,7 @@ use beacon_chain::{
BeaconChain, BeaconChainTypes, BeaconChain, BeaconChainTypes,
}; };
use state_processing::per_slot_processing; use state_processing::per_slot_processing;
use store::Store; use store::config::StoreConfig;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, EthSpec, Hash256, test_utils::generate_deterministic_keypair, AggregateSignature, Attestation, EthSpec, Hash256,
@ -36,6 +36,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
// A kind-of arbitrary number that ensures that _some_ validators are aggregators, but // A kind-of arbitrary number that ensures that _some_ validators are aggregators, but
// not all. // not all.
4, 4,
StoreConfig::default(),
); );
harness.advance_slot(); harness.advance_slot();

View File

@ -7,6 +7,7 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType}, test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, HarnessType},
BeaconSnapshot, BlockError, BeaconSnapshot, BlockError,
}; };
use store::config::StoreConfig;
use types::{ use types::{
test_utils::generate_deterministic_keypair, AggregateSignature, AttestationData, test_utils::generate_deterministic_keypair, AggregateSignature, AttestationData,
AttesterSlashing, Checkpoint, Deposit, DepositData, Epoch, EthSpec, Hash256, AttesterSlashing, Checkpoint, Deposit, DepositData, Epoch, EthSpec, Hash256,
@ -47,7 +48,11 @@ fn get_chain_segment() -> Vec<BeaconSnapshot<E>> {
} }
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> { fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<E>> {
let harness = BeaconChainHarness::new(MainnetEthSpec, KEYPAIRS[0..validator_count].to_vec()); let harness = BeaconChainHarness::new(
MainnetEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
);
harness.advance_slot(); harness.advance_slot();

View File

@ -9,7 +9,7 @@ use beacon_chain::{
}; };
use sloggers::{null::NullLoggerBuilder, Build}; use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc; use std::sync::Arc;
use store::{HotColdDB, StoreConfig}; use store::{HotColdDB, LevelDB, StoreConfig};
use tempfile::{tempdir, TempDir}; use tempfile::{tempdir, TempDir};
use types::{EthSpec, Keypair, MinimalEthSpec}; use types::{EthSpec, Keypair, MinimalEthSpec};
@ -23,7 +23,7 @@ lazy_static! {
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
} }
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E>> { fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
let spec = E::default_spec(); let spec = E::default_spec();
let hot_path = db_path.path().join("hot_db"); let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db"); let cold_path = db_path.path().join("cold_db");

View File

@ -16,7 +16,7 @@ use std::collections::HashSet;
use std::sync::Arc; use std::sync::Arc;
use store::{ use store::{
iter::{BlockRootsIterator, StateRootsIterator}, iter::{BlockRootsIterator, StateRootsIterator},
HotColdDB, Store, StoreConfig, HotColdDB, LevelDB, StoreConfig,
}; };
use tempfile::{tempdir, TempDir}; use tempfile::{tempdir, TempDir};
use tree_hash::TreeHash; use tree_hash::TreeHash;
@ -35,7 +35,7 @@ lazy_static! {
type E = MinimalEthSpec; type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>; type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E>> { fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
let spec = MinimalEthSpec::default_spec(); let spec = MinimalEthSpec::default_spec();
let hot_path = db_path.path().join("hot_db"); let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db"); let cold_path = db_path.path().join("cold_db");
@ -47,7 +47,10 @@ fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E>> {
) )
} }
fn get_harness(store: Arc<HotColdDB<E>>, validator_count: usize) -> TestHarness { fn get_harness(
store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
validator_count: usize,
) -> TestHarness {
let harness = BeaconChainHarness::new_with_disk_store( let harness = BeaconChainHarness::new_with_disk_store(
MinimalEthSpec, MinimalEthSpec,
store, store,
@ -1310,7 +1313,7 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) {
} }
/// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. /// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch.
fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E>>) { fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) {
let split_slot = store.get_split_slot(); let split_slot = store.get_split_slot();
assert_eq!( assert_eq!(
harness harness
@ -1361,7 +1364,7 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let head = harness.chain.head().expect("should get head"); let head = harness.chain.head().expect("should get head");
let mut forward_block_roots = Store::forwards_block_roots_iterator( let mut forward_block_roots = HotColdDB::forwards_block_roots_iterator(
harness.chain.store.clone(), harness.chain.store.clone(),
Slot::new(0), Slot::new(0),
head.beacon_state, head.beacon_state,

View File

@ -13,7 +13,7 @@ use operation_pool::PersistedOperationPool;
use state_processing::{ use state_processing::{
per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError, per_slot_processing, per_slot_processing::Error as SlotProcessingError, EpochProcessingError,
}; };
use store::Store; use store::config::StoreConfig;
use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot}; use types::{BeaconStateError, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot};
// Should ideally be divisible by 3. // Should ideally be divisible by 3.
@ -25,7 +25,11 @@ lazy_static! {
} }
fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<MinimalEthSpec>> { fn get_harness(validator_count: usize) -> BeaconChainHarness<HarnessType<MinimalEthSpec>> {
let harness = BeaconChainHarness::new(MinimalEthSpec, KEYPAIRS[0..validator_count].to_vec()); let harness = BeaconChainHarness::new(
MinimalEthSpec,
KEYPAIRS[0..validator_count].to_vec(),
StoreConfig::default(),
);
harness.advance_slot(); harness.advance_slot();

View File

@ -5,9 +5,9 @@ use beacon_chain::events::TeeEventHandler;
use beacon_chain::{ use beacon_chain::{
builder::{BeaconChainBuilder, Witness}, builder::{BeaconChainBuilder, Witness},
eth1_chain::{CachingEth1Backend, Eth1Chain}, eth1_chain::{CachingEth1Backend, Eth1Chain},
migrate::{BackgroundMigrator, Migrate, NullMigrator}, migrate::{BackgroundMigrator, Migrate},
slot_clock::{SlotClock, SystemTimeSlotClock}, slot_clock::{SlotClock, SystemTimeSlotClock},
store::{HotColdDB, MemoryStore, Store, StoreConfig}, store::{HotColdDB, ItemStore, LevelDB, StoreConfig},
BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler, BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler,
}; };
use bus::Bus; use bus::Bus;
@ -50,7 +50,7 @@ pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000;
/// `self.memory_store(..)` has been called. /// `self.memory_store(..)` has been called.
pub struct ClientBuilder<T: BeaconChainTypes> { pub struct ClientBuilder<T: BeaconChainTypes> {
slot_clock: Option<T::SlotClock>, slot_clock: Option<T::SlotClock>,
store: Option<Arc<T::Store>>, store: Option<Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>>,
store_migrator: Option<T::StoreMigrator>, store_migrator: Option<T::StoreMigrator>,
runtime_context: Option<RuntimeContext<T::EthSpec>>, runtime_context: Option<RuntimeContext<T::EthSpec>>,
chain_spec: Option<ChainSpec>, chain_spec: Option<ChainSpec>,
@ -65,17 +65,26 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
eth_spec_instance: T::EthSpec, eth_spec_instance: T::EthSpec,
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder< ClientBuilder<
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>, Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> >
where where
TStore: Store<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TStoreMigrator: Migrate<TEthSpec>,
TSlotClock: SlotClock + Clone + 'static, TSlotClock: SlotClock + Clone + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
/// Instantiates a new, empty builder. /// Instantiates a new, empty builder.
/// ///
@ -350,8 +359,17 @@ where
/// If type inference errors are being raised, see the comment on the definition of `Self`. /// If type inference errors are being raised, see the comment on the definition of `Self`.
pub fn build( pub fn build(
self, self,
) -> Client<Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>> ) -> Client<
{ Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> {
Client { Client {
beacon_chain: self.beacon_chain, beacon_chain: self.beacon_chain,
network_globals: self.network_globals, network_globals: self.network_globals,
@ -361,17 +379,26 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder< ClientBuilder<
Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>, Witness<
TStoreMigrator,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> >
where where
TStore: Store<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TStoreMigrator: Migrate<TEthSpec>,
TSlotClock: SlotClock + Clone + 'static, TSlotClock: SlotClock + Clone + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
/// Consumes the internal `BeaconChainBuilder`, attaching the resulting `BeaconChain` to self. /// Consumes the internal `BeaconChainBuilder`, attaching the resulting `BeaconChain` to self.
pub fn build_beacon_chain(mut self) -> Result<Self, String> { pub fn build_beacon_chain(mut self) -> Result<Self, String> {
@ -401,23 +428,25 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
ClientBuilder< ClientBuilder<
Witness< Witness<
TStore,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
TEth1Backend, TEth1Backend,
TEthSpec, TEthSpec,
WebSocketSender<TEthSpec>, WebSocketSender<TEthSpec>,
THotStore,
TColdStore,
>, >,
> >
where where
TStore: Store<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TStoreMigrator: Migrate<TEthSpec>,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
/// Specifies that the `BeaconChain` should publish events using the WebSocket server. /// Specifies that the `BeaconChain` should publish events using the WebSocket server.
pub fn websocket_event_handler(mut self, config: WebSocketConfig) -> Result<Self, String> { pub fn websocket_event_handler(mut self, config: WebSocketConfig) -> Result<Self, String> {
@ -442,23 +471,25 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, THotStore, TColdStore>
ClientBuilder< ClientBuilder<
Witness< Witness<
TStore,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
TEth1Backend, TEth1Backend,
TEthSpec, TEthSpec,
TeeEventHandler<TEthSpec>, TeeEventHandler<TEthSpec>,
THotStore,
TColdStore,
>, >,
> >
where where
TStore: Store<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TStoreMigrator: Migrate<TEthSpec>,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
/// Specifies that the `BeaconChain` should publish events using the WebSocket server. /// Specifies that the `BeaconChain` should publish events using the WebSocket server.
pub fn tee_event_handler( pub fn tee_event_handler(
@ -490,18 +521,19 @@ where
impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder< ClientBuilder<
Witness< Witness<
HotColdDB<TEthSpec>,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
TEth1Backend, TEth1Backend,
TEthSpec, TEthSpec,
TEventHandler, TEventHandler,
LevelDB<TEthSpec>,
LevelDB<TEthSpec>,
>, >,
> >
where where
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TStoreMigrator: Migrate<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, LevelDB<TEthSpec>, LevelDB<TEthSpec>> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, HotColdDB<TEthSpec>> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
{ {
@ -529,50 +561,25 @@ where
} }
} }
impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler> impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder< ClientBuilder<
Witness< Witness<
MemoryStore<TEthSpec>, BackgroundMigrator<TEthSpec, THotStore, TColdStore>,
NullMigrator,
TSlotClock, TSlotClock,
TEth1Backend, TEth1Backend,
TEthSpec, TEthSpec,
TEventHandler, TEventHandler,
THotStore,
TColdStore,
>, >,
> >
where where
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, MemoryStore<TEthSpec>> + 'static, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static,
{
/// Specifies that the `Client` should use a `MemoryStore` database.
///
/// Also sets the `store_migrator` to the `NullMigrator`, as that's the only viable choice.
pub fn memory_store(mut self) -> Self {
let store = MemoryStore::open();
self.store = Some(Arc::new(store));
self.store_migrator = Some(NullMigrator);
self
}
}
impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
ClientBuilder<
Witness<
HotColdDB<TEthSpec>,
BackgroundMigrator<TEthSpec>,
TSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
>,
>
where
TSlotClock: SlotClock + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, HotColdDB<TEthSpec>> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
pub fn background_migrator(mut self) -> Result<Self, String> { pub fn background_migrator(mut self) -> Result<Self, String> {
let context = self let context = self
@ -588,23 +595,25 @@ where
} }
} }
impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler> impl<TStoreMigrator, TSlotClock, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder< ClientBuilder<
Witness< Witness<
TStore,
TStoreMigrator, TStoreMigrator,
TSlotClock, TSlotClock,
CachingEth1Backend<TEthSpec, TStore>, CachingEth1Backend<TEthSpec>,
TEthSpec, TEthSpec,
TEventHandler, TEventHandler,
THotStore,
TColdStore,
>, >,
> >
where where
TStore: Store<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TStoreMigrator: Migrate<TEthSpec>,
TSlotClock: SlotClock + 'static, TSlotClock: SlotClock + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
/// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node /// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node
/// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during /// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during
@ -618,10 +627,6 @@ where
let beacon_chain_builder = self let beacon_chain_builder = self
.beacon_chain_builder .beacon_chain_builder
.ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?; .ok_or_else(|| "caching_eth1_backend requires a beacon_chain_builder")?;
let store = self
.store
.clone()
.ok_or_else(|| "caching_eth1_backend requires a store".to_string())?;
let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { let backend = if let Some(eth1_service_from_genesis) = self.eth1_service {
eth1_service_from_genesis.update_config(config)?; eth1_service_from_genesis.update_config(config)?;
@ -636,7 +641,7 @@ where
// adding earlier blocks too. // adding earlier blocks too.
eth1_service_from_genesis.drop_block_cache(); eth1_service_from_genesis.drop_block_cache();
CachingEth1Backend::from_service(eth1_service_from_genesis, store) CachingEth1Backend::from_service(eth1_service_from_genesis)
} else { } else {
beacon_chain_builder beacon_chain_builder
.get_persisted_eth1_backend()? .get_persisted_eth1_backend()?
@ -644,18 +649,11 @@ where
Eth1Chain::from_ssz_container( Eth1Chain::from_ssz_container(
&persisted, &persisted,
config.clone(), config.clone(),
store.clone(),
&context.log().clone(), &context.log().clone(),
) )
.map(|chain| chain.into_backend()) .map(|chain| chain.into_backend())
}) })
.unwrap_or_else(|| { .unwrap_or_else(|| Ok(CachingEth1Backend::new(config, context.log().clone())))?
Ok(CachingEth1Backend::new(
config,
context.log().clone(),
store,
))
})?
}; };
self.eth1_service = None; self.eth1_service = None;
@ -699,16 +697,25 @@ where
} }
} }
impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler> impl<TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler, THotStore, TColdStore>
ClientBuilder< ClientBuilder<
Witness<TStore, TStoreMigrator, SystemTimeSlotClock, TEth1Backend, TEthSpec, TEventHandler>, Witness<
TStoreMigrator,
SystemTimeSlotClock,
TEth1Backend,
TEthSpec,
TEventHandler,
THotStore,
TColdStore,
>,
> >
where where
TStore: Store<TEthSpec> + 'static, TStoreMigrator: Migrate<TEthSpec, THotStore, TColdStore>,
TStoreMigrator: Migrate<TEthSpec>, TEth1Backend: Eth1ChainBackend<TEthSpec> + 'static,
TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
TEthSpec: EthSpec + 'static, TEthSpec: EthSpec + 'static,
TEventHandler: EventHandler<TEthSpec> + 'static, TEventHandler: EventHandler<TEthSpec> + 'static,
THotStore: ItemStore<TEthSpec> + 'static,
TColdStore: ItemStore<TEthSpec> + 'static,
{ {
/// Specifies that the slot clock should read the time from the computers system clock. /// Specifies that the slot clock should read the time from the computers system clock.
pub fn system_time_slot_clock(mut self) -> Result<Self, String> { pub fn system_time_slot_clock(mut self) -> Result<Self, String> {

View File

@ -17,19 +17,21 @@ mod tests {
use sloggers::{null::NullLoggerBuilder, Build}; use sloggers::{null::NullLoggerBuilder, Build};
use slot_clock::{SlotClock, SystemTimeSlotClock}; use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::time::{Duration, SystemTime}; use std::time::{Duration, SystemTime};
use store::MemoryStore; use store::config::StoreConfig;
use store::{HotColdDB, MemoryStore};
use tempfile::tempdir; use tempfile::tempdir;
use types::{CommitteeIndex, EnrForkId, EthSpec, MinimalEthSpec}; use types::{CommitteeIndex, EnrForkId, EthSpec, MinimalEthSpec};
const SLOT_DURATION_MILLIS: u64 = 200; const SLOT_DURATION_MILLIS: u64 = 200;
type TestBeaconChainType = Witness< type TestBeaconChainType = Witness<
MemoryStore<MinimalEthSpec>,
NullMigrator, NullMigrator,
SystemTimeSlotClock, SystemTimeSlotClock,
CachingEth1Backend<MinimalEthSpec, MemoryStore<MinimalEthSpec>>, CachingEth1Backend<MinimalEthSpec>,
MinimalEthSpec, MinimalEthSpec,
NullEventHandler<MinimalEthSpec>, NullEventHandler<MinimalEthSpec>,
MemoryStore<MinimalEthSpec>,
MemoryStore<MinimalEthSpec>,
>; >;
pub struct TestBeaconChain { pub struct TestBeaconChain {
@ -44,11 +46,14 @@ mod tests {
let keypairs = generate_deterministic_keypairs(1); let keypairs = generate_deterministic_keypairs(1);
let log = get_logger(); let log = get_logger();
let store =
HotColdDB::open_ephemeral(StoreConfig::default(), spec.clone(), log.clone())
.unwrap();
let chain = Arc::new( let chain = Arc::new(
BeaconChainBuilder::new(MinimalEthSpec) BeaconChainBuilder::new(MinimalEthSpec)
.logger(log.clone()) .logger(log.clone())
.custom_spec(spec.clone()) .custom_spec(spec.clone())
.store(Arc::new(MemoryStore::open())) .store(Arc::new(store))
.store_migrator(NullMigrator) .store_migrator(NullMigrator)
.data_dir(data_dir.path().to_path_buf()) .data_dir(data_dir.path().to_path_buf())
.genesis_state( .genesis_state(
@ -85,7 +90,7 @@ mod tests {
} }
lazy_static! { lazy_static! {
static ref CHAIN: TestBeaconChain = { TestBeaconChain::new_with_system_clock() }; static ref CHAIN: TestBeaconChain = TestBeaconChain::new_with_system_clock();
} }
fn get_attestation_service() -> AttestationService<TestBeaconChainType> { fn get_attestation_service() -> AttestationService<TestBeaconChainType> {

View File

@ -1,13 +1,15 @@
use eth2_libp2p::Enr; use eth2_libp2p::Enr;
use rlp; use rlp;
use std::sync::Arc; use std::sync::Arc;
use store::{DBColumn, Error as StoreError, Store, StoreItem}; use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem};
use types::{EthSpec, Hash256}; use types::{EthSpec, Hash256};
/// 32-byte key for accessing the `DhtEnrs`. /// 32-byte key for accessing the `DhtEnrs`.
pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE"; pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE";
pub fn load_dht<T: Store<E>, E: EthSpec>(store: Arc<T>) -> Vec<Enr> { pub fn load_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Vec<Enr> {
// Load DHT from store // Load DHT from store
let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
match store.get_item(&key) { match store.get_item(&key) {
@ -20,8 +22,8 @@ pub fn load_dht<T: Store<E>, E: EthSpec>(store: Arc<T>) -> Vec<Enr> {
} }
/// Attempt to persist the ENR's in the DHT to `self.store`. /// Attempt to persist the ENR's in the DHT to `self.store`.
pub fn persist_dht<T: Store<E>, E: EthSpec>( pub fn persist_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: Arc<T>, store: Arc<HotColdDB<E, Hot, Cold>>,
enrs: Vec<Enr>, enrs: Vec<Enr>,
) -> Result<(), store::Error> { ) -> Result<(), store::Error> {
let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
@ -56,14 +58,19 @@ impl StoreItem for PersistedDht {
mod tests { mod tests {
use super::*; use super::*;
use eth2_libp2p::Enr; use eth2_libp2p::Enr;
use sloggers::{null::NullLoggerBuilder, Build};
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use store::config::StoreConfig;
use store::{MemoryStore, Store}; use store::{HotColdDB, MemoryStore};
use types::Hash256; use types::{ChainSpec, Hash256, MinimalEthSpec};
use types::MinimalEthSpec;
#[test] #[test]
fn test_persisted_dht() { fn test_persisted_dht() {
let store = Arc::new(MemoryStore::<MinimalEthSpec>::open()); let log = NullLoggerBuilder.build().unwrap();
let store: HotColdDB<
MinimalEthSpec,
MemoryStore<MinimalEthSpec>,
MemoryStore<MinimalEthSpec>,
> = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap();
let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()]; let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()];
let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
store store

View File

@ -13,7 +13,6 @@ use itertools::process_results;
use slog::{debug, error, o, trace, warn}; use slog::{debug, error, o, trace, warn};
use ssz::Encode; use ssz::Encode;
use std::sync::Arc; use std::sync::Arc;
use store::Store;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use types::{ use types::{
Attestation, ChainSpec, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock, Attestation, ChainSpec, Epoch, EthSpec, Hash256, SignedAggregateAndProof, SignedBeaconBlock,

View File

@ -17,6 +17,7 @@ use rest_types::ValidatorSubscription;
use slog::{debug, error, info, o, trace}; use slog::{debug, error, info, o, trace};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use store::HotColdDB;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio::time::Delay; use tokio::time::Delay;
use types::EthSpec; use types::EthSpec;
@ -40,7 +41,7 @@ pub struct NetworkService<T: BeaconChainTypes> {
/// lighthouse. /// lighthouse.
router_send: mpsc::UnboundedSender<RouterMessage<T::EthSpec>>, router_send: mpsc::UnboundedSender<RouterMessage<T::EthSpec>>,
/// A reference to lighthouse's database to persist the DHT. /// A reference to lighthouse's database to persist the DHT.
store: Arc<T::Store>, store: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
/// A collection of global variables, accessible outside of the network service. /// A collection of global variables, accessible outside of the network service.
network_globals: Arc<NetworkGlobals<T::EthSpec>>, network_globals: Arc<NetworkGlobals<T::EthSpec>>,
/// A delay that expires when a new fork takes place. /// A delay that expires when a new fork takes place.
@ -78,7 +79,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
let (network_globals, mut libp2p) = let (network_globals, mut libp2p) =
LibP2PService::new(executor.clone(), config, enr_fork_id, &network_log)?; LibP2PService::new(executor.clone(), config, enr_fork_id, &network_log)?;
for enr in load_dht::<T::Store, T::EthSpec>(store.clone()) { for enr in load_dht::<T::EthSpec, T::HotStore, T::ColdStore>(store.clone()) {
libp2p.swarm.add_enr(enr); libp2p.swarm.add_enr(enr);
} }
@ -142,7 +143,7 @@ fn spawn_service<T: BeaconChainTypes>(
"Number of peers" => format!("{}", enrs.len()), "Number of peers" => format!("{}", enrs.len()),
); );
match persist_dht::<T::Store, T::EthSpec>(service.store.clone(), enrs) { match persist_dht::<T::EthSpec, T::HotStore, T::ColdStore>(service.store.clone(), enrs) {
Err(e) => error!( Err(e) => error!(
service.log, service.log,
"Failed to persist DHT on drop"; "Failed to persist DHT on drop";

View File

@ -9,6 +9,7 @@ mod tests {
use sloggers::{null::NullLoggerBuilder, Build}; use sloggers::{null::NullLoggerBuilder, Build};
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use store::config::StoreConfig;
use tokio::runtime::Runtime; use tokio::runtime::Runtime;
use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec}; use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec};
@ -22,7 +23,12 @@ mod tests {
let log = get_logger(); let log = get_logger();
let beacon_chain = Arc::new( let beacon_chain = Arc::new(
BeaconChainHarness::new(MinimalEthSpec, generate_deterministic_keypairs(8)).chain, BeaconChainHarness::new(
MinimalEthSpec,
generate_deterministic_keypairs(8),
StoreConfig::default(),
)
.chain,
); );
let store = beacon_chain.store.clone(); let store = beacon_chain.store.clone();

View File

@ -13,7 +13,6 @@ use rest_types::{
}; };
use std::io::Write; use std::io::Write;
use std::sync::Arc; use std::sync::Arc;
use store::Store;
use slog::{error, Logger}; use slog::{error, Logger};
use types::{ use types::{

View File

@ -8,7 +8,7 @@ use hyper::{Body, Request};
use itertools::process_results; use itertools::process_results;
use network::NetworkMessage; use network::NetworkMessage;
use ssz::Decode; use ssz::Decode;
use store::{iter::AncestorIter, Store}; use store::iter::AncestorIter;
use types::{ use types::{
BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedBeaconBlock, Slot, BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedBeaconBlock, Slot,
}; };

View File

@ -11,7 +11,8 @@ pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir};
pub use eth2_config::Eth2Config; pub use eth2_config::Eth2Config;
use beacon_chain::events::TeeEventHandler; use beacon_chain::events::TeeEventHandler;
use beacon_chain::migrate::{BackgroundMigrator, HotColdDB}; use beacon_chain::migrate::BackgroundMigrator;
use beacon_chain::store::LevelDB;
use beacon_chain::{ use beacon_chain::{
builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock,
}; };
@ -25,12 +26,13 @@ use types::EthSpec;
/// A type-alias to the tighten the definition of a production-intended `Client`. /// A type-alias to the tighten the definition of a production-intended `Client`.
pub type ProductionClient<E> = Client< pub type ProductionClient<E> = Client<
Witness< Witness<
HotColdDB<E>, BackgroundMigrator<E, LevelDB<E>, LevelDB<E>>,
BackgroundMigrator<E>,
SystemTimeSlotClock, SystemTimeSlotClock,
CachingEth1Backend<E, HotColdDB<E>>, CachingEth1Backend<E>,
E, E,
TeeEventHandler<E>, TeeEventHandler<E>,
LevelDB<E>,
LevelDB<E>,
>, >,
>; >;

View File

@ -29,3 +29,4 @@ serde_derive = "1.0.110"
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lru = "0.5.1" lru = "0.5.1"
sloggers = "1.0.0"

View File

@ -1,5 +1,5 @@
use crate::chunked_vector::{chunk_key, Chunk, Field}; use crate::chunked_vector::{chunk_key, Chunk, Field};
use crate::HotColdDB; use crate::{HotColdDB, ItemStore};
use slog::error; use slog::error;
use std::sync::Arc; use std::sync::Arc;
use types::{ChainSpec, EthSpec, Slot}; use types::{ChainSpec, EthSpec, Slot};
@ -7,22 +7,26 @@ use types::{ChainSpec, EthSpec, Slot};
/// Iterator over the values of a `BeaconState` vector field (like `block_roots`). /// Iterator over the values of a `BeaconState` vector field (like `block_roots`).
/// ///
/// Uses the freezer DB's separate table to load the values. /// Uses the freezer DB's separate table to load the values.
pub struct ChunkedVectorIter<F, E> pub struct ChunkedVectorIter<F, E, Hot, Cold>
where where
F: Field<E>, F: Field<E>,
E: EthSpec, E: EthSpec,
Hot: ItemStore<E>,
Cold: ItemStore<E>,
{ {
pub(crate) store: Arc<HotColdDB<E>>, pub(crate) store: Arc<HotColdDB<E, Hot, Cold>>,
current_vindex: usize, current_vindex: usize,
pub(crate) end_vindex: usize, pub(crate) end_vindex: usize,
next_cindex: usize, next_cindex: usize,
current_chunk: Chunk<F::Value>, current_chunk: Chunk<F::Value>,
} }
impl<F, E> ChunkedVectorIter<F, E> impl<F, E, Hot, Cold> ChunkedVectorIter<F, E, Hot, Cold>
where where
F: Field<E>, F: Field<E>,
E: EthSpec, E: EthSpec,
Hot: ItemStore<E>,
Cold: ItemStore<E>,
{ {
/// Create a new iterator which can yield elements from `start_vindex` up to the last /// Create a new iterator which can yield elements from `start_vindex` up to the last
/// index stored by the restore point at `last_restore_point_slot`. /// index stored by the restore point at `last_restore_point_slot`.
@ -31,7 +35,7 @@ where
/// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can
/// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`).
pub fn new( pub fn new(
store: Arc<HotColdDB<E>>, store: Arc<HotColdDB<E, Hot, Cold>>,
start_vindex: usize, start_vindex: usize,
last_restore_point_slot: Slot, last_restore_point_slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
@ -53,10 +57,12 @@ where
} }
} }
impl<F, E> Iterator for ChunkedVectorIter<F, E> impl<F, E, Hot, Cold> Iterator for ChunkedVectorIter<F, E, Hot, Cold>
where where
F: Field<E>, F: Field<E>,
E: EthSpec, E: EthSpec,
Hot: ItemStore<E>,
Cold: ItemStore<E>,
{ {
type Item = (usize, F::Value); type Item = (usize, F::Value);

View File

@ -2,14 +2,14 @@ use crate::chunked_iter::ChunkedVectorIter;
use crate::chunked_vector::BlockRoots; use crate::chunked_vector::BlockRoots;
use crate::errors::{Error, Result}; use crate::errors::{Error, Result};
use crate::iter::BlockRootsIterator; use crate::iter::BlockRootsIterator;
use crate::{HotColdDB, Store}; use crate::{HotColdDB, ItemStore};
use itertools::process_results; use itertools::process_results;
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot};
/// Forwards block roots iterator that makes use of the `block_roots` table in the freezer DB. /// Forwards block roots iterator that makes use of the `block_roots` table in the freezer DB.
pub struct FrozenForwardsBlockRootsIterator<E: EthSpec> { pub struct FrozenForwardsBlockRootsIterator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
inner: ChunkedVectorIter<BlockRoots, E>, inner: ChunkedVectorIter<BlockRoots, E, Hot, Cold>,
} }
/// Forwards block roots iterator that reverses a backwards iterator (only good for short ranges). /// Forwards block roots iterator that reverses a backwards iterator (only good for short ranges).
@ -19,9 +19,9 @@ pub struct SimpleForwardsBlockRootsIterator {
} }
/// Fusion of the above two approaches to forwards iteration. Fast and efficient. /// Fusion of the above two approaches to forwards iteration. Fast and efficient.
pub enum HybridForwardsBlockRootsIterator<E: EthSpec> { pub enum HybridForwardsBlockRootsIterator<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
PreFinalization { PreFinalization {
iter: Box<FrozenForwardsBlockRootsIterator<E>>, iter: Box<FrozenForwardsBlockRootsIterator<E, Hot, Cold>>,
/// Data required by the `PostFinalization` iterator when we get to it. /// Data required by the `PostFinalization` iterator when we get to it.
continuation_data: Box<Option<(BeaconState<E>, Hash256)>>, continuation_data: Box<Option<(BeaconState<E>, Hash256)>>,
}, },
@ -30,9 +30,11 @@ pub enum HybridForwardsBlockRootsIterator<E: EthSpec> {
}, },
} }
impl<E: EthSpec> FrozenForwardsBlockRootsIterator<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
FrozenForwardsBlockRootsIterator<E, Hot, Cold>
{
pub fn new( pub fn new(
store: Arc<HotColdDB<E>>, store: Arc<HotColdDB<E, Hot, Cold>>,
start_slot: Slot, start_slot: Slot,
last_restore_point_slot: Slot, last_restore_point_slot: Slot,
spec: &ChainSpec, spec: &ChainSpec,
@ -48,7 +50,9 @@ impl<E: EthSpec> FrozenForwardsBlockRootsIterator<E> {
} }
} }
impl<E: EthSpec> Iterator for FrozenForwardsBlockRootsIterator<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
for FrozenForwardsBlockRootsIterator<E, Hot, Cold>
{
type Item = (Hash256, Slot); type Item = (Hash256, Slot);
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -59,8 +63,8 @@ impl<E: EthSpec> Iterator for FrozenForwardsBlockRootsIterator<E> {
} }
impl SimpleForwardsBlockRootsIterator { impl SimpleForwardsBlockRootsIterator {
pub fn new<S: Store<E>, E: EthSpec>( pub fn new<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: Arc<S>, store: Arc<HotColdDB<E, Hot, Cold>>,
start_slot: Slot, start_slot: Slot,
end_state: BeaconState<E>, end_state: BeaconState<E>,
end_block_root: Hash256, end_block_root: Hash256,
@ -87,9 +91,11 @@ impl Iterator for SimpleForwardsBlockRootsIterator {
} }
} }
impl<E: EthSpec> HybridForwardsBlockRootsIterator<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
HybridForwardsBlockRootsIterator<E, Hot, Cold>
{
pub fn new( pub fn new(
store: Arc<HotColdDB<E>>, store: Arc<HotColdDB<E, Hot, Cold>>,
start_slot: Slot, start_slot: Slot,
end_state: BeaconState<E>, end_state: BeaconState<E>,
end_block_root: Hash256, end_block_root: Hash256,
@ -157,7 +163,9 @@ impl<E: EthSpec> HybridForwardsBlockRootsIterator<E> {
} }
} }
impl<E: EthSpec> Iterator for HybridForwardsBlockRootsIterator<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
for HybridForwardsBlockRootsIterator<E, Hot, Cold>
{
type Item = Result<(Hash256, Slot)>; type Item = Result<(Hash256, Slot)>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {

View File

@ -5,10 +5,12 @@ use crate::config::StoreConfig;
use crate::forwards_iter::HybridForwardsBlockRootsIterator; use crate::forwards_iter::HybridForwardsBlockRootsIterator;
use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::impls::beacon_state::{get_full_state, store_full_state};
use crate::iter::{ParentRootBlockIterator, StateRootsIterator}; use crate::iter::{ParentRootBlockIterator, StateRootsIterator};
use crate::leveldb_store::LevelDB;
use crate::memory_store::MemoryStore;
use crate::metrics; use crate::metrics;
use crate::{ use crate::{
leveldb_store::LevelDB, DBColumn, Error, ItemStore, KeyValueStore, PartialBeaconState, Store, get_key_for_col, DBColumn, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem,
StoreItem, StoreOp, StoreOp,
}; };
use lru::LruCache; use lru::LruCache;
use parking_lot::{Mutex, RwLock}; use parking_lot::{Mutex, RwLock};
@ -32,7 +34,7 @@ pub const SPLIT_DB_KEY: &str = "FREEZERDBSPLITFREEZERDBSPLITFREE";
/// ///
/// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores /// Stores vector fields like the `block_roots` and `state_roots` separately, and only stores
/// intermittent "restore point" states pre-finalization. /// intermittent "restore point" states pre-finalization.
pub struct HotColdDB<E: EthSpec> { pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
/// The slot and state root at the point where the database is split between hot and cold. /// The slot and state root at the point where the database is split between hot and cold.
/// ///
/// States with slots less than `split.slot` are in the cold DB, while states with slots /// States with slots less than `split.slot` are in the cold DB, while states with slots
@ -40,11 +42,11 @@ pub struct HotColdDB<E: EthSpec> {
split: RwLock<Split>, split: RwLock<Split>,
config: StoreConfig, config: StoreConfig,
/// Cold database containing compact historical data. /// Cold database containing compact historical data.
pub(crate) cold_db: LevelDB<E>, pub(crate) cold_db: Cold,
/// Hot database containing duplicated but quick-to-access recent data. /// Hot database containing duplicated but quick-to-access recent data.
/// ///
/// The hot database also contains all blocks. /// The hot database also contains all blocks.
pub(crate) hot_db: LevelDB<E>, pub(crate) hot_db: Hot,
/// LRU cache of deserialized blocks. Updated whenever a block is loaded. /// LRU cache of deserialized blocks. Updated whenever a block is loaded.
block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>, block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>,
/// Chain spec. /// Chain spec.
@ -84,11 +86,13 @@ pub enum HotColdDBError {
RestorePointBlockHashError(BeaconStateError), RestorePointBlockHashError(BeaconStateError),
} }
impl<E: EthSpec> Store<E> for HotColdDB<E> { impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> {
type ForwardsBlockRootsIterator = HybridForwardsBlockRootsIterator<E>;
/// Store a block and update the LRU cache. /// Store a block and update the LRU cache.
fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error> { pub fn put_block(
&self,
block_root: &Hash256,
block: SignedBeaconBlock<E>,
) -> Result<(), Error> {
// Store on disk. // Store on disk.
self.hot_db.put(block_root, &block)?; self.hot_db.put(block_root, &block)?;
@ -99,7 +103,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
} }
/// Fetch a block from the store. /// Fetch a block from the store.
fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error> { pub fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error> {
metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT); metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT);
// Check the cache. // Check the cache.
@ -120,12 +124,12 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
} }
/// Delete a block from the store and the block cache. /// Delete a block from the store and the block cache.
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { pub fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
self.block_cache.lock().pop(block_root); self.block_cache.lock().pop(block_root);
self.hot_db.delete::<SignedBeaconBlock<E>>(block_root) self.hot_db.delete::<SignedBeaconBlock<E>>(block_root)
} }
fn put_state_summary( pub fn put_state_summary(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
summary: HotStateSummary, summary: HotStateSummary,
@ -134,7 +138,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
} }
/// Store a state in the store. /// Store a state in the store.
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> { pub fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
if state.slot < self.get_split_slot() { if state.slot < self.get_split_slot() {
self.store_cold_state(state_root, &state) self.store_cold_state(state_root, &state)
} else { } else {
@ -143,7 +147,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
} }
/// Fetch a state from the store. /// Fetch a state from the store.
fn get_state( pub fn get_state(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
slot: Option<Slot>, slot: Option<Slot>,
@ -170,7 +174,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
/// than the split point. You shouldn't delete states from the finalized portion of the chain /// than the split point. You shouldn't delete states from the finalized portion of the chain
/// (which are frozen, and won't be deleted), or valid descendents of the finalized checkpoint /// (which are frozen, and won't be deleted), or valid descendents of the finalized checkpoint
/// (which will be deleted by this function but shouldn't be). /// (which will be deleted by this function but shouldn't be).
fn delete_state(&self, state_root: &Hash256, slot: Slot) -> Result<(), Error> { pub fn delete_state(&self, state_root: &Hash256, slot: Slot) -> Result<(), Error> {
// Delete the state summary. // Delete the state summary.
self.hot_db self.hot_db
.key_delete(DBColumn::BeaconStateSummary.into(), state_root.as_bytes())?; .key_delete(DBColumn::BeaconStateSummary.into(), state_root.as_bytes())?;
@ -184,20 +188,20 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
Ok(()) Ok(())
} }
fn forwards_block_roots_iterator( pub fn forwards_block_roots_iterator(
store: Arc<Self>, store: Arc<Self>,
start_slot: Slot, start_slot: Slot,
end_state: BeaconState<E>, end_state: BeaconState<E>,
end_block_root: Hash256, end_block_root: Hash256,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Self::ForwardsBlockRootsIterator, Error> { ) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>>, Error> {
HybridForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root, spec) HybridForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root, spec)
} }
/// Load an epoch boundary state by using the hot state summary look-up. /// Load an epoch boundary state by using the hot state summary look-up.
/// ///
/// Will fall back to the cold DB if a hot state summary is not found. /// Will fall back to the cold DB if a hot state summary is not found.
fn load_epoch_boundary_state( pub fn load_epoch_boundary_state(
&self, &self,
state_root: &Hash256, state_root: &Hash256,
) -> Result<Option<BeaconState<E>>, Error> { ) -> Result<Option<BeaconState<E>>, Error> {
@ -226,21 +230,49 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
} }
} }
fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> { pub fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
self.hot_db.put(key, item) self.hot_db.put(key, item)
} }
fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> { pub fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
self.hot_db.get(key) self.hot_db.get(key)
} }
fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> { pub fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
self.hot_db.exists::<I>(key) self.hot_db.exists::<I>(key)
} }
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> { pub fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
let mut guard = self.block_cache.lock(); let mut guard = self.block_cache.lock();
self.hot_db.do_atomically(batch)?;
let mut key_value_batch: Vec<KeyValueStoreOp> = Vec::with_capacity(batch.len());
for op in batch {
match op {
StoreOp::DeleteBlock(block_hash) => {
let untyped_hash: Hash256 = (*block_hash).into();
let key =
get_key_for_col(DBColumn::BeaconBlock.into(), untyped_hash.as_bytes());
key_value_batch.push(KeyValueStoreOp::DeleteKey(key));
}
StoreOp::DeleteState(state_hash, slot) => {
let untyped_hash: Hash256 = (*state_hash).into();
let state_summary_key = get_key_for_col(
DBColumn::BeaconStateSummary.into(),
untyped_hash.as_bytes(),
);
key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key));
if *slot % E::slots_per_epoch() == 0 {
let state_key =
get_key_for_col(DBColumn::BeaconState.into(), untyped_hash.as_bytes());
key_value_batch.push(KeyValueStoreOp::DeleteKey(state_key));
}
}
}
}
self.hot_db.do_atomically(&key_value_batch)?;
for op in batch { for op in batch {
match op { match op {
StoreOp::DeleteBlock(block_hash) => { StoreOp::DeleteBlock(block_hash) => {
@ -254,7 +286,30 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
} }
} }
impl<E: EthSpec> HotColdDB<E> { impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
pub fn open_ephemeral(
config: StoreConfig,
spec: ChainSpec,
log: Logger,
) -> Result<HotColdDB<E, MemoryStore<E>, MemoryStore<E>>, Error> {
Self::verify_slots_per_restore_point(config.slots_per_restore_point)?;
let db = HotColdDB {
split: RwLock::new(Split::default()),
cold_db: MemoryStore::open(),
hot_db: MemoryStore::open(),
block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
config,
spec,
log,
_phantom: PhantomData,
};
Ok(db)
}
}
impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
/// Open a new or existing database, with the given paths to the hot and cold DBs. /// Open a new or existing database, with the given paths to the hot and cold DBs.
/// ///
/// The `slots_per_restore_point` parameter must be a divisor of `SLOTS_PER_HISTORICAL_ROOT`. /// The `slots_per_restore_point` parameter must be a divisor of `SLOTS_PER_HISTORICAL_ROOT`.
@ -264,7 +319,7 @@ impl<E: EthSpec> HotColdDB<E> {
config: StoreConfig, config: StoreConfig,
spec: ChainSpec, spec: ChainSpec,
log: Logger, log: Logger,
) -> Result<Self, Error> { ) -> Result<HotColdDB<E, LevelDB<E>, LevelDB<E>>, Error> {
Self::verify_slots_per_restore_point(config.slots_per_restore_point)?; Self::verify_slots_per_restore_point(config.slots_per_restore_point)?;
let db = HotColdDB { let db = HotColdDB {
@ -285,7 +340,9 @@ impl<E: EthSpec> HotColdDB<E> {
} }
Ok(db) Ok(db)
} }
}
impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> {
/// Store a post-finalization state efficiently in the hot database. /// Store a post-finalization state efficiently in the hot database.
/// ///
/// On an epoch boundary, store a full state. On an intermediate slot, store /// On an epoch boundary, store a full state. On an intermediate slot, store
@ -675,8 +732,8 @@ impl<E: EthSpec> HotColdDB<E> {
} }
/// Advance the split point of the store, moving new finalized states to the freezer. /// Advance the split point of the store, moving new finalized states to the freezer.
pub fn process_finalization<E: EthSpec>( pub fn process_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: Arc<HotColdDB<E>>, store: Arc<HotColdDB<E, Hot, Cold>>,
frozen_head_root: Hash256, frozen_head_root: Hash256,
frozen_head: &BeaconState<E>, frozen_head: &BeaconState<E>,
) -> Result<(), Error> { ) -> Result<(), Error> {

View File

@ -1,4 +1,4 @@
use crate::{Error, Store}; use crate::{Error, HotColdDB, ItemStore};
use std::borrow::Cow; use std::borrow::Cow;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::sync::Arc; use std::sync::Arc;
@ -12,17 +12,20 @@ use types::{
/// ///
/// It is assumed that all ancestors for this object are stored in the database. If this is not the /// It is assumed that all ancestors for this object are stored in the database. If this is not the
/// case, the iterator will start returning `None` prior to genesis. /// case, the iterator will start returning `None` prior to genesis.
pub trait AncestorIter<U: Store<E>, E: EthSpec, I: Iterator> { pub trait AncestorIter<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>, I: Iterator> {
/// Returns an iterator over the roots of the ancestors of `self`. /// Returns an iterator over the roots of the ancestors of `self`.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<I>; fn try_iter_ancestor_roots(&self, store: Arc<HotColdDB<E, Hot, Cold>>) -> Option<I>;
} }
impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, BlockRootsIterator<'a, E, U>> impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
for SignedBeaconBlock<E> AncestorIter<E, Hot, Cold, BlockRootsIterator<'a, E, Hot, Cold>> for SignedBeaconBlock<E>
{ {
/// Iterates across all available prior block roots of `self`, starting at the most recent and ending /// Iterates across all available prior block roots of `self`, starting at the most recent and ending
/// at genesis. /// at genesis.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<BlockRootsIterator<'a, E, U>> { fn try_iter_ancestor_roots(
&self,
store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Option<BlockRootsIterator<'a, E, Hot, Cold>> {
let state = store let state = store
.get_state(&self.message.state_root, Some(self.message.slot)) .get_state(&self.message.state_root, Some(self.message.slot))
.ok()??; .ok()??;
@ -31,22 +34,27 @@ impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, BlockRootsIterator<'a, E, U
} }
} }
impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, StateRootsIterator<'a, E, U>> impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
for BeaconState<E> AncestorIter<E, Hot, Cold, StateRootsIterator<'a, E, Hot, Cold>> for BeaconState<E>
{ {
/// Iterates across all available prior state roots of `self`, starting at the most recent and ending /// Iterates across all available prior state roots of `self`, starting at the most recent and ending
/// at genesis. /// at genesis.
fn try_iter_ancestor_roots(&self, store: Arc<U>) -> Option<StateRootsIterator<'a, E, U>> { fn try_iter_ancestor_roots(
&self,
store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Option<StateRootsIterator<'a, E, Hot, Cold>> {
// The `self.clone()` here is wasteful. // The `self.clone()` here is wasteful.
Some(StateRootsIterator::owned(store, self.clone())) Some(StateRootsIterator::owned(store, self.clone()))
} }
} }
pub struct StateRootsIterator<'a, T: EthSpec, U: Store<T>> { pub struct StateRootsIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
inner: RootsIterator<'a, T, U>, inner: RootsIterator<'a, T, Hot, Cold>,
} }
impl<'a, T: EthSpec, U: Store<T>> Clone for StateRootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Clone
for StateRootsIterator<'a, T, Hot, Cold>
{
fn clone(&self) -> Self { fn clone(&self) -> Self {
Self { Self {
inner: self.inner.clone(), inner: self.inner.clone(),
@ -54,21 +62,23 @@ impl<'a, T: EthSpec, U: Store<T>> Clone for StateRootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store<T>> StateRootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> StateRootsIterator<'a, T, Hot, Cold> {
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self { pub fn new(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: &'a BeaconState<T>) -> Self {
Self { Self {
inner: RootsIterator::new(store, beacon_state), inner: RootsIterator::new(store, beacon_state),
} }
} }
pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self { pub fn owned(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: BeaconState<T>) -> Self {
Self { Self {
inner: RootsIterator::owned(store, beacon_state), inner: RootsIterator::owned(store, beacon_state),
} }
} }
} }
impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
for StateRootsIterator<'a, T, Hot, Cold>
{
type Item = Result<(Hash256, Slot), Error>; type Item = Result<(Hash256, Slot), Error>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -86,11 +96,13 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
/// exhausted. /// exhausted.
/// ///
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
pub struct BlockRootsIterator<'a, T: EthSpec, U: Store<T>> { pub struct BlockRootsIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
inner: RootsIterator<'a, T, U>, inner: RootsIterator<'a, T, Hot, Cold>,
} }
impl<'a, T: EthSpec, U: Store<T>> Clone for BlockRootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Clone
for BlockRootsIterator<'a, T, Hot, Cold>
{
fn clone(&self) -> Self { fn clone(&self) -> Self {
Self { Self {
inner: self.inner.clone(), inner: self.inner.clone(),
@ -98,23 +110,25 @@ impl<'a, T: EthSpec, U: Store<T>> Clone for BlockRootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store<T>> BlockRootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> BlockRootsIterator<'a, T, Hot, Cold> {
/// Create a new iterator over all block roots in the given `beacon_state` and prior states. /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self { pub fn new(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: &'a BeaconState<T>) -> Self {
Self { Self {
inner: RootsIterator::new(store, beacon_state), inner: RootsIterator::new(store, beacon_state),
} }
} }
/// Create a new iterator over all block roots in the given `beacon_state` and prior states. /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self { pub fn owned(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: BeaconState<T>) -> Self {
Self { Self {
inner: RootsIterator::owned(store, beacon_state), inner: RootsIterator::owned(store, beacon_state),
} }
} }
} }
impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
for BlockRootsIterator<'a, T, Hot, Cold>
{
type Item = Result<(Hash256, Slot), Error>; type Item = Result<(Hash256, Slot), Error>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -125,13 +139,15 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> {
} }
/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`. /// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`.
pub struct RootsIterator<'a, T: EthSpec, U: Store<T>> { pub struct RootsIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
store: Arc<U>, store: Arc<HotColdDB<T, Hot, Cold>>,
beacon_state: Cow<'a, BeaconState<T>>, beacon_state: Cow<'a, BeaconState<T>>,
slot: Slot, slot: Slot,
} }
impl<'a, T: EthSpec, U: Store<T>> Clone for RootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Clone
for RootsIterator<'a, T, Hot, Cold>
{
fn clone(&self) -> Self { fn clone(&self) -> Self {
Self { Self {
store: self.store.clone(), store: self.store.clone(),
@ -141,8 +157,8 @@ impl<'a, T: EthSpec, U: Store<T>> Clone for RootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store<T>> RootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> RootsIterator<'a, T, Hot, Cold> {
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self { pub fn new(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: &'a BeaconState<T>) -> Self {
Self { Self {
store, store,
slot: beacon_state.slot, slot: beacon_state.slot,
@ -150,7 +166,7 @@ impl<'a, T: EthSpec, U: Store<T>> RootsIterator<'a, T, U> {
} }
} }
pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self { pub fn owned(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: BeaconState<T>) -> Self {
Self { Self {
store, store,
slot: beacon_state.slot, slot: beacon_state.slot,
@ -158,7 +174,10 @@ impl<'a, T: EthSpec, U: Store<T>> RootsIterator<'a, T, U> {
} }
} }
pub fn from_block(store: Arc<U>, block_hash: Hash256) -> Result<Self, Error> { pub fn from_block(
store: Arc<HotColdDB<T, Hot, Cold>>,
block_hash: Hash256,
) -> Result<Self, Error> {
let block = store let block = store
.get_block(&block_hash)? .get_block(&block_hash)?
.ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?;
@ -198,7 +217,9 @@ impl<'a, T: EthSpec, U: Store<T>> RootsIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store<T>> Iterator for RootsIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
for RootsIterator<'a, T, Hot, Cold>
{
/// (block_root, state_root, slot) /// (block_root, state_root, slot)
type Item = Result<(Hash256, Hash256, Slot), Error>; type Item = Result<(Hash256, Hash256, Slot), Error>;
@ -208,14 +229,16 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for RootsIterator<'a, T, U> {
} }
/// Block iterator that uses the `parent_root` of each block to backtrack. /// Block iterator that uses the `parent_root` of each block to backtrack.
pub struct ParentRootBlockIterator<'a, E: EthSpec, S: Store<E>> { pub struct ParentRootBlockIterator<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
store: &'a S, store: &'a HotColdDB<E, Hot, Cold>,
next_block_root: Hash256, next_block_root: Hash256,
_phantom: PhantomData<E>, _phantom: PhantomData<E>,
} }
impl<'a, E: EthSpec, S: Store<E>> ParentRootBlockIterator<'a, E, S> { impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
pub fn new(store: &'a S, start_block_root: Hash256) -> Self { ParentRootBlockIterator<'a, E, Hot, Cold>
{
pub fn new(store: &'a HotColdDB<E, Hot, Cold>, start_block_root: Hash256) -> Self {
Self { Self {
store, store,
next_block_root: start_block_root, next_block_root: start_block_root,
@ -240,7 +263,9 @@ impl<'a, E: EthSpec, S: Store<E>> ParentRootBlockIterator<'a, E, S> {
} }
} }
impl<'a, E: EthSpec, S: Store<E>> Iterator for ParentRootBlockIterator<'a, E, S> { impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
for ParentRootBlockIterator<'a, E, Hot, Cold>
{
type Item = Result<(Hash256, SignedBeaconBlock<E>), Error>; type Item = Result<(Hash256, SignedBeaconBlock<E>), Error>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -250,20 +275,20 @@ impl<'a, E: EthSpec, S: Store<E>> Iterator for ParentRootBlockIterator<'a, E, S>
#[derive(Clone)] #[derive(Clone)]
/// Extends `BlockRootsIterator`, returning `SignedBeaconBlock` instances, instead of their roots. /// Extends `BlockRootsIterator`, returning `SignedBeaconBlock` instances, instead of their roots.
pub struct BlockIterator<'a, T: EthSpec, U: Store<T>> { pub struct BlockIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
roots: BlockRootsIterator<'a, T, U>, roots: BlockRootsIterator<'a, T, Hot, Cold>,
} }
impl<'a, T: EthSpec, U: Store<T>> BlockIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> BlockIterator<'a, T, Hot, Cold> {
/// Create a new iterator over all blocks in the given `beacon_state` and prior states. /// Create a new iterator over all blocks in the given `beacon_state` and prior states.
pub fn new(store: Arc<U>, beacon_state: &'a BeaconState<T>) -> Self { pub fn new(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: &'a BeaconState<T>) -> Self {
Self { Self {
roots: BlockRootsIterator::new(store, beacon_state), roots: BlockRootsIterator::new(store, beacon_state),
} }
} }
/// Create a new iterator over all blocks in the given `beacon_state` and prior states. /// Create a new iterator over all blocks in the given `beacon_state` and prior states.
pub fn owned(store: Arc<U>, beacon_state: BeaconState<T>) -> Self { pub fn owned(store: Arc<HotColdDB<T, Hot, Cold>>, beacon_state: BeaconState<T>) -> Self {
Self { Self {
roots: BlockRootsIterator::owned(store, beacon_state), roots: BlockRootsIterator::owned(store, beacon_state),
} }
@ -279,7 +304,9 @@ impl<'a, T: EthSpec, U: Store<T>> BlockIterator<'a, T, U> {
} }
} }
impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockIterator<'a, T, U> { impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
for BlockIterator<'a, T, Hot, Cold>
{
type Item = Result<SignedBeaconBlock<T>, Error>; type Item = Result<SignedBeaconBlock<T>, Error>;
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
@ -288,8 +315,8 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockIterator<'a, T, U> {
} }
/// Fetch the next state to use whilst backtracking in `*RootsIterator`. /// Fetch the next state to use whilst backtracking in `*RootsIterator`.
fn next_historical_root_backtrack_state<E: EthSpec, S: Store<E>>( fn next_historical_root_backtrack_state<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: &S, store: &HotColdDB<E, Hot, Cold>,
current_state: &BeaconState<E>, current_state: &BeaconState<E>,
) -> Result<BeaconState<E>, Error> { ) -> Result<BeaconState<E>, Error> {
// For compatibility with the freezer database's restore points, we load a state at // For compatibility with the freezer database's restore points, we load a state at
@ -312,8 +339,10 @@ fn slot_of_prev_restore_point<E: EthSpec>(current_slot: Slot) -> Slot {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use crate::MemoryStore; use crate::config::StoreConfig;
use types::{test_utils::TestingBeaconStateBuilder, Keypair, MainnetEthSpec}; use crate::HotColdDB;
use sloggers::{null::NullLoggerBuilder, Build};
use types::{test_utils::TestingBeaconStateBuilder, ChainSpec, Keypair, MainnetEthSpec};
fn get_state<T: EthSpec>() -> BeaconState<T> { fn get_state<T: EthSpec>() -> BeaconState<T> {
let builder = TestingBeaconStateBuilder::from_single_keypair( let builder = TestingBeaconStateBuilder::from_single_keypair(
@ -327,7 +356,10 @@ mod test {
#[test] #[test]
fn block_root_iter() { fn block_root_iter() {
let store = Arc::new(MemoryStore::open()); let log = NullLoggerBuilder.build().unwrap();
let store = Arc::new(
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(),
);
let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();
let mut state_a: BeaconState<MainnetEthSpec> = get_state(); let mut state_a: BeaconState<MainnetEthSpec> = get_state();
@ -371,7 +403,10 @@ mod test {
#[test] #[test]
fn state_root_iter() { fn state_root_iter() {
let store = Arc::new(MemoryStore::open()); let log = NullLoggerBuilder.build().unwrap();
let store = Arc::new(
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap(),
);
let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();
let mut state_a: BeaconState<MainnetEthSpec> = get_state(); let mut state_a: BeaconState<MainnetEthSpec> = get_state();

View File

@ -48,7 +48,7 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES);
self.db self.db
.get(self.read_options(), column_key) .get(self.read_options(), BytesKey::from_vec(column_key))
.map_err(Into::into) .map_err(Into::into)
.map(|opt| { .map(|opt| {
opt.map(|bytes| { opt.map(|bytes| {
@ -68,7 +68,7 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
let timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); let timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES);
self.db self.db
.put(self.write_options(), column_key, val) .put(self.write_options(), BytesKey::from_vec(column_key), val)
.map_err(Into::into) .map_err(Into::into)
.map(|()| { .map(|()| {
metrics::stop_timer(timer); metrics::stop_timer(timer);
@ -82,7 +82,7 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT); metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT);
self.db self.db
.get(self.read_options(), column_key) .get(self.read_options(), BytesKey::from_vec(column_key))
.map_err(Into::into) .map_err(Into::into)
.and_then(|val| Ok(val.is_some())) .and_then(|val| Ok(val.is_some()))
} }
@ -94,34 +94,16 @@ impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT); metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT);
self.db self.db
.delete(self.write_options(), column_key) .delete(self.write_options(), BytesKey::from_vec(column_key))
.map_err(Into::into) .map_err(Into::into)
} }
fn do_atomically(&self, ops_batch: &[StoreOp]) -> Result<(), Error> { fn do_atomically(&self, ops_batch: &[KeyValueStoreOp]) -> Result<(), Error> {
let mut leveldb_batch = Writebatch::new(); let mut leveldb_batch = Writebatch::new();
for op in ops_batch { for op in ops_batch.into_iter() {
match op { match op {
StoreOp::DeleteBlock(block_hash) => { KeyValueStoreOp::DeleteKey(key) => {
let untyped_hash: Hash256 = (*block_hash).into(); leveldb_batch.delete(BytesKey::from_vec(key.to_vec()));
let key =
get_key_for_col(DBColumn::BeaconBlock.into(), untyped_hash.as_bytes());
leveldb_batch.delete(key);
}
StoreOp::DeleteState(state_hash, slot) => {
let untyped_hash: Hash256 = (*state_hash).into();
let state_summary_key = get_key_for_col(
DBColumn::BeaconStateSummary.into(),
untyped_hash.as_bytes(),
);
leveldb_batch.delete(state_summary_key);
if *slot % E::slots_per_epoch() == 0 {
let state_key =
get_key_for_col(DBColumn::BeaconState.into(), untyped_hash.as_bytes());
leveldb_batch.delete(state_key);
}
} }
} }
} }
@ -147,10 +129,10 @@ impl Key for BytesKey {
} }
} }
fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey { impl BytesKey {
let mut col = col.as_bytes().to_vec(); fn from_vec(key: Vec<u8>) -> Self {
col.append(&mut key.to_vec()); Self { key }
BytesKey { key: col } }
} }
impl From<LevelDBError> for Error { impl From<LevelDBError> for Error {

View File

@ -25,8 +25,6 @@ mod state_batch;
pub mod iter; pub mod iter;
use std::sync::Arc;
pub use self::config::StoreConfig; pub use self::config::StoreConfig;
pub use self::hot_cold_store::{HotColdDB, HotStateSummary}; pub use self::hot_cold_store::{HotColdDB, HotStateSummary};
pub use self::leveldb_store::LevelDB; pub use self::leveldb_store::LevelDB;
@ -52,7 +50,17 @@ pub trait KeyValueStore<E: EthSpec>: Sync + Send + Sized + 'static {
fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>; fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>;
/// Execute either all of the operations in `batch` or none at all, returning an error. /// Execute either all of the operations in `batch` or none at all, returning an error.
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>; fn do_atomically(&self, batch: &[KeyValueStoreOp]) -> Result<(), Error>;
}
pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec<u8> {
let mut result = column.as_bytes().to_vec();
result.extend_from_slice(key);
result
}
pub enum KeyValueStoreOp {
DeleteKey(Vec<u8>),
} }
pub trait ItemStore<E: EthSpec>: KeyValueStore<E> + Sync + Send + Sized + 'static { pub trait ItemStore<E: EthSpec>: KeyValueStore<E> + Sync + Send + Sized + 'static {
@ -93,75 +101,6 @@ pub trait ItemStore<E: EthSpec>: KeyValueStore<E> + Sync + Send + Sized + 'stati
} }
} }
/// An object capable of storing and retrieving objects implementing `StoreItem`.
///
/// A `Store` is fundamentally backed by a key-value database, however it provides support for
/// columns. A simple column implementation might involve prefixing a key with some bytes unique to
/// each column.
pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
type ForwardsBlockRootsIterator: Iterator<Item = Result<(Hash256, Slot), Error>>;
/// Store a block in the store.
fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error>;
/// Fetch a block from the store.
fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error>;
/// Delete a block from the store.
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error>;
/// Store a state in the store.
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error>;
/// Store a state summary in the store.
fn put_state_summary(
&self,
state_root: &Hash256,
summary: HotStateSummary,
) -> Result<(), Error>;
/// Fetch a state from the store.
fn get_state(
&self,
state_root: &Hash256,
slot: Option<Slot>,
) -> Result<Option<BeaconState<E>>, Error>;
/// Delete a state from the store.
fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error>;
/// Get a forwards (slot-ascending) iterator over the beacon block roots since `start_slot`.
///
/// Will be efficient for frozen portions of the database if using `HotColdDB`.
///
/// The `end_state` and `end_block_root` are required for backtracking in the post-finalization
/// part of the chain, and should be usually be set to the current head. Importantly, the
/// `end_state` must be a state that has had a block applied to it, and the hash of that
/// block must be `end_block_root`.
// NOTE: could maybe optimise by getting the `BeaconState` and end block root from a closure, as
// it's not always required.
fn forwards_block_roots_iterator(
store: Arc<Self>,
start_slot: Slot,
end_state: BeaconState<E>,
end_block_root: Hash256,
spec: &ChainSpec,
) -> Result<Self::ForwardsBlockRootsIterator, Error>;
fn load_epoch_boundary_state(
&self,
state_root: &Hash256,
) -> Result<Option<BeaconState<E>>, Error>;
fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error>;
fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error>;
fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error>;
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>;
}
/// Reified key-value storage operation. Helps in modifying the storage atomically. /// Reified key-value storage operation. Helps in modifying the storage atomically.
/// See also https://github.com/sigp/lighthouse/issues/692 /// See also https://github.com/sigp/lighthouse/issues/692
pub enum StoreOp { pub enum StoreOp {

View File

@ -1,12 +1,7 @@
use super::{DBColumn, Error, ItemStore, KeyValueStore, Store, StoreOp}; use super::{Error, ItemStore, KeyValueStore, KeyValueStoreOp};
use crate::forwards_iter::SimpleForwardsBlockRootsIterator;
use crate::hot_cold_store::HotStateSummary;
use crate::impls::beacon_state::{get_full_state, store_full_state};
use crate::StoreItem;
use parking_lot::RwLock; use parking_lot::RwLock;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::sync::Arc;
use types::*; use types::*;
type DBHashMap = HashMap<Vec<u8>, Vec<u8>>; type DBHashMap = HashMap<Vec<u8>, Vec<u8>>;
@ -46,53 +41,34 @@ impl<E: EthSpec> KeyValueStore<E> for MemoryStore<E> {
/// Get the value of some key from the database. Returns `None` if the key does not exist. /// Get the value of some key from the database. Returns `None` if the key does not exist.
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> { fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
let column_key = Self::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
Ok(self.db.read().get(&column_key).cloned()) Ok(self.db.read().get(&column_key).cloned())
} }
/// Puts a key in the database. /// Puts a key in the database.
fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> {
let column_key = Self::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
self.db.write().insert(column_key, val.to_vec()); self.db.write().insert(column_key, val.to_vec());
Ok(()) Ok(())
} }
/// Return true if some key exists in some column. /// Return true if some key exists in some column.
fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> { fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> {
let column_key = Self::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
Ok(self.db.read().contains_key(&column_key)) Ok(self.db.read().contains_key(&column_key))
} }
/// Delete some key from the database. /// Delete some key from the database.
fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> {
let column_key = Self::get_key_for_col(col, key); let column_key = Self::get_key_for_col(col, key);
self.db.write().remove(&column_key); self.db.write().remove(&column_key);
Ok(()) Ok(())
} }
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> { fn do_atomically(&self, batch: &[KeyValueStoreOp]) -> Result<(), Error> {
for op in batch { for op in batch {
match op { match op {
StoreOp::DeleteBlock(block_hash) => { KeyValueStoreOp::DeleteKey(hash) => {
let untyped_hash: Hash256 = (*block_hash).into(); self.db.write().remove(hash);
self.key_delete(DBColumn::BeaconBlock.into(), untyped_hash.as_bytes())?;
}
StoreOp::DeleteState(state_hash, slot) => {
let untyped_hash: Hash256 = (*state_hash).into();
if *slot % E::slots_per_epoch() == 0 {
self.key_delete(DBColumn::BeaconState.into(), untyped_hash.as_bytes())?;
} else {
self.key_delete(
DBColumn::BeaconStateSummary.into(),
untyped_hash.as_bytes(),
)?;
}
} }
} }
} }
@ -101,94 +77,3 @@ impl<E: EthSpec> KeyValueStore<E> for MemoryStore<E> {
} }
impl<E: EthSpec> ItemStore<E> for MemoryStore<E> {} impl<E: EthSpec> ItemStore<E> for MemoryStore<E> {}
impl<E: EthSpec> Store<E> for MemoryStore<E> {
type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator;
fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error> {
self.put(block_root, &block)
}
fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error> {
self.get(block_root)
}
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
self.key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())
}
fn put_state_summary(
&self,
state_root: &Hash256,
summary: HotStateSummary,
) -> Result<(), Error> {
self.put(state_root, &summary).map_err(Into::into)
}
/// Store a state in the store.
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
store_full_state(self, state_root, &state)
}
/// Fetch a state from the store.
fn get_state(
&self,
state_root: &Hash256,
_: Option<Slot>,
) -> Result<Option<BeaconState<E>>, Error> {
get_full_state(self, state_root)
}
fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error> {
self.key_delete(DBColumn::BeaconState.into(), state_root.as_bytes())
}
fn forwards_block_roots_iterator(
store: Arc<Self>,
start_slot: Slot,
end_state: BeaconState<E>,
end_block_root: Hash256,
_: &ChainSpec,
) -> Result<Self::ForwardsBlockRootsIterator, Error> {
SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root)
}
/// Load the most recent ancestor state of `state_root` which lies on an epoch boundary.
///
/// If `state_root` corresponds to an epoch boundary state, then that state itself should be
/// returned.
fn load_epoch_boundary_state(
&self,
state_root: &Hash256,
) -> Result<Option<BeaconState<E>>, Error> {
// The default implementation is not very efficient, but isn't used in prod.
// See `HotColdDB` for the optimized implementation.
if let Some(state) = self.get_state(state_root, None)? {
let epoch_boundary_slot = state.slot / E::slots_per_epoch() * E::slots_per_epoch();
if state.slot == epoch_boundary_slot {
Ok(Some(state))
} else {
let epoch_boundary_state_root = state.get_state_root(epoch_boundary_slot)?;
self.get_state(epoch_boundary_state_root, Some(epoch_boundary_slot))
}
} else {
Ok(None)
}
}
fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
self.put(key, item)
}
fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
self.get(key)
}
fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
self.exists::<I>(key)
}
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
KeyValueStore::do_atomically(self, batch)
}
}

View File

@ -1,4 +1,4 @@
use crate::{Error, HotStateSummary, Store}; use crate::{Error, HotColdDB, HotStateSummary, ItemStore};
use types::{BeaconState, EthSpec, Hash256}; use types::{BeaconState, EthSpec, Hash256};
/// A collection of states to be stored in the database. /// A collection of states to be stored in the database.
@ -36,7 +36,10 @@ impl<E: EthSpec> StateBatch<E> {
/// Write the batch to the database. /// Write the batch to the database.
/// ///
/// May fail to write the full batch if any of the items error (i.e. not atomic!) /// May fail to write the full batch if any of the items error (i.e. not atomic!)
pub fn commit<S: Store<E>>(self, store: &S) -> Result<(), Error> { pub fn commit<Hot: ItemStore<E>, Cold: ItemStore<E>>(
self,
store: &HotColdDB<E, Hot, Cold>,
) -> Result<(), Error> {
self.items.into_iter().try_for_each(|item| match item { self.items.into_iter().try_for_each(|item| match item {
BatchItem::Full(state_root, state) => store.put_state(&state_root, &state), BatchItem::Full(state_root, state) => store.put_state(&state_root, &state),
BatchItem::Summary(state_root, summary) => { BatchItem::Summary(state_root, summary) => {