Clean up database abstractions (#1200)
* Remove redundant method
* Pull a method out of a struct
* More precise db access abstractions
* Move fake trait method out of it
* cargo fmt
* Fix compilation error after refactoring
* Move another fake method out of the Store trait
* Get rid of superfluous method
* Fix refactoring bug
* Rename: SimpleStoreItem -> StoreItem
* Get rid of the confusing DiskStore type alias
* Get rid of SimpleDiskStore type alias
* Correction: a method took both self and a ref to Self
Parent: 08e6b4961d
Commit: 91cb14ac41
@@ -151,7 +151,7 @@ pub struct HeadInfo {
 pub trait BeaconChainTypes: Send + Sync + 'static {
     type Store: store::Store<Self::EthSpec>;
-    type StoreMigrator: Migrate<Self::Store, Self::EthSpec>;
+    type StoreMigrator: Migrate<Self::EthSpec>;
     type SlotClock: slot_clock::SlotClock;
     type Eth1Chain: Eth1ChainBackend<Self::EthSpec, Self::Store>;
     type EthSpec: types::EthSpec;
@@ -241,7 +241,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         let fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE);

-        self.store.put(
+        self.store.put_item(
             &Hash256::from_slice(&FORK_CHOICE_DB_KEY),
             &self.fork_choice.as_ssz_container(),
         )?;
@@ -250,7 +250,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let head_timer = metrics::start_timer(&metrics::PERSIST_HEAD);

         self.store
-            .put(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?;
+            .put_item(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?;

         metrics::stop_timer(head_timer);

@@ -266,7 +266,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn persist_op_pool(&self) -> Result<(), Error> {
         let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);

-        self.store.put(
+        self.store.put_item(
             &Hash256::from_slice(&OP_POOL_DB_KEY),
             &PersistedOperationPool::from_operation_pool(&self.op_pool),
         )?;
@@ -281,7 +281,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);

         if let Some(eth1_chain) = self.eth1_chain.as_ref() {
-            self.store.put(
+            self.store.put_item(
                 &Hash256::from_slice(&ETH1_CACHE_DB_KEY),
                 &eth1_chain.as_ssz_container(),
             )?;
@@ -426,7 +426,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .map(|(root, _)| root);

         if let Some(block_root) = root {
-            Ok(self.store.get(&block_root)?)
+            Ok(self.store.get_item(&block_root)?)
         } else {
             Ok(None)
         }
@@ -1934,7 +1934,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
         Ok(!self
             .store
-            .exists::<SignedBeaconBlock<T::EthSpec>>(beacon_block_root)?)
+            .item_exists::<SignedBeaconBlock<T::EthSpec>>(beacon_block_root)?)
     }

     /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
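The beacon_chain.rs hunks above are one mechanical rename: `put`/`get`/`exists` become `put_item`/`get_item`/`item_exists`, separating typed item access from the raw byte-level methods of similar names. A minimal sketch of the renamed surface, using toy types rather than the real Lighthouse `Store` trait:

    use std::collections::HashMap;
    use std::sync::Mutex;

    // Stand-in store keyed by 32-byte roots; values are pre-serialized items.
    struct ToyStore {
        items: Mutex<HashMap<[u8; 32], Vec<u8>>>,
    }

    impl ToyStore {
        // Was `put`: write a serialized item under a key.
        fn put_item(&self, key: &[u8; 32], item: &[u8]) {
            self.items.lock().unwrap().insert(*key, item.to_vec());
        }

        // Was `get`: read an item back, `None` if absent.
        fn get_item(&self, key: &[u8; 32]) -> Option<Vec<u8>> {
            self.items.lock().unwrap().get(key).cloned()
        }

        // Was `exists`: membership test without copying the value.
        fn item_exists(&self, key: &[u8; 32]) -> bool {
            self.items.lock().unwrap().contains_key(key)
        }
    }

The disambiguation matters because the store also exposes byte-level `get_bytes`/`put_bytes`; after the rename a reader can tell at each call site which layer is in play.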

@@ -48,7 +48,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     for Witness<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -98,7 +98,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -184,7 +184,7 @@ where
             .ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?;

         store
-            .get::<SszEth1>(&Hash256::from_slice(&ETH1_CACHE_DB_KEY))
+            .get_item::<SszEth1>(&Hash256::from_slice(&ETH1_CACHE_DB_KEY))
             .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e))
     }

@@ -196,7 +196,7 @@ where
             .ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?;

         Ok(store
-            .get::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
+            .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
            .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
            .is_some())
     }
@@ -227,7 +227,7 @@ where
            .ok_or_else(|| "resume_from_db requires a store.".to_string())?;

        let chain = store
-            .get::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
+            .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
            .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
            .ok_or_else(|| {
                "No persisted beacon chain found in store. Try purging the beacon chain database."
@@ -242,7 +242,7 @@ where

        let head_block_root = chain.canonical_head_block_root;
        let head_block = store
-            .get::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
+            .get_item::<SignedBeaconBlock<TEthSpec>>(&head_block_root)
            .map_err(|e| format!("DB error when reading head block: {:?}", e))?
            .ok_or_else(|| "Head block not found in store".to_string())?;
        let head_state_root = head_block.state_root();
@@ -253,7 +253,7 @@ where

        self.op_pool = Some(
            store
-                .get::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY))
+                .get_item::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY))
                .map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))?
                .map(|persisted| persisted.into_operation_pool(&head_state, &self.spec))
                .unwrap_or_else(|| OperationPool::new()),
@@ -261,7 +261,7 @@ where

        let finalized_block_root = head_state.finalized_checkpoint.root;
        let finalized_block = store
-            .get::<SignedBeaconBlock<TEthSpec>>(&finalized_block_root)
+            .get_item::<SignedBeaconBlock<TEthSpec>>(&finalized_block_root)
            .map_err(|e| format!("DB error when reading finalized block: {:?}", e))?
            .ok_or_else(|| "Finalized block not found in store".to_string())?;
        let finalized_state_root = finalized_block.state_root();
@@ -317,16 +317,18 @@ where
            .put_state(&beacon_state_root, &beacon_state)
            .map_err(|e| format!("Failed to store genesis state: {:?}", e))?;
        store
-            .put(&beacon_block_root, &beacon_block)
+            .put_item(&beacon_block_root, &beacon_block)
            .map_err(|e| format!("Failed to store genesis block: {:?}", e))?;

        // Store the genesis block under the `ZERO_HASH` key.
-        store.put(&Hash256::zero(), &beacon_block).map_err(|e| {
-            format!(
-                "Failed to store genesis block under 0x00..00 alias: {:?}",
-                e
-            )
-        })?;
+        store
+            .put_item(&Hash256::zero(), &beacon_block)
+            .map_err(|e| {
+                format!(
+                    "Failed to store genesis block under 0x00..00 alias: {:?}",
+                    e
+                )
+            })?;

        self.finalized_snapshot = Some(BeaconSnapshot {
            beacon_block_root,
@@ -484,7 +486,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -501,7 +503,7 @@ where
             .ok_or_else(|| "reduced_tree_fork_choice requires a store.".to_string())?;

         let persisted_fork_choice = store
-            .get::<SszForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
+            .get_item::<SszForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
             .map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?;

         let fork_choice = if let Some(persisted) = persisted_fork_choice {
@@ -554,7 +556,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -592,7 +594,7 @@ impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec> + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -631,7 +633,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec> + 'static,
+    TStoreMigrator: Migrate<TEthSpec> + 'static,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,

@@ -10,7 +10,7 @@ use std::collections::HashMap;
 use std::iter::DoubleEndedIterator;
 use std::marker::PhantomData;
 use std::sync::Arc;
-use store::{DBColumn, Error as StoreError, SimpleStoreItem, Store};
+use store::{DBColumn, Error as StoreError, Store, StoreItem};
 use types::{
     BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
     DEPOSIT_TREE_DEPTH,
@@ -59,7 +59,7 @@ pub struct SszEth1 {
     backend_bytes: Vec<u8>,
 }

-impl SimpleStoreItem for SszEth1 {
+impl StoreItem for SszEth1 {
     fn db_column() -> DBColumn {
         DBColumn::Eth1Cache
     }
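`SimpleStoreItem` is renamed to `StoreItem` here and in the matching `impl` blocks below. Only `db_column` is visible in these hunks; as a rough sketch of what a complete implementation looks like, with the serialization methods assumed rather than taken from this diff:

    // Toy trait and item; names beyond `db_column` are assumptions.
    enum DBColumn {
        Eth1Cache,
    }

    trait StoreItem: Sized {
        fn db_column() -> DBColumn;
        fn as_store_bytes(&self) -> Vec<u8>;
        fn from_store_bytes(bytes: &[u8]) -> Result<Self, String>;
    }

    struct CacheItem {
        backend_bytes: Vec<u8>,
    }

    impl StoreItem for CacheItem {
        // Routes the item to its own column (key namespace) in the DB.
        fn db_column() -> DBColumn {
            DBColumn::Eth1Cache
        }

        // The real types SSZ-encode here; the toy item is already bytes.
        fn as_store_bytes(&self) -> Vec<u8> {
            self.backend_bytes.clone()
        }

        fn from_store_bytes(bytes: &[u8]) -> Result<Self, String> {
            Ok(CacheItem { backend_bytes: bytes.to_vec() })
        }
    }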

@@ -8,7 +8,7 @@ use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use state_processing::common::get_indexed_attestation;
 use std::marker::PhantomData;
-use store::{DBColumn, Error as StoreError, SimpleStoreItem};
+use store::{DBColumn, Error as StoreError, StoreItem};
 use types::{BeaconBlock, BeaconState, BeaconStateError, Epoch, Hash256, IndexedAttestation, Slot};

 type Result<T> = std::result::Result<T, Error>;
@@ -285,7 +285,7 @@ impl From<String> for Error {
     }
 }

-impl SimpleStoreItem for SszForkChoice {
+impl StoreItem for SszForkChoice {
     fn db_column() -> DBColumn {
         DBColumn::ForkChoice
     }

@@ -7,15 +7,16 @@ use std::mem;
 use std::sync::mpsc;
 use std::sync::Arc;
 use std::thread;
+use store::hot_cold_store::{process_finalization, HotColdDBError};
 use store::iter::{ParentRootBlockIterator, RootsIterator};
-use store::{hot_cold_store::HotColdDBError, Error, SimpleDiskStore, Store, StoreOp};
-pub use store::{DiskStore, MemoryStore};
+use store::{Error, Store, StoreOp};
+pub use store::{HotColdDB, MemoryStore};
 use types::*;
 use types::{BeaconState, EthSpec, Hash256, Slot};

 /// Trait for migration processes that update the database upon finalization.
-pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
-    fn new(db: Arc<S>, log: Logger) -> Self;
+pub trait Migrate<E: EthSpec>: Send + Sync + 'static {
+    fn new(db: Arc<HotColdDB<E>>, log: Logger) -> Self;

     fn process_finalization(
         &self,
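The `Migrate` trait loses its store type parameter: implementors now receive the concrete `Arc<HotColdDB<E>>` instead of a generic `Arc<S>`. A minimal sketch of the shape change, with stand-in types (and the `Logger` parameter omitted), not the real Lighthouse definitions:

    use std::marker::PhantomData;
    use std::sync::Arc;

    trait EthSpec: Send + Sync + 'static {}

    struct HotColdDB<E: EthSpec>(PhantomData<E>); // stand-in for the real DB

    trait Migrate<E: EthSpec>: Send + Sync + 'static {
        fn new(db: Arc<HotColdDB<E>>) -> Self;
    }

    struct NullMigrator;

    // One blanket impl now covers what previously needed a separate impl
    // per store type (see the NullMigrator hunk further down).
    impl<E: EthSpec> Migrate<E> for NullMigrator {
        fn new(_db: Arc<HotColdDB<E>>) -> Self {
            NullMigrator
        }
    }

With the `DiskStore`/`SimpleDiskStore` aliases gone, only one migratable store type remains, so the generic `S` bought nothing and merely forced every bound in the tree to repeat it.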
@@ -35,7 +36,7 @@ pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
     /// Assumptions:
     /// * It is called after every finalization.
     fn prune_abandoned_forks(
-        store: Arc<S>,
+        store: Arc<HotColdDB<E>>,
         head_tracker: Arc<HeadTracker>,
         old_finalized_block_hash: SignedBeaconBlockHash,
         new_finalized_block_hash: SignedBeaconBlockHash,
@@ -164,14 +165,8 @@ pub trait Migrate<S: Store<E>, E: EthSpec>: Send + Sync + 'static {
 /// Migrator that does nothing, for stores that don't need migration.
 pub struct NullMigrator;

-impl<E: EthSpec> Migrate<SimpleDiskStore<E>, E> for NullMigrator {
-    fn new(_: Arc<SimpleDiskStore<E>>, _: Logger) -> Self {
-        NullMigrator
-    }
-}
-
-impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
-    fn new(_: Arc<MemoryStore<E>>, _: Logger) -> Self {
+impl<E: EthSpec> Migrate<E> for NullMigrator {
+    fn new(_: Arc<HotColdDB<E>>, _: Logger) -> Self {
         NullMigrator
     }
 }
@@ -179,12 +174,12 @@ impl<E: EthSpec> Migrate<MemoryStore<E>, E> for NullMigrator {
 /// Migrator that immediately calls the store's migration function, blocking the current execution.
 ///
 /// Mostly useful for tests.
-pub struct BlockingMigrator<S> {
-    db: Arc<S>,
+pub struct BlockingMigrator<E: EthSpec> {
+    db: Arc<HotColdDB<E>>,
 }

-impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
-    fn new(db: Arc<S>, _: Logger) -> Self {
+impl<E: EthSpec> Migrate<E> for BlockingMigrator<E> {
+    fn new(db: Arc<HotColdDB<E>>, _: Logger) -> Self {
         BlockingMigrator { db }
     }

@@ -197,7 +192,7 @@ impl<E: EthSpec, S: Store<E>> Migrate<S, E> for BlockingMigrator<S> {
         old_finalized_block_hash: SignedBeaconBlockHash,
         new_finalized_block_hash: SignedBeaconBlockHash,
     ) {
-        if let Err(e) = S::process_finalization(self.db.clone(), state_root, &new_finalized_state) {
+        if let Err(e) = process_finalization(self.db.clone(), state_root, &new_finalized_state) {
             // This migrator is only used for testing, so we just log to stderr without a logger.
             eprintln!("Migration error: {:?}", e);
         }
@@ -225,13 +220,13 @@ type MpscSender<E> = mpsc::Sender<(

 /// Migrator that runs a background thread to migrate state from the hot to the cold database.
 pub struct BackgroundMigrator<E: EthSpec> {
-    db: Arc<DiskStore<E>>,
+    db: Arc<HotColdDB<E>>,
     tx_thread: Mutex<(MpscSender<E>, thread::JoinHandle<()>)>,
     log: Logger,
 }

-impl<E: EthSpec> Migrate<DiskStore<E>, E> for BackgroundMigrator<E> {
-    fn new(db: Arc<DiskStore<E>>, log: Logger) -> Self {
+impl<E: EthSpec> Migrate<E> for BackgroundMigrator<E> {
+    fn new(db: Arc<HotColdDB<E>>, log: Logger) -> Self {
         let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone()));
         Self { db, tx_thread, log }
     }
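`BackgroundMigrator` keeps the pattern visible above: the sending half of a channel and the worker's join handle live together behind a `Mutex`. A generic, self-contained sketch of that pattern (toy message type; not the Lighthouse code):

    use std::sync::{mpsc, Mutex};
    use std::thread;

    struct Background<T: Send + 'static> {
        tx_thread: Mutex<(mpsc::Sender<T>, thread::JoinHandle<()>)>,
    }

    impl<T: Send + 'static> Background<T> {
        fn spawn(handler: impl Fn(T) + Send + 'static) -> Self {
            let (tx, rx) = mpsc::channel();
            // The worker drains the channel until every sender is dropped.
            let handle = thread::spawn(move || {
                while let Ok(msg) = rx.recv() {
                    handler(msg);
                }
            });
            Background {
                tx_thread: Mutex::new((tx, handle)),
            }
        }

        fn send(&self, msg: T) {
            // Lock, then send; a failed send means the worker has exited.
            let guard = self.tx_thread.lock().unwrap();
            let _ = guard.0.send(msg);
        }
    }

Holding the pair in one mutex lets a sender that observes a dead worker respawn the thread and replace both halves atomically, which is presumably why the real type bundles them.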
@@ -293,7 +288,7 @@ impl<E: EthSpec> BackgroundMigrator<E> {
     ///
     /// Return a channel handle for sending new finalized states to the thread.
     fn spawn_thread(
-        db: Arc<DiskStore<E>>,
+        db: Arc<HotColdDB<E>>,
         log: Logger,
     ) -> (
         mpsc::Sender<(
@@ -317,7 +312,7 @@ impl<E: EthSpec> BackgroundMigrator<E> {
                 new_finalized_slot,
             )) = rx.recv()
             {
-                match DiskStore::process_finalization(db.clone(), state_root, &state) {
+                match process_finalization(db.clone(), state_root, &state) {
                     Ok(()) => {}
                     Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => {
                         debug!(

@@ -1,7 +1,7 @@
 use crate::head_tracker::SszHeadTracker;
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
-use store::{DBColumn, Error as StoreError, SimpleStoreItem};
+use store::{DBColumn, Error as StoreError, StoreItem};
 use types::Hash256;

 #[derive(Clone, Encode, Decode)]
@@ -11,7 +11,7 @@ pub struct PersistedBeaconChain {
     pub ssz_head_tracker: SszHeadTracker,
 }

-impl SimpleStoreItem for PersistedBeaconChain {
+impl StoreItem for PersistedBeaconChain {
     fn db_column() -> DBColumn {
         DBColumn::BeaconChain
     }

@@ -18,7 +18,7 @@ use std::borrow::Cow;
 use std::collections::HashMap;
 use std::sync::Arc;
 use std::time::Duration;
-use store::{DiskStore, MemoryStore, Store};
+use store::{HotColdDB, MemoryStore, Store};
 use tempfile::{tempdir, TempDir};
 use tree_hash::TreeHash;
 use types::{
@@ -44,7 +44,7 @@ pub type BaseHarnessType<TStore, TStoreMigrator, TEthSpec> = Witness<
 >;

 pub type HarnessType<E> = BaseHarnessType<MemoryStore<E>, NullMigrator, E>;
-pub type DiskHarnessType<E> = BaseHarnessType<DiskStore<E>, BlockingMigrator<DiskStore<E>>, E>;
+pub type DiskHarnessType<E> = BaseHarnessType<HotColdDB<E>, BlockingMigrator<E>, E>;

 /// Indicates how the `BeaconChainHarness` should produce blocks.
 #[derive(Clone, Copy, Debug)]
@@ -140,7 +140,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
     /// Instantiate a new harness with `validator_count` initial validators.
     pub fn new_with_disk_store(
         eth_spec_instance: E,
-        store: Arc<DiskStore<E>>,
+        store: Arc<HotColdDB<E>>,
         keypairs: Vec<Keypair>,
     ) -> Self {
         let data_dir = tempdir().expect("should create temporary data_dir");
@@ -152,10 +152,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .logger(log.clone())
             .custom_spec(spec.clone())
             .store(store.clone())
-            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
-                store,
-                log.clone(),
-            ))
+            .store_migrator(BlockingMigrator::new(store, log.clone()))
             .data_dir(data_dir.path().to_path_buf())
             .genesis_state(
                 interop_genesis_state::<E>(&keypairs, HARNESS_GENESIS_TIME, &spec)
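With the single-parameter trait, this constructor can drop the turbofish entirely: in `BlockingMigrator::new(store, log.clone())` the spec type `E` is pinned by the `Arc<HotColdDB<E>>` argument. Under the old `Migrate<S, E>` the call had to spell the trait out, because `S: Store<E>` does not determine `E` on its own:

    // Before: E cannot be inferred from S alone.
    <BlockingMigrator<_> as Migrate<_, E>>::new(store, log.clone())

    // After: E is fixed by the Arc<HotColdDB<E>> argument.
    BlockingMigrator::new(store, log.clone())

(The `resume_from_disk_store` hunk below keeps a qualified `<BlockingMigrator<_> as Migrate<E>>::new` form; either spelling works after the change.)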
@@ -183,7 +180,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
     /// Instantiate a new harness with `validator_count` initial validators.
     pub fn resume_from_disk_store(
         eth_spec_instance: E,
-        store: Arc<DiskStore<E>>,
+        store: Arc<HotColdDB<E>>,
         keypairs: Vec<Keypair>,
         data_dir: TempDir,
     ) -> Self {
@@ -195,10 +192,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
             .logger(log.clone())
             .custom_spec(spec)
             .store(store.clone())
-            .store_migrator(<BlockingMigrator<_> as Migrate<_, E>>::new(
-                store,
-                log.clone(),
-            ))
+            .store_migrator(<BlockingMigrator<_> as Migrate<E>>::new(store, log.clone()))
             .data_dir(data_dir.path().to_path_buf())
             .resume_from_db()
             .expect("should resume beacon chain from db")
@@ -224,7 +218,7 @@ impl<E: EthSpec> BeaconChainHarness<DiskHarnessType<E>> {
 impl<S, M, E> BeaconChainHarness<BaseHarnessType<S, M, E>>
 where
     S: Store<E>,
-    M: Migrate<S, E>,
+    M: Migrate<E>,
     E: EthSpec,
 {
     /// Advance the slot of the `BeaconChain`.

@@ -988,7 +988,7 @@ fn attestation_that_skips_epochs() {
     let block_slot = harness
         .chain
         .store
-        .get::<SignedBeaconBlock<E>>(&block_root)
+        .get_item::<SignedBeaconBlock<E>>(&block_root)
         .expect("should not error getting block")
         .expect("should find attestation block")
         .message

@@ -9,7 +9,7 @@ use beacon_chain::{
 };
 use sloggers::{null::NullLoggerBuilder, Build};
 use std::sync::Arc;
-use store::{DiskStore, StoreConfig};
+use store::{HotColdDB, StoreConfig};
 use tempfile::{tempdir, TempDir};
 use types::{EthSpec, Keypair, MinimalEthSpec};

@@ -23,14 +23,14 @@ lazy_static! {
     static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
 }

-fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
+fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E>> {
     let spec = E::default_spec();
     let hot_path = db_path.path().join("hot_db");
     let cold_path = db_path.path().join("cold_db");
     let config = StoreConfig::default();
     let log = NullLoggerBuilder.build().expect("logger should build");
     Arc::new(
-        DiskStore::open(&hot_path, &cold_path, config, spec, log)
+        HotColdDB::open(&hot_path, &cold_path, config, spec, log)
             .expect("disk store should initialize"),
     )
 }

@@ -16,7 +16,7 @@ use std::collections::HashSet;
 use std::sync::Arc;
 use store::{
     iter::{BlockRootsIterator, StateRootsIterator},
-    DiskStore, Store, StoreConfig,
+    HotColdDB, Store, StoreConfig,
 };
 use tempfile::{tempdir, TempDir};
 use tree_hash::TreeHash;
@@ -35,19 +35,19 @@ lazy_static! {
 type E = MinimalEthSpec;
 type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;

-fn get_store(db_path: &TempDir) -> Arc<DiskStore<E>> {
+fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E>> {
     let spec = MinimalEthSpec::default_spec();
     let hot_path = db_path.path().join("hot_db");
     let cold_path = db_path.path().join("cold_db");
     let config = StoreConfig::default();
     let log = NullLoggerBuilder.build().expect("logger should build");
     Arc::new(
-        DiskStore::open(&hot_path, &cold_path, config, spec, log)
+        HotColdDB::open(&hot_path, &cold_path, config, spec, log)
             .expect("disk store should initialize"),
     )
 }

-fn get_harness(store: Arc<DiskStore<E>>, validator_count: usize) -> TestHarness {
+fn get_harness(store: Arc<HotColdDB<E>>, validator_count: usize) -> TestHarness {
     let harness = BeaconChainHarness::new_with_disk_store(
         MinimalEthSpec,
         store,
@@ -1305,8 +1305,8 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) {
     );
 }

-/// Check that the DiskStore's split_slot is equal to the start slot of the last finalized epoch.
-fn check_split_slot(harness: &TestHarness, store: Arc<DiskStore<E>>) {
+/// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch.
+fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E>>) {
     let split_slot = store.get_split_slot();
     assert_eq!(
         harness
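`check_split_slot` verifies the hot/cold boundary. A toy model of the invariant it asserts — everything strictly before the split slot has been frozen into the cold database — with the epoch arithmetic assumed rather than copied from the tests:

    // Toy model only; the real check compares `store.get_split_slot()`
    // against the start slot of the last finalized epoch.
    fn is_frozen(slot: u64, split_slot: u64) -> bool {
        slot < split_slot
    }

    fn main() {
        let slots_per_epoch = 8; // MinimalEthSpec-style value, assumed
        let finalized_epoch = 4;
        let split_slot = finalized_epoch * slots_per_epoch;
        assert!(is_frozen(split_slot - 1, split_slot)); // frozen in cold DB
        assert!(!is_frozen(split_slot, split_slot)); // still in hot DB
    }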
@@ -354,7 +354,7 @@ fn roundtrip_operation_pool() {
     let restored_op_pool = harness
         .chain
         .store
-        .get::<PersistedOperationPool<MinimalEthSpec>>(&key)
+        .get_item::<PersistedOperationPool<MinimalEthSpec>>(&key)
         .expect("should read db")
         .expect("should find op pool")
         .into_operation_pool(&head_state, &harness.spec);

@@ -6,7 +6,7 @@ use beacon_chain::{
     eth1_chain::{CachingEth1Backend, Eth1Chain},
     migrate::{BackgroundMigrator, Migrate, NullMigrator},
     slot_clock::{SlotClock, SystemTimeSlotClock},
-    store::{DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig},
+    store::{HotColdDB, MemoryStore, Store, StoreConfig},
     BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler,
 };
 use environment::RuntimeContext;
@@ -65,7 +65,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec>,
+    TStoreMigrator: Migrate<TEthSpec>,
     TSlotClock: SlotClock + Clone + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -376,7 +376,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec>,
+    TStoreMigrator: Migrate<TEthSpec>,
     TSlotClock: SlotClock + Clone + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -423,7 +423,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec>,
+    TStoreMigrator: Migrate<TEthSpec>,
     TSlotClock: SlotClock + 'static,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
@@ -462,7 +462,7 @@ where
 impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     ClientBuilder<
         Witness<
-            DiskStore<TEthSpec>,
+            HotColdDB<TEthSpec>,
             TStoreMigrator,
             TSlotClock,
             TEth1Backend,
@@ -472,12 +472,12 @@ impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TSlotClock: SlotClock + 'static,
-    TStoreMigrator: Migrate<DiskStore<TEthSpec>, TEthSpec> + 'static,
-    TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
+    TStoreMigrator: Migrate<TEthSpec> + 'static,
+    TEth1Backend: Eth1ChainBackend<TEthSpec, HotColdDB<TEthSpec>> + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
 {
-    /// Specifies that the `Client` should use a `DiskStore` database.
+    /// Specifies that the `Client` should use a `HotColdDB` database.
     pub fn disk_store(
         mut self,
         hot_path: &Path,
@@ -494,40 +494,13 @@ where
             .clone()
             .ok_or_else(|| "disk_store requires a chain spec".to_string())?;

-        let store = DiskStore::open(hot_path, cold_path, config, spec, context.log)
+        let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log)
             .map_err(|e| format!("Unable to open database: {:?}", e))?;
         self.store = Some(Arc::new(store));
         Ok(self)
     }
 }

-impl<TStoreMigrator, TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
-    ClientBuilder<
-        Witness<
-            SimpleDiskStore<TEthSpec>,
-            TStoreMigrator,
-            TSlotClock,
-            TEth1Backend,
-            TEthSpec,
-            TEventHandler,
-        >,
-    >
-where
-    TSlotClock: SlotClock + 'static,
-    TStoreMigrator: Migrate<SimpleDiskStore<TEthSpec>, TEthSpec> + 'static,
-    TEth1Backend: Eth1ChainBackend<TEthSpec, SimpleDiskStore<TEthSpec>> + 'static,
-    TEthSpec: EthSpec + 'static,
-    TEventHandler: EventHandler<TEthSpec> + 'static,
-{
-    /// Specifies that the `Client` should use a `DiskStore` database.
-    pub fn simple_disk_store(mut self, path: &Path) -> Result<Self, String> {
-        let store =
-            SimpleDiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e))?;
-        self.store = Some(Arc::new(store));
-        Ok(self)
-    }
-}
-
 impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     ClientBuilder<
         Witness<
@@ -559,7 +532,7 @@ where
 impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     ClientBuilder<
         Witness<
-            DiskStore<TEthSpec>,
+            HotColdDB<TEthSpec>,
             BackgroundMigrator<TEthSpec>,
             TSlotClock,
             TEth1Backend,
@@ -569,7 +542,7 @@ impl<TSlotClock, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TSlotClock: SlotClock + 'static,
-    TEth1Backend: Eth1ChainBackend<TEthSpec, DiskStore<TEthSpec>> + 'static,
+    TEth1Backend: Eth1ChainBackend<TEthSpec, HotColdDB<TEthSpec>> + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
 {
@@ -600,7 +573,7 @@ impl<TStore, TStoreMigrator, TSlotClock, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec>,
+    TStoreMigrator: Migrate<TEthSpec>,
     TSlotClock: SlotClock + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,
@@ -706,7 +679,7 @@ impl<TStore, TStoreMigrator, TEth1Backend, TEthSpec, TEventHandler>
     >
 where
     TStore: Store<TEthSpec> + 'static,
-    TStoreMigrator: Migrate<TStore, TEthSpec>,
+    TStoreMigrator: Migrate<TEthSpec>,
     TEth1Backend: Eth1ChainBackend<TEthSpec, TStore> + 'static,
     TEthSpec: EthSpec + 'static,
     TEventHandler: EventHandler<TEthSpec> + 'static,

@@ -1,7 +1,7 @@
 use eth2_libp2p::Enr;
 use rlp;
 use std::sync::Arc;
-use store::{DBColumn, Error as StoreError, SimpleStoreItem, Store};
+use store::{DBColumn, Error as StoreError, Store, StoreItem};
 use types::{EthSpec, Hash256};

 /// 32-byte key for accessing the `DhtEnrs`.
@@ -10,7 +10,7 @@ pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE";
 pub fn load_dht<T: Store<E>, E: EthSpec>(store: Arc<T>) -> Vec<Enr> {
     // Load DHT from store
     let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
-    match store.get(&key) {
+    match store.get_item(&key) {
         Ok(Some(p)) => {
             let p: PersistedDht = p;
             p.enrs
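The `let p: PersistedDht = p;` rebinding in the context above is what pins the item type: `get_item` is generic over which item to decode, and nothing else at the call site names it. A toy sketch of that inference (stand-in types, not the real API):

    use std::collections::HashMap;

    trait StoreItem: Sized {
        fn from_store_bytes(bytes: &[u8]) -> Option<Self>;
    }

    struct ToyStore {
        items: HashMap<[u8; 32], Vec<u8>>,
    }

    impl ToyStore {
        // Generic over the item type: the caller must pin `I` somehow.
        fn get_item<I: StoreItem>(&self, key: &[u8; 32]) -> Option<I> {
            self.items.get(key).and_then(|b| I::from_store_bytes(b))
        }
    }

    struct PersistedDht {
        enrs: Vec<String>, // stand-in for Vec<Enr>
    }

    impl StoreItem for PersistedDht {
        fn from_store_bytes(bytes: &[u8]) -> Option<Self> {
            let s = String::from_utf8(bytes.to_vec()).ok()?;
            Some(PersistedDht { enrs: vec![s] })
        }
    }

    fn main() {
        let store = ToyStore { items: HashMap::new() };
        // The annotation selects the decoder, exactly like the rebinding above.
        let dht: Option<PersistedDht> = store.get_item(&[0u8; 32]);
        assert!(dht.is_none());
    }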
@@ -25,7 +25,7 @@ pub fn persist_dht<T: Store<E>, E: EthSpec>(
     enrs: Vec<Enr>,
 ) -> Result<(), store::Error> {
     let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
-    store.put(&key, &PersistedDht { enrs })?;
+    store.put_item(&key, &PersistedDht { enrs })?;
     Ok(())
 }

@@ -34,7 +34,7 @@ pub struct PersistedDht {
     pub enrs: Vec<Enr>,
 }

-impl SimpleStoreItem for PersistedDht {
+impl StoreItem for PersistedDht {
     fn db_column() -> DBColumn {
         DBColumn::DhtEnrs
     }
@@ -67,9 +67,9 @@ mod tests {
         let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()];
         let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
         store
-            .put(&key, &PersistedDht { enrs: enrs.clone() })
+            .put_item(&key, &PersistedDht { enrs: enrs.clone() })
             .unwrap();
-        let dht: PersistedDht = store.get(&key).unwrap().unwrap();
+        let dht: PersistedDht = store.get_item(&key).unwrap().unwrap();
         assert_eq!(dht.enrs, enrs);
     }
 }

@@ -252,7 +252,7 @@ impl<T: BeaconChainTypes> Processor<T> {
         } else if self
             .chain
             .store
-            .exists::<SignedBeaconBlock<T::EthSpec>>(&remote.head_root)
+            .item_exists::<SignedBeaconBlock<T::EthSpec>>(&remote.head_root)
             .unwrap_or_else(|_| false)
         {
             debug!(

@@ -4,7 +4,7 @@ use parking_lot::RwLock;
 use serde_derive::{Deserialize, Serialize};
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
-use store::{DBColumn, Error as StoreError, SimpleStoreItem};
+use store::{DBColumn, Error as StoreError, StoreItem};
 use types::*;

 /// SSZ-serializable version of `OperationPool`.
@@ -102,7 +102,7 @@ impl<T: EthSpec> PersistedOperationPool<T> {
     }
 }

-impl<T: EthSpec> SimpleStoreItem for PersistedOperationPool<T> {
+impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {
     fn db_column() -> DBColumn {
         DBColumn::OpPool
     }

@@ -10,7 +10,7 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis};
 pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir};
 pub use eth2_config::Eth2Config;

-use beacon_chain::migrate::{BackgroundMigrator, DiskStore};
+use beacon_chain::migrate::{BackgroundMigrator, HotColdDB};
 use beacon_chain::{
     builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender,
     slot_clock::SystemTimeSlotClock,
@@ -25,10 +25,10 @@ use types::EthSpec;
 /// A type-alias to the tighten the definition of a production-intended `Client`.
 pub type ProductionClient<E> = Client<
     Witness<
-        DiskStore<E>,
+        HotColdDB<E>,
         BackgroundMigrator<E>,
         SystemTimeSlotClock,
-        CachingEth1Backend<E, DiskStore<E>>,
+        CachingEth1Backend<E, HotColdDB<E>>,
         E,
         WebSocketSender<E>,
     >,

@@ -1,5 +1,5 @@
 use crate::chunked_vector::{chunk_key, Chunk, Field};
-use crate::DiskStore;
+use crate::HotColdDB;
 use slog::error;
 use std::sync::Arc;
 use types::{ChainSpec, EthSpec, Slot};
@@ -12,7 +12,7 @@ where
     F: Field<E>,
     E: EthSpec,
 {
-    pub(crate) store: Arc<DiskStore<E>>,
+    pub(crate) store: Arc<HotColdDB<E>>,
     current_vindex: usize,
     pub(crate) end_vindex: usize,
     next_cindex: usize,
@@ -28,10 +28,10 @@ where
     /// index stored by the restore point at `last_restore_point_slot`.
     ///
    /// The `last_restore_point` slot should be the slot of a recent restore point as obtained from
-    /// `DiskStore::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can
+    /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can
     /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`).
     pub fn new(
-        store: Arc<DiskStore<E>>,
+        store: Arc<HotColdDB<E>>,
         start_vindex: usize,
         last_restore_point_slot: Slot,
         spec: &ChainSpec,

@@ -177,7 +177,7 @@ pub trait Field<E: EthSpec>: Copy {
     /// Load the genesis value for a fixed length field from the store.
     ///
     /// This genesis value should be used to fill the initial state of the vector.
-    fn load_genesis_value<S: Store<E>>(store: &S) -> Result<Self::Value, Error> {
+    fn load_genesis_value<S: KeyValueStore<E>>(store: &S) -> Result<Self::Value, Error> {
         let key = &genesis_value_key()[..];
         let chunk =
             Chunk::load(store, Self::column(), key)?.ok_or(ChunkError::MissingGenesisValue)?;
@@ -192,7 +192,7 @@ pub trait Field<E: EthSpec>: Copy {
     ///
     /// Check the existing value (if any) for consistency with the value we intend to store, and
     /// return an error if they are inconsistent.
-    fn check_and_store_genesis_value<S: Store<E>>(
+    fn check_and_store_genesis_value<S: KeyValueStore<E>>(
         store: &S,
         value: Self::Value,
     ) -> Result<(), Error> {
@@ -327,7 +327,7 @@ field!(
     |state: &BeaconState<_>, index, _| safe_modulo_index(&state.randao_mixes, index)
 );

-pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: Store<E>>(
+pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: KeyValueStore<E>>(
     field: F,
     store: &S,
     state: &BeaconState<E>,
@@ -387,7 +387,7 @@ fn store_range<F, E, S, I>(
 where
     F: Field<E>,
     E: EthSpec,
-    S: Store<E>,
+    S: KeyValueStore<E>,
     I: Iterator<Item = usize>,
 {
     for chunk_index in range {
@@ -417,7 +417,7 @@ where

 // Chunks at the end index are included.
 // TODO: could be more efficient with a real range query (perhaps RocksDB)
-fn range_query<S: Store<E>, E: EthSpec, T: Decode + Encode>(
+fn range_query<S: KeyValueStore<E>, E: EthSpec, T: Decode + Encode>(
     store: &S,
     column: DBColumn,
     start_index: usize,
@@ -482,7 +482,7 @@ fn stitch<T: Default + Clone>(
     Ok(result)
 }

-pub fn load_vector_from_db<F: FixedLengthField<E>, E: EthSpec, S: Store<E>>(
+pub fn load_vector_from_db<F: FixedLengthField<E>, E: EthSpec, S: KeyValueStore<E>>(
     store: &S,
     slot: Slot,
     spec: &ChainSpec,
@@ -514,7 +514,7 @@ pub fn load_vector_from_db<F: FixedLengthField<E>, E: EthSpec, S: Store<E>>(
 }

 /// The historical roots are stored in vector chunks, despite not actually being a vector.
-pub fn load_variable_list_from_db<F: VariableLengthField<E>, E: EthSpec, S: Store<E>>(
+pub fn load_variable_list_from_db<F: VariableLengthField<E>, E: EthSpec, S: KeyValueStore<E>>(
     store: &S,
     slot: Slot,
     spec: &ChainSpec,
@@ -574,7 +574,7 @@ where
         Chunk { values }
     }

-    pub fn load<S: Store<E>, E: EthSpec>(
+    pub fn load<S: KeyValueStore<E>, E: EthSpec>(
         store: &S,
         column: DBColumn,
         key: &[u8],
@@ -585,7 +585,7 @@ where
         .transpose()
     }

-    pub fn store<S: Store<E>, E: EthSpec>(
+    pub fn store<S: KeyValueStore<E>, E: EthSpec>(
         &self,
         store: &S,
         column: DBColumn,
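Every `S: Store<E>` bound in the chunked-vector code above became `S: KeyValueStore<E>`: the chunk helpers read and write raw, column-addressed bytes and never need typed item access. A sketch of the narrowing, with the method set assumed from the delegation methods deleted later in this diff (`get_bytes`/`put_bytes`):

    // Low-level trait: raw bytes in a named column.
    trait KeyValueStore {
        fn get_bytes(&self, column: &str, key: &[u8]) -> Option<Vec<u8>>;
        fn put_bytes(&mut self, column: &str, key: &[u8], value: &[u8]);
    }

    // A helper in the style of `Chunk::load` needs nothing more than that,
    // so demanding the full high-level `Store` trait would over-constrain
    // callers.
    fn load_chunk_bytes<S: KeyValueStore>(
        store: &S,
        column: &str,
        key: &[u8],
    ) -> Option<Vec<u8>> {
        store.get_bytes(column, key)
    }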
@@ -1,7 +1,7 @@
 use crate::chunked_iter::ChunkedVectorIter;
 use crate::chunked_vector::BlockRoots;
 use crate::iter::{BlockRootsIterator, ReverseBlockRootIterator};
-use crate::{DiskStore, Store};
+use crate::{HotColdDB, Store};
 use slog::error;
 use std::sync::Arc;
 use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot};
@@ -31,7 +31,7 @@ pub enum HybridForwardsBlockRootsIterator<E: EthSpec> {

 impl<E: EthSpec> FrozenForwardsBlockRootsIterator<E> {
     pub fn new(
-        store: Arc<DiskStore<E>>,
+        store: Arc<HotColdDB<E>>,
         start_slot: Slot,
         last_restore_point_slot: Slot,
         spec: &ChainSpec,
@@ -87,7 +87,7 @@ impl Iterator for SimpleForwardsBlockRootsIterator {

 impl<E: EthSpec> HybridForwardsBlockRootsIterator<E> {
     pub fn new(
-        store: Arc<DiskStore<E>>,
+        store: Arc<HotColdDB<E>>,
         start_slot: Slot,
         end_state: BeaconState<E>,
         end_block_root: Hash256,

@@ -3,11 +3,12 @@ use crate::chunked_vector::{
 };
 use crate::config::StoreConfig;
 use crate::forwards_iter::HybridForwardsBlockRootsIterator;
-use crate::impls::beacon_state::store_full_state;
+use crate::impls::beacon_state::{get_full_state, store_full_state};
 use crate::iter::{ParentRootBlockIterator, StateRootsIterator};
 use crate::metrics;
 use crate::{
-    leveldb_store::LevelDB, DBColumn, Error, PartialBeaconState, SimpleStoreItem, Store, StoreOp,
+    leveldb_store::LevelDB, DBColumn, Error, ItemStore, KeyValueStore, PartialBeaconState, Store,
+    StoreItem, StoreOp,
 };
 use lru::LruCache;
 use parking_lot::{Mutex, RwLock};
@@ -86,27 +87,10 @@ pub enum HotColdDBError {
 impl<E: EthSpec> Store<E> for HotColdDB<E> {
     type ForwardsBlockRootsIterator = HybridForwardsBlockRootsIterator<E>;

-    // Defer to the hot database for basic operations (including blocks for now)
-    fn get_bytes(&self, column: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
-        self.hot_db.get_bytes(column, key)
-    }
-
-    fn put_bytes(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error> {
-        self.hot_db.put_bytes(column, key, value)
-    }
-
-    fn key_exists(&self, column: &str, key: &[u8]) -> Result<bool, Error> {
-        self.hot_db.key_exists(column, key)
-    }
-
-    fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error> {
-        self.hot_db.key_delete(column, key)
-    }
-
     /// Store a block and update the LRU cache.
     fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error> {
         // Store on disk.
-        self.put(block_root, &block)?;
+        self.hot_db.put(block_root, &block)?;

         // Update cache.
         self.block_cache.lock().put(*block_root, block);
@ -125,7 +109,7 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Fetch from database.
|
// Fetch from database.
|
||||||
match self.get::<SignedBeaconBlock<E>>(block_root)? {
|
match self.hot_db.get::<SignedBeaconBlock<E>>(block_root)? {
|
||||||
Some(block) => {
|
Some(block) => {
|
||||||
// Add to cache.
|
// Add to cache.
|
||||||
self.block_cache.lock().put(*block_root, block.clone());
|
self.block_cache.lock().put(*block_root, block.clone());
|
||||||
@ -138,7 +122,15 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
|||||||
/// Delete a block from the store and the block cache.
|
/// Delete a block from the store and the block cache.
|
||||||
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
|
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
|
||||||
self.block_cache.lock().pop(block_root);
|
self.block_cache.lock().pop(block_root);
|
||||||
self.delete::<SignedBeaconBlock<E>>(block_root)
|
self.hot_db.delete::<SignedBeaconBlock<E>>(block_root)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn put_state_summary(
|
||||||
|
&self,
|
||||||
|
state_root: &Hash256,
|
||||||
|
summary: HotStateSummary,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
self.hot_db.put(state_root, &summary).map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Store a state in the store.
|
/// Store a state in the store.
|
||||||
@ -155,17 +147,6 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
|||||||
&self,
|
&self,
|
||||||
state_root: &Hash256,
|
state_root: &Hash256,
|
||||||
slot: Option<Slot>,
|
slot: Option<Slot>,
|
||||||
) -> Result<Option<BeaconState<E>>, Error> {
|
|
||||||
self.get_state_with(state_root, slot)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get a state from the store.
|
|
||||||
///
|
|
||||||
/// Fetch a state from the store, controlling which cache fields are cloned.
|
|
||||||
fn get_state_with(
|
|
||||||
&self,
|
|
||||||
state_root: &Hash256,
|
|
||||||
slot: Option<Slot>,
|
|
||||||
) -> Result<Option<BeaconState<E>>, Error> {
|
) -> Result<Option<BeaconState<E>>, Error> {
|
||||||
metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT);
|
metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT);
|
||||||
|
|
||||||
@ -203,96 +184,6 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
|
|
||||||
let mut guard = self.block_cache.lock();
|
|
||||||
self.hot_db.do_atomically(batch)?;
|
|
||||||
for op in batch {
|
|
||||||
match op {
|
|
||||||
StoreOp::DeleteBlock(block_hash) => {
|
|
||||||
let untyped_hash: Hash256 = (*block_hash).into();
|
|
||||||
guard.pop(&untyped_hash);
|
|
||||||
}
|
|
||||||
StoreOp::DeleteState(_, _) => (),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Advance the split point of the store, moving new finalized states to the freezer.
|
|
||||||
fn process_finalization(
|
|
||||||
store: Arc<Self>,
|
|
||||||
frozen_head_root: Hash256,
|
|
||||||
frozen_head: &BeaconState<E>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
debug!(
|
|
||||||
store.log,
|
|
||||||
"Freezer migration started";
|
|
||||||
"slot" => frozen_head.slot
|
|
||||||
);
|
|
||||||
|
|
||||||
// 0. Check that the migration is sensible.
|
|
||||||
// The new frozen head must increase the current split slot, and lie on an epoch
|
|
||||||
// boundary (in order for the hot state summary scheme to work).
|
|
||||||
let current_split_slot = store.get_split_slot();
|
|
||||||
|
|
||||||
if frozen_head.slot < current_split_slot {
|
|
||||||
return Err(HotColdDBError::FreezeSlotError {
|
|
||||||
current_split_slot,
|
|
||||||
proposed_split_slot: frozen_head.slot,
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
if frozen_head.slot % E::slots_per_epoch() != 0 {
|
|
||||||
return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into());
|
|
||||||
}
|
|
||||||
|
|
||||||
// 1. Copy all of the states between the head and the split slot, from the hot DB
|
|
||||||
// to the cold DB.
|
|
||||||
let state_root_iter = StateRootsIterator::new(store.clone(), frozen_head);
|
|
||||||
|
|
||||||
let mut to_delete = vec![];
|
|
||||||
for (state_root, slot) in
|
|
||||||
state_root_iter.take_while(|&(_, slot)| slot >= current_split_slot)
|
|
||||||
{
|
|
||||||
if slot % store.config.slots_per_restore_point == 0 {
|
|
||||||
let state: BeaconState<E> = store
|
|
||||||
.hot_db
|
|
||||||
.get_state(&state_root, None)?
|
|
||||||
.ok_or_else(|| HotColdDBError::MissingStateToFreeze(state_root))?;
|
|
||||||
|
|
||||||
store.store_cold_state(&state_root, &state)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store a pointer from this state root to its slot, so we can later reconstruct states
|
|
||||||
// from their state root alone.
|
|
||||||
store.store_cold_state_slot(&state_root, slot)?;
|
|
||||||
|
|
||||||
// Delete the old summary, and the full state if we lie on an epoch boundary.
|
|
||||||
to_delete.push((state_root, slot));
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Update the split slot
|
|
||||||
*store.split.write() = Split {
|
|
||||||
slot: frozen_head.slot,
|
|
||||||
state_root: frozen_head_root,
|
|
||||||
};
|
|
||||||
store.store_split()?;
|
|
||||||
|
|
||||||
// 3. Delete from the hot DB
|
|
||||||
for (state_root, slot) in to_delete {
|
|
||||||
store.delete_state(&state_root, slot)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
store.log,
|
|
||||||
"Freezer migration complete";
|
|
||||||
"slot" => frozen_head.slot
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn forwards_block_roots_iterator(
|
fn forwards_block_roots_iterator(
|
||||||
store: Arc<Self>,
|
store: Arc<Self>,
|
||||||
start_slot: Slot,
|
start_slot: Slot,
|
||||||
@ -334,6 +225,33 @@ impl<E: EthSpec> Store<E> for HotColdDB<E> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
|
||||||
|
self.hot_db.put(key, item)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
|
||||||
|
self.hot_db.get(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
|
||||||
|
self.hot_db.exists::<I>(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
|
||||||
|
let mut guard = self.block_cache.lock();
|
||||||
|
self.hot_db.do_atomically(batch)?;
|
||||||
|
for op in batch {
|
||||||
|
match op {
|
||||||
|
StoreOp::DeleteBlock(block_hash) => {
|
||||||
|
let untyped_hash: Hash256 = (*block_hash).into();
|
||||||
|
guard.pop(&untyped_hash);
|
||||||
|
}
|
||||||
|
StoreOp::DeleteState(_, _) => (),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<E: EthSpec> HotColdDB<E> {
|
impl<E: EthSpec> HotColdDB<E> {
|
||||||
@ -408,9 +326,7 @@ impl<E: EthSpec> HotColdDB<E> {
|
|||||||
epoch_boundary_state_root,
|
epoch_boundary_state_root,
|
||||||
}) = self.load_hot_state_summary(state_root)?
|
}) = self.load_hot_state_summary(state_root)?
|
||||||
{
|
{
|
||||||
let boundary_state = self
|
let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)?
|
||||||
.hot_db
|
|
||||||
.get_state(&epoch_boundary_state_root, None)?
|
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root)
|
HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root)
|
||||||
})?;
|
})?;
|
||||||
@ -758,6 +674,77 @@ impl<E: EthSpec> HotColdDB<E> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Advance the split point of the store, moving new finalized states to the freezer.
|
||||||
|
pub fn process_finalization<E: EthSpec>(
|
||||||
|
store: Arc<HotColdDB<E>>,
|
||||||
|
frozen_head_root: Hash256,
|
||||||
|
frozen_head: &BeaconState<E>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
debug!(
|
||||||
|
store.log,
|
||||||
|
"Freezer migration started";
|
||||||
|
"slot" => frozen_head.slot
|
||||||
|
);
|
||||||
|
|
||||||
|
// 0. Check that the migration is sensible.
|
||||||
|
// The new frozen head must increase the current split slot, and lie on an epoch
|
||||||
|
// boundary (in order for the hot state summary scheme to work).
|
||||||
|
let current_split_slot = store.get_split_slot();
|
||||||
|
|
||||||
|
if frozen_head.slot < current_split_slot {
|
||||||
|
return Err(HotColdDBError::FreezeSlotError {
|
||||||
|
current_split_slot,
|
||||||
|
proposed_split_slot: frozen_head.slot,
|
||||||
|
}
|
||||||
|
.into());
|
||||||
|
}
|
||||||
|
|
||||||
|
if frozen_head.slot % E::slots_per_epoch() != 0 {
|
||||||
|
return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into());
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. Copy all of the states between the head and the split slot, from the hot DB
|
||||||
|
// to the cold DB.
|
||||||
|
let state_root_iter = StateRootsIterator::new(store.clone(), frozen_head);
|
||||||
|
|
||||||
|
let mut to_delete = vec![];
|
||||||
|
for (state_root, slot) in state_root_iter.take_while(|&(_, slot)| slot >= current_split_slot) {
|
||||||
|
if slot % store.config.slots_per_restore_point == 0 {
|
||||||
|
let state: BeaconState<E> = get_full_state(&store.hot_db, &state_root)?
|
||||||
|
.ok_or_else(|| HotColdDBError::MissingStateToFreeze(state_root))?;
|
||||||
|
|
||||||
|
store.store_cold_state(&state_root, &state)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store a pointer from this state root to its slot, so we can later reconstruct states
|
||||||
|
// from their state root alone.
|
||||||
|
store.store_cold_state_slot(&state_root, slot)?;
|
||||||
|
|
||||||
|
// Delete the old summary, and the full state if we lie on an epoch boundary.
|
||||||
|
to_delete.push((state_root, slot));
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Update the split slot
|
||||||
|
*store.split.write() = Split {
|
||||||
|
slot: frozen_head.slot,
|
||||||
|
state_root: frozen_head_root,
|
||||||
|
};
|
||||||
|
store.store_split()?;
|
||||||
|
|
||||||
|
// 3. Delete from the hot DB
|
||||||
|
for (state_root, slot) in to_delete {
|
||||||
|
store.delete_state(&state_root, slot)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
store.log,
|
||||||
|
"Freezer migration complete";
|
||||||
|
"slot" => frozen_head.slot
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Struct for storing the split slot and state root in the database.
|
/// Struct for storing the split slot and state root in the database.
|
||||||
#[derive(Debug, Clone, Copy, Default, Encode, Decode)]
|
#[derive(Debug, Clone, Copy, Default, Encode, Decode)]
|
||||||
struct Split {
|
struct Split {
|
||||||
@ -765,7 +752,7 @@ struct Split {
|
|||||||
state_root: Hash256,
|
state_root: Hash256,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SimpleStoreItem for Split {
|
impl StoreItem for Split {
|
||||||
fn db_column() -> DBColumn {
|
fn db_column() -> DBColumn {
|
||||||
DBColumn::BeaconMeta
|
DBColumn::BeaconMeta
|
||||||
}
|
}
|
||||||
@ -789,7 +776,7 @@ pub struct HotStateSummary {
|
|||||||
epoch_boundary_state_root: Hash256,
|
epoch_boundary_state_root: Hash256,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SimpleStoreItem for HotStateSummary {
|
impl StoreItem for HotStateSummary {
|
||||||
fn db_column() -> DBColumn {
|
fn db_column() -> DBColumn {
|
||||||
DBColumn::BeaconStateSummary
|
DBColumn::BeaconStateSummary
|
||||||
}
|
}
|
||||||
@ -832,7 +819,7 @@ struct ColdStateSummary {
|
|||||||
slot: Slot,
|
slot: Slot,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SimpleStoreItem for ColdStateSummary {
|
impl StoreItem for ColdStateSummary {
|
||||||
fn db_column() -> DBColumn {
|
fn db_column() -> DBColumn {
|
||||||
DBColumn::BeaconStateSummary
|
DBColumn::BeaconStateSummary
|
||||||
}
|
}
|
||||||
@ -852,7 +839,7 @@ struct RestorePointHash {
|
|||||||
state_root: Hash256,
|
state_root: Hash256,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SimpleStoreItem for RestorePointHash {
|
impl StoreItem for RestorePointHash {
|
||||||
fn db_column() -> DBColumn {
|
fn db_column() -> DBColumn {
|
||||||
DBColumn::BeaconRestorePoint
|
DBColumn::BeaconRestorePoint
|
||||||
}
|
}
|
||||||
|
@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
|
|||||||
pub mod beacon_state;
|
pub mod beacon_state;
|
||||||
pub mod partial_beacon_state;
|
pub mod partial_beacon_state;
|
||||||
|
|
||||||
impl<T: EthSpec> SimpleStoreItem for SignedBeaconBlock<T> {
|
impl<T: EthSpec> StoreItem for SignedBeaconBlock<T> {
|
||||||
fn db_column() -> DBColumn {
|
fn db_column() -> DBColumn {
|
||||||
DBColumn::BeaconBlock
|
DBColumn::BeaconBlock
|
||||||
}
|
}
|
||||||
|
@ -4,8 +4,8 @@ use ssz_derive::{Decode, Encode};
|
|||||||
use std::convert::TryInto;
|
use std::convert::TryInto;
|
||||||
use types::beacon_state::{CloneConfig, CommitteeCache, CACHED_EPOCHS};
|
use types::beacon_state::{CloneConfig, CommitteeCache, CACHED_EPOCHS};
|
||||||
|
|
||||||
pub fn store_full_state<S: Store<E>, E: EthSpec>(
|
pub fn store_full_state<KV: KeyValueStore<E>, E: EthSpec>(
|
||||||
store: &S,
|
store: &KV,
|
||||||
state_root: &Hash256,
|
state_root: &Hash256,
|
||||||
state: &BeaconState<E>,
|
state: &BeaconState<E>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
@ -24,13 +24,13 @@ pub fn store_full_state<S: Store<E>, E: EthSpec>(
|
|||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_full_state<S: Store<E>, E: EthSpec>(
|
pub fn get_full_state<KV: KeyValueStore<E>, E: EthSpec>(
|
||||||
store: &S,
|
db: &KV,
|
||||||
state_root: &Hash256,
|
state_root: &Hash256,
|
||||||
) -> Result<Option<BeaconState<E>>, Error> {
|
) -> Result<Option<BeaconState<E>>, Error> {
|
||||||
let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES);
|
let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES);
|
||||||
|
|
||||||
match store.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? {
|
match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? {
|
||||||
Some(bytes) => {
|
Some(bytes) => {
|
||||||
let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES);
|
let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES);
|
||||||
let container = StorageContainer::from_ssz_bytes(&bytes)?;
|
let container = StorageContainer::from_ssz_bytes(&bytes)?;
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
use crate::*;
|
use crate::*;
|
||||||
use ssz::{Decode, Encode};
|
use ssz::{Decode, Encode};
|
||||||
|
|
||||||
impl<T: EthSpec> SimpleStoreItem for PartialBeaconState<T> {
|
impl<T: EthSpec> StoreItem for PartialBeaconState<T> {
|
||||||
fn db_column() -> DBColumn {
|
fn db_column() -> DBColumn {
|
||||||
DBColumn::BeaconState
|
DBColumn::BeaconState
|
||||||
}
|
}
|
||||||
|
@ -42,11 +42,11 @@ impl<'a, U: Store<E>, E: EthSpec> AncestorIter<U, E, StateRootsIterator<'a, E, U
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct StateRootsIterator<'a, T: EthSpec, U> {
|
pub struct StateRootsIterator<'a, T: EthSpec, U: Store<T>> {
|
||||||
inner: RootsIterator<'a, T, U>,
|
inner: RootsIterator<'a, T, U>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> {
|
impl<'a, T: EthSpec, U: Store<T>> Clone for StateRootsIterator<'a, T, U> {
|
||||||
fn clone(&self) -> Self {
|
fn clone(&self) -> Self {
|
||||||
Self {
|
Self {
|
||||||
inner: self.inner.clone(),
|
inner: self.inner.clone(),
|
||||||
@ -86,11 +86,11 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for StateRootsIterator<'a, T, U> {
|
|||||||
/// exhausted.
|
/// exhausted.
|
||||||
///
|
///
|
||||||
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
|
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
|
||||||
pub struct BlockRootsIterator<'a, T: EthSpec, U> {
|
pub struct BlockRootsIterator<'a, T: EthSpec, U: Store<T>> {
|
||||||
inner: RootsIterator<'a, T, U>,
|
inner: RootsIterator<'a, T, U>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> {
|
impl<'a, T: EthSpec, U: Store<T>> Clone for BlockRootsIterator<'a, T, U> {
|
||||||
fn clone(&self) -> Self {
|
fn clone(&self) -> Self {
|
||||||
Self {
|
Self {
|
||||||
inner: self.inner.clone(),
|
inner: self.inner.clone(),
|
||||||
@ -125,13 +125,13 @@ impl<'a, T: EthSpec, U: Store<T>> Iterator for BlockRootsIterator<'a, T, U> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`.
|
/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`.
|
||||||
pub struct RootsIterator<'a, T: EthSpec, U> {
|
pub struct RootsIterator<'a, T: EthSpec, U: Store<T>> {
|
||||||
store: Arc<U>,
|
store: Arc<U>,
|
||||||
beacon_state: Cow<'a, BeaconState<T>>,
|
beacon_state: Cow<'a, BeaconState<T>>,
|
||||||
slot: Slot,
|
slot: Slot,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, T: EthSpec, U> Clone for RootsIterator<'a, T, U> {
|
impl<'a, T: EthSpec, U: Store<T>> Clone for RootsIterator<'a, T, U> {
|
||||||
fn clone(&self) -> Self {
|
fn clone(&self) -> Self {
|
||||||
Self {
|
Self {
|
||||||
store: self.store.clone(),
|
store: self.store.clone(),
|
||||||
@ -245,7 +245,7 @@ impl<'a, E: EthSpec, S: Store<E>> Iterator for ParentRootBlockIterator<'a, E, S>
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
/// Extends `BlockRootsIterator`, returning `SignedBeaconBlock` instances, instead of their roots.
|
/// Extends `BlockRootsIterator`, returning `SignedBeaconBlock` instances, instead of their roots.
|
||||||
pub struct BlockIterator<'a, T: EthSpec, U> {
|
pub struct BlockIterator<'a, T: EthSpec, U: Store<T>> {
|
||||||
roots: BlockRootsIterator<'a, T, U>,
|
roots: BlockRootsIterator<'a, T, U>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,6 +1,4 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
use crate::forwards_iter::SimpleForwardsBlockRootsIterator;
|
|
||||||
use crate::impls::beacon_state::{get_full_state, store_full_state};
|
|
||||||
use crate::metrics;
|
use crate::metrics;
|
||||||
use db_key::Key;
|
use db_key::Key;
|
||||||
use leveldb::database::batch::{Batch, Writebatch};
|
use leveldb::database::batch::{Batch, Writebatch};
|
||||||
@ -39,35 +37,12 @@ impl<E: EthSpec> LevelDB<E> {
|
|||||||
fn write_options(&self) -> WriteOptions {
|
fn write_options(&self) -> WriteOptions {
|
||||||
WriteOptions::new()
|
WriteOptions::new()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey {
|
|
||||||
let mut col = col.as_bytes().to_vec();
|
|
||||||
col.append(&mut key.to_vec());
|
|
||||||
BytesKey { key: col }
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Used for keying leveldb.
|
impl<E: EthSpec> KeyValueStore<E> for LevelDB<E> {
|
||||||
pub struct BytesKey {
|
|
||||||
key: Vec<u8>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Key for BytesKey {
|
|
||||||
fn from_u8(key: &[u8]) -> Self {
|
|
||||||
Self { key: key.to_vec() }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn as_slice<T, F: Fn(&[u8]) -> T>(&self, f: F) -> T {
|
|
||||||
f(self.key.as_slice())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<E: EthSpec> Store<E> for LevelDB<E> {
|
|
||||||
type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator;
|
|
||||||
|
|
||||||
/// Retrieve some bytes in `column` with `key`.
|
/// Retrieve some bytes in `column` with `key`.
|
||||||
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
||||||
let column_key = Self::get_key_for_col(col, key);
|
let column_key = get_key_for_col(col, key);
|
||||||
|
|
||||||
metrics::inc_counter(&metrics::DISK_DB_READ_COUNT);
|
metrics::inc_counter(&metrics::DISK_DB_READ_COUNT);
|
||||||
let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES);
|
let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES);
|
||||||
@ -86,7 +61,7 @@ impl<E: EthSpec> Store<E> for LevelDB<E> {
|
|||||||
|
|
||||||
/// Store some `value` in `column`, indexed with `key`.
|
/// Store some `value` in `column`, indexed with `key`.
|
||||||
fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> {
|
fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> {
|
||||||
let column_key = Self::get_key_for_col(col, key);
|
let column_key = get_key_for_col(col, key);
|
||||||
|
|
||||||
metrics::inc_counter(&metrics::DISK_DB_WRITE_COUNT);
|
metrics::inc_counter(&metrics::DISK_DB_WRITE_COUNT);
|
||||||
metrics::inc_counter_by(&metrics::DISK_DB_WRITE_BYTES, val.len() as i64);
|
metrics::inc_counter_by(&metrics::DISK_DB_WRITE_BYTES, val.len() as i64);
|
||||||
@ -102,7 +77,7 @@ impl<E: EthSpec> Store<E> for LevelDB<E> {
|
|||||||
|
|
||||||
/// Return `true` if `key` exists in `column`.
|
/// Return `true` if `key` exists in `column`.
|
||||||
fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> {
|
fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> {
|
||||||
let column_key = Self::get_key_for_col(col, key);
|
let column_key = get_key_for_col(col, key);
|
||||||
|
|
||||||
metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT);
|
metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT);
|
||||||
|
|
||||||
@ -114,7 +89,7 @@ impl<E: EthSpec> Store<E> for LevelDB<E> {
|
|||||||
|
|
||||||
/// Removes `key` from `column`.
|
/// Removes `key` from `column`.
|
||||||
fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> {
|
fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> {
|
||||||
let column_key = Self::get_key_for_col(col, key);
|
let column_key = get_key_for_col(col, key);
|
||||||
|
|
||||||
metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT);
|
metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT);
|
||||||
|
|
||||||
@ -123,56 +98,28 @@ impl<E: EthSpec> Store<E> for LevelDB<E> {
|
|||||||
.map_err(Into::into)
|
.map_err(Into::into)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Store a state in the store.
|
|
||||||
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
|
|
||||||
store_full_state(self, state_root, &state)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetch a state from the store.
|
|
||||||
fn get_state(
|
|
||||||
&self,
|
|
||||||
state_root: &Hash256,
|
|
||||||
_: Option<Slot>,
|
|
||||||
) -> Result<Option<BeaconState<E>>, Error> {
|
|
||||||
get_full_state(self, state_root)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn forwards_block_roots_iterator(
|
|
||||||
store: Arc<Self>,
|
|
||||||
start_slot: Slot,
|
|
||||||
end_state: BeaconState<E>,
|
|
||||||
end_block_root: Hash256,
|
|
||||||
_: &ChainSpec,
|
|
||||||
) -> Self::ForwardsBlockRootsIterator {
|
|
||||||
SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn do_atomically(&self, ops_batch: &[StoreOp]) -> Result<(), Error> {
|
fn do_atomically(&self, ops_batch: &[StoreOp]) -> Result<(), Error> {
|
||||||
let mut leveldb_batch = Writebatch::new();
|
let mut leveldb_batch = Writebatch::new();
|
||||||
for op in ops_batch {
|
for op in ops_batch {
|
||||||
match op {
|
match op {
|
||||||
StoreOp::DeleteBlock(block_hash) => {
|
StoreOp::DeleteBlock(block_hash) => {
|
||||||
let untyped_hash: Hash256 = (*block_hash).into();
|
let untyped_hash: Hash256 = (*block_hash).into();
|
||||||
let key = Self::get_key_for_col(
|
let key =
|
||||||
DBColumn::BeaconBlock.into(),
|
get_key_for_col(DBColumn::BeaconBlock.into(), untyped_hash.as_bytes());
|
||||||
untyped_hash.as_bytes(),
|
|
||||||
);
|
|
||||||
leveldb_batch.delete(key);
|
leveldb_batch.delete(key);
|
||||||
}
|
}
|
||||||
|
|
||||||
StoreOp::DeleteState(state_hash, slot) => {
|
StoreOp::DeleteState(state_hash, slot) => {
|
||||||
let untyped_hash: Hash256 = (*state_hash).into();
|
let untyped_hash: Hash256 = (*state_hash).into();
|
||||||
let state_summary_key = Self::get_key_for_col(
|
let state_summary_key = get_key_for_col(
|
||||||
DBColumn::BeaconStateSummary.into(),
|
DBColumn::BeaconStateSummary.into(),
|
||||||
untyped_hash.as_bytes(),
|
untyped_hash.as_bytes(),
|
||||||
);
|
);
|
||||||
leveldb_batch.delete(state_summary_key);
|
leveldb_batch.delete(state_summary_key);
|
||||||
|
|
||||||
if *slot % E::slots_per_epoch() == 0 {
|
if *slot % E::slots_per_epoch() == 0 {
|
||||||
let state_key = Self::get_key_for_col(
|
let state_key =
|
||||||
DBColumn::BeaconState.into(),
|
get_key_for_col(DBColumn::BeaconState.into(), untyped_hash.as_bytes());
|
||||||
untyped_hash.as_bytes(),
|
|
||||||
);
|
|
||||||
leveldb_batch.delete(state_key);
|
leveldb_batch.delete(state_key);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -183,6 +130,29 @@ impl<E: EthSpec> Store<E> for LevelDB<E> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<E: EthSpec> ItemStore<E> for LevelDB<E> {}
|
||||||
|
|
||||||
|
/// Used for keying leveldb.
|
||||||
|
pub struct BytesKey {
|
||||||
|
key: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Key for BytesKey {
|
||||||
|
fn from_u8(key: &[u8]) -> Self {
|
||||||
|
Self { key: key.to_vec() }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn as_slice<T, F: Fn(&[u8]) -> T>(&self, f: F) -> T {
|
||||||
|
f(self.key.as_slice())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey {
|
||||||
|
let mut col = col.as_bytes().to_vec();
|
||||||
|
col.append(&mut key.to_vec());
|
||||||
|
BytesKey { key: col }
|
||||||
|
}
|
||||||
|
|
||||||
impl From<LevelDBError> for Error {
|
impl From<LevelDBError> for Error {
|
||||||
fn from(e: LevelDBError) -> Error {
|
fn from(e: LevelDBError) -> Error {
|
||||||
Error::DBError {
|
Error::DBError {
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
//!
|
//!
|
||||||
//! Provides the following stores:
|
//! Provides the following stores:
|
||||||
//!
|
//!
|
||||||
//! - `DiskStore`: an on-disk store backed by leveldb. Used in production.
|
//! - `HotColdDB`: an on-disk store backed by leveldb. Used in production.
|
||||||
//! - `MemoryStore`: an in-memory store backed by a hash-map. Used for testing.
|
//! - `MemoryStore`: an in-memory store backed by a hash-map. Used for testing.
|
||||||
//!
|
//!
|
||||||
//! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See
|
//! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See
|
||||||
@ -28,8 +28,8 @@ pub mod iter;
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
pub use self::config::StoreConfig;
|
pub use self::config::StoreConfig;
|
||||||
pub use self::hot_cold_store::{HotColdDB as DiskStore, HotStateSummary};
|
pub use self::hot_cold_store::{HotColdDB, HotStateSummary};
|
||||||
pub use self::leveldb_store::LevelDB as SimpleDiskStore;
|
pub use self::leveldb_store::LevelDB;
|
||||||
pub use self::memory_store::MemoryStore;
|
pub use self::memory_store::MemoryStore;
|
||||||
pub use self::partial_beacon_state::PartialBeaconState;
|
pub use self::partial_beacon_state::PartialBeaconState;
|
||||||
pub use errors::Error;
|
pub use errors::Error;
|
||||||
@ -38,14 +38,7 @@ pub use metrics::scrape_for_metrics;
|
|||||||
pub use state_batch::StateBatch;
|
pub use state_batch::StateBatch;
|
||||||
pub use types::*;
|
pub use types::*;
|
||||||
|
|
||||||
/// An object capable of storing and retrieving objects implementing `StoreItem`.
|
pub trait KeyValueStore<E: EthSpec>: Sync + Send + Sized + 'static {
|
||||||
///
|
|
||||||
/// A `Store` is fundamentally backed by a key-value database, however it provides support for
|
|
||||||
/// columns. A simple column implementation might involve prefixing a key with some bytes unique to
|
|
||||||
/// each column.
|
|
||||||
pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
|
||||||
type ForwardsBlockRootsIterator: Iterator<Item = (Hash256, Slot)>;
|
|
||||||
|
|
||||||
/// Retrieve some bytes in `column` with `key`.
|
/// Retrieve some bytes in `column` with `key`.
|
||||||
fn get_bytes(&self, column: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error>;
|
fn get_bytes(&self, column: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error>;
|
||||||
|
|
||||||
@ -58,8 +51,13 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
|||||||
/// Removes `key` from `column`.
|
/// Removes `key` from `column`.
|
||||||
fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>;
|
fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>;
|
||||||
|
|
||||||
|
/// Execute either all of the operations in `batch` or none at all, returning an error.
|
||||||
|
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub trait ItemStore<E: EthSpec>: KeyValueStore<E> + Sync + Send + Sized + 'static {
|
||||||
/// Store an item in `Self`.
|
/// Store an item in `Self`.
|
||||||
fn put<I: SimpleStoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
|
fn put<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
|
||||||
let column = I::db_column().into();
|
let column = I::db_column().into();
|
||||||
let key = key.as_bytes();
|
let key = key.as_bytes();
|
||||||
|
|
||||||
@ -68,7 +66,7 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Retrieve an item from `Self`.
|
/// Retrieve an item from `Self`.
|
||||||
fn get<I: SimpleStoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
|
fn get<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
|
||||||
let column = I::db_column().into();
|
let column = I::db_column().into();
|
||||||
let key = key.as_bytes();
|
let key = key.as_bytes();
|
||||||
|
|
||||||
@ -79,7 +77,7 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns `true` if the given key represents an item in `Self`.
|
/// Returns `true` if the given key represents an item in `Self`.
|
||||||
fn exists<I: SimpleStoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
|
fn exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
|
||||||
let column = I::db_column().into();
|
let column = I::db_column().into();
|
||||||
let key = key.as_bytes();
|
let key = key.as_bytes();
|
||||||
|
|
||||||
@ -87,44 +85,40 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Remove an item from `Self`.
|
/// Remove an item from `Self`.
|
||||||
fn delete<I: SimpleStoreItem>(&self, key: &Hash256) -> Result<(), Error> {
|
fn delete<I: StoreItem>(&self, key: &Hash256) -> Result<(), Error> {
|
||||||
let column = I::db_column().into();
|
let column = I::db_column().into();
|
||||||
let key = key.as_bytes();
|
let key = key.as_bytes();
|
||||||
|
|
||||||
self.key_delete(column, key)
|
self.key_delete(column, key)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object capable of storing and retrieving objects implementing `StoreItem`.
|
||||||
|
///
|
||||||
|
/// A `Store` is fundamentally backed by a key-value database, however it provides support for
|
||||||
|
/// columns. A simple column implementation might involve prefixing a key with some bytes unique to
|
||||||
|
/// each column.
|
||||||
|
pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
||||||
|
type ForwardsBlockRootsIterator: Iterator<Item = (Hash256, Slot)>;
|
||||||
|
|
||||||
/// Store a block in the store.
|
/// Store a block in the store.
|
||||||
fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error> {
|
fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error>;
|
||||||
self.put(block_root, &block)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetch a block from the store.
|
/// Fetch a block from the store.
|
||||||
fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error> {
|
fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error>;
|
||||||
self.get(block_root)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Delete a block from the store.
|
/// Delete a block from the store.
|
||||||
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
|
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error>;
|
||||||
self.key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Store a state in the store.
|
/// Store a state in the store.
|
||||||
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error>;
|
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error>;
|
||||||
|
|
||||||
/// Execute either all of the operations in `batch` or none at all, returning an error.
|
|
||||||
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>;
|
|
||||||
|
|
||||||
/// Store a state summary in the store.
|
/// Store a state summary in the store.
|
||||||
// NOTE: this is a hack for the HotColdDb, we could consider splitting this
|
|
||||||
// trait and removing the generic `S: Store` types everywhere?
|
|
||||||
fn put_state_summary(
|
fn put_state_summary(
|
||||||
&self,
|
&self,
|
||||||
state_root: &Hash256,
|
state_root: &Hash256,
|
||||||
summary: HotStateSummary,
|
summary: HotStateSummary,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error>;
|
||||||
self.put(state_root, &summary).map_err(Into::into)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetch a state from the store.
|
/// Fetch a state from the store.
|
||||||
fn get_state(
|
fn get_state(
|
||||||
@ -133,33 +127,12 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
|||||||
slot: Option<Slot>,
|
slot: Option<Slot>,
|
||||||
) -> Result<Option<BeaconState<E>>, Error>;
|
) -> Result<Option<BeaconState<E>>, Error>;
|
||||||
|
|
||||||
/// Fetch a state from the store, controlling which cache fields are cloned.
|
|
||||||
fn get_state_with(
|
|
||||||
&self,
|
|
||||||
state_root: &Hash256,
|
|
||||||
slot: Option<Slot>,
|
|
||||||
) -> Result<Option<BeaconState<E>>, Error> {
|
|
||||||
// Default impl ignores config. Overriden in `HotColdDb`.
|
|
||||||
self.get_state(state_root, slot)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Delete a state from the store.
|
/// Delete a state from the store.
|
||||||
fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error> {
|
fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error>;
|
||||||
self.key_delete(DBColumn::BeaconState.into(), state_root.as_bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// (Optionally) Move all data before the frozen slot to the freezer database.
|
|
||||||
fn process_finalization(
|
|
||||||
_store: Arc<Self>,
|
|
||||||
_frozen_head_root: Hash256,
|
|
||||||
_frozen_head: &BeaconState<E>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get a forwards (slot-ascending) iterator over the beacon block roots since `start_slot`.
|
/// Get a forwards (slot-ascending) iterator over the beacon block roots since `start_slot`.
|
||||||
///
|
///
|
||||||
/// Will be efficient for frozen portions of the database if using `DiskStore`.
|
/// Will be efficient for frozen portions of the database if using `HotColdDB`.
|
||||||
///
|
///
|
||||||
/// The `end_state` and `end_block_root` are required for backtracking in the post-finalization
|
/// The `end_state` and `end_block_root` are required for backtracking in the post-finalization
|
||||||
/// part of the chain, and should be usually be set to the current head. Importantly, the
|
/// part of the chain, and should be usually be set to the current head. Importantly, the
|
||||||
@ -175,28 +148,18 @@ pub trait Store<E: EthSpec>: Sync + Send + Sized + 'static {
|
|||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
) -> Self::ForwardsBlockRootsIterator;
|
) -> Self::ForwardsBlockRootsIterator;
|
||||||
|
|
||||||
/// Load the most recent ancestor state of `state_root` which lies on an epoch boundary.
|
|
||||||
///
|
|
||||||
/// If `state_root` corresponds to an epoch boundary state, then that state itself should be
|
|
||||||
/// returned.
|
|
||||||
fn load_epoch_boundary_state(
|
fn load_epoch_boundary_state(
|
||||||
&self,
|
&self,
|
||||||
state_root: &Hash256,
|
state_root: &Hash256,
|
||||||
) -> Result<Option<BeaconState<E>>, Error> {
|
) -> Result<Option<BeaconState<E>>, Error>;
|
||||||
// The default implementation is not very efficient, but isn't used in prod.
|
|
||||||
// See `HotColdDB` for the optimized implementation.
|
fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error>;
|
||||||
if let Some(state) = self.get_state(state_root, None)? {
|
|
||||||
let epoch_boundary_slot = state.slot / E::slots_per_epoch() * E::slots_per_epoch();
|
fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error>;
|
||||||
if state.slot == epoch_boundary_slot {
|
|
||||||
Ok(Some(state))
|
fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error>;
|
||||||
} else {
|
|
||||||
let epoch_boundary_state_root = state.get_state_root(epoch_boundary_slot)?;
|
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>;
|
||||||
self.get_state(epoch_boundary_state_root, Some(epoch_boundary_slot))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Reified key-value storage operation. Helps in modifying the storage atomically.
|
/// Reified key-value storage operation. Helps in modifying the storage atomically.
|
||||||
@ -252,7 +215,7 @@ impl Into<&'static str> for DBColumn {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// An item that may stored in a `Store` by serializing and deserializing from bytes.
|
/// An item that may stored in a `Store` by serializing and deserializing from bytes.
|
||||||
pub trait SimpleStoreItem: Sized {
|
pub trait StoreItem: Sized {
|
||||||
/// Identifies which column this item should be placed in.
|
/// Identifies which column this item should be placed in.
|
||||||
fn db_column() -> DBColumn;
|
fn db_column() -> DBColumn;
|
||||||
|
|
||||||
@ -278,7 +241,7 @@ mod tests {
|
|||||||
b: u64,
|
b: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SimpleStoreItem for StorableThing {
|
impl StoreItem for StorableThing {
|
||||||
fn db_column() -> DBColumn {
|
fn db_column() -> DBColumn {
|
||||||
DBColumn::BeaconBlock
|
DBColumn::BeaconBlock
|
||||||
}
|
}
|
||||||
@ -292,7 +255,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn test_impl(store: impl Store<MinimalEthSpec>) {
|
fn test_impl(store: impl ItemStore<MinimalEthSpec>) {
|
||||||
let key = Hash256::random();
|
let key = Hash256::random();
|
||||||
let item = StorableThing { a: 1, b: 42 };
|
let item = StorableThing { a: 1, b: 42 };
|
||||||
|
|
||||||
@ -312,31 +275,11 @@ mod tests {
|
|||||||
assert_eq!(store.get::<StorableThing>(&key).unwrap(), None);
|
assert_eq!(store.get::<StorableThing>(&key).unwrap(), None);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn diskdb() {
|
|
||||||
use sloggers::{null::NullLoggerBuilder, Build};
|
|
||||||
|
|
||||||
let hot_dir = tempdir().unwrap();
|
|
||||||
let cold_dir = tempdir().unwrap();
|
|
||||||
let spec = MinimalEthSpec::default_spec();
|
|
||||||
let log = NullLoggerBuilder.build().unwrap();
|
|
||||||
let store = DiskStore::open(
|
|
||||||
&hot_dir.path(),
|
|
||||||
&cold_dir.path(),
|
|
||||||
StoreConfig::default(),
|
|
||||||
spec,
|
|
||||||
log,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
test_impl(store);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn simplediskdb() {
|
fn simplediskdb() {
|
||||||
let dir = tempdir().unwrap();
|
let dir = tempdir().unwrap();
|
||||||
let path = dir.path();
|
let path = dir.path();
|
||||||
let store = SimpleDiskStore::open(&path).unwrap();
|
let store = LevelDB::open(&path).unwrap();
|
||||||
|
|
||||||
test_impl(store);
|
test_impl(store);
|
||||||
}
|
}
|
||||||
|
@ -1,6 +1,8 @@
|
|||||||
use super::{DBColumn, Error, Store, StoreOp};
|
use super::{DBColumn, Error, ItemStore, KeyValueStore, Store, StoreOp};
|
||||||
use crate::forwards_iter::SimpleForwardsBlockRootsIterator;
|
use crate::forwards_iter::SimpleForwardsBlockRootsIterator;
|
||||||
|
use crate::hot_cold_store::HotStateSummary;
|
||||||
use crate::impls::beacon_state::{get_full_state, store_full_state};
|
use crate::impls::beacon_state::{get_full_state, store_full_state};
|
||||||
|
use crate::StoreItem;
|
||||||
use parking_lot::RwLock;
|
use parking_lot::RwLock;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
@ -40,9 +42,7 @@ impl<E: EthSpec> MemoryStore<E> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<E: EthSpec> Store<E> for MemoryStore<E> {
|
impl<E: EthSpec> KeyValueStore<E> for MemoryStore<E> {
|
||||||
type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator;
|
|
||||||
|
|
||||||
/// Get the value of some key from the database. Returns `None` if the key does not exist.
|
/// Get the value of some key from the database. Returns `None` if the key does not exist.
|
||||||
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
||||||
let column_key = Self::get_key_for_col(col, key);
|
let column_key = Self::get_key_for_col(col, key);
|
||||||
@ -75,20 +75,6 @@ impl<E: EthSpec> Store<E> for MemoryStore<E> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Store a state in the store.
|
|
||||||
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
|
|
||||||
store_full_state(self, state_root, &state)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Fetch a state from the store.
|
|
||||||
fn get_state(
|
|
||||||
&self,
|
|
||||||
state_root: &Hash256,
|
|
||||||
_: Option<Slot>,
|
|
||||||
) -> Result<Option<BeaconState<E>>, Error> {
|
|
||||||
get_full_state(self, state_root)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
|
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
|
||||||
for op in batch {
|
for op in batch {
|
||||||
match op {
|
match op {
|
||||||
@ -112,6 +98,50 @@ impl<E: EthSpec> Store<E> for MemoryStore<E> {
|
|||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<E: EthSpec> ItemStore<E> for MemoryStore<E> {}
|
||||||
|
|
||||||
|
impl<E: EthSpec> Store<E> for MemoryStore<E> {
|
||||||
|
type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator;
|
||||||
|
|
||||||
|
fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error> {
|
||||||
|
self.put(block_root, &block)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error> {
|
||||||
|
self.get(block_root)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
|
||||||
|
self.key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn put_state_summary(
|
||||||
|
&self,
|
||||||
|
state_root: &Hash256,
|
||||||
|
summary: HotStateSummary,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
self.put(state_root, &summary).map_err(Into::into)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Store a state in the store.
|
||||||
|
fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
|
||||||
|
store_full_state(self, state_root, &state)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch a state from the store.
|
||||||
|
fn get_state(
|
||||||
|
&self,
|
||||||
|
state_root: &Hash256,
|
||||||
|
_: Option<Slot>,
|
||||||
|
) -> Result<Option<BeaconState<E>>, Error> {
|
||||||
|
get_full_state(self, state_root)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error> {
|
||||||
|
self.key_delete(DBColumn::BeaconState.into(), state_root.as_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
fn forwards_block_roots_iterator(
|
fn forwards_block_roots_iterator(
|
||||||
store: Arc<Self>,
|
store: Arc<Self>,
|
||||||
@ -122,4 +152,43 @@ impl<E: EthSpec> Store<E> for MemoryStore<E> {
|
|||||||
) -> Self::ForwardsBlockRootsIterator {
|
) -> Self::ForwardsBlockRootsIterator {
|
||||||
SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root)
|
SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Load the most recent ancestor state of `state_root` which lies on an epoch boundary.
|
||||||
|
///
|
||||||
|
/// If `state_root` corresponds to an epoch boundary state, then that state itself should be
|
||||||
|
/// returned.
|
||||||
|
fn load_epoch_boundary_state(
|
||||||
|
&self,
|
||||||
|
state_root: &Hash256,
|
||||||
|
) -> Result<Option<BeaconState<E>>, Error> {
|
||||||
|
// The default implementation is not very efficient, but isn't used in prod.
|
||||||
|
// See `HotColdDB` for the optimized implementation.
|
||||||
|
if let Some(state) = self.get_state(state_root, None)? {
|
||||||
|
let epoch_boundary_slot = state.slot / E::slots_per_epoch() * E::slots_per_epoch();
|
||||||
|
if state.slot == epoch_boundary_slot {
|
||||||
|
Ok(Some(state))
|
||||||
|
} else {
|
||||||
|
let epoch_boundary_state_root = state.get_state_root(epoch_boundary_slot)?;
|
||||||
|
self.get_state(epoch_boundary_state_root, Some(epoch_boundary_slot))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
|
||||||
|
self.put(key, item)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
|
||||||
|
self.get(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
|
||||||
|
self.exists::<I>(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
|
||||||
|
KeyValueStore::do_atomically(self, batch)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -2,7 +2,7 @@ use crate::chunked_vector::{
|
|||||||
load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes,
|
load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes,
|
||||||
StateRoots,
|
StateRoots,
|
||||||
};
|
};
|
||||||
use crate::{Error, Store};
|
use crate::{Error, KeyValueStore};
|
||||||
use ssz_derive::{Decode, Encode};
|
use ssz_derive::{Decode, Encode};
|
||||||
use std::convert::TryInto;
|
use std::convert::TryInto;
|
||||||
use types::*;
|
use types::*;
|
||||||
@ -113,7 +113,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn load_block_roots<S: Store<T>>(
|
pub fn load_block_roots<S: KeyValueStore<T>>(
|
||||||
&mut self,
|
&mut self,
|
||||||
store: &S,
|
store: &S,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -126,7 +126,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn load_state_roots<S: Store<T>>(
|
pub fn load_state_roots<S: KeyValueStore<T>>(
|
||||||
&mut self,
|
&mut self,
|
||||||
store: &S,
|
store: &S,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -139,7 +139,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn load_historical_roots<S: Store<T>>(
|
pub fn load_historical_roots<S: KeyValueStore<T>>(
|
||||||
&mut self,
|
&mut self,
|
||||||
store: &S,
|
store: &S,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
@ -152,7 +152,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn load_randao_mixes<S: Store<T>>(
|
pub fn load_randao_mixes<S: KeyValueStore<T>>(
|
||||||
&mut self,
|
&mut self,
|
||||||
store: &S,
|
store: &S,
|
||||||
spec: &ChainSpec,
|
spec: &ChainSpec,
|
||||||
|
@ -91,13 +91,6 @@ pub struct SszEncoder<'a> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<'a> SszEncoder<'a> {
|
impl<'a> SszEncoder<'a> {
|
||||||
/// Instantiate a new encoder for encoding a SSZ list.
|
|
||||||
///
|
|
||||||
/// Identical to `Self::container`.
|
|
||||||
pub fn list(buf: &'a mut Vec<u8>, num_fixed_bytes: usize) -> Self {
|
|
||||||
Self::container(buf, num_fixed_bytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Instantiate a new encoder for encoding a SSZ container.
|
/// Instantiate a new encoder for encoding a SSZ container.
|
||||||
pub fn container(buf: &'a mut Vec<u8>, num_fixed_bytes: usize) -> Self {
|
pub fn container(buf: &'a mut Vec<u8>, num_fixed_bytes: usize) -> Self {
|
||||||
buf.reserve(num_fixed_bytes);
|
buf.reserve(num_fixed_bytes);
|
||||||
|
@ -256,7 +256,8 @@ macro_rules! impl_for_vec {
|
|||||||
item.ssz_append(buf);
|
item.ssz_append(buf);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let mut encoder = SszEncoder::list(buf, self.len() * BYTES_PER_LENGTH_OFFSET);
|
let mut encoder =
|
||||||
|
SszEncoder::container(buf, self.len() * BYTES_PER_LENGTH_OFFSET);
|
||||||
|
|
||||||
for item in self {
|
for item in self {
|
||||||
encoder.append(item);
|
encoder.append(item);
|
||||||
|
@ -196,7 +196,8 @@ where
|
|||||||
item.ssz_append(buf);
|
item.ssz_append(buf);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let mut encoder = ssz::SszEncoder::list(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET);
|
let mut encoder =
|
||||||
|
ssz::SszEncoder::container(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET);
|
||||||
|
|
||||||
for item in &self.vec {
|
for item in &self.vec {
|
||||||
encoder.append(item);
|
encoder.append(item);
|
||||||
|
Loading…
Reference in New Issue
Block a user