diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 544e6e006..be0881970 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -151,7 +151,7 @@ pub struct HeadInfo { pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store<Self::EthSpec>; - type StoreMigrator: Migrate<Self::Store, Self::EthSpec>; + type StoreMigrator: Migrate<Self::EthSpec>; type SlotClock: slot_clock::SlotClock; type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; @@ -241,7 +241,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE); - self.store.put( + self.store.put_item( &Hash256::from_slice(&FORK_CHOICE_DB_KEY), &self.fork_choice.as_ssz_container(), )?; @@ -250,7 +250,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let head_timer = metrics::start_timer(&metrics::PERSIST_HEAD); self.store - .put(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?; + .put_item(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?; metrics::stop_timer(head_timer); @@ -266,7 +266,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn persist_op_pool(&self) -> Result<(), Error> { let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); - self.store.put( + self.store.put_item( &Hash256::from_slice(&OP_POOL_DB_KEY), &PersistedOperationPool::from_operation_pool(&self.op_pool), )?; @@ -281,7 +281,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let timer = metrics::start_timer(&metrics::PERSIST_OP_POOL); if let Some(eth1_chain) = self.eth1_chain.as_ref() { - self.store.put( + self.store.put_item( &Hash256::from_slice(&ETH1_CACHE_DB_KEY), &eth1_chain.as_ssz_container(), )?; @@ -426,7 +426,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .map(|(root, _)| root); if let Some(block_root) = root { - Ok(self.store.get(&block_root)?) + Ok(self.store.get_item(&block_root)?) } else { Ok(None) } @@ -1934,7 +1934,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> { Ok(!self .store - .exists::<SignedBeaconBlock<T::EthSpec>>(beacon_block_root)?) + .item_exists::<SignedBeaconBlock<T::EthSpec>>(beacon_block_root)?) } /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 840044520..ff8d5be71 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -48,7 +48,7 @@ impl for Witness where TStore: Store<TEthSpec> + 'static, - TStoreMigrator: Migrate<TStore, TEthSpec> + 'static, + TStoreMigrator: Migrate<TEthSpec> + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -98,7 +98,7 @@ impl > where TStore: Store<TEthSpec> + 'static, - TStoreMigrator: Migrate<TStore, TEthSpec> + 'static, + TStoreMigrator: Migrate<TEthSpec> + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -184,7 +184,7 @@ where .ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?; store - .get::<SszEth1>(&Hash256::from_slice(&ETH1_CACHE_DB_KEY)) + .get_item::<SszEth1>(&Hash256::from_slice(&ETH1_CACHE_DB_KEY)) .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e)) } @@ -196,7 +196,7 @@ where .ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?; Ok(store - .get::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) + .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))? 
.is_some()) } @@ -227,7 +227,7 @@ where .ok_or_else(|| "resume_from_db requires a store.".to_string())?; let chain = store - .get::(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) + .get_item::(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY)) .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))? .ok_or_else(|| { "No persisted beacon chain found in store. Try purging the beacon chain database." @@ -242,7 +242,7 @@ where let head_block_root = chain.canonical_head_block_root; let head_block = store - .get::>(&head_block_root) + .get_item::>(&head_block_root) .map_err(|e| format!("DB error when reading head block: {:?}", e))? .ok_or_else(|| "Head block not found in store".to_string())?; let head_state_root = head_block.state_root(); @@ -253,7 +253,7 @@ where self.op_pool = Some( store - .get::>(&Hash256::from_slice(&OP_POOL_DB_KEY)) + .get_item::>(&Hash256::from_slice(&OP_POOL_DB_KEY)) .map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))? .map(|persisted| persisted.into_operation_pool(&head_state, &self.spec)) .unwrap_or_else(|| OperationPool::new()), @@ -261,7 +261,7 @@ where let finalized_block_root = head_state.finalized_checkpoint.root; let finalized_block = store - .get::>(&finalized_block_root) + .get_item::>(&finalized_block_root) .map_err(|e| format!("DB error when reading finalized block: {:?}", e))? .ok_or_else(|| "Finalized block not found in store".to_string())?; let finalized_state_root = finalized_block.state_root(); @@ -317,16 +317,18 @@ where .put_state(&beacon_state_root, &beacon_state) .map_err(|e| format!("Failed to store genesis state: {:?}", e))?; store - .put(&beacon_block_root, &beacon_block) + .put_item(&beacon_block_root, &beacon_block) .map_err(|e| format!("Failed to store genesis block: {:?}", e))?; // Store the genesis block under the `ZERO_HASH` key. 
- store.put(&Hash256::zero(), &beacon_block).map_err(|e| { - format!( - "Failed to store genesis block under 0x00..00 alias: {:?}", - e - ) - })?; + store + .put_item(&Hash256::zero(), &beacon_block) + .map_err(|e| { + format!( + "Failed to store genesis block under 0x00..00 alias: {:?}", + e + ) + })?; self.finalized_snapshot = Some(BeaconSnapshot { beacon_block_root, @@ -484,7 +486,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -501,7 +503,7 @@ where .ok_or_else(|| "reduced_tree_fork_choice requires a store.".to_string())?; let persisted_fork_choice = store - .get::(&Hash256::from_slice(&FORK_CHOICE_DB_KEY)) + .get_item::(&Hash256::from_slice(&FORK_CHOICE_DB_KEY)) .map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?; let fork_choice = if let Some(persisted) = persisted_fork_choice { @@ -554,7 +556,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -592,7 +594,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate + 'static, + TStoreMigrator: Migrate + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -631,7 +633,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate + 'static, + TStoreMigrator: Migrate + 'static, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 47929fdee..53a3104f1 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -10,7 +10,7 @@ use std::collections::HashMap; use std::iter::DoubleEndedIterator; use std::marker::PhantomData; use std::sync::Arc; -use store::{DBColumn, Error as StoreError, SimpleStoreItem, Store}; +use store::{DBColumn, Error as StoreError, Store, StoreItem}; use types::{ BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, DEPOSIT_TREE_DEPTH, @@ -59,7 +59,7 @@ pub struct SszEth1 { backend_bytes: Vec, } -impl SimpleStoreItem for SszEth1 { +impl StoreItem for SszEth1 { fn db_column() -> DBColumn { DBColumn::Eth1Cache } diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 6e1b912c1..c2718711d 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -8,7 +8,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use state_processing::common::get_indexed_attestation; use std::marker::PhantomData; -use store::{DBColumn, Error as StoreError, SimpleStoreItem}; +use store::{DBColumn, Error as StoreError, StoreItem}; use types::{BeaconBlock, BeaconState, BeaconStateError, Epoch, Hash256, IndexedAttestation, Slot}; type Result = std::result::Result; @@ -285,7 +285,7 @@ impl From for Error { } } -impl SimpleStoreItem for SszForkChoice { +impl StoreItem for SszForkChoice { fn db_column() -> DBColumn { DBColumn::ForkChoice } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index d9d541be2..e90ee97a9 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ 
b/beacon_node/beacon_chain/src/migrate.rs @@ -7,15 +7,16 @@ use std::mem; use std::sync::mpsc; use std::sync::Arc; use std::thread; +use store::hot_cold_store::{process_finalization, HotColdDBError}; use store::iter::{ParentRootBlockIterator, RootsIterator}; -use store::{hot_cold_store::HotColdDBError, Error, SimpleDiskStore, Store, StoreOp}; -pub use store::{DiskStore, MemoryStore}; +use store::{Error, Store, StoreOp}; +pub use store::{HotColdDB, MemoryStore}; use types::*; use types::{BeaconState, EthSpec, Hash256, Slot}; /// Trait for migration processes that update the database upon finalization. -pub trait Migrate, E: EthSpec>: Send + Sync + 'static { - fn new(db: Arc, log: Logger) -> Self; +pub trait Migrate: Send + Sync + 'static { + fn new(db: Arc>, log: Logger) -> Self; fn process_finalization( &self, @@ -35,7 +36,7 @@ pub trait Migrate, E: EthSpec>: Send + Sync + 'static { /// Assumptions: /// * It is called after every finalization. fn prune_abandoned_forks( - store: Arc, + store: Arc>, head_tracker: Arc, old_finalized_block_hash: SignedBeaconBlockHash, new_finalized_block_hash: SignedBeaconBlockHash, @@ -164,14 +165,8 @@ pub trait Migrate, E: EthSpec>: Send + Sync + 'static { /// Migrator that does nothing, for stores that don't need migration. pub struct NullMigrator; -impl Migrate, E> for NullMigrator { - fn new(_: Arc>, _: Logger) -> Self { - NullMigrator - } -} - -impl Migrate, E> for NullMigrator { - fn new(_: Arc>, _: Logger) -> Self { +impl Migrate for NullMigrator { + fn new(_: Arc>, _: Logger) -> Self { NullMigrator } } @@ -179,12 +174,12 @@ impl Migrate, E> for NullMigrator { /// Migrator that immediately calls the store's migration function, blocking the current execution. /// /// Mostly useful for tests. -pub struct BlockingMigrator { - db: Arc, +pub struct BlockingMigrator { + db: Arc>, } -impl> Migrate for BlockingMigrator { - fn new(db: Arc, _: Logger) -> Self { +impl Migrate for BlockingMigrator { + fn new(db: Arc>, _: Logger) -> Self { BlockingMigrator { db } } @@ -197,7 +192,7 @@ impl> Migrate for BlockingMigrator { old_finalized_block_hash: SignedBeaconBlockHash, new_finalized_block_hash: SignedBeaconBlockHash, ) { - if let Err(e) = S::process_finalization(self.db.clone(), state_root, &new_finalized_state) { + if let Err(e) = process_finalization(self.db.clone(), state_root, &new_finalized_state) { // This migrator is only used for testing, so we just log to stderr without a logger. eprintln!("Migration error: {:?}", e); } @@ -225,13 +220,13 @@ type MpscSender = mpsc::Sender<( /// Migrator that runs a background thread to migrate state from the hot to the cold database. pub struct BackgroundMigrator { - db: Arc>, + db: Arc>, tx_thread: Mutex<(MpscSender, thread::JoinHandle<()>)>, log: Logger, } -impl Migrate, E> for BackgroundMigrator { - fn new(db: Arc>, log: Logger) -> Self { +impl Migrate for BackgroundMigrator { + fn new(db: Arc>, log: Logger) -> Self { let tx_thread = Mutex::new(Self::spawn_thread(db.clone(), log.clone())); Self { db, tx_thread, log } } @@ -293,7 +288,7 @@ impl BackgroundMigrator { /// /// Return a channel handle for sending new finalized states to the thread. 
fn spawn_thread( - db: Arc>, + db: Arc>, log: Logger, ) -> ( mpsc::Sender<( @@ -317,7 +312,7 @@ impl BackgroundMigrator { new_finalized_slot, )) = rx.recv() { - match DiskStore::process_finalization(db.clone(), state_root, &state) { + match process_finalization(db.clone(), state_root, &state) { Ok(()) => {} Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 3c310b3f3..2ca29e9ed 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -1,7 +1,7 @@ use crate::head_tracker::SszHeadTracker; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use store::{DBColumn, Error as StoreError, SimpleStoreItem}; +use store::{DBColumn, Error as StoreError, StoreItem}; use types::Hash256; #[derive(Clone, Encode, Decode)] @@ -11,7 +11,7 @@ pub struct PersistedBeaconChain { pub ssz_head_tracker: SszHeadTracker, } -impl SimpleStoreItem for PersistedBeaconChain { +impl StoreItem for PersistedBeaconChain { fn db_column() -> DBColumn { DBColumn::BeaconChain } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8c2238128..752a56486 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -18,7 +18,7 @@ use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; -use store::{DiskStore, MemoryStore, Store}; +use store::{HotColdDB, MemoryStore, Store}; use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; use types::{ @@ -44,7 +44,7 @@ pub type BaseHarnessType = Witness< >; pub type HarnessType = BaseHarnessType, NullMigrator, E>; -pub type DiskHarnessType = BaseHarnessType, BlockingMigrator>, E>; +pub type DiskHarnessType = BaseHarnessType, BlockingMigrator, E>; /// Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] @@ -140,7 +140,7 @@ impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. pub fn new_with_disk_store( eth_spec_instance: E, - store: Arc>, + store: Arc>, keypairs: Vec, ) -> Self { let data_dir = tempdir().expect("should create temporary data_dir"); @@ -152,10 +152,7 @@ impl BeaconChainHarness> { .logger(log.clone()) .custom_spec(spec.clone()) .store(store.clone()) - .store_migrator( as Migrate<_, E>>::new( - store, - log.clone(), - )) + .store_migrator(BlockingMigrator::new(store, log.clone())) .data_dir(data_dir.path().to_path_buf()) .genesis_state( interop_genesis_state::(&keypairs, HARNESS_GENESIS_TIME, &spec) @@ -183,7 +180,7 @@ impl BeaconChainHarness> { /// Instantiate a new harness with `validator_count` initial validators. pub fn resume_from_disk_store( eth_spec_instance: E, - store: Arc>, + store: Arc>, keypairs: Vec, data_dir: TempDir, ) -> Self { @@ -195,10 +192,7 @@ impl BeaconChainHarness> { .logger(log.clone()) .custom_spec(spec) .store(store.clone()) - .store_migrator( as Migrate<_, E>>::new( - store, - log.clone(), - )) + .store_migrator( as Migrate>::new(store, log.clone())) .data_dir(data_dir.path().to_path_buf()) .resume_from_db() .expect("should resume beacon chain from db") @@ -224,7 +218,7 @@ impl BeaconChainHarness> { impl BeaconChainHarness> where S: Store, - M: Migrate, + M: Migrate, E: EthSpec, { /// Advance the slot of the `BeaconChain`. 
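To make the renamed item API concrete before the test diffs below, here is a minimal sketch. It is not part of this diff: `Counter` and `demo` are hypothetical, and `DBColumn::BeaconMeta` is an arbitrary column choice. It shows a type implementing `StoreItem` (formerly `SimpleStoreItem`) and round-tripping through the new `Store::put_item`/`get_item` accessors:

```rust
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use store::{DBColumn, Error as StoreError, MemoryStore, Store, StoreItem};
use types::{Hash256, MinimalEthSpec};

// Hypothetical item type, SSZ-encoded like the other `StoreItem` impls above.
#[derive(Clone, Encode, Decode)]
struct Counter {
    count: u64,
}

impl StoreItem for Counter {
    fn db_column() -> DBColumn {
        // Arbitrary column choice for the sketch.
        DBColumn::BeaconMeta
    }

    fn as_store_bytes(&self) -> Vec<u8> {
        self.as_ssz_bytes()
    }

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
        Self::from_ssz_bytes(bytes).map_err(Into::into)
    }
}

fn demo(store: &MemoryStore<MinimalEthSpec>) -> Result<(), StoreError> {
    let key = Hash256::zero();
    // Typed `put`/`get` now live on `ItemStore`; `Store` re-exposes them as
    // `put_item`/`get_item` so callers need not name the key-value layer.
    store.put_item(&key, &Counter { count: 1 })?;
    let _restored: Option<Counter> = store.get_item(&key)?;
    Ok(())
}
```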
diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 224339609..7a06ca65b 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -988,7 +988,7 @@ fn attestation_that_skips_epochs() { let block_slot = harness .chain .store - .get::>(&block_root) + .get_item::>(&block_root) .expect("should not error getting block") .expect("should find attestation block") .message diff --git a/beacon_node/beacon_chain/tests/persistence_tests.rs b/beacon_node/beacon_chain/tests/persistence_tests.rs index 5fa063a73..95670c55a 100644 --- a/beacon_node/beacon_chain/tests/persistence_tests.rs +++ b/beacon_node/beacon_chain/tests/persistence_tests.rs @@ -9,7 +9,7 @@ use beacon_chain::{ }; use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; -use store::{DiskStore, StoreConfig}; +use store::{HotColdDB, StoreConfig}; use tempfile::{tempdir, TempDir}; use types::{EthSpec, Keypair, MinimalEthSpec}; @@ -23,14 +23,14 @@ lazy_static! { static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); } -fn get_store(db_path: &TempDir) -> Arc> { +fn get_store(db_path: &TempDir) -> Arc> { let spec = E::default_spec(); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let config = StoreConfig::default(); let log = NullLoggerBuilder.build().expect("logger should build"); Arc::new( - DiskStore::open(&hot_path, &cold_path, config, spec, log) + HotColdDB::open(&hot_path, &cold_path, config, spec, log) .expect("disk store should initialize"), ) } diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index c08edfcfe..39b8556a7 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -16,7 +16,7 @@ use std::collections::HashSet; use std::sync::Arc; use store::{ iter::{BlockRootsIterator, StateRootsIterator}, - DiskStore, Store, StoreConfig, + HotColdDB, Store, StoreConfig, }; use tempfile::{tempdir, TempDir}; use tree_hash::TreeHash; @@ -35,19 +35,19 @@ lazy_static! { type E = MinimalEthSpec; type TestHarness = BeaconChainHarness>; -fn get_store(db_path: &TempDir) -> Arc> { +fn get_store(db_path: &TempDir) -> Arc> { let spec = MinimalEthSpec::default_spec(); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); let config = StoreConfig::default(); let log = NullLoggerBuilder.build().expect("logger should build"); Arc::new( - DiskStore::open(&hot_path, &cold_path, config, spec, log) + HotColdDB::open(&hot_path, &cold_path, config, spec, log) .expect("disk store should initialize"), ) } -fn get_harness(store: Arc>, validator_count: usize) -> TestHarness { +fn get_harness(store: Arc>, validator_count: usize) -> TestHarness { let harness = BeaconChainHarness::new_with_disk_store( MinimalEthSpec, store, @@ -1305,8 +1305,8 @@ fn check_finalization(harness: &TestHarness, expected_slot: u64) { ); } -/// Check that the DiskStore's split_slot is equal to the start slot of the last finalized epoch. -fn check_split_slot(harness: &TestHarness, store: Arc>) { +/// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch. 
+fn check_split_slot(harness: &TestHarness, store: Arc>) { let split_slot = store.get_split_slot(); assert_eq!( harness diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 0cc89d123..38d6083bf 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -354,7 +354,7 @@ fn roundtrip_operation_pool() { let restored_op_pool = harness .chain .store - .get::>(&key) + .get_item::>(&key) .expect("should read db") .expect("should find op pool") .into_operation_pool(&head_state, &harness.spec); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 8e0699bfe..717471ec6 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -6,7 +6,7 @@ use beacon_chain::{ eth1_chain::{CachingEth1Backend, Eth1Chain}, migrate::{BackgroundMigrator, Migrate, NullMigrator}, slot_clock::{SlotClock, SystemTimeSlotClock}, - store::{DiskStore, MemoryStore, SimpleDiskStore, Store, StoreConfig}, + store::{HotColdDB, MemoryStore, Store, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, EventHandler, }; use environment::RuntimeContext; @@ -65,7 +65,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + Clone + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -376,7 +376,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + Clone + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -423,7 +423,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + 'static, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, @@ -462,7 +462,7 @@ where impl ClientBuilder< Witness< - DiskStore, + HotColdDB, TStoreMigrator, TSlotClock, TEth1Backend, @@ -472,12 +472,12 @@ impl > where TSlotClock: SlotClock + 'static, - TStoreMigrator: Migrate, TEthSpec> + 'static, - TEth1Backend: Eth1ChainBackend> + 'static, + TStoreMigrator: Migrate + 'static, + TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { - /// Specifies that the `Client` should use a `DiskStore` database. + /// Specifies that the `Client` should use a `HotColdDB` database. pub fn disk_store( mut self, hot_path: &Path, @@ -494,40 +494,13 @@ where .clone() .ok_or_else(|| "disk_store requires a chain spec".to_string())?; - let store = DiskStore::open(hot_path, cold_path, config, spec, context.log) + let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log) .map_err(|e| format!("Unable to open database: {:?}", e))?; self.store = Some(Arc::new(store)); Ok(self) } } -impl - ClientBuilder< - Witness< - SimpleDiskStore, - TStoreMigrator, - TSlotClock, - TEth1Backend, - TEthSpec, - TEventHandler, - >, - > -where - TSlotClock: SlotClock + 'static, - TStoreMigrator: Migrate, TEthSpec> + 'static, - TEth1Backend: Eth1ChainBackend> + 'static, - TEthSpec: EthSpec + 'static, - TEventHandler: EventHandler + 'static, -{ - /// Specifies that the `Client` should use a `DiskStore` database. 
- pub fn simple_disk_store(mut self, path: &Path) -> Result { - let store = - SimpleDiskStore::open(path).map_err(|e| format!("Unable to open database: {:?}", e))?; - self.store = Some(Arc::new(store)); - Ok(self) - } -} - impl ClientBuilder< Witness< @@ -559,7 +532,7 @@ where impl ClientBuilder< Witness< - DiskStore, + HotColdDB, BackgroundMigrator, TSlotClock, TEth1Backend, @@ -569,7 +542,7 @@ impl > where TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend> + 'static, + TEth1Backend: Eth1ChainBackend> + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, { @@ -600,7 +573,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate, + TStoreMigrator: Migrate, TSlotClock: SlotClock + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, @@ -706,7 +679,7 @@ impl > where TStore: Store + 'static, - TStoreMigrator: Migrate, + TStoreMigrator: Migrate, TEth1Backend: Eth1ChainBackend + 'static, TEthSpec: EthSpec + 'static, TEventHandler: EventHandler + 'static, diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index bf9d70430..c0c811259 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -1,7 +1,7 @@ use eth2_libp2p::Enr; use rlp; use std::sync::Arc; -use store::{DBColumn, Error as StoreError, SimpleStoreItem, Store}; +use store::{DBColumn, Error as StoreError, Store, StoreItem}; use types::{EthSpec, Hash256}; /// 32-byte key for accessing the `DhtEnrs`. @@ -10,7 +10,7 @@ pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE"; pub fn load_dht, E: EthSpec>(store: Arc) -> Vec { // Load DHT from store let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); - match store.get(&key) { + match store.get_item(&key) { Ok(Some(p)) => { let p: PersistedDht = p; p.enrs @@ -25,7 +25,7 @@ pub fn persist_dht, E: EthSpec>( enrs: Vec, ) -> Result<(), store::Error> { let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); - store.put(&key, &PersistedDht { enrs })?; + store.put_item(&key, &PersistedDht { enrs })?; Ok(()) } @@ -34,7 +34,7 @@ pub struct PersistedDht { pub enrs: Vec, } -impl SimpleStoreItem for PersistedDht { +impl StoreItem for PersistedDht { fn db_column() -> DBColumn { DBColumn::DhtEnrs } @@ -67,9 +67,9 @@ mod tests { let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()]; let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes()); store - .put(&key, &PersistedDht { enrs: enrs.clone() }) + .put_item(&key, &PersistedDht { enrs: enrs.clone() }) .unwrap(); - let dht: PersistedDht = store.get(&key).unwrap().unwrap(); + let dht: PersistedDht = store.get_item(&key).unwrap().unwrap(); assert_eq!(dht.enrs, enrs); } } diff --git a/beacon_node/network/src/router/processor.rs b/beacon_node/network/src/router/processor.rs index 76f0cdf44..7cab8ad31 100644 --- a/beacon_node/network/src/router/processor.rs +++ b/beacon_node/network/src/router/processor.rs @@ -252,7 +252,7 @@ impl Processor { } else if self .chain .store - .exists::>(&remote.head_root) + .item_exists::>(&remote.head_root) .unwrap_or_else(|_| false) { debug!( diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 547877cfd..0e682f467 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -4,7 
+4,7 @@ use parking_lot::RwLock; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use store::{DBColumn, Error as StoreError, SimpleStoreItem}; +use store::{DBColumn, Error as StoreError, StoreItem}; use types::*; /// SSZ-serializable version of `OperationPool`. @@ -102,7 +102,7 @@ impl PersistedOperationPool { } } -impl SimpleStoreItem for PersistedOperationPool { +impl StoreItem for PersistedOperationPool { fn db_column() -> DBColumn { DBColumn::OpPool } diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index dd1381ae4..ca3162a12 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -10,7 +10,7 @@ pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; pub use config::{get_data_dir, get_eth2_testnet_config, get_testnet_dir}; pub use eth2_config::Eth2Config; -use beacon_chain::migrate::{BackgroundMigrator, DiskStore}; +use beacon_chain::migrate::{BackgroundMigrator, HotColdDB}; use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, events::WebSocketSender, slot_clock::SystemTimeSlotClock, @@ -25,10 +25,10 @@ use types::EthSpec; /// A type-alias to the tighten the definition of a production-intended `Client`. pub type ProductionClient = Client< Witness< - DiskStore, + HotColdDB, BackgroundMigrator, SystemTimeSlotClock, - CachingEth1Backend>, + CachingEth1Backend>, E, WebSocketSender, >, diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs index 794bbbb89..979acd6ea 100644 --- a/beacon_node/store/src/chunked_iter.rs +++ b/beacon_node/store/src/chunked_iter.rs @@ -1,5 +1,5 @@ use crate::chunked_vector::{chunk_key, Chunk, Field}; -use crate::DiskStore; +use crate::HotColdDB; use slog::error; use std::sync::Arc; use types::{ChainSpec, EthSpec, Slot}; @@ -12,7 +12,7 @@ where F: Field, E: EthSpec, { - pub(crate) store: Arc>, + pub(crate) store: Arc>, current_vindex: usize, pub(crate) end_vindex: usize, next_cindex: usize, @@ -28,10 +28,10 @@ where /// index stored by the restore point at `last_restore_point_slot`. /// /// The `last_restore_point` slot should be the slot of a recent restore point as obtained from - /// `DiskStore::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can + /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). pub fn new( - store: Arc>, + store: Arc>, start_vindex: usize, last_restore_point_slot: Slot, spec: &ChainSpec, diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs index b876545c7..eec16ba55 100644 --- a/beacon_node/store/src/chunked_vector.rs +++ b/beacon_node/store/src/chunked_vector.rs @@ -177,7 +177,7 @@ pub trait Field: Copy { /// Load the genesis value for a fixed length field from the store. /// /// This genesis value should be used to fill the initial state of the vector. - fn load_genesis_value>(store: &S) -> Result { + fn load_genesis_value>(store: &S) -> Result { let key = &genesis_value_key()[..]; let chunk = Chunk::load(store, Self::column(), key)?.ok_or(ChunkError::MissingGenesisValue)?; @@ -192,7 +192,7 @@ pub trait Field: Copy { /// /// Check the existing value (if any) for consistency with the value we intend to store, and /// return an error if they are inconsistent. 
- fn check_and_store_genesis_value>( + fn check_and_store_genesis_value>( store: &S, value: Self::Value, ) -> Result<(), Error> { @@ -327,7 +327,7 @@ field!( |state: &BeaconState<_>, index, _| safe_modulo_index(&state.randao_mixes, index) ); -pub fn store_updated_vector, E: EthSpec, S: Store>( +pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( field: F, store: &S, state: &BeaconState, @@ -387,7 +387,7 @@ fn store_range( where F: Field, E: EthSpec, - S: Store, + S: KeyValueStore, I: Iterator, { for chunk_index in range { @@ -417,7 +417,7 @@ where // Chunks at the end index are included. // TODO: could be more efficient with a real range query (perhaps RocksDB) -fn range_query, E: EthSpec, T: Decode + Encode>( +fn range_query, E: EthSpec, T: Decode + Encode>( store: &S, column: DBColumn, start_index: usize, @@ -482,7 +482,7 @@ fn stitch( Ok(result) } -pub fn load_vector_from_db, E: EthSpec, S: Store>( +pub fn load_vector_from_db, E: EthSpec, S: KeyValueStore>( store: &S, slot: Slot, spec: &ChainSpec, @@ -514,7 +514,7 @@ pub fn load_vector_from_db, E: EthSpec, S: Store>( } /// The historical roots are stored in vector chunks, despite not actually being a vector. -pub fn load_variable_list_from_db, E: EthSpec, S: Store>( +pub fn load_variable_list_from_db, E: EthSpec, S: KeyValueStore>( store: &S, slot: Slot, spec: &ChainSpec, @@ -574,7 +574,7 @@ where Chunk { values } } - pub fn load, E: EthSpec>( + pub fn load, E: EthSpec>( store: &S, column: DBColumn, key: &[u8], @@ -585,7 +585,7 @@ where .transpose() } - pub fn store, E: EthSpec>( + pub fn store, E: EthSpec>( &self, store: &S, column: DBColumn, diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 1c2431fc3..08b9d92a6 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -1,7 +1,7 @@ use crate::chunked_iter::ChunkedVectorIter; use crate::chunked_vector::BlockRoots; use crate::iter::{BlockRootsIterator, ReverseBlockRootIterator}; -use crate::{DiskStore, Store}; +use crate::{HotColdDB, Store}; use slog::error; use std::sync::Arc; use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; @@ -31,7 +31,7 @@ pub enum HybridForwardsBlockRootsIterator { impl FrozenForwardsBlockRootsIterator { pub fn new( - store: Arc>, + store: Arc>, start_slot: Slot, last_restore_point_slot: Slot, spec: &ChainSpec, @@ -87,7 +87,7 @@ impl Iterator for SimpleForwardsBlockRootsIterator { impl HybridForwardsBlockRootsIterator { pub fn new( - store: Arc>, + store: Arc>, start_slot: Slot, end_state: BeaconState, end_block_root: Hash256, diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 08c586af1..ec93d9756 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -3,11 +3,12 @@ use crate::chunked_vector::{ }; use crate::config::StoreConfig; use crate::forwards_iter::HybridForwardsBlockRootsIterator; -use crate::impls::beacon_state::store_full_state; +use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::iter::{ParentRootBlockIterator, StateRootsIterator}; use crate::metrics; use crate::{ - leveldb_store::LevelDB, DBColumn, Error, PartialBeaconState, SimpleStoreItem, Store, StoreOp, + leveldb_store::LevelDB, DBColumn, Error, ItemStore, KeyValueStore, PartialBeaconState, Store, + StoreItem, StoreOp, }; use lru::LruCache; use parking_lot::{Mutex, RwLock}; @@ -86,27 +87,10 @@ pub enum HotColdDBError { impl Store for HotColdDB { type 
ForwardsBlockRootsIterator = HybridForwardsBlockRootsIterator; - // Defer to the hot database for basic operations (including blocks for now) - fn get_bytes(&self, column: &str, key: &[u8]) -> Result>, Error> { - self.hot_db.get_bytes(column, key) - } - - fn put_bytes(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error> { - self.hot_db.put_bytes(column, key, value) - } - - fn key_exists(&self, column: &str, key: &[u8]) -> Result { - self.hot_db.key_exists(column, key) - } - - fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error> { - self.hot_db.key_delete(column, key) - } - /// Store a block and update the LRU cache. fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock) -> Result<(), Error> { // Store on disk. - self.put(block_root, &block)?; + self.hot_db.put(block_root, &block)?; // Update cache. self.block_cache.lock().put(*block_root, block); @@ -125,7 +109,7 @@ impl Store for HotColdDB { } // Fetch from database. - match self.get::>(block_root)? { + match self.hot_db.get::>(block_root)? { Some(block) => { // Add to cache. self.block_cache.lock().put(*block_root, block.clone()); @@ -138,7 +122,15 @@ impl Store for HotColdDB { /// Delete a block from the store and the block cache. fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { self.block_cache.lock().pop(block_root); - self.delete::>(block_root) + self.hot_db.delete::>(block_root) + } + + fn put_state_summary( + &self, + state_root: &Hash256, + summary: HotStateSummary, + ) -> Result<(), Error> { + self.hot_db.put(state_root, &summary).map_err(Into::into) } /// Store a state in the store. @@ -155,17 +147,6 @@ impl Store for HotColdDB { &self, state_root: &Hash256, slot: Option, - ) -> Result>, Error> { - self.get_state_with(state_root, slot) - } - - /// Get a state from the store. - /// - /// Fetch a state from the store, controlling which cache fields are cloned. - fn get_state_with( - &self, - state_root: &Hash256, - slot: Option, ) -> Result>, Error> { metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT); @@ -203,96 +184,6 @@ impl Store for HotColdDB { Ok(()) } - fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> { - let mut guard = self.block_cache.lock(); - self.hot_db.do_atomically(batch)?; - for op in batch { - match op { - StoreOp::DeleteBlock(block_hash) => { - let untyped_hash: Hash256 = (*block_hash).into(); - guard.pop(&untyped_hash); - } - StoreOp::DeleteState(_, _) => (), - } - } - Ok(()) - } - - /// Advance the split point of the store, moving new finalized states to the freezer. - fn process_finalization( - store: Arc, - frozen_head_root: Hash256, - frozen_head: &BeaconState, - ) -> Result<(), Error> { - debug!( - store.log, - "Freezer migration started"; - "slot" => frozen_head.slot - ); - - // 0. Check that the migration is sensible. - // The new frozen head must increase the current split slot, and lie on an epoch - // boundary (in order for the hot state summary scheme to work). - let current_split_slot = store.get_split_slot(); - - if frozen_head.slot < current_split_slot { - return Err(HotColdDBError::FreezeSlotError { - current_split_slot, - proposed_split_slot: frozen_head.slot, - } - .into()); - } - - if frozen_head.slot % E::slots_per_epoch() != 0 { - return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into()); - } - - // 1. Copy all of the states between the head and the split slot, from the hot DB - // to the cold DB. 
- let state_root_iter = StateRootsIterator::new(store.clone(), frozen_head); - - let mut to_delete = vec![]; - for (state_root, slot) in - state_root_iter.take_while(|&(_, slot)| slot >= current_split_slot) - { - if slot % store.config.slots_per_restore_point == 0 { - let state: BeaconState = store - .hot_db - .get_state(&state_root, None)? - .ok_or_else(|| HotColdDBError::MissingStateToFreeze(state_root))?; - - store.store_cold_state(&state_root, &state)?; - } - - // Store a pointer from this state root to its slot, so we can later reconstruct states - // from their state root alone. - store.store_cold_state_slot(&state_root, slot)?; - - // Delete the old summary, and the full state if we lie on an epoch boundary. - to_delete.push((state_root, slot)); - } - - // 2. Update the split slot - *store.split.write() = Split { - slot: frozen_head.slot, - state_root: frozen_head_root, - }; - store.store_split()?; - - // 3. Delete from the hot DB - for (state_root, slot) in to_delete { - store.delete_state(&state_root, slot)?; - } - - debug!( - store.log, - "Freezer migration complete"; - "slot" => frozen_head.slot - ); - - Ok(()) - } - fn forwards_block_roots_iterator( store: Arc, start_slot: Slot, @@ -334,6 +225,33 @@ impl Store for HotColdDB { } } } + + fn put_item(&self, key: &Hash256, item: &I) -> Result<(), Error> { + self.hot_db.put(key, item) + } + + fn get_item(&self, key: &Hash256) -> Result, Error> { + self.hot_db.get(key) + } + + fn item_exists(&self, key: &Hash256) -> Result { + self.hot_db.exists::(key) + } + + fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> { + let mut guard = self.block_cache.lock(); + self.hot_db.do_atomically(batch)?; + for op in batch { + match op { + StoreOp::DeleteBlock(block_hash) => { + let untyped_hash: Hash256 = (*block_hash).into(); + guard.pop(&untyped_hash); + } + StoreOp::DeleteState(_, _) => (), + } + } + Ok(()) + } } impl HotColdDB { @@ -408,9 +326,7 @@ impl HotColdDB { epoch_boundary_state_root, }) = self.load_hot_state_summary(state_root)? { - let boundary_state = self - .hot_db - .get_state(&epoch_boundary_state_root, None)? + let boundary_state = get_full_state(&self.hot_db, &epoch_boundary_state_root)? .ok_or_else(|| { HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root) })?; @@ -758,6 +674,77 @@ impl HotColdDB { } } +/// Advance the split point of the store, moving new finalized states to the freezer. +pub fn process_finalization( + store: Arc>, + frozen_head_root: Hash256, + frozen_head: &BeaconState, +) -> Result<(), Error> { + debug!( + store.log, + "Freezer migration started"; + "slot" => frozen_head.slot + ); + + // 0. Check that the migration is sensible. + // The new frozen head must increase the current split slot, and lie on an epoch + // boundary (in order for the hot state summary scheme to work). + let current_split_slot = store.get_split_slot(); + + if frozen_head.slot < current_split_slot { + return Err(HotColdDBError::FreezeSlotError { + current_split_slot, + proposed_split_slot: frozen_head.slot, + } + .into()); + } + + if frozen_head.slot % E::slots_per_epoch() != 0 { + return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot).into()); + } + + // 1. Copy all of the states between the head and the split slot, from the hot DB + // to the cold DB. 
+ let state_root_iter = StateRootsIterator::new(store.clone(), frozen_head); + + let mut to_delete = vec![]; + for (state_root, slot) in state_root_iter.take_while(|&(_, slot)| slot >= current_split_slot) { + if slot % store.config.slots_per_restore_point == 0 { + let state: BeaconState = get_full_state(&store.hot_db, &state_root)? + .ok_or_else(|| HotColdDBError::MissingStateToFreeze(state_root))?; + + store.store_cold_state(&state_root, &state)?; + } + + // Store a pointer from this state root to its slot, so we can later reconstruct states + // from their state root alone. + store.store_cold_state_slot(&state_root, slot)?; + + // Delete the old summary, and the full state if we lie on an epoch boundary. + to_delete.push((state_root, slot)); + } + + // 2. Update the split slot + *store.split.write() = Split { + slot: frozen_head.slot, + state_root: frozen_head_root, + }; + store.store_split()?; + + // 3. Delete from the hot DB + for (state_root, slot) in to_delete { + store.delete_state(&state_root, slot)?; + } + + debug!( + store.log, + "Freezer migration complete"; + "slot" => frozen_head.slot + ); + + Ok(()) +} + /// Struct for storing the split slot and state root in the database. #[derive(Debug, Clone, Copy, Default, Encode, Decode)] struct Split { @@ -765,7 +752,7 @@ struct Split { state_root: Hash256, } -impl SimpleStoreItem for Split { +impl StoreItem for Split { fn db_column() -> DBColumn { DBColumn::BeaconMeta } @@ -789,7 +776,7 @@ pub struct HotStateSummary { epoch_boundary_state_root: Hash256, } -impl SimpleStoreItem for HotStateSummary { +impl StoreItem for HotStateSummary { fn db_column() -> DBColumn { DBColumn::BeaconStateSummary } @@ -832,7 +819,7 @@ struct ColdStateSummary { slot: Slot, } -impl SimpleStoreItem for ColdStateSummary { +impl StoreItem for ColdStateSummary { fn db_column() -> DBColumn { DBColumn::BeaconStateSummary } @@ -852,7 +839,7 @@ struct RestorePointHash { state_root: Hash256, } -impl SimpleStoreItem for RestorePointHash { +impl StoreItem for RestorePointHash { fn db_column() -> DBColumn { DBColumn::BeaconRestorePoint } diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 51346bbcf..b4bc1d581 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; pub mod beacon_state; pub mod partial_beacon_state; -impl SimpleStoreItem for SignedBeaconBlock { +impl StoreItem for SignedBeaconBlock { fn db_column() -> DBColumn { DBColumn::BeaconBlock } diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index da42b59c0..8b57f80e1 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -4,8 +4,8 @@ use ssz_derive::{Decode, Encode}; use std::convert::TryInto; use types::beacon_state::{CloneConfig, CommitteeCache, CACHED_EPOCHS}; -pub fn store_full_state, E: EthSpec>( - store: &S, +pub fn store_full_state, E: EthSpec>( + store: &KV, state_root: &Hash256, state: &BeaconState, ) -> Result<(), Error> { @@ -24,13 +24,13 @@ pub fn store_full_state, E: EthSpec>( result } -pub fn get_full_state, E: EthSpec>( - store: &S, +pub fn get_full_state, E: EthSpec>( + db: &KV, state_root: &Hash256, ) -> Result>, Error> { let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); - match store.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? { + match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? 
{ Some(bytes) => { let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES); let container = StorageContainer::from_ssz_bytes(&bytes)?; diff --git a/beacon_node/store/src/impls/partial_beacon_state.rs b/beacon_node/store/src/impls/partial_beacon_state.rs index b7fcc8bdb..c3c284314 100644 --- a/beacon_node/store/src/impls/partial_beacon_state.rs +++ b/beacon_node/store/src/impls/partial_beacon_state.rs @@ -1,7 +1,7 @@ use crate::*; use ssz::{Decode, Encode}; -impl SimpleStoreItem for PartialBeaconState { +impl StoreItem for PartialBeaconState { fn db_column() -> DBColumn { DBColumn::BeaconState } diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 6a19045a2..34d2d7e9d 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -42,11 +42,11 @@ impl<'a, U: Store, E: EthSpec> AncestorIter { +pub struct StateRootsIterator<'a, T: EthSpec, U: Store> { inner: RootsIterator<'a, T, U>, } -impl<'a, T: EthSpec, U> Clone for StateRootsIterator<'a, T, U> { +impl<'a, T: EthSpec, U: Store> Clone for StateRootsIterator<'a, T, U> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -86,11 +86,11 @@ impl<'a, T: EthSpec, U: Store> Iterator for StateRootsIterator<'a, T, U> { /// exhausted. /// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. -pub struct BlockRootsIterator<'a, T: EthSpec, U> { +pub struct BlockRootsIterator<'a, T: EthSpec, U: Store> { inner: RootsIterator<'a, T, U>, } -impl<'a, T: EthSpec, U> Clone for BlockRootsIterator<'a, T, U> { +impl<'a, T: EthSpec, U: Store> Clone for BlockRootsIterator<'a, T, U> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -125,13 +125,13 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { } /// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`. -pub struct RootsIterator<'a, T: EthSpec, U> { +pub struct RootsIterator<'a, T: EthSpec, U: Store> { store: Arc, beacon_state: Cow<'a, BeaconState>, slot: Slot, } -impl<'a, T: EthSpec, U> Clone for RootsIterator<'a, T, U> { +impl<'a, T: EthSpec, U: Store> Clone for RootsIterator<'a, T, U> { fn clone(&self) -> Self { Self { store: self.store.clone(), @@ -245,7 +245,7 @@ impl<'a, E: EthSpec, S: Store> Iterator for ParentRootBlockIterator<'a, E, S> #[derive(Clone)] /// Extends `BlockRootsIterator`, returning `SignedBeaconBlock` instances, instead of their roots. -pub struct BlockIterator<'a, T: EthSpec, U> { +pub struct BlockIterator<'a, T: EthSpec, U: Store> { roots: BlockRootsIterator<'a, T, U>, } diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 32fe5bbc9..3a7d55889 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -1,6 +1,4 @@ use super::*; -use crate::forwards_iter::SimpleForwardsBlockRootsIterator; -use crate::impls::beacon_state::{get_full_state, store_full_state}; use crate::metrics; use db_key::Key; use leveldb::database::batch::{Batch, Writebatch}; @@ -39,35 +37,12 @@ impl LevelDB { fn write_options(&self) -> WriteOptions { WriteOptions::new() } - - fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey { - let mut col = col.as_bytes().to_vec(); - col.append(&mut key.to_vec()); - BytesKey { key: col } - } } -/// Used for keying leveldb. 
-pub struct BytesKey { - key: Vec, -} - -impl Key for BytesKey { - fn from_u8(key: &[u8]) -> Self { - Self { key: key.to_vec() } - } - - fn as_slice T>(&self, f: F) -> T { - f(self.key.as_slice()) - } -} - -impl Store for LevelDB { - type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator; - +impl KeyValueStore for LevelDB { /// Retrieve some bytes in `column` with `key`. fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = get_key_for_col(col, key); metrics::inc_counter(&metrics::DISK_DB_READ_COUNT); let timer = metrics::start_timer(&metrics::DISK_DB_READ_TIMES); @@ -86,7 +61,7 @@ impl Store for LevelDB { /// Store some `value` in `column`, indexed with `key`. fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = get_key_for_col(col, key); metrics::inc_counter(&metrics::DISK_DB_WRITE_COUNT); metrics::inc_counter_by(&metrics::DISK_DB_WRITE_BYTES, val.len() as i64); @@ -102,7 +77,7 @@ impl Store for LevelDB { /// Return `true` if `key` exists in `column`. fn key_exists(&self, col: &str, key: &[u8]) -> Result { - let column_key = Self::get_key_for_col(col, key); + let column_key = get_key_for_col(col, key); metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT); @@ -114,7 +89,7 @@ impl Store for LevelDB { /// Removes `key` from `column`. fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { - let column_key = Self::get_key_for_col(col, key); + let column_key = get_key_for_col(col, key); metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT); @@ -123,56 +98,28 @@ impl Store for LevelDB { .map_err(Into::into) } - /// Store a state in the store. - fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error> { - store_full_state(self, state_root, &state) - } - - /// Fetch a state from the store. - fn get_state( - &self, - state_root: &Hash256, - _: Option, - ) -> Result>, Error> { - get_full_state(self, state_root) - } - - fn forwards_block_roots_iterator( - store: Arc, - start_slot: Slot, - end_state: BeaconState, - end_block_root: Hash256, - _: &ChainSpec, - ) -> Self::ForwardsBlockRootsIterator { - SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root) - } - fn do_atomically(&self, ops_batch: &[StoreOp]) -> Result<(), Error> { let mut leveldb_batch = Writebatch::new(); for op in ops_batch { match op { StoreOp::DeleteBlock(block_hash) => { let untyped_hash: Hash256 = (*block_hash).into(); - let key = Self::get_key_for_col( - DBColumn::BeaconBlock.into(), - untyped_hash.as_bytes(), - ); + let key = + get_key_for_col(DBColumn::BeaconBlock.into(), untyped_hash.as_bytes()); leveldb_batch.delete(key); } StoreOp::DeleteState(state_hash, slot) => { let untyped_hash: Hash256 = (*state_hash).into(); - let state_summary_key = Self::get_key_for_col( + let state_summary_key = get_key_for_col( DBColumn::BeaconStateSummary.into(), untyped_hash.as_bytes(), ); leveldb_batch.delete(state_summary_key); if *slot % E::slots_per_epoch() == 0 { - let state_key = Self::get_key_for_col( - DBColumn::BeaconState.into(), - untyped_hash.as_bytes(), - ); + let state_key = + get_key_for_col(DBColumn::BeaconState.into(), untyped_hash.as_bytes()); leveldb_batch.delete(state_key); } } @@ -183,6 +130,29 @@ impl Store for LevelDB { } } +impl ItemStore for LevelDB {} + +/// Used for keying leveldb. 
+pub struct BytesKey { + key: Vec, +} + +impl Key for BytesKey { + fn from_u8(key: &[u8]) -> Self { + Self { key: key.to_vec() } + } + + fn as_slice T>(&self, f: F) -> T { + f(self.key.as_slice()) + } +} + +fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey { + let mut col = col.as_bytes().to_vec(); + col.append(&mut key.to_vec()); + BytesKey { key: col } +} + impl From for Error { fn from(e: LevelDBError) -> Error { Error::DBError { diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 558342609..55489679e 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -2,7 +2,7 @@ //! //! Provides the following stores: //! -//! - `DiskStore`: an on-disk store backed by leveldb. Used in production. +//! - `HotColdDB`: an on-disk store backed by leveldb. Used in production. //! - `MemoryStore`: an in-memory store backed by a hash-map. Used for testing. //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See @@ -28,8 +28,8 @@ pub mod iter; use std::sync::Arc; pub use self::config::StoreConfig; -pub use self::hot_cold_store::{HotColdDB as DiskStore, HotStateSummary}; -pub use self::leveldb_store::LevelDB as SimpleDiskStore; +pub use self::hot_cold_store::{HotColdDB, HotStateSummary}; +pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; pub use self::partial_beacon_state::PartialBeaconState; pub use errors::Error; @@ -38,14 +38,7 @@ pub use metrics::scrape_for_metrics; pub use state_batch::StateBatch; pub use types::*; -/// An object capable of storing and retrieving objects implementing `StoreItem`. -/// -/// A `Store` is fundamentally backed by a key-value database, however it provides support for -/// columns. A simple column implementation might involve prefixing a key with some bytes unique to -/// each column. -pub trait Store: Sync + Send + Sized + 'static { - type ForwardsBlockRootsIterator: Iterator; - +pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Retrieve some bytes in `column` with `key`. fn get_bytes(&self, column: &str, key: &[u8]) -> Result>, Error>; @@ -58,8 +51,13 @@ pub trait Store: Sync + Send + Sized + 'static { /// Removes `key` from `column`. fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>; + /// Execute either all of the operations in `batch` or none at all, returning an error. + fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>; +} + +pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'static { /// Store an item in `Self`. - fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> { + fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> { let column = I::db_column().into(); let key = key.as_bytes(); @@ -68,7 +66,7 @@ pub trait Store: Sync + Send + Sized + 'static { } /// Retrieve an item from `Self`. - fn get(&self, key: &Hash256) -> Result, Error> { + fn get(&self, key: &Hash256) -> Result, Error> { let column = I::db_column().into(); let key = key.as_bytes(); @@ -79,7 +77,7 @@ pub trait Store: Sync + Send + Sized + 'static { } /// Returns `true` if the given key represents an item in `Self`. - fn exists(&self, key: &Hash256) -> Result { + fn exists(&self, key: &Hash256) -> Result { let column = I::db_column().into(); let key = key.as_bytes(); @@ -87,44 +85,40 @@ pub trait Store: Sync + Send + Sized + 'static { } /// Remove an item from `Self`. 
- fn delete(&self, key: &Hash256) -> Result<(), Error> { + fn delete(&self, key: &Hash256) -> Result<(), Error> { let column = I::db_column().into(); let key = key.as_bytes(); self.key_delete(column, key) } +} + +/// An object capable of storing and retrieving objects implementing `StoreItem`. +/// +/// A `Store` is fundamentally backed by a key-value database, however it provides support for +/// columns. A simple column implementation might involve prefixing a key with some bytes unique to +/// each column. +pub trait Store: Sync + Send + Sized + 'static { + type ForwardsBlockRootsIterator: Iterator; /// Store a block in the store. - fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock) -> Result<(), Error> { - self.put(block_root, &block) - } + fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock) -> Result<(), Error>; /// Fetch a block from the store. - fn get_block(&self, block_root: &Hash256) -> Result>, Error> { - self.get(block_root) - } + fn get_block(&self, block_root: &Hash256) -> Result>, Error>; /// Delete a block from the store. - fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> { - self.key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes()) - } + fn delete_block(&self, block_root: &Hash256) -> Result<(), Error>; /// Store a state in the store. fn put_state(&self, state_root: &Hash256, state: &BeaconState) -> Result<(), Error>; - /// Execute either all of the operations in `batch` or none at all, returning an error. - fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>; - /// Store a state summary in the store. - // NOTE: this is a hack for the HotColdDb, we could consider splitting this - // trait and removing the generic `S: Store` types everywhere? fn put_state_summary( &self, state_root: &Hash256, summary: HotStateSummary, - ) -> Result<(), Error> { - self.put(state_root, &summary).map_err(Into::into) - } + ) -> Result<(), Error>; /// Fetch a state from the store. fn get_state( @@ -133,33 +127,12 @@ pub trait Store: Sync + Send + Sized + 'static { slot: Option, ) -> Result>, Error>; - /// Fetch a state from the store, controlling which cache fields are cloned. - fn get_state_with( - &self, - state_root: &Hash256, - slot: Option, - ) -> Result>, Error> { - // Default impl ignores config. Overriden in `HotColdDb`. - self.get_state(state_root, slot) - } - /// Delete a state from the store. - fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error> { - self.key_delete(DBColumn::BeaconState.into(), state_root.as_bytes()) - } - - /// (Optionally) Move all data before the frozen slot to the freezer database. - fn process_finalization( - _store: Arc, - _frozen_head_root: Hash256, - _frozen_head: &BeaconState, - ) -> Result<(), Error> { - Ok(()) - } + fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error>; /// Get a forwards (slot-ascending) iterator over the beacon block roots since `start_slot`. /// - /// Will be efficient for frozen portions of the database if using `DiskStore`. + /// Will be efficient for frozen portions of the database if using `HotColdDB`. /// /// The `end_state` and `end_block_root` are required for backtracking in the post-finalization /// part of the chain, and should be usually be set to the current head. Importantly, the @@ -175,28 +148,18 @@ pub trait Store: Sync + Send + Sized + 'static { spec: &ChainSpec, ) -> Self::ForwardsBlockRootsIterator; - /// Load the most recent ancestor state of `state_root` which lies on an epoch boundary. 
- /// - /// If `state_root` corresponds to an epoch boundary state, then that state itself should be - /// returned. fn load_epoch_boundary_state( &self, state_root: &Hash256, - ) -> Result>, Error> { - // The default implementation is not very efficient, but isn't used in prod. - // See `HotColdDB` for the optimized implementation. - if let Some(state) = self.get_state(state_root, None)? { - let epoch_boundary_slot = state.slot / E::slots_per_epoch() * E::slots_per_epoch(); - if state.slot == epoch_boundary_slot { - Ok(Some(state)) - } else { - let epoch_boundary_state_root = state.get_state_root(epoch_boundary_slot)?; - self.get_state(epoch_boundary_state_root, Some(epoch_boundary_slot)) - } - } else { - Ok(None) - } - } + ) -> Result>, Error>; + + fn put_item(&self, key: &Hash256, item: &I) -> Result<(), Error>; + + fn get_item(&self, key: &Hash256) -> Result, Error>; + + fn item_exists(&self, key: &Hash256) -> Result; + + fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error>; } /// Reified key-value storage operation. Helps in modifying the storage atomically. @@ -252,7 +215,7 @@ impl Into<&'static str> for DBColumn { } /// An item that may stored in a `Store` by serializing and deserializing from bytes. -pub trait SimpleStoreItem: Sized { +pub trait StoreItem: Sized { /// Identifies which column this item should be placed in. fn db_column() -> DBColumn; @@ -278,7 +241,7 @@ mod tests { b: u64, } - impl SimpleStoreItem for StorableThing { + impl StoreItem for StorableThing { fn db_column() -> DBColumn { DBColumn::BeaconBlock } @@ -292,7 +255,7 @@ mod tests { } } - fn test_impl(store: impl Store) { + fn test_impl(store: impl ItemStore) { let key = Hash256::random(); let item = StorableThing { a: 1, b: 42 }; @@ -312,31 +275,11 @@ mod tests { assert_eq!(store.get::(&key).unwrap(), None); } - #[test] - fn diskdb() { - use sloggers::{null::NullLoggerBuilder, Build}; - - let hot_dir = tempdir().unwrap(); - let cold_dir = tempdir().unwrap(); - let spec = MinimalEthSpec::default_spec(); - let log = NullLoggerBuilder.build().unwrap(); - let store = DiskStore::open( - &hot_dir.path(), - &cold_dir.path(), - StoreConfig::default(), - spec, - log, - ) - .unwrap(); - - test_impl(store); - } - #[test] fn simplediskdb() { let dir = tempdir().unwrap(); let path = dir.path(); - let store = SimpleDiskStore::open(&path).unwrap(); + let store = LevelDB::open(&path).unwrap(); test_impl(store); } diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 01483a905..45918e74e 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,6 +1,8 @@ -use super::{DBColumn, Error, Store, StoreOp}; +use super::{DBColumn, Error, ItemStore, KeyValueStore, Store, StoreOp}; use crate::forwards_iter::SimpleForwardsBlockRootsIterator; +use crate::hot_cold_store::HotStateSummary; use crate::impls::beacon_state::{get_full_state, store_full_state}; +use crate::StoreItem; use parking_lot::RwLock; use std::collections::HashMap; use std::marker::PhantomData; @@ -40,9 +42,7 @@ impl MemoryStore { } } -impl Store for MemoryStore { - type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator; - +impl KeyValueStore for MemoryStore { /// Get the value of some key from the database. Returns `None` if the key does not exist. fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { let column_key = Self::get_key_for_col(col, key); @@ -75,20 +75,6 @@ impl Store for MemoryStore { Ok(()) } - /// Store a state in the store. 
-    fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
-        store_full_state(self, state_root, &state)
-    }
-
-    /// Fetch a state from the store.
-    fn get_state(
-        &self,
-        state_root: &Hash256,
-        _: Option<Slot>,
-    ) -> Result<Option<BeaconState<E>>, Error> {
-        get_full_state(self, state_root)
-    }
-
     fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
         for op in batch {
             match op {
@@ -112,6 +98,50 @@ impl<E: EthSpec> Store<E> for MemoryStore<E> {
             }
         }
         Ok(())
     }
+}
+
+impl<E: EthSpec> ItemStore<E> for MemoryStore<E> {}
+
+impl<E: EthSpec> Store<E> for MemoryStore<E> {
+    type ForwardsBlockRootsIterator = SimpleForwardsBlockRootsIterator;
+
+    fn put_block(&self, block_root: &Hash256, block: SignedBeaconBlock<E>) -> Result<(), Error> {
+        self.put(block_root, &block)
+    }
+
+    fn get_block(&self, block_root: &Hash256) -> Result<Option<SignedBeaconBlock<E>>, Error> {
+        self.get(block_root)
+    }
+
+    fn delete_block(&self, block_root: &Hash256) -> Result<(), Error> {
+        self.key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())
+    }
+
+    fn put_state_summary(
+        &self,
+        state_root: &Hash256,
+        summary: HotStateSummary,
+    ) -> Result<(), Error> {
+        self.put(state_root, &summary).map_err(Into::into)
+    }
+
+    /// Store a state in the store.
+    fn put_state(&self, state_root: &Hash256, state: &BeaconState<E>) -> Result<(), Error> {
+        store_full_state(self, state_root, &state)
+    }
+
+    /// Fetch a state from the store.
+    fn get_state(
+        &self,
+        state_root: &Hash256,
+        _: Option<Slot>,
+    ) -> Result<Option<BeaconState<E>>, Error> {
+        get_full_state(self, state_root)
+    }
+
+    fn delete_state(&self, state_root: &Hash256, _slot: Slot) -> Result<(), Error> {
+        self.key_delete(DBColumn::BeaconState.into(), state_root.as_bytes())
+    }
@@ -122,4 +152,43 @@ impl<E: EthSpec> Store<E> for MemoryStore<E> {
 
     fn forwards_block_roots_iterator(
         store: Arc<Self>,
         start_slot: Slot,
         end_state: BeaconState<E>,
         end_block_root: Hash256,
     ) -> Self::ForwardsBlockRootsIterator {
         SimpleForwardsBlockRootsIterator::new(store, start_slot, end_state, end_block_root)
     }
+
+    /// Load the most recent ancestor state of `state_root` which lies on an epoch boundary.
+    ///
+    /// If `state_root` corresponds to an epoch boundary state, then that state itself should be
+    /// returned.
+    fn load_epoch_boundary_state(
+        &self,
+        state_root: &Hash256,
+    ) -> Result<Option<BeaconState<E>>, Error> {
+        // The default implementation is not very efficient, but isn't used in prod.
+        // See `HotColdDB` for the optimized implementation.
+        if let Some(state) = self.get_state(state_root, None)? {
+            let epoch_boundary_slot = state.slot / E::slots_per_epoch() * E::slots_per_epoch();
+            if state.slot == epoch_boundary_slot {
+                Ok(Some(state))
+            } else {
+                let epoch_boundary_state_root = state.get_state_root(epoch_boundary_slot)?;
+                self.get_state(epoch_boundary_state_root, Some(epoch_boundary_slot))
+            }
+        } else {
+            Ok(None)
+        }
+    }
+
+    fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
+        self.put(key, item)
+    }
+
+    fn get_item<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> {
+        self.get(key)
+    }
+
+    fn item_exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> {
+        self.exists::<I>(key)
+    }
+
+    fn do_atomically(&self, batch: &[StoreOp]) -> Result<(), Error> {
+        KeyValueStore::do_atomically(self, batch)
+    }
 }
diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs
index b0947a3cd..00c518a84 100644
--- a/beacon_node/store/src/partial_beacon_state.rs
+++ b/beacon_node/store/src/partial_beacon_state.rs
@@ -2,7 +2,7 @@ use crate::chunked_vector::{
     load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes,
     StateRoots,
 };
-use crate::{Error, Store};
+use crate::{Error, KeyValueStore};
 use ssz_derive::{Decode, Encode};
 use std::convert::TryInto;
 use types::*;
@@ -113,7 +113,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
         }
     }
 
-    pub fn load_block_roots<S: Store<T>>(
+    pub fn load_block_roots<S: KeyValueStore<T>>(
         &mut self,
         store: &S,
         spec: &ChainSpec,
@@ -126,7 +126,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
         Ok(())
     }
 
-    pub fn load_state_roots<S: Store<T>>(
+    pub fn load_state_roots<S: KeyValueStore<T>>(
         &mut self,
         store: &S,
         spec: &ChainSpec,
@@ -139,7 +139,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
         Ok(())
     }
 
-    pub fn load_historical_roots<S: Store<T>>(
+    pub fn load_historical_roots<S: KeyValueStore<T>>(
         &mut self,
         store: &S,
         spec: &ChainSpec,
@@ -152,7 +152,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
         Ok(())
     }
 
-    pub fn load_randao_mixes<S: Store<T>>(
+    pub fn load_randao_mixes<S: KeyValueStore<T>>(
         &mut self,
         store: &S,
         spec: &ChainSpec,
diff --git a/consensus/ssz/src/encode.rs b/consensus/ssz/src/encode.rs
index ab14f378e..88e970ea6 100644
--- a/consensus/ssz/src/encode.rs
+++ b/consensus/ssz/src/encode.rs
@@ -91,13 +91,6 @@ pub struct SszEncoder<'a> {
 }
 
 impl<'a> SszEncoder<'a> {
-    /// Instantiate a new encoder for encoding a SSZ list.
-    ///
-    /// Identical to `Self::container`.
-    pub fn list(buf: &'a mut Vec<u8>, num_fixed_bytes: usize) -> Self {
-        Self::container(buf, num_fixed_bytes)
-    }
-
     /// Instantiate a new encoder for encoding a SSZ container.
     pub fn container(buf: &'a mut Vec<u8>, num_fixed_bytes: usize) -> Self {
         buf.reserve(num_fixed_bytes);
diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs
index c9e9a4797..03b842144 100644
--- a/consensus/ssz/src/encode/impls.rs
+++ b/consensus/ssz/src/encode/impls.rs
@@ -256,7 +256,8 @@ macro_rules! impl_for_vec {
                     item.ssz_append(buf);
                 }
             } else {
-                let mut encoder = SszEncoder::list(buf, self.len() * BYTES_PER_LENGTH_OFFSET);
+                let mut encoder =
+                    SszEncoder::container(buf, self.len() * BYTES_PER_LENGTH_OFFSET);
 
                 for item in self {
                     encoder.append(item);
diff --git a/consensus/ssz_types/src/fixed_vector.rs b/consensus/ssz_types/src/fixed_vector.rs
index 455bb512d..da2aa6616 100644
--- a/consensus/ssz_types/src/fixed_vector.rs
+++ b/consensus/ssz_types/src/fixed_vector.rs
@@ -196,7 +196,8 @@ where
             item.ssz_append(buf);
         }
     } else {
-        let mut encoder = ssz::SszEncoder::list(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET);
+        let mut encoder =
+            ssz::SszEncoder::container(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET);
 
         for item in &self.vec {
             encoder.append(item);
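
For readers skimming the refactor: the doc comment on the new `Store` trait describes the column scheme only informally. The backend emulates columns by prefixing each key with bytes unique to the column, and `put_item`/`get_item`/`item_exists` route through `StoreItem::db_column()` to pick that prefix (the same idea as `MemoryStore::get_key_for_col` above). Below is a minimal, self-contained sketch of that scheme. It is not part of this diff; `ToyStore`, the `&'static str` column type, and the byte-level codec are simplified stand-ins for the crate's `DBColumn`/SSZ-based API.

```rust
use std::collections::HashMap;
use std::convert::TryInto;
use std::sync::RwLock;

/// Simplified stand-in for the crate's `StoreItem` trait (illustrative only).
trait StoreItem: Sized {
    fn db_column() -> &'static str;
    fn as_store_bytes(&self) -> Vec<u8>;
    fn from_store_bytes(bytes: &[u8]) -> Option<Self>;
}

/// A toy in-memory key-value store that emulates columns by prefixing each
/// key with the column's bytes, as the `Store` doc comment suggests.
#[derive(Default)]
struct ToyStore {
    db: RwLock<HashMap<Vec<u8>, Vec<u8>>>,
}

impl ToyStore {
    /// Build the composite key: column prefix followed by the raw key.
    fn column_key(column: &str, key: &[u8]) -> Vec<u8> {
        let mut col_key = column.as_bytes().to_vec();
        col_key.extend_from_slice(key);
        col_key
    }

    fn put_item<I: StoreItem>(&self, key: &[u8], item: &I) {
        let k = Self::column_key(I::db_column(), key);
        self.db.write().unwrap().insert(k, item.as_store_bytes());
    }

    fn get_item<I: StoreItem>(&self, key: &[u8]) -> Option<I> {
        let k = Self::column_key(I::db_column(), key);
        self.db.read().unwrap().get(&k).and_then(|b| I::from_store_bytes(b))
    }

    fn item_exists<I: StoreItem>(&self, key: &[u8]) -> bool {
        let k = Self::column_key(I::db_column(), key);
        self.db.read().unwrap().contains_key(&k)
    }
}

/// An example item, analogous to `StorableThing` in the tests above.
struct Thing {
    a: u64,
}

impl StoreItem for Thing {
    fn db_column() -> &'static str {
        "thing"
    }

    fn as_store_bytes(&self) -> Vec<u8> {
        self.a.to_le_bytes().to_vec()
    }

    fn from_store_bytes(bytes: &[u8]) -> Option<Self> {
        Some(Thing {
            a: u64::from_le_bytes(bytes.try_into().ok()?),
        })
    }
}

fn main() {
    let store = ToyStore::default();
    store.put_item(b"key", &Thing { a: 42 });
    assert!(store.item_exists::<Thing>(b"key"));
    assert_eq!(store.get_item::<Thing>(b"key").map(|t| t.a), Some(42));
}
```

The property the prefix buys is that two items with the same key but different `db_column()` values never collide, which is what lets `put_item`/`get_item`/`item_exists` stay generic over `I: StoreItem`.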
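Similarly, the `load_epoch_boundary_state` implementation moved into `MemoryStore` relies on truncating integer division to floor a slot to the first slot of its epoch. A tiny illustration of that arithmetic (a hypothetical helper, not part of the diff), assuming 32 slots per epoch:

```rust
/// Floor `slot` to the first slot of its epoch, mirroring the expression
/// `state.slot / E::slots_per_epoch() * E::slots_per_epoch()` above.
fn epoch_boundary_slot(slot: u64, slots_per_epoch: u64) -> u64 {
    (slot / slots_per_epoch) * slots_per_epoch
}

fn main() {
    // With 32 slots per epoch, slot 70 floors to slot 64,
    // while a slot already on a boundary maps to itself.
    assert_eq!(epoch_boundary_slot(70, 32), 64);
    assert_eq!(epoch_boundary_slot(64, 32), 64);
}
```

If the state is already on the boundary it is returned as-is; otherwise the boundary state's root is looked up via `state.get_state_root(epoch_boundary_slot)` and fetched with `get_state`.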