From 08b1808745a844b30e1bfc0ef58f603f4344b39e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 31 Mar 2019 18:57:48 +1100 Subject: [PATCH 01/21] Modify runtime to allow memory or disk db DiskDB is not working yet, but we'll get there! --- beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/src/initialise.rs | 43 +++++++++++++++++++++- beacon_node/client/src/client_config.rs | 6 +++ beacon_node/client/src/client_types.rs | 20 ++++++++-- beacon_node/src/main.rs | 9 +++++ beacon_node/src/run.rs | 41 ++++++++++++++++----- 6 files changed, 107 insertions(+), 13 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 37d96a497..da31bfa77 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] types = { path = "../eth2/types" } +db = { path = "./db" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs index 0951e06fb..44cef5fe1 100644 --- a/beacon_node/beacon_chain/src/initialise.rs +++ b/beacon_node/beacon_chain/src/initialise.rs @@ -61,7 +61,7 @@ pub fn initialise_beacon_chain( } /// Initialisation of a test beacon chain, uses an in memory db with fixed genesis time. -pub fn initialise_test_beacon_chain( +pub fn initialise_test_beacon_chain_with_memory_db( spec: &ChainSpec, _db_name: Option<&PathBuf>, ) -> Arc>> { @@ -100,3 +100,44 @@ pub fn initialise_test_beacon_chain( .expect("Terminate if beacon chain generation fails"), ) } + +/// Initialisation of a test beacon chain, uses an in memory db with fixed genesis time. 
+pub fn initialise_test_beacon_chain_with_disk_db( + spec: &ChainSpec, + db_name: Option<&PathBuf>, +) -> Arc>> { + let db = Arc::new(DiskDB::open(db_name.expect("Must have DB path"), None)); + let block_store = Arc::new(BeaconBlockStore::new(db.clone())); + let state_store = Arc::new(BeaconStateStore::new(db.clone())); + + let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, spec); + let (genesis_state, _keypairs) = state_builder.build(); + + let mut genesis_block = BeaconBlock::empty(spec); + genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + + // Slot clock + let slot_clock = SystemTimeSlotClock::new( + spec.genesis_slot, + genesis_state.genesis_time, + spec.seconds_per_slot, + ) + .expect("Unable to load SystemTimeSlotClock"); + // Choose the fork choice + let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + + // Genesis chain + //TODO: Handle error correctly + Arc::new( + BeaconChain::from_genesis( + state_store.clone(), + block_store.clone(), + slot_clock, + genesis_state, + genesis_block, + spec.clone(), + fork_choice, + ) + .expect("Terminate if beacon chain generation fails"), + ) +} diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index cad287f2c..c32379522 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -119,6 +119,12 @@ impl ClientConfig { } } + match args.value_of("db") { + Some("rocks") => config.db_type = DBType::RocksDB, + Some("memory") => config.db_type = DBType::Memory, + _ => unreachable!(), // clap prevents this. 
+ }; + Ok(config) } } diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs index f5abc77ce..1d2f1d6ec 100644 --- a/beacon_node/client/src/client_types.rs +++ b/beacon_node/client/src/client_types.rs @@ -34,9 +34,9 @@ impl ClientTypes for StandardClientType { } } -pub struct TestingClientType; +pub struct MemoryDBTestingClientType; -impl ClientTypes for TestingClientType { +impl ClientTypes for MemoryDBTestingClientType { type DB = MemoryDB; type SlotClock = SystemTimeSlotClock; type ForkChoice = BitwiseLMDGhost; @@ -44,6 +44,20 @@ impl ClientTypes for TestingClientType { fn initialise_beacon_chain( config: &ClientConfig, ) -> Arc> { - initialise::initialise_test_beacon_chain(&config.spec, None) + initialise::initialise_test_beacon_chain_with_memory_db(&config.spec, None) + } +} + +pub struct DiskDBTestingClientType; + +impl ClientTypes for DiskDBTestingClientType { + type DB = DiskDB; + type SlotClock = SystemTimeSlotClock; + type ForkChoice = BitwiseLMDGhost; + + fn initialise_beacon_chain( + config: &ClientConfig, + ) -> Arc> { + initialise::initialise_test_beacon_chain_with_disk_db(&config.spec, Some(&config.db_name)) } } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index ea74c7376..8aa6da7d5 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -58,6 +58,15 @@ fn main() { .help("Listen port for RPC endpoint.") .takes_value(true), ) + .arg( + Arg::with_name("db") + .long("db") + .value_name("DB") + .help("Type of database to use.") + .takes_value(true) + .possible_values(&["rocks", "memory"]) + .default_value("memory"), + ) .get_matches(); // invalid arguments, panic diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 1d9156124..1afeb5408 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,15 +1,18 @@ -use client::client_types::TestingClientType; +use client::client_types::{DiskDBTestingClientType, MemoryDBTestingClientType}; use client::error; -use 
client::{notifier, Client, ClientConfig}; +use client::{notifier, Client, ClientConfig, ClientTypes}; +use db::DBType; use futures::sync::oneshot; use futures::Future; use slog::info; use std::cell::RefCell; use tokio::runtime::Builder; +use tokio::runtime::Runtime; +use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Result<()> { - let mut runtime = Builder::new() + let runtime = Builder::new() .name_prefix("main-") .clock(Clock::system()) .build() @@ -20,8 +23,32 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul "data_dir" => &config.data_dir.to_str(), "port" => &config.net_conf.listen_port); + let executor = runtime.executor(); + + match config.db_type { + DBType::RocksDB => { + let client: Client = + Client::new(config, log.clone(), &executor)?; + + run(client, executor, runtime, log) + } + DBType::Memory => { + let client: Client = + Client::new(config, log.clone(), &executor)?; + + run(client, executor, runtime, log) + } + } +} + +pub fn run( + client: Client, + executor: TaskExecutor, + mut runtime: Runtime, + log: &slog::Logger, +) -> error::Result<()> { // run service until ctrl-c - let (ctrlc_send, ctrlc) = oneshot::channel(); + let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); let ctrlc_send_c = RefCell::new(Some(ctrlc_send)); ctrlc::set_handler(move || { if let Some(ctrlc_send) = ctrlc_send_c.try_borrow_mut().unwrap().take() { @@ -32,14 +59,10 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul let (exit_signal, exit) = exit_future::signal(); - let executor = runtime.executor(); - - // currently testing - using TestingClientType - let client: Client = Client::new(config, log.clone(), &executor)?; notifier::run(&client, executor, exit); runtime - .block_on(ctrlc) + .block_on(ctrlc_oneshot) .map_err(|e| format!("Ctrlc oneshot failed: {:?}", e))?; // perform global shutdown operations. 
From f4bd46fe6697228a3aa1c58210400261427b0ec9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 31 Mar 2019 19:16:45 +1100 Subject: [PATCH 02/21] Fix rocks db startup issues --- beacon_node/db/src/disk_db.rs | 38 ++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/beacon_node/db/src/disk_db.rs b/beacon_node/db/src/disk_db.rs index 9d8a71bc4..f05320f7f 100644 --- a/beacon_node/db/src/disk_db.rs +++ b/beacon_node/db/src/disk_db.rs @@ -2,6 +2,7 @@ extern crate rocksdb; use super::rocksdb::Error as RocksError; use super::rocksdb::{Options, DB}; +use super::stores::COLUMNS; use super::{ClientDB, DBError, DBValue}; use std::fs; use std::path::Path; @@ -23,31 +24,32 @@ impl DiskDB { /// /// Panics if the database is unable to be created. pub fn open(path: &Path, columns: Option<&[&str]>) -> Self { - /* - * Initialise the options - */ + // Rocks options. let mut options = Options::default(); options.create_if_missing(true); - // TODO: ensure that columns are created (and remove - // the dead_code allow) - - /* - * Initialise the path - */ + // Ensure the path exists. fs::create_dir_all(&path).unwrap_or_else(|_| panic!("Unable to create {:?}", &path)); let db_path = path.join("database"); - /* - * Open the database - */ - let db = match columns { - None => DB::open(&options, db_path), - Some(columns) => DB::open_cf(&options, db_path, columns), - } - .expect("Unable to open local database");; + let columns = columns.unwrap_or(&COLUMNS); - Self { db } + if db_path.exists() { + Self { + db: DB::open_cf(&options, db_path, &COLUMNS) + .expect("Unable to open local database"), + } + } else { + let mut db = Self { + db: DB::open(&options, db_path).expect("Unable to open local database"), + }; + + for cf in columns { + db.create_col(cf).unwrap(); + } + + db + } } /// Create a RocksDB column family. 
Corresponds to the From b03dfdce593a7d9ab60c5e6cd2094f9ca2107328 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 1 Apr 2019 08:59:59 +1100 Subject: [PATCH 03/21] Fix genesis time issue, add logs --- beacon_node/src/run.rs | 10 ++++++++++ .../src/test_utils/testing_beacon_state_builder.rs | 7 +------ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 1afeb5408..52dc3973b 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -27,12 +27,22 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul match config.db_type { DBType::RocksDB => { + info!( + log, + "BeaconNode starting"; + "type" => "DiskDBTestingClientType" + ); let client: Client = Client::new(config, log.clone(), &executor)?; run(client, executor, runtime, log) } DBType::Memory => { + info!( + log, + "BeaconNode starting"; + "type" => "MemoryDBTestingClientType" + ); let client: Client = Client::new(config, log.clone(), &executor)?; diff --git a/eth2/types/src/test_utils/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/testing_beacon_state_builder.rs index def58b0d7..b0168eb78 100644 --- a/eth2/types/src/test_utils/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_state_builder.rs @@ -122,12 +122,7 @@ impl TestingBeaconStateBuilder { }) .collect(); - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs() - - 30; - let genesis_time = now; // arbitrary + let genesis_time = 1554069200; // arbitrary let mut state = BeaconState::genesis( genesis_time, From ebe47a5b341b1bc710671f4a9dc22ddcf996a017 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 1 Apr 2019 14:56:32 +1100 Subject: [PATCH 04/21] Add `Store` and `db_encode_derive`. Implementation is not complete, but what is here works. 
--- Cargo.toml | 3 + beacon_node/db/Cargo.toml | 2 - beacon_node/db2/Cargo.toml | 16 + beacon_node/db2/src/disk_db.rs | 199 ++++++++++++ beacon_node/db2/src/lib.rs | 151 +++++++++ beacon_node/db2/src/memory_db.rs | 236 ++++++++++++++ .../db2/src/stores/beacon_block_store.rs | 246 ++++++++++++++ .../db2/src/stores/beacon_state_store.rs | 62 ++++ beacon_node/db2/src/stores/macros.rs | 103 ++++++ beacon_node/db2/src/stores/mod.rs | 25 ++ beacon_node/db2/src/stores/pow_chain_store.rs | 68 ++++ beacon_node/db2/src/stores/validator_store.rs | 215 ++++++++++++ beacon_node/db2/src/traits.rs | 38 +++ beacon_node/db_encode/Cargo.toml | 9 + beacon_node/db_encode/src/lib.rs | 59 ++++ beacon_node/db_encode_derive/Cargo.toml | 13 + beacon_node/db_encode_derive/src/lib.rs | 305 ++++++++++++++++++ 17 files changed, 1748 insertions(+), 2 deletions(-) create mode 100644 beacon_node/db2/Cargo.toml create mode 100644 beacon_node/db2/src/disk_db.rs create mode 100644 beacon_node/db2/src/lib.rs create mode 100644 beacon_node/db2/src/memory_db.rs create mode 100644 beacon_node/db2/src/stores/beacon_block_store.rs create mode 100644 beacon_node/db2/src/stores/beacon_state_store.rs create mode 100644 beacon_node/db2/src/stores/macros.rs create mode 100644 beacon_node/db2/src/stores/mod.rs create mode 100644 beacon_node/db2/src/stores/pow_chain_store.rs create mode 100644 beacon_node/db2/src/stores/validator_store.rs create mode 100644 beacon_node/db2/src/traits.rs create mode 100644 beacon_node/db_encode/Cargo.toml create mode 100644 beacon_node/db_encode/src/lib.rs create mode 100644 beacon_node/db_encode_derive/Cargo.toml create mode 100644 beacon_node/db_encode_derive/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 3ae62248b..008e83bae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,9 @@ members = [ "eth2/utils/test_random_derive", "beacon_node", "beacon_node/db", + "beacon_node/db2", + "beacon_node/db_encode", + "beacon_node/db_encode_derive", "beacon_node/client", 
"beacon_node/network", "beacon_node/eth2-libp2p", diff --git a/beacon_node/db/Cargo.toml b/beacon_node/db/Cargo.toml index 122aaa34d..ffb3585b9 100644 --- a/beacon_node/db/Cargo.toml +++ b/beacon_node/db/Cargo.toml @@ -9,5 +9,3 @@ blake2-rfc = "0.2.18" bls = { path = "../../eth2/utils/bls" } bytes = "0.4.10" rocksdb = "0.10.1" -ssz = { path = "../../eth2/utils/ssz" } -types = { path = "../../eth2/types" } diff --git a/beacon_node/db2/Cargo.toml b/beacon_node/db2/Cargo.toml new file mode 100644 index 000000000..8a5dbad5e --- /dev/null +++ b/beacon_node/db2/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "db2" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +blake2-rfc = "0.2.18" +bls = { path = "../../eth2/utils/bls" } +bytes = "0.4.10" +db_encode = { path = "../db_encode" } +db_encode_derive = { path = "../db_encode_derive" } +rocksdb = "0.10.1" +ssz = { path = "../../eth2/utils/ssz" } +ssz_derive = { path = "../../eth2/utils/ssz_derive" } +types = { path = "../../eth2/types" } diff --git a/beacon_node/db2/src/disk_db.rs b/beacon_node/db2/src/disk_db.rs new file mode 100644 index 000000000..f05320f7f --- /dev/null +++ b/beacon_node/db2/src/disk_db.rs @@ -0,0 +1,199 @@ +extern crate rocksdb; + +use super::rocksdb::Error as RocksError; +use super::rocksdb::{Options, DB}; +use super::stores::COLUMNS; +use super::{ClientDB, DBError, DBValue}; +use std::fs; +use std::path::Path; + +/// A on-disk database which implements the ClientDB trait. +/// +/// This implementation uses RocksDB with default options. +pub struct DiskDB { + db: DB, +} + +impl DiskDB { + /// Open the RocksDB database, optionally supplying columns if required. + /// + /// The RocksDB database will be contained in a directory titled + /// "database" in the supplied path. + /// + /// # Panics + /// + /// Panics if the database is unable to be created. + pub fn open(path: &Path, columns: Option<&[&str]>) -> Self { + // Rocks options. 
+ let mut options = Options::default(); + options.create_if_missing(true); + + // Ensure the path exists. + fs::create_dir_all(&path).unwrap_or_else(|_| panic!("Unable to create {:?}", &path)); + let db_path = path.join("database"); + + let columns = columns.unwrap_or(&COLUMNS); + + if db_path.exists() { + Self { + db: DB::open_cf(&options, db_path, &COLUMNS) + .expect("Unable to open local database"), + } + } else { + let mut db = Self { + db: DB::open(&options, db_path).expect("Unable to open local database"), + }; + + for cf in columns { + db.create_col(cf).unwrap(); + } + + db + } + } + + /// Create a RocksDB column family. Corresponds to the + /// `create_cf()` function on the RocksDB API. + #[allow(dead_code)] + fn create_col(&mut self, col: &str) -> Result<(), DBError> { + match self.db.create_cf(col, &Options::default()) { + Err(e) => Err(e.into()), + Ok(_) => Ok(()), + } + } +} + +impl From for DBError { + fn from(e: RocksError) -> Self { + Self { + message: e.to_string(), + } + } +} + +impl ClientDB for DiskDB { + /// Get the value for some key on some column. + /// + /// Corresponds to the `get_cf()` method on the RocksDB API. + /// Will attempt to get the `ColumnFamily` and return an Err + /// if it fails. + fn get(&self, col: &str, key: &[u8]) -> Result, DBError> { + match self.db.cf_handle(col) { + None => Err(DBError { + message: "Unknown column".to_string(), + }), + Some(handle) => match self.db.get_cf(handle, key)? { + None => Ok(None), + Some(db_vec) => Ok(Some(DBValue::from(&*db_vec))), + }, + } + } + + /// Set some value for some key on some column. + /// + /// Corresponds to the `cf_handle()` method on the RocksDB API. + /// Will attempt to get the `ColumnFamily` and return an Err + /// if it fails. 
+ fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> { + match self.db.cf_handle(col) { + None => Err(DBError { + message: "Unknown column".to_string(), + }), + Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()), + } + } + + /// Return true if some key exists in some column. + fn exists(&self, col: &str, key: &[u8]) -> Result { + /* + * I'm not sure if this is the correct way to read if some + * block exists. Naively I would expect this to unncessarily + * copy some data, but I could be wrong. + */ + match self.db.cf_handle(col) { + None => Err(DBError { + message: "Unknown column".to_string(), + }), + Some(handle) => Ok(self.db.get_cf(handle, key)?.is_some()), + } + } + + /// Delete the value for some key on some column. + /// + /// Corresponds to the `delete_cf()` method on the RocksDB API. + /// Will attempt to get the `ColumnFamily` and return an Err + /// if it fails. + fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> { + match self.db.cf_handle(col) { + None => Err(DBError { + message: "Unknown column".to_string(), + }), + Some(handle) => { + self.db.delete_cf(handle, key)?; + Ok(()) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::super::ClientDB; + use super::*; + use std::sync::Arc; + use std::{env, fs, thread}; + + #[test] + #[ignore] + fn test_rocksdb_can_use_db() { + let pwd = env::current_dir().unwrap(); + let path = pwd.join("testdb_please_remove"); + let _ = fs::remove_dir_all(&path); + fs::create_dir_all(&path).unwrap(); + + let col_name: &str = "TestColumn"; + let column_families = vec![col_name]; + + let mut db = DiskDB::open(&path, None); + + for cf in column_families { + db.create_col(&cf).unwrap(); + } + + let db = Arc::new(db); + + let thread_count = 10; + let write_count = 10; + + // We're execting the product of these numbers to fit in one byte. 
+ assert!(thread_count * write_count <= 255); + + let mut handles = vec![]; + for t in 0..thread_count { + let wc = write_count; + let db = db.clone(); + let col = col_name.clone(); + let handle = thread::spawn(move || { + for w in 0..wc { + let key = (t * w) as u8; + let val = 42; + db.put(&col, &vec![key], &vec![val]).unwrap(); + } + }); + handles.push(handle); + } + + for handle in handles { + handle.join().unwrap(); + } + + for t in 0..thread_count { + for w in 0..write_count { + let key = (t * w) as u8; + let val = db.get(&col_name, &vec![key]).unwrap().unwrap(); + assert_eq!(vec![42], val); + } + } + fs::remove_dir_all(&path).unwrap(); + } +} diff --git a/beacon_node/db2/src/lib.rs b/beacon_node/db2/src/lib.rs new file mode 100644 index 000000000..0704a84f5 --- /dev/null +++ b/beacon_node/db2/src/lib.rs @@ -0,0 +1,151 @@ +extern crate blake2_rfc as blake2; +extern crate bls; +extern crate rocksdb; + +mod disk_db; +mod memory_db; +pub mod stores; +mod traits; + +use self::stores::COLUMNS; +use db_encode::{db_encode, DBDecode, DBEncode}; +use ssz::DecodeError; +use std::sync::Arc; + +pub use self::disk_db::DiskDB; +pub use self::memory_db::MemoryDB; +pub use self::traits::{ClientDB, DBError, DBValue}; +pub use types::*; + +#[derive(Debug, PartialEq)] +pub enum Error { + SszDecodeError(DecodeError), + DBError { message: String }, +} + +impl From for Error { + fn from(e: DecodeError) -> Error { + Error::SszDecodeError(e) + } +} + +impl From for Error { + fn from(e: DBError) -> Error { + Error::DBError { message: e.message } + } +} + +/// Currently available database options +#[derive(Debug, Clone)] +pub enum DBType { + Memory, + RocksDB, +} + +pub enum DBColumn { + Block, + State, + BeaconChain, +} + +impl<'a> Into<&'a str> for DBColumn { + /// Returns a `&str` that can be used for keying a key-value data base. 
+ fn into(self) -> &'a str { + match self { + DBColumn::Block => &"blk", + DBColumn::State => &"ste", + DBColumn::BeaconChain => &"bch", + } + } +} + +pub trait DBRecord: DBEncode + DBDecode { + fn db_column() -> DBColumn; +} + +pub struct Store +where + T: ClientDB, +{ + db: Arc, +} + +impl Store { + fn new_in_memory() -> Self { + Self { + db: Arc::new(MemoryDB::open()), + } + } +} + +impl Store +where + T: ClientDB, +{ + /// Put `item` in the store as `key`. + /// + /// The `item` must implement `DBRecord` which defines the db column used. + fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> + where + I: DBRecord, + { + let column = I::db_column().into(); + let key = key.as_bytes(); + let val = db_encode(item); + + self.db.put(column, key, &val).map_err(|e| e.into()) + } + + /// Retrieves an `Ok(Some(item)` from the store if `key` exists, otherwise returns `Ok(None)`. + /// + /// The `item` must implement `DBRecord` which defines the db column used. + fn get(&self, key: &Hash256) -> Result, Error> + where + I: DBRecord, + { + let column = I::db_column().into(); + let key = key.as_bytes(); + + match self.db.get(column, key)? 
{ + Some(bytes) => { + let (item, _index) = I::db_decode(&bytes, 0)?; + Ok(Some(item)) + } + None => Ok(None), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use db_encode_derive::{DBDecode, DBEncode}; + use ssz::Decodable; + use ssz_derive::{Decode, Encode}; + + #[derive(PartialEq, Debug, Encode, Decode, DBEncode, DBDecode)] + struct StorableThing { + a: u64, + b: u64, + } + + impl DBRecord for StorableThing { + fn db_column() -> DBColumn { + DBColumn::Block + } + } + + #[test] + fn memorydb_can_store() { + let store = Store::new_in_memory(); + + let key = Hash256::random(); + let item = StorableThing { a: 1, b: 42 }; + + store.put(&key, &item).unwrap(); + + let retrieved = store.get(&key).unwrap().unwrap(); + + assert_eq!(item, retrieved); + } +} diff --git a/beacon_node/db2/src/memory_db.rs b/beacon_node/db2/src/memory_db.rs new file mode 100644 index 000000000..008e5912f --- /dev/null +++ b/beacon_node/db2/src/memory_db.rs @@ -0,0 +1,236 @@ +use super::blake2::blake2b::blake2b; +use super::COLUMNS; +use super::{ClientDB, DBError, DBValue}; +use std::collections::{HashMap, HashSet}; +use std::sync::RwLock; + +type DBHashMap = HashMap, Vec>; +type ColumnHashSet = HashSet; + +/// An in-memory database implementing the ClientDB trait. +/// +/// It is not particularily optimized, it exists for ease and speed of testing. It's not expected +/// this DB would be used outside of tests. +pub struct MemoryDB { + db: RwLock, + known_columns: RwLock, +} + +impl MemoryDB { + /// Open the in-memory database. + /// + /// All columns must be supplied initially, you will get an error if you try to access a column + /// that was not declared here. This condition is enforced artificially to simulate RocksDB. 
+ pub fn open() -> Self { + let db: DBHashMap = HashMap::new(); + let mut known_columns: ColumnHashSet = HashSet::new(); + for col in &COLUMNS { + known_columns.insert(col.to_string()); + } + Self { + db: RwLock::new(db), + known_columns: RwLock::new(known_columns), + } + } + + /// Hashes a key and a column name in order to get a unique key for the supplied column. + fn get_key_for_col(col: &str, key: &[u8]) -> Vec { + blake2b(32, col.as_bytes(), key).as_bytes().to_vec() + } +} + +impl ClientDB for MemoryDB { + /// Get the value of some key from the database. Returns `None` if the key does not exist. + fn get(&self, col: &str, key: &[u8]) -> Result, DBError> { + // Panic if the DB locks are poisoned. + let db = self.db.read().unwrap(); + let known_columns = self.known_columns.read().unwrap(); + + if known_columns.contains(&col.to_string()) { + let column_key = MemoryDB::get_key_for_col(col, key); + Ok(db.get(&column_key).and_then(|val| Some(val.clone()))) + } else { + Err(DBError { + message: "Unknown column".to_string(), + }) + } + } + + /// Puts a key in the database. + fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> { + // Panic if the DB locks are poisoned. + let mut db = self.db.write().unwrap(); + let known_columns = self.known_columns.read().unwrap(); + + if known_columns.contains(&col.to_string()) { + let column_key = MemoryDB::get_key_for_col(col, key); + db.insert(column_key, val.to_vec()); + Ok(()) + } else { + Err(DBError { + message: "Unknown column".to_string(), + }) + } + } + + /// Return true if some key exists in some column. + fn exists(&self, col: &str, key: &[u8]) -> Result { + // Panic if the DB locks are poisoned. 
+ let db = self.db.read().unwrap(); + let known_columns = self.known_columns.read().unwrap(); + + if known_columns.contains(&col.to_string()) { + let column_key = MemoryDB::get_key_for_col(col, key); + Ok(db.contains_key(&column_key)) + } else { + Err(DBError { + message: "Unknown column".to_string(), + }) + } + } + + /// Delete some key from the database. + fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> { + // Panic if the DB locks are poisoned. + let mut db = self.db.write().unwrap(); + let known_columns = self.known_columns.read().unwrap(); + + if known_columns.contains(&col.to_string()) { + let column_key = MemoryDB::get_key_for_col(col, key); + db.remove(&column_key); + Ok(()) + } else { + Err(DBError { + message: "Unknown column".to_string(), + }) + } + } +} + +#[cfg(test)] +mod tests { + use super::super::stores::{BLOCKS_DB_COLUMN, VALIDATOR_DB_COLUMN}; + use super::super::ClientDB; + use super::*; + use std::sync::Arc; + use std::thread; + + #[test] + fn test_memorydb_can_delete() { + let col_a: &str = BLOCKS_DB_COLUMN; + + let db = MemoryDB::open(); + + db.put(col_a, "dogs".as_bytes(), "lol".as_bytes()).unwrap(); + + assert_eq!( + db.get(col_a, "dogs".as_bytes()).unwrap().unwrap(), + "lol".as_bytes() + ); + + db.delete(col_a, "dogs".as_bytes()).unwrap(); + + assert_eq!(db.get(col_a, "dogs".as_bytes()).unwrap(), None); + } + + #[test] + fn test_memorydb_column_access() { + let col_a: &str = BLOCKS_DB_COLUMN; + let col_b: &str = VALIDATOR_DB_COLUMN; + + let db = MemoryDB::open(); + + /* + * Testing that if we write to the same key in different columns that + * there is not an overlap. 
+ */ + db.put(col_a, "same".as_bytes(), "cat".as_bytes()).unwrap(); + db.put(col_b, "same".as_bytes(), "dog".as_bytes()).unwrap(); + + assert_eq!( + db.get(col_a, "same".as_bytes()).unwrap().unwrap(), + "cat".as_bytes() + ); + assert_eq!( + db.get(col_b, "same".as_bytes()).unwrap().unwrap(), + "dog".as_bytes() + ); + } + + #[test] + fn test_memorydb_unknown_column_access() { + let col_a: &str = BLOCKS_DB_COLUMN; + let col_x: &str = "ColumnX"; + + let db = MemoryDB::open(); + + /* + * Test that we get errors when using undeclared columns + */ + assert!(db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).is_ok()); + assert!(db.put(col_x, "cats".as_bytes(), "lol".as_bytes()).is_err()); + + assert!(db.get(col_a, "cats".as_bytes()).is_ok()); + assert!(db.get(col_x, "cats".as_bytes()).is_err()); + } + + #[test] + fn test_memorydb_exists() { + let col_a: &str = BLOCKS_DB_COLUMN; + let col_b: &str = VALIDATOR_DB_COLUMN; + + let db = MemoryDB::open(); + + /* + * Testing that if we write to the same key in different columns that + * there is not an overlap. + */ + db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).unwrap(); + + assert_eq!(true, db.exists(col_a, "cats".as_bytes()).unwrap()); + assert_eq!(false, db.exists(col_b, "cats".as_bytes()).unwrap()); + + assert_eq!(false, db.exists(col_a, "dogs".as_bytes()).unwrap()); + assert_eq!(false, db.exists(col_b, "dogs".as_bytes()).unwrap()); + } + + #[test] + fn test_memorydb_threading() { + let col_name: &str = BLOCKS_DB_COLUMN; + + let db = Arc::new(MemoryDB::open()); + + let thread_count = 10; + let write_count = 10; + + // We're execting the product of these numbers to fit in one byte. 
+ assert!(thread_count * write_count <= 255); + + let mut handles = vec![]; + for t in 0..thread_count { + let wc = write_count; + let db = db.clone(); + let col = col_name.clone(); + let handle = thread::spawn(move || { + for w in 0..wc { + let key = (t * w) as u8; + let val = 42; + db.put(&col, &vec![key], &vec![val]).unwrap(); + } + }); + handles.push(handle); + } + + for handle in handles { + handle.join().unwrap(); + } + + for t in 0..thread_count { + for w in 0..write_count { + let key = (t * w) as u8; + let val = db.get(&col_name, &vec![key]).unwrap().unwrap(); + assert_eq!(vec![42], val); + } + } + } +} diff --git a/beacon_node/db2/src/stores/beacon_block_store.rs b/beacon_node/db2/src/stores/beacon_block_store.rs new file mode 100644 index 000000000..e2e16e60b --- /dev/null +++ b/beacon_node/db2/src/stores/beacon_block_store.rs @@ -0,0 +1,246 @@ +use super::BLOCKS_DB_COLUMN as DB_COLUMN; +use super::{ClientDB, DBError}; +use ssz::Decodable; +use std::sync::Arc; +use types::{BeaconBlock, Hash256, Slot}; + +#[derive(Clone, Debug, PartialEq)] +pub enum BeaconBlockAtSlotError { + UnknownBeaconBlock(Hash256), + InvalidBeaconBlock(Hash256), + DBError(String), +} + +pub struct BeaconBlockStore +where + T: ClientDB, +{ + db: Arc, +} + +// Implements `put`, `get`, `exists` and `delete` for the store. +impl_crud_for_store!(BeaconBlockStore, DB_COLUMN); + +impl BeaconBlockStore { + pub fn new(db: Arc) -> Self { + Self { db } + } + + pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { + match self.get(&hash)? { + None => Ok(None), + Some(ssz) => { + let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError { + message: "Bad BeaconBlock SSZ.".to_string(), + })?; + Ok(Some(block)) + } + } + } + + /// Retrieve the block at a slot given a "head_hash" and a slot. + /// + /// A "head_hash" must be a block hash with a slot number greater than or equal to the desired + /// slot. 
+ /// + /// This function will read each block down the chain until it finds a block with the given + /// slot number. If the slot is skipped, the function will return None. + /// + /// If a block is found, a tuple of (block_hash, serialized_block) is returned. + /// + /// Note: this function uses a loop instead of recursion as the compiler is over-strict when it + /// comes to recursion and the `impl Trait` pattern. See: + /// https://stackoverflow.com/questions/54032940/using-impl-trait-in-a-recursive-function + pub fn block_at_slot( + &self, + head_hash: &Hash256, + slot: Slot, + ) -> Result, BeaconBlockAtSlotError> { + let mut current_hash = *head_hash; + + loop { + if let Some(block) = self.get_deserialized(¤t_hash)? { + if block.slot == slot { + break Ok(Some((current_hash, block))); + } else if block.slot < slot { + break Ok(None); + } else { + current_hash = block.previous_block_root; + } + } else { + break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash)); + } + } + } +} + +impl From for BeaconBlockAtSlotError { + fn from(e: DBError) -> Self { + BeaconBlockAtSlotError::DBError(e.message) + } +} + +#[cfg(test)] +mod tests { + use super::super::super::MemoryDB; + use super::*; + + use std::sync::Arc; + use std::thread; + + use ssz::ssz_encode; + use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use types::BeaconBlock; + use types::Hash256; + + test_crud_for_store!(BeaconBlockStore, DB_COLUMN); + + #[test] + fn head_hash_slot_too_low() { + let db = Arc::new(MemoryDB::open()); + let bs = Arc::new(BeaconBlockStore::new(db.clone())); + let mut rng = XorShiftRng::from_seed([42; 16]); + + let mut block = BeaconBlock::random_for_test(&mut rng); + block.slot = Slot::from(10_u64); + + let block_root = block.canonical_root(); + bs.put(&block_root, &ssz_encode(&block)).unwrap(); + + let result = bs.block_at_slot(&block_root, Slot::from(11_u64)).unwrap(); + assert_eq!(result, None); + } + + #[test] + fn test_invalid_block_at_slot() { + let db 
= Arc::new(MemoryDB::open()); + let store = BeaconBlockStore::new(db.clone()); + + let ssz = "definitly not a valid block".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); + assert_eq!( + store.block_at_slot(hash, Slot::from(42_u64)), + Err(BeaconBlockAtSlotError::DBError( + "Bad BeaconBlock SSZ.".into() + )) + ); + } + + #[test] + fn test_unknown_block_at_slot() { + let db = Arc::new(MemoryDB::open()); + let store = BeaconBlockStore::new(db.clone()); + + let ssz = "some bytes".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + let other_hash = &Hash256::from([0xBB; 32]); + + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); + assert_eq!( + store.block_at_slot(other_hash, Slot::from(42_u64)), + Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash)) + ); + } + + #[test] + fn test_block_store_on_memory_db() { + let db = Arc::new(MemoryDB::open()); + let bs = Arc::new(BeaconBlockStore::new(db.clone())); + + let thread_count = 10; + let write_count = 10; + + let mut handles = vec![]; + for t in 0..thread_count { + let wc = write_count; + let bs = bs.clone(); + let handle = thread::spawn(move || { + for w in 0..wc { + let key = t * w; + let val = 42; + bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap(); + } + }); + handles.push(handle); + } + + for handle in handles { + handle.join().unwrap(); + } + + for t in 0..thread_count { + for w in 0..write_count { + let key = t * w; + assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap()); + let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap(); + assert_eq!(vec![42], val); + } + } + } + + #[test] + #[ignore] + fn test_block_at_slot() { + let db = Arc::new(MemoryDB::open()); + let bs = Arc::new(BeaconBlockStore::new(db.clone())); + let mut rng = XorShiftRng::from_seed([42; 16]); + + // Specify test block parameters. 
+ let hashes = [ + Hash256::from([0; 32]), + Hash256::from([1; 32]), + Hash256::from([2; 32]), + Hash256::from([3; 32]), + Hash256::from([4; 32]), + ]; + let parent_hashes = [ + Hash256::from([255; 32]), // Genesis block. + Hash256::from([0; 32]), + Hash256::from([1; 32]), + Hash256::from([2; 32]), + Hash256::from([3; 32]), + ]; + let unknown_hash = Hash256::from([101; 32]); // different from all above + let slots: Vec = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect(); + + // Generate a vec of random blocks and store them in the DB. + let block_count = 5; + let mut blocks: Vec = Vec::with_capacity(5); + for i in 0..block_count { + let mut block = BeaconBlock::random_for_test(&mut rng); + + block.previous_block_root = parent_hashes[i]; + block.slot = slots[i]; + + let ssz = ssz_encode(&block); + db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap(); + + blocks.push(block); + } + + // Test that certain slots can be reached from certain hashes. + let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)]; + for (hashes_index, slot_index) in test_cases { + let (matched_block_hash, block) = bs + .block_at_slot(&hashes[hashes_index], slots[slot_index]) + .unwrap() + .unwrap(); + assert_eq!(matched_block_hash, hashes[slot_index]); + assert_eq!(block.slot, slots[slot_index]); + } + + let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap(); + assert_eq!(ssz, None); + + let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap(); + assert_eq!(ssz, None); + + let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2)); + assert_eq!( + ssz, + Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash)) + ); + } +} diff --git a/beacon_node/db2/src/stores/beacon_state_store.rs b/beacon_node/db2/src/stores/beacon_state_store.rs new file mode 100644 index 000000000..fd6ff569a --- /dev/null +++ b/beacon_node/db2/src/stores/beacon_state_store.rs @@ -0,0 +1,62 @@ +use super::STATES_DB_COLUMN as DB_COLUMN; +use super::{ClientDB, DBError}; +use ssz::Decodable; +use 
std::sync::Arc; +use types::{BeaconState, Hash256}; + +pub struct BeaconStateStore +where + T: ClientDB, +{ + db: Arc, +} + +// Implements `put`, `get`, `exists` and `delete` for the store. +impl_crud_for_store!(BeaconStateStore, DB_COLUMN); + +impl BeaconStateStore { + pub fn new(db: Arc) -> Self { + Self { db } + } + + pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { + match self.get(&hash)? { + None => Ok(None), + Some(ssz) => { + let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError { + message: "Bad State SSZ.".to_string(), + })?; + Ok(Some(state)) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::super::super::MemoryDB; + use super::*; + + use ssz::ssz_encode; + use std::sync::Arc; + use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use types::Hash256; + + test_crud_for_store!(BeaconStateStore, DB_COLUMN); + + #[test] + fn test_reader() { + let db = Arc::new(MemoryDB::open()); + let store = BeaconStateStore::new(db.clone()); + + let mut rng = XorShiftRng::from_seed([42; 16]); + let state = BeaconState::random_for_test(&mut rng); + let state_root = state.canonical_root(); + + store.put(&state_root, &ssz_encode(&state)).unwrap(); + + let decoded = store.get_deserialized(&state_root).unwrap().unwrap(); + + assert_eq!(state, decoded); + } +} diff --git a/beacon_node/db2/src/stores/macros.rs b/beacon_node/db2/src/stores/macros.rs new file mode 100644 index 000000000..6c53e40ee --- /dev/null +++ b/beacon_node/db2/src/stores/macros.rs @@ -0,0 +1,103 @@ +macro_rules! 
impl_crud_for_store { + ($store: ident, $db_column: expr) => { + impl $store { + pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> { + self.db.put($db_column, hash.as_bytes(), ssz) + } + + pub fn get(&self, hash: &Hash256) -> Result>, DBError> { + self.db.get($db_column, hash.as_bytes()) + } + + pub fn exists(&self, hash: &Hash256) -> Result { + self.db.exists($db_column, hash.as_bytes()) + } + + pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> { + self.db.delete($db_column, hash.as_bytes()) + } + } + }; +} + +#[cfg(test)] +macro_rules! test_crud_for_store { + ($store: ident, $db_column: expr) => { + #[test] + fn test_put() { + let db = Arc::new(MemoryDB::open()); + let store = $store::new(db.clone()); + + let ssz = "some bytes".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + + store.put(hash, ssz).unwrap(); + assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz); + } + + #[test] + fn test_get() { + let db = Arc::new(MemoryDB::open()); + let store = $store::new(db.clone()); + + let ssz = "some bytes".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); + assert_eq!(store.get(hash).unwrap().unwrap(), ssz); + } + + #[test] + fn test_get_unknown() { + let db = Arc::new(MemoryDB::open()); + let store = $store::new(db.clone()); + + let ssz = "some bytes".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + let other_hash = &Hash256::from([0xBB; 32]); + + db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap(); + assert_eq!(store.get(hash).unwrap(), None); + } + + #[test] + fn test_exists() { + let db = Arc::new(MemoryDB::open()); + let store = $store::new(db.clone()); + + let ssz = "some bytes".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); + assert!(store.exists(hash).unwrap()); + } + + #[test] + fn test_block_does_not_exist() { + let db = Arc::new(MemoryDB::open()); + let store = 
$store::new(db.clone()); + + let ssz = "some bytes".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + let other_hash = &Hash256::from([0xBB; 32]); + + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); + assert!(!store.exists(other_hash).unwrap()); + } + + #[test] + fn test_delete() { + let db = Arc::new(MemoryDB::open()); + let store = $store::new(db.clone()); + + let ssz = "some bytes".as_bytes(); + let hash = &Hash256::from([0xAA; 32]); + + db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); + assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); + + store.delete(hash).unwrap(); + assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); + } + }; +} diff --git a/beacon_node/db2/src/stores/mod.rs b/beacon_node/db2/src/stores/mod.rs new file mode 100644 index 000000000..44de7eed1 --- /dev/null +++ b/beacon_node/db2/src/stores/mod.rs @@ -0,0 +1,25 @@ +use super::{ClientDB, DBError}; + +#[macro_use] +mod macros; +mod beacon_block_store; +mod beacon_state_store; +mod pow_chain_store; +mod validator_store; + +pub use self::beacon_block_store::{BeaconBlockAtSlotError, BeaconBlockStore}; +pub use self::beacon_state_store::BeaconStateStore; +pub use self::pow_chain_store::PoWChainStore; +pub use self::validator_store::{ValidatorStore, ValidatorStoreError}; + +pub const BLOCKS_DB_COLUMN: &str = "blocks"; +pub const STATES_DB_COLUMN: &str = "states"; +pub const POW_CHAIN_DB_COLUMN: &str = "powchain"; +pub const VALIDATOR_DB_COLUMN: &str = "validator"; + +pub const COLUMNS: [&str; 4] = [ + BLOCKS_DB_COLUMN, + STATES_DB_COLUMN, + POW_CHAIN_DB_COLUMN, + VALIDATOR_DB_COLUMN, +]; diff --git a/beacon_node/db2/src/stores/pow_chain_store.rs b/beacon_node/db2/src/stores/pow_chain_store.rs new file mode 100644 index 000000000..5c8b97907 --- /dev/null +++ b/beacon_node/db2/src/stores/pow_chain_store.rs @@ -0,0 +1,68 @@ +use super::POW_CHAIN_DB_COLUMN as DB_COLUMN; +use super::{ClientDB, DBError}; +use std::sync::Arc; + +pub struct PoWChainStore +where + T: ClientDB, +{ + db: 
Arc, +} + +impl PoWChainStore { + pub fn new(db: Arc) -> Self { + Self { db } + } + + pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> { + self.db.put(DB_COLUMN, hash, &[0]) + } + + pub fn block_hash_exists(&self, hash: &[u8]) -> Result { + self.db.exists(DB_COLUMN, hash) + } +} + +#[cfg(test)] +mod tests { + extern crate types; + + use super::super::super::MemoryDB; + use super::*; + + use self::types::Hash256; + + #[test] + fn test_put_block_hash() { + let db = Arc::new(MemoryDB::open()); + let store = PoWChainStore::new(db.clone()); + + let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); + store.put_block_hash(hash).unwrap(); + + assert!(db.exists(DB_COLUMN, hash).unwrap()); + } + + #[test] + fn test_block_hash_exists() { + let db = Arc::new(MemoryDB::open()); + let store = PoWChainStore::new(db.clone()); + + let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); + db.put(DB_COLUMN, hash, &[0]).unwrap(); + + assert!(store.block_hash_exists(hash).unwrap()); + } + + #[test] + fn test_block_hash_does_not_exist() { + let db = Arc::new(MemoryDB::open()); + let store = PoWChainStore::new(db.clone()); + + let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); + let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec(); + db.put(DB_COLUMN, hash, &[0]).unwrap(); + + assert!(!store.block_hash_exists(other_hash).unwrap()); + } +} diff --git a/beacon_node/db2/src/stores/validator_store.rs b/beacon_node/db2/src/stores/validator_store.rs new file mode 100644 index 000000000..02e90dc5c --- /dev/null +++ b/beacon_node/db2/src/stores/validator_store.rs @@ -0,0 +1,215 @@ +extern crate bytes; + +use self::bytes::{BufMut, BytesMut}; +use super::VALIDATOR_DB_COLUMN as DB_COLUMN; +use super::{ClientDB, DBError}; +use bls::PublicKey; +use ssz::{ssz_encode, Decodable}; +use std::sync::Arc; + +#[derive(Debug, PartialEq)] +pub enum ValidatorStoreError { + DBError(String), + DecodeError, +} + +impl From for ValidatorStoreError { + fn from(error: 
DBError) -> Self { + ValidatorStoreError::DBError(error.message) + } +} + +#[derive(Debug, PartialEq)] +enum KeyPrefixes { + PublicKey, +} + +pub struct ValidatorStore +where + T: ClientDB, +{ + db: Arc, +} + +impl ValidatorStore { + pub fn new(db: Arc) -> Self { + Self { db } + } + + fn prefix_bytes(&self, key_prefix: &KeyPrefixes) -> Vec { + match key_prefix { + KeyPrefixes::PublicKey => b"pubkey".to_vec(), + } + } + + fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) -> Vec { + let mut buf = BytesMut::with_capacity(6 + 8); + buf.put(self.prefix_bytes(key_prefix)); + buf.put_u64_be(index as u64); + buf.take().to_vec() + } + + pub fn put_public_key_by_index( + &self, + index: usize, + public_key: &PublicKey, + ) -> Result<(), ValidatorStoreError> { + let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); + let val = ssz_encode(public_key); + self.db + .put(DB_COLUMN, &key[..], &val[..]) + .map_err(ValidatorStoreError::from) + } + + pub fn get_public_key_by_index( + &self, + index: usize, + ) -> Result, ValidatorStoreError> { + let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); + let val = self.db.get(DB_COLUMN, &key[..])?; + match val { + None => Ok(None), + Some(val) => match PublicKey::ssz_decode(&val, 0) { + Ok((key, _)) => Ok(Some(key)), + Err(_) => Err(ValidatorStoreError::DecodeError), + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::super::super::MemoryDB; + use super::*; + use bls::Keypair; + + #[test] + fn test_prefix_bytes() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + assert_eq!( + store.prefix_bytes(&KeyPrefixes::PublicKey), + b"pubkey".to_vec() + ); + } + + #[test] + fn test_get_db_key_for_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let mut buf = BytesMut::with_capacity(6 + 8); + buf.put(b"pubkey".to_vec()); + buf.put_u64_be(42); + assert_eq!( + 
store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42), + buf.take().to_vec() + ) + } + + #[test] + fn test_put_public_key_by_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let index = 3; + let public_key = Keypair::random().pk; + + store.put_public_key_by_index(index, &public_key).unwrap(); + let public_key_at_index = db + .get( + DB_COLUMN, + &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], + ) + .unwrap() + .unwrap(); + + assert_eq!(public_key_at_index, ssz_encode(&public_key)); + } + + #[test] + fn test_get_public_key_by_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let index = 4; + let public_key = Keypair::random().pk; + + db.put( + DB_COLUMN, + &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], + &ssz_encode(&public_key)[..], + ) + .unwrap(); + + let public_key_at_index = store.get_public_key_by_index(index).unwrap().unwrap(); + assert_eq!(public_key_at_index, public_key); + } + + #[test] + fn test_get_public_key_by_unknown_index() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let public_key = Keypair::random().pk; + + db.put( + DB_COLUMN, + &store.get_db_key_for_index(&KeyPrefixes::PublicKey, 3)[..], + &ssz_encode(&public_key)[..], + ) + .unwrap(); + + let public_key_at_index = store.get_public_key_by_index(4).unwrap(); + assert_eq!(public_key_at_index, None); + } + + #[test] + fn test_get_invalid_public_key() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db.clone()); + + let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42); + db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap(); + + assert_eq!( + store.get_public_key_by_index(42), + Err(ValidatorStoreError::DecodeError) + ); + } + + #[test] + fn test_validator_store_put_get() { + let db = Arc::new(MemoryDB::open()); + let store = ValidatorStore::new(db); + + let keys = vec![ + 
Keypair::random(), + Keypair::random(), + Keypair::random(), + Keypair::random(), + Keypair::random(), + ]; + + for i in 0..keys.len() { + store.put_public_key_by_index(i, &keys[i].pk).unwrap(); + } + + /* + * Check all keys are retrieved correctly. + */ + for i in 0..keys.len() { + let retrieved = store.get_public_key_by_index(i).unwrap().unwrap(); + assert_eq!(retrieved, keys[i].pk); + } + + /* + * Check that an index that wasn't stored returns None. + */ + assert!(store + .get_public_key_by_index(keys.len() + 1) + .unwrap() + .is_none()); + } +} diff --git a/beacon_node/db2/src/traits.rs b/beacon_node/db2/src/traits.rs new file mode 100644 index 000000000..57ebf9353 --- /dev/null +++ b/beacon_node/db2/src/traits.rs @@ -0,0 +1,38 @@ +pub type DBValue = Vec; + +#[derive(Debug)] +pub struct DBError { + pub message: String, +} + +impl DBError { + pub fn new(message: String) -> Self { + Self { message } + } +} + +/// A generic database to be used by the "client' (i.e., +/// the lighthouse blockchain client). +/// +/// The purpose of having this generic trait is to allow the +/// program to use a persistent on-disk database during production, +/// but use a transient database during tests. 
+pub trait ClientDB: Sync + Send { + fn get(&self, col: &str, key: &[u8]) -> Result, DBError>; + + fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError>; + + fn exists(&self, col: &str, key: &[u8]) -> Result; + + fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError>; +} + +pub enum DBColumn { + Block, + State, + BeaconChain, +} + +pub trait DBStore { + fn db_column(&self) -> DBColumn; +} diff --git a/beacon_node/db_encode/Cargo.toml b/beacon_node/db_encode/Cargo.toml new file mode 100644 index 000000000..b4e919585 --- /dev/null +++ b/beacon_node/db_encode/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "db_encode" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +ethereum-types = "0.5" +ssz = { path = "../../eth2/utils/ssz" } diff --git a/beacon_node/db_encode/src/lib.rs b/beacon_node/db_encode/src/lib.rs new file mode 100644 index 000000000..993ba0e79 --- /dev/null +++ b/beacon_node/db_encode/src/lib.rs @@ -0,0 +1,59 @@ +use ethereum_types::{Address, H256}; +use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream}; + +/// Convenience function to encode an object. +pub fn db_encode(val: &T) -> Vec +where + T: DBEncode, +{ + let mut ssz_stream = SszStream::new(); + ssz_stream.append(val); + ssz_stream.drain() +} + +/// An encoding scheme based solely upon SSZ. +/// +/// The reason we have a separate encoding scheme is to allows us to store fields in the DB that we +/// don't want to transmit across the wire or hash. +/// +/// For example, the cache fields on `BeaconState` should be stored in the DB, but they should not +/// be hashed or transmitted across the wire. `DBEncode` allows us to define two serialization +/// methods, one that encodes the caches and one that does not. +pub trait DBEncode: Encodable + Sized { + fn db_encode(&self, s: &mut SszStream) { + s.append(&ssz_encode(self)); + } +} + +/// A decoding scheme based solely upon SSZ. 
+/// +/// See `DBEncode` for reasoning on why this trait exists. +pub trait DBDecode: Decodable { + fn db_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + Self::ssz_decode(bytes, index) + } +} + +// Implement encoding. +impl DBEncode for bool {} +impl DBEncode for u8 {} +impl DBEncode for u16 {} +impl DBEncode for u32 {} +impl DBEncode for u64 {} +impl DBEncode for usize {} +impl DBEncode for Vec where T: Encodable + Sized {} + +impl DBEncode for H256 {} +impl DBEncode for Address {} + +// Implement decoding. +impl DBDecode for bool {} +impl DBDecode for u8 {} +impl DBDecode for u16 {} +impl DBDecode for u32 {} +impl DBDecode for u64 {} +impl DBDecode for usize {} +impl DBDecode for Vec where T: Decodable {} + +impl DBDecode for H256 {} +impl DBDecode for Address {} diff --git a/beacon_node/db_encode_derive/Cargo.toml b/beacon_node/db_encode_derive/Cargo.toml new file mode 100644 index 000000000..b2fba85e3 --- /dev/null +++ b/beacon_node/db_encode_derive/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "db_encode_derive" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" +description = "Procedural derive macros for `db_encode` encoding and decoding." + +[lib] +proc-macro = true + +[dependencies] +syn = "0.15" +quote = "0.6" diff --git a/beacon_node/db_encode_derive/src/lib.rs b/beacon_node/db_encode_derive/src/lib.rs new file mode 100644 index 000000000..1de081419 --- /dev/null +++ b/beacon_node/db_encode_derive/src/lib.rs @@ -0,0 +1,305 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +/// Returns a Vec of `syn::Ident` for each named field in the struct. +/// +/// # Panics +/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. 
+fn get_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> { + struct_data + .fields + .iter() + .map(|f| match &f.ident { + Some(ref ident) => ident, + _ => panic!("db_derive only supports named struct fields."), + }) + .collect() +} + +/// Implements `db_encode::DBEncode` for some `struct`. +/// +/// Fields are encoded in the order they are defined. +#[proc_macro_derive(DBEncode)] +pub fn db_encode_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("db_derive only supports structs."), + }; + + let field_idents = get_named_field_idents(&struct_data); + + let output = quote! { + impl db_encode::DBEncode for #name { + fn db_encode(&self, s: &mut ssz::SszStream) { + #( + s.append(&self.#field_idents); + )* + } + } + }; + output.into() +} + +/// Implements `db_encode::DBEncode` for some `struct`. +/// +/// Fields are encoded in the order they are defined. +#[proc_macro_derive(DBDecode)] +pub fn db_decode_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("ssz_derive only supports structs."), + }; + + let field_idents = get_named_field_idents(&struct_data); + + // Using a var in an iteration always consumes the var, therefore we must make a `fields_a` and + // a `fields_b` in order to perform two loops. + // + // https://github.com/dtolnay/quote/issues/8 + let field_idents_a = &field_idents; + let field_idents_b = &field_idents; + + let output = quote! 
{ + impl db_encode::DBDecode for #name { + fn db_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), ssz::DecodeError> { + #( + let (#field_idents_a, i) = <_>::ssz_decode(bytes, i)?; + )* + + Ok(( + Self { + #( + #field_idents_b, + )* + }, + i + )) + } + } + }; + output.into() +} + +/* +/// Returns true if some field has an attribute declaring it should not be deserialized. +/// +/// The field attribute is: `#[ssz(skip_deserializing)]` +fn should_skip_deserializing(field: &syn::Field) -> bool { + for attr in &field.attrs { + if attr.tts.to_string() == "( skip_deserializing )" { + return true; + } + } + false +} + +/// Implements `ssz::Decodable` for some `struct`. +/// +/// Fields are decoded in the order they are defined. +#[proc_macro_derive(Decode)] +pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("ssz_derive only supports structs."), + }; + + let all_idents = get_named_field_idents(&struct_data); + + // Build quotes for fields that should be deserialized and those that should be built from + // `Default`. + let mut quotes = vec![]; + for field in &struct_data.fields { + match &field.ident { + Some(ref ident) => { + if should_skip_deserializing(field) { + quotes.push(quote! { + let #ident = <_>::default(); + }); + } else { + quotes.push(quote! { + let (#ident, i) = <_>::ssz_decode(bytes, i)?; + }); + } + } + _ => panic!("ssz_derive only supports named struct fields."), + }; + } + + let output = quote! { + impl ssz::Decodable for #name { + fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), ssz::DecodeError> { + #( + #quotes + )* + + Ok(( + Self { + #( + #all_idents, + )* + }, + i + )) + } + } + }; + output.into() +} + +/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields +/// that should not be tree hashed. 
+/// +/// # Panics +/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. +fn get_tree_hashable_named_field_idents<'a>( + struct_data: &'a syn::DataStruct, +) -> Vec<&'a syn::Ident> { + struct_data + .fields + .iter() + .filter_map(|f| { + if should_skip_tree_hash(&f) { + None + } else { + Some(match &f.ident { + Some(ref ident) => ident, + _ => panic!("ssz_derive only supports named struct fields."), + }) + } + }) + .collect() +} + +/// Returns true if some field has an attribute declaring it should not be tree-hashed. +/// +/// The field attribute is: `#[tree_hash(skip_hashing)]` +fn should_skip_tree_hash(field: &syn::Field) -> bool { + for attr in &field.attrs { + if attr.tts.to_string() == "( skip_hashing )" { + return true; + } + } + false +} + +/// Implements `ssz::TreeHash` for some `struct`. +/// +/// Fields are processed in the order they are defined. +#[proc_macro_derive(TreeHash, attributes(tree_hash))] +pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("ssz_derive only supports structs."), + }; + + let field_idents = get_tree_hashable_named_field_idents(&struct_data); + + let output = quote! { + impl ssz::TreeHash for #name { + fn hash_tree_root(&self) -> Vec { + let mut list: Vec> = Vec::new(); + #( + list.push(self.#field_idents.hash_tree_root()); + )* + + ssz::merkle_hash(&mut list) + } + } + }; + output.into() +} + +/// Returns `true` if some `Ident` should be considered to be a signature type. +fn type_ident_is_signature(ident: &syn::Ident) -> bool { + match ident.to_string().as_ref() { + "Signature" => true, + "AggregateSignature" => true, + _ => false, + } +} + +/// Takes a `Field` where the type (`ty`) portion is a path (e.g., `types::Signature`) and returns +/// the final `Ident` in that path. 
+/// +/// E.g., for `types::Signature` returns `Signature`. +fn final_type_ident(field: &syn::Field) -> &syn::Ident { + match &field.ty { + syn::Type::Path(path) => &path.path.segments.last().unwrap().value().ident, + _ => panic!("ssz_derive only supports Path types."), + } +} + +/// Implements `ssz::TreeHash` for some `struct`, whilst excluding any fields following and +/// including a field that is of type "Signature" or "AggregateSignature". +/// +/// See: +/// https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots +/// +/// This is a rather horrendous macro, it will read the type of the object as a string and decide +/// if it's a signature by matching that string against "Signature" or "AggregateSignature". So, +/// it's important that you use those exact words as your type -- don't alias it to something else. +/// +/// If you can think of a better way to do this, please make an issue! +/// +/// Fields are processed in the order they are defined. +#[proc_macro_derive(SignedRoot)] +pub fn ssz_signed_root_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("ssz_derive only supports structs."), + }; + + let mut field_idents: Vec<&syn::Ident> = vec![]; + + for field in struct_data.fields.iter() { + let final_type_ident = final_type_ident(&field); + + if type_ident_is_signature(final_type_ident) { + break; + } else { + let ident = field + .ident + .as_ref() + .expect("ssz_derive only supports named_struct fields."); + field_idents.push(ident); + } + } + + let output = quote! 
{ + impl ssz::SignedRoot for #name { + fn signed_root(&self) -> Vec { + let mut list: Vec> = Vec::new(); + #( + list.push(self.#field_idents.hash_tree_root()); + )* + + ssz::merkle_hash(&mut list) + } + } + }; + output.into() +} +*/ From d6664cb4ac3353020c3f30b821c0686461da40f7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 2 Apr 2019 13:41:04 +1100 Subject: [PATCH 05/21] Implement all methods on `Store` --- beacon_node/db2/src/lib.rs | 47 +++++++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/beacon_node/db2/src/lib.rs b/beacon_node/db2/src/lib.rs index 0704a84f5..55f419978 100644 --- a/beacon_node/db2/src/lib.rs +++ b/beacon_node/db2/src/lib.rs @@ -83,8 +83,6 @@ where T: ClientDB, { /// Put `item` in the store as `key`. - /// - /// The `item` must implement `DBRecord` which defines the db column used. fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> where I: DBRecord, @@ -96,9 +94,7 @@ where self.db.put(column, key, &val).map_err(|e| e.into()) } - /// Retrieves an `Ok(Some(item)` from the store if `key` exists, otherwise returns `Ok(None)`. - /// - /// The `item` must implement `DBRecord` which defines the db column used. + /// Retrieves an `Ok(Some(item))` from the store if `key` exists, otherwise returns `Ok(None)`. fn get(&self, key: &Hash256) -> Result, Error> where I: DBRecord, @@ -114,6 +110,28 @@ where None => Ok(None), } } + + /// Returns `Ok(true)` `key` exists in the store. + fn exists(&self, key: &Hash256) -> Result + where + I: DBRecord, + { + let column = I::db_column().into(); + let key = key.as_bytes(); + + self.db.exists(column, key).map_err(|e| e.into()) + } + + /// Returns `Ok(())` if `key` was deleted from the database or did not exist. 
+ fn delete(&self, key: &Hash256) -> Result<(), Error> + where + I: DBRecord, + { + let column = I::db_column().into(); + let key = key.as_bytes(); + + self.db.delete(column, key).map_err(|e| e.into()) + } } #[cfg(test)] @@ -136,7 +154,7 @@ mod tests { } #[test] - fn memorydb_can_store() { + fn memorydb_can_store_and_retrieve() { let store = Store::new_in_memory(); let key = Hash256::random(); @@ -148,4 +166,21 @@ mod tests { assert_eq!(item, retrieved); } + + #[test] + fn exists() { + let store = Store::new_in_memory(); + let key = Hash256::random(); + let item = StorableThing { a: 1, b: 42 }; + + assert_eq!(store.exists::(&key).unwrap(), false); + + store.put(&key, &item).unwrap(); + + assert_eq!(store.exists::(&key).unwrap(), true); + + store.delete::(&key).unwrap(); + + assert_eq!(store.exists::(&key).unwrap(), false); + } } From 85266f8db0f41c2790c42e6f6565137b33f1580c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 1 May 2019 11:42:18 +1000 Subject: [PATCH 06/21] Trim db2 down to basic new parts --- beacon_node/db2/Cargo.toml | 1 + beacon_node/db2/src/disk_db.rs | 6 +- beacon_node/db2/src/errors.rs | 30 +++ beacon_node/db2/src/lib.rs | 133 ++++------ beacon_node/db2/src/memory_db.rs | 231 ++-------------- .../db2/src/stores/beacon_block_store.rs | 246 ------------------ .../db2/src/stores/beacon_state_store.rs | 62 ----- beacon_node/db2/src/stores/macros.rs | 103 -------- beacon_node/db2/src/stores/mod.rs | 25 -- beacon_node/db2/src/stores/pow_chain_store.rs | 68 ----- beacon_node/db2/src/stores/validator_store.rs | 215 --------------- beacon_node/db2/src/traits.rs | 38 --- 12 files changed, 113 insertions(+), 1045 deletions(-) create mode 100644 beacon_node/db2/src/errors.rs delete mode 100644 beacon_node/db2/src/stores/beacon_block_store.rs delete mode 100644 beacon_node/db2/src/stores/beacon_state_store.rs delete mode 100644 beacon_node/db2/src/stores/macros.rs delete mode 100644 beacon_node/db2/src/stores/mod.rs delete mode 100644 
beacon_node/db2/src/stores/pow_chain_store.rs delete mode 100644 beacon_node/db2/src/stores/validator_store.rs delete mode 100644 beacon_node/db2/src/traits.rs diff --git a/beacon_node/db2/Cargo.toml b/beacon_node/db2/Cargo.toml index 8a5dbad5e..95e87c9ea 100644 --- a/beacon_node/db2/Cargo.toml +++ b/beacon_node/db2/Cargo.toml @@ -10,6 +10,7 @@ bls = { path = "../../eth2/utils/bls" } bytes = "0.4.10" db_encode = { path = "../db_encode" } db_encode_derive = { path = "../db_encode_derive" } +parking_lot = "0.7" rocksdb = "0.10.1" ssz = { path = "../../eth2/utils/ssz" } ssz_derive = { path = "../../eth2/utils/ssz_derive" } diff --git a/beacon_node/db2/src/disk_db.rs b/beacon_node/db2/src/disk_db.rs index f05320f7f..e2162e29a 100644 --- a/beacon_node/db2/src/disk_db.rs +++ b/beacon_node/db2/src/disk_db.rs @@ -1,9 +1,9 @@ extern crate rocksdb; -use super::rocksdb::Error as RocksError; -use super::rocksdb::{Options, DB}; -use super::stores::COLUMNS; +// use super::stores::COLUMNS; use super::{ClientDB, DBError, DBValue}; +use rocksdb::Error as RocksError; +use rocksdb::{Options, DB}; use std::fs; use std::path::Path; diff --git a/beacon_node/db2/src/errors.rs b/beacon_node/db2/src/errors.rs new file mode 100644 index 000000000..815b35a8e --- /dev/null +++ b/beacon_node/db2/src/errors.rs @@ -0,0 +1,30 @@ +use ssz::DecodeError; + +#[derive(Debug, PartialEq)] +pub enum Error { + SszDecodeError(DecodeError), + DBError { message: String }, +} + +impl From for Error { + fn from(e: DecodeError) -> Error { + Error::SszDecodeError(e) + } +} + +impl From for Error { + fn from(e: DBError) -> Error { + Error::DBError { message: e.message } + } +} + +#[derive(Debug)] +pub struct DBError { + pub message: String, +} + +impl DBError { + pub fn new(message: String) -> Self { + Self { message } + } +} diff --git a/beacon_node/db2/src/lib.rs b/beacon_node/db2/src/lib.rs index 55f419978..1ed9d5984 100644 --- a/beacon_node/db2/src/lib.rs +++ b/beacon_node/db2/src/lib.rs @@ -1,38 +1,42 @@ 
-extern crate blake2_rfc as blake2; -extern crate bls; -extern crate rocksdb; - -mod disk_db; +// mod disk_db; +mod errors; mod memory_db; -pub mod stores; -mod traits; -use self::stores::COLUMNS; use db_encode::{db_encode, DBDecode, DBEncode}; -use ssz::DecodeError; -use std::sync::Arc; -pub use self::disk_db::DiskDB; pub use self::memory_db::MemoryDB; -pub use self::traits::{ClientDB, DBError, DBValue}; +pub use errors::Error; pub use types::*; +pub type DBValue = Vec; -#[derive(Debug, PartialEq)] -pub enum Error { - SszDecodeError(DecodeError), - DBError { message: String }, +pub trait StoreDB: Sync + Send + Sized { + fn put(&self, key: &Hash256, item: &impl DBRecord) -> Result<(), Error> { + item.db_put(self, key) + } + + fn get(&self, key: &Hash256) -> Result, Error> { + I::db_get(self, key) + } + + fn exists(&self, key: &Hash256) -> Result { + I::db_exists(self, key) + } + + fn delete(&self, key: &Hash256) -> Result<(), Error> { + I::db_delete(self, key) + } + + fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error>; + + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error>; + + fn key_exists(&self, col: &str, key: &[u8]) -> Result; + + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; } -impl From for Error { - fn from(e: DecodeError) -> Error { - Error::SszDecodeError(e) - } -} - -impl From for Error { - fn from(e: DBError) -> Error { - Error::DBError { message: e.message } - } +pub trait DBStore { + fn db_column(&self) -> DBColumn; } /// Currently available database options @@ -61,76 +65,41 @@ impl<'a> Into<&'a str> for DBColumn { pub trait DBRecord: DBEncode + DBDecode { fn db_column() -> DBColumn; -} -pub struct Store -where - T: ClientDB, -{ - db: Arc, -} - -impl Store { - fn new_in_memory() -> Self { - Self { - db: Arc::new(MemoryDB::open()), - } - } -} - -impl Store -where - T: ClientDB, -{ - /// Put `item` in the store as `key`. 
- fn put(&self, key: &Hash256, item: &I) -> Result<(), Error> - where - I: DBRecord, - { - let column = I::db_column().into(); - let key = key.as_bytes(); - let val = db_encode(item); - - self.db.put(column, key, &val).map_err(|e| e.into()) - } - - /// Retrieves an `Ok(Some(item))` from the store if `key` exists, otherwise returns `Ok(None)`. - fn get(&self, key: &Hash256) -> Result, Error> - where - I: DBRecord, - { - let column = I::db_column().into(); + fn db_put(&self, store: &impl StoreDB, key: &Hash256) -> Result<(), Error> { + let column = Self::db_column().into(); let key = key.as_bytes(); - match self.db.get(column, key)? { + store + .put_bytes(column, key, &db_encode(self)) + .map_err(|e| e.into()) + } + + fn db_get(store: &impl StoreDB, key: &Hash256) -> Result, Error> { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + match store.get_bytes(column, key)? { Some(bytes) => { - let (item, _index) = I::db_decode(&bytes, 0)?; + let (item, _index) = Self::db_decode(&bytes, 0)?; Ok(Some(item)) } None => Ok(None), } } - /// Returns `Ok(true)` `key` exists in the store. - fn exists(&self, key: &Hash256) -> Result - where - I: DBRecord, - { - let column = I::db_column().into(); + fn db_exists(store: &impl StoreDB, key: &Hash256) -> Result { + let column = Self::db_column().into(); let key = key.as_bytes(); - self.db.exists(column, key).map_err(|e| e.into()) + store.key_exists(column, key) } - /// Returns `Ok(())` if `key` was deleted from the database or did not exist. 
- fn delete(&self, key: &Hash256) -> Result<(), Error> - where - I: DBRecord, - { - let column = I::db_column().into(); + fn db_delete(store: &impl StoreDB, key: &Hash256) -> Result<(), Error> { + let column = Self::db_column().into(); let key = key.as_bytes(); - self.db.delete(column, key).map_err(|e| e.into()) + store.key_delete(column, key) } } @@ -155,7 +124,7 @@ mod tests { #[test] fn memorydb_can_store_and_retrieve() { - let store = Store::new_in_memory(); + let store = MemoryDB::open(); let key = Hash256::random(); let item = StorableThing { a: 1, b: 42 }; @@ -169,7 +138,7 @@ mod tests { #[test] fn exists() { - let store = Store::new_in_memory(); + let store = MemoryDB::open(); let key = Hash256::random(); let item = StorableThing { a: 1, b: 42 }; diff --git a/beacon_node/db2/src/memory_db.rs b/beacon_node/db2/src/memory_db.rs index 008e5912f..bc736e525 100644 --- a/beacon_node/db2/src/memory_db.rs +++ b/beacon_node/db2/src/memory_db.rs @@ -1,236 +1,61 @@ -use super::blake2::blake2b::blake2b; -use super::COLUMNS; -use super::{ClientDB, DBError, DBValue}; -use std::collections::{HashMap, HashSet}; -use std::sync::RwLock; +use super::{DBValue, Error, StoreDB}; +use parking_lot::RwLock; +use std::collections::HashMap; type DBHashMap = HashMap, Vec>; -type ColumnHashSet = HashSet; -/// An in-memory database implementing the ClientDB trait. -/// -/// It is not particularily optimized, it exists for ease and speed of testing. It's not expected -/// this DB would be used outside of tests. pub struct MemoryDB { db: RwLock, - known_columns: RwLock, } impl MemoryDB { - /// Open the in-memory database. - /// - /// All columns must be supplied initially, you will get an error if you try to access a column - /// that was not declared here. This condition is enforced artificially to simulate RocksDB. 
pub fn open() -> Self { - let db: DBHashMap = HashMap::new(); - let mut known_columns: ColumnHashSet = HashSet::new(); - for col in &COLUMNS { - known_columns.insert(col.to_string()); - } Self { - db: RwLock::new(db), - known_columns: RwLock::new(known_columns), + db: RwLock::new(HashMap::new()), } } - /// Hashes a key and a column name in order to get a unique key for the supplied column. fn get_key_for_col(col: &str, key: &[u8]) -> Vec { - blake2b(32, col.as_bytes(), key).as_bytes().to_vec() + let mut col = col.as_bytes().to_vec(); + col.append(&mut key.to_vec()); + col } } -impl ClientDB for MemoryDB { +impl StoreDB for MemoryDB { /// Get the value of some key from the database. Returns `None` if the key does not exist. - fn get(&self, col: &str, key: &[u8]) -> Result, DBError> { - // Panic if the DB locks are poisoned. - let db = self.db.read().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - Ok(db.get(&column_key).and_then(|val| Some(val.clone()))) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } + Ok(self + .db + .read() + .get(&column_key) + .and_then(|val| Some(val.clone()))) } /// Puts a key in the database. - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> { - // Panic if the DB locks are poisoned. 
- let mut db = self.db.write().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - db.insert(column_key, val.to_vec()); - Ok(()) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } + self.db.write().insert(column_key, val.to_vec()); + + Ok(()) } /// Return true if some key exists in some column. - fn exists(&self, col: &str, key: &[u8]) -> Result { - // Panic if the DB locks are poisoned. - let db = self.db.read().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn key_exists(&self, col: &str, key: &[u8]) -> Result { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - Ok(db.contains_key(&column_key)) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } + Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> { - // Panic if the DB locks are poisoned. 
- let mut db = self.db.write().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - db.remove(&column_key); - Ok(()) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } - } -} - -#[cfg(test)] -mod tests { - use super::super::stores::{BLOCKS_DB_COLUMN, VALIDATOR_DB_COLUMN}; - use super::super::ClientDB; - use super::*; - use std::sync::Arc; - use std::thread; - - #[test] - fn test_memorydb_can_delete() { - let col_a: &str = BLOCKS_DB_COLUMN; - - let db = MemoryDB::open(); - - db.put(col_a, "dogs".as_bytes(), "lol".as_bytes()).unwrap(); - - assert_eq!( - db.get(col_a, "dogs".as_bytes()).unwrap().unwrap(), - "lol".as_bytes() - ); - - db.delete(col_a, "dogs".as_bytes()).unwrap(); - - assert_eq!(db.get(col_a, "dogs".as_bytes()).unwrap(), None); - } - - #[test] - fn test_memorydb_column_access() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_b: &str = VALIDATOR_DB_COLUMN; - - let db = MemoryDB::open(); - - /* - * Testing that if we write to the same key in different columns that - * there is not an overlap. 
- */ - db.put(col_a, "same".as_bytes(), "cat".as_bytes()).unwrap(); - db.put(col_b, "same".as_bytes(), "dog".as_bytes()).unwrap(); - - assert_eq!( - db.get(col_a, "same".as_bytes()).unwrap().unwrap(), - "cat".as_bytes() - ); - assert_eq!( - db.get(col_b, "same".as_bytes()).unwrap().unwrap(), - "dog".as_bytes() - ); - } - - #[test] - fn test_memorydb_unknown_column_access() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_x: &str = "ColumnX"; - - let db = MemoryDB::open(); - - /* - * Test that we get errors when using undeclared columns - */ - assert!(db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).is_ok()); - assert!(db.put(col_x, "cats".as_bytes(), "lol".as_bytes()).is_err()); - - assert!(db.get(col_a, "cats".as_bytes()).is_ok()); - assert!(db.get(col_x, "cats".as_bytes()).is_err()); - } - - #[test] - fn test_memorydb_exists() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_b: &str = VALIDATOR_DB_COLUMN; - - let db = MemoryDB::open(); - - /* - * Testing that if we write to the same key in different columns that - * there is not an overlap. - */ - db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).unwrap(); - - assert_eq!(true, db.exists(col_a, "cats".as_bytes()).unwrap()); - assert_eq!(false, db.exists(col_b, "cats".as_bytes()).unwrap()); - - assert_eq!(false, db.exists(col_a, "dogs".as_bytes()).unwrap()); - assert_eq!(false, db.exists(col_b, "dogs".as_bytes()).unwrap()); - } - - #[test] - fn test_memorydb_threading() { - let col_name: &str = BLOCKS_DB_COLUMN; - - let db = Arc::new(MemoryDB::open()); - - let thread_count = 10; - let write_count = 10; - - // We're execting the product of these numbers to fit in one byte. 
- assert!(thread_count * write_count <= 255); - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let db = db.clone(); - let col = col_name.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = (t * w) as u8; - let val = 42; - db.put(&col, &vec![key], &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = (t * w) as u8; - let val = db.get(&col_name, &vec![key]).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } + self.db.write().remove(&column_key); + + Ok(()) } } diff --git a/beacon_node/db2/src/stores/beacon_block_store.rs b/beacon_node/db2/src/stores/beacon_block_store.rs deleted file mode 100644 index e2e16e60b..000000000 --- a/beacon_node/db2/src/stores/beacon_block_store.rs +++ /dev/null @@ -1,246 +0,0 @@ -use super::BLOCKS_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use ssz::Decodable; -use std::sync::Arc; -use types::{BeaconBlock, Hash256, Slot}; - -#[derive(Clone, Debug, PartialEq)] -pub enum BeaconBlockAtSlotError { - UnknownBeaconBlock(Hash256), - InvalidBeaconBlock(Hash256), - DBError(String), -} - -pub struct BeaconBlockStore -where - T: ClientDB, -{ - db: Arc, -} - -// Implements `put`, `get`, `exists` and `delete` for the store. -impl_crud_for_store!(BeaconBlockStore, DB_COLUMN); - -impl BeaconBlockStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { - match self.get(&hash)? { - None => Ok(None), - Some(ssz) => { - let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError { - message: "Bad BeaconBlock SSZ.".to_string(), - })?; - Ok(Some(block)) - } - } - } - - /// Retrieve the block at a slot given a "head_hash" and a slot. - /// - /// A "head_hash" must be a block hash with a slot number greater than or equal to the desired - /// slot. 
- /// - /// This function will read each block down the chain until it finds a block with the given - /// slot number. If the slot is skipped, the function will return None. - /// - /// If a block is found, a tuple of (block_hash, serialized_block) is returned. - /// - /// Note: this function uses a loop instead of recursion as the compiler is over-strict when it - /// comes to recursion and the `impl Trait` pattern. See: - /// https://stackoverflow.com/questions/54032940/using-impl-trait-in-a-recursive-function - pub fn block_at_slot( - &self, - head_hash: &Hash256, - slot: Slot, - ) -> Result, BeaconBlockAtSlotError> { - let mut current_hash = *head_hash; - - loop { - if let Some(block) = self.get_deserialized(¤t_hash)? { - if block.slot == slot { - break Ok(Some((current_hash, block))); - } else if block.slot < slot { - break Ok(None); - } else { - current_hash = block.previous_block_root; - } - } else { - break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash)); - } - } - } -} - -impl From for BeaconBlockAtSlotError { - fn from(e: DBError) -> Self { - BeaconBlockAtSlotError::DBError(e.message) - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - - use std::sync::Arc; - use std::thread; - - use ssz::ssz_encode; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::BeaconBlock; - use types::Hash256; - - test_crud_for_store!(BeaconBlockStore, DB_COLUMN); - - #[test] - fn head_hash_slot_too_low() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - let mut rng = XorShiftRng::from_seed([42; 16]); - - let mut block = BeaconBlock::random_for_test(&mut rng); - block.slot = Slot::from(10_u64); - - let block_root = block.canonical_root(); - bs.put(&block_root, &ssz_encode(&block)).unwrap(); - - let result = bs.block_at_slot(&block_root, Slot::from(11_u64)).unwrap(); - assert_eq!(result, None); - } - - #[test] - fn test_invalid_block_at_slot() { - let db 
= Arc::new(MemoryDB::open()); - let store = BeaconBlockStore::new(db.clone()); - - let ssz = "definitly not a valid block".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!( - store.block_at_slot(hash, Slot::from(42_u64)), - Err(BeaconBlockAtSlotError::DBError( - "Bad BeaconBlock SSZ.".into() - )) - ); - } - - #[test] - fn test_unknown_block_at_slot() { - let db = Arc::new(MemoryDB::open()); - let store = BeaconBlockStore::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!( - store.block_at_slot(other_hash, Slot::from(42_u64)), - Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash)) - ); - } - - #[test] - fn test_block_store_on_memory_db() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - - let thread_count = 10; - let write_count = 10; - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let bs = bs.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = t * w; - let val = 42; - bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = t * w; - assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap()); - let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } - } - - #[test] - #[ignore] - fn test_block_at_slot() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - let mut rng = XorShiftRng::from_seed([42; 16]); - - // Specify test block parameters. 
- let hashes = [ - Hash256::from([0; 32]), - Hash256::from([1; 32]), - Hash256::from([2; 32]), - Hash256::from([3; 32]), - Hash256::from([4; 32]), - ]; - let parent_hashes = [ - Hash256::from([255; 32]), // Genesis block. - Hash256::from([0; 32]), - Hash256::from([1; 32]), - Hash256::from([2; 32]), - Hash256::from([3; 32]), - ]; - let unknown_hash = Hash256::from([101; 32]); // different from all above - let slots: Vec = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect(); - - // Generate a vec of random blocks and store them in the DB. - let block_count = 5; - let mut blocks: Vec = Vec::with_capacity(5); - for i in 0..block_count { - let mut block = BeaconBlock::random_for_test(&mut rng); - - block.previous_block_root = parent_hashes[i]; - block.slot = slots[i]; - - let ssz = ssz_encode(&block); - db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap(); - - blocks.push(block); - } - - // Test that certain slots can be reached from certain hashes. - let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)]; - for (hashes_index, slot_index) in test_cases { - let (matched_block_hash, block) = bs - .block_at_slot(&hashes[hashes_index], slots[slot_index]) - .unwrap() - .unwrap(); - assert_eq!(matched_block_hash, hashes[slot_index]); - assert_eq!(block.slot, slots[slot_index]); - } - - let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap(); - assert_eq!(ssz, None); - - let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap(); - assert_eq!(ssz, None); - - let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2)); - assert_eq!( - ssz, - Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash)) - ); - } -} diff --git a/beacon_node/db2/src/stores/beacon_state_store.rs b/beacon_node/db2/src/stores/beacon_state_store.rs deleted file mode 100644 index fd6ff569a..000000000 --- a/beacon_node/db2/src/stores/beacon_state_store.rs +++ /dev/null @@ -1,62 +0,0 @@ -use super::STATES_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use ssz::Decodable; 
-use std::sync::Arc; -use types::{BeaconState, Hash256}; - -pub struct BeaconStateStore -where - T: ClientDB, -{ - db: Arc, -} - -// Implements `put`, `get`, `exists` and `delete` for the store. -impl_crud_for_store!(BeaconStateStore, DB_COLUMN); - -impl BeaconStateStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { - match self.get(&hash)? { - None => Ok(None), - Some(ssz) => { - let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError { - message: "Bad State SSZ.".to_string(), - })?; - Ok(Some(state)) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - - use ssz::ssz_encode; - use std::sync::Arc; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::Hash256; - - test_crud_for_store!(BeaconStateStore, DB_COLUMN); - - #[test] - fn test_reader() { - let db = Arc::new(MemoryDB::open()); - let store = BeaconStateStore::new(db.clone()); - - let mut rng = XorShiftRng::from_seed([42; 16]); - let state = BeaconState::random_for_test(&mut rng); - let state_root = state.canonical_root(); - - store.put(&state_root, &ssz_encode(&state)).unwrap(); - - let decoded = store.get_deserialized(&state_root).unwrap().unwrap(); - - assert_eq!(state, decoded); - } -} diff --git a/beacon_node/db2/src/stores/macros.rs b/beacon_node/db2/src/stores/macros.rs deleted file mode 100644 index 6c53e40ee..000000000 --- a/beacon_node/db2/src/stores/macros.rs +++ /dev/null @@ -1,103 +0,0 @@ -macro_rules! 
impl_crud_for_store { - ($store: ident, $db_column: expr) => { - impl $store { - pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> { - self.db.put($db_column, hash.as_bytes(), ssz) - } - - pub fn get(&self, hash: &Hash256) -> Result>, DBError> { - self.db.get($db_column, hash.as_bytes()) - } - - pub fn exists(&self, hash: &Hash256) -> Result { - self.db.exists($db_column, hash.as_bytes()) - } - - pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> { - self.db.delete($db_column, hash.as_bytes()) - } - } - }; -} - -#[cfg(test)] -macro_rules! test_crud_for_store { - ($store: ident, $db_column: expr) => { - #[test] - fn test_put() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - store.put(hash, ssz).unwrap(); - assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz); - } - - #[test] - fn test_get() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!(store.get(hash).unwrap().unwrap(), ssz); - } - - #[test] - fn test_get_unknown() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap(); - assert_eq!(store.get(hash).unwrap(), None); - } - - #[test] - fn test_exists() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(store.exists(hash).unwrap()); - } - - #[test] - fn test_block_does_not_exist() { - let db = Arc::new(MemoryDB::open()); - let store = 
$store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(!store.exists(other_hash).unwrap()); - } - - #[test] - fn test_delete() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); - - store.delete(hash).unwrap(); - assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); - } - }; -} diff --git a/beacon_node/db2/src/stores/mod.rs b/beacon_node/db2/src/stores/mod.rs deleted file mode 100644 index 44de7eed1..000000000 --- a/beacon_node/db2/src/stores/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::{ClientDB, DBError}; - -#[macro_use] -mod macros; -mod beacon_block_store; -mod beacon_state_store; -mod pow_chain_store; -mod validator_store; - -pub use self::beacon_block_store::{BeaconBlockAtSlotError, BeaconBlockStore}; -pub use self::beacon_state_store::BeaconStateStore; -pub use self::pow_chain_store::PoWChainStore; -pub use self::validator_store::{ValidatorStore, ValidatorStoreError}; - -pub const BLOCKS_DB_COLUMN: &str = "blocks"; -pub const STATES_DB_COLUMN: &str = "states"; -pub const POW_CHAIN_DB_COLUMN: &str = "powchain"; -pub const VALIDATOR_DB_COLUMN: &str = "validator"; - -pub const COLUMNS: [&str; 4] = [ - BLOCKS_DB_COLUMN, - STATES_DB_COLUMN, - POW_CHAIN_DB_COLUMN, - VALIDATOR_DB_COLUMN, -]; diff --git a/beacon_node/db2/src/stores/pow_chain_store.rs b/beacon_node/db2/src/stores/pow_chain_store.rs deleted file mode 100644 index 5c8b97907..000000000 --- a/beacon_node/db2/src/stores/pow_chain_store.rs +++ /dev/null @@ -1,68 +0,0 @@ -use super::POW_CHAIN_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use std::sync::Arc; - -pub struct PoWChainStore -where - T: ClientDB, -{ - 
db: Arc, -} - -impl PoWChainStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> { - self.db.put(DB_COLUMN, hash, &[0]) - } - - pub fn block_hash_exists(&self, hash: &[u8]) -> Result { - self.db.exists(DB_COLUMN, hash) - } -} - -#[cfg(test)] -mod tests { - extern crate types; - - use super::super::super::MemoryDB; - use super::*; - - use self::types::Hash256; - - #[test] - fn test_put_block_hash() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - store.put_block_hash(hash).unwrap(); - - assert!(db.exists(DB_COLUMN, hash).unwrap()); - } - - #[test] - fn test_block_hash_exists() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - db.put(DB_COLUMN, hash, &[0]).unwrap(); - - assert!(store.block_hash_exists(hash).unwrap()); - } - - #[test] - fn test_block_hash_does_not_exist() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec(); - db.put(DB_COLUMN, hash, &[0]).unwrap(); - - assert!(!store.block_hash_exists(other_hash).unwrap()); - } -} diff --git a/beacon_node/db2/src/stores/validator_store.rs b/beacon_node/db2/src/stores/validator_store.rs deleted file mode 100644 index 02e90dc5c..000000000 --- a/beacon_node/db2/src/stores/validator_store.rs +++ /dev/null @@ -1,215 +0,0 @@ -extern crate bytes; - -use self::bytes::{BufMut, BytesMut}; -use super::VALIDATOR_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use bls::PublicKey; -use ssz::{ssz_encode, Decodable}; -use std::sync::Arc; - -#[derive(Debug, PartialEq)] -pub enum ValidatorStoreError { - DBError(String), - DecodeError, -} - -impl From for ValidatorStoreError { - fn 
from(error: DBError) -> Self { - ValidatorStoreError::DBError(error.message) - } -} - -#[derive(Debug, PartialEq)] -enum KeyPrefixes { - PublicKey, -} - -pub struct ValidatorStore -where - T: ClientDB, -{ - db: Arc, -} - -impl ValidatorStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - fn prefix_bytes(&self, key_prefix: &KeyPrefixes) -> Vec { - match key_prefix { - KeyPrefixes::PublicKey => b"pubkey".to_vec(), - } - } - - fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) -> Vec { - let mut buf = BytesMut::with_capacity(6 + 8); - buf.put(self.prefix_bytes(key_prefix)); - buf.put_u64_be(index as u64); - buf.take().to_vec() - } - - pub fn put_public_key_by_index( - &self, - index: usize, - public_key: &PublicKey, - ) -> Result<(), ValidatorStoreError> { - let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); - let val = ssz_encode(public_key); - self.db - .put(DB_COLUMN, &key[..], &val[..]) - .map_err(ValidatorStoreError::from) - } - - pub fn get_public_key_by_index( - &self, - index: usize, - ) -> Result, ValidatorStoreError> { - let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); - let val = self.db.get(DB_COLUMN, &key[..])?; - match val { - None => Ok(None), - Some(val) => match PublicKey::ssz_decode(&val, 0) { - Ok((key, _)) => Ok(Some(key)), - Err(_) => Err(ValidatorStoreError::DecodeError), - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - use bls::Keypair; - - #[test] - fn test_prefix_bytes() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - assert_eq!( - store.prefix_bytes(&KeyPrefixes::PublicKey), - b"pubkey".to_vec() - ); - } - - #[test] - fn test_get_db_key_for_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let mut buf = BytesMut::with_capacity(6 + 8); - buf.put(b"pubkey".to_vec()); - buf.put_u64_be(42); - assert_eq!( - 
store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42), - buf.take().to_vec() - ) - } - - #[test] - fn test_put_public_key_by_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let index = 3; - let public_key = Keypair::random().pk; - - store.put_public_key_by_index(index, &public_key).unwrap(); - let public_key_at_index = db - .get( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], - ) - .unwrap() - .unwrap(); - - assert_eq!(public_key_at_index, ssz_encode(&public_key)); - } - - #[test] - fn test_get_public_key_by_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let index = 4; - let public_key = Keypair::random().pk; - - db.put( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], - &ssz_encode(&public_key)[..], - ) - .unwrap(); - - let public_key_at_index = store.get_public_key_by_index(index).unwrap().unwrap(); - assert_eq!(public_key_at_index, public_key); - } - - #[test] - fn test_get_public_key_by_unknown_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let public_key = Keypair::random().pk; - - db.put( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, 3)[..], - &ssz_encode(&public_key)[..], - ) - .unwrap(); - - let public_key_at_index = store.get_public_key_by_index(4).unwrap(); - assert_eq!(public_key_at_index, None); - } - - #[test] - fn test_get_invalid_public_key() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42); - db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap(); - - assert_eq!( - store.get_public_key_by_index(42), - Err(ValidatorStoreError::DecodeError) - ); - } - - #[test] - fn test_validator_store_put_get() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db); - - let keys = vec![ - 
Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - Keypair::random(), - ]; - - for i in 0..keys.len() { - store.put_public_key_by_index(i, &keys[i].pk).unwrap(); - } - - /* - * Check all keys are retrieved correctly. - */ - for i in 0..keys.len() { - let retrieved = store.get_public_key_by_index(i).unwrap().unwrap(); - assert_eq!(retrieved, keys[i].pk); - } - - /* - * Check that an index that wasn't stored returns None. - */ - assert!(store - .get_public_key_by_index(keys.len() + 1) - .unwrap() - .is_none()); - } -} diff --git a/beacon_node/db2/src/traits.rs b/beacon_node/db2/src/traits.rs deleted file mode 100644 index 57ebf9353..000000000 --- a/beacon_node/db2/src/traits.rs +++ /dev/null @@ -1,38 +0,0 @@ -pub type DBValue = Vec; - -#[derive(Debug)] -pub struct DBError { - pub message: String, -} - -impl DBError { - pub fn new(message: String) -> Self { - Self { message } - } -} - -/// A generic database to be used by the "client' (i.e., -/// the lighthouse blockchain client). -/// -/// The purpose of having this generic trait is to allow the -/// program to use a persistent on-disk database during production, -/// but use a transient database during tests. 
-pub trait ClientDB: Sync + Send { - fn get(&self, col: &str, key: &[u8]) -> Result, DBError>; - - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError>; - - fn exists(&self, col: &str, key: &[u8]) -> Result; - - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError>; -} - -pub enum DBColumn { - Block, - State, - BeaconChain, -} - -pub trait DBStore { - fn db_column(&self) -> DBColumn; -} From 157d4900aa62ec2619cc6693a3c22ca2e6c54e8d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 1 May 2019 11:59:18 +1000 Subject: [PATCH 07/21] Rename DB traits --- beacon_node/db2/src/lib.rs | 33 +++++++++++--------------------- beacon_node/db2/src/memory_db.rs | 4 ++-- 2 files changed, 13 insertions(+), 24 deletions(-) diff --git a/beacon_node/db2/src/lib.rs b/beacon_node/db2/src/lib.rs index 1ed9d5984..da00262e6 100644 --- a/beacon_node/db2/src/lib.rs +++ b/beacon_node/db2/src/lib.rs @@ -9,20 +9,20 @@ pub use errors::Error; pub use types::*; pub type DBValue = Vec; -pub trait StoreDB: Sync + Send + Sized { - fn put(&self, key: &Hash256, item: &impl DBRecord) -> Result<(), Error> { +pub trait Store: Sync + Send + Sized { + fn put(&self, key: &Hash256, item: &impl StorableItem) -> Result<(), Error> { item.db_put(self, key) } - fn get(&self, key: &Hash256) -> Result, Error> { + fn get(&self, key: &Hash256) -> Result, Error> { I::db_get(self, key) } - fn exists(&self, key: &Hash256) -> Result { + fn exists(&self, key: &Hash256) -> Result { I::db_exists(self, key) } - fn delete(&self, key: &Hash256) -> Result<(), Error> { + fn delete(&self, key: &Hash256) -> Result<(), Error> { I::db_delete(self, key) } @@ -35,17 +35,6 @@ pub trait StoreDB: Sync + Send + Sized { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; } -pub trait DBStore { - fn db_column(&self) -> DBColumn; -} - -/// Currently available database options -#[derive(Debug, Clone)] -pub enum DBType { - Memory, - RocksDB, -} - pub enum DBColumn { Block, State, @@ -63,10 +52,10 @@ 
impl<'a> Into<&'a str> for DBColumn { } } -pub trait DBRecord: DBEncode + DBDecode { +pub trait StorableItem: DBEncode + DBDecode { fn db_column() -> DBColumn; - fn db_put(&self, store: &impl StoreDB, key: &Hash256) -> Result<(), Error> { + fn db_put(&self, store: &impl Store, key: &Hash256) -> Result<(), Error> { let column = Self::db_column().into(); let key = key.as_bytes(); @@ -75,7 +64,7 @@ pub trait DBRecord: DBEncode + DBDecode { .map_err(|e| e.into()) } - fn db_get(store: &impl StoreDB, key: &Hash256) -> Result, Error> { + fn db_get(store: &impl Store, key: &Hash256) -> Result, Error> { let column = Self::db_column().into(); let key = key.as_bytes(); @@ -88,14 +77,14 @@ pub trait DBRecord: DBEncode + DBDecode { } } - fn db_exists(store: &impl StoreDB, key: &Hash256) -> Result { + fn db_exists(store: &impl Store, key: &Hash256) -> Result { let column = Self::db_column().into(); let key = key.as_bytes(); store.key_exists(column, key) } - fn db_delete(store: &impl StoreDB, key: &Hash256) -> Result<(), Error> { + fn db_delete(store: &impl Store, key: &Hash256) -> Result<(), Error> { let column = Self::db_column().into(); let key = key.as_bytes(); @@ -116,7 +105,7 @@ mod tests { b: u64, } - impl DBRecord for StorableThing { + impl StorableItem for StorableThing { fn db_column() -> DBColumn { DBColumn::Block } diff --git a/beacon_node/db2/src/memory_db.rs b/beacon_node/db2/src/memory_db.rs index bc736e525..83ff77ce1 100644 --- a/beacon_node/db2/src/memory_db.rs +++ b/beacon_node/db2/src/memory_db.rs @@ -1,4 +1,4 @@ -use super::{DBValue, Error, StoreDB}; +use super::{DBValue, Error, Store}; use parking_lot::RwLock; use std::collections::HashMap; @@ -22,7 +22,7 @@ impl MemoryDB { } } -impl StoreDB for MemoryDB { +impl Store for MemoryDB { /// Get the value of some key from the database. Returns `None` if the key does not exist. 
fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { let column_key = MemoryDB::get_key_for_col(col, key); From cf8a24c2bdb3a32988359b98044c045bb0a73fc6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 1 May 2019 14:29:03 +1000 Subject: [PATCH 08/21] Add enc/decode traits to store --- beacon_node/db2/src/impls.rs | 16 +++++++++++++++ beacon_node/db2/src/lib.rs | 38 +++++++++++++++++++++++++----------- 2 files changed, 43 insertions(+), 11 deletions(-) create mode 100644 beacon_node/db2/src/impls.rs diff --git a/beacon_node/db2/src/impls.rs b/beacon_node/db2/src/impls.rs new file mode 100644 index 000000000..9e607ddf5 --- /dev/null +++ b/beacon_node/db2/src/impls.rs @@ -0,0 +1,16 @@ +/* +use types::*; + +impl StoreEncode for Hash256 { + fn as_store_bytes(&self) -> Vec { + self.as_bytes().to_vec() + } +} + +impl StoreDecode for Hash256 { + fn from_store_bytes(bytes: &mut [u8]) -> Vec { + Hash256::from_slice() + self.as_bytes().to_vec() + } +} +*/ diff --git a/beacon_node/db2/src/lib.rs b/beacon_node/db2/src/lib.rs index da00262e6..3bff89512 100644 --- a/beacon_node/db2/src/lib.rs +++ b/beacon_node/db2/src/lib.rs @@ -1,9 +1,8 @@ // mod disk_db; mod errors; +mod impls; mod memory_db; -use db_encode::{db_encode, DBDecode, DBEncode}; - pub use self::memory_db::MemoryDB; pub use errors::Error; pub use types::*; @@ -35,6 +34,14 @@ pub trait Store: Sync + Send + Sized { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; } +pub trait StoreEncode { + fn as_store_bytes(&self) -> Vec; +} + +pub trait StoreDecode: Sized { + fn from_store_bytes(bytes: &mut [u8]) -> Result; +} + pub enum DBColumn { Block, State, @@ -52,7 +59,7 @@ impl<'a> Into<&'a str> for DBColumn { } } -pub trait StorableItem: DBEncode + DBDecode { +pub trait StorableItem: StoreEncode + StoreDecode + Sized { fn db_column() -> DBColumn; fn db_put(&self, store: &impl Store, key: &Hash256) -> Result<(), Error> { @@ -60,7 +67,7 @@ pub trait StorableItem: DBEncode + DBDecode { let key = 
key.as_bytes(); store - .put_bytes(column, key, &db_encode(self)) + .put_bytes(column, key, &self.as_store_bytes()) .map_err(|e| e.into()) } @@ -69,10 +76,7 @@ pub trait StorableItem: DBEncode + DBDecode { let key = key.as_bytes(); match store.get_bytes(column, key)? { - Some(bytes) => { - let (item, _index) = Self::db_decode(&bytes, 0)?; - Ok(Some(item)) - } + Some(mut bytes) => Ok(Some(Self::from_store_bytes(&mut bytes[..])?)), None => Ok(None), } } @@ -95,16 +99,28 @@ pub trait StorableItem: DBEncode + DBDecode { #[cfg(test)] mod tests { use super::*; - use db_encode_derive::{DBDecode, DBEncode}; - use ssz::Decodable; + use ssz::{ssz_encode, Decodable}; use ssz_derive::{Decode, Encode}; - #[derive(PartialEq, Debug, Encode, Decode, DBEncode, DBDecode)] + #[derive(PartialEq, Debug, Encode, Decode)] struct StorableThing { a: u64, b: u64, } + impl StoreEncode for StorableThing { + fn as_store_bytes(&self) -> Vec { + ssz_encode(self) + } + } + + impl StoreDecode for StorableThing { + fn from_store_bytes(bytes: &mut [u8]) -> Result { + let (item, _) = Self::ssz_decode(bytes, 0)?; + Ok(item) + } + } + impl StorableItem for StorableThing { fn db_column() -> DBColumn { DBColumn::Block From 182135b832299d927e64c447efd3c30fbc9784a1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 20 May 2019 18:01:51 +1000 Subject: [PATCH 09/21] Remove old DB crates, start fixing fork_choice --- Cargo.toml | 3 - beacon_node/db/Cargo.toml | 6 + beacon_node/db/src/block_at_slot.rs | 46 +++ beacon_node/db/src/disk_db.rs | 8 +- beacon_node/{db2 => db}/src/errors.rs | 0 beacon_node/db/src/impls.rs | 30 ++ beacon_node/db/src/lib.rs | 171 +++++++++- beacon_node/db/src/memory_db.rs | 231 ++----------- .../db/src/stores/beacon_block_store.rs | 246 -------------- .../db/src/stores/beacon_state_store.rs | 65 ---- beacon_node/db/src/stores/macros.rs | 103 ------ beacon_node/db/src/stores/mod.rs | 25 -- beacon_node/db/src/stores/pow_chain_store.rs | 68 ---- 
beacon_node/db/src/stores/validator_store.rs | 215 ------------ beacon_node/db/src/traits.rs | 28 -- beacon_node/db2/Cargo.toml | 17 - beacon_node/db2/src/disk_db.rs | 199 ------------ beacon_node/db2/src/impls.rs | 16 - beacon_node/db2/src/lib.rs | 160 --------- beacon_node/db2/src/memory_db.rs | 61 ---- beacon_node/db_encode/Cargo.toml | 9 - beacon_node/db_encode/src/lib.rs | 59 ---- beacon_node/db_encode_derive/Cargo.toml | 13 - beacon_node/db_encode_derive/src/lib.rs | 305 ------------------ eth2/fork_choice/src/lib.rs | 17 +- eth2/fork_choice/src/slow_lmd_ghost.rs | 36 +-- 26 files changed, 291 insertions(+), 1846 deletions(-) create mode 100644 beacon_node/db/src/block_at_slot.rs rename beacon_node/{db2 => db}/src/errors.rs (100%) create mode 100644 beacon_node/db/src/impls.rs delete mode 100644 beacon_node/db/src/stores/beacon_block_store.rs delete mode 100644 beacon_node/db/src/stores/beacon_state_store.rs delete mode 100644 beacon_node/db/src/stores/macros.rs delete mode 100644 beacon_node/db/src/stores/mod.rs delete mode 100644 beacon_node/db/src/stores/pow_chain_store.rs delete mode 100644 beacon_node/db/src/stores/validator_store.rs delete mode 100644 beacon_node/db/src/traits.rs delete mode 100644 beacon_node/db2/Cargo.toml delete mode 100644 beacon_node/db2/src/disk_db.rs delete mode 100644 beacon_node/db2/src/impls.rs delete mode 100644 beacon_node/db2/src/lib.rs delete mode 100644 beacon_node/db2/src/memory_db.rs delete mode 100644 beacon_node/db_encode/Cargo.toml delete mode 100644 beacon_node/db_encode/src/lib.rs delete mode 100644 beacon_node/db_encode_derive/Cargo.toml delete mode 100644 beacon_node/db_encode_derive/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 3c657bd4b..893189941 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,9 +23,6 @@ members = [ "eth2/utils/test_random_derive", "beacon_node", "beacon_node/db", - "beacon_node/db2", - "beacon_node/db_encode", - "beacon_node/db_encode_derive", "beacon_node/client", 
"beacon_node/network", "beacon_node/eth2-libp2p", diff --git a/beacon_node/db/Cargo.toml b/beacon_node/db/Cargo.toml index ffb3585b9..bb2f659f8 100644 --- a/beacon_node/db/Cargo.toml +++ b/beacon_node/db/Cargo.toml @@ -8,4 +8,10 @@ edition = "2018" blake2-rfc = "0.2.18" bls = { path = "../../eth2/utils/bls" } bytes = "0.4.10" +db_encode = { path = "../db_encode" } +db_encode_derive = { path = "../db_encode_derive" } +parking_lot = "0.7" rocksdb = "0.10.1" +ssz = { path = "../../eth2/utils/ssz" } +ssz_derive = { path = "../../eth2/utils/ssz_derive" } +types = { path = "../../eth2/types" } diff --git a/beacon_node/db/src/block_at_slot.rs b/beacon_node/db/src/block_at_slot.rs new file mode 100644 index 000000000..c18c8998c --- /dev/null +++ b/beacon_node/db/src/block_at_slot.rs @@ -0,0 +1,46 @@ +use super::*; +use ssz::{Decode, DecodeError}; + +fn get_block_bytes(store: &T, root: Hash256) -> Result>, Error> { + store.get_bytes(BeaconBlock::db_column().into(), &root[..]) +} + +fn read_slot_from_block_bytes(bytes: &[u8]) -> Result { + let end = std::cmp::min(Slot::ssz_fixed_len(), bytes.len()); + + Slot::from_ssz_bytes(&bytes[0..end]) +} + +fn read_previous_block_root_from_block_bytes(bytes: &[u8]) -> Result { + let previous_bytes = Slot::ssz_fixed_len(); + let slice = bytes + .get(previous_bytes..previous_bytes + Hash256::ssz_fixed_len()) + .ok_or_else(|| DecodeError::BytesInvalid("Not enough bytes.".to_string()))?; + + Hash256::from_ssz_bytes(slice) +} + +pub fn get_block_at_preceeding_slot( + store: &T, + slot: Slot, + start_root: Hash256, +) -> Result, Error> { + let mut root = start_root; + + loop { + if let Some(bytes) = get_block_bytes(store, root)? 
{ + let this_slot = read_slot_from_block_bytes(&bytes)?; + + if this_slot == slot { + let block = BeaconBlock::from_ssz_bytes(&bytes)?; + break Ok(Some((root, block))); + } else if this_slot < slot { + break Ok(None); + } else { + root = read_previous_block_root_from_block_bytes(&bytes)?; + } + } else { + break Ok(None); + } + } +} diff --git a/beacon_node/db/src/disk_db.rs b/beacon_node/db/src/disk_db.rs index 087941951..e2162e29a 100644 --- a/beacon_node/db/src/disk_db.rs +++ b/beacon_node/db/src/disk_db.rs @@ -1,9 +1,9 @@ extern crate rocksdb; -use super::rocksdb::Error as RocksError; -use super::rocksdb::{Options, DB}; -use super::stores::COLUMNS; +// use super::stores::COLUMNS; use super::{ClientDB, DBError, DBValue}; +use rocksdb::Error as RocksError; +use rocksdb::{Options, DB}; use std::fs; use std::path::Path; @@ -99,7 +99,7 @@ impl ClientDB for DiskDB { None => Err(DBError { message: "Unknown column".to_string(), }), - Some(handle) => self.db.put_cf(handle, key, val).map_err(Into::into), + Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()), } } diff --git a/beacon_node/db2/src/errors.rs b/beacon_node/db/src/errors.rs similarity index 100% rename from beacon_node/db2/src/errors.rs rename to beacon_node/db/src/errors.rs diff --git a/beacon_node/db/src/impls.rs b/beacon_node/db/src/impls.rs new file mode 100644 index 000000000..91f8d52de --- /dev/null +++ b/beacon_node/db/src/impls.rs @@ -0,0 +1,30 @@ +use crate::*; +use ssz::{Decode, Encode}; + +impl StoreItem for BeaconBlock { + fn db_column() -> DBColumn { + DBColumn::BeaconBlock + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &mut [u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } +} + +impl StoreItem for BeaconState { + fn db_column() -> DBColumn { + DBColumn::BeaconState + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &mut [u8]) -> Result { + 
Self::from_ssz_bytes(bytes).map_err(Into::into) + } +} diff --git a/beacon_node/db/src/lib.rs b/beacon_node/db/src/lib.rs index 5e710ae9a..8ac28092a 100644 --- a/beacon_node/db/src/lib.rs +++ b/beacon_node/db/src/lib.rs @@ -1,21 +1,160 @@ -extern crate blake2_rfc as blake2; -extern crate bls; -extern crate rocksdb; - -mod disk_db; +// mod disk_db; +mod block_at_slot; +mod errors; +mod impls; mod memory_db; -pub mod stores; -mod traits; -use self::stores::COLUMNS; - -pub use self::disk_db::DiskDB; pub use self::memory_db::MemoryDB; -pub use self::traits::{ClientDB, DBError, DBValue}; +pub use errors::Error; +pub use types::*; +pub type DBValue = Vec; -/// Currently available database options -#[derive(Debug, Clone)] -pub enum DBType { - Memory, - RocksDB, +pub trait Store: Sync + Send + Sized { + fn put(&self, key: &Hash256, item: &impl StoreItem) -> Result<(), Error> { + item.db_put(self, key) + } + + fn get(&self, key: &Hash256) -> Result, Error> { + I::db_get(self, key) + } + + fn exists(&self, key: &Hash256) -> Result { + I::db_exists(self, key) + } + + fn delete(&self, key: &Hash256) -> Result<(), Error> { + I::db_delete(self, key) + } + + fn get_block_at_preceeding_slot( + &self, + slot: Slot, + start_block_root: Hash256, + ) -> Result, Error> { + block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) + } + + fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error>; + + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error>; + + fn key_exists(&self, col: &str, key: &[u8]) -> Result; + + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; +} + +pub enum DBColumn { + BeaconBlock, + BeaconState, + BeaconChain, +} + +impl<'a> Into<&'a str> for DBColumn { + /// Returns a `&str` that can be used for keying a key-value data base. 
+ fn into(self) -> &'a str { + match self { + DBColumn::BeaconBlock => &"blk", + DBColumn::BeaconState => &"ste", + DBColumn::BeaconChain => &"bch", + } + } +} + +pub trait StoreItem: Sized { + fn db_column() -> DBColumn; + + fn as_store_bytes(&self) -> Vec; + + fn from_store_bytes(bytes: &mut [u8]) -> Result; + + fn db_put(&self, store: &impl Store, key: &Hash256) -> Result<(), Error> { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + store + .put_bytes(column, key, &self.as_store_bytes()) + .map_err(|e| e.into()) + } + + fn db_get(store: &impl Store, key: &Hash256) -> Result, Error> { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + match store.get_bytes(column, key)? { + Some(mut bytes) => Ok(Some(Self::from_store_bytes(&mut bytes[..])?)), + None => Ok(None), + } + } + + fn db_exists(store: &impl Store, key: &Hash256) -> Result { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + store.key_exists(column, key) + } + + fn db_delete(store: &impl Store, key: &Hash256) -> Result<(), Error> { + let column = Self::db_column().into(); + let key = key.as_bytes(); + + store.key_delete(column, key) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ssz::{Decode, Encode}; + use ssz_derive::{Decode, Encode}; + + #[derive(PartialEq, Debug, Encode, Decode)] + struct StorableThing { + a: u64, + b: u64, + } + + impl StoreItem for StorableThing { + fn db_column() -> DBColumn { + DBColumn::BeaconBlock + } + + fn as_store_bytes(&self) -> Vec { + self.as_ssz_bytes() + } + + fn from_store_bytes(bytes: &mut [u8]) -> Result { + Self::from_ssz_bytes(bytes).map_err(Into::into) + } + } + + #[test] + fn memorydb_can_store_and_retrieve() { + let store = MemoryDB::open(); + + let key = Hash256::random(); + let item = StorableThing { a: 1, b: 42 }; + + store.put(&key, &item).unwrap(); + + let retrieved = store.get(&key).unwrap().unwrap(); + + assert_eq!(item, retrieved); + } + + #[test] + fn exists() { + let store = 
MemoryDB::open(); + let key = Hash256::random(); + let item = StorableThing { a: 1, b: 42 }; + + assert_eq!(store.exists::(&key).unwrap(), false); + + store.put(&key, &item).unwrap(); + + assert_eq!(store.exists::(&key).unwrap(), true); + + store.delete::(&key).unwrap(); + + assert_eq!(store.exists::(&key).unwrap(), false); + } } diff --git a/beacon_node/db/src/memory_db.rs b/beacon_node/db/src/memory_db.rs index 008e5912f..83ff77ce1 100644 --- a/beacon_node/db/src/memory_db.rs +++ b/beacon_node/db/src/memory_db.rs @@ -1,236 +1,61 @@ -use super::blake2::blake2b::blake2b; -use super::COLUMNS; -use super::{ClientDB, DBError, DBValue}; -use std::collections::{HashMap, HashSet}; -use std::sync::RwLock; +use super::{DBValue, Error, Store}; +use parking_lot::RwLock; +use std::collections::HashMap; type DBHashMap = HashMap, Vec>; -type ColumnHashSet = HashSet; -/// An in-memory database implementing the ClientDB trait. -/// -/// It is not particularily optimized, it exists for ease and speed of testing. It's not expected -/// this DB would be used outside of tests. pub struct MemoryDB { db: RwLock, - known_columns: RwLock, } impl MemoryDB { - /// Open the in-memory database. - /// - /// All columns must be supplied initially, you will get an error if you try to access a column - /// that was not declared here. This condition is enforced artificially to simulate RocksDB. pub fn open() -> Self { - let db: DBHashMap = HashMap::new(); - let mut known_columns: ColumnHashSet = HashSet::new(); - for col in &COLUMNS { - known_columns.insert(col.to_string()); - } Self { - db: RwLock::new(db), - known_columns: RwLock::new(known_columns), + db: RwLock::new(HashMap::new()), } } - /// Hashes a key and a column name in order to get a unique key for the supplied column. 
fn get_key_for_col(col: &str, key: &[u8]) -> Vec { - blake2b(32, col.as_bytes(), key).as_bytes().to_vec() + let mut col = col.as_bytes().to_vec(); + col.append(&mut key.to_vec()); + col } } -impl ClientDB for MemoryDB { +impl Store for MemoryDB { /// Get the value of some key from the database. Returns `None` if the key does not exist. - fn get(&self, col: &str, key: &[u8]) -> Result, DBError> { - // Panic if the DB locks are poisoned. - let db = self.db.read().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - Ok(db.get(&column_key).and_then(|val| Some(val.clone()))) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } + Ok(self + .db + .read() + .get(&column_key) + .and_then(|val| Some(val.clone()))) } /// Puts a key in the database. - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> { - // Panic if the DB locks are poisoned. - let mut db = self.db.write().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - db.insert(column_key, val.to_vec()); - Ok(()) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } + self.db.write().insert(column_key, val.to_vec()); + + Ok(()) } /// Return true if some key exists in some column. - fn exists(&self, col: &str, key: &[u8]) -> Result { - // Panic if the DB locks are poisoned. 
- let db = self.db.read().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn key_exists(&self, col: &str, key: &[u8]) -> Result { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - Ok(db.contains_key(&column_key)) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } + Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> { - // Panic if the DB locks are poisoned. - let mut db = self.db.write().unwrap(); - let known_columns = self.known_columns.read().unwrap(); + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { + let column_key = MemoryDB::get_key_for_col(col, key); - if known_columns.contains(&col.to_string()) { - let column_key = MemoryDB::get_key_for_col(col, key); - db.remove(&column_key); - Ok(()) - } else { - Err(DBError { - message: "Unknown column".to_string(), - }) - } - } -} - -#[cfg(test)] -mod tests { - use super::super::stores::{BLOCKS_DB_COLUMN, VALIDATOR_DB_COLUMN}; - use super::super::ClientDB; - use super::*; - use std::sync::Arc; - use std::thread; - - #[test] - fn test_memorydb_can_delete() { - let col_a: &str = BLOCKS_DB_COLUMN; - - let db = MemoryDB::open(); - - db.put(col_a, "dogs".as_bytes(), "lol".as_bytes()).unwrap(); - - assert_eq!( - db.get(col_a, "dogs".as_bytes()).unwrap().unwrap(), - "lol".as_bytes() - ); - - db.delete(col_a, "dogs".as_bytes()).unwrap(); - - assert_eq!(db.get(col_a, "dogs".as_bytes()).unwrap(), None); - } - - #[test] - fn test_memorydb_column_access() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_b: &str = VALIDATOR_DB_COLUMN; - - let db = MemoryDB::open(); - - /* - * Testing that if we write to the same key in different columns that - * there is not an overlap. 
- */ - db.put(col_a, "same".as_bytes(), "cat".as_bytes()).unwrap(); - db.put(col_b, "same".as_bytes(), "dog".as_bytes()).unwrap(); - - assert_eq!( - db.get(col_a, "same".as_bytes()).unwrap().unwrap(), - "cat".as_bytes() - ); - assert_eq!( - db.get(col_b, "same".as_bytes()).unwrap().unwrap(), - "dog".as_bytes() - ); - } - - #[test] - fn test_memorydb_unknown_column_access() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_x: &str = "ColumnX"; - - let db = MemoryDB::open(); - - /* - * Test that we get errors when using undeclared columns - */ - assert!(db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).is_ok()); - assert!(db.put(col_x, "cats".as_bytes(), "lol".as_bytes()).is_err()); - - assert!(db.get(col_a, "cats".as_bytes()).is_ok()); - assert!(db.get(col_x, "cats".as_bytes()).is_err()); - } - - #[test] - fn test_memorydb_exists() { - let col_a: &str = BLOCKS_DB_COLUMN; - let col_b: &str = VALIDATOR_DB_COLUMN; - - let db = MemoryDB::open(); - - /* - * Testing that if we write to the same key in different columns that - * there is not an overlap. - */ - db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).unwrap(); - - assert_eq!(true, db.exists(col_a, "cats".as_bytes()).unwrap()); - assert_eq!(false, db.exists(col_b, "cats".as_bytes()).unwrap()); - - assert_eq!(false, db.exists(col_a, "dogs".as_bytes()).unwrap()); - assert_eq!(false, db.exists(col_b, "dogs".as_bytes()).unwrap()); - } - - #[test] - fn test_memorydb_threading() { - let col_name: &str = BLOCKS_DB_COLUMN; - - let db = Arc::new(MemoryDB::open()); - - let thread_count = 10; - let write_count = 10; - - // We're execting the product of these numbers to fit in one byte. 
- assert!(thread_count * write_count <= 255); - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let db = db.clone(); - let col = col_name.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = (t * w) as u8; - let val = 42; - db.put(&col, &vec![key], &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = (t * w) as u8; - let val = db.get(&col_name, &vec![key]).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } + self.db.write().remove(&column_key); + + Ok(()) } } diff --git a/beacon_node/db/src/stores/beacon_block_store.rs b/beacon_node/db/src/stores/beacon_block_store.rs deleted file mode 100644 index 868caafe2..000000000 --- a/beacon_node/db/src/stores/beacon_block_store.rs +++ /dev/null @@ -1,246 +0,0 @@ -use super::BLOCKS_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use ssz::Decode; -use std::sync::Arc; -use types::{BeaconBlock, Hash256, Slot}; - -#[derive(Clone, Debug, PartialEq)] -pub enum BeaconBlockAtSlotError { - UnknownBeaconBlock(Hash256), - InvalidBeaconBlock(Hash256), - DBError(String), -} - -pub struct BeaconBlockStore -where - T: ClientDB, -{ - db: Arc, -} - -// Implements `put`, `get`, `exists` and `delete` for the store. -impl_crud_for_store!(BeaconBlockStore, DB_COLUMN); - -impl BeaconBlockStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn get_deserialized(&self, hash: &Hash256) -> Result, DBError> { - match self.get(&hash)? { - None => Ok(None), - Some(ssz) => { - let block = BeaconBlock::from_ssz_bytes(&ssz).map_err(|_| DBError { - message: "Bad BeaconBlock SSZ.".to_string(), - })?; - Ok(Some(block)) - } - } - } - - /// Retrieve the block at a slot given a "head_hash" and a slot. - /// - /// A "head_hash" must be a block hash with a slot number greater than or equal to the desired - /// slot. 
- /// - /// This function will read each block down the chain until it finds a block with the given - /// slot number. If the slot is skipped, the function will return None. - /// - /// If a block is found, a tuple of (block_hash, serialized_block) is returned. - /// - /// Note: this function uses a loop instead of recursion as the compiler is over-strict when it - /// comes to recursion and the `impl Trait` pattern. See: - /// https://stackoverflow.com/questions/54032940/using-impl-trait-in-a-recursive-function - pub fn block_at_slot( - &self, - head_hash: &Hash256, - slot: Slot, - ) -> Result, BeaconBlockAtSlotError> { - let mut current_hash = *head_hash; - - loop { - if let Some(block) = self.get_deserialized(¤t_hash)? { - if block.slot == slot { - break Ok(Some((current_hash, block))); - } else if block.slot < slot { - break Ok(None); - } else { - current_hash = block.previous_block_root; - } - } else { - break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash)); - } - } - } -} - -impl From for BeaconBlockAtSlotError { - fn from(e: DBError) -> Self { - BeaconBlockAtSlotError::DBError(e.message) - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - - use std::sync::Arc; - use std::thread; - - use ssz::ssz_encode; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::BeaconBlock; - use types::Hash256; - - test_crud_for_store!(BeaconBlockStore, DB_COLUMN); - - #[test] - fn head_hash_slot_too_low() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - let mut rng = XorShiftRng::from_seed([42; 16]); - - let mut block = BeaconBlock::random_for_test(&mut rng); - block.slot = Slot::from(10_u64); - - let block_root = block.canonical_root(); - bs.put(&block_root, &ssz_encode(&block)).unwrap(); - - let result = bs.block_at_slot(&block_root, Slot::from(11_u64)).unwrap(); - assert_eq!(result, None); - } - - #[test] - fn test_invalid_block_at_slot() { - let db 
= Arc::new(MemoryDB::open()); - let store = BeaconBlockStore::new(db.clone()); - - let ssz = "definitly not a valid block".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!( - store.block_at_slot(hash, Slot::from(42_u64)), - Err(BeaconBlockAtSlotError::DBError( - "Bad BeaconBlock SSZ.".into() - )) - ); - } - - #[test] - fn test_unknown_block_at_slot() { - let db = Arc::new(MemoryDB::open()); - let store = BeaconBlockStore::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!( - store.block_at_slot(other_hash, Slot::from(42_u64)), - Err(BeaconBlockAtSlotError::UnknownBeaconBlock(*other_hash)) - ); - } - - #[test] - fn test_block_store_on_memory_db() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - - let thread_count = 10; - let write_count = 10; - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let bs = bs.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = t * w; - let val = 42; - bs.put(&Hash256::from_low_u64_le(key), &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = t * w; - assert!(bs.exists(&Hash256::from_low_u64_le(key)).unwrap()); - let val = bs.get(&Hash256::from_low_u64_le(key)).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } - } - - #[test] - #[ignore] - fn test_block_at_slot() { - let db = Arc::new(MemoryDB::open()); - let bs = Arc::new(BeaconBlockStore::new(db.clone())); - let mut rng = XorShiftRng::from_seed([42; 16]); - - // Specify test block parameters. 
- let hashes = [ - Hash256::from([0; 32]), - Hash256::from([1; 32]), - Hash256::from([2; 32]), - Hash256::from([3; 32]), - Hash256::from([4; 32]), - ]; - let parent_hashes = [ - Hash256::from([255; 32]), // Genesis block. - Hash256::from([0; 32]), - Hash256::from([1; 32]), - Hash256::from([2; 32]), - Hash256::from([3; 32]), - ]; - let unknown_hash = Hash256::from([101; 32]); // different from all above - let slots: Vec = vec![0, 1, 3, 4, 5].iter().map(|x| Slot::new(*x)).collect(); - - // Generate a vec of random blocks and store them in the DB. - let block_count = 5; - let mut blocks: Vec = Vec::with_capacity(5); - for i in 0..block_count { - let mut block = BeaconBlock::random_for_test(&mut rng); - - block.previous_block_root = parent_hashes[i]; - block.slot = slots[i]; - - let ssz = ssz_encode(&block); - db.put(DB_COLUMN, hashes[i].as_bytes(), &ssz).unwrap(); - - blocks.push(block); - } - - // Test that certain slots can be reached from certain hashes. - let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)]; - for (hashes_index, slot_index) in test_cases { - let (matched_block_hash, block) = bs - .block_at_slot(&hashes[hashes_index], slots[slot_index]) - .unwrap() - .unwrap(); - assert_eq!(matched_block_hash, hashes[slot_index]); - assert_eq!(block.slot, slots[slot_index]); - } - - let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap(); - assert_eq!(ssz, None); - - let ssz = bs.block_at_slot(&hashes[4], Slot::new(6)).unwrap(); - assert_eq!(ssz, None); - - let ssz = bs.block_at_slot(&unknown_hash, Slot::new(2)); - assert_eq!( - ssz, - Err(BeaconBlockAtSlotError::UnknownBeaconBlock(unknown_hash)) - ); - } -} diff --git a/beacon_node/db/src/stores/beacon_state_store.rs b/beacon_node/db/src/stores/beacon_state_store.rs deleted file mode 100644 index 044290592..000000000 --- a/beacon_node/db/src/stores/beacon_state_store.rs +++ /dev/null @@ -1,65 +0,0 @@ -use super::STATES_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use ssz::Decode; -use 
std::sync::Arc; -use types::{BeaconState, EthSpec, Hash256}; - -pub struct BeaconStateStore -where - T: ClientDB, -{ - db: Arc, -} - -// Implements `put`, `get`, `exists` and `delete` for the store. -impl_crud_for_store!(BeaconStateStore, DB_COLUMN); - -impl BeaconStateStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn get_deserialized( - &self, - hash: &Hash256, - ) -> Result>, DBError> { - match self.get(&hash)? { - None => Ok(None), - Some(ssz) => { - let state = BeaconState::from_ssz_bytes(&ssz).map_err(|_| DBError { - message: "Bad State SSZ.".to_string(), - })?; - Ok(Some(state)) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - - use ssz::ssz_encode; - use std::sync::Arc; - use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use types::{FoundationBeaconState, Hash256}; - - test_crud_for_store!(BeaconStateStore, DB_COLUMN); - - #[test] - fn test_reader() { - let db = Arc::new(MemoryDB::open()); - let store = BeaconStateStore::new(db.clone()); - - let mut rng = XorShiftRng::from_seed([42; 16]); - let state: FoundationBeaconState = BeaconState::random_for_test(&mut rng); - let state_root = state.canonical_root(); - - store.put(&state_root, &ssz_encode(&state)).unwrap(); - - let decoded = store.get_deserialized(&state_root).unwrap().unwrap(); - - assert_eq!(state, decoded); - } -} diff --git a/beacon_node/db/src/stores/macros.rs b/beacon_node/db/src/stores/macros.rs deleted file mode 100644 index 6c53e40ee..000000000 --- a/beacon_node/db/src/stores/macros.rs +++ /dev/null @@ -1,103 +0,0 @@ -macro_rules! 
impl_crud_for_store { - ($store: ident, $db_column: expr) => { - impl $store { - pub fn put(&self, hash: &Hash256, ssz: &[u8]) -> Result<(), DBError> { - self.db.put($db_column, hash.as_bytes(), ssz) - } - - pub fn get(&self, hash: &Hash256) -> Result>, DBError> { - self.db.get($db_column, hash.as_bytes()) - } - - pub fn exists(&self, hash: &Hash256) -> Result { - self.db.exists($db_column, hash.as_bytes()) - } - - pub fn delete(&self, hash: &Hash256) -> Result<(), DBError> { - self.db.delete($db_column, hash.as_bytes()) - } - } - }; -} - -#[cfg(test)] -macro_rules! test_crud_for_store { - ($store: ident, $db_column: expr) => { - #[test] - fn test_put() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - store.put(hash, ssz).unwrap(); - assert_eq!(db.get(DB_COLUMN, hash.as_bytes()).unwrap().unwrap(), ssz); - } - - #[test] - fn test_get() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert_eq!(store.get(hash).unwrap().unwrap(), ssz); - } - - #[test] - fn test_get_unknown() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, other_hash.as_bytes(), ssz).unwrap(); - assert_eq!(store.get(hash).unwrap(), None); - } - - #[test] - fn test_exists() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(store.exists(hash).unwrap()); - } - - #[test] - fn test_block_does_not_exist() { - let db = Arc::new(MemoryDB::open()); - let store = 
$store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - let other_hash = &Hash256::from([0xBB; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(!store.exists(other_hash).unwrap()); - } - - #[test] - fn test_delete() { - let db = Arc::new(MemoryDB::open()); - let store = $store::new(db.clone()); - - let ssz = "some bytes".as_bytes(); - let hash = &Hash256::from([0xAA; 32]); - - db.put(DB_COLUMN, hash.as_bytes(), ssz).unwrap(); - assert!(db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); - - store.delete(hash).unwrap(); - assert!(!db.exists(DB_COLUMN, hash.as_bytes()).unwrap()); - } - }; -} diff --git a/beacon_node/db/src/stores/mod.rs b/beacon_node/db/src/stores/mod.rs deleted file mode 100644 index 44de7eed1..000000000 --- a/beacon_node/db/src/stores/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::{ClientDB, DBError}; - -#[macro_use] -mod macros; -mod beacon_block_store; -mod beacon_state_store; -mod pow_chain_store; -mod validator_store; - -pub use self::beacon_block_store::{BeaconBlockAtSlotError, BeaconBlockStore}; -pub use self::beacon_state_store::BeaconStateStore; -pub use self::pow_chain_store::PoWChainStore; -pub use self::validator_store::{ValidatorStore, ValidatorStoreError}; - -pub const BLOCKS_DB_COLUMN: &str = "blocks"; -pub const STATES_DB_COLUMN: &str = "states"; -pub const POW_CHAIN_DB_COLUMN: &str = "powchain"; -pub const VALIDATOR_DB_COLUMN: &str = "validator"; - -pub const COLUMNS: [&str; 4] = [ - BLOCKS_DB_COLUMN, - STATES_DB_COLUMN, - POW_CHAIN_DB_COLUMN, - VALIDATOR_DB_COLUMN, -]; diff --git a/beacon_node/db/src/stores/pow_chain_store.rs b/beacon_node/db/src/stores/pow_chain_store.rs deleted file mode 100644 index 5c8b97907..000000000 --- a/beacon_node/db/src/stores/pow_chain_store.rs +++ /dev/null @@ -1,68 +0,0 @@ -use super::POW_CHAIN_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use std::sync::Arc; - -pub struct PoWChainStore -where - T: ClientDB, -{ - db: 
Arc, -} - -impl PoWChainStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> { - self.db.put(DB_COLUMN, hash, &[0]) - } - - pub fn block_hash_exists(&self, hash: &[u8]) -> Result { - self.db.exists(DB_COLUMN, hash) - } -} - -#[cfg(test)] -mod tests { - extern crate types; - - use super::super::super::MemoryDB; - use super::*; - - use self::types::Hash256; - - #[test] - fn test_put_block_hash() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - store.put_block_hash(hash).unwrap(); - - assert!(db.exists(DB_COLUMN, hash).unwrap()); - } - - #[test] - fn test_block_hash_exists() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - db.put(DB_COLUMN, hash, &[0]).unwrap(); - - assert!(store.block_hash_exists(hash).unwrap()); - } - - #[test] - fn test_block_hash_does_not_exist() { - let db = Arc::new(MemoryDB::open()); - let store = PoWChainStore::new(db.clone()); - - let hash = &Hash256::from([0xAA; 32]).as_bytes().to_vec(); - let other_hash = &Hash256::from([0xBB; 32]).as_bytes().to_vec(); - db.put(DB_COLUMN, hash, &[0]).unwrap(); - - assert!(!store.block_hash_exists(other_hash).unwrap()); - } -} diff --git a/beacon_node/db/src/stores/validator_store.rs b/beacon_node/db/src/stores/validator_store.rs deleted file mode 100644 index f653c9f71..000000000 --- a/beacon_node/db/src/stores/validator_store.rs +++ /dev/null @@ -1,215 +0,0 @@ -extern crate bytes; - -use self::bytes::{BufMut, BytesMut}; -use super::VALIDATOR_DB_COLUMN as DB_COLUMN; -use super::{ClientDB, DBError}; -use bls::PublicKey; -use ssz::{Decode, Encode}; -use std::sync::Arc; - -#[derive(Debug, PartialEq)] -pub enum ValidatorStoreError { - DBError(String), - DecodeError, -} - -impl From for ValidatorStoreError { - fn from(error: DBError) -> 
Self { - ValidatorStoreError::DBError(error.message) - } -} - -#[derive(Debug, PartialEq)] -enum KeyPrefixes { - PublicKey, -} - -pub struct ValidatorStore -where - T: ClientDB, -{ - db: Arc, -} - -impl ValidatorStore { - pub fn new(db: Arc) -> Self { - Self { db } - } - - fn prefix_bytes(&self, key_prefix: &KeyPrefixes) -> Vec { - match key_prefix { - KeyPrefixes::PublicKey => b"pubkey".to_vec(), - } - } - - fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) -> Vec { - let mut buf = BytesMut::with_capacity(6 + 8); - buf.put(self.prefix_bytes(key_prefix)); - buf.put_u64_be(index as u64); - buf.take().to_vec() - } - - pub fn put_public_key_by_index( - &self, - index: usize, - public_key: &PublicKey, - ) -> Result<(), ValidatorStoreError> { - let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); - let val = public_key.as_ssz_bytes(); - self.db - .put(DB_COLUMN, &key[..], &val[..]) - .map_err(ValidatorStoreError::from) - } - - pub fn get_public_key_by_index( - &self, - index: usize, - ) -> Result, ValidatorStoreError> { - let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); - let val = self.db.get(DB_COLUMN, &key[..])?; - match val { - None => Ok(None), - Some(val) => match PublicKey::from_ssz_bytes(&val) { - Ok(key) => Ok(Some(key)), - Err(_) => Err(ValidatorStoreError::DecodeError), - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::super::super::MemoryDB; - use super::*; - use bls::Keypair; - - #[test] - fn test_prefix_bytes() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - assert_eq!( - store.prefix_bytes(&KeyPrefixes::PublicKey), - b"pubkey".to_vec() - ); - } - - #[test] - fn test_get_db_key_for_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let mut buf = BytesMut::with_capacity(6 + 8); - buf.put(b"pubkey".to_vec()); - buf.put_u64_be(42); - assert_eq!( - store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42), 
- buf.take().to_vec() - ) - } - - #[test] - fn test_put_public_key_by_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let index = 3; - let public_key = Keypair::random().pk; - - store.put_public_key_by_index(index, &public_key).unwrap(); - let public_key_at_index = db - .get( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], - ) - .unwrap() - .unwrap(); - - assert_eq!(public_key_at_index, public_key.as_ssz_bytes()); - } - - #[test] - fn test_get_public_key_by_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let index = 4; - let public_key = Keypair::random().pk; - - db.put( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, index)[..], - &public_key.as_ssz_bytes(), - ) - .unwrap(); - - let public_key_at_index = store.get_public_key_by_index(index).unwrap().unwrap(); - assert_eq!(public_key_at_index, public_key); - } - - #[test] - fn test_get_public_key_by_unknown_index() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let public_key = Keypair::random().pk; - - db.put( - DB_COLUMN, - &store.get_db_key_for_index(&KeyPrefixes::PublicKey, 3)[..], - &public_key.as_ssz_bytes(), - ) - .unwrap(); - - let public_key_at_index = store.get_public_key_by_index(4).unwrap(); - assert_eq!(public_key_at_index, None); - } - - #[test] - fn test_get_invalid_public_key() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db.clone()); - - let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42); - db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap(); - - assert_eq!( - store.get_public_key_by_index(42), - Err(ValidatorStoreError::DecodeError) - ); - } - - #[test] - fn test_validator_store_put_get() { - let db = Arc::new(MemoryDB::open()); - let store = ValidatorStore::new(db); - - let keys = vec![ - Keypair::random(), - Keypair::random(), - Keypair::random(), - 
Keypair::random(), - Keypair::random(), - ]; - - for i in 0..keys.len() { - store.put_public_key_by_index(i, &keys[i].pk).unwrap(); - } - - /* - * Check all keys are retrieved correctly. - */ - for i in 0..keys.len() { - let retrieved = store.get_public_key_by_index(i).unwrap().unwrap(); - assert_eq!(retrieved, keys[i].pk); - } - - /* - * Check that an index that wasn't stored returns None. - */ - assert!(store - .get_public_key_by_index(keys.len() + 1) - .unwrap() - .is_none()); - } -} diff --git a/beacon_node/db/src/traits.rs b/beacon_node/db/src/traits.rs deleted file mode 100644 index 41be3e23d..000000000 --- a/beacon_node/db/src/traits.rs +++ /dev/null @@ -1,28 +0,0 @@ -pub type DBValue = Vec; - -#[derive(Debug)] -pub struct DBError { - pub message: String, -} - -impl DBError { - pub fn new(message: String) -> Self { - Self { message } - } -} - -/// A generic database to be used by the "client' (i.e., -/// the lighthouse blockchain client). -/// -/// The purpose of having this generic trait is to allow the -/// program to use a persistent on-disk database during production, -/// but use a transient database during tests. 
-pub trait ClientDB: Sync + Send { - fn get(&self, col: &str, key: &[u8]) -> Result, DBError>; - - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError>; - - fn exists(&self, col: &str, key: &[u8]) -> Result; - - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError>; -} diff --git a/beacon_node/db2/Cargo.toml b/beacon_node/db2/Cargo.toml deleted file mode 100644 index 95e87c9ea..000000000 --- a/beacon_node/db2/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "db2" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -blake2-rfc = "0.2.18" -bls = { path = "../../eth2/utils/bls" } -bytes = "0.4.10" -db_encode = { path = "../db_encode" } -db_encode_derive = { path = "../db_encode_derive" } -parking_lot = "0.7" -rocksdb = "0.10.1" -ssz = { path = "../../eth2/utils/ssz" } -ssz_derive = { path = "../../eth2/utils/ssz_derive" } -types = { path = "../../eth2/types" } diff --git a/beacon_node/db2/src/disk_db.rs b/beacon_node/db2/src/disk_db.rs deleted file mode 100644 index e2162e29a..000000000 --- a/beacon_node/db2/src/disk_db.rs +++ /dev/null @@ -1,199 +0,0 @@ -extern crate rocksdb; - -// use super::stores::COLUMNS; -use super::{ClientDB, DBError, DBValue}; -use rocksdb::Error as RocksError; -use rocksdb::{Options, DB}; -use std::fs; -use std::path::Path; - -/// A on-disk database which implements the ClientDB trait. -/// -/// This implementation uses RocksDB with default options. -pub struct DiskDB { - db: DB, -} - -impl DiskDB { - /// Open the RocksDB database, optionally supplying columns if required. - /// - /// The RocksDB database will be contained in a directory titled - /// "database" in the supplied path. - /// - /// # Panics - /// - /// Panics if the database is unable to be created. - pub fn open(path: &Path, columns: Option<&[&str]>) -> Self { - // Rocks options. - let mut options = Options::default(); - options.create_if_missing(true); - - // Ensure the path exists. 
- fs::create_dir_all(&path).unwrap_or_else(|_| panic!("Unable to create {:?}", &path)); - let db_path = path.join("database"); - - let columns = columns.unwrap_or(&COLUMNS); - - if db_path.exists() { - Self { - db: DB::open_cf(&options, db_path, &COLUMNS) - .expect("Unable to open local database"), - } - } else { - let mut db = Self { - db: DB::open(&options, db_path).expect("Unable to open local database"), - }; - - for cf in columns { - db.create_col(cf).unwrap(); - } - - db - } - } - - /// Create a RocksDB column family. Corresponds to the - /// `create_cf()` function on the RocksDB API. - #[allow(dead_code)] - fn create_col(&mut self, col: &str) -> Result<(), DBError> { - match self.db.create_cf(col, &Options::default()) { - Err(e) => Err(e.into()), - Ok(_) => Ok(()), - } - } -} - -impl From for DBError { - fn from(e: RocksError) -> Self { - Self { - message: e.to_string(), - } - } -} - -impl ClientDB for DiskDB { - /// Get the value for some key on some column. - /// - /// Corresponds to the `get_cf()` method on the RocksDB API. - /// Will attempt to get the `ColumnFamily` and return an Err - /// if it fails. - fn get(&self, col: &str, key: &[u8]) -> Result, DBError> { - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => match self.db.get_cf(handle, key)? { - None => Ok(None), - Some(db_vec) => Ok(Some(DBValue::from(&*db_vec))), - }, - } - } - - /// Set some value for some key on some column. - /// - /// Corresponds to the `cf_handle()` method on the RocksDB API. - /// Will attempt to get the `ColumnFamily` and return an Err - /// if it fails. - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> { - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()), - } - } - - /// Return true if some key exists in some column. 
- fn exists(&self, col: &str, key: &[u8]) -> Result { - /* - * I'm not sure if this is the correct way to read if some - * block exists. Naively I would expect this to unncessarily - * copy some data, but I could be wrong. - */ - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => Ok(self.db.get_cf(handle, key)?.is_some()), - } - } - - /// Delete the value for some key on some column. - /// - /// Corresponds to the `delete_cf()` method on the RocksDB API. - /// Will attempt to get the `ColumnFamily` and return an Err - /// if it fails. - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> { - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => { - self.db.delete_cf(handle, key)?; - Ok(()) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::super::ClientDB; - use super::*; - use std::sync::Arc; - use std::{env, fs, thread}; - - #[test] - #[ignore] - fn test_rocksdb_can_use_db() { - let pwd = env::current_dir().unwrap(); - let path = pwd.join("testdb_please_remove"); - let _ = fs::remove_dir_all(&path); - fs::create_dir_all(&path).unwrap(); - - let col_name: &str = "TestColumn"; - let column_families = vec![col_name]; - - let mut db = DiskDB::open(&path, None); - - for cf in column_families { - db.create_col(&cf).unwrap(); - } - - let db = Arc::new(db); - - let thread_count = 10; - let write_count = 10; - - // We're execting the product of these numbers to fit in one byte. 
- assert!(thread_count * write_count <= 255); - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let db = db.clone(); - let col = col_name.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = (t * w) as u8; - let val = 42; - db.put(&col, &vec![key], &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = (t * w) as u8; - let val = db.get(&col_name, &vec![key]).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } - fs::remove_dir_all(&path).unwrap(); - } -} diff --git a/beacon_node/db2/src/impls.rs b/beacon_node/db2/src/impls.rs deleted file mode 100644 index 9e607ddf5..000000000 --- a/beacon_node/db2/src/impls.rs +++ /dev/null @@ -1,16 +0,0 @@ -/* -use types::*; - -impl StoreEncode for Hash256 { - fn as_store_bytes(&self) -> Vec { - self.as_bytes().to_vec() - } -} - -impl StoreDecode for Hash256 { - fn from_store_bytes(bytes: &mut [u8]) -> Vec { - Hash256::from_slice() - self.as_bytes().to_vec() - } -} -*/ diff --git a/beacon_node/db2/src/lib.rs b/beacon_node/db2/src/lib.rs deleted file mode 100644 index 3bff89512..000000000 --- a/beacon_node/db2/src/lib.rs +++ /dev/null @@ -1,160 +0,0 @@ -// mod disk_db; -mod errors; -mod impls; -mod memory_db; - -pub use self::memory_db::MemoryDB; -pub use errors::Error; -pub use types::*; -pub type DBValue = Vec; - -pub trait Store: Sync + Send + Sized { - fn put(&self, key: &Hash256, item: &impl StorableItem) -> Result<(), Error> { - item.db_put(self, key) - } - - fn get(&self, key: &Hash256) -> Result, Error> { - I::db_get(self, key) - } - - fn exists(&self, key: &Hash256) -> Result { - I::db_exists(self, key) - } - - fn delete(&self, key: &Hash256) -> Result<(), Error> { - I::db_delete(self, key) - } - - fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error>; - - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> 
Result<(), Error>; - - fn key_exists(&self, col: &str, key: &[u8]) -> Result; - - fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; -} - -pub trait StoreEncode { - fn as_store_bytes(&self) -> Vec; -} - -pub trait StoreDecode: Sized { - fn from_store_bytes(bytes: &mut [u8]) -> Result; -} - -pub enum DBColumn { - Block, - State, - BeaconChain, -} - -impl<'a> Into<&'a str> for DBColumn { - /// Returns a `&str` that can be used for keying a key-value data base. - fn into(self) -> &'a str { - match self { - DBColumn::Block => &"blk", - DBColumn::State => &"ste", - DBColumn::BeaconChain => &"bch", - } - } -} - -pub trait StorableItem: StoreEncode + StoreDecode + Sized { - fn db_column() -> DBColumn; - - fn db_put(&self, store: &impl Store, key: &Hash256) -> Result<(), Error> { - let column = Self::db_column().into(); - let key = key.as_bytes(); - - store - .put_bytes(column, key, &self.as_store_bytes()) - .map_err(|e| e.into()) - } - - fn db_get(store: &impl Store, key: &Hash256) -> Result, Error> { - let column = Self::db_column().into(); - let key = key.as_bytes(); - - match store.get_bytes(column, key)? 
{ - Some(mut bytes) => Ok(Some(Self::from_store_bytes(&mut bytes[..])?)), - None => Ok(None), - } - } - - fn db_exists(store: &impl Store, key: &Hash256) -> Result { - let column = Self::db_column().into(); - let key = key.as_bytes(); - - store.key_exists(column, key) - } - - fn db_delete(store: &impl Store, key: &Hash256) -> Result<(), Error> { - let column = Self::db_column().into(); - let key = key.as_bytes(); - - store.key_delete(column, key) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use ssz::{ssz_encode, Decodable}; - use ssz_derive::{Decode, Encode}; - - #[derive(PartialEq, Debug, Encode, Decode)] - struct StorableThing { - a: u64, - b: u64, - } - - impl StoreEncode for StorableThing { - fn as_store_bytes(&self) -> Vec { - ssz_encode(self) - } - } - - impl StoreDecode for StorableThing { - fn from_store_bytes(bytes: &mut [u8]) -> Result { - let (item, _) = Self::ssz_decode(bytes, 0)?; - Ok(item) - } - } - - impl StorableItem for StorableThing { - fn db_column() -> DBColumn { - DBColumn::Block - } - } - - #[test] - fn memorydb_can_store_and_retrieve() { - let store = MemoryDB::open(); - - let key = Hash256::random(); - let item = StorableThing { a: 1, b: 42 }; - - store.put(&key, &item).unwrap(); - - let retrieved = store.get(&key).unwrap().unwrap(); - - assert_eq!(item, retrieved); - } - - #[test] - fn exists() { - let store = MemoryDB::open(); - let key = Hash256::random(); - let item = StorableThing { a: 1, b: 42 }; - - assert_eq!(store.exists::(&key).unwrap(), false); - - store.put(&key, &item).unwrap(); - - assert_eq!(store.exists::(&key).unwrap(), true); - - store.delete::(&key).unwrap(); - - assert_eq!(store.exists::(&key).unwrap(), false); - } -} diff --git a/beacon_node/db2/src/memory_db.rs b/beacon_node/db2/src/memory_db.rs deleted file mode 100644 index 83ff77ce1..000000000 --- a/beacon_node/db2/src/memory_db.rs +++ /dev/null @@ -1,61 +0,0 @@ -use super::{DBValue, Error, Store}; -use parking_lot::RwLock; -use std::collections::HashMap; - 
-type DBHashMap = HashMap, Vec>; - -pub struct MemoryDB { - db: RwLock, -} - -impl MemoryDB { - pub fn open() -> Self { - Self { - db: RwLock::new(HashMap::new()), - } - } - - fn get_key_for_col(col: &str, key: &[u8]) -> Vec { - let mut col = col.as_bytes().to_vec(); - col.append(&mut key.to_vec()); - col - } -} - -impl Store for MemoryDB { - /// Get the value of some key from the database. Returns `None` if the key does not exist. - fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { - let column_key = MemoryDB::get_key_for_col(col, key); - - Ok(self - .db - .read() - .get(&column_key) - .and_then(|val| Some(val.clone()))) - } - - /// Puts a key in the database. - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - let column_key = MemoryDB::get_key_for_col(col, key); - - self.db.write().insert(column_key, val.to_vec()); - - Ok(()) - } - - /// Return true if some key exists in some column. - fn key_exists(&self, col: &str, key: &[u8]) -> Result { - let column_key = MemoryDB::get_key_for_col(col, key); - - Ok(self.db.read().contains_key(&column_key)) - } - - /// Delete some key from the database. 
- fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { - let column_key = MemoryDB::get_key_for_col(col, key); - - self.db.write().remove(&column_key); - - Ok(()) - } -} diff --git a/beacon_node/db_encode/Cargo.toml b/beacon_node/db_encode/Cargo.toml deleted file mode 100644 index b4e919585..000000000 --- a/beacon_node/db_encode/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "db_encode" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -ethereum-types = "0.5" -ssz = { path = "../../eth2/utils/ssz" } diff --git a/beacon_node/db_encode/src/lib.rs b/beacon_node/db_encode/src/lib.rs deleted file mode 100644 index 993ba0e79..000000000 --- a/beacon_node/db_encode/src/lib.rs +++ /dev/null @@ -1,59 +0,0 @@ -use ethereum_types::{Address, H256}; -use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream}; - -/// Convenience function to encode an object. -pub fn db_encode(val: &T) -> Vec -where - T: DBEncode, -{ - let mut ssz_stream = SszStream::new(); - ssz_stream.append(val); - ssz_stream.drain() -} - -/// An encoding scheme based solely upon SSZ. -/// -/// The reason we have a separate encoding scheme is to allows us to store fields in the DB that we -/// don't want to transmit across the wire or hash. -/// -/// For example, the cache fields on `BeaconState` should be stored in the DB, but they should not -/// be hashed or transmitted across the wire. `DBEncode` allows us to define two serialization -/// methods, one that encodes the caches and one that does not. -pub trait DBEncode: Encodable + Sized { - fn db_encode(&self, s: &mut SszStream) { - s.append(&ssz_encode(self)); - } -} - -/// A decoding scheme based solely upon SSZ. -/// -/// See `DBEncode` for reasoning on why this trait exists. -pub trait DBDecode: Decodable { - fn db_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { - Self::ssz_decode(bytes, index) - } -} - -// Implement encoding. 
-impl DBEncode for bool {} -impl DBEncode for u8 {} -impl DBEncode for u16 {} -impl DBEncode for u32 {} -impl DBEncode for u64 {} -impl DBEncode for usize {} -impl DBEncode for Vec where T: Encodable + Sized {} - -impl DBEncode for H256 {} -impl DBEncode for Address {} - -// Implement decoding. -impl DBDecode for bool {} -impl DBDecode for u8 {} -impl DBDecode for u16 {} -impl DBDecode for u32 {} -impl DBDecode for u64 {} -impl DBDecode for usize {} -impl DBDecode for Vec where T: Decodable {} - -impl DBDecode for H256 {} -impl DBDecode for Address {} diff --git a/beacon_node/db_encode_derive/Cargo.toml b/beacon_node/db_encode_derive/Cargo.toml deleted file mode 100644 index b2fba85e3..000000000 --- a/beacon_node/db_encode_derive/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "db_encode_derive" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" -description = "Procedural derive macros for `db_encode` encoding and decoding." - -[lib] -proc-macro = true - -[dependencies] -syn = "0.15" -quote = "0.6" diff --git a/beacon_node/db_encode_derive/src/lib.rs b/beacon_node/db_encode_derive/src/lib.rs deleted file mode 100644 index 1de081419..000000000 --- a/beacon_node/db_encode_derive/src/lib.rs +++ /dev/null @@ -1,305 +0,0 @@ -extern crate proc_macro; - -use proc_macro::TokenStream; -use quote::quote; -use syn::{parse_macro_input, DeriveInput}; - -/// Returns a Vec of `syn::Ident` for each named field in the struct. -/// -/// # Panics -/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. -fn get_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> { - struct_data - .fields - .iter() - .map(|f| match &f.ident { - Some(ref ident) => ident, - _ => panic!("db_derive only supports named struct fields."), - }) - .collect() -} - -/// Implements `db_encode::DBEncode` for some `struct`. -/// -/// Fields are encoded in the order they are defined. 
-#[proc_macro_derive(DBEncode)] -pub fn db_encode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("db_derive only supports structs."), - }; - - let field_idents = get_named_field_idents(&struct_data); - - let output = quote! { - impl db_encode::DBEncode for #name { - fn db_encode(&self, s: &mut ssz::SszStream) { - #( - s.append(&self.#field_idents); - )* - } - } - }; - output.into() -} - -/// Implements `db_encode::DBEncode` for some `struct`. -/// -/// Fields are encoded in the order they are defined. -#[proc_macro_derive(DBDecode)] -pub fn db_decode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("ssz_derive only supports structs."), - }; - - let field_idents = get_named_field_idents(&struct_data); - - // Using a var in an iteration always consumes the var, therefore we must make a `fields_a` and - // a `fields_b` in order to perform two loops. - // - // https://github.com/dtolnay/quote/issues/8 - let field_idents_a = &field_idents; - let field_idents_b = &field_idents; - - let output = quote! { - impl db_encode::DBDecode for #name { - fn db_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), ssz::DecodeError> { - #( - let (#field_idents_a, i) = <_>::ssz_decode(bytes, i)?; - )* - - Ok(( - Self { - #( - #field_idents_b, - )* - }, - i - )) - } - } - }; - output.into() -} - -/* -/// Returns true if some field has an attribute declaring it should not be deserialized. 
-/// -/// The field attribute is: `#[ssz(skip_deserializing)]` -fn should_skip_deserializing(field: &syn::Field) -> bool { - for attr in &field.attrs { - if attr.tts.to_string() == "( skip_deserializing )" { - return true; - } - } - false -} - -/// Implements `ssz::Decodable` for some `struct`. -/// -/// Fields are decoded in the order they are defined. -#[proc_macro_derive(Decode)] -pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("ssz_derive only supports structs."), - }; - - let all_idents = get_named_field_idents(&struct_data); - - // Build quotes for fields that should be deserialized and those that should be built from - // `Default`. - let mut quotes = vec![]; - for field in &struct_data.fields { - match &field.ident { - Some(ref ident) => { - if should_skip_deserializing(field) { - quotes.push(quote! { - let #ident = <_>::default(); - }); - } else { - quotes.push(quote! { - let (#ident, i) = <_>::ssz_decode(bytes, i)?; - }); - } - } - _ => panic!("ssz_derive only supports named struct fields."), - }; - } - - let output = quote! { - impl ssz::Decodable for #name { - fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), ssz::DecodeError> { - #( - #quotes - )* - - Ok(( - Self { - #( - #all_idents, - )* - }, - i - )) - } - } - }; - output.into() -} - -/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields -/// that should not be tree hashed. -/// -/// # Panics -/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. 
-fn get_tree_hashable_named_field_idents<'a>( - struct_data: &'a syn::DataStruct, -) -> Vec<&'a syn::Ident> { - struct_data - .fields - .iter() - .filter_map(|f| { - if should_skip_tree_hash(&f) { - None - } else { - Some(match &f.ident { - Some(ref ident) => ident, - _ => panic!("ssz_derive only supports named struct fields."), - }) - } - }) - .collect() -} - -/// Returns true if some field has an attribute declaring it should not be tree-hashed. -/// -/// The field attribute is: `#[tree_hash(skip_hashing)]` -fn should_skip_tree_hash(field: &syn::Field) -> bool { - for attr in &field.attrs { - if attr.tts.to_string() == "( skip_hashing )" { - return true; - } - } - false -} - -/// Implements `ssz::TreeHash` for some `struct`. -/// -/// Fields are processed in the order they are defined. -#[proc_macro_derive(TreeHash, attributes(tree_hash))] -pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("ssz_derive only supports structs."), - }; - - let field_idents = get_tree_hashable_named_field_idents(&struct_data); - - let output = quote! { - impl ssz::TreeHash for #name { - fn hash_tree_root(&self) -> Vec { - let mut list: Vec> = Vec::new(); - #( - list.push(self.#field_idents.hash_tree_root()); - )* - - ssz::merkle_hash(&mut list) - } - } - }; - output.into() -} - -/// Returns `true` if some `Ident` should be considered to be a signature type. -fn type_ident_is_signature(ident: &syn::Ident) -> bool { - match ident.to_string().as_ref() { - "Signature" => true, - "AggregateSignature" => true, - _ => false, - } -} - -/// Takes a `Field` where the type (`ty`) portion is a path (e.g., `types::Signature`) and returns -/// the final `Ident` in that path. -/// -/// E.g., for `types::Signature` returns `Signature`. 
-fn final_type_ident(field: &syn::Field) -> &syn::Ident { - match &field.ty { - syn::Type::Path(path) => &path.path.segments.last().unwrap().value().ident, - _ => panic!("ssz_derive only supports Path types."), - } -} - -/// Implements `ssz::TreeHash` for some `struct`, whilst excluding any fields following and -/// including a field that is of type "Signature" or "AggregateSignature". -/// -/// See: -/// https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots -/// -/// This is a rather horrendous macro, it will read the type of the object as a string and decide -/// if it's a signature by matching that string against "Signature" or "AggregateSignature". So, -/// it's important that you use those exact words as your type -- don't alias it to something else. -/// -/// If you can think of a better way to do this, please make an issue! -/// -/// Fields are processed in the order they are defined. -#[proc_macro_derive(SignedRoot)] -pub fn ssz_signed_root_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("ssz_derive only supports structs."), - }; - - let mut field_idents: Vec<&syn::Ident> = vec![]; - - for field in struct_data.fields.iter() { - let final_type_ident = final_type_ident(&field); - - if type_ident_is_signature(final_type_ident) { - break; - } else { - let ident = field - .ident - .as_ref() - .expect("ssz_derive only supports named_struct fields."); - field_idents.push(ident); - } - } - - let output = quote! 
{ - impl ssz::SignedRoot for #name { - fn signed_root(&self) -> Vec { - let mut list: Vec> = Vec::new(); - #( - list.push(self.#field_idents.hash_tree_root()); - )* - - ssz::merkle_hash(&mut list) - } - } - }; - output.into() -} -*/ diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index 016cd5dea..50aedb7e2 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -16,22 +16,23 @@ //! [`slow_lmd_ghost`]: struct.SlowLmdGhost.html //! [`bitwise_lmd_ghost`]: struct.OptimisedLmdGhost.html -extern crate db; -extern crate ssz; -extern crate types; - +/* pub mod bitwise_lmd_ghost; pub mod longest_chain; pub mod optimized_lmd_ghost; +*/ pub mod slow_lmd_ghost; -use db::stores::BeaconBlockAtSlotError; -use db::DBError; +// use db::stores::BeaconBlockAtSlotError; +// use db::DBError; +use db::Error as DBError; use types::{BeaconBlock, ChainSpec, Hash256}; +/* pub use bitwise_lmd_ghost::BitwiseLMDGhost; pub use longest_chain::LongestChain; pub use optimized_lmd_ghost::OptimizedLMDGhost; +*/ pub use slow_lmd_ghost::SlowLMDGhost; /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures @@ -77,10 +78,11 @@ pub enum ForkChoiceError { impl From for ForkChoiceError { fn from(e: DBError) -> ForkChoiceError { - ForkChoiceError::StorageError(e.message) + ForkChoiceError::StorageError(format!("{:?}", e)) } } +/* impl From for ForkChoiceError { fn from(e: BeaconBlockAtSlotError) -> ForkChoiceError { match e { @@ -94,6 +96,7 @@ impl From for ForkChoiceError { } } } +*/ /// Fork choice options that are currently implemented. 
#[derive(Debug, Clone)] diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index c9aaa70d1..7768c2867 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -1,10 +1,7 @@ extern crate db; use crate::{ForkChoice, ForkChoiceError}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, -}; +use db::{Store, StoreItem}; use log::{debug, trace}; use std::collections::HashMap; use std::marker::PhantomData; @@ -13,32 +10,23 @@ use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot}; //TODO: Pruning and syncing -pub struct SlowLMDGhost { +pub struct SlowLMDGhost { /// The latest attestation targets as a map of validator index to block hash. //TODO: Could this be a fixed size vec latest_attestation_targets: HashMap, /// Stores the children for any given parent. children: HashMap>, - /// Block storage access. - block_store: Arc>, - /// State storage access. - state_store: Arc>, + /// Persistent storage + store: Arc, _phantom: PhantomData, } -impl SlowLMDGhost -where - T: ClientDB + Sized, -{ - pub fn new( - block_store: Arc>, - state_store: Arc>, - ) -> Self { +impl SlowLMDGhost { + pub fn new(store: Arc) -> Self { SlowLMDGhost { latest_attestation_targets: HashMap::new(), children: HashMap::new(), - block_store, - state_store, + store, _phantom: PhantomData, } } @@ -58,8 +46,8 @@ where let mut latest_votes: HashMap = HashMap::new(); // gets the current weighted votes let current_state: BeaconState = self - .state_store - .get_deserialized(&state_root)? + .store + .get(state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = @@ -90,8 +78,8 @@ where ) -> Result { let mut count = 0; let block_slot = self - .block_store - .get_deserialized(&block_root)? + .store + .get::(&block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))? 
.slot; @@ -108,7 +96,7 @@ where } } -impl ForkChoice for SlowLMDGhost { +impl ForkChoice for SlowLMDGhost { /// Process when a block is added fn add_block( &mut self, From f8c425d6b4bb56af4873a1ece9f8a63124bd5aff Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 12:58:11 +1000 Subject: [PATCH 10/21] Fix and add tests for db get block at slot --- beacon_node/db/Cargo.toml | 3 +- beacon_node/db/src/block_at_slot.rs | 137 ++++++++++++++++++++++++++++ beacon_node/db/src/lib.rs | 2 +- 3 files changed, 139 insertions(+), 3 deletions(-) diff --git a/beacon_node/db/Cargo.toml b/beacon_node/db/Cargo.toml index bb2f659f8..b6cdafe04 100644 --- a/beacon_node/db/Cargo.toml +++ b/beacon_node/db/Cargo.toml @@ -8,10 +8,9 @@ edition = "2018" blake2-rfc = "0.2.18" bls = { path = "../../eth2/utils/bls" } bytes = "0.4.10" -db_encode = { path = "../db_encode" } -db_encode_derive = { path = "../db_encode_derive" } parking_lot = "0.7" rocksdb = "0.10.1" ssz = { path = "../../eth2/utils/ssz" } ssz_derive = { path = "../../eth2/utils/ssz_derive" } +tree_hash = { path = "../../eth2/utils/tree_hash" } types = { path = "../../eth2/types" } diff --git a/beacon_node/db/src/block_at_slot.rs b/beacon_node/db/src/block_at_slot.rs index c18c8998c..4fa9635a2 100644 --- a/beacon_node/db/src/block_at_slot.rs +++ b/beacon_node/db/src/block_at_slot.rs @@ -44,3 +44,140 @@ pub fn get_block_at_preceeding_slot( } } } + +#[cfg(test)] +mod tests { + use super::*; + use ssz::Encode; + use tree_hash::TreeHash; + + #[test] + fn read_slot() { + let spec = FewValidatorsEthSpec::spec(); + + let test_slot = |slot: Slot| { + let mut block = BeaconBlock::empty(&spec); + block.slot = slot; + let bytes = block.as_ssz_bytes(); + assert_eq!(read_slot_from_block_bytes(&bytes).unwrap(), slot); + }; + + test_slot(Slot::new(0)); + test_slot(Slot::new(1)); + test_slot(Slot::new(42)); + test_slot(Slot::new(u64::max_value())); + } + + #[test] + fn bad_slot() { + for i in 0..8 { + 
assert!(read_slot_from_block_bytes(&vec![0; i]).is_err()); + } + } + + #[test] + fn read_previous_block_root() { + let spec = FewValidatorsEthSpec::spec(); + + let test_root = |root: Hash256| { + let mut block = BeaconBlock::empty(&spec); + block.previous_block_root = root; + let bytes = block.as_ssz_bytes(); + assert_eq!( + read_previous_block_root_from_block_bytes(&bytes).unwrap(), + root + ); + }; + + test_root(Hash256::random()); + test_root(Hash256::random()); + test_root(Hash256::random()); + } + + fn build_chain( + store: &impl Store, + slots: &[usize], + spec: &ChainSpec, + ) -> Vec<(Hash256, BeaconBlock)> { + let mut blocks_and_roots: Vec<(Hash256, BeaconBlock)> = vec![]; + + for (i, slot) in slots.iter().enumerate() { + let mut block = BeaconBlock::empty(spec); + block.slot = Slot::from(*slot); + + if i > 0 { + block.previous_block_root = blocks_and_roots[i - 1].0; + } + + let root = Hash256::from_slice(&block.tree_hash_root()); + + store.put(&root, &block).unwrap(); + blocks_and_roots.push((root, block)); + } + + blocks_and_roots + } + + #[test] + fn chain_without_skips() { + let n: usize = 10; + let store = MemoryDB::open(); + let spec = FewValidatorsEthSpec::spec(); + + let slots: Vec = (0..n).collect(); + let blocks_and_roots = build_chain(&store, &slots, &spec); + + for source in 1..n { + for target in 0..=source { + let (source_root, _source_block) = &blocks_and_roots[source]; + let (target_root, target_block) = &blocks_and_roots[target]; + + let (found_root, found_block) = store + .get_block_at_preceeding_slot(*source_root, target_block.slot) + .unwrap() + .unwrap(); + + assert_eq!(found_root, *target_root); + assert_eq!(found_block, *target_block); + } + } + } + + #[test] + fn chain_with_skips() { + let store = MemoryDB::open(); + let spec = FewValidatorsEthSpec::spec(); + + let slots = vec![0, 1, 2, 5]; + + let blocks_and_roots = build_chain(&store, &slots, &spec); + + // Valid slots + for target in 0..3 { + let (source_root, _source_block) = 
&blocks_and_roots[3]; + let (target_root, target_block) = &blocks_and_roots[target]; + + let (found_root, found_block) = store + .get_block_at_preceeding_slot(*source_root, target_block.slot) + .unwrap() + .unwrap(); + + assert_eq!(found_root, *target_root); + assert_eq!(found_block, *target_block); + } + + // Slot that doesn't exist + let (source_root, _source_block) = &blocks_and_roots[3]; + assert!(store + .get_block_at_preceeding_slot(*source_root, Slot::new(3)) + .unwrap() + .is_none()); + + // Slot too high + let (source_root, _source_block) = &blocks_and_roots[3]; + assert!(store + .get_block_at_preceeding_slot(*source_root, Slot::new(3)) + .unwrap() + .is_none()); + } +} diff --git a/beacon_node/db/src/lib.rs b/beacon_node/db/src/lib.rs index 8ac28092a..21b5b0a75 100644 --- a/beacon_node/db/src/lib.rs +++ b/beacon_node/db/src/lib.rs @@ -28,8 +28,8 @@ pub trait Store: Sync + Send + Sized { fn get_block_at_preceeding_slot( &self, - slot: Slot, start_block_root: Hash256, + slot: Slot, ) -> Result, Error> { block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) } From 2128d411bc6544202e159edd72f3d78fdb25d33e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 12:58:51 +1000 Subject: [PATCH 11/21] Migrate fork_choice over to new DB --- eth2/fork_choice/src/bitwise_lmd_ghost.rs | 56 ++++++++------------- eth2/fork_choice/src/lib.rs | 4 -- eth2/fork_choice/src/longest_chain.rs | 28 ++++------- eth2/fork_choice/src/optimized_lmd_ghost.rs | 56 ++++++++------------- eth2/fork_choice/src/slow_lmd_ghost.rs | 20 ++++---- 5 files changed, 65 insertions(+), 99 deletions(-) diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index 0bbac6bb6..a76970f01 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -3,10 +3,7 @@ extern crate bit_vec; use crate::{ForkChoice, ForkChoiceError}; use bit_vec::BitVec; -use db::{ - stores::{BeaconBlockStore, 
BeaconStateStore}, - ClientDB, -}; +use db::Store; use log::{debug, trace}; use std::collections::HashMap; use std::marker::PhantomData; @@ -34,7 +31,7 @@ fn power_of_2_below(x: u64) -> u64 { } /// Stores the necessary data structures to run the optimised bitwise lmd ghost algorithm. -pub struct BitwiseLMDGhost { +pub struct BitwiseLMDGhost { /// A cache of known ancestors at given heights for a specific block. //TODO: Consider FnvHashMap cache: HashMap, Hash256>, @@ -46,30 +43,21 @@ pub struct BitwiseLMDGhost { /// The latest attestation targets as a map of validator index to block hash. //TODO: Could this be a fixed size vec latest_attestation_targets: HashMap, - /// Block storage access. - block_store: Arc>, - /// State storage access. - state_store: Arc>, + /// Block and state storage. + store: Arc, max_known_height: SlotHeight, _phantom: PhantomData, } -impl BitwiseLMDGhost -where - T: ClientDB + Sized, -{ - pub fn new( - block_store: Arc>, - state_store: Arc>, - ) -> Self { +impl BitwiseLMDGhost { + pub fn new(store: Arc) -> Self { BitwiseLMDGhost { cache: HashMap::new(), ancestors: vec![HashMap::new(); 16], latest_attestation_targets: HashMap::new(), children: HashMap::new(), max_known_height: SlotHeight::new(0), - block_store, - state_store, + store, _phantom: PhantomData, } } @@ -89,8 +77,8 @@ where let mut latest_votes: HashMap = HashMap::new(); // gets the current weighted votes let current_state: BeaconState = self - .state_store - .get_deserialized(&state_root)? + .store + .get(&state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = @@ -121,8 +109,8 @@ where // return None if we can't get the block from the db. let block_height = { let block_slot = self - .block_store - .get_deserialized(&block_hash) + .store + .get::(&block_hash) .ok()? 
.expect("Should have returned already if None") .slot; @@ -243,7 +231,7 @@ where } } -impl ForkChoice for BitwiseLMDGhost { +impl ForkChoice for BitwiseLMDGhost { fn add_block( &mut self, block: &BeaconBlock, @@ -252,8 +240,8 @@ impl ForkChoice for BitwiseLMDGhost { ) -> Result<(), ForkChoiceError> { // get the height of the parent let parent_height = self - .block_store - .get_deserialized(&block.previous_block_root)? + .store + .get::(&block.previous_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))? .slot .height(spec.genesis_slot); @@ -304,16 +292,16 @@ impl ForkChoice for BitwiseLMDGhost { trace!("Old attestation found: {:?}", attestation_target); // get the height of the target block let block_height = self - .block_store - .get_deserialized(&target_block_root)? + .store + .get::(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .slot .height(spec.genesis_slot); // get the height of the past target block let past_block_height = self - .block_store - .get_deserialized(&attestation_target)? + .store + .get::(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .slot .height(spec.genesis_slot); @@ -337,8 +325,8 @@ impl ForkChoice for BitwiseLMDGhost { justified_block_start ); let block = self - .block_store - .get_deserialized(&justified_block_start)? + .store + .get::(&justified_block_start)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; let block_slot = block.slot; @@ -429,8 +417,8 @@ impl ForkChoice for BitwiseLMDGhost { // didn't find head yet, proceed to next iteration // update block height block_height = self - .block_store - .get_deserialized(¤t_head)? + .store + .get::(¤t_head)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? 
.slot .height(spec.genesis_slot); diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index 50aedb7e2..ed3a1ce08 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -16,11 +16,9 @@ //! [`slow_lmd_ghost`]: struct.SlowLmdGhost.html //! [`bitwise_lmd_ghost`]: struct.OptimisedLmdGhost.html -/* pub mod bitwise_lmd_ghost; pub mod longest_chain; pub mod optimized_lmd_ghost; -*/ pub mod slow_lmd_ghost; // use db::stores::BeaconBlockAtSlotError; @@ -28,11 +26,9 @@ pub mod slow_lmd_ghost; use db::Error as DBError; use types::{BeaconBlock, ChainSpec, Hash256}; -/* pub use bitwise_lmd_ghost::BitwiseLMDGhost; pub use longest_chain::LongestChain; pub use optimized_lmd_ghost::OptimizedLMDGhost; -*/ pub use slow_lmd_ghost::SlowLMDGhost; /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures diff --git a/eth2/fork_choice/src/longest_chain.rs b/eth2/fork_choice/src/longest_chain.rs index 423edc567..6aaf56c32 100644 --- a/eth2/fork_choice/src/longest_chain.rs +++ b/eth2/fork_choice/src/longest_chain.rs @@ -1,31 +1,25 @@ use crate::{ForkChoice, ForkChoiceError}; -use db::{stores::BeaconBlockStore, ClientDB}; +use db::Store; use std::sync::Arc; use types::{BeaconBlock, ChainSpec, Hash256, Slot}; -pub struct LongestChain -where - T: ClientDB + Sized, -{ +pub struct LongestChain { /// List of head block hashes head_block_hashes: Vec, - /// Block storage access. - block_store: Arc>, + /// Block storage. + store: Arc, } -impl LongestChain -where - T: ClientDB + Sized, -{ - pub fn new(block_store: Arc>) -> Self { +impl LongestChain { + pub fn new(store: Arc) -> Self { LongestChain { head_block_hashes: Vec::new(), - block_store, + store, } } } -impl ForkChoice for LongestChain { +impl ForkChoice for LongestChain { fn add_block( &mut self, block: &BeaconBlock, @@ -55,9 +49,9 @@ impl ForkChoice for LongestChain { * Load all the head_block hashes from the DB as SszBeaconBlocks. 
*/ for (index, block_hash) in self.head_block_hashes.iter().enumerate() { - let block = self - .block_store - .get_deserialized(&block_hash)? + let block: BeaconBlock = self + .store + .get(&block_hash)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_hash))?; head_blocks.push((index, block)); } diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index 3f585e3c1..aa7d65c85 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -2,10 +2,7 @@ extern crate bit_vec; use crate::{ForkChoice, ForkChoiceError}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, -}; +use db::Store; use log::{debug, trace}; use std::cmp::Ordering; use std::collections::HashMap; @@ -34,7 +31,7 @@ fn power_of_2_below(x: u64) -> u64 { } /// Stores the necessary data structures to run the optimised lmd ghost algorithm. -pub struct OptimizedLMDGhost { +pub struct OptimizedLMDGhost { /// A cache of known ancestors at given heights for a specific block. //TODO: Consider FnvHashMap cache: HashMap, Hash256>, @@ -46,30 +43,21 @@ pub struct OptimizedLMDGhost { /// The latest attestation targets as a map of validator index to block hash. //TODO: Could this be a fixed size vec latest_attestation_targets: HashMap, - /// Block storage access. - block_store: Arc>, - /// State storage access. - state_store: Arc>, + /// Block and state storage. 
+ store: Arc, max_known_height: SlotHeight, _phantom: PhantomData, } -impl OptimizedLMDGhost -where - T: ClientDB + Sized, -{ - pub fn new( - block_store: Arc>, - state_store: Arc>, - ) -> Self { +impl OptimizedLMDGhost { + pub fn new(store: Arc) -> Self { OptimizedLMDGhost { cache: HashMap::new(), ancestors: vec![HashMap::new(); 16], latest_attestation_targets: HashMap::new(), children: HashMap::new(), max_known_height: SlotHeight::new(0), - block_store, - state_store, + store, _phantom: PhantomData, } } @@ -89,8 +77,8 @@ where let mut latest_votes: HashMap = HashMap::new(); // gets the current weighted votes let current_state: BeaconState = self - .state_store - .get_deserialized(&state_root)? + .store + .get(&state_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?; let active_validator_indices = @@ -121,8 +109,8 @@ where // return None if we can't get the block from the db. let block_height = { let block_slot = self - .block_store - .get_deserialized(&block_hash) + .store + .get::(&block_hash) .ok()? .expect("Should have returned already if None") .slot; @@ -214,7 +202,7 @@ where } } -impl ForkChoice for OptimizedLMDGhost { +impl ForkChoice for OptimizedLMDGhost { fn add_block( &mut self, block: &BeaconBlock, @@ -223,8 +211,8 @@ impl ForkChoice for OptimizedLMDGhost { ) -> Result<(), ForkChoiceError> { // get the height of the parent let parent_height = self - .block_store - .get_deserialized(&block.previous_block_root)? + .store + .get::(&block.previous_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))? .slot .height(spec.genesis_slot); @@ -275,16 +263,16 @@ impl ForkChoice for OptimizedLMDGhost { trace!("Old attestation found: {:?}", attestation_target); // get the height of the target block let block_height = self - .block_store - .get_deserialized(&target_block_root)? + .store + .get::(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? 
.slot .height(spec.genesis_slot); // get the height of the past target block let past_block_height = self - .block_store - .get_deserialized(&attestation_target)? + .store + .get::(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .slot .height(spec.genesis_slot); @@ -308,8 +296,8 @@ impl ForkChoice for OptimizedLMDGhost { justified_block_start ); let block = self - .block_store - .get_deserialized(&justified_block_start)? + .store + .get::(&justified_block_start)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; let block_slot = block.slot; @@ -400,8 +388,8 @@ impl ForkChoice for OptimizedLMDGhost { // didn't find head yet, proceed to next iteration // update block height block_height = self - .block_store - .get_deserialized(¤t_head)? + .store + .get::(¤t_head)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? .slot .height(spec.genesis_slot); diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index 7768c2867..a41eacbb2 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -1,7 +1,7 @@ extern crate db; use crate::{ForkChoice, ForkChoiceError}; -use db::{Store, StoreItem}; +use db::Store; use log::{debug, trace}; use std::collections::HashMap; use std::marker::PhantomData; @@ -16,7 +16,7 @@ pub struct SlowLMDGhost { latest_attestation_targets: HashMap, /// Stores the children for any given parent. children: HashMap>, - /// Persistent storage + /// Block and state storage. store: Arc, _phantom: PhantomData, } @@ -85,8 +85,8 @@ impl SlowLMDGhost { for (vote_hash, votes) in latest_votes.iter() { let (root_at_slot, _) = self - .block_store - .block_at_slot(&vote_hash, block_slot)? + .store + .get_block_at_preceeding_slot(*vote_hash, block_slot)? 
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))?; if root_at_slot == *block_root { count += votes; @@ -138,16 +138,16 @@ impl ForkChoice for SlowLMDGhost { trace!("Old attestation found: {:?}", attestation_target); // get the height of the target block let block_height = self - .block_store - .get_deserialized(&target_block_root)? + .store + .get::(&target_block_root)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .slot .height(spec.genesis_slot); // get the height of the past target block let past_block_height = self - .block_store - .get_deserialized(&attestation_target)? + .store + .get::(&attestation_target)? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .slot .height(spec.genesis_slot); @@ -168,8 +168,8 @@ impl ForkChoice for SlowLMDGhost { ) -> Result { debug!("Running LMD Ghost Fork-choice rule"); let start = self - .block_store - .get_deserialized(&justified_block_start)? + .store + .get::(&justified_block_start)? 
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; let start_state_root = start.state_root; From f1584dada470213d2477832ffc2c400d0e1694b5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 13:36:14 +1000 Subject: [PATCH 12/21] Update BeaconChain struct to use new store --- beacon_node/beacon_chain/src/beacon_chain.rs | 22 +++++++------------ .../testing_beacon_chain_builder.rs | 14 ++++-------- 2 files changed, 12 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index db5ea1cdb..57b697019 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,9 +1,6 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - ClientDB, DBError, -}; +use db::{Error as DBError, Store}; use fork_choice::{ForkChoice, ForkChoiceError}; use log::{debug, trace}; use operation_pool::DepositInsertStatus; @@ -83,9 +80,8 @@ impl BlockProcessingOutcome { } } -pub struct BeaconChain { - pub block_store: Arc>, - pub state_store: Arc>, +pub struct BeaconChain { + pub store: Arc, pub slot_clock: U, pub op_pool: OperationPool, canonical_head: RwLock>, @@ -97,15 +93,14 @@ pub struct BeaconChain BeaconChain where - T: ClientDB, + T: Store, U: SlotClock, F: ForkChoice, E: EthSpec, { /// Instantiate a new Beacon Chain, from genesis. 
pub fn from_genesis( - state_store: Arc>, - block_store: Arc>, + store: Arc, slot_clock: U, mut genesis_state: BeaconState, genesis_block: BeaconBlock, @@ -113,10 +108,10 @@ where fork_choice: F, ) -> Result { let state_root = genesis_state.canonical_root(); - state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; + store.put(&state_root, &genesis_state)?; let block_root = genesis_block.block_header().canonical_root(); - block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; + store.put(&block_root, &genesis_block)?; let finalized_head = RwLock::new(CheckPoint::new( genesis_block.clone(), @@ -134,8 +129,7 @@ where genesis_state.build_all_caches(&spec)?; Ok(Self { - block_store, - state_store, + store, slot_clock, op_pool: OperationPool::new(), state: RwLock::new(genesis_state), diff --git a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs index f7ff3cdae..ce3588674 100644 --- a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs @@ -1,8 +1,5 @@ pub use crate::{BeaconChain, BeaconChainError, CheckPoint}; -use db::{ - stores::{BeaconBlockStore, BeaconStateStore}, - MemoryDB, -}; +use db::MemoryDB; use fork_choice::BitwiseLMDGhost; use slot_clock::TestingSlotClock; use std::sync::Arc; @@ -19,11 +16,9 @@ pub struct TestingBeaconChainBuilder { impl TestingBeaconChainBuilder { pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain { - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); + let store = Arc::new(MemoryDB::open()); let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64()); - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + let fork_choice = BitwiseLMDGhost::new(store.clone()); let 
(genesis_state, _keypairs) = self.state_builder.build(); @@ -32,8 +27,7 @@ impl TestingBeaconChainBuilder { // Create the Beacon Chain BeaconChain::from_genesis( - state_store.clone(), - block_store.clone(), + store, slot_clock, genesis_state, genesis_block, From 7d067926dd20beb1f2df314d35e33274fbfc1e97 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 16:29:34 +1000 Subject: [PATCH 13/21] Replace RocksDB with LevelDB --- beacon_node/db/Cargo.toml | 6 +- beacon_node/db/src/leveldb_store.rs | 100 ++++++++++++++++++++++++++++ beacon_node/db/src/lib.rs | 20 ++++++ 3 files changed, 125 insertions(+), 1 deletion(-) create mode 100644 beacon_node/db/src/leveldb_store.rs diff --git a/beacon_node/db/Cargo.toml b/beacon_node/db/Cargo.toml index b6cdafe04..808d420a5 100644 --- a/beacon_node/db/Cargo.toml +++ b/beacon_node/db/Cargo.toml @@ -4,12 +4,16 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" +[dev-dependencies] +tempfile = "3" + [dependencies] blake2-rfc = "0.2.18" bls = { path = "../../eth2/utils/bls" } bytes = "0.4.10" +db-key = "0.0.5" +leveldb = "0.8.4" parking_lot = "0.7" -rocksdb = "0.10.1" ssz = { path = "../../eth2/utils/ssz" } ssz_derive = { path = "../../eth2/utils/ssz_derive" } tree_hash = { path = "../../eth2/utils/tree_hash" } diff --git a/beacon_node/db/src/leveldb_store.rs b/beacon_node/db/src/leveldb_store.rs new file mode 100644 index 000000000..c60e34283 --- /dev/null +++ b/beacon_node/db/src/leveldb_store.rs @@ -0,0 +1,100 @@ +use super::*; +use db_key::Key; +use leveldb::database::kv::KV; +use leveldb::database::Database; +use leveldb::error::Error as LevelDBError; +use leveldb::options::{Options, ReadOptions, WriteOptions}; +use parking_lot::RwLock; +use std::path::Path; + +pub struct LevelDB { + db: RwLock>, +} + +impl LevelDB { + pub fn open(path: &Path) -> Result { + let mut options = Options::new(); + + options.create_if_missing = true; + + let db = Database::open(path, options)?; + + Ok(Self { + db: 
RwLock::new(db), + }) + } + + fn read_options(&self) -> ReadOptions { + ReadOptions::new() + } + + fn write_options(&self) -> WriteOptions { + WriteOptions::new() + } + + fn get_key_for_col(col: &str, key: &[u8]) -> BytesKey { + let mut col = col.as_bytes().to_vec(); + col.append(&mut key.to_vec()); + BytesKey { key: col } + } +} + +pub struct BytesKey { + key: Vec, +} + +impl Key for BytesKey { + fn from_u8(key: &[u8]) -> Self { + Self { key: key.to_vec() } + } + + fn as_slice T>(&self, f: F) -> T { + f(self.key.as_slice()) + } +} + +impl Store for LevelDB { + fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { + let column_key = Self::get_key_for_col(col, key); + + self.db + .read() + .get(self.read_options(), column_key) + .map_err(Into::into) + } + + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { + let column_key = Self::get_key_for_col(col, key); + + self.db + .write() + .put(self.write_options(), column_key, val) + .map_err(Into::into) + } + + fn key_exists(&self, col: &str, key: &[u8]) -> Result { + let column_key = Self::get_key_for_col(col, key); + + self.db + .read() + .get(self.read_options(), column_key) + .map_err(Into::into) + .and_then(|val| Ok(val.is_some())) + } + + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { + let column_key = Self::get_key_for_col(col, key); + self.db + .write() + .delete(self.write_options(), column_key) + .map_err(Into::into) + } +} + +impl From for Error { + fn from(e: LevelDBError) -> Error { + Error::DBError { + message: format!("{:?}", e), + } + } +} diff --git a/beacon_node/db/src/lib.rs b/beacon_node/db/src/lib.rs index 21b5b0a75..d31dc06b0 100644 --- a/beacon_node/db/src/lib.rs +++ b/beacon_node/db/src/lib.rs @@ -2,8 +2,10 @@ mod block_at_slot; mod errors; mod impls; +mod leveldb_store; mod memory_db; +pub use self::leveldb_store::LevelDB; pub use self::memory_db::MemoryDB; pub use errors::Error; pub use types::*; @@ -106,6 +108,7 @@ mod tests { use 
super::*; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; + use tempfile::tempdir; #[derive(PartialEq, Debug, Encode, Decode)] struct StorableThing { @@ -127,6 +130,23 @@ mod tests { } } + #[test] + fn leveldb_can_store_and_retrieve() { + let dir = tempdir().unwrap(); + let path = dir.path(); + + let store = LevelDB::open(&path).unwrap(); + + let key = Hash256::random(); + let item = StorableThing { a: 1, b: 42 }; + + store.put(&key, &item).unwrap(); + + let retrieved = store.get(&key).unwrap().unwrap(); + + assert_eq!(item, retrieved); + } + #[test] fn memorydb_can_store_and_retrieve() { let store = MemoryDB::open(); From b3a94de086ae3b89d172779b8ab7aa5f7822e3d5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 16:36:06 +1000 Subject: [PATCH 14/21] Remove unnecessary RwLock from LevelDB --- beacon_node/db/src/leveldb_store.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/beacon_node/db/src/leveldb_store.rs b/beacon_node/db/src/leveldb_store.rs index c60e34283..a06585b11 100644 --- a/beacon_node/db/src/leveldb_store.rs +++ b/beacon_node/db/src/leveldb_store.rs @@ -4,11 +4,10 @@ use leveldb::database::kv::KV; use leveldb::database::Database; use leveldb::error::Error as LevelDBError; use leveldb::options::{Options, ReadOptions, WriteOptions}; -use parking_lot::RwLock; use std::path::Path; pub struct LevelDB { - db: RwLock>, + db: Database, } impl LevelDB { @@ -19,9 +18,7 @@ impl LevelDB { let db = Database::open(path, options)?; - Ok(Self { - db: RwLock::new(db), - }) + Ok(Self { db }) } fn read_options(&self) -> ReadOptions { @@ -58,7 +55,6 @@ impl Store for LevelDB { let column_key = Self::get_key_for_col(col, key); self.db - .read() .get(self.read_options(), column_key) .map_err(Into::into) } @@ -67,7 +63,6 @@ impl Store for LevelDB { let column_key = Self::get_key_for_col(col, key); self.db - .write() .put(self.write_options(), column_key, val) .map_err(Into::into) } @@ -76,7 +71,6 @@ impl Store for 
LevelDB { let column_key = Self::get_key_for_col(col, key); self.db - .read() .get(self.read_options(), column_key) .map_err(Into::into) .and_then(|val| Ok(val.is_some())) @@ -85,7 +79,6 @@ impl Store for LevelDB { fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db - .write() .delete(self.write_options(), column_key) .map_err(Into::into) } From 54f28df5b18bad81b471ace287f168fa9a8860ef Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 16:37:15 +1000 Subject: [PATCH 15/21] Improve testing for `Store` impls --- beacon_node/db/src/lib.rs | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/beacon_node/db/src/lib.rs b/beacon_node/db/src/lib.rs index d31dc06b0..c0b447c5f 100644 --- a/beacon_node/db/src/lib.rs +++ b/beacon_node/db/src/lib.rs @@ -130,35 +130,40 @@ mod tests { } } - #[test] - fn leveldb_can_store_and_retrieve() { - let dir = tempdir().unwrap(); - let path = dir.path(); - - let store = LevelDB::open(&path).unwrap(); - + fn test_impl(store: impl Store) { let key = Hash256::random(); let item = StorableThing { a: 1, b: 42 }; + assert_eq!(store.exists::(&key), Ok(false)); + store.put(&key, &item).unwrap(); - let retrieved = store.get(&key).unwrap().unwrap(); + assert_eq!(store.exists::(&key), Ok(true)); + let retrieved = store.get(&key).unwrap().unwrap(); assert_eq!(item, retrieved); + + store.delete::(&key).unwrap(); + + assert_eq!(store.exists::(&key), Ok(false)); + + assert_eq!(store.get::(&key), Ok(None)); } #[test] - fn memorydb_can_store_and_retrieve() { + fn leveldb() { + let dir = tempdir().unwrap(); + let path = dir.path(); + let store = LevelDB::open(&path).unwrap(); + + test_impl(store); + } + + #[test] + fn memorydb() { let store = MemoryDB::open(); - let key = Hash256::random(); - let item = StorableThing { a: 1, b: 42 }; - - store.put(&key, &item).unwrap(); - - let retrieved = 
store.get(&key).unwrap().unwrap(); - - assert_eq!(item, retrieved); + test_impl(store); } #[test] From 78368cc2cdedf3e467ba664fe05993f22f8c33d2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 16:49:56 +1000 Subject: [PATCH 16/21] Make LevelDB key type concrete (not generic) --- beacon_node/db/src/leveldb_store.rs | 10 +++++----- beacon_node/db/src/lib.rs | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/beacon_node/db/src/leveldb_store.rs b/beacon_node/db/src/leveldb_store.rs index a06585b11..10643d0cd 100644 --- a/beacon_node/db/src/leveldb_store.rs +++ b/beacon_node/db/src/leveldb_store.rs @@ -6,11 +6,11 @@ use leveldb::error::Error as LevelDBError; use leveldb::options::{Options, ReadOptions, WriteOptions}; use std::path::Path; -pub struct LevelDB { - db: Database, +pub struct LevelDB { + db: Database, } -impl LevelDB { +impl LevelDB { pub fn open(path: &Path) -> Result { let mut options = Options::new(); @@ -21,7 +21,7 @@ impl LevelDB { Ok(Self { db }) } - fn read_options(&self) -> ReadOptions { + fn read_options(&self) -> ReadOptions { ReadOptions::new() } @@ -50,7 +50,7 @@ impl Key for BytesKey { } } -impl Store for LevelDB { +impl Store for LevelDB { fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { let column_key = Self::get_key_for_col(col, key); diff --git a/beacon_node/db/src/lib.rs b/beacon_node/db/src/lib.rs index c0b447c5f..708ac698f 100644 --- a/beacon_node/db/src/lib.rs +++ b/beacon_node/db/src/lib.rs @@ -5,7 +5,7 @@ mod impls; mod leveldb_store; mod memory_db; -pub use self::leveldb_store::LevelDB; +pub use self::leveldb_store::LevelDB as DiskDB; pub use self::memory_db::MemoryDB; pub use errors::Error; pub use types::*; @@ -151,10 +151,10 @@ mod tests { } #[test] - fn leveldb() { + fn diskdb() { let dir = tempdir().unwrap(); let path = dir.path(); - let store = LevelDB::open(&path).unwrap(); + let store = DiskDB::open(&path).unwrap(); test_impl(store); } From 
058829b64d9c3d68d2ab12a7f7646b679720d467 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 17:27:06 +1000 Subject: [PATCH 17/21] Update `beacon_chain` to latest DB --- beacon_node/beacon_chain/src/beacon_chain.rs | 48 +++++++++----------- beacon_node/beacon_chain/src/errors.rs | 2 +- beacon_node/beacon_chain/src/initialise.rs | 48 +++++++++----------- 3 files changed, 44 insertions(+), 54 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 57b697019..7c52fed5d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -7,7 +7,6 @@ use operation_pool::DepositInsertStatus; use operation_pool::OperationPool; use parking_lot::{RwLock, RwLockReadGuard}; use slot_clock::SlotClock; -use ssz::ssz_encode; use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, ExitValidationError, ProposerSlashingValidationError, TransferValidationError, @@ -229,7 +228,7 @@ where let new_state_root = state.get_state_root(earliest_historic_slot)?; // Break if the DB is unable to load the state. - state = match self.state_store.get_deserialized(&new_state_root) { + state = match self.store.get(&new_state_root) { Ok(Some(state)) => state, _ => break, } @@ -256,7 +255,7 @@ where /// /// May return a database error. pub fn get_block(&self, block_root: &Hash256) -> Result, Error> { - Ok(self.block_store.get_deserialized(block_root)?) + Ok(self.store.get(block_root)?) } /// Update the canonical head to some new values. @@ -582,7 +581,7 @@ where // Load the blocks parent block from the database, returning invalid if that block is not // found. let parent_block_root = block.previous_block_root; - let parent_block = match self.block_store.get_deserialized(&parent_block_root)? { + let parent_block: BeaconBlock = match self.store.get(&parent_block_root)? 
{ Some(previous_block_root) => previous_block_root, None => { return Ok(BlockProcessingOutcome::InvalidBlock( @@ -595,15 +594,15 @@ where // It is an error because if know the parent block we should also know the parent state. let parent_state_root = parent_block.state_root; let parent_state = self - .state_store - .get_deserialized(&parent_state_root)? + .store + .get(&parent_state_root)? .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?; // TODO: check the block proposer signature BEFORE doing a state transition. This will // significantly lower exposure surface to DoS attacks. // Transition the parent state to the block slot. - let mut state = parent_state; + let mut state: BeaconState = parent_state; for _ in state.slot.as_u64()..block.slot.as_u64() { if let Err(e) = per_slot_processing(&mut state, &self.spec) { return Ok(BlockProcessingOutcome::InvalidBlock( @@ -629,8 +628,8 @@ where } // Store the block and state. - self.block_store.put(&block_root, &ssz_encode(&block)[..])?; - self.state_store.put(&state_root, &ssz_encode(&state)[..])?; + self.store.put(&block_root, &block)?; + self.store.put(&state_root, &state)?; // run the fork_choice add_block logic self.fork_choice @@ -723,15 +722,15 @@ where .find_head(&present_head, &self.spec)?; if new_head != present_head { - let block = self - .block_store - .get_deserialized(&new_head)? + let block: BeaconBlock = self + .store + .get(&new_head)? .ok_or_else(|| Error::MissingBeaconBlock(new_head))?; let block_root = block.canonical_root(); - let state = self - .state_store - .get_deserialized(&block.state_root)? + let state: BeaconState = self + .store + .get(&block.state_root)? .ok_or_else(|| Error::MissingBeaconState(block.state_root))?; let state_root = state.canonical_root(); @@ -746,7 +745,7 @@ where /// Returns `true` if the given block root has not been processed. 
pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { - Ok(!self.block_store.exists(beacon_block_root)?) + Ok(!self.store.exists::(beacon_block_root)?) } /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. @@ -772,19 +771,14 @@ where break; // Genesis has been reached. } - let beacon_block = self - .block_store - .get_deserialized(&beacon_block_root)? - .ok_or_else(|| { + let beacon_block: BeaconBlock = + self.store.get(&beacon_block_root)?.ok_or_else(|| { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; let beacon_state_root = beacon_block.state_root; - let beacon_state = self - .state_store - .get_deserialized(&beacon_state_root)? - .ok_or_else(|| { - Error::DBInconsistent(format!("Missing state {}", beacon_state_root)) - })?; + let beacon_state = self.store.get(&beacon_state_root)?.ok_or_else(|| { + Error::DBInconsistent(format!("Missing state {}", beacon_state_root)) + })?; let slot = CheckPoint { beacon_block, @@ -805,7 +799,7 @@ where impl From for Error { fn from(e: DBError) -> Error { - Error::DBError(e.message) + Error::DBError(e) } } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index a84e4b10e..dea7f63d9 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -20,7 +20,7 @@ pub enum BeaconChainError { UnableToReadSlot, BeaconStateError(BeaconStateError), DBInconsistent(String), - DBError(String), + DBError(db::Error), ForkChoiceError(ForkChoiceError), MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs index 6bc9c2599..197f7ace0 100644 --- a/beacon_node/beacon_chain/src/initialise.rs +++ b/beacon_node/beacon_chain/src/initialise.rs @@ -3,7 +3,6 @@ // testnet. These are examples. Also. there is code duplication which can/should be cleaned up. 
use crate::BeaconChain; -use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::{DiskDB, MemoryDB}; use fork_choice::BitwiseLMDGhost; use slot_clock::SystemTimeSlotClock; @@ -26,14 +25,9 @@ pub fn initialise_beacon_chain( FoundationEthSpec, >, > { - // set up the db - let db = Arc::new(DiskDB::open( - db_name.expect("Database directory must be included"), - None, - )); - - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); + let path = db_name.expect("db_name cannot be None."); + let store = DiskDB::open(path).expect("Unable to open DB."); + let store = Arc::new(store); let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, &spec); let (genesis_state, _keypairs) = state_builder.build(); @@ -49,14 +43,13 @@ pub fn initialise_beacon_chain( ) .expect("Unable to load SystemTimeSlotClock"); // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + let fork_choice = BitwiseLMDGhost::new(store.clone()); // Genesis chain //TODO: Handle error correctly Arc::new( BeaconChain::from_genesis( - state_store.clone(), - block_store.clone(), + store, slot_clock, genesis_state, genesis_block, @@ -79,9 +72,7 @@ pub fn initialise_test_beacon_chain_with_memory_db( FewValidatorsEthSpec, >, > { - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); + let store = Arc::new(MemoryDB::open()); let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, spec); let (genesis_state, _keypairs) = state_builder.build(); @@ -97,14 +88,13 @@ pub fn initialise_test_beacon_chain_with_memory_db( ) .expect("Unable to load SystemTimeSlotClock"); // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + let fork_choice = 
BitwiseLMDGhost::new(store.clone()); // Genesis chain //TODO: Handle error correctly Arc::new( BeaconChain::from_genesis( - state_store.clone(), - block_store.clone(), + store, slot_clock, genesis_state, genesis_block, @@ -119,16 +109,23 @@ pub fn initialise_test_beacon_chain_with_memory_db( pub fn initialise_test_beacon_chain_with_disk_db( spec: &ChainSpec, db_name: Option<&PathBuf>, -) -> Arc>> { - let db = Arc::new(DiskDB::open(db_name.expect("Must have DB path"), None)); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); +) -> Arc< + BeaconChain< + DiskDB, + SystemTimeSlotClock, + BitwiseLMDGhost, + FewValidatorsEthSpec, + >, +> { + let path = db_name.expect("db_name cannot be None."); + let store = DiskDB::open(path).expect("Unable to open DB."); + let store = Arc::new(store); let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, spec); let (genesis_state, _keypairs) = state_builder.build(); let mut genesis_block = BeaconBlock::empty(spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); // Slot clock let slot_clock = SystemTimeSlotClock::new( @@ -138,14 +135,13 @@ pub fn initialise_test_beacon_chain_with_disk_db( ) .expect("Unable to load SystemTimeSlotClock"); // Choose the fork choice - let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + let fork_choice = BitwiseLMDGhost::new(store.clone()); // Genesis chain //TODO: Handle error correctly Arc::new( BeaconChain::from_genesis( - state_store.clone(), - block_store.clone(), + store, slot_clock, genesis_state, genesis_block, From b62f4477e1ef69bdb1607c5ad00e0f211420ce13 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 17:45:35 +1000 Subject: [PATCH 18/21] More project-wide fixes for new DB --- beacon_node/client/src/client_config.rs | 9 
++++++-- beacon_node/client/src/client_types.rs | 25 ++++++++++++++++----- beacon_node/client/src/lib.rs | 6 ++--- beacon_node/network/src/beacon_chain.rs | 4 ++-- beacon_node/rpc/src/beacon_chain.rs | 4 ++-- beacon_node/src/main.rs | 2 +- beacon_node/src/run.rs | 5 ++--- eth2/fork_choice/tests/tests.rs | 29 ++++++++++--------------- 8 files changed, 48 insertions(+), 36 deletions(-) diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 942a907e0..4309b8a64 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -1,5 +1,4 @@ use clap::ArgMatches; -use db::DBType; use fork_choice::ForkChoiceAlgorithm; use network::NetworkConfig; use slog::error; @@ -12,6 +11,12 @@ use types::multiaddr::ToMultiaddr; use types::Multiaddr; use types::{ChainSpec, EthSpec, LighthouseTestnetEthSpec}; +#[derive(Debug, Clone)] +pub enum DBType { + Memory, + Disk, +} + /// Stores the client configuration for this Lighthouse instance. #[derive(Debug, Clone)] pub struct ClientConfig { @@ -132,7 +137,7 @@ impl ClientConfig { } match args.value_of("db") { - Some("rocks") => config.db_type = DBType::RocksDB, + Some("disk") => config.db_type = DBType::Disk, Some("memory") => config.db_type = DBType::Memory, _ => unreachable!(), // clap prevents this. 
}; diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs index fdca11caa..c54028d28 100644 --- a/beacon_node/client/src/client_types.rs +++ b/beacon_node/client/src/client_types.rs @@ -1,6 +1,6 @@ use crate::{ArcBeaconChain, ClientConfig}; use beacon_chain::{ - db::{ClientDB, DiskDB, MemoryDB}, + db::{DiskDB, MemoryDB, Store}, fork_choice::BitwiseLMDGhost, initialise, slot_clock::{SlotClock, SystemTimeSlotClock}, @@ -9,7 +9,7 @@ use fork_choice::ForkChoice; use types::{EthSpec, FewValidatorsEthSpec, FoundationEthSpec}; pub trait ClientTypes { - type DB: ClientDB + 'static; + type DB: Store + 'static; type SlotClock: SlotClock + 'static; type ForkChoice: ForkChoice + 'static; type EthSpec: EthSpec + 'static; @@ -24,7 +24,7 @@ pub struct StandardClientType; impl ClientTypes for StandardClientType { type DB = DiskDB; type SlotClock = SystemTimeSlotClock; - type ForkChoice = BitwiseLMDGhost; + type ForkChoice = BitwiseLMDGhost; type EthSpec = FoundationEthSpec; fn initialise_beacon_chain( @@ -39,12 +39,27 @@ pub struct MemoryDBTestingClientType; impl ClientTypes for MemoryDBTestingClientType { type DB = MemoryDB; type SlotClock = SystemTimeSlotClock; - type ForkChoice = BitwiseLMDGhost; + type ForkChoice = BitwiseLMDGhost; type EthSpec = FewValidatorsEthSpec; fn initialise_beacon_chain( config: &ClientConfig, ) -> ArcBeaconChain { - initialise::initialise_test_beacon_chain(&config.spec, None) + initialise::initialise_test_beacon_chain_with_memory_db(&config.spec, None) + } +} + +pub struct DiskDBTestingClientType; + +impl ClientTypes for DiskDBTestingClientType { + type DB = DiskDB; + type SlotClock = SystemTimeSlotClock; + type ForkChoice = BitwiseLMDGhost; + type EthSpec = FewValidatorsEthSpec; + + fn initialise_beacon_chain( + config: &ClientConfig, + ) -> ArcBeaconChain { + initialise::initialise_test_beacon_chain_with_disk_db(&config.spec, Some(&config.db_name)) } } diff --git a/beacon_node/client/src/lib.rs 
b/beacon_node/client/src/lib.rs index 5d7c221ef..00478b475 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -6,9 +6,9 @@ pub mod error; pub mod notifier; use beacon_chain::BeaconChain; -pub use client_config::ClientConfig; +pub use client_config::{ClientConfig, DBType}; pub use client_types::ClientTypes; -use db::ClientDB; +use db::Store; use exit_future::Signal; use fork_choice::ForkChoice; use futures::{future::Future, Stream}; @@ -146,7 +146,7 @@ impl Client { fn do_state_catchup(chain: &Arc>, log: &slog::Logger) where - T: ClientDB, + T: Store, U: SlotClock, F: ForkChoice, E: EthSpec, diff --git a/beacon_node/network/src/beacon_chain.rs b/beacon_node/network/src/beacon_chain.rs index a98aa73de..f123e4540 100644 --- a/beacon_node/network/src/beacon_chain.rs +++ b/beacon_node/network/src/beacon_chain.rs @@ -1,6 +1,6 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ - db::ClientDB, + db::Store, fork_choice::ForkChoice, parking_lot::RwLockReadGuard, slot_clock::SlotClock, @@ -66,7 +66,7 @@ pub trait BeaconChain: Send + Sync { impl BeaconChain for RawBeaconChain where - T: ClientDB + Sized, + T: Store, U: SlotClock, F: ForkChoice, E: EthSpec, diff --git a/beacon_node/rpc/src/beacon_chain.rs b/beacon_node/rpc/src/beacon_chain.rs index 7e75b32ce..9b6b05e9d 100644 --- a/beacon_node/rpc/src/beacon_chain.rs +++ b/beacon_node/rpc/src/beacon_chain.rs @@ -1,6 +1,6 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ - db::ClientDB, + db::Store, fork_choice::ForkChoice, parking_lot::{RwLockReadGuard, RwLockWriteGuard}, slot_clock::SlotClock, @@ -36,7 +36,7 @@ pub trait BeaconChain: Send + Sync { impl BeaconChain for RawBeaconChain where - T: ClientDB + Sized, + T: Store, U: SlotClock, F: ForkChoice, E: EthSpec, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index a1df0b63b..ef2121882 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -74,7 +74,7 @@ fn main() { 
.value_name("DB") .help("Type of database to use.") .takes_value(true) - .possible_values(&["rocks", "memory"]) + .possible_values(&["disk", "memory"]) .default_value("memory"), ) .get_matches(); diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 52dc3973b..ec421fc6e 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,7 +1,6 @@ use client::client_types::{DiskDBTestingClientType, MemoryDBTestingClientType}; -use client::error; +use client::{error, DBType}; use client::{notifier, Client, ClientConfig, ClientTypes}; -use db::DBType; use futures::sync::oneshot; use futures::Future; use slog::info; @@ -26,7 +25,7 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul let executor = runtime.executor(); match config.db_type { - DBType::RocksDB => { + DBType::Disk => { info!( log, "BeaconNode starting"; diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index 067d39da4..b4f4ede2c 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -14,8 +14,8 @@ extern crate yaml_rust; pub use beacon_chain::BeaconChain; use bls::Signature; -use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::MemoryDB; +use db::Store; // use env_logger::{Builder, Env}; use fork_choice::{ BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost, @@ -106,7 +106,7 @@ fn test_yaml_vectors( // process the tests for test_case in test_cases { // setup a fresh test - let (mut fork_choice, block_store, state_root) = + let (mut fork_choice, store, state_root) = setup_inital_state(&fork_choice_algo, emulated_validators); // keep a hashmap of block_id's to block_hashes (random hashes to abstract block_id) @@ -149,9 +149,7 @@ fn test_yaml_vectors( }; // Store the block. 
- block_store - .put(&block_hash, &ssz_encode(&beacon_block)[..]) - .unwrap(); + store.put(&block_hash, &beacon_block).unwrap(); // run add block for fork choice if not genesis if parent_id != block_id { @@ -222,29 +220,26 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec { fn setup_inital_state( fork_choice_algo: &ForkChoiceAlgorithm, num_validators: usize, -) -> (Box, Arc>, Hash256) { - let db = Arc::new(MemoryDB::open()); - let block_store = Arc::new(BeaconBlockStore::new(db.clone())); - let state_store = Arc::new(BeaconStateStore::new(db.clone())); +) -> (Box, Arc, Hash256) { + let store = Arc::new(MemoryDB::open()); // the fork choice instantiation let fork_choice: Box = match fork_choice_algo { ForkChoiceAlgorithm::OptimizedLMDGhost => { let f: OptimizedLMDGhost = - OptimizedLMDGhost::new(block_store.clone(), state_store.clone()); + OptimizedLMDGhost::new(store.clone()); Box::new(f) } ForkChoiceAlgorithm::BitwiseLMDGhost => { let f: BitwiseLMDGhost = - BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); + BitwiseLMDGhost::new(store.clone()); Box::new(f) } ForkChoiceAlgorithm::SlowLMDGhost => { - let f: SlowLMDGhost = - SlowLMDGhost::new(block_store.clone(), state_store.clone()); + let f: SlowLMDGhost = SlowLMDGhost::new(store.clone()); Box::new(f) } - ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())), + ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(store.clone())), }; let spec = FoundationEthSpec::spec(); @@ -255,12 +250,10 @@ fn setup_inital_state( let (state, _keypairs) = state_builder.build(); let state_root = state.canonical_root(); - state_store - .put(&state_root, &ssz_encode(&state)[..]) - .unwrap(); + store.put(&state_root, &state).unwrap(); // return initialised vars - (fork_choice, block_store, state_root) + (fork_choice, store, state_root) } // convert a block_id into a Hash256 -- assume input is hex encoded; From 3bcf5ba7064e92d28dcbf0f7d69d6386bdea4176 Mon Sep 17 00:00:00 
2001 From: Paul Hauner Date: Tue, 21 May 2019 18:20:23 +1000 Subject: [PATCH 19/21] Rename `db` crate to `store` --- Cargo.toml | 2 +- beacon_node/Cargo.toml | 2 +- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/errors.rs | 2 +- beacon_node/beacon_chain/src/initialise.rs | 20 +++++++------- beacon_node/beacon_chain/src/lib.rs | 2 +- .../testing_beacon_chain_builder.rs | 12 ++++++--- beacon_node/client/Cargo.toml | 2 +- beacon_node/client/src/client_types.rs | 16 ++++++------ beacon_node/client/src/lib.rs | 2 +- beacon_node/network/src/beacon_chain.rs | 2 +- beacon_node/rpc/Cargo.toml | 2 +- beacon_node/rpc/src/beacon_chain.rs | 2 +- beacon_node/src/run.rs | 10 +++---- beacon_node/{db => store}/Cargo.toml | 2 +- .../{db => store}/src/block_at_slot.rs | 4 +-- beacon_node/{db => store}/src/disk_db.rs | 8 +++--- beacon_node/{db => store}/src/errors.rs | 0 beacon_node/{db => store}/src/impls.rs | 0 .../{db => store}/src/leveldb_store.rs | 0 beacon_node/{db => store}/src/lib.rs | 10 +++---- beacon_node/{db => store}/src/memory_db.rs | 14 +++++----- eth2/fork_choice/Cargo.toml | 2 +- eth2/fork_choice/src/bitwise_lmd_ghost.rs | 4 +-- eth2/fork_choice/src/lib.rs | 6 ++--- eth2/fork_choice/src/longest_chain.rs | 2 +- eth2/fork_choice/src/optimized_lmd_ghost.rs | 4 +-- eth2/fork_choice/src/slow_lmd_ghost.rs | 4 +-- eth2/fork_choice/tests/tests.rs | 26 +++++-------------- 30 files changed, 76 insertions(+), 90 deletions(-) rename beacon_node/{db => store}/Cargo.toml (96%) rename beacon_node/{db => store}/src/block_at_slot.rs (98%) rename beacon_node/{db => store}/src/disk_db.rs (97%) rename beacon_node/{db => store}/src/errors.rs (100%) rename beacon_node/{db => store}/src/impls.rs (100%) rename beacon_node/{db => store}/src/leveldb_store.rs (100%) rename beacon_node/{db => store}/src/lib.rs (95%) rename beacon_node/{db => store}/src/memory_db.rs (79%) diff --git a/Cargo.toml b/Cargo.toml index 
893189941..00c354309 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ members = [ "eth2/utils/fisher_yates_shuffle", "eth2/utils/test_random_derive", "beacon_node", - "beacon_node/db", + "beacon_node/store", "beacon_node/client", "beacon_node/network", "beacon_node/eth2-libp2p", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index da31bfa77..d78a5b596 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] types = { path = "../eth2/types" } -db = { path = "./db" } +store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 34b6e11c6..3a84256a7 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] bls = { path = "../../eth2/utils/bls" } boolean-bitfield = { path = "../../eth2/utils/boolean-bitfield" } -db = { path = "../db" } +store = { path = "../store" } failure = "0.1" failure_derive = "0.1" hashing = { path = "../../eth2/utils/hashing" } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7c52fed5d..f2c4b3dbe 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,6 +1,5 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; -use db::{Error as DBError, Store}; use fork_choice::{ForkChoice, ForkChoiceError}; use log::{debug, trace}; use operation_pool::DepositInsertStatus; @@ -16,6 +15,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, SlotProcessingError, }; use std::sync::Arc; +use store::{Error as DBError, Store}; use types::*; #[derive(Debug, PartialEq)] diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index dea7f63d9..73884916a 
100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -20,7 +20,7 @@ pub enum BeaconChainError { UnableToReadSlot, BeaconStateError(BeaconStateError), DBInconsistent(String), - DBError(db::Error), + DBError(store::Error), ForkChoiceError(ForkChoiceError), MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs index 197f7ace0..b9d950ed5 100644 --- a/beacon_node/beacon_chain/src/initialise.rs +++ b/beacon_node/beacon_chain/src/initialise.rs @@ -3,11 +3,11 @@ // testnet. These are examples. Also. there is code duplication which can/should be cleaned up. use crate::BeaconChain; -use db::{DiskDB, MemoryDB}; use fork_choice::BitwiseLMDGhost; use slot_clock::SystemTimeSlotClock; use std::path::PathBuf; use std::sync::Arc; +use store::{DiskStore, MemoryStore}; use tree_hash::TreeHash; use types::test_utils::TestingBeaconStateBuilder; use types::{BeaconBlock, ChainSpec, FewValidatorsEthSpec, FoundationEthSpec, Hash256}; @@ -19,14 +19,14 @@ pub fn initialise_beacon_chain( db_name: Option<&PathBuf>, ) -> Arc< BeaconChain< - DiskDB, + DiskStore, SystemTimeSlotClock, - BitwiseLMDGhost, + BitwiseLMDGhost, FoundationEthSpec, >, > { let path = db_name.expect("db_name cannot be None."); - let store = DiskDB::open(path).expect("Unable to open DB."); + let store = DiskStore::open(path).expect("Unable to open DB."); let store = Arc::new(store); let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, &spec); @@ -66,13 +66,13 @@ pub fn initialise_test_beacon_chain_with_memory_db( _db_name: Option<&PathBuf>, ) -> Arc< BeaconChain< - MemoryDB, + MemoryStore, SystemTimeSlotClock, - BitwiseLMDGhost, + BitwiseLMDGhost, FewValidatorsEthSpec, >, > { - let store = Arc::new(MemoryDB::open()); + let store = Arc::new(MemoryStore::open()); let state_builder = 
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, spec); let (genesis_state, _keypairs) = state_builder.build(); @@ -111,14 +111,14 @@ pub fn initialise_test_beacon_chain_with_disk_db( db_name: Option<&PathBuf>, ) -> Arc< BeaconChain< - DiskDB, + DiskStore, SystemTimeSlotClock, - BitwiseLMDGhost, + BitwiseLMDGhost, FewValidatorsEthSpec, >, > { let path = db_name.expect("db_name cannot be None."); - let store = DiskDB::open(path).expect("Unable to open DB."); + let store = DiskStore::open(path).expect("Unable to open DB."); let store = Arc::new(store); let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(8, spec); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d8d85a8a6..6ac01a5d5 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -7,7 +7,6 @@ pub mod test_utils; pub use self::beacon_chain::{BeaconChain, BlockProcessingOutcome, InvalidBlock, ValidBlock}; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; -pub use db; pub use fork_choice; pub use parking_lot; pub use slot_clock; @@ -15,4 +14,5 @@ pub use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, ExitValidationError, ProposerSlashingValidationError, TransferValidationError, }; +pub use store; pub use types; diff --git a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs index ce3588674..b6b1defcc 100644 --- a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs @@ -1,14 +1,18 @@ pub use crate::{BeaconChain, BeaconChainError, CheckPoint}; -use db::MemoryDB; use fork_choice::BitwiseLMDGhost; use slot_clock::TestingSlotClock; use std::sync::Arc; +use 
store::MemoryStore; use tree_hash::TreeHash; use types::*; use types::{test_utils::TestingBeaconStateBuilder, EthSpec, FewValidatorsEthSpec}; -type TestingBeaconChain = - BeaconChain, E>; +type TestingBeaconChain = BeaconChain< + MemoryStore, + TestingSlotClock, + BitwiseLMDGhost, + E, +>; pub struct TestingBeaconChainBuilder { state_builder: TestingBeaconStateBuilder, @@ -16,7 +20,7 @@ pub struct TestingBeaconChainBuilder { impl TestingBeaconChainBuilder { pub fn build(self, spec: &ChainSpec) -> TestingBeaconChain { - let store = Arc::new(MemoryDB::open()); + let store = Arc::new(MemoryStore::open()); let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64()); let fork_choice = BitwiseLMDGhost::new(store.clone()); diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8956dbb07..4a976eec4 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } -db = { path = "../db" } +store = { path = "../store" } rpc = { path = "../rpc" } fork_choice = { path = "../../eth2/fork_choice" } types = { path = "../../eth2/types" } diff --git a/beacon_node/client/src/client_types.rs b/beacon_node/client/src/client_types.rs index c54028d28..4cce42a06 100644 --- a/beacon_node/client/src/client_types.rs +++ b/beacon_node/client/src/client_types.rs @@ -1,9 +1,9 @@ use crate::{ArcBeaconChain, ClientConfig}; use beacon_chain::{ - db::{DiskDB, MemoryDB, Store}, fork_choice::BitwiseLMDGhost, initialise, slot_clock::{SlotClock, SystemTimeSlotClock}, + store::{DiskStore, MemoryStore, Store}, }; use fork_choice::ForkChoice; use types::{EthSpec, FewValidatorsEthSpec, FoundationEthSpec}; @@ -22,7 +22,7 @@ pub trait ClientTypes { pub struct StandardClientType; impl ClientTypes for StandardClientType { - type DB = DiskDB; + type DB = DiskStore; type SlotClock = SystemTimeSlotClock; type ForkChoice = BitwiseLMDGhost; type 
EthSpec = FoundationEthSpec; @@ -34,10 +34,10 @@ impl ClientTypes for StandardClientType { } } -pub struct MemoryDBTestingClientType; +pub struct MemoryStoreTestingClientType; -impl ClientTypes for MemoryDBTestingClientType { - type DB = MemoryDB; +impl ClientTypes for MemoryStoreTestingClientType { + type DB = MemoryStore; type SlotClock = SystemTimeSlotClock; type ForkChoice = BitwiseLMDGhost; type EthSpec = FewValidatorsEthSpec; @@ -49,10 +49,10 @@ impl ClientTypes for MemoryDBTestingClientType { } } -pub struct DiskDBTestingClientType; +pub struct DiskStoreTestingClientType; -impl ClientTypes for DiskDBTestingClientType { - type DB = DiskDB; +impl ClientTypes for DiskStoreTestingClientType { + type DB = DiskStore; type SlotClock = SystemTimeSlotClock; type ForkChoice = BitwiseLMDGhost; type EthSpec = FewValidatorsEthSpec; diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 00478b475..71d4013d3 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -8,7 +8,6 @@ pub mod notifier; use beacon_chain::BeaconChain; pub use client_config::{ClientConfig, DBType}; pub use client_types::ClientTypes; -use db::Store; use exit_future::Signal; use fork_choice::ForkChoice; use futures::{future::Future, Stream}; @@ -18,6 +17,7 @@ use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; use std::time::{Duration, Instant}; +use store::Store; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; use types::EthSpec; diff --git a/beacon_node/network/src/beacon_chain.rs b/beacon_node/network/src/beacon_chain.rs index f123e4540..2a42376f7 100644 --- a/beacon_node/network/src/beacon_chain.rs +++ b/beacon_node/network/src/beacon_chain.rs @@ -1,9 +1,9 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ - db::Store, fork_choice::ForkChoice, parking_lot::RwLockReadGuard, slot_clock::SlotClock, + store::Store, types::{BeaconState, ChainSpec}, AttestationValidationError, CheckPoint, }; 
diff --git a/beacon_node/rpc/Cargo.toml b/beacon_node/rpc/Cargo.toml index 3fc52c6b1..a361c94ab 100644 --- a/beacon_node/rpc/Cargo.toml +++ b/beacon_node/rpc/Cargo.toml @@ -17,7 +17,7 @@ protos = { path = "../../protos" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protobuf = "2.0.2" clap = "2.32.0" -db = { path = "../db" } +store = { path = "../store" } dirs = "1.0.3" futures = "0.1.23" slog = "^2.2.3" diff --git a/beacon_node/rpc/src/beacon_chain.rs b/beacon_node/rpc/src/beacon_chain.rs index 9b6b05e9d..d12baf1d1 100644 --- a/beacon_node/rpc/src/beacon_chain.rs +++ b/beacon_node/rpc/src/beacon_chain.rs @@ -1,9 +1,9 @@ use beacon_chain::BeaconChain as RawBeaconChain; use beacon_chain::{ - db::Store, fork_choice::ForkChoice, parking_lot::{RwLockReadGuard, RwLockWriteGuard}, slot_clock::SlotClock, + store::Store, types::{BeaconState, ChainSpec, Signature}, AttestationValidationError, BlockProductionError, }; diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index ec421fc6e..4cf930060 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,4 +1,4 @@ -use client::client_types::{DiskDBTestingClientType, MemoryDBTestingClientType}; +use client::client_types::{DiskStoreTestingClientType, MemoryStoreTestingClientType}; use client::{error, DBType}; use client::{notifier, Client, ClientConfig, ClientTypes}; use futures::sync::oneshot; @@ -29,9 +29,9 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul info!( log, "BeaconNode starting"; - "type" => "DiskDBTestingClientType" + "type" => "DiskStoreTestingClientType" ); - let client: Client = + let client: Client = Client::new(config, log.clone(), &executor)?; run(client, executor, runtime, log) @@ -40,9 +40,9 @@ pub fn run_beacon_node(config: ClientConfig, log: &slog::Logger) -> error::Resul info!( log, "BeaconNode starting"; - "type" => "MemoryDBTestingClientType" + "type" => "MemoryStoreTestingClientType" ); - let client: 
Client = + let client: Client = Client::new(config, log.clone(), &executor)?; run(client, executor, runtime, log) diff --git a/beacon_node/db/Cargo.toml b/beacon_node/store/Cargo.toml similarity index 96% rename from beacon_node/db/Cargo.toml rename to beacon_node/store/Cargo.toml index 808d420a5..a95dafa90 100644 --- a/beacon_node/db/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "db" +name = "store" version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/beacon_node/db/src/block_at_slot.rs b/beacon_node/store/src/block_at_slot.rs similarity index 98% rename from beacon_node/db/src/block_at_slot.rs rename to beacon_node/store/src/block_at_slot.rs index 4fa9635a2..4a8abaefd 100644 --- a/beacon_node/db/src/block_at_slot.rs +++ b/beacon_node/store/src/block_at_slot.rs @@ -121,7 +121,7 @@ mod tests { #[test] fn chain_without_skips() { let n: usize = 10; - let store = MemoryDB::open(); + let store = MemoryStore::open(); let spec = FewValidatorsEthSpec::spec(); let slots: Vec = (0..n).collect(); @@ -145,7 +145,7 @@ mod tests { #[test] fn chain_with_skips() { - let store = MemoryDB::open(); + let store = MemoryStore::open(); let spec = FewValidatorsEthSpec::spec(); let slots = vec![0, 1, 2, 5]; diff --git a/beacon_node/db/src/disk_db.rs b/beacon_node/store/src/disk_db.rs similarity index 97% rename from beacon_node/db/src/disk_db.rs rename to beacon_node/store/src/disk_db.rs index e2162e29a..eb2b885c6 100644 --- a/beacon_node/db/src/disk_db.rs +++ b/beacon_node/store/src/disk_db.rs @@ -10,11 +10,11 @@ use std::path::Path; /// A on-disk database which implements the ClientDB trait. /// /// This implementation uses RocksDB with default options. -pub struct DiskDB { +pub struct DiskStore { db: DB, } -impl DiskDB { +impl DiskStore { /// Open the RocksDB database, optionally supplying columns if required. 
/// /// The RocksDB database will be contained in a directory titled @@ -71,7 +71,7 @@ impl From for DBError { } } -impl ClientDB for DiskDB { +impl ClientDB for DiskStore { /// Get the value for some key on some column. /// /// Corresponds to the `get_cf()` method on the RocksDB API. @@ -154,7 +154,7 @@ mod tests { let col_name: &str = "TestColumn"; let column_families = vec![col_name]; - let mut db = DiskDB::open(&path, None); + let mut db = DiskStore::open(&path, None); for cf in column_families { db.create_col(&cf).unwrap(); diff --git a/beacon_node/db/src/errors.rs b/beacon_node/store/src/errors.rs similarity index 100% rename from beacon_node/db/src/errors.rs rename to beacon_node/store/src/errors.rs diff --git a/beacon_node/db/src/impls.rs b/beacon_node/store/src/impls.rs similarity index 100% rename from beacon_node/db/src/impls.rs rename to beacon_node/store/src/impls.rs diff --git a/beacon_node/db/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs similarity index 100% rename from beacon_node/db/src/leveldb_store.rs rename to beacon_node/store/src/leveldb_store.rs diff --git a/beacon_node/db/src/lib.rs b/beacon_node/store/src/lib.rs similarity index 95% rename from beacon_node/db/src/lib.rs rename to beacon_node/store/src/lib.rs index 708ac698f..096a88184 100644 --- a/beacon_node/db/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -5,8 +5,8 @@ mod impls; mod leveldb_store; mod memory_db; -pub use self::leveldb_store::LevelDB as DiskDB; -pub use self::memory_db::MemoryDB; +pub use self::leveldb_store::LevelDB as DiskStore; +pub use self::memory_db::MemoryStore; pub use errors::Error; pub use types::*; pub type DBValue = Vec; @@ -154,21 +154,21 @@ mod tests { fn diskdb() { let dir = tempdir().unwrap(); let path = dir.path(); - let store = DiskDB::open(&path).unwrap(); + let store = DiskStore::open(&path).unwrap(); test_impl(store); } #[test] fn memorydb() { - let store = MemoryDB::open(); + let store = MemoryStore::open(); test_impl(store); } 
#[test] fn exists() { - let store = MemoryDB::open(); + let store = MemoryStore::open(); let key = Hash256::random(); let item = StorableThing { a: 1, b: 42 }; diff --git a/beacon_node/db/src/memory_db.rs b/beacon_node/store/src/memory_db.rs similarity index 79% rename from beacon_node/db/src/memory_db.rs rename to beacon_node/store/src/memory_db.rs index 83ff77ce1..38b0c0698 100644 --- a/beacon_node/db/src/memory_db.rs +++ b/beacon_node/store/src/memory_db.rs @@ -4,11 +4,11 @@ use std::collections::HashMap; type DBHashMap = HashMap, Vec>; -pub struct MemoryDB { +pub struct MemoryStore { db: RwLock, } -impl MemoryDB { +impl MemoryStore { pub fn open() -> Self { Self { db: RwLock::new(HashMap::new()), @@ -22,10 +22,10 @@ impl MemoryDB { } } -impl Store for MemoryDB { +impl Store for MemoryStore { /// Get the value of some key from the database. Returns `None` if the key does not exist. fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { - let column_key = MemoryDB::get_key_for_col(col, key); + let column_key = MemoryStore::get_key_for_col(col, key); Ok(self .db @@ -36,7 +36,7 @@ impl Store for MemoryDB { /// Puts a key in the database. fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { - let column_key = MemoryDB::get_key_for_col(col, key); + let column_key = MemoryStore::get_key_for_col(col, key); self.db.write().insert(column_key, val.to_vec()); @@ -45,14 +45,14 @@ impl Store for MemoryDB { /// Return true if some key exists in some column. fn key_exists(&self, col: &str, key: &[u8]) -> Result { - let column_key = MemoryDB::get_key_for_col(col, key); + let column_key = MemoryStore::get_key_for_col(col, key); Ok(self.db.read().contains_key(&column_key)) } /// Delete some key from the database. 
fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { - let column_key = MemoryDB::get_key_for_col(col, key); + let column_key = MemoryStore::get_key_for_col(col, key); self.db.write().remove(&column_key); diff --git a/eth2/fork_choice/Cargo.toml b/eth2/fork_choice/Cargo.toml index 819b84055..f2e6825ed 100644 --- a/eth2/fork_choice/Cargo.toml +++ b/eth2/fork_choice/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Age Manning "] edition = "2018" [dependencies] -db = { path = "../../beacon_node/db" } +store = { path = "../../beacon_node/store" } ssz = { path = "../utils/ssz" } types = { path = "../types" } log = "0.4.6" diff --git a/eth2/fork_choice/src/bitwise_lmd_ghost.rs b/eth2/fork_choice/src/bitwise_lmd_ghost.rs index a76970f01..0e579c0b9 100644 --- a/eth2/fork_choice/src/bitwise_lmd_ghost.rs +++ b/eth2/fork_choice/src/bitwise_lmd_ghost.rs @@ -1,13 +1,11 @@ //! The optimised bitwise LMD-GHOST fork choice rule. -extern crate bit_vec; - use crate::{ForkChoice, ForkChoiceError}; use bit_vec::BitVec; -use db::Store; use log::{debug, trace}; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot, SlotHeight}; //TODO: Pruning - Children diff --git a/eth2/fork_choice/src/lib.rs b/eth2/fork_choice/src/lib.rs index ed3a1ce08..ffc40e6c6 100644 --- a/eth2/fork_choice/src/lib.rs +++ b/eth2/fork_choice/src/lib.rs @@ -21,9 +21,9 @@ pub mod longest_chain; pub mod optimized_lmd_ghost; pub mod slow_lmd_ghost; -// use db::stores::BeaconBlockAtSlotError; -// use db::DBError; -use db::Error as DBError; +// use store::stores::BeaconBlockAtSlotError; +// use store::DBError; +use store::Error as DBError; use types::{BeaconBlock, ChainSpec, Hash256}; pub use bitwise_lmd_ghost::BitwiseLMDGhost; diff --git a/eth2/fork_choice/src/longest_chain.rs b/eth2/fork_choice/src/longest_chain.rs index 6aaf56c32..11453cf49 100644 --- a/eth2/fork_choice/src/longest_chain.rs +++ 
b/eth2/fork_choice/src/longest_chain.rs @@ -1,6 +1,6 @@ use crate::{ForkChoice, ForkChoiceError}; -use db::Store; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, ChainSpec, Hash256, Slot}; pub struct LongestChain { diff --git a/eth2/fork_choice/src/optimized_lmd_ghost.rs b/eth2/fork_choice/src/optimized_lmd_ghost.rs index aa7d65c85..dba6e60da 100644 --- a/eth2/fork_choice/src/optimized_lmd_ghost.rs +++ b/eth2/fork_choice/src/optimized_lmd_ghost.rs @@ -1,13 +1,11 @@ //! The optimised bitwise LMD-GHOST fork choice rule. -extern crate bit_vec; - use crate::{ForkChoice, ForkChoiceError}; -use db::Store; use log::{debug, trace}; use std::cmp::Ordering; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot, SlotHeight}; //TODO: Pruning - Children diff --git a/eth2/fork_choice/src/slow_lmd_ghost.rs b/eth2/fork_choice/src/slow_lmd_ghost.rs index a41eacbb2..888356417 100644 --- a/eth2/fork_choice/src/slow_lmd_ghost.rs +++ b/eth2/fork_choice/src/slow_lmd_ghost.rs @@ -1,11 +1,9 @@ -extern crate db; - use crate::{ForkChoice, ForkChoiceError}; -use db::Store; use log::{debug, trace}; use std::collections::HashMap; use std::marker::PhantomData; use std::sync::Arc; +use store::Store; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, Slot}; //TODO: Pruning and syncing diff --git a/eth2/fork_choice/tests/tests.rs b/eth2/fork_choice/tests/tests.rs index b4f4ede2c..0327e8cb3 100644 --- a/eth2/fork_choice/tests/tests.rs +++ b/eth2/fork_choice/tests/tests.rs @@ -1,26 +1,14 @@ #![cfg(not(debug_assertions))] // Tests the available fork-choice algorithms -extern crate beacon_chain; -extern crate bls; -extern crate db; -// extern crate env_logger; // for debugging -extern crate fork_choice; -extern crate hex; -extern crate log; -extern crate slot_clock; -extern crate types; -extern crate yaml_rust; - pub use beacon_chain::BeaconChain; use 
bls::Signature; -use db::MemoryDB; -use db::Store; +use store::MemoryStore; +use store::Store; // use env_logger::{Builder, Env}; use fork_choice::{ BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost, }; -use ssz::ssz_encode; use std::collections::HashMap; use std::sync::Arc; use std::{fs::File, io::prelude::*, path::PathBuf}; @@ -220,23 +208,23 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec { fn setup_inital_state( fork_choice_algo: &ForkChoiceAlgorithm, num_validators: usize, -) -> (Box, Arc, Hash256) { - let store = Arc::new(MemoryDB::open()); +) -> (Box, Arc, Hash256) { + let store = Arc::new(MemoryStore::open()); // the fork choice instantiation let fork_choice: Box = match fork_choice_algo { ForkChoiceAlgorithm::OptimizedLMDGhost => { - let f: OptimizedLMDGhost = + let f: OptimizedLMDGhost = OptimizedLMDGhost::new(store.clone()); Box::new(f) } ForkChoiceAlgorithm::BitwiseLMDGhost => { - let f: BitwiseLMDGhost = + let f: BitwiseLMDGhost = BitwiseLMDGhost::new(store.clone()); Box::new(f) } ForkChoiceAlgorithm::SlowLMDGhost => { - let f: SlowLMDGhost = SlowLMDGhost::new(store.clone()); + let f: SlowLMDGhost = SlowLMDGhost::new(store.clone()); Box::new(f) } ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(store.clone())), From c840b76cac0609b5816f275fa11858e08cac1e40 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 18:49:24 +1000 Subject: [PATCH 20/21] Tidy `store` crate, add comments --- beacon_node/store/src/leveldb_store.rs | 9 ++- beacon_node/store/src/lib.rs | 56 ++++++++++++++++--- .../src/{memory_db.rs => memory_store.rs} | 6 +- beacon_node/store/src/store.rs | 37 ++++++++++++ 4 files changed, 96 insertions(+), 12 deletions(-) rename beacon_node/store/src/{memory_db.rs => memory_store.rs} (92%) create mode 100644 beacon_node/store/src/store.rs diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 10643d0cd..09aec46fa 100644 --- 
a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -6,11 +6,13 @@ use leveldb::error::Error as LevelDBError; use leveldb::options::{Options, ReadOptions, WriteOptions}; use std::path::Path; +/// A wrapped leveldb database. pub struct LevelDB { db: Database, } impl LevelDB { + /// Open a database at `path`, creating a new database if one does not already exist. pub fn open(path: &Path) -> Result { let mut options = Options::new(); @@ -36,6 +38,7 @@ impl LevelDB { } } +/// Used for keying leveldb. pub struct BytesKey { key: Vec, } @@ -51,7 +54,8 @@ impl Key for BytesKey { } impl Store for LevelDB { - fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { + /// Retrieve some bytes in `column` with `key`. + fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { let column_key = Self::get_key_for_col(col, key); self.db @@ -59,6 +63,7 @@ impl Store for LevelDB { .map_err(Into::into) } + /// Store some `value` in `column`, indexed with `key`. fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); @@ -67,6 +72,7 @@ impl Store for LevelDB { .map_err(Into::into) } + /// Return `true` if `key` exists in `column`. fn key_exists(&self, col: &str, key: &[u8]) -> Result { let column_key = Self::get_key_for_col(col, key); @@ -76,6 +82,7 @@ impl Store for LevelDB { .and_then(|val| Ok(val.is_some())) } + /// Removes `key` from `column`. fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); self.db diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 096a88184..59875601a 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -1,33 +1,55 @@ -// mod disk_db; +//! Storage functionality for Lighthouse. +//! +//! Provides the following stores: +//! +//! - `DiskStore`: an on-disk store backed by leveldb. Used in production. +//! 
- `MemoryStore`: an in-memory store backed by a hash-map. Used for testing. +//! +//! Provides a simple API for storing/retrieving all types that sometimes need type-hints. See +//! tests for implementation examples. + mod block_at_slot; mod errors; mod impls; mod leveldb_store; -mod memory_db; +mod memory_store; pub use self::leveldb_store::LevelDB as DiskStore; -pub use self::memory_db::MemoryStore; +pub use self::memory_store::MemoryStore; pub use errors::Error; pub use types::*; -pub type DBValue = Vec<u8>; +/// An object capable of storing and retrieving objects implementing `StoreItem`. +/// +/// A `Store` is fundamentally backed by a key-value database, however it provides support for +/// columns. A simple column implementation might involve prefixing a key with some bytes unique to +/// each column. pub trait Store: Sync + Send + Sized { + /// Store an item in `Self`. fn put(&self, key: &Hash256, item: &impl StoreItem) -> Result<(), Error> { item.db_put(self, key) } + /// Retrieve an item from `Self`. fn get<I: StoreItem>(&self, key: &Hash256) -> Result<Option<I>, Error> { I::db_get(self, key) } + /// Returns `true` if the given key represents an item in `Self`. fn exists<I: StoreItem>(&self, key: &Hash256) -> Result<bool, Error> { I::db_exists(self, key) } + /// Remove an item from `Self`. fn delete<I: StoreItem>(&self, key: &Hash256) -> Result<(), Error> { I::db_delete(self, key) } + /// Given the root of an existing block in the store (`start_block_root`), return a parent + /// block with the specified `slot`. + /// + /// Returns `None` if no parent block exists at that slot, or if `slot` is greater than the + /// slot of `start_block_root`. fn get_block_at_preceeding_slot( &self, start_block_root: Hash256, @@ -36,15 +58,20 @@ pub trait Store: Sync + Send + Sized { block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) } - fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<DBValue>, Error>; + /// Retrieve some bytes in `column` with `key`. 
+ fn get_bytes(&self, column: &str, key: &[u8]) -> Result>, Error>; - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error>; + /// Store some `value` in `column`, indexed with `key`. + fn put_bytes(&self, column: &str, key: &[u8], value: &[u8]) -> Result<(), Error>; - fn key_exists(&self, col: &str, key: &[u8]) -> Result; + /// Return `true` if `key` exists in `column`. + fn key_exists(&self, column: &str, key: &[u8]) -> Result; - fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; + /// Removes `key` from `column`. + fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>; } +/// A unique column identifier. pub enum DBColumn { BeaconBlock, BeaconState, @@ -62,22 +89,31 @@ impl<'a> Into<&'a str> for DBColumn { } } +/// An item that may be stored in a `Store`. +/// +/// Provides default methods that are suitable for most applications, however when overridden they +/// provide full customizability of `Store` operations. pub trait StoreItem: Sized { + /// Identifies which column this item should be placed in. fn db_column() -> DBColumn; + /// Serialize `self` as bytes. fn as_store_bytes(&self) -> Vec; + /// De-serialize `self` from bytes. fn from_store_bytes(bytes: &mut [u8]) -> Result; + /// Store `self`. fn db_put(&self, store: &impl Store, key: &Hash256) -> Result<(), Error> { let column = Self::db_column().into(); let key = key.as_bytes(); store .put_bytes(column, key, &self.as_store_bytes()) - .map_err(|e| e.into()) + .map_err(Into::into) } + /// Retrieve an instance of `Self`. fn db_get(store: &impl Store, key: &Hash256) -> Result, Error> { let column = Self::db_column().into(); let key = key.as_bytes(); @@ -88,6 +124,7 @@ pub trait StoreItem: Sized { } } + /// Return `true` if an instance of `Self` exists in `Store`. 
fn db_exists(store: &impl Store, key: &Hash256) -> Result { let column = Self::db_column().into(); let key = key.as_bytes(); @@ -95,6 +132,7 @@ pub trait StoreItem: Sized { store.key_exists(column, key) } + /// Delete `self` from the `Store`. fn db_delete(store: &impl Store, key: &Hash256) -> Result<(), Error> { let column = Self::db_column().into(); let key = key.as_bytes(); diff --git a/beacon_node/store/src/memory_db.rs b/beacon_node/store/src/memory_store.rs similarity index 92% rename from beacon_node/store/src/memory_db.rs rename to beacon_node/store/src/memory_store.rs index 38b0c0698..086a16c26 100644 --- a/beacon_node/store/src/memory_db.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,14 +1,16 @@ -use super::{DBValue, Error, Store}; +use super::{Error, Store}; use parking_lot::RwLock; use std::collections::HashMap; type DBHashMap = HashMap, Vec>; +/// A thread-safe `HashMap` wrapper. pub struct MemoryStore { db: RwLock, } impl MemoryStore { + /// Create a new, empty database. pub fn open() -> Self { Self { db: RwLock::new(HashMap::new()), @@ -24,7 +26,7 @@ impl MemoryStore { impl Store for MemoryStore { /// Get the value of some key from the database. Returns `None` if the key does not exist. 
- fn get_bytes(&self, col: &str, key: &[u8]) -> Result, Error> { + fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { let column_key = MemoryStore::get_key_for_col(col, key); Ok(self diff --git a/beacon_node/store/src/store.rs b/beacon_node/store/src/store.rs new file mode 100644 index 000000000..5d18c7ba5 --- /dev/null +++ b/beacon_node/store/src/store.rs @@ -0,0 +1,37 @@ +use super::*; + +pub type Vec = Vec; + +pub trait Store: Sync + Send + Sized { + fn put(&self, key: &Hash256, item: &impl StoreItem) -> Result<(), Error> { + item.db_put(self, key) + } + + fn get(&self, key: &Hash256) -> Result, Error> { + I::db_get(self, key) + } + + fn exists(&self, key: &Hash256) -> Result { + I::db_exists(self, key) + } + + fn delete(&self, key: &Hash256) -> Result<(), Error> { + I::db_delete(self, key) + } + + fn get_block_at_preceeding_slot( + &self, + start_block_root: Hash256, + slot: Slot, + ) -> Result, Error> { + block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) + } + + fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error>; + + fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error>; + + fn key_exists(&self, col: &str, key: &[u8]) -> Result; + + fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; +} From b41f91db1d66b91341a4af95903dd9d154bd6b82 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 21 May 2019 19:32:07 +1000 Subject: [PATCH 21/21] Rename disk db dir --- beacon_node/client/src/client_config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/client/src/client_config.rs b/beacon_node/client/src/client_config.rs index 4309b8a64..8d7176c2c 100644 --- a/beacon_node/client/src/client_config.rs +++ b/beacon_node/client/src/client_config.rs @@ -53,7 +53,7 @@ impl Default for ClientConfig { // default to memory db for now db_type: DBType::Memory, // default db name for disk-based dbs - db_name: data_dir.join("chain.db"), + db_name: 
data_dir.join("chain_db"), rpc_conf: rpc::RPCConfig::default(), } }