diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs
index 741d9a95b..980a31390 100644
--- a/beacon_node/beacon_chain/src/beacon_chain.rs
+++ b/beacon_node/beacon_chain/src/beacon_chain.rs
@@ -3031,7 +3031,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // margin, or younger (of higher epoch number).
         if block_epoch >= import_boundary {
             if let Some(blobs) = blobs {
-                if blobs.blobs.len() > 0 {
+                if !blobs.blobs.is_empty() {
                     //FIXME(sean) using this for debugging for now
                     info!(
                         self.log, "Writing blobs to store";
@@ -3042,10 +3042,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 }
             }
         }
-        let txn_lock = self.store.hot_db.begin_rw_transaction();
 
-        if let Err(e) = self.store.do_atomically(ops) {
+        if let Err(e) = self.store.do_atomically_with_block_and_blobs_cache(ops) {
             error!(
                 self.log, "Database write failed!";
diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs
index fb1d79e96..70eec4ecc 100644
--- a/beacon_node/beacon_chain/src/block_verification.rs
+++ b/beacon_node/beacon_chain/src/block_verification.rs
@@ -1380,7 +1380,9 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
                 StoreOp::PutStateTemporaryFlag(state_root),
             ]
         };
-        chain.store.do_atomically(state_batch)?;
+        chain
+            .store
+            .do_atomically_with_block_and_blobs_cache(state_batch)?;
         drop(txn_lock);
 
         confirmed_state_roots.push(state_root);
diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
index 79910df29..85fd106ee 100644
--- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
+++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
@@ -38,7 +38,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         };
 
         let store_ops = cache.import_new_pubkeys(state)?;
-        store.do_atomically(store_ops)?;
+        store.do_atomically_with_block_and_blobs_cache(store_ops)?;
 
         Ok(cache)
     }
diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs
index 23c4977b7..e80b6fd18 100644
--- a/beacon_node/client/src/builder.rs
+++ b/beacon_node/client/src/builder.rs
@@ -68,6 +68,7 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
    gossipsub_registry: Option<Registry>,
    db_path: Option<PathBuf>,
    freezer_db_path: Option<PathBuf>,
+   blobs_db_path: Option<PathBuf>,
    http_api_config: http_api::Config,
    http_metrics_config: http_metrics::Config,
    slasher: Option<Arc<Slasher<T::EthSpec>>>,
@@ -100,6 +101,7 @@ where
            gossipsub_registry: None,
            db_path: None,
            freezer_db_path: None,
+           blobs_db_path: None,
            http_api_config: <_>::default(),
            http_metrics_config: <_>::default(),
            slasher: None,
@@ -892,6 +894,7 @@ where
        mut self,
        hot_path: &Path,
        cold_path: &Path,
+       blobs_path: Option<PathBuf>,
        config: StoreConfig,
        log: Logger,
    ) -> Result<Self, String> {
@@ -907,6 +910,7 @@ where
 
        self.db_path = Some(hot_path.into());
        self.freezer_db_path = Some(cold_path.into());
+       self.blobs_db_path = blobs_path.clone();
 
        let inner_spec = spec.clone();
        let deposit_contract_deploy_block = context
@@ -929,6 +933,7 @@ where
        let store = HotColdDB::open(
            hot_path,
            cold_path,
+           blobs_path,
            schema_upgrade,
            config,
            spec,
diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs
index 6c3a98a46..10eeb3a48 100644
--- a/beacon_node/client/src/config.rs
+++ b/beacon_node/client/src/config.rs
@@ -49,6 +49,13 @@ pub struct Config {
    pub db_name: String,
    /// Path where the freezer database will be located.
    pub freezer_db_path: Option<PathBuf>,
+   /// Path where the blobs database will be located if blobs should be in a separate database.
+   ///
+   /// The capacity required for this location varies with the data availability boundary. It
+   /// should be able to store just under 69 GB when [MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS](types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS) is 4096
+   /// epochs of 32 slots (up to 131072 bytes of data per blob, up to 4 blobs per block, and 88
+   /// bytes of [BlobsSidecar](types::BlobsSidecar) metadata per block).
+   pub blobs_db_path: Option<PathBuf>,
    pub log_file: PathBuf,
    /// If true, the node will use co-ordinated junk for eth1 values.
    ///
@@ -89,6 +96,7 @@ impl Default for Config {
            data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
            db_name: "chain_db".to_string(),
            freezer_db_path: None,
+           blobs_db_path: None,
            log_file: PathBuf::from(""),
            genesis: <_>::default(),
            store: <_>::default(),
@@ -149,11 +157,27 @@ impl Config {
            .unwrap_or_else(|| self.default_freezer_db_path())
    }
 
+   /// Returns the path to which the client may initialize the on-disk blobs database.
+   ///
+   /// Will attempt to use the user-supplied path from e.g. the CLI, or will default
+   /// to `None`.
+   pub fn get_blobs_db_path(&self) -> Option<PathBuf> {
+       self.blobs_db_path.clone()
+   }
+
    /// Get the freezer DB path, creating it if necessary.
    pub fn create_freezer_db_path(&self) -> Result<PathBuf, String> {
        ensure_dir_exists(self.get_freezer_db_path())
    }
 
+   /// Get the blobs DB path, creating it if necessary.
+   pub fn create_blobs_db_path(&self) -> Result<Option<PathBuf>, String> {
+       match self.get_blobs_db_path() {
+           Some(blobs_db_path) => Ok(Some(ensure_dir_exists(blobs_db_path)?)),
+           None => Ok(None),
+       }
+   }
+
    /// Returns the "modern" path to the data_dir.
    ///
    /// See `Self::get_data_dir` documentation for more info.
diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs
index 45c7bed1f..e8d463bbe 100644
--- a/beacon_node/http_api/src/block_id.rs
+++ b/beacon_node/http_api/src/block_id.rs
@@ -218,7 +218,10 @@ impl BlockId {
        chain: &BeaconChain<T>,
    ) -> Result<Arc<BlobsSidecar<T::EthSpec>>, warp::Rejection> {
        let root = self.root(chain)?.0;
-       match chain.store.get_blobs(&root) {
+       let Some(data_availability_boundary) = chain.data_availability_boundary() else {
+           return Err(warp_utils::reject::custom_not_found("Eip4844 fork disabled".into()));
+       };
+       match chain.get_blobs(&root, data_availability_boundary) {
            Ok(Some(blob)) => Ok(Arc::new(blob)),
            Ok(None) => Err(warp_utils::reject::custom_not_found(format!(
                "Blob with block root {} is not in the store",
diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs
index e711dfca9..eb6754aa9 100644
--- a/beacon_node/src/cli.rs
+++ b/beacon_node/src/cli.rs
@@ -28,6 +28,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                .help("Data directory for the freezer database.")
                .takes_value(true)
        )
+       .arg(
+           Arg::with_name("blobs-dir")
+               .long("blobs-dir")
+               .value_name("DIR")
+               .help("Data directory for the blobs database.")
+               .takes_value(true)
+       )
        /*
         * Network parameters.
         */
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index 7ced91274..e8128cb79 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -390,6 +390,10 @@ pub fn get_config<E: EthSpec>(
        client_config.freezer_db_path = Some(PathBuf::from(freezer_dir));
    }
 
+   if let Some(blobs_db_dir) = cli_args.value_of("blobs-dir") {
+       client_config.blobs_db_path = Some(PathBuf::from(blobs_db_dir));
+   }
+
    let (sprp, sprp_explicit) = get_slots_per_restore_point::<E>(cli_args)?;
    client_config.store.slots_per_restore_point = sprp;
    client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit;
diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs
index 650763dca..b098f57c7 100644
--- a/beacon_node/src/lib.rs
+++ b/beacon_node/src/lib.rs
@@ -64,6 +64,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
        let _datadir = client_config.create_data_dir()?;
        let db_path = client_config.create_db_path()?;
        let freezer_db_path = client_config.create_freezer_db_path()?;
+       let blobs_db_path = client_config.create_blobs_db_path()?;
        let executor = context.executor.clone();
 
        if let Some(legacy_dir) = client_config.get_existing_legacy_data_dir() {
@@ -84,7 +85,13 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
            .runtime_context(context)
            .chain_spec(spec)
            .http_api_config(client_config.http_api.clone())
-           .disk_store(&db_path, &freezer_db_path, store_config, log.clone())?;
+           .disk_store(
+               &db_path,
+               &freezer_db_path,
+               blobs_db_path,
+               store_config,
+               log.clone(),
+           )?;
 
        let builder = if let Some(slasher_config) = client_config.slasher.clone() {
            let slasher = Arc::new(
diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs
index 329133632..c70ef8986 100644
--- a/beacon_node/store/src/garbage_collection.rs
+++ b/beacon_node/store/src/garbage_collection.rs
@@ -31,7 +31,7 @@ where
                "Garbage collecting {} temporary states",
                delete_ops.len() / 2
            );
-           self.do_atomically(delete_ops)?;
+           self.do_atomically_with_block_and_blobs_cache(delete_ops)?;
        }
 
        Ok(())
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs
index 99b516ee9..3128f6596 100644
--- a/beacon_node/store/src/hot_cold_store.rs
+++ b/beacon_node/store/src/hot_cold_store.rs
@@ -35,7 +35,7 @@ use state_processing::{
 use std::cmp::min;
 use std::convert::TryInto;
 use std::marker::PhantomData;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::Duration;
 use types::consts::eip4844::MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS;
@@ -59,6 +59,8 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    pub(crate) config: StoreConfig,
    /// Cold database containing compact historical data.
    pub cold_db: Cold,
+   /// Database containing blobs. If None, the store falls back to `cold_db`.
+   pub blobs_db: Option<Cold>,
    /// Hot database containing duplicated but quick-to-access recent data.
    ///
    /// The hot database also contains all blocks.
@@ -98,6 +100,8 @@ pub enum HotColdDBError {
    MissingExecutionPayload(Hash256),
    MissingFullBlockExecutionPayloadPruned(Hash256, Slot),
    MissingAnchorInfo,
+   MissingPathToBlobsDatabase,
+   BlobsPreviouslyInDefaultStore,
    HotStateSummaryError(BeaconStateError),
    RestorePointDecodeError(ssz::DecodeError),
    BlockReplayBeaconError(BeaconStateError),
@@ -119,6 +123,7 @@ pub enum HotColdDBError {
        request_slot: Option<Slot>,
        state_root: Hash256,
    },
+   Rollback,
 }
 
 impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
@@ -134,6 +139,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
            anchor_info: RwLock::new(None),
            blob_info: RwLock::new(BlobInfo::default()),
            cold_db: MemoryStore::open(),
+           blobs_db: Some(MemoryStore::open()),
            hot_db: MemoryStore::open(),
            block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
            blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)),
@@ -157,6 +163,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
    pub fn open(
        hot_path: &Path,
        cold_path: &Path,
+       blobs_db_path: Option<PathBuf>,
        migrate_schema: impl FnOnce(Arc<Self>, SchemaVersion, SchemaVersion) -> Result<(), Error>,
        config: StoreConfig,
        spec: ChainSpec,
@@ -169,6 +176,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
            anchor_info: RwLock::new(None),
            blob_info: RwLock::new(BlobInfo::default()),
            cold_db: LevelDB::open(cold_path)?,
+           blobs_db: None,
            hot_db: LevelDB::open(hot_path)?,
            block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
            blob_cache: Mutex::new(LruCache::new(config.blob_cache_size)),
@@ -213,6 +221,53 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
            );
        }
 
+       // Open the separate blobs directory if configured, and ensure the same configuration
+       // was used on the previous run.
+       let blob_info = db.load_blob_info()?;
+       let new_blob_info = {
+           match (&blob_info, &blobs_db_path) {
+               (Some(blob_info), Some(_)) => {
+                   if !blob_info.blobs_db {
+                       return Err(HotColdDBError::BlobsPreviouslyInDefaultStore.into());
+                   }
+                   BlobInfo {
+                       oldest_blob_slot: blob_info.oldest_blob_slot,
+                       blobs_db: true,
+                   }
+               }
+               (Some(blob_info), None) => {
+                   if blob_info.blobs_db {
+                       return Err(HotColdDBError::MissingPathToBlobsDatabase.into());
+                   }
+                   BlobInfo {
+                       oldest_blob_slot: blob_info.oldest_blob_slot,
+                       blobs_db: false,
+                   }
+               }
+               (None, Some(_)) => BlobInfo {
+                   oldest_blob_slot: None,
+                   blobs_db: true,
+               }, // first time starting up node
+               (None, None) => BlobInfo {
+                   oldest_blob_slot: None,
+                   blobs_db: false,
+               }, // first time starting up node
+           }
+       };
+       if new_blob_info.blobs_db {
+           if let Some(path) = &blobs_db_path {
+               db.blobs_db = Some(LevelDB::open(path.as_path())?);
+           }
+       }
+       let blob_info = blob_info.unwrap_or(db.get_blob_info());
+       db.compare_and_set_blob_info_with_write(blob_info, new_blob_info)?;
+       info!(
+           db.log,
+           "Blobs DB initialized";
+           "use separate blobs db" => db.get_blob_info().blobs_db,
+           "path" => ?blobs_db_path
+       );
+
        // Ensure that the schema version of the on-disk database matches the software.
        // If the version is mismatched, an automatic migration will be attempted.
        let db = Arc::new(db);
@@ -508,11 +563,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
        self.hot_db
            .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())?;
        self.hot_db
-           .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes())
+           .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes())?;
+       let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db);
+       blobs_db.key_delete(DBColumn::BeaconBlob.into(), block_root.as_bytes())
    }
 
    pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobsSidecar<E>) -> Result<(), Error> {
-       self.hot_db.put_bytes(
+       let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db);
+       blobs_db.put_bytes(
            DBColumn::BeaconBlob.into(),
            block_root.as_bytes(),
            &blobs.as_ssz_bytes(),
@@ -521,21 +579,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
        Ok(())
    }
 
-   pub fn get_blobs(&self, block_root: &Hash256) -> Result<Option<BlobsSidecar<E>>, Error> {
-       // FIXME(sean) I was attempting to use a blob cache here but was getting deadlocks,
-       // may want to attempt to use one again
-       if let Some(bytes) = self
-           .hot_db
-           .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())?
-       {
-           let ret = BlobsSidecar::from_ssz_bytes(&bytes)?;
-           self.blob_cache.lock().put(*block_root, ret.clone());
-           Ok(Some(ret))
-       } else {
-           Ok(None)
-       }
-   }
-
    pub fn blobs_as_kv_store_ops(
        &self,
        key: &Hash256,
@@ -832,21 +875,75 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
        Ok(key_value_batch)
    }
 
-   pub fn do_atomically(&self, batch: Vec<StoreOp<E>>) -> Result<(), Error> {
-       // Update the block cache whilst holding a lock, to ensure that the cache updates atomically
-       // with the database.
+   pub fn do_atomically_with_block_and_blobs_cache(
+       &self,
+       batch: Vec<StoreOp<E>>,
+   ) -> Result<(), Error> {
+       let mut blobs_to_delete = Vec::new();
+       let (blobs_ops, hot_db_ops): (Vec<StoreOp<E>>, Vec<StoreOp<E>>) =
+           batch.into_iter().partition(|store_op| match store_op {
+               StoreOp::PutBlobs(_, _) => true,
+               StoreOp::DeleteBlobs(block_root) => {
+                   match self.get_blobs(block_root) {
+                       Ok(Some(blobs_sidecar)) => {
+                           blobs_to_delete.push(blobs_sidecar);
+                       }
+                       Err(e) => {
+                           error!(
+                               self.log, "Error getting blobs";
+                               "block_root" => %block_root,
+                               "error" => ?e
+                           );
+                       }
+                       _ => (),
+                   }
+                   true
+               }
+               StoreOp::PutBlock(_, _) | StoreOp::DeleteBlock(_) => false,
+               _ => false,
+           });
+
+       // Update database whilst holding a lock on cache, to ensure that the cache updates
+       // atomically with the database.
        let mut guard = self.block_cache.lock();
        let mut guard_blob = self.blob_cache.lock();
 
-       for op in &batch {
+       let blob_cache_ops = blobs_ops.clone();
+       let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db);
+       // Try to execute blobs store ops.
+       blobs_db.do_atomically(self.convert_to_kv_batch(blobs_ops)?)?;
+
+       let hot_db_cache_ops = hot_db_ops.clone();
+       // Try to execute hot db store ops.
+       let tx_res = match self.convert_to_kv_batch(hot_db_ops) {
+           Ok(kv_store_ops) => self.hot_db.do_atomically(kv_store_ops),
+           Err(e) => Err(e),
+       };
+       // Rollback on failure
+       if let Err(e) = tx_res {
+           let mut blob_cache_ops = blob_cache_ops;
+           for op in blob_cache_ops.iter_mut() {
+               let reverse_op = match op {
+                   StoreOp::PutBlobs(block_root, _) => StoreOp::DeleteBlobs(*block_root),
+                   StoreOp::DeleteBlobs(_) => match blobs_to_delete.pop() {
+                       Some(blobs) => StoreOp::PutBlobs(blobs.beacon_block_root, Arc::new(blobs)),
+                       None => return Err(HotColdDBError::Rollback.into()),
+                   },
+                   _ => return Err(HotColdDBError::Rollback.into()),
+               };
+               *op = reverse_op;
+           }
+           blobs_db.do_atomically(self.convert_to_kv_batch(blob_cache_ops)?)?;
+           return Err(e);
+       }
+
+       for op in hot_db_cache_ops {
            match op {
                StoreOp::PutBlock(block_root, block) => {
-                   guard.put(*block_root, (**block).clone());
+                   guard.put(block_root, (*block).clone());
                }
 
-               StoreOp::PutBlobs(block_root, blobs) => {
-                   guard_blob.put(*block_root, (**blobs).clone());
-               }
+               StoreOp::PutBlobs(_, _) => (),
 
                StoreOp::PutState(_, _) => (),
 
@@ -857,12 +954,10 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
                StoreOp::DeleteStateTemporaryFlag(_) => (),
 
                StoreOp::DeleteBlock(block_root) => {
-                   guard.pop(block_root);
+                   guard.pop(&block_root);
                }
 
-               StoreOp::DeleteBlobs(block_root) => {
-                   guard_blob.pop(block_root);
-               }
+               StoreOp::DeleteBlobs(_) => (),
 
                StoreOp::DeleteState(_, _) => (),
 
@@ -874,8 +969,20 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
            }
        }
 
-       self.hot_db
-           .do_atomically(self.convert_to_kv_batch(batch)?)?;
+       for op in blob_cache_ops {
+           match op {
+               StoreOp::PutBlobs(block_root, blobs) => {
+                   guard_blob.put(block_root, (*blobs).clone());
+               }
+
+               StoreOp::DeleteBlobs(block_root) => {
+                   guard_blob.pop(&block_root);
+               }
+
+               _ => (),
+           }
+       }
+
        drop(guard);
        drop(guard_blob);
@@ -1212,6 +1319,22 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
        })
    }
 
+   /// Fetch a blobs sidecar from the store.
+   pub fn get_blobs(&self, block_root: &Hash256) -> Result<Option<BlobsSidecar<E>>, Error> {
+       let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db);
+
+       match blobs_db.get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? {
+           Some(ref blobs_bytes) => {
+               let blobs = BlobsSidecar::from_ssz_bytes(blobs_bytes)?;
+               // FIXME(sean) I was attempting to use a blob cache here but was getting deadlocks,
+               // may want to attempt to use one again
+               self.blob_cache.lock().put(*block_root, blobs.clone());
+               Ok(Some(blobs))
+           }
+           None => Ok(None),
+       }
+   }
+
    /// Get a reference to the `ChainSpec` used by the database.
    pub fn get_chain_spec(&self) -> &ChainSpec {
        &self.spec
@@ -1713,7 +1836,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
            }
        }
        let payloads_pruned = ops.len();
-       self.do_atomically(ops)?;
+       self.do_atomically_with_block_and_blobs_cache(ops)?;
        info!(
            self.log,
            "Execution payload pruning complete";
@@ -1863,16 +1986,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
            }
        }
        let blobs_sidecars_pruned = ops.len();
-
-       let update_blob_info = self.compare_and_set_blob_info(
-           blob_info,
-           BlobInfo {
-               oldest_blob_slot: Some(end_slot + 1),
-           },
-       )?;
+       let new_blob_info = BlobInfo {
+           oldest_blob_slot: Some(end_slot + 1),
+           blobs_db: blob_info.blobs_db,
+       };
+       let update_blob_info = self.compare_and_set_blob_info(blob_info, new_blob_info)?;
        ops.push(StoreOp::KeyValueOp(update_blob_info));
 
-       self.do_atomically(ops)?;
+       self.do_atomically_with_block_and_blobs_cache(ops)?;
        info!(
            self.log,
            "Blobs sidecar pruning complete";
@@ -2012,7 +2133,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    }
 
    // Delete the states from the hot database if we got this far.
-   store.do_atomically(hot_db_ops)?;
+   store.do_atomically_with_block_and_blobs_cache(hot_db_ops)?;
 
    debug!(
        store.log,
diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs
index 1d7e92b80..3056c2929 100644
--- a/beacon_node/store/src/lib.rs
+++ b/beacon_node/store/src/lib.rs
@@ -101,6 +101,7 @@ pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec<u8> {
 }
 
 #[must_use]
+#[derive(Clone)]
 pub enum KeyValueStoreOp {
    PutKeyValue(Vec<u8>, Vec<u8>),
    DeleteKey(Vec<u8>),
@@ -154,6 +155,7 @@ pub trait ItemStore<E: EthSpec>: KeyValueStore<E> + Sync + Send + Sized + 'static {
 
 /// Reified key-value storage operation. Helps in modifying the storage atomically.
 /// See also https://github.com/sigp/lighthouse/issues/692
+#[derive(Clone)]
 pub enum StoreOp<'a, E: EthSpec> {
    PutBlock(Hash256, Arc<SignedBeaconBlock<E>>),
    PutState(Hash256, &'a BeaconState<E>),
diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs
index b5de0048f..92117254f 100644
--- a/beacon_node/store/src/metadata.rs
+++ b/beacon_node/store/src/metadata.rs
@@ -124,6 +124,8 @@ impl StoreItem for AnchorInfo {
 pub struct BlobInfo {
    /// The slot after which blobs are available (>=).
    pub oldest_blob_slot: Option<Slot>,
+   /// Whether a separate blobs database is in use.
+   pub blobs_db: bool,
 }
 
 impl StoreItem for BlobInfo {
diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs
index a33e6c149..7d5753434 100644
--- a/database_manager/src/lib.rs
+++ b/database_manager/src/lib.rs
@@ -104,6 +104,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                .takes_value(true)
                .default_value("0"),
        )
+       .arg(
+           Arg::with_name("blobs-dir")
+               .long("blobs-dir")
+               .value_name("DIR")
+               .help("Data directory for the blobs database.")
+               .takes_value(true),
+       )
        .subcommand(migrate_cli_app())
        .subcommand(version_cli_app())
        .subcommand(inspect_cli_app())
@@ -123,6 +130,10 @@ fn parse_client_config(
        client_config.freezer_db_path = Some(freezer_dir);
    }
 
+   if let Some(blobs_db_dir) = clap_utils::parse_optional(cli_args, "blobs-dir")? {
+       client_config.blobs_db_path = Some(blobs_db_dir);
+   }
+
    let (sprp, sprp_explicit) = get_slots_per_restore_point::<E>(cli_args)?;
    client_config.store.slots_per_restore_point = sprp;
    client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit;
@@ -144,11 +155,13 @@ pub fn display_db_version<E: EthSpec>(
    let spec = runtime_context.eth2_config.spec.clone();
    let hot_path = client_config.get_db_path();
    let cold_path = client_config.get_freezer_db_path();
+   let blobs_path = client_config.get_blobs_db_path();
 
    let mut version = CURRENT_SCHEMA_VERSION;
    HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open(
        &hot_path,
        &cold_path,
+       blobs_path,
        |_, from, _| {
            version = from;
            Ok(())
@@ -200,10 +213,12 @@ pub fn inspect_db<E: EthSpec>(
    let spec = runtime_context.eth2_config.spec.clone();
    let hot_path = client_config.get_db_path();
    let cold_path = client_config.get_freezer_db_path();
+   let blobs_path = client_config.get_blobs_db_path();
 
    let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open(
        &hot_path,
        &cold_path,
+       blobs_path,
        |_, _, _| Ok(()),
        client_config.store,
        spec,
@@ -254,12 +269,14 @@ pub fn migrate_db<E: EthSpec>(
    let spec = &runtime_context.eth2_config.spec;
    let hot_path = client_config.get_db_path();
    let cold_path = client_config.get_freezer_db_path();
+   let blobs_path = client_config.get_blobs_db_path();
 
    let mut from = CURRENT_SCHEMA_VERSION;
    let to = migrate_config.to;
    let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open(
        &hot_path,
        &cold_path,
+       blobs_path,
        |_, db_initial_version, _| {
            from = db_initial_version;
            Ok(())
@@ -294,10 +311,12 @@ pub fn prune_payloads<E: EthSpec>(
    let spec = &runtime_context.eth2_config.spec;
    let hot_path = client_config.get_db_path();
    let cold_path = client_config.get_freezer_db_path();
+   let blobs_path = client_config.get_blobs_db_path();
 
    let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open(
        &hot_path,
        &cold_path,
+       blobs_path,
        |_, _, _| Ok(()),
        client_config.store,
        spec.clone(),
@@ -318,10 +337,12 @@ pub fn prune_blobs<E: EthSpec>(
    let spec = &runtime_context.eth2_config.spec;
    let hot_path = client_config.get_db_path();
    let cold_path = client_config.get_freezer_db_path();
+   let blobs_path = client_config.get_blobs_db_path();
 
    let db = HotColdDB::<E, LevelDB<E>, LevelDB<E>>::open(
        &hot_path,
        &cold_path,
+       blobs_path,
        |_, _, _| Ok(()),
        client_config.store,
        spec.clone(),
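
Note on the size bound quoted in the new `blobs_db_path` doc comment: the "just under 69 GB" figure follows directly from the constants the comment itself cites. A standalone illustrative check (not part of the diff):

// Upper bound on the blobs DB footprint while
// MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS = 4096.
fn main() {
    let epochs: u64 = 4096;
    let slots_per_epoch: u64 = 32;
    let max_blobs_per_block: u64 = 4;
    let max_bytes_per_blob: u64 = 131_072;
    let sidecar_metadata_bytes: u64 = 88;
    let max_bytes = epochs
        * slots_per_epoch
        * (max_blobs_per_block * max_bytes_per_blob + sidecar_metadata_bytes);
    // 131072 blocks * 524376 bytes = 68_731_011_072 bytes, i.e. just under 69 GB.
    println!("{:.1} GB", max_bytes as f64 / 1e9);
}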
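
The four start-up cases that `HotColdDB::open` now distinguishes (persisted `BlobInfo` vs. presence of `--blobs-dir`) can be summarised with a simplified sketch. The free-standing `BlobInfoFlag` and `OpenError` types below are hypothetical stand-ins for the real `BlobInfo` and `HotColdDBError`; the real code additionally carries `oldest_blob_slot` through:

// Simplified model of the (on-disk BlobInfo, --blobs-dir) compatibility check.
struct BlobInfoFlag {
    blobs_db: bool,
}

#[derive(Debug)]
enum OpenError {
    // DB previously used a separate blobs dir, but none was supplied this run.
    MissingPathToBlobsDatabase,
    // Blobs already live in the default (cold) store, but a separate dir was supplied.
    BlobsPreviouslyInDefaultStore,
}

fn blobs_db_enabled(
    on_disk: Option<&BlobInfoFlag>,
    blobs_dir_supplied: bool,
) -> Result<bool, OpenError> {
    match (on_disk, blobs_dir_supplied) {
        (Some(info), true) if !info.blobs_db => Err(OpenError::BlobsPreviouslyInDefaultStore),
        (Some(info), false) if info.blobs_db => Err(OpenError::MissingPathToBlobsDatabase),
        (Some(info), _) => Ok(info.blobs_db),
        // First start-up: adopt whatever the CLI says.
        (None, supplied) => Ok(supplied),
    }
}

fn main() {
    // Fresh datadir with --blobs-dir set: a separate blobs DB gets created.
    assert_eq!(blobs_db_enabled(None, true).unwrap(), true);
    // Existing separate blobs DB, but the flag was dropped on restart: refuse to start.
    assert!(blobs_db_enabled(Some(&BlobInfoFlag { blobs_db: true }), false).is_err());
}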
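
The rollback path in `do_atomically_with_block_and_blobs_cache` works because every blob op has a cheap inverse: a `PutBlobs` is compensated by a `DeleteBlobs`, and a `DeleteBlobs` by re-putting the sidecar that was read into `blobs_to_delete` before the write. A toy sketch of that inversion (hypothetical `BlobOp` type, block roots reduced to integers):

#[derive(Debug, Clone, PartialEq)]
enum BlobOp {
    Put(u64),
    Delete(u64),
}

// Invert a blob op so a failed hot-db write can be compensated: a Put is
// undone by a Delete, and a Delete is undone by re-putting the saved sidecar.
fn invert(op: &BlobOp) -> BlobOp {
    match op {
        BlobOp::Put(root) => BlobOp::Delete(*root),
        BlobOp::Delete(root) => BlobOp::Put(*root),
    }
}

fn main() {
    let applied = vec![BlobOp::Put(1), BlobOp::Delete(2)];
    let compensation: Vec<BlobOp> = applied.iter().map(invert).collect();
    assert_eq!(compensation, vec![BlobOp::Delete(1), BlobOp::Put(2)]);
}

Ordering matters here: the blobs DB is written before the hot DB, and the sidecars about to be deleted are fetched up front, so a hot-db failure can always be compensated without having already lost the data needed for the re-put.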