## Proposed Changes

In an attempt to fix OOM issues and database consistency issues observed by some users after the introduction of compaction in v0.3.4, this PR makes the following changes:

* Run compaction less often: roughly every 1024 epochs, including after long periods of non-finality. I think the division check proposed by Paul is pretty solid, and ensures we don't miss any events where we should be compacting (see the sketch below). LevelDB lacks an easy way to check the size of the DB, which would be another good trigger.
* Make it possible to disable compaction on finalization using `--auto-compact-db=false`.
* Make it possible to trigger a manual, single-threaded foreground compaction on start-up using `--compact-db`.
* Downgrade the pruning log to `DEBUG`, as it's particularly noisy during sync.

I would like to ship these changes to affected users ASAP, and will document them further in the Advanced Database section of the book if they prove effective.
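For context, the epoch-based trigger boils down to a simple division check. The sketch below is illustrative only (the constant and function names are mine, not the actual implementation): compaction fires whenever the previous and new finalized epochs fall into different 1024-epoch periods, so the first finalization after a long stretch of non-finality still crosses a period boundary and triggers it.

```rust
/// Illustrative only: assumes this check runs whenever the finalized checkpoint advances.
const COMPACTION_PERIOD_EPOCHS: u64 = 1024;

/// Compact iff the previous and new finalized epochs lie in different
/// 1024-epoch periods. A long gap without finality necessarily crosses a
/// period boundary, so the check also covers recovery from non-finality.
fn should_compact(prev_finalized_epoch: u64, new_finalized_epoch: u64) -> bool {
    prev_finalized_epoch / COMPACTION_PERIOD_EPOCHS
        != new_finalized_epoch / COMPACTION_PERIOD_EPOCHS
}
```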
```rust
use crate::{DBColumn, Error, StoreItem};
use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use types::{EthSpec, MinimalEthSpec};

pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048;
pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;

/// Database configuration parameters.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct StoreConfig {
    /// Number of slots to wait between storing restore points in the freezer database.
    pub slots_per_restore_point: u64,
    /// Maximum number of blocks to store in the in-memory block cache.
    pub block_cache_size: usize,
    /// Whether to compact the database on initialization.
    pub compact_on_init: bool,
    /// Whether to compact the database during database pruning.
    pub compact_on_prune: bool,
}

/// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params.
#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)]
pub struct OnDiskStoreConfig {
    pub slots_per_restore_point: u64,
    // NOTE: redundant, see https://github.com/sigp/lighthouse/issues/1784
    pub _block_cache_size: usize,
}

#[derive(Debug, Clone)]
pub enum StoreConfigError {
    MismatchedSlotsPerRestorePoint { config: u64, on_disk: u64 },
}

impl Default for StoreConfig {
    fn default() -> Self {
        Self {
            // Safe default for tests, shouldn't ever be read by a CLI node.
            slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64,
            block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
            compact_on_init: false,
            compact_on_prune: true,
        }
    }
}

impl StoreConfig {
    pub fn as_disk_config(&self) -> OnDiskStoreConfig {
        OnDiskStoreConfig {
            slots_per_restore_point: self.slots_per_restore_point,
            _block_cache_size: DEFAULT_BLOCK_CACHE_SIZE,
        }
    }

    pub fn check_compatibility(
        &self,
        on_disk_config: &OnDiskStoreConfig,
    ) -> Result<(), StoreConfigError> {
        if self.slots_per_restore_point != on_disk_config.slots_per_restore_point {
            return Err(StoreConfigError::MismatchedSlotsPerRestorePoint {
                config: self.slots_per_restore_point,
                on_disk: on_disk_config.slots_per_restore_point,
            });
        }
        Ok(())
    }
}

impl StoreItem for OnDiskStoreConfig {
    fn db_column() -> DBColumn {
        DBColumn::BeaconMeta
    }

    fn as_store_bytes(&self) -> Vec<u8> {
        self.as_ssz_bytes()
    }

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
        Ok(Self::from_ssz_bytes(bytes)?)
    }
}
```
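For reference, a minimal sketch of how these pieces could fit together at start-up. Only `as_disk_config` and `check_compatibility` come from the module above; the surrounding function and its flow are assumptions for illustration, not part of this PR:

```rust
/// Hypothetical start-up flow: persist the immutable config on a fresh database,
/// verify it against the runtime config on an existing one.
fn check_or_init_config(
    config: &StoreConfig,
    previously_on_disk: Option<OnDiskStoreConfig>,
) -> Result<OnDiskStoreConfig, StoreConfigError> {
    match previously_on_disk {
        // Existing database: `slots_per_restore_point` must match the value it was
        // created with (the block cache size is mutable at runtime and not checked).
        Some(on_disk) => {
            config.check_compatibility(&on_disk)?;
            Ok(on_disk)
        }
        // Fresh database: snapshot the immutable params for future checks.
        None => Ok(config.as_disk_config()),
    }
}
```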