From e8604757a20461738686dd07dbc909834cb9c474 Mon Sep 17 00:00:00 2001
From: ethDreamer
Date: Sun, 30 Oct 2022 04:04:24 +0000
Subject: [PATCH] Deposit Cache Finalization & Fast WS Sync (#2915)

## Summary

The deposit cache now has the ability to finalize deposits. This allows it to drop deposit logs, and hashes in the deposit Merkle tree, that are no longer required to construct deposit proofs. The cache is finalized whenever the latest finalized checkpoint has a new `Eth1Data` whose deposits have all been imported. This has three benefits:

1. Improves the speed of constructing Merkle proofs for deposits: when re-constructing the Merkle tree, we only replay deposits since the last finalized checkpoint instead of all historical deposits.
2. Significantly faster weak subjectivity sync, as the deposit cache can be transferred to the newly syncing node in compressed form. The Merkle tree that stores `N` finalized deposits requires at most `log2(N)` hashes (roughly 20 hashes for a million deposits, for example). The newly syncing node then only needs to download deposits since the last finalized checkpoint to have a full tree.
3. Future-proofing in preparation for [EIP-4444](https://eips.ethereum.org/EIPS/eip-4444): execution nodes will no longer be required to store logs permanently, so we won't always have all historical logs available to us.

## More Details

The image below illustrates how the deposit contract Merkle tree evolves and finalizes, along with the resulting `DepositTreeSnapshot`:

![image](https://user-images.githubusercontent.com/37123614/151465302-5fc56284-8a69-4998-b20e-45db3934ac70.png)

## Other Considerations

I've changed the structure of the `SszDepositCache`, so once your database has been loaded and saved by this version of Lighthouse, older versions will no longer be able to load it.

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>
---
 Cargo.lock                                    |   3 +-
 beacon_node/beacon_chain/src/beacon_chain.rs  |  70 +-
 .../beacon_chain/src/block_verification.rs    |   8 +
 beacon_node/beacon_chain/src/builder.rs       |   2 +
 beacon_node/beacon_chain/src/eth1_chain.rs    |  25 +-
 .../src/eth1_finalization_cache.rs            | 498 +++++++++++
 beacon_node/beacon_chain/src/lib.rs           |   1 +
 beacon_node/beacon_chain/src/schema_change.rs |  96 +-
 .../src/schema_change/migration_schema_v13.rs | 150 ++++
 beacon_node/beacon_chain/src/test_utils.rs    |   5 +-
 beacon_node/client/src/builder.rs             |  79 +-
 beacon_node/eth1/Cargo.toml                   |   1 +
 beacon_node/eth1/src/block_cache.rs           |  52 +-
 beacon_node/eth1/src/deposit_cache.rs         | 820 +++++++++++++++---
 beacon_node/eth1/src/inner.rs                 |  46 +-
 beacon_node/eth1/src/lib.rs                   |   4 +-
 beacon_node/eth1/src/service.rs               | 105 ++-
 beacon_node/eth1/tests/test.rs                |   4 +-
 .../execution_layer/src/engine_api/http.rs    |  14 +-
 beacon_node/genesis/src/common.rs             |   4 +-
 .../genesis/src/eth1_genesis_service.rs       |   2 +-
 beacon_node/http_api/src/lib.rs               |  48 +
 beacon_node/store/src/metadata.rs             |   2 +-
 common/eth2/src/lib.rs                        |  16 +
 common/eth2/src/lighthouse.rs                 |  18 +-
 consensus/merkle_proof/src/lib.rs             | 185 +++-
 consensus/ssz/src/decode/impls.rs             |  14 +
 consensus/ssz/src/encode/impls.rs             |  36 +
 consensus/ssz/tests/tests.rs                  |  18 +
 .../src/common/deposit_data_tree.rs           |  57 +-
 consensus/types/Cargo.toml                    |   2 +-
 consensus/types/src/deposit_tree_snapshot.rs  |  83 ++
 consensus/types/src/lib.rs                    |   2 +
 database_manager/src/lib.rs                   |   1 +
 validator_client/src/lib.rs                   |   2 +
 35 files changed, 2302 insertions(+), 171 deletions(-)
 create mode 100644 beacon_node/beacon_chain/src/eth1_finalization_cache.rs
 create mode 100644
beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs create mode 100644 consensus/types/src/deposit_tree_snapshot.rs diff --git a/Cargo.lock b/Cargo.lock index 04cfd4235..6d65ccb48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1638,6 +1638,7 @@ dependencies = [ "slog", "sloggers", "state_processing", + "superstruct", "task_executor", "tokio", "tree_hash", @@ -6884,7 +6885,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "types" -version = "0.2.0" +version = "0.2.1" dependencies = [ "arbitrary", "beacon_chain", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 86b43a1a3..b23dd30de 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -16,6 +16,7 @@ use crate::chain_config::ChainConfig; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; +use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, PreparePayloadHandle}; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; @@ -117,6 +118,9 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// validator pubkey cache. pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); +/// The timeout for the eth1 finalization cache +pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200); + // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero(); pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero(); @@ -359,6 +363,8 @@ pub struct BeaconChain { pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: TimeoutRwLock, + /// A cache of eth1 deposit data at epoch boundaries for deposit finalization + pub eth1_finalization_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex, /// Caches a map of `validator_index -> validator_pubkey`. @@ -2531,9 +2537,10 @@ impl BeaconChain { block, block_root, state, - parent_block: _, + parent_block, confirmed_state_roots, payload_verification_handle, + parent_eth1_finalization_data, } = execution_pending_block; let PayloadVerificationOutcome { @@ -2585,6 +2592,8 @@ impl BeaconChain { confirmed_state_roots, payload_verification_status, count_unrealized, + parent_block, + parent_eth1_finalization_data, ) }, "payload_verification_handle", @@ -2599,6 +2608,7 @@ impl BeaconChain { /// /// An error is returned if the block was unable to be imported. It may be partially imported /// (i.e., this function is not atomic). 
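// A minimal sketch (standalone; the helper name is hypothetical, the cache
// API is the one added by this patch) of the bookkeeping `import_block`
// performs below: whenever a block crosses an epoch boundary, the boundary
// checkpoint's eth1 state is inserted into the cache and the current
// finalized checkpoint is offered to it. The cache only returns an
// `Eth1Data` once every deposit it commits to has been imported, and that
// `Eth1Data` is then handed to the eth1 service to prune the deposit tree.
fn record_and_maybe_finalize(
    cache: &mut Eth1FinalizationCache,
    epoch_boundary: Checkpoint,
    data: Eth1FinalizationData,
    finalized: &Checkpoint,
) -> Option<Eth1Data> {
    cache.insert(epoch_boundary, data);
    cache.finalize(finalized)
}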
+ #[allow(clippy::too_many_arguments)] fn import_block( &self, signed_block: Arc>, @@ -2607,6 +2617,8 @@ impl BeaconChain { confirmed_state_roots: Vec, payload_verification_status: PayloadVerificationStatus, count_unrealized: CountUnrealized, + parent_block: SignedBlindedBeaconBlock, + parent_eth1_finalization_data: Eth1FinalizationData, ) -> Result> { let current_slot = self.slot()?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); @@ -2987,6 +2999,11 @@ impl BeaconChain { let parent_root = block.parent_root(); let slot = block.slot(); + let current_eth1_finalization_data = Eth1FinalizationData { + eth1_data: state.eth1_data().clone(), + eth1_deposit_index: state.eth1_deposit_index(), + }; + let current_finalized_checkpoint = state.finalized_checkpoint(); self.snapshot_cache .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) .ok_or(Error::SnapshotCacheLockTimeout) @@ -3060,6 +3077,57 @@ impl BeaconChain { ); } + // Do not write to eth1 finalization cache for blocks older than 5 epochs + // this helps reduce noise during sync + if block_delay_total + < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32) + { + let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch()); + if parent_block_epoch < current_epoch { + // we've crossed epoch boundary, store Eth1FinalizationData + let (checkpoint, eth1_finalization_data) = + if current_slot % T::EthSpec::slots_per_epoch() == 0 { + // current block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: block_root, + }, + current_eth1_finalization_data, + ) + } else { + // parent block is the checkpoint + ( + Checkpoint { + epoch: current_epoch, + root: parent_block.canonical_root(), + }, + parent_eth1_finalization_data, + ) + }; + + if let Some(finalized_eth1_data) = self + .eth1_finalization_cache + .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT) + .and_then(|mut cache| { + cache.insert(checkpoint, eth1_finalization_data); + cache.finalize(¤t_finalized_checkpoint) + }) + { + if let Some(eth1_chain) = self.eth1_chain.as_ref() { + let finalized_deposit_count = finalized_eth1_data.deposit_count; + eth1_chain.finalize_eth1_data(finalized_eth1_data); + debug!( + self.log, + "called eth1_chain.finalize_eth1_data()"; + "epoch" => current_finalized_checkpoint.epoch, + "deposit count" => finalized_deposit_count, + ); + } + } + } + } + // Inform the unknown block cache, in case it was waiting on this block. self.pre_finalization_block_cache .block_processed(block_root); diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 7f59f1cfe..104de57db 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -42,6 +42,7 @@ //! END //! //! 
``` +use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, PayloadNotifier, @@ -622,6 +623,7 @@ pub struct ExecutionPendingBlock { pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, + pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, pub payload_verification_handle: PayloadVerificationHandle, } @@ -1164,6 +1166,11 @@ impl ExecutionPendingBlock { .into()); } + let parent_eth1_finalization_data = Eth1FinalizationData { + eth1_data: state.eth1_data().clone(), + eth1_deposit_index: state.eth1_deposit_index(), + }; + let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64()); for _ in 0..distance { let state_root = if parent.beacon_block.slot() == state.slot() { @@ -1419,6 +1426,7 @@ impl ExecutionPendingBlock { block_root, state, parent_block: parent.beacon_block, + parent_eth1_finalization_data, confirmed_state_roots, payload_verification_handle, }) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 051b84f81..58bbb2b5c 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,5 +1,6 @@ use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; +use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::head_tracker::HeadTracker; @@ -795,6 +796,7 @@ where head_for_snapshot_cache, )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()), + eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 3d24becc8..25971bf85 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -16,7 +16,6 @@ use store::{DBColumn, Error as StoreError, StoreItem}; use task_executor::TaskExecutor; use types::{ BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, - DEPOSIT_TREE_DEPTH, }; type BlockNumber = u64; @@ -170,8 +169,8 @@ fn get_sync_status( #[derive(Encode, Decode, Clone)] pub struct SszEth1 { - use_dummy_backend: bool, - backend_bytes: Vec, + pub use_dummy_backend: bool, + pub backend_bytes: Vec, } impl StoreItem for SszEth1 { @@ -305,6 +304,12 @@ where } } + /// Set in motion the finalization of `Eth1Data`. This method is called during block import + /// so it should be fast. + pub fn finalize_eth1_data(&self, eth1_data: Eth1Data) { + self.backend.finalize_eth1_data(eth1_data); + } + /// Consumes `self`, returning the backend. pub fn into_backend(self) -> T { self.backend @@ -335,6 +340,10 @@ pub trait Eth1ChainBackend: Sized + Send + Sync { /// beacon node eth1 cache is. fn latest_cached_block(&self) -> Option; + /// Set in motion the finalization of `Eth1Data`. This method is called during block import + /// so it should be fast. + fn finalize_eth1_data(&self, eth1_data: Eth1Data); + /// Returns the block at the head of the chain (ignoring follow distance, etc). 
Used to obtain /// an idea of how up-to-date the remote eth1 node is. fn head_block(&self) -> Option; @@ -389,6 +398,8 @@ impl Eth1ChainBackend for DummyEth1ChainBackend { None } + fn finalize_eth1_data(&self, _eth1_data: Eth1Data) {} + fn head_block(&self) -> Option { None } @@ -547,7 +558,7 @@ impl Eth1ChainBackend for CachingEth1Backend { .deposits() .read() .cache - .get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH) + .get_deposits(next, last, deposit_count) .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) .map(|(_deposit_root, deposits)| deposits) } @@ -558,6 +569,12 @@ impl Eth1ChainBackend for CachingEth1Backend { self.core.latest_cached_block() } + /// This only writes the eth1_data to a temporary cache so that the service + /// thread can later do the actual finalizing of the deposit tree. + fn finalize_eth1_data(&self, eth1_data: Eth1Data) { + self.core.set_to_finalize(Some(eth1_data)); + } + fn head_block(&self) -> Option { self.core.head_block() } diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs new file mode 100644 index 000000000..7cf805a12 --- /dev/null +++ b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs @@ -0,0 +1,498 @@ +use slog::{debug, Logger}; +use std::cmp; +use std::collections::BTreeMap; +use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root}; + +/// The default size of the cache. +/// The beacon chain only looks at the last 4 epochs for finalization. +/// Add 1 for current epoch and 4 earlier epochs. +pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5; + +/// These fields are named the same as the corresponding fields in the `BeaconState` +/// as this structure stores these values from the `BeaconState` at a `Checkpoint` +#[derive(Clone)] +pub struct Eth1FinalizationData { + pub eth1_data: Eth1Data, + pub eth1_deposit_index: u64, +} + +impl Eth1FinalizationData { + /// Ensures the deposit finalization conditions have been met. See: + /// https://eips.ethereum.org/EIPS/eip-4881#deposit-finalization-conditions + fn fully_imported(&self) -> bool { + self.eth1_deposit_index >= self.eth1_data.deposit_count + } +} + +/// Implements map from Checkpoint -> Eth1CacheData +pub struct CheckpointMap { + capacity: usize, + // There shouldn't be more than a couple of potential checkpoints at the same + // epoch. Searching through a vector for the matching Root should be faster + // than using another map from Root->Eth1CacheData + store: BTreeMap>, +} + +impl Default for CheckpointMap { + fn default() -> Self { + Self::new() + } +} + +/// Provides a map of `Eth1CacheData` referenced by `Checkpoint` +/// +/// ## Cache Queuing +/// +/// The cache keeps a maximum number of (`capacity`) epochs. Because there may be +/// forks at the epoch boundary, it's possible that there exists more than one +/// `Checkpoint` for the same `Epoch`. This cache will store all checkpoints for +/// a given `Epoch`. 
When adding data for a new `Checkpoint` would cause the number +/// of `Epoch`s stored to exceed `capacity`, the data for oldest `Epoch` is dropped +impl CheckpointMap { + pub fn new() -> Self { + CheckpointMap { + capacity: DEFAULT_ETH1_CACHE_SIZE, + store: BTreeMap::new(), + } + } + + pub fn with_capacity(capacity: usize) -> Self { + CheckpointMap { + capacity: cmp::max(1, capacity), + store: BTreeMap::new(), + } + } + + pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { + self.store + .entry(checkpoint.epoch) + .or_insert_with(Vec::new) + .push((checkpoint.root, eth1_finalization_data)); + + // faster to reduce size after the fact than do pre-checking to see + // if the current data would increase the size of the BTreeMap + while self.store.len() > self.capacity { + let oldest_stored_epoch = self.store.keys().next().cloned().unwrap(); + self.store.remove(&oldest_stored_epoch); + } + } + + pub fn get(&self, checkpoint: &Checkpoint) -> Option<&Eth1FinalizationData> { + match self.store.get(&checkpoint.epoch) { + Some(vec) => { + for (root, data) in vec { + if *root == checkpoint.root { + return Some(data); + } + } + None + } + None => None, + } + } + + #[cfg(test)] + pub fn len(&self) -> usize { + self.store.len() + } +} + +/// This cache stores `Eth1CacheData` that could potentially be finalized within 4 +/// future epochs. +pub struct Eth1FinalizationCache { + by_checkpoint: CheckpointMap, + pending_eth1: BTreeMap, + last_finalized: Option, + log: Logger, +} + +/// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to +/// finalize deposits when a new epoch is finalized. +/// +impl Eth1FinalizationCache { + pub fn new(log: Logger) -> Self { + Eth1FinalizationCache { + by_checkpoint: CheckpointMap::new(), + pending_eth1: BTreeMap::new(), + last_finalized: None, + log, + } + } + + pub fn with_capacity(log: Logger, capacity: usize) -> Self { + Eth1FinalizationCache { + by_checkpoint: CheckpointMap::with_capacity(capacity), + pending_eth1: BTreeMap::new(), + last_finalized: None, + log, + } + } + + pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { + if !eth1_finalization_data.fully_imported() { + self.pending_eth1.insert( + eth1_finalization_data.eth1_data.deposit_count, + eth1_finalization_data.eth1_data.clone(), + ); + debug!( + self.log, + "Eth1Cache: inserted pending eth1"; + "eth1_data.deposit_count" => eth1_finalization_data.eth1_data.deposit_count, + "eth1_deposit_index" => eth1_finalization_data.eth1_deposit_index, + ); + } + self.by_checkpoint + .insert(checkpoint, eth1_finalization_data); + } + + pub fn finalize(&mut self, checkpoint: &Checkpoint) -> Option { + if let Some(eth1_finalized_data) = self.by_checkpoint.get(checkpoint) { + let finalized_deposit_index = eth1_finalized_data.eth1_deposit_index; + let mut result = None; + while let Some(pending_count) = self.pending_eth1.keys().next().cloned() { + if finalized_deposit_index >= pending_count { + result = self.pending_eth1.remove(&pending_count); + debug!( + self.log, + "Eth1Cache: dropped pending eth1"; + "pending_count" => pending_count, + "finalized_deposit_index" => finalized_deposit_index, + ); + } else { + break; + } + } + if eth1_finalized_data.fully_imported() { + result = Some(eth1_finalized_data.eth1_data.clone()) + } + if result.is_some() { + self.last_finalized = result; + } + self.last_finalized.clone() + } else { + debug!( + self.log, + "Eth1Cache: cache miss"; + "epoch" => checkpoint.epoch, + ); + 
None + } + } + + #[cfg(test)] + pub fn by_checkpoint(&self) -> &CheckpointMap { + &self.by_checkpoint + } + + #[cfg(test)] + pub fn pending_eth1(&self) -> &BTreeMap { + &self.pending_eth1 + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use sloggers::null::NullLoggerBuilder; + use sloggers::Build; + use std::collections::HashMap; + + const SLOTS_PER_EPOCH: u64 = 32; + const MAX_DEPOSITS: u64 = 16; + const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64; + + fn eth1cache() -> Eth1FinalizationCache { + let log_builder = NullLoggerBuilder; + Eth1FinalizationCache::new(log_builder.build().expect("should build log")) + } + + fn random_eth1_data(deposit_count: u64) -> Eth1Data { + Eth1Data { + deposit_root: Root::random(), + deposit_count, + block_hash: Root::random(), + } + } + + fn random_checkpoint(epoch: u64) -> Checkpoint { + Checkpoint { + epoch: epoch.into(), + root: Root::random(), + } + } + + fn random_checkpoints(n: usize) -> Vec { + let mut result = Vec::with_capacity(n); + for epoch in 0..n { + result.push(random_checkpoint(epoch as u64)) + } + result + } + + #[test] + fn fully_imported_deposits() { + let epochs = 16; + let deposits_imported = 128; + + let eth1data = random_eth1_data(deposits_imported); + let checkpoints = random_checkpoints(epochs as usize); + let mut eth1cache = eth1cache(); + + for epoch in 4..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min((epoch - 4) as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + + let finalized_checkpoint = checkpoints + .get((epoch - 4) as usize) + .expect("should get finalized checkpoint"); + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits are fully imported so pending cache should be empty" + ); + if epoch < 8 { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + None, + "Should have cache miss" + ); + } else { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Should have cache hit" + ) + } + } + } + + #[test] + fn partially_imported_deposits() { + let epochs = 16; + let initial_deposits_imported = 1024; + let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; + let full_import_epoch = 13; + let total_deposits = + initial_deposits_imported + deposits_imported_per_epoch * full_import_epoch; + + let eth1data = random_eth1_data(total_deposits); + let checkpoints = random_checkpoints(epochs as usize); + let mut eth1cache = eth1cache(); + + for epoch in 0..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + let deposits_imported = cmp::min( + total_deposits, + initial_deposits_imported + deposits_imported_per_epoch * epoch, + ); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + + if epoch >= 4 { + let finalized_epoch = epoch - 4; + let finalized_checkpoint = checkpoints + .get(finalized_epoch as usize) + .expect("should get finalized checkpoint"); + if finalized_epoch < full_import_epoch { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + None, + "Deposits not fully finalized so cache should return no Eth1Data", + ); + 
assert_eq!( + eth1cache.pending_eth1().len(), + 1, + "Deposits not fully finalized. Pending eth1 cache should have 1 entry" + ); + } else { + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Deposits fully imported and finalized. Cache should return Eth1Data. finalized_deposits[{}]", + (initial_deposits_imported + deposits_imported_per_epoch * finalized_epoch), + ); + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits fully imported and finalized. Pending cache should be empty" + ); + } + } + } + } + + #[test] + fn fork_at_epoch_boundary() { + let epochs = 12; + let deposits_imported = 128; + + let eth1data = random_eth1_data(deposits_imported); + let checkpoints = random_checkpoints(epochs as usize); + let mut forks = HashMap::new(); + let mut eth1cache = eth1cache(); + + for epoch in 0..epochs { + assert_eq!( + eth1cache.by_checkpoint().len(), + cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), + "Unexpected cache size" + ); + + let checkpoint = checkpoints + .get(epoch as usize) + .expect("should get checkpoint"); + eth1cache.insert( + *checkpoint, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + // lets put a fork at every third epoch + if epoch % 3 == 0 { + let fork = random_checkpoint(epoch); + eth1cache.insert( + fork, + Eth1FinalizationData { + eth1_data: eth1data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + forks.insert(epoch as usize, fork); + } + + assert!( + eth1cache.pending_eth1().is_empty(), + "Deposits are fully imported so pending cache should be empty" + ); + if epoch >= 4 { + let finalized_epoch = (epoch - 4) as usize; + let finalized_checkpoint = if finalized_epoch % 3 == 0 { + forks.get(&finalized_epoch).expect("should get fork") + } else { + checkpoints + .get(finalized_epoch) + .expect("should get checkpoint") + }; + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + Some(eth1data.clone()), + "Should have cache hit" + ); + if finalized_epoch >= 3 { + let dropped_epoch = finalized_epoch - 3; + if let Some(dropped_checkpoint) = forks.get(&dropped_epoch) { + // got checkpoint for an old fork that should no longer + // be in the cache because it is from too long ago + assert_eq!( + eth1cache.finalize(dropped_checkpoint), + None, + "Should have cache miss" + ); + } + } + } + } + } + + #[test] + fn massive_deposit_queue() { + // Simulating a situation where deposits don't get imported within an eth1 voting period + let eth1_voting_periods = 8; + let initial_deposits_imported = 1024; + let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; + let initial_deposit_queue = + deposits_imported_per_epoch * EPOCHS_PER_ETH1_VOTING_PERIOD * 2 + 32; + let new_deposits_per_voting_period = + EPOCHS_PER_ETH1_VOTING_PERIOD * deposits_imported_per_epoch / 2; + + let mut epoch_data = BTreeMap::new(); + let mut eth1s_by_count = BTreeMap::new(); + let mut eth1cache = eth1cache(); + let mut last_period_deposits = initial_deposits_imported; + for period in 0..eth1_voting_periods { + let period_deposits = initial_deposits_imported + + initial_deposit_queue + + period * new_deposits_per_voting_period; + let period_eth1_data = random_eth1_data(period_deposits); + eth1s_by_count.insert(period_eth1_data.deposit_count, period_eth1_data.clone()); + + for epoch_mod_period in 0..EPOCHS_PER_ETH1_VOTING_PERIOD { + let epoch = period * EPOCHS_PER_ETH1_VOTING_PERIOD + epoch_mod_period; + let checkpoint = random_checkpoint(epoch); + let deposits_imported = cmp::min( + 
period_deposits, + last_period_deposits + deposits_imported_per_epoch * epoch_mod_period, + ); + eth1cache.insert( + checkpoint, + Eth1FinalizationData { + eth1_data: period_eth1_data.clone(), + eth1_deposit_index: deposits_imported, + }, + ); + epoch_data.insert(epoch, (checkpoint, deposits_imported)); + + if epoch >= 4 { + let finalized_epoch = epoch - 4; + let (finalized_checkpoint, finalized_deposits) = epoch_data + .get(&finalized_epoch) + .expect("should get epoch data"); + + let pending_eth1s = eth1s_by_count.range((finalized_deposits + 1)..).count(); + let last_finalized_eth1 = eth1s_by_count + .range(0..(finalized_deposits + 1)) + .map(|(_, eth1)| eth1) + .last() + .cloned(); + assert_eq!( + eth1cache.finalize(finalized_checkpoint), + last_finalized_eth1, + "finalized checkpoint mismatch", + ); + assert_eq!( + eth1cache.pending_eth1().len(), + pending_eth1s, + "pending eth1 mismatch" + ); + } + } + + // remove unneeded stuff from old epochs + while epoch_data.len() > DEFAULT_ETH1_CACHE_SIZE { + let oldest_stored_epoch = epoch_data + .keys() + .next() + .cloned() + .expect("should get oldest epoch"); + epoch_data.remove(&oldest_stored_epoch); + } + last_period_deposits = period_deposits; + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fbcd8f7fb..5ead5311e 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -14,6 +14,7 @@ pub mod chain_config; mod early_attester_cache; mod errors; pub mod eth1_chain; +mod eth1_finalization_cache; pub mod events; pub mod execution_payload; pub mod fork_choice_signal; diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 15b0f39f3..3ee77f7bb 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -2,13 +2,15 @@ mod migration_schema_v10; mod migration_schema_v11; mod migration_schema_v12; +mod migration_schema_v13; mod migration_schema_v6; mod migration_schema_v7; mod migration_schema_v8; mod migration_schema_v9; mod types; -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; +use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY}; +use crate::eth1_chain::SszEth1; use crate::persisted_fork_choice::{ PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7, PersistedForkChoiceV8, @@ -24,6 +26,7 @@ use store::{Error as StoreError, StoreItem}; /// Migrate the database from one schema version to another, applying all requisite mutations. pub fn migrate_schema( db: Arc>, + deposit_contract_deploy_block: u64, datadir: &Path, from: SchemaVersion, to: SchemaVersion, @@ -31,19 +34,51 @@ pub fn migrate_schema( spec: &ChainSpec, ) -> Result<(), StoreError> { match (from, to) { - // Migrating from the current schema version to iself is always OK, a no-op. + // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), // Upgrade across multiple versions by recursively migrating one step at a time. 
(_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; - migrate_schema::(db, datadir, next, to, log, spec) + migrate_schema::( + db.clone(), + deposit_contract_deploy_block, + datadir, + from, + next, + log.clone(), + spec, + )?; + migrate_schema::( + db, + deposit_contract_deploy_block, + datadir, + next, + to, + log, + spec, + ) } // Downgrade across multiple versions by recursively migrating one step at a time. (_, _) if to.as_u64() + 1 < from.as_u64() => { let next = SchemaVersion(from.as_u64() - 1); - migrate_schema::(db.clone(), datadir, from, next, log.clone(), spec)?; - migrate_schema::(db, datadir, next, to, log, spec) + migrate_schema::( + db.clone(), + deposit_contract_deploy_block, + datadir, + from, + next, + log.clone(), + spec, + )?; + migrate_schema::( + db, + deposit_contract_deploy_block, + datadir, + next, + to, + log, + spec, + ) } // @@ -207,6 +242,55 @@ pub fn migrate_schema( let ops = migration_schema_v12::downgrade_from_v12::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(12), SchemaVersion(13)) => { + let mut ops = vec![]; + if let Some(persisted_eth1_v1) = db.get_item::(Ð1_CACHE_DB_KEY)? { + let upgraded_eth1_cache = + match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) { + Ok(upgraded_eth1) => upgraded_eth1, + Err(e) => { + warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e); + warn!(log, "Reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v13( + deposit_contract_deploy_block, + ) + } + }; + ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } + (SchemaVersion(13), SchemaVersion(12)) => { + let mut ops = vec![]; + if let Some(persisted_eth1_v13) = db.get_item::(Ð1_CACHE_DB_KEY)? { + let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache( + persisted_eth1_v13, + ) { + Ok(Some(downgraded_eth1)) => downgraded_eth1, + Ok(None) => { + warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v1( + deposit_contract_deploy_block, + ) + } + Err(e) => { + warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e); + warn!(log, "Reinitializing eth1 cache"); + migration_schema_v13::reinitialized_eth1_cache_v1( + deposit_contract_deploy_block, + ) + } + }; + ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + } + + db.store_schema_version_atomically(to, ops)?; + + Ok(()) + } // Anything else is an error. 
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs new file mode 100644 index 000000000..d4ac97460 --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs @@ -0,0 +1,150 @@ +use crate::eth1_chain::SszEth1; +use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13}; +use ssz::{Decode, Encode}; +use state_processing::common::DepositDataTree; +use store::Error; +use types::DEPOSIT_TREE_DEPTH; + +pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result { + if persisted_eth1_v1.use_dummy_backend { + // backend_bytes is empty when using dummy backend + return Ok(persisted_eth1_v1); + } + + let SszEth1 { + use_dummy_backend, + backend_bytes, + } = persisted_eth1_v1; + + let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?; + let SszEth1CacheV1 { + block_cache, + deposit_cache: deposit_cache_v1, + last_processed_block, + } = ssz_eth1_cache_v1; + + let SszDepositCacheV1 { + logs, + leaves, + deposit_contract_deploy_block, + deposit_roots, + } = deposit_cache_v1; + + let deposit_cache_v13 = SszDepositCacheV13 { + logs, + leaves, + deposit_contract_deploy_block, + finalized_deposit_count: 0, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), + deposit_tree_snapshot: None, + deposit_roots, + }; + + let ssz_eth1_cache_v13 = SszEth1CacheV13 { + block_cache, + deposit_cache: deposit_cache_v13, + last_processed_block, + }; + + let persisted_eth1_v13 = SszEth1 { + use_dummy_backend, + backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), + }; + + Ok(persisted_eth1_v13) +} + +pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result, Error> { + if persisted_eth1_v13.use_dummy_backend { + // backend_bytes is empty when using dummy backend + return Ok(Some(persisted_eth1_v13)); + } + + let SszEth1 { + use_dummy_backend, + backend_bytes, + } = persisted_eth1_v13; + + let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?; + let SszEth1CacheV13 { + block_cache, + deposit_cache: deposit_cache_v13, + last_processed_block, + } = ssz_eth1_cache_v13; + + let SszDepositCacheV13 { + logs, + leaves, + deposit_contract_deploy_block, + finalized_deposit_count, + finalized_block_height: _, + deposit_tree_snapshot, + deposit_roots, + } = deposit_cache_v13; + + if finalized_deposit_count == 0 && deposit_tree_snapshot.is_none() { + // This tree was never finalized and can be directly downgraded to v1 without re-initializing + let deposit_cache_v1 = SszDepositCacheV1 { + logs, + leaves, + deposit_contract_deploy_block, + deposit_roots, + }; + let ssz_eth1_cache_v1 = SszEth1CacheV1 { + block_cache, + deposit_cache: deposit_cache_v1, + last_processed_block, + }; + return Ok(Some(SszEth1 { + use_dummy_backend, + backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), + })); + } + // deposit cache was finalized; can't downgrade + Ok(None) +} + +pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 { + let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + let deposit_cache_v13 = SszDepositCacheV13 { + logs: vec![], + leaves: vec![], + deposit_contract_deploy_block, + finalized_deposit_count: 0, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), + deposit_tree_snapshot: empty_tree.get_snapshot(), + deposit_roots: vec![empty_tree.root()], + }; + + let 
ssz_eth1_cache_v13 = SszEth1CacheV13 { + block_cache: BlockCache::default(), + deposit_cache: deposit_cache_v13, + last_processed_block: None, + }; + + SszEth1 { + use_dummy_backend: false, + backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), + } +} + +pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 { + let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); + let deposit_cache_v1 = SszDepositCacheV1 { + logs: vec![], + leaves: vec![], + deposit_contract_deploy_block, + deposit_roots: vec![empty_tree.root()], + }; + + let ssz_eth1_cache_v1 = SszEth1CacheV1 { + block_cache: BlockCache::default(), + deposit_cache: deposit_cache_v1, + last_processed_block: None, + }; + + SszEth1 { + use_dummy_backend: false, + backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), + } +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d5a888038..3b4a62f5a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1432,8 +1432,9 @@ where // Building proofs let mut proofs = vec![]; for i in 0..leaves.len() { - let (_, mut proof) = - tree.generate_proof(i, self.spec.deposit_contract_tree_depth as usize); + let (_, mut proof) = tree + .generate_proof(i, self.spec.deposit_contract_tree_depth as usize) + .expect("should generate proof"); proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64))); proofs.push(proof); } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index efd91cfdf..c89980e6e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -277,8 +277,52 @@ where BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT)); let slots_per_epoch = TEthSpec::slots_per_epoch(); - debug!(context.log(), "Downloading finalized block"); + let deposit_snapshot = if config.sync_eth1_chain { + // We want to fetch deposit snapshot before fetching the finalized beacon state to + // ensure that the snapshot is not newer than the beacon state that satisfies the + // deposit finalization conditions + debug!(context.log(), "Downloading deposit snapshot"); + let deposit_snapshot_result = remote + .get_deposit_snapshot() + .await + .map_err(|e| match e { + ApiError::InvalidSsz(e) => format!( + "Unable to parse SSZ: {:?}. Ensure the checkpoint-sync-url refers to a \ + node for the correct network", + e + ), + e => format!("Error fetching deposit snapshot from remote: {:?}", e), + }); + match deposit_snapshot_result { + Ok(Some(deposit_snapshot)) => { + if deposit_snapshot.is_valid() { + Some(deposit_snapshot) + } else { + warn!(context.log(), "Remote BN sent invalid deposit snapshot!"); + None + } + } + Ok(None) => { + warn!( + context.log(), + "Remote BN does not support EIP-4881 fast deposit sync" + ); + None + } + Err(e) => { + warn!( + context.log(), + "Remote BN does not support EIP-4881 fast deposit sync"; + "error" => e + ); + None + } + } + } else { + None + }; + debug!(context.log(), "Downloading finalized block"); // Find a suitable finalized block on an epoch boundary. 
let mut block = remote .get_beacon_blocks_ssz::(BlockId::Finalized, &spec) @@ -362,9 +406,33 @@ where "state_root" => ?state_root, ); + let service = + deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( + config.eth1, + context.log().clone(), + spec, + &snapshot, + ) { + Ok(service) => { + info!( + context.log(), + "Loaded deposit tree snapshot"; + "deposits loaded" => snapshot.deposit_count, + ); + Some(service) + } + Err(e) => { + warn!(context.log(), + "Unable to load deposit snapshot"; + "error" => ?e + ); + None + } + }); + builder .weak_subjectivity_state(state, block, genesis_state) - .map(|v| (v, None))? + .map(|v| (v, service))? } ClientGenesis::DepositContract => { info!( @@ -810,9 +878,16 @@ where self.freezer_db_path = Some(cold_path.into()); let inner_spec = spec.clone(); + let deposit_contract_deploy_block = context + .eth2_network_config + .as_ref() + .map(|config| config.deposit_contract_deploy_block) + .unwrap_or(0); + let schema_upgrade = |db, from, to| { migrate_schema::>( db, + deposit_contract_deploy_block, datadir, from, to, diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index 930301256..7e99c43e7 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -25,6 +25,7 @@ eth2_ssz_derive = "0.3.0" tree_hash = "0.4.1" parking_lot = "0.12.0" slog = "2.5.2" +superstruct = "0.5.0" tokio = { version = "1.14.0", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } lighthouse_metrics = { path = "../../common/lighthouse_metrics"} diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs index 5999944f4..26e160115 100644 --- a/beacon_node/eth1/src/block_cache.rs +++ b/beacon_node/eth1/src/block_cache.rs @@ -1,7 +1,10 @@ use ssz_derive::{Decode, Encode}; +use std::collections::HashMap; use std::ops::RangeInclusive; pub use eth2::lighthouse::Eth1Block; +use eth2::types::Hash256; +use std::sync::Arc; #[derive(Debug, PartialEq, Clone)] pub enum Error { @@ -20,7 +23,9 @@ pub enum Error { /// timestamp. #[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] pub struct BlockCache { - blocks: Vec, + blocks: Vec>, + #[ssz(skip_serializing, skip_deserializing)] + by_hash: HashMap>, } impl BlockCache { @@ -36,12 +41,12 @@ impl BlockCache { /// Returns the earliest (lowest timestamp) block, if any. pub fn earliest_block(&self) -> Option<&Eth1Block> { - self.blocks.first() + self.blocks.first().map(|ptr| ptr.as_ref()) } /// Returns the latest (highest timestamp) block, if any. pub fn latest_block(&self) -> Option<&Eth1Block> { - self.blocks.last() + self.blocks.last().map(|ptr| ptr.as_ref()) } /// Returns the timestamp of the earliest block in the cache (if any). @@ -71,7 +76,7 @@ impl BlockCache { /// - Monotonically increasing block numbers. /// - Non-uniformly increasing block timestamps. pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - self.blocks.iter() + self.blocks.iter().map(|ptr| ptr.as_ref()) } /// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the @@ -80,7 +85,11 @@ impl BlockCache { /// If `len` is greater than the vector's current length, this has no effect. 
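// A short sketch (hypothetical helper, assuming `ssz::Decode` is in scope) of
// why `rebuild_by_hash_map` below exists: `by_hash` is tagged
// `#[ssz(skip_serializing, skip_deserializing)]`, so a `BlockCache` decoded
// from SSZ bytes starts with an empty hash index and must rebuild it from
// `blocks` before `block_by_hash` lookups can succeed.
fn decode_block_cache(bytes: &[u8]) -> Result<BlockCache, ssz::DecodeError> {
    let mut cache = BlockCache::from_ssz_bytes(bytes)?;
    // Repopulate the index that SSZ decoding skipped.
    cache.rebuild_by_hash_map();
    Ok(cache)
}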
pub fn truncate(&mut self, len: usize) { if len < self.blocks.len() { - self.blocks = self.blocks.split_off(self.blocks.len() - len); + let remaining = self.blocks.split_off(self.blocks.len() - len); + for block in &self.blocks { + self.by_hash.remove(&block.hash); + } + self.blocks = remaining; } } @@ -92,12 +101,27 @@ impl BlockCache { /// Returns a block with the corresponding number, if any. pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> { - self.blocks.get( - self.blocks - .as_slice() - .binary_search_by(|block| block.number.cmp(&block_number)) - .ok()?, - ) + self.blocks + .get( + self.blocks + .as_slice() + .binary_search_by(|block| block.number.cmp(&block_number)) + .ok()?, + ) + .map(|ptr| ptr.as_ref()) + } + + /// Returns a block with the corresponding hash, if any. + pub fn block_by_hash(&self, block_hash: &Hash256) -> Option<&Eth1Block> { + self.by_hash.get(block_hash).map(|ptr| ptr.as_ref()) + } + + /// Rebuilds the by_hash map + pub fn rebuild_by_hash_map(&mut self) { + self.by_hash.clear(); + for block in self.blocks.iter() { + self.by_hash.insert(block.hash, block.clone()); + } } /// Insert an `Eth1Snapshot` into `self`, allowing future queries. @@ -161,7 +185,9 @@ impl BlockCache { } } - self.blocks.push(block); + let ptr = Arc::new(block); + self.by_hash.insert(ptr.hash, ptr.clone()); + self.blocks.push(ptr); Ok(()) } @@ -269,6 +295,8 @@ mod tests { .expect("should add consecutive blocks with duplicate timestamps"); } + let blocks = blocks.into_iter().map(Arc::new).collect::>(); + assert_eq!(cache.blocks, blocks, "should have added all blocks"); } } diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs index 078e3602f..ab07b380d 100644 --- a/beacon_node/eth1/src/deposit_cache.rs +++ b/beacon_node/eth1/src/deposit_cache.rs @@ -1,9 +1,10 @@ -use execution_layer::http::deposit_log::DepositLog; +use crate::{DepositLog, Eth1Block}; use ssz_derive::{Decode, Encode}; use state_processing::common::DepositDataTree; use std::cmp::Ordering; +use superstruct::superstruct; use tree_hash::TreeHash; -use types::{Deposit, Hash256, DEPOSIT_TREE_DEPTH}; +use types::{Deposit, DepositTreeSnapshot, Hash256, DEPOSIT_TREE_DEPTH}; #[derive(Debug, PartialEq)] pub enum Error { @@ -21,22 +22,53 @@ pub enum Error { /// A log with the given index is already present in the cache and it does not match the one /// provided. DuplicateDistinctLog(u64), + /// Attempted to insert log with given index after the log had been finalized + FinalizedLogInsert { + log_index: u64, + finalized_index: u64, + }, /// The deposit count must always be large enough to account for the requested deposit range. /// /// E.g., you cannot request deposit 10 when the deposit count is 9. DepositCountInvalid { deposit_count: u64, range_end: u64 }, + /// You can't request deposits on or before the finalized deposit + DepositRangeInvalid { + range_start: u64, + finalized_count: u64, + }, + /// You can't finalize what's already been finalized and the cache must have the logs + /// that you wish to finalize + InvalidFinalizeIndex { + requested_count: u64, + currently_finalized: u64, + deposit_count: u64, + }, /// Error with the merkle tree for deposits. DepositTree(merkle_proof::MerkleTreeError), /// An unexpected condition was encountered. 
Internal(String), + /// This is for errors that should never occur + PleaseNotifyTheDevs, } -#[derive(Encode, Decode, Clone)] +pub type SszDepositCache = SszDepositCacheV13; + +#[superstruct( + variants(V1, V13), + variant_attributes(derive(Encode, Decode, Clone)), + no_enum +)] pub struct SszDepositCache { - logs: Vec, - leaves: Vec, - deposit_contract_deploy_block: u64, - deposit_roots: Vec, + pub logs: Vec, + pub leaves: Vec, + pub deposit_contract_deploy_block: u64, + #[superstruct(only(V13))] + pub finalized_deposit_count: u64, + #[superstruct(only(V13))] + pub finalized_block_height: u64, + #[superstruct(only(V13))] + pub deposit_tree_snapshot: Option, + pub deposit_roots: Vec, } impl SszDepositCache { @@ -45,13 +77,37 @@ impl SszDepositCache { logs: cache.logs.clone(), leaves: cache.leaves.clone(), deposit_contract_deploy_block: cache.deposit_contract_deploy_block, + finalized_deposit_count: cache.finalized_deposit_count, + finalized_block_height: cache.finalized_block_height, + deposit_tree_snapshot: cache.deposit_tree.get_snapshot(), deposit_roots: cache.deposit_roots.clone(), } } pub fn to_deposit_cache(&self) -> Result { - let deposit_tree = - DepositDataTree::create(&self.leaves, self.leaves.len(), DEPOSIT_TREE_DEPTH); + let deposit_tree = self + .deposit_tree_snapshot + .as_ref() + .map(|snapshot| { + let mut tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) + .map_err(|e| format!("Invalid SszDepositCache: {:?}", e))?; + for leaf in &self.leaves { + tree.push_leaf(*leaf).map_err(|e| { + format!("Invalid SszDepositCache: unable to push leaf: {:?}", e) + })?; + } + Ok::<_, String>(tree) + }) + .unwrap_or_else(|| { + // deposit_tree_snapshot = None (tree was never finalized) + // Create DepositDataTree from leaves + Ok(DepositDataTree::create( + &self.leaves, + self.leaves.len(), + DEPOSIT_TREE_DEPTH, + )) + })?; + // Check for invalid SszDepositCache conditions if self.leaves.len() != self.logs.len() { return Err("Invalid SszDepositCache: logs and leaves should have equal length".into()); @@ -67,6 +123,8 @@ impl SszDepositCache { logs: self.logs.clone(), leaves: self.leaves.clone(), deposit_contract_deploy_block: self.deposit_contract_deploy_block, + finalized_deposit_count: self.finalized_deposit_count, + finalized_block_height: self.finalized_block_height, deposit_tree, deposit_roots: self.deposit_roots.clone(), }) @@ -76,10 +134,13 @@ impl SszDepositCache { /// Mirrors the merkle tree of deposits in the eth1 deposit contract. /// /// Provides `Deposit` objects with merkle proofs included. +#[cfg_attr(test, derive(PartialEq))] pub struct DepositCache { logs: Vec, leaves: Vec, deposit_contract_deploy_block: u64, + finalized_deposit_count: u64, + finalized_block_height: u64, /// An incremental merkle tree which represents the current state of the /// deposit contract tree. deposit_tree: DepositDataTree, @@ -96,6 +157,8 @@ impl Default for DepositCache { logs: Vec::new(), leaves: Vec::new(), deposit_contract_deploy_block: 1, + finalized_deposit_count: 0, + finalized_block_height: 0, deposit_tree, deposit_roots, } @@ -114,33 +177,111 @@ impl DepositCache { pub fn new(deposit_contract_deploy_block: u64) -> Self { DepositCache { deposit_contract_deploy_block, + finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), ..Self::default() } } - /// Returns the number of deposits available in the cache. 
+ pub fn from_deposit_snapshot( + deposit_contract_deploy_block: u64, + snapshot: &DepositTreeSnapshot, + ) -> Result { + let deposit_tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) + .map_err(|e| format!("Invalid DepositSnapshot: {:?}", e))?; + Ok(DepositCache { + logs: Vec::new(), + leaves: Vec::new(), + deposit_contract_deploy_block, + finalized_deposit_count: snapshot.deposit_count, + finalized_block_height: snapshot.execution_block_height, + deposit_tree, + deposit_roots: vec![snapshot.deposit_root], + }) + } + + /// Returns the number of deposits the cache stores pub fn len(&self) -> usize { - self.logs.len() + self.finalized_deposit_count as usize + self.logs.len() } /// True if the cache does not store any blocks. pub fn is_empty(&self) -> bool { - self.logs.is_empty() + self.finalized_deposit_count != 0 && self.logs.is_empty() } /// Returns the block number for the most recent deposit in the cache. - pub fn latest_block_number(&self) -> Option { - self.logs.last().map(|log| log.block_number) + pub fn latest_block_number(&self) -> u64 { + self.logs + .last() + .map(|log| log.block_number) + .unwrap_or(self.finalized_block_height) } - /// Returns an iterator over all the logs in `self`. + /// Returns an iterator over all the logs in `self` that aren't finalized. pub fn iter(&self) -> impl Iterator { self.logs.iter() } - /// Returns the i'th deposit log. - pub fn get(&self, i: usize) -> Option<&DepositLog> { - self.logs.get(i) + /// Returns the deposit log with INDEX i. + pub fn get_log(&self, i: usize) -> Option<&DepositLog> { + let finalized_deposit_count = self.finalized_deposit_count as usize; + if i < finalized_deposit_count { + None + } else { + self.logs.get(i - finalized_deposit_count) + } + } + + /// Returns the deposit root with DEPOSIT COUNT (not index) i + pub fn get_root(&self, i: usize) -> Option<&Hash256> { + let finalized_deposit_count = self.finalized_deposit_count as usize; + if i < finalized_deposit_count { + None + } else { + self.deposit_roots.get(i - finalized_deposit_count) + } + } + + /// Returns the finalized deposit count + pub fn finalized_deposit_count(&self) -> u64 { + self.finalized_deposit_count + } + + /// Finalizes the cache up to `eth1_block.deposit_count`. + pub fn finalize(&mut self, eth1_block: Eth1Block) -> Result<(), Error> { + let deposits_to_finalize = eth1_block.deposit_count.ok_or_else(|| { + Error::Internal("Eth1Block did not contain deposit_count".to_string()) + })?; + + let currently_finalized = self.finalized_deposit_count; + if deposits_to_finalize > self.len() as u64 || deposits_to_finalize <= currently_finalized { + Err(Error::InvalidFinalizeIndex { + requested_count: deposits_to_finalize, + currently_finalized, + deposit_count: self.len() as u64, + }) + } else { + let finalized_log = self + .get_log((deposits_to_finalize - 1) as usize) + .cloned() + .ok_or(Error::PleaseNotifyTheDevs)?; + let drop = (deposits_to_finalize - currently_finalized) as usize; + self.deposit_tree + .finalize(eth1_block.into()) + .map_err(Error::DepositTree)?; + self.logs.drain(0..drop); + self.leaves.drain(0..drop); + self.deposit_roots.drain(0..drop); + self.finalized_deposit_count = deposits_to_finalize; + self.finalized_block_height = finalized_log.block_number; + + Ok(()) + } + } + + /// Returns the deposit tree snapshot (if tree is finalized) + pub fn get_deposit_snapshot(&self) -> Option { + self.deposit_tree.get_snapshot() } /// Adds `log` to self. 
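// Worked example (illustrative numbers) of the index translation above: after
// finalizing the first 100 deposits the cache retains only logs 100.., yet
// callers keep using absolute deposit indices, so:
//
//   finalized_deposit_count() == 100
//   get_log(99)  -> None               (pruned by finalization)
//   get_log(100) -> self.logs.get(0)   (first retained log)
//   len()        == 100 + self.logs.len()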
@@ -153,19 +294,29 @@ impl DepositCache { /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty). /// - If a log with `log.index` is already known, but the given `log` is distinct to it. pub fn insert_log(&mut self, log: DepositLog) -> Result { - match log.index.cmp(&(self.logs.len() as u64)) { + match log.index.cmp(&(self.len() as u64)) { Ordering::Equal => { let deposit = log.deposit_data.tree_hash_root(); - self.leaves.push(deposit); - self.logs.push(log); + // should push to deposit_tree first because it's fallible self.deposit_tree .push_leaf(deposit) .map_err(Error::DepositTree)?; + self.leaves.push(deposit); + self.logs.push(log); self.deposit_roots.push(self.deposit_tree.root()); Ok(DepositCacheInsertOutcome::Inserted) } Ordering::Less => { - if self.logs[log.index as usize] == log { + let mut compare_index = log.index as usize; + if log.index < self.finalized_deposit_count { + return Err(Error::FinalizedLogInsert { + log_index: log.index, + finalized_index: self.finalized_deposit_count - 1, + }); + } else { + compare_index -= self.finalized_deposit_count as usize; + } + if self.logs[compare_index] == log { Ok(DepositCacheInsertOutcome::Duplicate) } else { Err(Error::DuplicateDistinctLog(log.index)) @@ -187,14 +338,13 @@ impl DepositCache { /// /// ## Errors /// - /// - If `deposit_count` is larger than `end`. + /// - If `deposit_count` is less than `end`. /// - There are not sufficient deposits in the tree to generate the proof. pub fn get_deposits( &self, start: u64, end: u64, deposit_count: u64, - tree_depth: usize, ) -> Result<(Hash256, Vec), Error> { if deposit_count < end { // It's invalid to ask for more deposits than should exist. @@ -202,48 +352,66 @@ impl DepositCache { deposit_count, range_end: end, }) - } else if end > self.logs.len() as u64 { + } else if end > self.len() as u64 { // The range of requested deposits exceeds the deposits stored locally. Err(Error::InsufficientDeposits { requested: end, known_deposits: self.logs.len(), }) - } else if deposit_count > self.leaves.len() as u64 { - // There are not `deposit_count` known deposit roots, so we can't build the merkle tree - // to prove into. - Err(Error::InsufficientDeposits { - requested: deposit_count, - known_deposits: self.logs.len(), + } else if self.finalized_deposit_count > start { + // Can't ask for deposits before or on the finalized deposit + Err(Error::DepositRangeInvalid { + range_start: start, + finalized_count: self.finalized_deposit_count, }) } else { + let (start, end, deposit_count) = ( + start - self.finalized_deposit_count, + end - self.finalized_deposit_count, + deposit_count - self.finalized_deposit_count, + ); let leaves = self .leaves .get(0..deposit_count as usize) .ok_or_else(|| Error::Internal("Unable to get known leaves".into()))?; - // Note: there is likely a more optimal solution than recreating the `DepositDataTree` - // each time this function is called. - // - // Perhaps a base merkle tree could be maintained that contains all deposits up to the - // last finalized eth1 deposit count. Then, that tree could be cloned and extended for - // each of these calls. + let tree = self + .deposit_tree + .get_snapshot() + .map(|snapshot| { + // The tree has already been finalized. 
So we can just start from the snapshot + // and replay the deposits up to `deposit_count` + let mut tree = DepositDataTree::from_snapshot(&snapshot, DEPOSIT_TREE_DEPTH) + .map_err(Error::DepositTree)?; + for leaf in leaves { + tree.push_leaf(*leaf).map_err(Error::DepositTree)?; + } + Ok(tree) + }) + .unwrap_or_else(|| { + // Deposit tree hasn't been finalized yet, will have to re-create the whole tree + Ok(DepositDataTree::create( + leaves, + leaves.len(), + DEPOSIT_TREE_DEPTH, + )) + })?; - let tree = DepositDataTree::create(leaves, deposit_count as usize, tree_depth); - - let deposits = self - .logs + let mut deposits = vec![]; + self.logs .get(start as usize..end as usize) .ok_or_else(|| Error::Internal("Unable to get known log".into()))? .iter() - .map(|deposit_log| { - let (_leaf, proof) = tree.generate_proof(deposit_log.index as usize); - - Deposit { + .try_for_each(|deposit_log| { + let (_leaf, proof) = tree + .generate_proof(deposit_log.index as usize) + .map_err(Error::DepositTree)?; + deposits.push(Deposit { proof: proof.into(), data: deposit_log.deposit_data.clone(), - } - }) - .collect(); + }); + Ok(()) + })?; Ok((tree.root(), deposits)) } @@ -270,16 +438,24 @@ impl DepositCache { /// Returns the number of deposits that have been observed up to and /// including the block at `block_number`. /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment. + /// Returns `None` if the `block_number` is zero or prior to contract deployment + /// or prior to last finalized deposit. pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option { - if block_number == 0 || block_number < self.deposit_contract_deploy_block { + if block_number == 0 + || block_number < self.deposit_contract_deploy_block + || block_number < self.finalized_block_height + { None + } else if block_number == self.finalized_block_height { + Some(self.finalized_deposit_count) } else { Some( - self.logs - .iter() - .take_while(|deposit| deposit.block_number <= block_number) - .count() as u64, + self.finalized_deposit_count + + self + .logs + .iter() + .take_while(|deposit| deposit.block_number <= block_number) + .count() as u64, ) } } @@ -289,8 +465,8 @@ impl DepositCache { /// Fetches the `deposit_count` on or just before the queried `block_number` /// and queries the `deposit_roots` map to get the corresponding `deposit_root`. pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option { - let index = self.get_deposit_count_from_cache(block_number)?; - Some(*self.deposit_roots.get(index as usize)?) + let count = self.get_deposit_count_from_cache(block_number)?; + self.get_root(count as usize).cloned() } } @@ -300,8 +476,6 @@ pub mod tests { use execution_layer::http::deposit_log::Log; use types::{EthSpec, MainnetEthSpec}; - pub const TREE_DEPTH: usize = 32; - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. 
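// Worked example (illustrative numbers) for `get_deposit_count_from_cache`
// above: with finalized_block_height == 500 and finalized_deposit_count == 100,
//
//   get_deposit_count_from_cache(499) -> None       (below the finalized height)
//   get_deposit_count_from_cache(500) -> Some(100)  (exactly the finalized block)
//   get_deposit_count_from_cache(501) -> Some(100 + logs with block_number <= 501)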
pub const EXAMPLE_LOG: &[u8] = &[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -338,32 +512,52 @@ pub mod tests { log.to_deposit_log(&spec).expect("should decode log") } + fn get_cache_with_deposits(n: u64) -> DepositCache { + let mut deposit_cache = DepositCache::default(); + for i in 0..n { + let mut log = example_log(); + log.index = i; + log.block_number = i; + log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); + } + assert_eq!(deposit_cache.len() as u64, n, "should have {} deposits", n); + + deposit_cache + } + #[test] fn insert_log_valid() { - let mut tree = DepositCache::default(); + let mut deposit_cache = DepositCache::default(); for i in 0..16 { let mut log = example_log(); log.index = i; - tree.insert_log(log).expect("should add consecutive logs"); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); } } #[test] fn insert_log_invalid() { - let mut tree = DepositCache::default(); + let mut deposit_cache = DepositCache::default(); for i in 0..4 { let mut log = example_log(); log.index = i; - tree.insert_log(log).expect("should add consecutive logs"); + deposit_cache + .insert_log(log) + .expect("should add consecutive logs"); } // Add duplicate, when given is the same as the one known. let mut log = example_log(); log.index = 3; assert_eq!( - tree.insert_log(log).unwrap(), + deposit_cache.insert_log(log).unwrap(), DepositCacheInsertOutcome::Duplicate ); @@ -371,54 +565,40 @@ pub mod tests { let mut log = example_log(); log.index = 3; log.block_number = 99; - assert!(tree.insert_log(log).is_err()); + assert!(deposit_cache.insert_log(log).is_err()); // Skip inserting a log. let mut log = example_log(); log.index = 5; - assert!(tree.insert_log(log).is_err()); + assert!(deposit_cache.insert_log(log).is_err()); } #[test] fn get_deposit_valid() { let n = 1_024; - let mut tree = DepositCache::default(); - - for i in 0..n { - let mut log = example_log(); - log.index = i; - log.block_number = i; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); - tree.insert_log(log).expect("should add consecutive logs"); - } + let deposit_cache = get_cache_with_deposits(n); // Get 0 deposits, with max deposit count. - let (_, deposits) = tree - .get_deposits(0, 0, n, TREE_DEPTH) + let (_, deposits) = deposit_cache + .get_deposits(0, 0, n) .expect("should get the full tree"); assert_eq!(deposits.len(), 0, "should return no deposits"); // Get 0 deposits, with 0 deposit count. - let (_, deposits) = tree - .get_deposits(0, 0, 0, TREE_DEPTH) - .expect("should get the full tree"); - assert_eq!(deposits.len(), 0, "should return no deposits"); - - // Get 0 deposits, with 0 deposit count, tree depth 0. - let (_, deposits) = tree - .get_deposits(0, 0, 0, 0) + let (_, deposits) = deposit_cache + .get_deposits(0, 0, 0) .expect("should get the full tree"); assert_eq!(deposits.len(), 0, "should return no deposits"); // Get all deposits, with max deposit count. - let (full_root, deposits) = tree - .get_deposits(0, n, n, TREE_DEPTH) + let (full_root, deposits) = deposit_cache + .get_deposits(0, n, n) .expect("should get the full tree"); assert_eq!(deposits.len(), n as usize, "should return all deposits"); // Get 4 deposits, with max deposit count. 
-        let (root, deposits) = tree
-            .get_deposits(0, 4, n, TREE_DEPTH)
+        let (root, deposits) = deposit_cache
+            .get_deposits(0, 4, n)
             .expect("should get the four from the full tree");
         assert_eq!(
             deposits.len(),
@@ -432,14 +612,14 @@ pub mod tests {
         // Get half of the deposits, with half deposit count.
         let half = n / 2;
-        let (half_root, deposits) = tree
-            .get_deposits(0, half, half, TREE_DEPTH)
+        let (half_root, deposits) = deposit_cache
+            .get_deposits(0, half, half)
             .expect("should get the half tree");
         assert_eq!(deposits.len(), half as usize, "should return half deposits");
         // Get 4 deposits, with half deposit count.
-        let (root, deposits) = tree
-            .get_deposits(0, 4, n / 2, TREE_DEPTH)
+        let (root, deposits) = deposit_cache
+            .get_deposits(0, 4, n / 2)
             .expect("should get the half tree");
         assert_eq!(
             deposits.len(),
@@ -459,23 +639,455 @@ pub mod tests {
     #[test]
     fn get_deposit_invalid() {
         let n = 16;
-        let mut tree = DepositCache::default();
-
-        for i in 0..n {
-            let mut log = example_log();
-            log.index = i;
-            log.block_number = i;
-            log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i);
-            tree.insert_log(log).expect("should add consecutive logs");
-        }
+        let mut tree = get_cache_with_deposits(n);
         // Range too high.
-        assert!(tree.get_deposits(0, n + 1, n, TREE_DEPTH).is_err());
+        assert!(tree.get_deposits(0, n + 1, n).is_err());
         // Count too high.
-        assert!(tree.get_deposits(0, n, n + 1, TREE_DEPTH).is_err());
+        assert!(tree.get_deposits(0, n, n + 1).is_err());
         // Range higher than count.
-        assert!(tree.get_deposits(0, 4, 2, TREE_DEPTH).is_err());
+        assert!(tree.get_deposits(0, 4, 2).is_err());
+
+        let block7 = fake_eth1_block(&tree, 7).expect("should create fake eth1 block");
+        tree.finalize(block7).expect("should finalize");
+        // Range starts <= finalized deposit
+        assert!(tree.get_deposits(6, 9, 11).is_err());
+        assert!(tree.get_deposits(7, 9, 11).is_err());
+        // Range start > finalized deposit should be OK
+        assert!(tree.get_deposits(8, 9, 11).is_ok());
+    }
+
+    // returns an eth1 block that can be used to finalize the cache at `deposit_index`
+    // this will ensure the `deposit_root` on the `Eth1Block` is correct
+    fn fake_eth1_block(deposit_cache: &DepositCache, deposit_index: usize) -> Option<Eth1Block> {
+        let deposit_log = deposit_cache.get_log(deposit_index)?;
+        Some(Eth1Block {
+            hash: Hash256::from_low_u64_be(deposit_log.block_number),
+            timestamp: 0,
+            number: deposit_log.block_number,
+            deposit_root: deposit_cache.get_root(deposit_index + 1).cloned(),
+            deposit_count: Some(deposit_log.index + 1),
+        })
+    }
+
+    #[test]
+    fn test_finalization_boundaries() {
+        let n = 8;
+        let half = (n / 2) as usize;
+
+        let mut deposit_cache = get_cache_with_deposits(n as u64);
+
+        let full_root_before_finalization = deposit_cache.deposit_tree.root();
+        let half_log_plus1_before_finalization = deposit_cache
+            .get_log(half + 1)
+            .expect("log should exist")
+            .clone();
+        let half_root_plus1_before_finalization =
+            *deposit_cache.get_root(half + 1).expect("root should exist");
+
+        let (root_before_finalization, proof_before_finalization) = deposit_cache
+            .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64)
+            .expect("should return 1 deposit with proof");
+
+        // finalize on the tree at half
+        let half_block =
+            fake_eth1_block(&deposit_cache, half).expect("fake block should be created");
+        assert!(
+            deposit_cache.get_deposit_snapshot().is_none(),
+            "snapshot should not exist as tree has not been finalized"
+        );
+        deposit_cache
+            .finalize(half_block)
+            .expect("tree 
should_finalize"); + + // check boundary conditions for get_log + assert!( + deposit_cache.get_log(half).is_none(), + "log at finalized deposit should NOT exist" + ); + assert_eq!( + *deposit_cache.get_log(half + 1).expect("log should exist"), + half_log_plus1_before_finalization, + "log after finalized deposit should match before finalization" + ); + // check boundary conditions for get_root + assert!( + deposit_cache.get_root(half).is_none(), + "root at finalized deposit should NOT exist" + ); + assert_eq!( + *deposit_cache.get_root(half + 1).expect("root should exist"), + half_root_plus1_before_finalization, + "root after finalized deposit should match before finalization" + ); + // full root should match before and after finalization + assert_eq!( + deposit_cache.deposit_tree.root(), + full_root_before_finalization, + "full root should match before and after finalization" + ); + // check boundary conditions for get_deposits (proof) + assert!( + deposit_cache + .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) + .is_err(), + "cannot prove the finalized deposit" + ); + let (root_after_finalization, proof_after_finalization) = deposit_cache + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + assert_eq!( + root_before_finalization, root_after_finalization, + "roots before and after finalization should match" + ); + assert_eq!( + proof_before_finalization, proof_after_finalization, + "proof before and after finalization should match" + ); + + // recover tree from snapshot by replaying deposits + let snapshot = deposit_cache + .get_deposit_snapshot() + .expect("snapshot should exist"); + let mut recovered = DepositCache::from_deposit_snapshot(1, &snapshot) + .expect("should recover finalized tree"); + for i in half + 1..n { + let mut log = example_log(); + log.index = i as u64; + log.block_number = i as u64; + log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i as u64); + recovered + .insert_log(log) + .expect("should add consecutive logs"); + } + + // check the same boundary conditions above for the recovered tree + assert!( + recovered.get_log(half).is_none(), + "log at finalized deposit should NOT exist" + ); + assert_eq!( + *recovered.get_log(half + 1).expect("log should exist"), + half_log_plus1_before_finalization, + "log after finalized deposit should match before finalization in recovered tree" + ); + // check boundary conditions for get_root + assert!( + recovered.get_root(half).is_none(), + "root at finalized deposit should NOT exist" + ); + assert_eq!( + *recovered.get_root(half + 1).expect("root should exist"), + half_root_plus1_before_finalization, + "root after finalized deposit should match before finalization in recovered tree" + ); + // full root should match before and after finalization + assert_eq!( + recovered.deposit_tree.root(), + full_root_before_finalization, + "full root should match before and after finalization" + ); + // check boundary conditions for get_deposits (proof) + assert!( + recovered + .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) + .is_err(), + "cannot prove the finalized deposit" + ); + let (recovered_root_after_finalization, recovered_proof_after_finalization) = recovered + .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) + .expect("should return 1 deposit with proof"); + assert_eq!( + root_before_finalization, recovered_root_after_finalization, + "recovered roots before and after finalization should match" 
+ ); + assert_eq!( + proof_before_finalization, recovered_proof_after_finalization, + "recovered proof before and after finalization should match" + ); + } + + #[test] + fn test_finalization() { + let n = 1024; + let half = n / 2; + let quarter = half / 2; + let mut deposit_cache = get_cache_with_deposits(n); + + let full_root_before_finalization = deposit_cache.deposit_tree.root(); + let q3_root_before_finalization = deposit_cache + .get_root((half + quarter) as usize) + .cloned() + .expect("root should exist"); + let q3_log_before_finalization = deposit_cache + .get_log((half + quarter) as usize) + .cloned() + .expect("log should exist"); + // get_log(half+quarter) should return log with index `half+quarter` + assert_eq!( + q3_log_before_finalization.index, + (half + quarter) as u64, + "log index should be {}", + (half + quarter), + ); + + // get lower quarter of deposits with max deposit count + let (lower_quarter_root_before_finalization, lower_quarter_deposits_before_finalization) = + deposit_cache + .get_deposits(quarter, half, n) + .expect("should get lower quarter"); + assert_eq!( + lower_quarter_deposits_before_finalization.len(), + quarter as usize, + "should get {} deposits from lower quarter", + quarter, + ); + // since the lower quarter was done with full deposits, root should be the same as full_root_before_finalization + assert_eq!( + lower_quarter_root_before_finalization, full_root_before_finalization, + "should still get full root with deposit subset", + ); + + // get upper quarter of deposits with slightly reduced deposit count + let (upper_quarter_root_before_finalization, upper_quarter_deposits_before_finalization) = + deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + assert_eq!( + upper_quarter_deposits_before_finalization.len(), + quarter as usize, + "should get {} deposits from upper quarter", + quarter, + ); + // since upper quarter was with subset of nodes, it should differ from full root + assert_ne!( + full_root_before_finalization, upper_quarter_root_before_finalization, + "subtree root should differ from full root", + ); + + let f0_log = deposit_cache + .get_log((quarter - 1) as usize) + .cloned() + .expect("should return log"); + let f0_block = fake_eth1_block(&deposit_cache, (quarter - 1) as usize) + .expect("fake eth1 block should be created"); + + // finalize first quarter + deposit_cache + .finalize(f0_block) + .expect("should finalize first quarter"); + // finalized count and block number should match log + assert_eq!( + deposit_cache.finalized_deposit_count, + f0_log.index + 1, + "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", + ); + assert_eq!( + deposit_cache.finalized_block_height, + f0_log.block_number, + "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" + ); + // check get_log boundaries + assert!( + deposit_cache.get_log((quarter - 1) as usize).is_none(), + "get_log() should return None for index <= finalized log index", + ); + assert!( + deposit_cache.get_log(quarter as usize).is_some(), + "get_log() should return Some(log) for index >= finalized_deposit_count", + ); + + // full root should remain the same after finalization + assert_eq!( + full_root_before_finalization, + deposit_cache.deposit_tree.root(), + "root should be the same before and after finalization", + ); + // get_root should return the same root before and after finalization + assert_eq!( + q3_root_before_finalization, + deposit_cache + 
.get_root((half + quarter) as usize) + .cloned() + .expect("root should exist"), + "get_root should return the same root before and after finalization", + ); + // get_log should return the same log before and after finalization + assert_eq!( + q3_log_before_finalization, + deposit_cache + .get_log((half + quarter) as usize) + .cloned() + .expect("log should exist"), + "get_log should return the same log before and after finalization", + ); + + // again get lower quarter of deposits with max deposit count after finalization + let (f0_lower_quarter_root, f0_lower_quarter_deposits) = deposit_cache + .get_deposits(quarter, half, n) + .expect("should get lower quarter"); + assert_eq!( + f0_lower_quarter_deposits.len(), + quarter as usize, + "should get {} deposits from lower quarter", + quarter, + ); + // again get upper quarter of deposits with slightly reduced deposit count after finalization + let (f0_upper_quarter_root, f0_upper_quarter_deposits) = deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + assert_eq!( + f0_upper_quarter_deposits.len(), + quarter as usize, + "should get {} deposits from upper quarter", + quarter, + ); + + // lower quarter root and deposits should be the same + assert_eq!( + lower_quarter_root_before_finalization, f0_lower_quarter_root, + "root should be the same before and after finalization", + ); + for i in 0..lower_quarter_deposits_before_finalization.len() { + assert_eq!( + lower_quarter_deposits_before_finalization[i], f0_lower_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + // upper quarter root and deposits should be the same + assert_eq!( + upper_quarter_root_before_finalization, f0_upper_quarter_root, + "subtree root should be the same before and after finalization", + ); + for i in 0..upper_quarter_deposits_before_finalization.len() { + assert_eq!( + upper_quarter_deposits_before_finalization[i], f0_upper_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + + let f1_log = deposit_cache + .get_log((half - 2) as usize) + .cloned() + .expect("should return log"); + // finalize a little less than half to test multiple finalization + let f1_block = fake_eth1_block(&deposit_cache, (half - 2) as usize) + .expect("should create fake eth1 block"); + deposit_cache + .finalize(f1_block) + .expect("should finalize a little less than half"); + // finalized count and block number should match f1_log + assert_eq!( + deposit_cache.finalized_deposit_count, + f1_log.index + 1, + "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", + ); + assert_eq!( + deposit_cache.finalized_block_height, + f1_log.block_number, + "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" + ); + // check get_log boundaries + assert!( + deposit_cache.get_log((half - 2) as usize).is_none(), + "get_log() should return None for index <= finalized log index", + ); + assert!( + deposit_cache.get_log((half - 1) as usize).is_some(), + "get_log() should return Some(log) for index >= finalized_deposit_count", + ); + + // full root should still be unchanged + assert_eq!( + full_root_before_finalization, + deposit_cache.deposit_tree.root(), + "root should be the same before and after finalization", + ); + + // again get upper quarter of deposits with slightly reduced deposit count after second finalization + let (f1_upper_quarter_root, f1_upper_quarter_deposits) = 
deposit_cache + .get_deposits(half, half + quarter, n - 2) + .expect("should get upper quarter"); + + // upper quarter root and deposits should be the same after second finalization + assert_eq!( + f0_upper_quarter_root, f1_upper_quarter_root, + "subtree root should be the same after multiple finalization", + ); + for i in 0..f0_upper_quarter_deposits.len() { + assert_eq!( + f0_upper_quarter_deposits[i], f1_upper_quarter_deposits[i], + "get_deposits() should be the same before and after finalization", + ); + } + } + + fn verify_equality(original: &DepositCache, copy: &DepositCache) { + // verify each field individually so that if one field should + // fail to recover, this test will point right to it + assert_eq!(original.deposit_contract_deploy_block, copy.deposit_contract_deploy_block, "DepositCache: deposit_contract_deploy_block should remain the same after encoding and decoding from ssz" ); + assert_eq!( + original.leaves, copy.leaves, + "DepositCache: leaves should remain the same after encoding and decoding from ssz" + ); + assert_eq!( + original.logs, copy.logs, + "DepositCache: logs should remain the same after encoding and decoding from ssz" + ); + assert_eq!(original.finalized_deposit_count, copy.finalized_deposit_count, "DepositCache: finalized_deposit_count should remain the same after encoding and decoding from ssz"); + assert_eq!(original.finalized_block_height, copy.finalized_block_height, "DepositCache: finalized_block_height should remain the same after encoding and decoding from ssz"); + assert_eq!(original.deposit_roots, copy.deposit_roots, "DepositCache: deposit_roots should remain the same before and after encoding and decoding from ssz"); + assert!(original.deposit_tree == copy.deposit_tree, "DepositCache: deposit_tree should remain the same before and after encoding and decoding from ssz"); + // verify all together for good measure + assert!( + original == copy, + "Deposit cache should remain the same after encoding and decoding from ssz" + ); + } + + fn ssz_round_trip(original: &DepositCache) -> DepositCache { + use ssz::{Decode, Encode}; + let bytes = SszDepositCache::from_deposit_cache(original).as_ssz_bytes(); + let ssz_cache = + SszDepositCache::from_ssz_bytes(&bytes).expect("should decode from ssz bytes"); + + SszDepositCache::to_deposit_cache(&ssz_cache).expect("should recover cache") + } + + #[test] + fn ssz_encode_decode() { + let deposit_cache = get_cache_with_deposits(512); + let recovered_cache = ssz_round_trip(&deposit_cache); + + verify_equality(&deposit_cache, &recovered_cache); + } + + #[test] + fn ssz_encode_decode_with_finalization() { + let mut deposit_cache = get_cache_with_deposits(512); + let block383 = fake_eth1_block(&deposit_cache, 383).expect("should create fake eth1 block"); + deposit_cache.finalize(block383).expect("should finalize"); + let mut first_recovery = ssz_round_trip(&deposit_cache); + + verify_equality(&deposit_cache, &first_recovery); + // finalize again to verify equality after multiple finalizations + let block447 = fake_eth1_block(&deposit_cache, 447).expect("should create fake eth1 block"); + first_recovery.finalize(block447).expect("should finalize"); + + let mut second_recovery = ssz_round_trip(&first_recovery); + verify_equality(&first_recovery, &second_recovery); + + // verify equality of a tree that finalized block383, block447, block479 + // with a tree that finalized block383, block479 + let block479 = fake_eth1_block(&deposit_cache, 479).expect("should create fake eth1 block"); + second_recovery + 
.finalize(block479.clone())
+            .expect("should finalize");
+        let third_recovery = ssz_round_trip(&second_recovery);
+        deposit_cache.finalize(block479).expect("should finalize");
+
+        verify_equality(&deposit_cache, &third_recovery);
+    }
 }
diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs
index b0a951bef..0468a02d2 100644
--- a/beacon_node/eth1/src/inner.rs
+++ b/beacon_node/eth1/src/inner.rs
@@ -2,14 +2,15 @@ use crate::service::endpoint_from_config;
 use crate::Config;
 use crate::{
     block_cache::{BlockCache, Eth1Block},
-    deposit_cache::{DepositCache, SszDepositCache},
+    deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13},
 };
 use execution_layer::HttpJsonRpc;
 use parking_lot::RwLock;
 use ssz::four_byte_option_impl;
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
-use types::ChainSpec;
+use superstruct::superstruct;
+use types::{ChainSpec, DepositTreeSnapshot, Eth1Data};
 // Define "legacy" implementations of `Option<u64>` which use four bytes for encoding the union
 // selector.
@@ -29,12 +30,25 @@ impl DepositUpdater {
             last_processed_block: None,
         }
     }
+
+    pub fn from_snapshot(
+        deposit_contract_deploy_block: u64,
+        snapshot: &DepositTreeSnapshot,
+    ) -> Result<Self, String> {
+        let last_processed_block = Some(snapshot.execution_block_height);
+        Ok(Self {
+            cache: DepositCache::from_deposit_snapshot(deposit_contract_deploy_block, snapshot)?,
+            last_processed_block,
+        })
+    }
 }
 pub struct Inner {
     pub block_cache: RwLock<BlockCache>,
     pub deposit_cache: RwLock<DepositUpdater>,
     pub endpoint: HttpJsonRpc,
+    // This is set to `Some(Eth1Data)` when the deposit finalization conditions are met.
+    pub to_finalize: RwLock<Option<Eth1Data>>,
     pub config: RwLock<Config>,
     pub remote_head_block: RwLock<Option<Eth1Block>>,
     pub spec: ChainSpec,
@@ -58,9 +72,13 @@ impl Inner {
     /// Recover `Inner` given byte representation of eth1 deposit and block caches.
     pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result<Self, String> {
-        let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes)
-            .map_err(|e| format!("Ssz decoding error: {:?}", e))?;
-        ssz_cache.to_inner(config, spec)
+        SszEth1Cache::from_ssz_bytes(bytes)
+            .map_err(|e| format!("Ssz decoding error: {:?}", e))?
+            .to_inner(config, spec)
+            .map(|inner| {
+                inner.block_cache.write().rebuild_by_hash_map();
+                inner
+            })
     }
     /// Returns a reference to the specification.
@@ -69,12 +87,21 @@ impl Inner {
     }
 }
-#[derive(Encode, Decode, Clone)]
+pub type SszEth1Cache = SszEth1CacheV13;
+
+#[superstruct(
+    variants(V1, V13),
+    variant_attributes(derive(Encode, Decode, Clone)),
+    no_enum
+)]
 pub struct SszEth1Cache {
-    block_cache: BlockCache,
-    deposit_cache: SszDepositCache,
+    pub block_cache: BlockCache,
+    #[superstruct(only(V1))]
+    pub deposit_cache: SszDepositCacheV1,
+    #[superstruct(only(V13))]
+    pub deposit_cache: SszDepositCacheV13,
     #[ssz(with = "four_byte_option_u64")]
-    last_processed_block: Option<u64>,
+    pub last_processed_block: Option<u64>,
 }
 impl SszEth1Cache {
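For context, a rough sketch of how the two superstruct variants are meant to be used during the schema upgrade. The actual migration lives in `migration_schema_v13.rs`; the decode-then-upgrade shape below is illustrative only, and the `upgrade` helper is a hypothetical stand-in for the real conversion logic:

```rust
use ssz::Decode;

// A pre-v13 database stores an `SszEth1CacheV1`. It can still be decoded and
// then repackaged as a V13 cache whose deposit cache carries finalization data.
fn upgrade_eth1_cache(bytes: &[u8]) -> Result<SszEth1CacheV13, String> {
    let old = SszEth1CacheV1::from_ssz_bytes(bytes).map_err(|e| format!("{:?}", e))?;
    Ok(SszEth1CacheV13 {
        block_cache: old.block_cache,
        // Hypothetical helper: the V1 deposit cache has no finalization fields,
        // so they are initialized to their zero values during the upgrade.
        deposit_cache: upgrade_deposit_cache(old.deposit_cache),
        last_processed_block: old.last_processed_block,
    })
}
```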
@@ -97,6 +124,7 @@ impl SszEth1Cache {
             }),
             endpoint: endpoint_from_config(&config)
                 .map_err(|e| format!("Failed to create endpoint: {:?}", e))?,
+            to_finalize: RwLock::new(None),
             // Set the remote head_block zero when creating a new instance. We only care about
             // present and future eth1 nodes.
             remote_head_block: RwLock::new(None),
diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs
index f99d08525..3b288de49 100644
--- a/beacon_node/eth1/src/lib.rs
+++ b/beacon_node/eth1/src/lib.rs
@@ -8,9 +8,9 @@ mod metrics;
 mod service;
 pub use block_cache::{BlockCache, Eth1Block};
-pub use deposit_cache::DepositCache;
+pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13};
 pub use execution_layer::http::deposit_log::DepositLog;
-pub use inner::SszEth1Cache;
+pub use inner::{SszEth1Cache, SszEth1CacheV1, SszEth1CacheV13};
 pub use service::{
 BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service,
 DEFAULT_CHAIN_ID,
diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs
index c6b87e88e..f24b746cd 100644
--- a/beacon_node/eth1/src/service.rs
+++ b/beacon_node/eth1/src/service.rs
@@ -20,7 +20,7 @@ use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::{SystemTime, UNIX_EPOCH};
 use tokio::time::{interval_at, Duration, Instant};
-use types::{ChainSpec, EthSpec, Unsigned};
+use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned};
 /// Indicates the default eth1 chain id we use for the deposit contract.
 pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli;
@@ -63,7 +63,13 @@ async fn endpoint_state(
     config_chain_id: &Eth1Id,
     log: &Logger,
 ) -> EndpointState {
-    let error_connecting = |e| {
+    let error_connecting = |e: String| {
+        debug!(
+            log,
+            "eth1 endpoint error";
+            "endpoint" => %endpoint,
+            "error" => &e,
+        );
         warn!(
             log,
             "Error connecting to eth1 node endpoint";
@@ -213,6 +219,10 @@ pub enum Error {
     GetDepositLogsFailed(String),
     /// There was an unexpected internal error.
     Internal(String),
+    /// Error finalizing deposit
+    FailedToFinalizeDeposit(String),
+    /// There was a problem initializing from a deposit snapshot
+    FailedToInitializeFromSnapshot(String),
 }
 /// The success message for an Eth1Data cache update.
@@ -395,6 +405,7 @@ impl Service {
                     config.deposit_contract_deploy_block,
                 )),
                 endpoint: endpoint_from_config(&config)?,
+                to_finalize: RwLock::new(None),
                 remote_head_block: RwLock::new(None),
                 config: RwLock::new(config),
                 spec,
@@ -407,6 +418,36 @@ impl Service {
         &self.inner.endpoint
     }
+    /// Creates a new service, initializing the deposit tree from a snapshot.
+    pub fn from_deposit_snapshot(
+        config: Config,
+        log: Logger,
+        spec: ChainSpec,
+        deposit_snapshot: &DepositTreeSnapshot,
+    ) -> Result<Self, Error> {
+        let deposit_cache =
+            DepositUpdater::from_snapshot(config.deposit_contract_deploy_block, deposit_snapshot)
+                .map_err(Error::FailedToInitializeFromSnapshot)?;
+
+        Ok(Self {
+            inner: Arc::new(Inner {
+                block_cache: <_>::default(),
+                deposit_cache: RwLock::new(deposit_cache),
+                endpoint: endpoint_from_config(&config)
+                    .map_err(Error::FailedToInitializeFromSnapshot)?,
+                to_finalize: RwLock::new(None),
+                remote_head_block: RwLock::new(None),
+                config: RwLock::new(config),
+                spec,
+            }),
+            log,
+        })
+    }
+
+    pub fn set_to_finalize(&self, eth1_data: Option<Eth1Data>) {
+        *(self.inner.to_finalize.write()) = eth1_data;
+    }
+
     /// Returns the follow distance that has been shortened to accommodate for differences in the
     /// spacing between blocks.
     ///
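The intended call pattern for `set_to_finalize` (a sketch, not part of the diff): the beacon chain decides when a finalized checkpoint's `Eth1Data` is safe to finalize in the deposit cache and merely signals the eth1 service, which performs the actual pruning on its next update cycle.

```rust
// Illustrative wiring. `eth1_service` is the `Service` above;
// `finalized_eth1_data` and `imported_count` are hypothetical names for values
// produced by the beacon chain's finalization logic.
if finalized_eth1_data.deposit_count <= imported_count {
    // All deposits of this Eth1Data have been imported, so it is safe to
    // finalize the deposit cache up to it on the next update cycle.
    eth1_service.set_to_finalize(Some(finalized_eth1_data));
}
```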
@@ -521,7 +562,7 @@ impl Service {
         let deposits = self.deposits().read();
         deposits
             .cache
-            .get_valid_signature_count(deposits.cache.latest_block_number()?)
+            .get_valid_signature_count(deposits.cache.latest_block_number())
     }
     /// Returns the number of deposits with valid signatures that have been observed up to and
@@ -619,7 +660,8 @@ impl Service {
                     "old_block_number" => deposit_cache.last_processed_block,
                     "new_block_number" => deposit_cache.cache.latest_block_number(),
                 );
-                deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number();
+                deposit_cache.last_processed_block =
+                    Some(deposit_cache.cache.latest_block_number());
             }
             let outcome =
@@ -698,6 +740,37 @@ impl Service {
                 "deposits" => format!("{:?}", deposit),
             ),
         };
+        let optional_eth1data = self.inner.to_finalize.write().take();
+        if let Some(eth1data_to_finalize) = optional_eth1data {
+            let already_finalized = self
+                .inner
+                .deposit_cache
+                .read()
+                .cache
+                .finalized_deposit_count();
+            let deposit_count_to_finalize = eth1data_to_finalize.deposit_count;
+            if deposit_count_to_finalize > already_finalized {
+                match self.finalize_deposits(eth1data_to_finalize) {
+                    Err(e) => error!(
+                        self.log,
+                        "Failed to finalize deposit cache";
+                        "error" => ?e,
+                    ),
+                    Ok(()) => info!(
+                        self.log,
+                        "Successfully finalized deposit tree";
+                        "finalized deposit count" => deposit_count_to_finalize,
+                    ),
+                }
+            } else {
+                debug!(
+                    self.log,
+                    "Deposits tree already finalized";
+                    "already_finalized" => already_finalized,
+                    "deposit_count_to_finalize" => deposit_count_to_finalize,
+                );
+            }
+        }
         Ok(())
     }
@@ -733,6 +806,30 @@ impl Service {
         )
     }
+    pub fn finalize_deposits(&self, eth1_data: Eth1Data) -> Result<(), Error> {
+        let eth1_block = self
+            .inner
+            .block_cache
+            .read()
+            .block_by_hash(&eth1_data.block_hash)
+            .cloned()
+            .ok_or_else(|| {
+                Error::FailedToFinalizeDeposit(
+                    "Finalized block not found in block cache".to_string(),
+                )
+            })?;
+        self.inner
+            .deposit_cache
+            .write()
+            .cache
+            .finalize(eth1_block)
+            .map_err(|e| Error::FailedToFinalizeDeposit(format!("{:?}", e)))
+    }
+
+    pub fn get_deposit_snapshot(&self) -> Option<DepositTreeSnapshot> {
+        self.inner.deposit_cache.read().cache.get_deposit_snapshot()
+    }
+
     /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured
     /// follow-distance block.
     ///
diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs
index 7e58f07e2..069a6e4aa 100644
--- a/beacon_node/eth1/tests/test.rs
+++ b/beacon_node/eth1/tests/test.rs
@@ -400,7 +400,7 @@ mod deposit_tree {
                 .deposits()
                 .read()
                 .cache
-                .get_deposits(first, last, last, 32)
+                .get_deposits(first, last, last)
                 .unwrap_or_else(|_| panic!("should get deposits in round {}", round));
             assert_eq!(
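The test hunks around here show the shape of the updated proof API: callers no longer pass a tree depth, and the call is fallible. A minimal call-site sketch, with names assumed from the tests above:

```rust
// `cache` is a `DepositCache`. The tree depth is now always
// `DEPOSIT_TREE_DEPTH` internally rather than a caller-supplied argument.
let (deposit_root, deposits) = cache.get_deposits(first, last, deposit_count)?;
assert_eq!(deposits.len(), (last - first) as usize);
```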
@@ -551,7 +551,7 @@ mod deposit_tree {
             // Ensure that the root from the deposit tree matches what the contract reported.
             let (root, deposits) = tree
-                .get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH)
+                .get_deposits(0, i as u64, deposit_counts[i])
                 .expect("should get deposits");
             assert_eq!(
                 root, deposit_roots[i],
diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs
index be68c37b0..745366301 100644
--- a/beacon_node/execution_layer/src/engine_api/http.rs
+++ b/beacon_node/execution_layer/src/engine_api/http.rs
@@ -208,6 +208,7 @@ pub mod deposit_methods {
     #[derive(Clone, Copy)]
     pub enum BlockQuery {
         Number(u64),
+        Hash(Hash256),
         Latest,
     }
@@ -322,9 +323,12 @@ pub mod deposit_methods {
         query: BlockQuery,
         timeout: Duration,
     ) -> Result<Block, String> {
-        let query_param = match query {
-            BlockQuery::Number(block_number) => format!("0x{:x}", block_number),
-            BlockQuery::Latest => "latest".to_string(),
+        let (method, query_param) = match query {
+            BlockQuery::Number(block_number) => {
+                ("eth_getBlockByNumber", format!("0x{:x}", block_number))
+            }
+            BlockQuery::Hash(block_hash) => ("eth_getBlockByHash", format!("{:?}", block_hash)),
+            BlockQuery::Latest => ("eth_getBlockByNumber", "latest".to_string()),
         };
         let params = json!([
             query_param,
@@ -332,9 +336,9 @@ pub mod deposit_methods {
         ]);
         let response: Value = self
-            .rpc_request("eth_getBlockByNumber", params, timeout)
+            .rpc_request(method, params, timeout)
             .await
-            .map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?;
+            .map_err(|e| format!("{} call failed {:?}", method, e))?;
         let hash: Vec<u8> = hex_to_bytes(
             response
diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs
index 0d483f983..06bf99f9f 100644
--- a/beacon_node/genesis/src/common.rs
+++ b/beacon_node/genesis/src/common.rs
@@ -23,7 +23,9 @@ pub fn genesis_deposits(
             return Err(String::from("Failed to push leaf"));
         }
-        let (_, mut proof) = tree.generate_proof(i, depth);
+        let (_, mut proof) = tree
+            .generate_proof(i, depth)
+            .map_err(|e| format!("Error generating merkle proof: {:?}", e))?;
         proof.push(Hash256::from_slice(&int_to_fixed_bytes32((i + 1) as u64)));
         assert_eq!(
diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs
index 5614e237f..b7134e37c 100644
--- a/beacon_node/genesis/src/eth1_genesis_service.rs
+++ b/beacon_node/genesis/src/eth1_genesis_service.rs
@@ -86,7 +86,7 @@ impl Eth1GenesisService {
             .deposits()
             .read()
             .cache
-            .get(min_genesis_active_validator_count.saturating_sub(1))
+            .get_log(min_genesis_active_validator_count.saturating_sub(1))
             .map(|log| log.block_number)
     }
 }
diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 5b4fa5816..4267a22f9 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -1533,6 +1533,53 @@ pub fn serve(
         },
     );
+    // GET beacon/deposit_snapshot
+    let get_beacon_deposit_snapshot = eth_v1
+        .and(warp::path("beacon"))
+        .and(warp::path("deposit_snapshot"))
+        .and(warp::path::end())
+        .and(warp::header::optional::<api_types::Accept>("accept"))
+        .and(eth1_service_filter.clone())
+        .and_then(
+            |accept_header: Option<api_types::Accept>, eth1_service: eth1::Service| {
+                blocking_task(move || match accept_header {
+                    Some(api_types::Accept::Json) | None => {
+                        let snapshot = eth1_service.get_deposit_snapshot();
+                        Ok(
+                            warp::reply::json(&api_types::GenericResponse::from(snapshot))
+                                .into_response(),
+                        )
+                    }
+                    _ => eth1_service
+                        .get_deposit_snapshot()
+                        .map(|snapshot| {
+                            Response::builder()
+                                .status(200)
+                                .header("Content-Type", "application/octet-stream")
.body(snapshot.as_ssz_bytes().into())
+                                .map_err(|e| {
+                                    warp_utils::reject::custom_server_error(format!(
+                                        "failed to create response: {}",
+                                        e
+                                    ))
+                                })
+                        })
+                        .unwrap_or_else(|| {
+                            Response::builder()
+                                .status(503)
+                                .header("Content-Type", "application/octet-stream")
+                                .body(Vec::new().into())
+                                .map_err(|e| {
+                                    warp_utils::reject::custom_server_error(format!(
+                                        "failed to create response: {}",
+                                        e
+                                    ))
+                                })
+                        }),
+                })
+            },
+        );
+
     /*
      * config
      */
@@ -3120,6 +3167,7 @@ pub fn serve(
                 .or(get_beacon_pool_attester_slashings.boxed())
                 .or(get_beacon_pool_proposer_slashings.boxed())
                 .or(get_beacon_pool_voluntary_exits.boxed())
+                .or(get_beacon_deposit_snapshot.boxed())
                 .or(get_config_fork_schedule.boxed())
                 .or(get_config_spec.boxed())
                 .or(get_config_deposit_contract.boxed())
diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs
index 4f35c4c07..5cb3f1220 100644
--- a/beacon_node/store/src/metadata.rs
+++ b/beacon_node/store/src/metadata.rs
@@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use types::{Checkpoint, Hash256, Slot};
-pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(12);
+pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13);
 // All the keys that get stored under the `BeaconMeta` column.
 //
diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs
index 104ca9ccd..a2fb082a3 100644
--- a/common/eth2/src/lib.rs
+++ b/common/eth2/src/lib.rs
@@ -114,6 +114,7 @@ pub struct Timeouts {
     pub sync_duties: Duration,
     pub get_beacon_blocks_ssz: Duration,
     pub get_debug_beacon_states: Duration,
+    pub get_deposit_snapshot: Duration,
 }
 impl Timeouts {
@@ -128,6 +129,7 @@
             sync_duties: timeout,
             get_beacon_blocks_ssz: timeout,
             get_debug_beacon_states: timeout,
+            get_deposit_snapshot: timeout,
         }
     }
 }
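A hedged usage sketch of the route above together with the client method added in the next hunk (URL and timeout values are illustrative): the client sends `Accept: application/octet-stream` and decodes the SSZ body, and a node without a finalized snapshot yields `Ok(None)`, matching the 503 path above.

```rust
// Illustrative client call, assuming a beacon node listening on localhost.
let client = BeaconNodeHttpClient::new(
    SensitiveUrl::parse("http://localhost:5052")?,
    Timeouts::set_all(Duration::from_secs(5)),
);
match client.get_deposit_snapshot().await? {
    Some(snapshot) => assert!(snapshot.is_valid()),
    None => eprintln!("beacon node has not finalized any deposits yet"),
}
```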
@@ -913,6 +915,20 @@ impl BeaconNodeHttpClient {
         Ok(())
     }
+    /// `GET beacon/deposit_snapshot`
+    pub async fn get_deposit_snapshot(&self) -> Result<Option<DepositTreeSnapshot>, Error> {
+        use ssz::Decode;
+        let mut path = self.eth_path(V1)?;
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("beacon")
+            .push("deposit_snapshot");
+        self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot)
+            .await?
+            .map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
+            .transpose()
+    }
+
     /// `POST validator/contribution_and_proofs`
     pub async fn post_validator_contribution_and_proofs(
         &self,
diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs
index 91e6a5558..2dced1c44 100644
--- a/common/eth2/src/lighthouse.rs
+++ b/common/eth2/src/lighthouse.rs
@@ -6,7 +6,10 @@ mod block_rewards;
 use crate::{
     ok_or_error,
-    types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId},
+    types::{
+        BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock,
+        GenericResponse, ValidatorId,
+    },
     BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode,
 };
 use proto_array::core::ProtoArray;
@@ -331,6 +334,19 @@ impl Eth1Block {
     }
 }
+impl From<Eth1Block> for FinalizedExecutionBlock {
+    fn from(eth1_block: Eth1Block) -> Self {
+        Self {
+            deposit_count: eth1_block.deposit_count.unwrap_or(0),
+            deposit_root: eth1_block
+                .deposit_root
+                .unwrap_or_else(|| DepositTreeSnapshot::default().deposit_root),
+            block_hash: eth1_block.hash,
+            block_height: eth1_block.number,
+        }
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct DatabaseInfo {
     pub schema_version: u64,
diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs
index da9b78ff1..887deb1ef 100644
--- a/consensus/merkle_proof/src/lib.rs
+++ b/consensus/merkle_proof/src/lib.rs
@@ -19,6 +19,8 @@ lazy_static! {
 /// indices are populated by non-zero leaves (perfect for the deposit contract tree).
 #[derive(Debug, PartialEq)]
 pub enum MerkleTree {
+    /// Finalized Node
+    Finalized(H256),
     /// Leaf node with the hash of its content.
     Leaf(H256),
     /// Internal node with hash, left subtree and right subtree.
@@ -41,6 +43,24 @@ pub enum MerkleTreeError {
     DepthTooSmall,
     // Overflow occurred
     ArithError,
+    // Can't finalize a zero node
+    ZeroNodeFinalized,
+    // Can't push to finalized node
+    FinalizedNodePushed,
+    // Invalid Snapshot
+    InvalidSnapshot(InvalidSnapshot),
+    // Can't prove a finalized node
+    ProofEncounteredFinalizedNode,
+    // This should never happen
+    PleaseNotifyTheDevs,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum InvalidSnapshot {
+    // Branch hashes are empty but deposits are not
+    EmptyBranchWithNonZeroDeposits(usize),
+    // End of tree reached but deposits != 1
+    EndOfTree,
 }
 impl MerkleTree {
@@ -97,9 +117,11 @@ impl MerkleTree {
                 let right: &mut MerkleTree = &mut *right;
                 match (&*left, &*right) {
                     // Tree is full
-                    (Leaf(_), Leaf(_)) => return Err(MerkleTreeError::MerkleTreeFull),
+                    (Leaf(_), Leaf(_)) | (Finalized(_), Leaf(_)) => {
+                        return Err(MerkleTreeError::MerkleTreeFull)
+                    }
                     // There is a right node so insert in right node
-                    (Node(_, _, _), Node(_, _, _)) => {
+                    (Node(_, _, _), Node(_, _, _)) | (Finalized(_), Node(_, _, _)) => {
                         right.push_leaf(elem, depth - 1)?;
                     }
                     // Both branches are zero, insert in left one
@@ -107,7 +129,7 @@ impl MerkleTree {
                         *left = MerkleTree::create(&[elem], depth - 1);
                     }
                     // Leaf on left branch and zero on right branch, insert on right side
-                    (Leaf(_), Zero(_)) => {
+                    (Leaf(_), Zero(_)) | (Finalized(_), Zero(_)) => {
                         *right = MerkleTree::create(&[elem], depth - 1);
                     }
                     // Try inserting on the left node -> if it fails because it is full, insert in right side.
@@ -129,6 +151,7 @@ impl MerkleTree {
                     right.hash().as_bytes(),
                 ));
             }
+            Finalized(_) => return Err(MerkleTreeError::FinalizedNodePushed),
         }
         Ok(())
     }
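The behavioural contract of the new `Finalized` variant in miniature, using the finalization API added in the hunks just below (a sketch in the spirit of this crate's tests; `H256` comes from `ethereum_types`):

```rust
// A depth-1 tree with both leaves finalized collapses to a single hash:
// further pushes and proofs against pruned indices are rejected.
let leaves = vec![H256::from_low_u64_be(1), H256::from_low_u64_be(2)];
let mut tree = MerkleTree::create(&leaves, 1);
tree.finalize_deposits(2, 1).unwrap();
assert_eq!(
    tree.push_leaf(H256::zero(), 1),
    Err(MerkleTreeError::FinalizedNodePushed)
);
assert_eq!(
    tree.generate_proof(0, 1),
    Err(MerkleTreeError::ProofEncounteredFinalizedNode)
);
```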
@@ -137,6 +160,7 @@
     /// Retrieve the root hash of this Merkle tree.
     pub fn hash(&self) -> H256 {
         match *self {
+            MerkleTree::Finalized(h) => h,
             MerkleTree::Leaf(h) => h,
             MerkleTree::Node(h, _, _) => h,
             MerkleTree::Zero(depth) => H256::from_slice(&ZERO_HASHES[depth]),
@@ -146,7 +170,7 @@
     /// Get a reference to the left and right subtrees if they exist.
     pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> {
         match *self {
-            MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None,
+            MerkleTree::Finalized(_) | MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None,
             MerkleTree::Node(_, ref l, ref r) => Some((l, r)),
             MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])),
         }
@@ -157,16 +181,125 @@ impl MerkleTree {
         matches!(self, MerkleTree::Leaf(_))
     }
+    /// Finalize deposits up to deposit with count = deposits_to_finalize
+    pub fn finalize_deposits(
+        &mut self,
+        deposits_to_finalize: usize,
+        level: usize,
+    ) -> Result<(), MerkleTreeError> {
+        match self {
+            MerkleTree::Finalized(_) => Ok(()),
+            MerkleTree::Zero(_) => Err(MerkleTreeError::ZeroNodeFinalized),
+            MerkleTree::Leaf(hash) => {
+                if level != 0 {
+                    // This shouldn't happen but this is a sanity check
+                    return Err(MerkleTreeError::PleaseNotifyTheDevs);
+                }
+                *self = MerkleTree::Finalized(*hash);
+                Ok(())
+            }
+            MerkleTree::Node(hash, left, right) => {
+                if level == 0 {
+                    // this shouldn't happen but we'll put it here for safety
+                    return Err(MerkleTreeError::PleaseNotifyTheDevs);
+                }
+                let deposits = 0x1 << level;
+                if deposits <= deposits_to_finalize {
+                    *self = MerkleTree::Finalized(*hash);
+                    return Ok(());
+                }
+                left.finalize_deposits(deposits_to_finalize, level - 1)?;
+                if deposits_to_finalize > deposits / 2 {
+                    let remaining = deposits_to_finalize - deposits / 2;
+                    right.finalize_deposits(remaining, level - 1)?;
+                }
+                Ok(())
+            }
+        }
+    }
+
+    fn append_finalized_hashes(&self, result: &mut Vec<H256>) {
+        match self {
+            MerkleTree::Zero(_) | MerkleTree::Leaf(_) => {}
+            MerkleTree::Finalized(h) => result.push(*h),
+            MerkleTree::Node(_, left, right) => {
+                left.append_finalized_hashes(result);
+                right.append_finalized_hashes(result);
+            }
+        }
+    }
+
+    pub fn get_finalized_hashes(&self) -> Vec<H256> {
+        let mut result = vec![];
+        self.append_finalized_hashes(&mut result);
+        result
+    }
+
+    pub fn from_finalized_snapshot(
+        finalized_branch: &[H256],
+        deposit_count: usize,
+        level: usize,
+    ) -> Result<Self, MerkleTreeError> {
+        if finalized_branch.is_empty() {
+            return if deposit_count == 0 {
+                Ok(MerkleTree::Zero(level))
+            } else {
+                Err(InvalidSnapshot::EmptyBranchWithNonZeroDeposits(deposit_count).into())
+            };
+        }
+        if deposit_count == (0x1 << level) {
+            return Ok(MerkleTree::Finalized(
+                *finalized_branch
+                    .get(0)
+                    .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?,
+            ));
+        }
+        if level == 0 {
+            return Err(InvalidSnapshot::EndOfTree.into());
+        }
+
+        let (left, right) = match deposit_count.checked_sub(0x1 << (level - 1)) {
+            // left tree is fully finalized
+            Some(right_deposits) => {
+                let (left_hash, right_branch) = finalized_branch
+                    .split_first()
+                    .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?;
+                (
+                    MerkleTree::Finalized(*left_hash),
+                    MerkleTree::from_finalized_snapshot(right_branch, right_deposits, level - 1)?,
+                )
+            }
+            // left tree is not fully finalized -> right tree is zero
+            None => (
+                MerkleTree::from_finalized_snapshot(finalized_branch, deposit_count, level - 1)?,
+                MerkleTree::Zero(level - 1),
+            ),
+        };
+
+        let hash = H256::from_slice(&hash32_concat(
+            left.hash().as_bytes(),
+            right.hash().as_bytes(),
+        ));
+        Ok(MerkleTree::Node(hash, Box::new(left), Box::new(right)))
+    }
+
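To make the snapshot encoding concrete: a finalized count of `n` stores exactly one subtree root per set bit of `n`, ordered left to right. A round-trip sketch under small assumed parameters:

```rust
// Six leaves finalized => 6 = 0b110 => two stored hashes: the root of the
// full subtree of 4 leaves, then the root of the following full subtree of 2.
let leaves: Vec<H256> = (1..=6).map(H256::from_low_u64_be).collect();
let depth = 3;
let mut tree = MerkleTree::create(&leaves, depth);
tree.finalize_deposits(6, depth).unwrap();
let branch = tree.get_finalized_hashes();
assert_eq!(branch.len(), 2);
// Rebuilding from just those hashes plus the count yields the same root.
let rebuilt = MerkleTree::from_finalized_snapshot(&branch, 6, depth).unwrap();
assert_eq!(tree.hash(), rebuilt.hash());
```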
/// Return the leaf at `index` and a Merkle proof of its inclusion.
     ///
     /// The Merkle proof is in "bottom-up" order, starting with a leaf node
     /// and moving up the tree. Its length will be exactly equal to `depth`.
-    pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec<H256>) {
+    pub fn generate_proof(
+        &self,
+        index: usize,
+        depth: usize,
+    ) -> Result<(H256, Vec<H256>), MerkleTreeError> {
         let mut proof = vec![];
         let mut current_node = self;
         let mut current_depth = depth;
         while current_depth > 0 {
             let ith_bit = (index >> (current_depth - 1)) & 0x01;
+            if let &MerkleTree::Finalized(_) = current_node {
+                return Err(MerkleTreeError::ProofEncounteredFinalizedNode);
+            }
             // Note: unwrap is safe because leaves are only ever constructed at depth == 0.
             let (left, right) = current_node.left_and_right_branches().unwrap();
@@ -187,7 +320,33 @@ impl MerkleTree {
         // Put proof in bottom-up order.
         proof.reverse();
-        (current_node.hash(), proof)
+        Ok((current_node.hash(), proof))
+    }
+
+    /// useful for debugging
+    pub fn print_node(&self, mut space: u32) {
+        const SPACES: u32 = 10;
+        space += SPACES;
+        let (pair, text) = match self {
+            MerkleTree::Node(hash, left, right) => (Some((left, right)), format!("Node({})", hash)),
+            MerkleTree::Leaf(hash) => (None, format!("Leaf({})", hash)),
+            MerkleTree::Zero(depth) => (
+                None,
+                format!("Z[{}]({})", depth, H256::from_slice(&ZERO_HASHES[*depth])),
+            ),
+            MerkleTree::Finalized(hash) => (None, format!("Finl({})", hash)),
+        };
+        if let Some((_, right)) = pair {
+            right.print_node(space);
+        }
+        println!();
+        for _i in SPACES..space {
+            print!(" ");
+        }
+        println!("{}", text);
+        if let Some((left, _)) = pair {
+            left.print_node(space);
+        }
     }
 }
@@ -235,6 +394,12 @@ impl From<ArithError> for MerkleTreeError {
     }
 }
+impl From<InvalidSnapshot> for MerkleTreeError {
+    fn from(e: InvalidSnapshot) -> Self {
+        MerkleTreeError::InvalidSnapshot(e)
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -255,7 +420,9 @@ mod tests {
         let merkle_root = merkle_tree.hash();
         let proofs_ok = (0..leaves.len()).all(|i| {
-            let (leaf, branch) = merkle_tree.generate_proof(i, depth);
+            let (leaf, branch) = merkle_tree
+                .generate_proof(i, depth)
+                .expect("should generate proof");
             leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root)
         });
@@ -274,7 +441,9 @@ mod tests {
         let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| {
             assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(()));
-            let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth);
+            let (stored_leaf, branch) = merkle_tree
+                .generate_proof(i, depth)
+                .expect("should generate proof");
             stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash())
         });
diff --git a/consensus/ssz/src/decode/impls.rs b/consensus/ssz/src/decode/impls.rs
index d91ddabe0..76d85f775 100644
--- a/consensus/ssz/src/decode/impls.rs
+++ b/consensus/ssz/src/decode/impls.rs
@@ -246,6 +246,20 @@ impl Decode for NonZeroUsize {
     }
 }
+impl<T: Decode> Decode for Option<T> {
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        let (selector, body) = split_union_bytes(bytes)?;
+        match selector.into() {
+            0u8 => Ok(None),
+            1u8 => <T as Decode>::from_ssz_bytes(body).map(Option::Some),
+            other => Err(DecodeError::UnionSelectorInvalid(other)),
+        }
+    }
+}
+
 impl<T: Decode> Decode for Arc<T> {
     fn is_ssz_fixed_len() -> bool {
         T::is_ssz_fixed_len()
diff --git a/consensus/ssz/src/encode/impls.rs b/consensus/ssz/src/encode/impls.rs
index cfd95ba40..833480e1b 100644
--- a/consensus/ssz/src/encode/impls.rs
+++ b/consensus/ssz/src/encode/impls.rs
@@ 
-203,6 +203,34 @@ impl_encode_for_tuples! {
     }
 }
+impl<T: Encode> Encode for Option<T> {
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        match self {
+            Option::None => {
+                let union_selector: u8 = 0u8;
+                buf.push(union_selector);
+            }
+            Option::Some(ref inner) => {
+                let union_selector: u8 = 1u8;
+                buf.push(union_selector);
+                inner.ssz_append(buf);
+            }
+        }
+    }
+    fn ssz_bytes_len(&self) -> usize {
+        match self {
+            Option::None => 1usize,
+            Option::Some(ref inner) => inner
+                .ssz_bytes_len()
+                .checked_add(1)
+                .expect("encoded length must be less than usize::max_value"),
+        }
+    }
+}
+
 impl<T: Encode> Encode for Arc<T> {
     fn is_ssz_fixed_len() -> bool {
         T::is_ssz_fixed_len()
@@ -561,6 +589,14 @@ mod tests {
         );
     }
+    #[test]
+    fn ssz_encode_option_u8() {
+        let opt: Option<u8> = None;
+        assert_eq!(opt.as_ssz_bytes(), vec![0]);
+        let opt: Option<u8> = Some(2);
+        assert_eq!(opt.as_ssz_bytes(), vec![1, 2]);
+    }
+
     #[test]
     fn ssz_encode_bool() {
         assert_eq!(true.as_ssz_bytes(), vec![1]);
diff --git a/consensus/ssz/tests/tests.rs b/consensus/ssz/tests/tests.rs
index e41fc15dd..b4b91da4b 100644
--- a/consensus/ssz/tests/tests.rs
+++ b/consensus/ssz/tests/tests.rs
@@ -22,6 +22,13 @@ mod round_trip {
         round_trip(items);
     }
+    #[test]
+    fn option_u16() {
+        let items: Vec<Option<u16>> = vec![None, Some(2u16)];
+
+        round_trip(items);
+    }
+
     #[test]
     fn u8_array_4() {
         let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]];
@@ -46,6 +53,17 @@ mod round_trip {
         round_trip(items);
     }
+    #[test]
+    fn option_vec_h256() {
+        let items: Vec<Option<Vec<H256>>> = vec![
+            None,
+            Some(vec![]),
+            Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]),
+        ];
+
+        round_trip(items);
+    }
+
     #[test]
     fn vec_u16() {
         let items: Vec<Vec<u16>> = vec![
diff --git a/consensus/state_processing/src/common/deposit_data_tree.rs b/consensus/state_processing/src/common/deposit_data_tree.rs
index 46f1ed8cc..aaad96fbd 100644
--- a/consensus/state_processing/src/common/deposit_data_tree.rs
+++ b/consensus/state_processing/src/common/deposit_data_tree.rs
@@ -2,12 +2,14 @@ use eth2_hashing::hash;
 use int_to_bytes::int_to_bytes32;
 use merkle_proof::{MerkleTree, MerkleTreeError};
 use safe_arith::SafeArith;
-use types::Hash256;
+use types::{DepositTreeSnapshot, FinalizedExecutionBlock, Hash256};
 /// Emulates the eth1 deposit contract merkle tree.
+#[derive(PartialEq)]
 pub struct DepositDataTree {
     tree: MerkleTree,
     mix_in_length: usize,
+    finalized_execution_block: Option<FinalizedExecutionBlock>,
     depth: usize,
 }
@@ -17,6 +19,7 @@ impl DepositDataTree {
         Self {
             tree: MerkleTree::create(leaves, depth),
             mix_in_length,
+            finalized_execution_block: None,
             depth,
         }
     }
@@ -38,10 +41,10 @@ impl DepositDataTree {
     ///
     /// The Merkle proof is in "bottom-up" order, starting with a leaf node
     /// and moving up the tree. Its length will be exactly equal to `depth + 1`.
-    pub fn generate_proof(&self, index: usize) -> (Hash256, Vec<Hash256>) {
-        let (root, mut proof) = self.tree.generate_proof(index, self.depth);
+    pub fn generate_proof(&self, index: usize) -> Result<(Hash256, Vec<Hash256>), MerkleTreeError> {
+        let (root, mut proof) = self.tree.generate_proof(index, self.depth)?;
         proof.push(Hash256::from_slice(&self.length_bytes()));
-        (root, proof)
+        Ok((root, proof))
     }
     /// Add a deposit to the merkle tree.
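Before the next hunk, a quick illustration of the call-site change above (a sketch; `deposit_data_tree` stands for any populated `DepositDataTree`): proofs are now fallible, and they still carry the length mix-in as their final element.

```rust
// Hypothetical call site: generating a deposit proof after this change.
let (root, proof) = deposit_data_tree.generate_proof(index)?;
// `depth` sibling hashes plus the little-endian deposit count appended above.
assert_eq!(proof.len(), DEPOSIT_TREE_DEPTH + 1);
```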
@@ -50,4 +53,50 @@ impl DepositDataTree {
         self.mix_in_length.safe_add_assign(1)?;
         Ok(())
     }
+
+    /// Finalize deposits up to `finalized_execution_block.deposit_count`
+    pub fn finalize(
+        &mut self,
+        finalized_execution_block: FinalizedExecutionBlock,
+    ) -> Result<(), MerkleTreeError> {
+        self.tree
+            .finalize_deposits(finalized_execution_block.deposit_count as usize, self.depth)?;
+        self.finalized_execution_block = Some(finalized_execution_block);
+        Ok(())
+    }
+
+    /// Get snapshot of finalized deposit tree (if tree is finalized)
+    pub fn get_snapshot(&self) -> Option<DepositTreeSnapshot> {
+        let finalized_execution_block = self.finalized_execution_block.as_ref()?;
+        Some(DepositTreeSnapshot {
+            finalized: self.tree.get_finalized_hashes(),
+            deposit_root: finalized_execution_block.deposit_root,
+            deposit_count: finalized_execution_block.deposit_count,
+            execution_block_hash: finalized_execution_block.block_hash,
+            execution_block_height: finalized_execution_block.block_height,
+        })
+    }
+
+    /// Create a new Merkle tree from a snapshot
+    pub fn from_snapshot(
+        snapshot: &DepositTreeSnapshot,
+        depth: usize,
+    ) -> Result<Self, MerkleTreeError> {
+        Ok(Self {
+            tree: MerkleTree::from_finalized_snapshot(
+                &snapshot.finalized,
+                snapshot.deposit_count as usize,
+                depth,
+            )?,
+            mix_in_length: snapshot.deposit_count as usize,
+            finalized_execution_block: Some(snapshot.into()),
+            depth,
+        })
+    }
+
+    #[allow(dead_code)]
+    pub fn print_tree(&self) {
+        self.tree.print_node(0);
+        println!("========================================================");
+    }
 }
diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml
index 68fdbf799..d1b2ae182 100644
--- a/consensus/types/Cargo.toml
+++ b/consensus/types/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "types"
-version = "0.2.0"
+version = "0.2.1"
 authors = ["Paul Hauner ", "Age Manning "]
 edition = "2021"
diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs
new file mode 100644
index 000000000..21bbab81f
--- /dev/null
+++ b/consensus/types/src/deposit_tree_snapshot.rs
@@ -0,0 +1,83 @@
+use crate::*;
+use eth2_hashing::{hash32_concat, ZERO_HASHES};
+use int_to_bytes::int_to_bytes32;
+use serde_derive::{Deserialize, Serialize};
+use ssz_derive::{Decode, Encode};
+use test_random_derive::TestRandom;
+use test_utils::TestRandom;
+use DEPOSIT_TREE_DEPTH;
+
+#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)]
+pub struct FinalizedExecutionBlock {
+    pub deposit_root: Hash256,
+    pub deposit_count: u64,
+    pub block_hash: Hash256,
+    pub block_height: u64,
+}
+
+impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock {
+    fn from(snapshot: &DepositTreeSnapshot) -> Self {
+        Self {
+            deposit_root: snapshot.deposit_root,
+            deposit_count: snapshot.deposit_count,
+            block_hash: snapshot.execution_block_hash,
+            block_height: snapshot.execution_block_height,
+        }
+    }
+}
+
+#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)]
+pub struct DepositTreeSnapshot {
+    pub finalized: Vec<Hash256>,
+    pub deposit_root: Hash256,
+    pub deposit_count: u64,
+    pub execution_block_hash: Hash256,
+    pub execution_block_height: u64,
+}
+
+impl Default for DepositTreeSnapshot {
+    fn default() -> Self {
+        let mut result = Self {
+            finalized: vec![],
+            deposit_root: Hash256::default(),
+            deposit_count: 0,
+            execution_block_hash: Hash256::zero(),
+            execution_block_height: 0,
+        };
+        // properly set the empty deposit root
+        result.deposit_root = result.calculate_root().unwrap();
+        result
+    }
+}
+
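A worked example of the root reconstruction implemented just below (numbers illustrative): with `deposit_count = 5` (binary `101`), `finalized` holds two hashes, the root of a full 4-leaf subtree and a single finalized leaf. `calculate_root` folds from the bottom: at height 0 the low bit is set, so it absorbs the stored leaf hash on the left; at height 1 the bit is clear, so it hashes with the zero-subtree on the right; at height 2 it absorbs the 4-leaf root; the remaining heights zero-extend, and finally the deposit count is mixed in.

```rust
// Sketch of validating a received snapshot with the method below; `snapshot`
// is any `DepositTreeSnapshot`, e.g. one decoded from the HTTP API.
match snapshot.calculate_root() {
    Some(root) if root == snapshot.deposit_root => { /* safe to import */ }
    _ => panic!("snapshot is inconsistent and must not be imported"),
}
```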
+impl DepositTreeSnapshot {
+    // Calculates the deposit tree root from the hashes in the snapshot
+    pub fn calculate_root(&self) -> Option<Hash256> {
+        let mut size = self.deposit_count;
+        let mut index = self.finalized.len();
+        let mut deposit_root = [0; 32];
+        for height in 0..DEPOSIT_TREE_DEPTH {
+            deposit_root = if (size & 1) == 1 {
+                index = index.checked_sub(1)?;
+                hash32_concat(self.finalized.get(index)?.as_bytes(), &deposit_root)
+            } else {
+                hash32_concat(&deposit_root, ZERO_HASHES.get(height)?)
+            };
+            size /= 2;
+        }
+        // add mix-in-length
+        deposit_root = hash32_concat(&deposit_root, &int_to_bytes32(self.deposit_count));
+
+        Some(Hash256::from_slice(&deposit_root))
+    }
+    pub fn is_valid(&self) -> bool {
+        self.calculate_root()
+            .map_or(false, |calculated| self.deposit_root == calculated)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    ssz_tests!(DepositTreeSnapshot);
+}
diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs
index 7f618dc34..4a6cc57b1 100644
--- a/consensus/types/src/lib.rs
+++ b/consensus/types/src/lib.rs
@@ -36,6 +36,7 @@ pub mod contribution_and_proof;
 pub mod deposit;
 pub mod deposit_data;
 pub mod deposit_message;
+pub mod deposit_tree_snapshot;
 pub mod enr_fork_id;
 pub mod eth1_data;
 pub mod eth_spec;
@@ -120,6 +121,7 @@ pub use crate::contribution_and_proof::ContributionAndProof;
 pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH};
 pub use crate::deposit_data::DepositData;
 pub use crate::deposit_message::DepositMessage;
+pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock};
 pub use crate::enr_fork_id::EnrForkId;
 pub use crate::eth1_data::Eth1Data;
 pub use crate::eth_spec::EthSpecId;
diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs
index 20147adb9..cb50a4ee8 100644
--- a/database_manager/src/lib.rs
+++ b/database_manager/src/lib.rs
@@ -256,6 +256,7 @@ pub fn migrate_db(
     migrate_schema::, _, _, _>>(
         db,
+        client_config.eth1.deposit_contract_deploy_block,
         &client_config.get_data_dir(),
         from,
         to,
diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs
index 005a74edf..1f869562d 100644
--- a/validator_client/src/lib.rs
+++ b/validator_client/src/lib.rs
@@ -77,6 +77,7 @@ const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4;
 const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4;
 const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4;
 const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4;
+const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4;
 const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger";
@@ -291,6 +292,7 @@ impl ProductionValidatorClient {
                     / HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT,
                 get_debug_beacon_states: slot_duration / HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT,
+                get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT,
             }
         } else {
             Timeouts::set_all(slot_duration)
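Putting the pieces together, the fast weak-subjectivity sync flow enabled by this patch looks roughly like this (a sketch under assumed wiring; `remote`, `config`, `log` and `spec` are placeholders for values the syncing node already has):

```rust
// 1. Fetch the finalized deposit tree snapshot from an already-synced node.
let snapshot = remote
    .get_deposit_snapshot()
    .await?
    .ok_or("remote node has no finalized deposit snapshot")?;

// 2. Refuse to import a snapshot whose hashes don't reproduce its root.
if !snapshot.is_valid() {
    return Err("invalid deposit tree snapshot".into());
}

// 3. Start the eth1 service from the snapshot; it only needs deposit logs
//    after `snapshot.execution_block_height` to catch up to the chain head.
let eth1_service = eth1::Service::from_deposit_snapshot(config, log, spec, &snapshot)?;
```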