Deposit Cache Finalization & Fast WS Sync (#2915)

## Summary

The deposit cache is now able to finalize deposits, which lets it drop the deposit logs and deposit Merkle tree hashes that are no longer required to construct deposit proofs. The cache is finalized whenever the latest finalized checkpoint has a new `Eth1Data` with all of its deposits imported.

This has three benefits:

1. Faster Merkle proof construction for deposits: re-constructing the Merkle tree only requires replaying deposits since the last finalized checkpoint, rather than all historical deposits.
2. Significantly faster weak subjectivity sync, as the deposit cache can be transferred to the newly syncing node in compressed form: the Merkle tree that stores `N` finalized deposits requires a maximum of `log2(N)` hashes, and the newly syncing node then only needs to download deposits since the last finalized checkpoint to have a full tree (see the sketch after this list).
3. Future-proofing in preparation for [EIP-4444](https://eips.ethereum.org/EIPS/eip-4444): execution nodes will no longer be required to store logs permanently, so we won't always have all historical deposit logs available to us.
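To make the size claim in point 2 concrete, here is a back-of-the-envelope sketch in plain Rust (the function is illustrative and not part of this PR): the snapshot carries one hash per fully-finalized subtree, which is the number of set bits in `N`, itself at most `floor(log2(N)) + 1`.

```rust
/// Number of hashes a snapshot carries for `n` fully-finalized deposits:
/// one per fully-finalized subtree, i.e. one per set bit in `n`
/// (at most floor(log2(n)) + 1).
fn snapshot_hashes(n: u64) -> u32 {
    n.count_ones()
}

fn main() {
    // ~500k deposits compress to at most 19 hashes (~608 bytes of tree
    // data) instead of half a million historical deposit logs.
    let n: u64 = 500_000;
    assert!(snapshot_hashes(n) <= 64 - n.leading_zeros());
    println!("{} deposits -> {} snapshot hashes", n, snapshot_hashes(n));
}
```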

## More Details

The image below illustrates how the deposit contract Merkle tree evolves and finalizes, along with the resulting `DepositTreeSnapshot`:
![image](https://user-images.githubusercontent.com/37123614/151465302-5fc56284-8a69-4998-b20e-45db3934ac70.png)
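For reference, the snapshot pictured above has roughly the following shape. This is a sketch following [EIP-4881](https://eips.ethereum.org/EIPS/eip-4881); the canonical definition is the `DepositTreeSnapshot` type in the `types` crate, so treat the exact field layout here as an assumption.

```rust
use types::Hash256;

/// Sketch of the snapshot a newly syncing node downloads during fast WS
/// sync (assumed layout, per EIP-4881).
pub struct DepositTreeSnapshot {
    /// One hash per fully-finalized subtree, left to right.
    pub finalized: Vec<Hash256>,
    /// Root of the full deposit tree at the snapshot point.
    pub deposit_root: Hash256,
    /// Number of deposits summarized by the snapshot.
    pub deposit_count: u64,
    /// Execution block at which the snapshot was taken.
    pub execution_block_hash: Hash256,
    pub execution_block_height: u64,
}
```

Everything to the left of the finalization boundary collapses into the `finalized` hashes; only deposits past the boundary must be fetched as logs and replayed.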

## Other Considerations

I've changed the structure of the `SszDepositCache`, so once this version of Lighthouse has loaded and saved your database, older versions will no longer be able to load it. (As the migration code below shows, the v13 → v12 downgrade can only preserve the eth1 cache if the deposit tree was never finalized; otherwise it reinitializes the cache.)

Co-authored-by: ethDreamer <37123614+ethDreamer@users.noreply.github.com>

Commit e8604757a2 (parent 46fbf5b98b) by ethDreamer, 2022-10-30 04:04:24 +00:00
35 changed files with 2302 additions and 171 deletions

Cargo.lock (generated)

@ -1638,6 +1638,7 @@ dependencies = [
"slog",
"sloggers",
"state_processing",
"superstruct",
"task_executor",
"tokio",
"tree_hash",
@ -6884,7 +6885,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.2.0"
version = "0.2.1"
dependencies = [
"arbitrary",
"beacon_chain",


@ -16,6 +16,7 @@ use crate::chain_config::ChainConfig;
use crate::early_attester_cache::EarlyAttesterCache;
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend};
use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData};
use crate::events::ServerSentEventHandler;
use crate::execution_payload::{get_execution_payload, PreparePayloadHandle};
use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult};
@ -117,6 +118,9 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1);
/// validator pubkey cache.
pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1);
/// The timeout for the eth1 finalization cache
pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200);
// These keys are all zero because they get stored in different columns, see `DBColumn` type.
pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero();
pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero();
@ -359,6 +363,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
pub(crate) snapshot_cache: TimeoutRwLock<SnapshotCache<T::EthSpec>>,
/// Caches the attester shuffling for a given epoch and shuffling key root.
pub shuffling_cache: TimeoutRwLock<ShufflingCache>,
/// A cache of eth1 deposit data at epoch boundaries for deposit finalization
pub eth1_finalization_cache: TimeoutRwLock<Eth1FinalizationCache>,
/// Caches the beacon block proposer shuffling for a given epoch and shuffling key root.
pub beacon_proposer_cache: Mutex<BeaconProposerCache>,
/// Caches a map of `validator_index -> validator_pubkey`.
@ -2531,9 +2537,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
block,
block_root,
state,
parent_block: _,
parent_block,
confirmed_state_roots,
payload_verification_handle,
parent_eth1_finalization_data,
} = execution_pending_block;
let PayloadVerificationOutcome {
@ -2585,6 +2592,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
confirmed_state_roots,
payload_verification_status,
count_unrealized,
parent_block,
parent_eth1_finalization_data,
)
},
"payload_verification_handle",
@ -2599,6 +2608,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
///
/// An error is returned if the block was unable to be imported. It may be partially imported
/// (i.e., this function is not atomic).
#[allow(clippy::too_many_arguments)]
fn import_block(
&self,
signed_block: Arc<SignedBeaconBlock<T::EthSpec>>,
@ -2607,6 +2617,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
confirmed_state_roots: Vec<Hash256>,
payload_verification_status: PayloadVerificationStatus,
count_unrealized: CountUnrealized,
parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
parent_eth1_finalization_data: Eth1FinalizationData,
) -> Result<Hash256, BlockError<T::EthSpec>> {
let current_slot = self.slot()?;
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
@ -2987,6 +2999,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let parent_root = block.parent_root();
let slot = block.slot();
let current_eth1_finalization_data = Eth1FinalizationData {
eth1_data: state.eth1_data().clone(),
eth1_deposit_index: state.eth1_deposit_index(),
};
let current_finalized_checkpoint = state.finalized_checkpoint();
self.snapshot_cache
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
.ok_or(Error::SnapshotCacheLockTimeout)
@ -3060,6 +3077,57 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
);
}
// Do not write to the eth1 finalization cache for blocks older than 5 epochs;
// this helps reduce noise during sync.
if block_delay_total
< self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32)
{
let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch());
if parent_block_epoch < current_epoch {
// we've crossed an epoch boundary, store Eth1FinalizationData
let (checkpoint, eth1_finalization_data) =
if current_slot % T::EthSpec::slots_per_epoch() == 0 {
// current block is the checkpoint
(
Checkpoint {
epoch: current_epoch,
root: block_root,
},
current_eth1_finalization_data,
)
} else {
// parent block is the checkpoint
(
Checkpoint {
epoch: current_epoch,
root: parent_block.canonical_root(),
},
parent_eth1_finalization_data,
)
};
if let Some(finalized_eth1_data) = self
.eth1_finalization_cache
.try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT)
.and_then(|mut cache| {
cache.insert(checkpoint, eth1_finalization_data);
cache.finalize(&current_finalized_checkpoint)
})
{
if let Some(eth1_chain) = self.eth1_chain.as_ref() {
let finalized_deposit_count = finalized_eth1_data.deposit_count;
eth1_chain.finalize_eth1_data(finalized_eth1_data);
debug!(
self.log,
"called eth1_chain.finalize_eth1_data()";
"epoch" => current_finalized_checkpoint.epoch,
"deposit count" => finalized_deposit_count,
);
}
}
}
}
// Inform the unknown block cache, in case it was waiting on this block.
self.pre_finalization_block_cache
.block_processed(block_root);


@ -42,6 +42,7 @@
//! END
//!
//! ```
use crate::eth1_finalization_cache::Eth1FinalizationData;
use crate::execution_payload::{
is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
AllowOptimisticImport, PayloadNotifier,
@ -622,6 +623,7 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
pub block_root: Hash256,
pub state: BeaconState<T::EthSpec>,
pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
pub parent_eth1_finalization_data: Eth1FinalizationData,
pub confirmed_state_roots: Vec<Hash256>,
pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>,
}
@ -1164,6 +1166,11 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
.into());
}
let parent_eth1_finalization_data = Eth1FinalizationData {
eth1_data: state.eth1_data().clone(),
eth1_deposit_index: state.eth1_deposit_index(),
};
let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64());
for _ in 0..distance {
let state_root = if parent.beacon_block.slot() == state.slot() {
@ -1419,6 +1426,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
block_root,
state,
parent_block: parent.beacon_block,
parent_eth1_finalization_data,
confirmed_state_roots,
payload_verification_handle,
})


@ -1,5 +1,6 @@
use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY};
use crate::eth1_chain::{CachingEth1Backend, SszEth1};
use crate::eth1_finalization_cache::Eth1FinalizationCache;
use crate::fork_choice_signal::ForkChoiceSignalTx;
use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary};
use crate::head_tracker::HeadTracker;
@ -795,6 +796,7 @@ where
head_for_snapshot_cache,
)),
shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()),
eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())),
beacon_proposer_cache: <_>::default(),
block_times_cache: <_>::default(),
pre_finalization_block_cache: <_>::default(),


@ -16,7 +16,6 @@ use store::{DBColumn, Error as StoreError, StoreItem};
use task_executor::TaskExecutor;
use types::{
BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
DEPOSIT_TREE_DEPTH,
};
type BlockNumber = u64;
@ -170,8 +169,8 @@ fn get_sync_status<T: EthSpec>(
#[derive(Encode, Decode, Clone)]
pub struct SszEth1 {
use_dummy_backend: bool,
backend_bytes: Vec<u8>,
pub use_dummy_backend: bool,
pub backend_bytes: Vec<u8>,
}
impl StoreItem for SszEth1 {
@ -305,6 +304,12 @@ where
}
}
/// Set in motion the finalization of `Eth1Data`. This method is called during block import
/// so it should be fast.
pub fn finalize_eth1_data(&self, eth1_data: Eth1Data) {
self.backend.finalize_eth1_data(eth1_data);
}
/// Consumes `self`, returning the backend.
pub fn into_backend(self) -> T {
self.backend
@ -335,6 +340,10 @@ pub trait Eth1ChainBackend<T: EthSpec>: Sized + Send + Sync {
/// beacon node eth1 cache is.
fn latest_cached_block(&self) -> Option<Eth1Block>;
/// Set in motion the finalization of `Eth1Data`. This method is called during block import
/// so it should be fast.
fn finalize_eth1_data(&self, eth1_data: Eth1Data);
/// Returns the block at the head of the chain (ignoring follow distance, etc). Used to obtain
/// an idea of how up-to-date the remote eth1 node is.
fn head_block(&self) -> Option<Eth1Block>;
@ -389,6 +398,8 @@ impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
None
}
fn finalize_eth1_data(&self, _eth1_data: Eth1Data) {}
fn head_block(&self) -> Option<Eth1Block> {
None
}
@ -547,7 +558,7 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
.deposits()
.read()
.cache
.get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH)
.get_deposits(next, last, deposit_count)
.map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e)))
.map(|(_deposit_root, deposits)| deposits)
}
@ -558,6 +569,12 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
self.core.latest_cached_block()
}
/// This only writes the eth1_data to a temporary cache so that the service
/// thread can later do the actual finalizing of the deposit tree.
fn finalize_eth1_data(&self, eth1_data: Eth1Data) {
self.core.set_to_finalize(Some(eth1_data));
}
fn head_block(&self) -> Option<Eth1Block> {
self.core.head_block()
}


@ -0,0 +1,498 @@
use slog::{debug, Logger};
use std::cmp;
use std::collections::BTreeMap;
use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root};
/// The default size of the cache.
/// The beacon chain only looks at the last 4 epochs for finalization.
/// Add 1 for current epoch and 4 earlier epochs.
pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5;
/// These fields are named the same as the corresponding fields in the `BeaconState`
/// as this structure stores these values from the `BeaconState` at a `Checkpoint`
#[derive(Clone)]
pub struct Eth1FinalizationData {
pub eth1_data: Eth1Data,
pub eth1_deposit_index: u64,
}
impl Eth1FinalizationData {
/// Ensures the deposit finalization conditions have been met. See:
/// https://eips.ethereum.org/EIPS/eip-4881#deposit-finalization-conditions
fn fully_imported(&self) -> bool {
self.eth1_deposit_index >= self.eth1_data.deposit_count
}
}
/// Implements a map from Checkpoint -> Eth1FinalizationData
pub struct CheckpointMap {
capacity: usize,
// There shouldn't be more than a couple of potential checkpoints at the same
// epoch. Searching through a vector for the matching Root should be faster
// than using another map from Root->Eth1FinalizationData
store: BTreeMap<Epoch, Vec<(Root, Eth1FinalizationData)>>,
}
impl Default for CheckpointMap {
fn default() -> Self {
Self::new()
}
}
/// Provides a map of `Eth1FinalizationData` referenced by `Checkpoint`
///
/// ## Cache Queuing
///
/// The cache keeps a maximum number of (`capacity`) epochs. Because there may be
/// forks at the epoch boundary, it's possible that there exists more than one
/// `Checkpoint` for the same `Epoch`. This cache will store all checkpoints for
/// a given `Epoch`. When adding data for a new `Checkpoint` would cause the number
/// of `Epoch`s stored to exceed `capacity`, the data for the oldest `Epoch` is dropped.
impl CheckpointMap {
pub fn new() -> Self {
CheckpointMap {
capacity: DEFAULT_ETH1_CACHE_SIZE,
store: BTreeMap::new(),
}
}
pub fn with_capacity(capacity: usize) -> Self {
CheckpointMap {
capacity: cmp::max(1, capacity),
store: BTreeMap::new(),
}
}
pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) {
self.store
.entry(checkpoint.epoch)
.or_insert_with(Vec::new)
.push((checkpoint.root, eth1_finalization_data));
// faster to reduce size after the fact than do pre-checking to see
// if the current data would increase the size of the BTreeMap
while self.store.len() > self.capacity {
let oldest_stored_epoch = self.store.keys().next().cloned().unwrap();
self.store.remove(&oldest_stored_epoch);
}
}
pub fn get(&self, checkpoint: &Checkpoint) -> Option<&Eth1FinalizationData> {
match self.store.get(&checkpoint.epoch) {
Some(vec) => {
for (root, data) in vec {
if *root == checkpoint.root {
return Some(data);
}
}
None
}
None => None,
}
}
#[cfg(test)]
pub fn len(&self) -> usize {
self.store.len()
}
}
/// This cache stores `Eth1FinalizationData` that could potentially be finalized within 4
/// future epochs.
pub struct Eth1FinalizationCache {
by_checkpoint: CheckpointMap,
pending_eth1: BTreeMap<u64, Eth1Data>,
last_finalized: Option<Eth1Data>,
log: Logger,
}
/// Provides a cache of `Eth1FinalizationData` at epoch boundaries. This is used to
/// finalize deposits when a new epoch is finalized.
///
impl Eth1FinalizationCache {
pub fn new(log: Logger) -> Self {
Eth1FinalizationCache {
by_checkpoint: CheckpointMap::new(),
pending_eth1: BTreeMap::new(),
last_finalized: None,
log,
}
}
pub fn with_capacity(log: Logger, capacity: usize) -> Self {
Eth1FinalizationCache {
by_checkpoint: CheckpointMap::with_capacity(capacity),
pending_eth1: BTreeMap::new(),
last_finalized: None,
log,
}
}
pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) {
if !eth1_finalization_data.fully_imported() {
self.pending_eth1.insert(
eth1_finalization_data.eth1_data.deposit_count,
eth1_finalization_data.eth1_data.clone(),
);
debug!(
self.log,
"Eth1Cache: inserted pending eth1";
"eth1_data.deposit_count" => eth1_finalization_data.eth1_data.deposit_count,
"eth1_deposit_index" => eth1_finalization_data.eth1_deposit_index,
);
}
self.by_checkpoint
.insert(checkpoint, eth1_finalization_data);
}
pub fn finalize(&mut self, checkpoint: &Checkpoint) -> Option<Eth1Data> {
if let Some(eth1_finalized_data) = self.by_checkpoint.get(checkpoint) {
let finalized_deposit_index = eth1_finalized_data.eth1_deposit_index;
let mut result = None;
while let Some(pending_count) = self.pending_eth1.keys().next().cloned() {
if finalized_deposit_index >= pending_count {
result = self.pending_eth1.remove(&pending_count);
debug!(
self.log,
"Eth1Cache: dropped pending eth1";
"pending_count" => pending_count,
"finalized_deposit_index" => finalized_deposit_index,
);
} else {
break;
}
}
if eth1_finalized_data.fully_imported() {
result = Some(eth1_finalized_data.eth1_data.clone())
}
if result.is_some() {
self.last_finalized = result;
}
self.last_finalized.clone()
} else {
debug!(
self.log,
"Eth1Cache: cache miss";
"epoch" => checkpoint.epoch,
);
None
}
}
#[cfg(test)]
pub fn by_checkpoint(&self) -> &CheckpointMap {
&self.by_checkpoint
}
#[cfg(test)]
pub fn pending_eth1(&self) -> &BTreeMap<u64, Eth1Data> {
&self.pending_eth1
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use sloggers::null::NullLoggerBuilder;
use sloggers::Build;
use std::collections::HashMap;
const SLOTS_PER_EPOCH: u64 = 32;
const MAX_DEPOSITS: u64 = 16;
const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64;
fn eth1cache() -> Eth1FinalizationCache {
let log_builder = NullLoggerBuilder;
Eth1FinalizationCache::new(log_builder.build().expect("should build log"))
}
fn random_eth1_data(deposit_count: u64) -> Eth1Data {
Eth1Data {
deposit_root: Root::random(),
deposit_count,
block_hash: Root::random(),
}
}
fn random_checkpoint(epoch: u64) -> Checkpoint {
Checkpoint {
epoch: epoch.into(),
root: Root::random(),
}
}
fn random_checkpoints(n: usize) -> Vec<Checkpoint> {
let mut result = Vec::with_capacity(n);
for epoch in 0..n {
result.push(random_checkpoint(epoch as u64))
}
result
}
#[test]
fn fully_imported_deposits() {
let epochs = 16;
let deposits_imported = 128;
let eth1data = random_eth1_data(deposits_imported);
let checkpoints = random_checkpoints(epochs as usize);
let mut eth1cache = eth1cache();
for epoch in 4..epochs {
assert_eq!(
eth1cache.by_checkpoint().len(),
cmp::min((epoch - 4) as usize, DEFAULT_ETH1_CACHE_SIZE),
"Unexpected cache size"
);
let checkpoint = checkpoints
.get(epoch as usize)
.expect("should get checkpoint");
eth1cache.insert(
*checkpoint,
Eth1FinalizationData {
eth1_data: eth1data.clone(),
eth1_deposit_index: deposits_imported,
},
);
let finalized_checkpoint = checkpoints
.get((epoch - 4) as usize)
.expect("should get finalized checkpoint");
assert!(
eth1cache.pending_eth1().is_empty(),
"Deposits are fully imported so pending cache should be empty"
);
if epoch < 8 {
assert_eq!(
eth1cache.finalize(finalized_checkpoint),
None,
"Should have cache miss"
);
} else {
assert_eq!(
eth1cache.finalize(finalized_checkpoint),
Some(eth1data.clone()),
"Should have cache hit"
)
}
}
}
#[test]
fn partially_imported_deposits() {
let epochs = 16;
let initial_deposits_imported = 1024;
let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH;
let full_import_epoch = 13;
let total_deposits =
initial_deposits_imported + deposits_imported_per_epoch * full_import_epoch;
let eth1data = random_eth1_data(total_deposits);
let checkpoints = random_checkpoints(epochs as usize);
let mut eth1cache = eth1cache();
for epoch in 0..epochs {
assert_eq!(
eth1cache.by_checkpoint().len(),
cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE),
"Unexpected cache size"
);
let checkpoint = checkpoints
.get(epoch as usize)
.expect("should get checkpoint");
let deposits_imported = cmp::min(
total_deposits,
initial_deposits_imported + deposits_imported_per_epoch * epoch,
);
eth1cache.insert(
*checkpoint,
Eth1FinalizationData {
eth1_data: eth1data.clone(),
eth1_deposit_index: deposits_imported,
},
);
if epoch >= 4 {
let finalized_epoch = epoch - 4;
let finalized_checkpoint = checkpoints
.get(finalized_epoch as usize)
.expect("should get finalized checkpoint");
if finalized_epoch < full_import_epoch {
assert_eq!(
eth1cache.finalize(finalized_checkpoint),
None,
"Deposits not fully finalized so cache should return no Eth1Data",
);
assert_eq!(
eth1cache.pending_eth1().len(),
1,
"Deposits not fully finalized. Pending eth1 cache should have 1 entry"
);
} else {
assert_eq!(
eth1cache.finalize(finalized_checkpoint),
Some(eth1data.clone()),
"Deposits fully imported and finalized. Cache should return Eth1Data. finalized_deposits[{}]",
(initial_deposits_imported + deposits_imported_per_epoch * finalized_epoch),
);
assert!(
eth1cache.pending_eth1().is_empty(),
"Deposits fully imported and finalized. Pending cache should be empty"
);
}
}
}
}
#[test]
fn fork_at_epoch_boundary() {
let epochs = 12;
let deposits_imported = 128;
let eth1data = random_eth1_data(deposits_imported);
let checkpoints = random_checkpoints(epochs as usize);
let mut forks = HashMap::new();
let mut eth1cache = eth1cache();
for epoch in 0..epochs {
assert_eq!(
eth1cache.by_checkpoint().len(),
cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE),
"Unexpected cache size"
);
let checkpoint = checkpoints
.get(epoch as usize)
.expect("should get checkpoint");
eth1cache.insert(
*checkpoint,
Eth1FinalizationData {
eth1_data: eth1data.clone(),
eth1_deposit_index: deposits_imported,
},
);
// lets put a fork at every third epoch
if epoch % 3 == 0 {
let fork = random_checkpoint(epoch);
eth1cache.insert(
fork,
Eth1FinalizationData {
eth1_data: eth1data.clone(),
eth1_deposit_index: deposits_imported,
},
);
forks.insert(epoch as usize, fork);
}
assert!(
eth1cache.pending_eth1().is_empty(),
"Deposits are fully imported so pending cache should be empty"
);
if epoch >= 4 {
let finalized_epoch = (epoch - 4) as usize;
let finalized_checkpoint = if finalized_epoch % 3 == 0 {
forks.get(&finalized_epoch).expect("should get fork")
} else {
checkpoints
.get(finalized_epoch)
.expect("should get checkpoint")
};
assert_eq!(
eth1cache.finalize(finalized_checkpoint),
Some(eth1data.clone()),
"Should have cache hit"
);
if finalized_epoch >= 3 {
let dropped_epoch = finalized_epoch - 3;
if let Some(dropped_checkpoint) = forks.get(&dropped_epoch) {
// got checkpoint for an old fork that should no longer
// be in the cache because it is from too long ago
assert_eq!(
eth1cache.finalize(dropped_checkpoint),
None,
"Should have cache miss"
);
}
}
}
}
}
#[test]
fn massive_deposit_queue() {
// Simulating a situation where deposits don't get imported within an eth1 voting period
let eth1_voting_periods = 8;
let initial_deposits_imported = 1024;
let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH;
let initial_deposit_queue =
deposits_imported_per_epoch * EPOCHS_PER_ETH1_VOTING_PERIOD * 2 + 32;
let new_deposits_per_voting_period =
EPOCHS_PER_ETH1_VOTING_PERIOD * deposits_imported_per_epoch / 2;
let mut epoch_data = BTreeMap::new();
let mut eth1s_by_count = BTreeMap::new();
let mut eth1cache = eth1cache();
let mut last_period_deposits = initial_deposits_imported;
for period in 0..eth1_voting_periods {
let period_deposits = initial_deposits_imported
+ initial_deposit_queue
+ period * new_deposits_per_voting_period;
let period_eth1_data = random_eth1_data(period_deposits);
eth1s_by_count.insert(period_eth1_data.deposit_count, period_eth1_data.clone());
for epoch_mod_period in 0..EPOCHS_PER_ETH1_VOTING_PERIOD {
let epoch = period * EPOCHS_PER_ETH1_VOTING_PERIOD + epoch_mod_period;
let checkpoint = random_checkpoint(epoch);
let deposits_imported = cmp::min(
period_deposits,
last_period_deposits + deposits_imported_per_epoch * epoch_mod_period,
);
eth1cache.insert(
checkpoint,
Eth1FinalizationData {
eth1_data: period_eth1_data.clone(),
eth1_deposit_index: deposits_imported,
},
);
epoch_data.insert(epoch, (checkpoint, deposits_imported));
if epoch >= 4 {
let finalized_epoch = epoch - 4;
let (finalized_checkpoint, finalized_deposits) = epoch_data
.get(&finalized_epoch)
.expect("should get epoch data");
let pending_eth1s = eth1s_by_count.range((finalized_deposits + 1)..).count();
let last_finalized_eth1 = eth1s_by_count
.range(0..(finalized_deposits + 1))
.map(|(_, eth1)| eth1)
.last()
.cloned();
assert_eq!(
eth1cache.finalize(finalized_checkpoint),
last_finalized_eth1,
"finalized checkpoint mismatch",
);
assert_eq!(
eth1cache.pending_eth1().len(),
pending_eth1s,
"pending eth1 mismatch"
);
}
}
// remove unneeded stuff from old epochs
while epoch_data.len() > DEFAULT_ETH1_CACHE_SIZE {
let oldest_stored_epoch = epoch_data
.keys()
.next()
.cloned()
.expect("should get oldest epoch");
epoch_data.remove(&oldest_stored_epoch);
}
last_period_deposits = period_deposits;
}
}
}
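As a usage sketch of the cache defined above (the epoch, root, and counts below are made up, and the snippet assumes it lives inside this module): block import inserts the `Eth1FinalizationData` captured at each epoch-boundary checkpoint, then calls `finalize` with the chain's finalized checkpoint to learn which `Eth1Data` the deposit tree may be pruned up to.

```rust
use sloggers::null::NullLoggerBuilder;
use sloggers::Build;
use types::{Checkpoint, Epoch, Eth1Data, Hash256};

fn usage_sketch() {
    let log = NullLoggerBuilder.build().expect("should build null logger");
    let mut cache = Eth1FinalizationCache::new(log);

    // Capture eth1 state at a (made-up) epoch-boundary checkpoint.
    let checkpoint = Checkpoint {
        epoch: Epoch::new(100),
        root: Hash256::zero(),
    };
    cache.insert(
        checkpoint,
        Eth1FinalizationData {
            eth1_data: Eth1Data {
                deposit_root: Hash256::zero(),
                deposit_count: 128,
                block_hash: Hash256::zero(),
            },
            eth1_deposit_index: 128, // all deposits imported
        },
    );

    // Once the beacon chain finalizes this checkpoint, the cache returns
    // the Eth1Data whose deposits the tree may now finalize.
    assert!(cache.finalize(&checkpoint).is_some());
}
```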


@ -14,6 +14,7 @@ pub mod chain_config;
mod early_attester_cache;
mod errors;
pub mod eth1_chain;
mod eth1_finalization_cache;
pub mod events;
pub mod execution_payload;
pub mod fork_choice_signal;


@ -2,13 +2,15 @@
mod migration_schema_v10;
mod migration_schema_v11;
mod migration_schema_v12;
mod migration_schema_v13;
mod migration_schema_v6;
mod migration_schema_v7;
mod migration_schema_v8;
mod migration_schema_v9;
mod types;
use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY};
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY};
use crate::eth1_chain::SszEth1;
use crate::persisted_fork_choice::{
PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7,
PersistedForkChoiceV8,
@ -24,6 +26,7 @@ use store::{Error as StoreError, StoreItem};
/// Migrate the database from one schema version to another, applying all requisite mutations.
pub fn migrate_schema<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
deposit_contract_deploy_block: u64,
datadir: &Path,
from: SchemaVersion,
to: SchemaVersion,
@ -31,19 +34,51 @@ pub fn migrate_schema<T: BeaconChainTypes>(
spec: &ChainSpec,
) -> Result<(), StoreError> {
match (from, to) {
// Migrating from the current schema version to iself is always OK, a no-op.
// Migrating from the current schema version to itself is always OK, a no-op.
(_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()),
// Upgrade across multiple versions by recursively migrating one step at a time.
(_, _) if from.as_u64() + 1 < to.as_u64() => {
let next = SchemaVersion(from.as_u64() + 1);
migrate_schema::<T>(db.clone(), datadir, from, next, log.clone(), spec)?;
migrate_schema::<T>(db, datadir, next, to, log, spec)
migrate_schema::<T>(
db.clone(),
deposit_contract_deploy_block,
datadir,
from,
next,
log.clone(),
spec,
)?;
migrate_schema::<T>(
db,
deposit_contract_deploy_block,
datadir,
next,
to,
log,
spec,
)
}
// Downgrade across multiple versions by recursively migrating one step at a time.
(_, _) if to.as_u64() + 1 < from.as_u64() => {
let next = SchemaVersion(from.as_u64() - 1);
migrate_schema::<T>(db.clone(), datadir, from, next, log.clone(), spec)?;
migrate_schema::<T>(db, datadir, next, to, log, spec)
migrate_schema::<T>(
db.clone(),
deposit_contract_deploy_block,
datadir,
from,
next,
log.clone(),
spec,
)?;
migrate_schema::<T>(
db,
deposit_contract_deploy_block,
datadir,
next,
to,
log,
spec,
)
}
//
@ -207,6 +242,55 @@ pub fn migrate_schema<T: BeaconChainTypes>(
let ops = migration_schema_v12::downgrade_from_v12::<T>(db.clone(), log)?;
db.store_schema_version_atomically(to, ops)
}
(SchemaVersion(12), SchemaVersion(13)) => {
let mut ops = vec![];
if let Some(persisted_eth1_v1) = db.get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)? {
let upgraded_eth1_cache =
match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) {
Ok(upgraded_eth1) => upgraded_eth1,
Err(e) => {
warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e);
warn!(log, "Reinitializing eth1 cache");
migration_schema_v13::reinitialized_eth1_cache_v13(
deposit_contract_deploy_block,
)
}
};
ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
(SchemaVersion(13), SchemaVersion(12)) => {
let mut ops = vec![];
if let Some(persisted_eth1_v13) = db.get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)? {
let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache(
persisted_eth1_v13,
) {
Ok(Some(downgraded_eth1)) => downgraded_eth1,
Ok(None) => {
warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache");
migration_schema_v13::reinitialized_eth1_cache_v1(
deposit_contract_deploy_block,
)
}
Err(e) => {
warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e);
warn!(log, "Reinitializing eth1 cache");
migration_schema_v13::reinitialized_eth1_cache_v1(
deposit_contract_deploy_block,
)
}
};
ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// Anything else is an error.
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
target_version: to,


@ -0,0 +1,150 @@
use crate::eth1_chain::SszEth1;
use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13};
use ssz::{Decode, Encode};
use state_processing::common::DepositDataTree;
use store::Error;
use types::DEPOSIT_TREE_DEPTH;
pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result<SszEth1, Error> {
if persisted_eth1_v1.use_dummy_backend {
// backend_bytes is empty when using dummy backend
return Ok(persisted_eth1_v1);
}
let SszEth1 {
use_dummy_backend,
backend_bytes,
} = persisted_eth1_v1;
let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?;
let SszEth1CacheV1 {
block_cache,
deposit_cache: deposit_cache_v1,
last_processed_block,
} = ssz_eth1_cache_v1;
let SszDepositCacheV1 {
logs,
leaves,
deposit_contract_deploy_block,
deposit_roots,
} = deposit_cache_v1;
let deposit_cache_v13 = SszDepositCacheV13 {
logs,
leaves,
deposit_contract_deploy_block,
finalized_deposit_count: 0,
finalized_block_height: deposit_contract_deploy_block.saturating_sub(1),
deposit_tree_snapshot: None,
deposit_roots,
};
let ssz_eth1_cache_v13 = SszEth1CacheV13 {
block_cache,
deposit_cache: deposit_cache_v13,
last_processed_block,
};
let persisted_eth1_v13 = SszEth1 {
use_dummy_backend,
backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(),
};
Ok(persisted_eth1_v13)
}
pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result<Option<SszEth1>, Error> {
if persisted_eth1_v13.use_dummy_backend {
// backend_bytes is empty when using dummy backend
return Ok(Some(persisted_eth1_v13));
}
let SszEth1 {
use_dummy_backend,
backend_bytes,
} = persisted_eth1_v13;
let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?;
let SszEth1CacheV13 {
block_cache,
deposit_cache: deposit_cache_v13,
last_processed_block,
} = ssz_eth1_cache_v13;
let SszDepositCacheV13 {
logs,
leaves,
deposit_contract_deploy_block,
finalized_deposit_count,
finalized_block_height: _,
deposit_tree_snapshot,
deposit_roots,
} = deposit_cache_v13;
if finalized_deposit_count == 0 && deposit_tree_snapshot.is_none() {
// This tree was never finalized and can be directly downgraded to v1 without re-initializing
let deposit_cache_v1 = SszDepositCacheV1 {
logs,
leaves,
deposit_contract_deploy_block,
deposit_roots,
};
let ssz_eth1_cache_v1 = SszEth1CacheV1 {
block_cache,
deposit_cache: deposit_cache_v1,
last_processed_block,
};
return Ok(Some(SszEth1 {
use_dummy_backend,
backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(),
}));
}
// deposit cache was finalized; can't downgrade
Ok(None)
}
pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 {
let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
let deposit_cache_v13 = SszDepositCacheV13 {
logs: vec![],
leaves: vec![],
deposit_contract_deploy_block,
finalized_deposit_count: 0,
finalized_block_height: deposit_contract_deploy_block.saturating_sub(1),
deposit_tree_snapshot: empty_tree.get_snapshot(),
deposit_roots: vec![empty_tree.root()],
};
let ssz_eth1_cache_v13 = SszEth1CacheV13 {
block_cache: BlockCache::default(),
deposit_cache: deposit_cache_v13,
last_processed_block: None,
};
SszEth1 {
use_dummy_backend: false,
backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(),
}
}
pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 {
let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
let deposit_cache_v1 = SszDepositCacheV1 {
logs: vec![],
leaves: vec![],
deposit_contract_deploy_block,
deposit_roots: vec![empty_tree.root()],
};
let ssz_eth1_cache_v1 = SszEth1CacheV1 {
block_cache: BlockCache::default(),
deposit_cache: deposit_cache_v1,
last_processed_block: None,
};
SszEth1 {
use_dummy_backend: false,
backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(),
}
}


@ -1432,8 +1432,9 @@ where
// Building proofs
let mut proofs = vec![];
for i in 0..leaves.len() {
let (_, mut proof) =
tree.generate_proof(i, self.spec.deposit_contract_tree_depth as usize);
let (_, mut proof) = tree
.generate_proof(i, self.spec.deposit_contract_tree_depth as usize)
.expect("should generate proof");
proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64)));
proofs.push(proof);
}


@ -277,8 +277,52 @@ where
BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT));
let slots_per_epoch = TEthSpec::slots_per_epoch();
debug!(context.log(), "Downloading finalized block");
let deposit_snapshot = if config.sync_eth1_chain {
// We want to fetch deposit snapshot before fetching the finalized beacon state to
// ensure that the snapshot is not newer than the beacon state that satisfies the
// deposit finalization conditions
debug!(context.log(), "Downloading deposit snapshot");
let deposit_snapshot_result = remote
.get_deposit_snapshot()
.await
.map_err(|e| match e {
ApiError::InvalidSsz(e) => format!(
"Unable to parse SSZ: {:?}. Ensure the checkpoint-sync-url refers to a \
node for the correct network",
e
),
e => format!("Error fetching deposit snapshot from remote: {:?}", e),
});
match deposit_snapshot_result {
Ok(Some(deposit_snapshot)) => {
if deposit_snapshot.is_valid() {
Some(deposit_snapshot)
} else {
warn!(context.log(), "Remote BN sent invalid deposit snapshot!");
None
}
}
Ok(None) => {
warn!(
context.log(),
"Remote BN does not support EIP-4881 fast deposit sync"
);
None
}
Err(e) => {
warn!(
context.log(),
"Remote BN does not support EIP-4881 fast deposit sync";
"error" => e
);
None
}
}
} else {
None
};
debug!(context.log(), "Downloading finalized block");
// Find a suitable finalized block on an epoch boundary.
let mut block = remote
.get_beacon_blocks_ssz::<TEthSpec>(BlockId::Finalized, &spec)
@ -362,9 +406,33 @@ where
"state_root" => ?state_root,
);
let service =
deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot(
config.eth1,
context.log().clone(),
spec,
&snapshot,
) {
Ok(service) => {
info!(
context.log(),
"Loaded deposit tree snapshot";
"deposits loaded" => snapshot.deposit_count,
);
Some(service)
}
Err(e) => {
warn!(context.log(),
"Unable to load deposit snapshot";
"error" => ?e
);
None
}
});
builder
.weak_subjectivity_state(state, block, genesis_state)
.map(|v| (v, None))?
.map(|v| (v, service))?
}
ClientGenesis::DepositContract => {
info!(
@ -810,9 +878,16 @@ where
self.freezer_db_path = Some(cold_path.into());
let inner_spec = spec.clone();
let deposit_contract_deploy_block = context
.eth2_network_config
.as_ref()
.map(|config| config.deposit_contract_deploy_block)
.unwrap_or(0);
let schema_upgrade = |db, from, to| {
migrate_schema::<Witness<TSlotClock, TEth1Backend, _, _, _>>(
db,
deposit_contract_deploy_block,
datadir,
from,
to,


@ -25,6 +25,7 @@ eth2_ssz_derive = "0.3.0"
tree_hash = "0.4.1"
parking_lot = "0.12.0"
slog = "2.5.2"
superstruct = "0.5.0"
tokio = { version = "1.14.0", features = ["full"] }
state_processing = { path = "../../consensus/state_processing" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics"}


@ -1,7 +1,10 @@
use ssz_derive::{Decode, Encode};
use std::collections::HashMap;
use std::ops::RangeInclusive;
pub use eth2::lighthouse::Eth1Block;
use eth2::types::Hash256;
use std::sync::Arc;
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
@ -20,7 +23,9 @@ pub enum Error {
/// timestamp.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct BlockCache {
blocks: Vec<Eth1Block>,
blocks: Vec<Arc<Eth1Block>>,
#[ssz(skip_serializing, skip_deserializing)]
by_hash: HashMap<Hash256, Arc<Eth1Block>>,
}
impl BlockCache {
@ -36,12 +41,12 @@ impl BlockCache {
/// Returns the earliest (lowest timestamp) block, if any.
pub fn earliest_block(&self) -> Option<&Eth1Block> {
self.blocks.first()
self.blocks.first().map(|ptr| ptr.as_ref())
}
/// Returns the latest (highest timestamp) block, if any.
pub fn latest_block(&self) -> Option<&Eth1Block> {
self.blocks.last()
self.blocks.last().map(|ptr| ptr.as_ref())
}
/// Returns the timestamp of the earliest block in the cache (if any).
@ -71,7 +76,7 @@ impl BlockCache {
/// - Monotonically increasing block numbers.
/// - Non-uniformly increasing block timestamps.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = &Eth1Block> + Clone {
self.blocks.iter()
self.blocks.iter().map(|ptr| ptr.as_ref())
}
/// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the
@ -80,7 +85,11 @@ impl BlockCache {
/// If `len` is greater than the vector's current length, this has no effect.
pub fn truncate(&mut self, len: usize) {
if len < self.blocks.len() {
self.blocks = self.blocks.split_off(self.blocks.len() - len);
let remaining = self.blocks.split_off(self.blocks.len() - len);
for block in &self.blocks {
self.by_hash.remove(&block.hash);
}
self.blocks = remaining;
}
}
@ -92,12 +101,27 @@ impl BlockCache {
/// Returns a block with the corresponding number, if any.
pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> {
self.blocks.get(
self.blocks
.as_slice()
.binary_search_by(|block| block.number.cmp(&block_number))
.ok()?,
)
self.blocks
.get(
self.blocks
.as_slice()
.binary_search_by(|block| block.number.cmp(&block_number))
.ok()?,
)
.map(|ptr| ptr.as_ref())
}
/// Returns a block with the corresponding hash, if any.
pub fn block_by_hash(&self, block_hash: &Hash256) -> Option<&Eth1Block> {
self.by_hash.get(block_hash).map(|ptr| ptr.as_ref())
}
/// Rebuilds the by_hash map
pub fn rebuild_by_hash_map(&mut self) {
self.by_hash.clear();
for block in self.blocks.iter() {
self.by_hash.insert(block.hash, block.clone());
}
}
/// Insert an `Eth1Snapshot` into `self`, allowing future queries.
@ -161,7 +185,9 @@ impl BlockCache {
}
}
self.blocks.push(block);
let ptr = Arc::new(block);
self.by_hash.insert(ptr.hash, ptr.clone());
self.blocks.push(ptr);
Ok(())
}
@ -269,6 +295,8 @@ mod tests {
.expect("should add consecutive blocks with duplicate timestamps");
}
let blocks = blocks.into_iter().map(Arc::new).collect::<Vec<_>>();
assert_eq!(cache.blocks, blocks, "should have added all blocks");
}
}

File diff suppressed because it is too large.


@ -2,14 +2,15 @@ use crate::service::endpoint_from_config;
use crate::Config;
use crate::{
block_cache::{BlockCache, Eth1Block},
deposit_cache::{DepositCache, SszDepositCache},
deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13},
};
use execution_layer::HttpJsonRpc;
use parking_lot::RwLock;
use ssz::four_byte_option_impl;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use types::ChainSpec;
use superstruct::superstruct;
use types::{ChainSpec, DepositTreeSnapshot, Eth1Data};
// Define "legacy" implementations of `Option<u64>` which use four bytes for encoding the union
// selector.
@ -29,12 +30,25 @@ impl DepositUpdater {
last_processed_block: None,
}
}
pub fn from_snapshot(
deposit_contract_deploy_block: u64,
snapshot: &DepositTreeSnapshot,
) -> Result<Self, String> {
let last_processed_block = Some(snapshot.execution_block_height);
Ok(Self {
cache: DepositCache::from_deposit_snapshot(deposit_contract_deploy_block, snapshot)?,
last_processed_block,
})
}
}
pub struct Inner {
pub block_cache: RwLock<BlockCache>,
pub deposit_cache: RwLock<DepositUpdater>,
pub endpoint: HttpJsonRpc,
// this gets set to Some(Eth1Data) when the deposit finalization conditions are met
pub to_finalize: RwLock<Option<Eth1Data>>,
pub config: RwLock<Config>,
pub remote_head_block: RwLock<Option<Eth1Block>>,
pub spec: ChainSpec,
@ -58,9 +72,13 @@ impl Inner {
/// Recover `Inner` given byte representation of eth1 deposit and block caches.
pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result<Self, String> {
let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes)
.map_err(|e| format!("Ssz decoding error: {:?}", e))?;
ssz_cache.to_inner(config, spec)
SszEth1Cache::from_ssz_bytes(bytes)
.map_err(|e| format!("Ssz decoding error: {:?}", e))?
.to_inner(config, spec)
.map(|inner| {
inner.block_cache.write().rebuild_by_hash_map();
inner
})
}
/// Returns a reference to the specification.
@ -69,12 +87,21 @@ impl Inner {
}
}
#[derive(Encode, Decode, Clone)]
pub type SszEth1Cache = SszEth1CacheV13;
#[superstruct(
variants(V1, V13),
variant_attributes(derive(Encode, Decode, Clone)),
no_enum
)]
pub struct SszEth1Cache {
block_cache: BlockCache,
deposit_cache: SszDepositCache,
pub block_cache: BlockCache,
#[superstruct(only(V1))]
pub deposit_cache: SszDepositCacheV1,
#[superstruct(only(V13))]
pub deposit_cache: SszDepositCacheV13,
#[ssz(with = "four_byte_option_u64")]
last_processed_block: Option<u64>,
pub last_processed_block: Option<u64>,
}
impl SszEth1Cache {
@ -97,6 +124,7 @@ impl SszEth1Cache {
}),
endpoint: endpoint_from_config(&config)
.map_err(|e| format!("Failed to create endpoint: {:?}", e))?,
to_finalize: RwLock::new(None),
// Set the remote head_block zero when creating a new instance. We only care about
// present and future eth1 nodes.
remote_head_block: RwLock::new(None),


@ -8,9 +8,9 @@ mod metrics;
mod service;
pub use block_cache::{BlockCache, Eth1Block};
pub use deposit_cache::DepositCache;
pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13};
pub use execution_layer::http::deposit_log::DepositLog;
pub use inner::SszEth1Cache;
pub use inner::{SszEth1Cache, SszEth1CacheV1, SszEth1CacheV13};
pub use service::{
BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service,
DEFAULT_CHAIN_ID,


@ -20,7 +20,7 @@ use std::path::PathBuf;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{interval_at, Duration, Instant};
use types::{ChainSpec, EthSpec, Unsigned};
use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned};
/// Indicates the default eth1 chain id we use for the deposit contract.
pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli;
@ -63,7 +63,13 @@ async fn endpoint_state(
config_chain_id: &Eth1Id,
log: &Logger,
) -> EndpointState {
let error_connecting = |e| {
let error_connecting = |e: String| {
debug!(
log,
"eth1 endpoint error";
"endpoint" => %endpoint,
"error" => &e,
);
warn!(
log,
"Error connecting to eth1 node endpoint";
@ -213,6 +219,10 @@ pub enum Error {
GetDepositLogsFailed(String),
/// There was an unexpected internal error.
Internal(String),
/// Error finalizing deposit
FailedToFinalizeDeposit(String),
/// There was a problem initializing from the deposit snapshot
FailedToInitializeFromSnapshot(String),
}
/// The success message for an Eth1Data cache update.
@ -395,6 +405,7 @@ impl Service {
config.deposit_contract_deploy_block,
)),
endpoint: endpoint_from_config(&config)?,
to_finalize: RwLock::new(None),
remote_head_block: RwLock::new(None),
config: RwLock::new(config),
spec,
@ -407,6 +418,36 @@ impl Service {
&self.inner.endpoint
}
/// Creates a new service, initializing the deposit tree from a snapshot.
pub fn from_deposit_snapshot(
config: Config,
log: Logger,
spec: ChainSpec,
deposit_snapshot: &DepositTreeSnapshot,
) -> Result<Self, Error> {
let deposit_cache =
DepositUpdater::from_snapshot(config.deposit_contract_deploy_block, deposit_snapshot)
.map_err(Error::FailedToInitializeFromSnapshot)?;
Ok(Self {
inner: Arc::new(Inner {
block_cache: <_>::default(),
deposit_cache: RwLock::new(deposit_cache),
endpoint: endpoint_from_config(&config)
.map_err(Error::FailedToInitializeFromSnapshot)?,
to_finalize: RwLock::new(None),
remote_head_block: RwLock::new(None),
config: RwLock::new(config),
spec,
}),
log,
})
}
pub fn set_to_finalize(&self, eth1_data: Option<Eth1Data>) {
*(self.inner.to_finalize.write()) = eth1_data;
}
/// Returns the follow distance that has been shortened to accommodate for differences in the
/// spacing between blocks.
///
@ -521,7 +562,7 @@ impl Service {
let deposits = self.deposits().read();
deposits
.cache
.get_valid_signature_count(deposits.cache.latest_block_number()?)
.get_valid_signature_count(deposits.cache.latest_block_number())
}
/// Returns the number of deposits with valid signatures that have been observed up to and
@ -619,7 +660,8 @@ impl Service {
"old_block_number" => deposit_cache.last_processed_block,
"new_block_number" => deposit_cache.cache.latest_block_number(),
);
deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number();
deposit_cache.last_processed_block =
Some(deposit_cache.cache.latest_block_number());
}
let outcome =
@ -698,6 +740,37 @@ impl Service {
"deposits" => format!("{:?}", deposit),
),
};
let optional_eth1data = self.inner.to_finalize.write().take();
if let Some(eth1data_to_finalize) = optional_eth1data {
let already_finalized = self
.inner
.deposit_cache
.read()
.cache
.finalized_deposit_count();
let deposit_count_to_finalize = eth1data_to_finalize.deposit_count;
if deposit_count_to_finalize > already_finalized {
match self.finalize_deposits(eth1data_to_finalize) {
Err(e) => error!(
self.log,
"Failed to finalize deposit cache";
"error" => ?e,
),
Ok(()) => info!(
self.log,
"Successfully finalized deposit tree";
"finalized deposit count" => deposit_count_to_finalize,
),
}
} else {
debug!(
self.log,
"Deposits tree already finalized";
"already_finalized" => already_finalized,
"deposit_count_to_finalize" => deposit_count_to_finalize,
);
}
}
Ok(())
}
@ -733,6 +806,30 @@ impl Service {
)
}
pub fn finalize_deposits(&self, eth1_data: Eth1Data) -> Result<(), Error> {
let eth1_block = self
.inner
.block_cache
.read()
.block_by_hash(&eth1_data.block_hash)
.cloned()
.ok_or_else(|| {
Error::FailedToFinalizeDeposit(
"Finalized block not found in block cache".to_string(),
)
})?;
self.inner
.deposit_cache
.write()
.cache
.finalize(eth1_block)
.map_err(|e| Error::FailedToFinalizeDeposit(format!("{:?}", e)))
}
pub fn get_deposit_snapshot(&self) -> Option<DepositTreeSnapshot> {
self.inner.deposit_cache.read().cache.get_deposit_snapshot()
}
/// Contacts the remote eth1 node and attempts to import deposit logs up to the configured
/// follow-distance block.
///


@ -400,7 +400,7 @@ mod deposit_tree {
.deposits()
.read()
.cache
.get_deposits(first, last, last, 32)
.get_deposits(first, last, last)
.unwrap_or_else(|_| panic!("should get deposits in round {}", round));
assert_eq!(
@ -551,7 +551,7 @@ mod deposit_tree {
// Ensure that the root from the deposit tree matches what the contract reported.
let (root, deposits) = tree
.get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH)
.get_deposits(0, i as u64, deposit_counts[i])
.expect("should get deposits");
assert_eq!(
root, deposit_roots[i],


@ -208,6 +208,7 @@ pub mod deposit_methods {
#[derive(Clone, Copy)]
pub enum BlockQuery {
Number(u64),
Hash(Hash256),
Latest,
}
@ -322,9 +323,12 @@ pub mod deposit_methods {
query: BlockQuery,
timeout: Duration,
) -> Result<Block, String> {
let query_param = match query {
BlockQuery::Number(block_number) => format!("0x{:x}", block_number),
BlockQuery::Latest => "latest".to_string(),
let (method, query_param) = match query {
BlockQuery::Number(block_number) => {
("eth_getBlockByNumber", format!("0x{:x}", block_number))
}
BlockQuery::Hash(block_hash) => ("eth_getBlockByHash", format!("{:?}", block_hash)),
BlockQuery::Latest => ("eth_getBlockByNumber", "latest".to_string()),
};
let params = json!([
query_param,
@ -332,9 +336,9 @@ pub mod deposit_methods {
]);
let response: Value = self
.rpc_request("eth_getBlockByNumber", params, timeout)
.rpc_request(method, params, timeout)
.await
.map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?;
.map_err(|e| format!("{} call failed {:?}", method, e))?;
let hash: Vec<u8> = hex_to_bytes(
response


@ -23,7 +23,9 @@ pub fn genesis_deposits(
return Err(String::from("Failed to push leaf"));
}
let (_, mut proof) = tree.generate_proof(i, depth);
let (_, mut proof) = tree
.generate_proof(i, depth)
.map_err(|e| format!("Error generating merkle proof: {:?}", e))?;
proof.push(Hash256::from_slice(&int_to_fixed_bytes32((i + 1) as u64)));
assert_eq!(


@ -86,7 +86,7 @@ impl Eth1GenesisService {
.deposits()
.read()
.cache
.get(min_genesis_active_validator_count.saturating_sub(1))
.get_log(min_genesis_active_validator_count.saturating_sub(1))
.map(|log| log.block_number)
}
}


@ -1533,6 +1533,53 @@ pub fn serve<T: BeaconChainTypes>(
},
);
// GET beacon/deposit_snapshot
let get_beacon_deposit_snapshot = eth_v1
.and(warp::path("beacon"))
.and(warp::path("deposit_snapshot"))
.and(warp::path::end())
.and(warp::header::optional::<api_types::Accept>("accept"))
.and(eth1_service_filter.clone())
.and_then(
|accept_header: Option<api_types::Accept>, eth1_service: eth1::Service| {
blocking_task(move || match accept_header {
Some(api_types::Accept::Json) | None => {
let snapshot = eth1_service.get_deposit_snapshot();
Ok(
warp::reply::json(&api_types::GenericResponse::from(snapshot))
.into_response(),
)
}
_ => eth1_service
.get_deposit_snapshot()
.map(|snapshot| {
Response::builder()
.status(200)
.header("Content-Type", "application/octet-stream")
.body(snapshot.as_ssz_bytes().into())
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"failed to create response: {}",
e
))
})
})
.unwrap_or_else(|| {
Response::builder()
.status(503)
.header("Content-Type", "application/octet-stream")
.body(Vec::new().into())
.map_err(|e| {
warp_utils::reject::custom_server_error(format!(
"failed to create response: {}",
e
))
})
}),
})
},
);
/*
* config
*/
@ -3120,6 +3167,7 @@ pub fn serve<T: BeaconChainTypes>(
.or(get_beacon_pool_attester_slashings.boxed())
.or(get_beacon_pool_proposer_slashings.boxed())
.or(get_beacon_pool_voluntary_exits.boxed())
.or(get_beacon_deposit_snapshot.boxed())
.or(get_config_fork_schedule.boxed())
.or(get_config_spec.boxed())
.or(get_config_deposit_contract.boxed())


@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use types::{Checkpoint, Hash256, Slot};
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(12);
pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13);
// All the keys that get stored under the `BeaconMeta` column.
//


@ -114,6 +114,7 @@ pub struct Timeouts {
pub sync_duties: Duration,
pub get_beacon_blocks_ssz: Duration,
pub get_debug_beacon_states: Duration,
pub get_deposit_snapshot: Duration,
}
impl Timeouts {
@ -128,6 +129,7 @@ impl Timeouts {
sync_duties: timeout,
get_beacon_blocks_ssz: timeout,
get_debug_beacon_states: timeout,
get_deposit_snapshot: timeout,
}
}
}
@ -913,6 +915,20 @@ impl BeaconNodeHttpClient {
Ok(())
}
/// `GET beacon/deposit_snapshot`
pub async fn get_deposit_snapshot(&self) -> Result<Option<types::DepositTreeSnapshot>, Error> {
use ssz::Decode;
let mut path = self.eth_path(V1)?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("deposit_snapshot");
self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot)
.await?
.map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
.transpose()
}
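A hedged usage sketch of the new endpoint via the client method above (the URL and timeout are placeholders, and a tokio context is assumed):

```rust
use eth2::{BeaconNodeHttpClient, Timeouts};
use sensitive_url::SensitiveUrl;
use std::time::Duration;

async fn fetch_snapshot_sketch() {
    let url = SensitiveUrl::parse("http://localhost:5052").expect("valid url");
    let client = BeaconNodeHttpClient::new(url, Timeouts::set_all(Duration::from_secs(5)));
    match client.get_deposit_snapshot().await {
        Ok(Some(snapshot)) => println!("snapshot covers {} deposits", snapshot.deposit_count),
        Ok(None) => println!("remote node does not serve EIP-4881 deposit snapshots"),
        Err(e) => eprintln!("request failed: {:?}", e),
    }
}
```

This mirrors the three cases the checkpoint-sync code earlier in this commit distinguishes: a valid snapshot, a remote node without EIP-4881 support, and a transport or SSZ error.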
/// `POST validator/contribution_and_proofs`
pub async fn post_validator_contribution_and_proofs<T: EthSpec>(
&self,


@ -6,7 +6,10 @@ mod block_rewards;
use crate::{
ok_or_error,
types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId},
types::{
BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock,
GenericResponse, ValidatorId,
},
BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode,
};
use proto_array::core::ProtoArray;
@ -331,6 +334,19 @@ impl Eth1Block {
}
}
impl From<Eth1Block> for FinalizedExecutionBlock {
fn from(eth1_block: Eth1Block) -> Self {
Self {
deposit_count: eth1_block.deposit_count.unwrap_or(0),
deposit_root: eth1_block
.deposit_root
.unwrap_or_else(|| DepositTreeSnapshot::default().deposit_root),
block_hash: eth1_block.hash,
block_height: eth1_block.number,
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DatabaseInfo {
pub schema_version: u64,


@ -19,6 +19,8 @@ lazy_static! {
/// indices are populated by non-zero leaves (perfect for the deposit contract tree).
#[derive(Debug, PartialEq)]
pub enum MerkleTree {
/// Finalized Node
Finalized(H256),
/// Leaf node with the hash of its content.
Leaf(H256),
/// Internal node with hash, left subtree and right subtree.
@ -41,6 +43,24 @@ pub enum MerkleTreeError {
DepthTooSmall,
// Overflow occurred
ArithError,
// Can't finalize a zero node
ZeroNodeFinalized,
// Can't push to finalized node
FinalizedNodePushed,
// Invalid Snapshot
InvalidSnapshot(InvalidSnapshot),
// Can't generate a proof involving a finalized node
ProofEncounteredFinalizedNode,
// This should never happen
PleaseNotifyTheDevs,
}
#[derive(Debug, PartialEq, Clone)]
pub enum InvalidSnapshot {
// Branch hashes are empty but deposits are not
EmptyBranchWithNonZeroDeposits(usize),
// End of tree reached but deposits != 1
EndOfTree,
}
impl MerkleTree {
@ -97,9 +117,11 @@ impl MerkleTree {
let right: &mut MerkleTree = &mut *right;
match (&*left, &*right) {
// Tree is full
(Leaf(_), Leaf(_)) => return Err(MerkleTreeError::MerkleTreeFull),
(Leaf(_), Leaf(_)) | (Finalized(_), Leaf(_)) => {
return Err(MerkleTreeError::MerkleTreeFull)
}
// There is a right node so insert in right node
(Node(_, _, _), Node(_, _, _)) => {
(Node(_, _, _), Node(_, _, _)) | (Finalized(_), Node(_, _, _)) => {
right.push_leaf(elem, depth - 1)?;
}
// Both branches are zero, insert in left one
@ -107,7 +129,7 @@ impl MerkleTree {
*left = MerkleTree::create(&[elem], depth - 1);
}
// Leaf on left branch and zero on right branch, insert on right side
(Leaf(_), Zero(_)) => {
(Leaf(_), Zero(_)) | (Finalized(_), Zero(_)) => {
*right = MerkleTree::create(&[elem], depth - 1);
}
// Try inserting on the left node -> if it fails because it is full, insert in right side.
@ -129,6 +151,7 @@ impl MerkleTree {
right.hash().as_bytes(),
));
}
Finalized(_) => return Err(MerkleTreeError::FinalizedNodePushed),
}
Ok(())
@ -137,6 +160,7 @@ impl MerkleTree {
/// Retrieve the root hash of this Merkle tree.
pub fn hash(&self) -> H256 {
match *self {
MerkleTree::Finalized(h) => h,
MerkleTree::Leaf(h) => h,
MerkleTree::Node(h, _, _) => h,
MerkleTree::Zero(depth) => H256::from_slice(&ZERO_HASHES[depth]),
@ -146,7 +170,7 @@ impl MerkleTree {
/// Get a reference to the left and right subtrees if they exist.
pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> {
match *self {
MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None,
MerkleTree::Finalized(_) | MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None,
MerkleTree::Node(_, ref l, ref r) => Some((l, r)),
MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])),
}
@ -157,16 +181,125 @@ impl MerkleTree {
matches!(self, MerkleTree::Leaf(_))
}
/// Finalize the first `deposits_to_finalize` deposits in the tree
pub fn finalize_deposits(
&mut self,
deposits_to_finalize: usize,
level: usize,
) -> Result<(), MerkleTreeError> {
match self {
MerkleTree::Finalized(_) => Ok(()),
MerkleTree::Zero(_) => Err(MerkleTreeError::ZeroNodeFinalized),
MerkleTree::Leaf(hash) => {
if level != 0 {
// This shouldn't happen but this is a sanity check
return Err(MerkleTreeError::PleaseNotifyTheDevs);
}
*self = MerkleTree::Finalized(*hash);
Ok(())
}
MerkleTree::Node(hash, left, right) => {
if level == 0 {
// this shouldn't happen but we'll put it here for safety
return Err(MerkleTreeError::PleaseNotifyTheDevs);
}
let deposits = 0x1 << level;
if deposits <= deposits_to_finalize {
*self = MerkleTree::Finalized(*hash);
return Ok(());
}
left.finalize_deposits(deposits_to_finalize, level - 1)?;
if deposits_to_finalize > deposits / 2 {
let remaining = deposits_to_finalize - deposits / 2;
right.finalize_deposits(remaining, level - 1)?;
}
Ok(())
}
}
}
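To illustrate the semantics (a sketch, not a test from this commit): finalizing 2 of 3 leaves in a depth-2 tree collapses the fully-finalized left subtree into a single `Finalized` node, preserves the root hash, and makes proofs for the pruned leaves unavailable.

```rust
#[test]
fn finalize_collapses_full_left_subtree() {
    let leaves: Vec<H256> = (0..3u8).map(H256::repeat_byte).collect();
    let mut tree = MerkleTree::create(&leaves, 2);
    let root_before = tree.hash();

    tree.finalize_deposits(2, 2).expect("should finalize");

    // The root is unchanged; only the representation is compacted.
    assert_eq!(tree.hash(), root_before);
    // The full left subtree contributes exactly one snapshot hash.
    assert_eq!(tree.get_finalized_hashes().len(), 1);
    // Proofs for finalized leaves can no longer be generated...
    assert!(tree.generate_proof(0, 2).is_err());
    // ...but the unfinalized third leaf still proves fine.
    assert!(tree.generate_proof(2, 2).is_ok());
}
```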
fn append_finalized_hashes(&self, result: &mut Vec<H256>) {
match self {
MerkleTree::Zero(_) | MerkleTree::Leaf(_) => {}
MerkleTree::Finalized(h) => result.push(*h),
MerkleTree::Node(_, left, right) => {
left.append_finalized_hashes(result);
right.append_finalized_hashes(result);
}
}
}
pub fn get_finalized_hashes(&self) -> Vec<H256> {
let mut result = vec![];
self.append_finalized_hashes(&mut result);
result
}
pub fn from_finalized_snapshot(
finalized_branch: &[H256],
deposit_count: usize,
level: usize,
) -> Result<Self, MerkleTreeError> {
if finalized_branch.is_empty() {
return if deposit_count == 0 {
Ok(MerkleTree::Zero(level))
} else {
Err(InvalidSnapshot::EmptyBranchWithNonZeroDeposits(deposit_count).into())
};
}
if deposit_count == (0x1 << level) {
return Ok(MerkleTree::Finalized(
*finalized_branch
.get(0)
.ok_or(MerkleTreeError::PleaseNotifyTheDevs)?,
));
}
if level == 0 {
return Err(InvalidSnapshot::EndOfTree.into());
}
let (left, right) = match deposit_count.checked_sub(0x1 << (level - 1)) {
// left tree is fully finalized
Some(right_deposits) => {
let (left_hash, right_branch) = finalized_branch
.split_first()
.ok_or(MerkleTreeError::PleaseNotifyTheDevs)?;
(
MerkleTree::Finalized(*left_hash),
MerkleTree::from_finalized_snapshot(right_branch, right_deposits, level - 1)?,
)
}
// left tree is not fully finalized -> right tree is zero
None => (
MerkleTree::from_finalized_snapshot(finalized_branch, deposit_count, level - 1)?,
MerkleTree::Zero(level - 1),
),
};
let hash = H256::from_slice(&hash32_concat(
left.hash().as_bytes(),
right.hash().as_bytes(),
));
Ok(MerkleTree::Node(hash, Box::new(left), Box::new(right)))
}
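
To see the recursion above at work, here is a hedged round-trip sketch (the tree depth and leaf values are arbitrary, and `snapshot_round_trip` is our name): finalize part of a small tree, rebuild it from the extracted branch, and replay the remaining deposit.

```rust
use ethereum_types::H256;
use merkle_proof::MerkleTree;

fn snapshot_round_trip() {
    let depth = 3;
    let leaves: Vec<H256> = (0..5u64).map(H256::from_low_u64_be).collect();
    let mut tree = MerkleTree::create(&leaves, depth);
    // Collapse the full left subtree; 4 = 0b100 leaves a single stored hash.
    tree.finalize_deposits(4, depth).unwrap();
    let branch = tree.get_finalized_hashes();
    assert_eq!(branch.len(), 1);
    // Rebuild from the branch and replay the one unfinalized deposit.
    let mut rebuilt = MerkleTree::from_finalized_snapshot(&branch, 4, depth).unwrap();
    rebuilt.push_leaf(leaves[4], depth).unwrap();
    assert_eq!(rebuilt.hash(), tree.hash());
}
```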
/// Return the leaf at `index` and a Merkle proof of its inclusion.
///
/// The Merkle proof is in "bottom-up" order, starting with a leaf node
/// and moving up the tree. Its length will be exactly equal to `depth`.
pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec<H256>) {
pub fn generate_proof(
&self,
index: usize,
depth: usize,
) -> Result<(H256, Vec<H256>), MerkleTreeError> {
let mut proof = vec![];
let mut current_node = self;
let mut current_depth = depth;
while current_depth > 0 {
let ith_bit = (index >> (current_depth - 1)) & 0x01;
if let &MerkleTree::Finalized(_) = current_node {
return Err(MerkleTreeError::ProofEncounteredFinalizedNode);
}
// Note: unwrap is safe because leaves are only ever constructed at depth == 0.
let (left, right) = current_node.left_and_right_branches().unwrap();
@ -187,7 +320,33 @@ impl MerkleTree {
// Put proof in bottom-up order.
proof.reverse();
(current_node.hash(), proof)
Ok((current_node.hash(), proof))
}
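
Since `generate_proof` is now fallible, callers have to handle `ProofEncounteredFinalizedNode` for indices that fall inside the pruned region. A hedged sketch of the calling pattern (`prove_or_skip` is our name):

```rust
use merkle_proof::{verify_merkle_proof, MerkleTree, MerkleTreeError};

fn prove_or_skip(tree: &MerkleTree, index: usize, depth: usize) {
    match tree.generate_proof(index, depth) {
        Ok((leaf, branch)) => {
            // Unfinalized deposits still verify against the current root.
            assert!(verify_merkle_proof(leaf, &branch, depth, index, tree.hash()));
        }
        Err(MerkleTreeError::ProofEncounteredFinalizedNode) => {
            // The leaf's siblings were pruned by finalization; no proof can
            // be constructed from this cache.
        }
        Err(e) => panic!("unexpected error: {:?}", e),
    }
}
```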
/// Pretty-print the tree; useful for debugging.
pub fn print_node(&self, mut space: u32) {
const SPACES: u32 = 10;
space += SPACES;
let (pair, text) = match self {
MerkleTree::Node(hash, left, right) => (Some((left, right)), format!("Node({})", hash)),
MerkleTree::Leaf(hash) => (None, format!("Leaf({})", hash)),
MerkleTree::Zero(depth) => (
None,
format!("Z[{}]({})", depth, H256::from_slice(&ZERO_HASHES[*depth])),
),
MerkleTree::Finalized(hash) => (None, format!("Finl({})", hash)),
};
if let Some((_, right)) = pair {
right.print_node(space);
}
println!();
for _i in SPACES..space {
print!(" ");
}
println!("{}", text);
if let Some((left, _)) = pair {
left.print_node(space);
}
}
}
@ -235,6 +394,12 @@ impl From<ArithError> for MerkleTreeError {
}
}
impl From<InvalidSnapshot> for MerkleTreeError {
fn from(e: InvalidSnapshot) -> Self {
MerkleTreeError::InvalidSnapshot(e)
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -255,7 +420,9 @@ mod tests {
let merkle_root = merkle_tree.hash();
let proofs_ok = (0..leaves.len()).all(|i| {
let (leaf, branch) = merkle_tree.generate_proof(i, depth);
let (leaf, branch) = merkle_tree
.generate_proof(i, depth)
.expect("should generate proof");
leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root)
});
@ -274,7 +441,9 @@ mod tests {
let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| {
assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(()));
let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth);
let (stored_leaf, branch) = merkle_tree
.generate_proof(i, depth)
.expect("should generate proof");
stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash())
});

View File

@ -246,6 +246,20 @@ impl Decode for NonZeroUsize {
}
}
impl<T: Decode> Decode for Option<T> {
fn is_ssz_fixed_len() -> bool {
false
}
fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
let (selector, body) = split_union_bytes(bytes)?;
match selector.into() {
0u8 => Ok(None),
1u8 => <T as Decode>::from_ssz_bytes(body).map(Option::Some),
other => Err(DecodeError::UnionSelectorInvalid(other)),
}
}
}
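
This impl (and the matching `Encode` impl in the next file) follows the SSZ union scheme `Union[None, T]`: a one-byte selector, 0 for `None` and 1 for `Some`, followed by the encoded body. A hedged sketch of the resulting wire format:

```rust
use ssz::Decode;

fn main() {
    // [0x00]          -> None
    // [0x01, body...] -> Some(T decoded from body)
    assert_eq!(<Option<u8>>::from_ssz_bytes(&[0]).unwrap(), None);
    assert_eq!(<Option<u8>>::from_ssz_bytes(&[1, 42]).unwrap(), Some(42));
}
```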
impl<T: Decode> Decode for Arc<T> {
fn is_ssz_fixed_len() -> bool {
T::is_ssz_fixed_len()

View File

@ -203,6 +203,34 @@ impl_encode_for_tuples! {
}
}
impl<T: Encode> Encode for Option<T> {
fn is_ssz_fixed_len() -> bool {
false
}
fn ssz_append(&self, buf: &mut Vec<u8>) {
match self {
Option::None => {
let union_selector: u8 = 0u8;
buf.push(union_selector);
}
Option::Some(ref inner) => {
let union_selector: u8 = 1u8;
buf.push(union_selector);
inner.ssz_append(buf);
}
}
}
fn ssz_bytes_len(&self) -> usize {
match self {
Option::None => 1usize,
Option::Some(ref inner) => inner
.ssz_bytes_len()
.checked_add(1)
.expect("encoded length must be less than usize::max_value"),
}
}
}
impl<T: Encode> Encode for Arc<T> {
fn is_ssz_fixed_len() -> bool {
T::is_ssz_fixed_len()
@ -561,6 +589,14 @@ mod tests {
);
}
#[test]
fn ssz_encode_option_u8() {
let opt: Option<u8> = None;
assert_eq!(opt.as_ssz_bytes(), vec![0]);
let opt: Option<u8> = Some(2);
assert_eq!(opt.as_ssz_bytes(), vec![1, 2]);
}
#[test]
fn ssz_encode_bool() {
assert_eq!(true.as_ssz_bytes(), vec![1]);

View File

@ -22,6 +22,13 @@ mod round_trip {
round_trip(items);
}
#[test]
fn option_u16() {
let items: Vec<Option<u16>> = vec![None, Some(2u16)];
round_trip(items);
}
#[test]
fn u8_array_4() {
let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]];
@ -46,6 +53,17 @@ mod round_trip {
round_trip(items);
}
#[test]
fn option_vec_h256() {
let items: Vec<Option<Vec<H256>>> = vec![
None,
Some(vec![]),
Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]),
];
round_trip(items);
}
#[test]
fn vec_u16() {
let items: Vec<Vec<u16>> = vec![

View File

@ -2,12 +2,14 @@ use eth2_hashing::hash;
use int_to_bytes::int_to_bytes32;
use merkle_proof::{MerkleTree, MerkleTreeError};
use safe_arith::SafeArith;
use types::Hash256;
use types::{DepositTreeSnapshot, FinalizedExecutionBlock, Hash256};
/// Emulates the eth1 deposit contract Merkle tree.
#[derive(PartialEq)]
pub struct DepositDataTree {
tree: MerkleTree,
mix_in_length: usize,
finalized_execution_block: Option<FinalizedExecutionBlock>,
depth: usize,
}
@ -17,6 +19,7 @@ impl DepositDataTree {
Self {
tree: MerkleTree::create(leaves, depth),
mix_in_length,
finalized_execution_block: None,
depth,
}
}
@ -38,10 +41,10 @@ impl DepositDataTree {
///
/// The Merkle proof is in "bottom-up" order, starting with a leaf node
/// and moving up the tree. Its length will be exactly equal to `depth + 1`.
pub fn generate_proof(&self, index: usize) -> (Hash256, Vec<Hash256>) {
let (root, mut proof) = self.tree.generate_proof(index, self.depth);
pub fn generate_proof(&self, index: usize) -> Result<(Hash256, Vec<Hash256>), MerkleTreeError> {
let (root, mut proof) = self.tree.generate_proof(index, self.depth)?;
proof.push(Hash256::from_slice(&self.length_bytes()));
(root, proof)
Ok((root, proof))
}
/// Add a deposit to the Merkle tree.
@ -50,4 +53,50 @@ impl DepositDataTree {
self.mix_in_length.safe_add_assign(1)?;
Ok(())
}
/// Finalize deposits up to `finalized_execution_block.deposit_count`
pub fn finalize(
&mut self,
finalized_execution_block: FinalizedExecutionBlock,
) -> Result<(), MerkleTreeError> {
self.tree
.finalize_deposits(finalized_execution_block.deposit_count as usize, self.depth)?;
self.finalized_execution_block = Some(finalized_execution_block);
Ok(())
}
/// Get a snapshot of the finalized deposit tree (if the tree has been finalized).
pub fn get_snapshot(&self) -> Option<DepositTreeSnapshot> {
let finalized_execution_block = self.finalized_execution_block.as_ref()?;
Some(DepositTreeSnapshot {
finalized: self.tree.get_finalized_hashes(),
deposit_root: finalized_execution_block.deposit_root,
deposit_count: finalized_execution_block.deposit_count,
execution_block_hash: finalized_execution_block.block_hash,
execution_block_height: finalized_execution_block.block_height,
})
}
/// Create a new Merkle tree from a snapshot
pub fn from_snapshot(
snapshot: &DepositTreeSnapshot,
depth: usize,
) -> Result<Self, MerkleTreeError> {
Ok(Self {
tree: MerkleTree::from_finalized_snapshot(
&snapshot.finalized,
snapshot.deposit_count as usize,
depth,
)?,
mix_in_length: snapshot.deposit_count as usize,
finalized_execution_block: Some(snapshot.into()),
depth,
})
}
#[allow(dead_code)]
pub fn print_tree(&self) {
self.tree.print_node(0);
println!("========================================================");
}
}
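
Putting the new pieces together, a hedged end-to-end sketch (assuming the existing `create` and `root` helpers keep their current signatures; the block hash and height are placeholders): finalize against a `FinalizedExecutionBlock`, snapshot, and restore.

```rust
use types::{FinalizedExecutionBlock, Hash256, DEPOSIT_TREE_DEPTH};

fn deposit_tree_round_trip() {
    let leaves: Vec<Hash256> = (0..4u64).map(Hash256::from_low_u64_be).collect();
    let mut tree = DepositDataTree::create(&leaves, leaves.len(), DEPOSIT_TREE_DEPTH);
    tree.finalize(FinalizedExecutionBlock {
        deposit_root: tree.root(),
        deposit_count: leaves.len() as u64,
        block_hash: Hash256::zero(), // placeholder execution-block data
        block_height: 0,
    })
    .unwrap();
    let snapshot = tree.get_snapshot().expect("tree is finalized");
    let restored = DepositDataTree::from_snapshot(&snapshot, DEPOSIT_TREE_DEPTH).unwrap();
    assert_eq!(restored.get_snapshot(), tree.get_snapshot());
}
```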

View File

@ -1,6 +1,6 @@
[package]
name = "types"
version = "0.2.0"
version = "0.2.1"
authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
edition = "2021"

View File

@ -0,0 +1,83 @@
use crate::*;
use eth2_hashing::{hash32_concat, ZERO_HASHES};
use int_to_bytes::int_to_bytes32;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use test_random_derive::TestRandom;
use crate::test_utils::TestRandom;
use crate::DEPOSIT_TREE_DEPTH;
#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)]
pub struct FinalizedExecutionBlock {
pub deposit_root: Hash256,
pub deposit_count: u64,
pub block_hash: Hash256,
pub block_height: u64,
}
impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock {
fn from(snapshot: &DepositTreeSnapshot) -> Self {
Self {
deposit_root: snapshot.deposit_root,
deposit_count: snapshot.deposit_count,
block_hash: snapshot.execution_block_hash,
block_height: snapshot.execution_block_height,
}
}
}
#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)]
pub struct DepositTreeSnapshot {
pub finalized: Vec<Hash256>,
pub deposit_root: Hash256,
pub deposit_count: u64,
pub execution_block_hash: Hash256,
pub execution_block_height: u64,
}
impl Default for DepositTreeSnapshot {
fn default() -> Self {
let mut result = Self {
finalized: vec![],
deposit_root: Hash256::default(),
deposit_count: 0,
execution_block_hash: Hash256::zero(),
execution_block_height: 0,
};
// Set the deposit root to the actual root of the empty tree.
result.deposit_root = result.calculate_root().unwrap();
result
}
}
impl DepositTreeSnapshot {
/// Calculates the deposit tree root from the hashes in the snapshot.
pub fn calculate_root(&self) -> Option<Hash256> {
let mut size = self.deposit_count;
let mut index = self.finalized.len();
let mut deposit_root = [0; 32];
for height in 0..DEPOSIT_TREE_DEPTH {
deposit_root = if (size & 1) == 1 {
index = index.checked_sub(1)?;
hash32_concat(self.finalized.get(index)?.as_bytes(), &deposit_root)
} else {
hash32_concat(&deposit_root, ZERO_HASHES.get(height)?)
};
size /= 2;
}
// add mix-in-length
deposit_root = hash32_concat(&deposit_root, &int_to_bytes32(self.deposit_count));
Some(Hash256::from_slice(&deposit_root))
}
pub fn is_valid(&self) -> bool {
self.calculate_root()
.map_or(false, |calculated| self.deposit_root == calculated)
}
}
#[cfg(test)]
mod tests {
use super::*;
ssz_tests!(DepositTreeSnapshot);
}
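
`calculate_root` walks the bits of `deposit_count` from the leaves upward, consuming one stored hash per set bit and a zero hash per clear bit, then mixes in the length; `is_valid` simply recomputes and compares. A hedged sanity check (`snapshot_validity` is our name):

```rust
use types::{DepositTreeSnapshot, Hash256};

fn snapshot_validity() {
    // The default (empty) snapshot recomputes its own root.
    let snapshot = DepositTreeSnapshot::default();
    assert!(snapshot.is_valid());

    // A tampered root must be detected.
    let mut bad = DepositTreeSnapshot::default();
    bad.deposit_root = Hash256::repeat_byte(0xff);
    assert!(!bad.is_valid());
}
```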

View File

@ -36,6 +36,7 @@ pub mod contribution_and_proof;
pub mod deposit;
pub mod deposit_data;
pub mod deposit_message;
pub mod deposit_tree_snapshot;
pub mod enr_fork_id;
pub mod eth1_data;
pub mod eth_spec;
@ -120,6 +121,7 @@ pub use crate::contribution_and_proof::ContributionAndProof;
pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH};
pub use crate::deposit_data::DepositData;
pub use crate::deposit_message::DepositMessage;
pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock};
pub use crate::enr_fork_id::EnrForkId;
pub use crate::eth1_data::Eth1Data;
pub use crate::eth_spec::EthSpecId;

View File

@ -256,6 +256,7 @@ pub fn migrate_db<E: EthSpec>(
migrate_schema::<Witness<SystemTimeSlotClock, CachingEth1Backend<E>, _, _, _>>(
db,
client_config.eth1.deposit_contract_deploy_block,
&client_config.get_data_dir(),
from,
to,

View File

@ -77,6 +77,7 @@ const HTTP_SYNC_COMMITTEE_CONTRIBUTION_TIMEOUT_QUOTIENT: u32 = 4;
const HTTP_SYNC_DUTIES_TIMEOUT_QUOTIENT: u32 = 4;
const HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT: u32 = 4;
const HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT: u32 = 4;
const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4;
const DOPPELGANGER_SERVICE_NAME: &str = "doppelganger";
@ -291,6 +292,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
/ HTTP_GET_BEACON_BLOCK_SSZ_TIMEOUT_QUOTIENT,
get_debug_beacon_states: slot_duration
/ HTTP_GET_DEBUG_BEACON_STATE_QUOTIENT,
get_deposit_snapshot: slot_duration / HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT,
}
} else {
Timeouts::set_all(slot_duration)
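
For scale, the quotient of 4 gives the new deposit-snapshot request a quarter of a slot before timing out, e.g. on mainnet:

```rust
use std::time::Duration;

fn main() {
    // Hedged illustration: mainnet slots are 12 seconds, so 12s / 4 = 3s.
    let slot_duration = Duration::from_secs(12);
    assert_eq!(slot_duration / 4, Duration::from_secs(3));
}
```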