merge with unstable

commit 8656d23327

.github/workflows/local-testnet.yml (2 changes, vendored)
@@ -22,6 +22,8 @@ jobs:
        run: rustup update stable
      - name: Install Protoc
        uses: arduino/setup-protoc@v1
+       with:
+         repo-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Install ganache
        run: npm install ganache@latest --global

.github/workflows/release.yml (9 changes, vendored)

@@ -83,6 +83,15 @@ jobs:
         if: startsWith(matrix.arch, 'x86_64-windows')
         run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV

+      # ==============================
+      # Windows & Mac dependencies
+      # ==============================
+      - name: Install Protoc
+        if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows')
+        uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+
       # ==============================
       # Builds
       # ==============================

Cargo.lock (641 changes, generated) - file diff suppressed because it is too large
Cargo.toml

@@ -43,7 +43,6 @@ members = [
     "common/unused_port",
     "common/validator_dir",
     "common/warp_utils",
-    "common/fallback",
     "common/monitoring_api",

    "database_manager",
Cross.toml

@@ -1,5 +1,5 @@
 [target.x86_64-unknown-linux-gnu]
-dockerfile = './scripts/cross/x86_64-unknown-linux-gnu.dockerfile'
+dockerfile = './scripts/cross/Dockerfile'

 [target.aarch64-unknown-linux-gnu]
-dockerfile = './scripts/cross/aarch64-unknown-linux-gnu.dockerfile'
+dockerfile = './scripts/cross/Dockerfile'
beacon_node/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "3.1.2"
+version = "3.2.1"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2021"

beacon_node/beacon_chain/src/beacon_chain.rs

@@ -18,6 +18,7 @@ use crate::chain_config::ChainConfig;
 use crate::early_attester_cache::EarlyAttesterCache;
 use crate::errors::{BeaconChainError as Error, BlockProductionError};
 use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend};
+use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData};
 use crate::events::ServerSentEventHandler;
 use crate::execution_payload::get_execution_payload;
 use crate::execution_payload::PreparePayloadHandle;

@@ -85,7 +86,7 @@ use state_processing::{
     },
     per_slot_processing,
     state_advance::{complete_state_advance, partial_state_advance},
-    BlockSignatureStrategy, SigVerifiedOp, VerifyBlockRoot, VerifyOperation,
+    BlockSignatureStrategy, ConsensusContext, SigVerifiedOp, VerifyBlockRoot, VerifyOperation,
 };
 use std::cmp::Ordering;
 use std::collections::HashMap;

@@ -119,6 +120,9 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1);
 /// validator pubkey cache.
 pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1);

+/// The timeout for the eth1 finalization cache
+pub const ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_millis(200);
+
 // These keys are all zero because they get stored in different columns, see `DBColumn` type.
 pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero();
 pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero();

@@ -361,6 +365,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub(crate) snapshot_cache: TimeoutRwLock<SnapshotCache<T::EthSpec>>,
     /// Caches the attester shuffling for a given epoch and shuffling key root.
     pub shuffling_cache: TimeoutRwLock<ShufflingCache>,
+    /// A cache of eth1 deposit data at epoch boundaries for deposit finalization
+    pub eth1_finalization_cache: TimeoutRwLock<Eth1FinalizationCache>,
     /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root.
     pub beacon_proposer_cache: Mutex<BeaconProposerCache>,
     /// Caches a map of `validator_index -> validator_pubkey`.
@@ -2013,60 +2019,75 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         target_epoch: Epoch,
         state: &BeaconState<T::EthSpec>,
     ) -> bool {
-        let slots_per_epoch = T::EthSpec::slots_per_epoch();
-        let shuffling_lookahead = 1 + self.spec.min_seed_lookahead.as_u64();
-
-        // Shuffling can't have changed if we're in the first few epochs
-        if state.current_epoch() < shuffling_lookahead {
-            return true;
-        }
-
-        // Otherwise the shuffling is determined by the block at the end of the target epoch
-        // minus the shuffling lookahead (usually 2). We call this the "pivot".
-        let pivot_slot =
-            if target_epoch == state.previous_epoch() || target_epoch == state.current_epoch() {
-                (target_epoch - shuffling_lookahead).end_slot(slots_per_epoch)
-            } else {
-                return false;
-            };
-
-        let state_pivot_block_root = match state.get_block_root(pivot_slot) {
-            Ok(root) => *root,
-            Err(e) => {
-                warn!(
-                    &self.log,
-                    "Missing pivot block root for attestation";
-                    "slot" => pivot_slot,
-                    "error" => ?e,
-                );
-                return false;
-            }
-        };
-
-        // Use fork choice's view of the block DAG to quickly evaluate whether the attestation's
-        // pivot block is the same as the current state's pivot block. If it is, then the
-        // attestation's shuffling is the same as the current state's.
-        // To account for skipped slots, find the first block at *or before* the pivot slot.
-        let fork_choice_lock = self.canonical_head.fork_choice_read_lock();
-        let pivot_block_root = fork_choice_lock
-            .proto_array()
-            .core_proto_array()
-            .iter_block_roots(block_root)
-            .find(|(_, slot)| *slot <= pivot_slot)
-            .map(|(block_root, _)| block_root);
-        drop(fork_choice_lock);
-
-        match pivot_block_root {
-            Some(root) => root == state_pivot_block_root,
-            None => {
-                debug!(
-                    &self.log,
-                    "Discarding attestation because of missing ancestor";
-                    "pivot_slot" => pivot_slot.as_u64(),
-                    "block_root" => ?block_root,
-                );
-                false
-            }
-        }
-    }
+        self.shuffling_is_compatible_result(block_root, target_epoch, state)
+            .unwrap_or_else(|e| {
+                debug!(
+                    self.log,
+                    "Skipping attestation with incompatible shuffling";
+                    "block_root" => ?block_root,
+                    "target_epoch" => target_epoch,
+                    "reason" => ?e,
+                );
+                false
+            })
+    }
+
+    fn shuffling_is_compatible_result(
+        &self,
+        block_root: &Hash256,
+        target_epoch: Epoch,
+        state: &BeaconState<T::EthSpec>,
+    ) -> Result<bool, Error> {
+        // Compute the shuffling ID for the head state in the `target_epoch`.
+        let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), target_epoch)
+            .map_err(|e| Error::BeaconStateError(e.into()))?;
+        let head_shuffling_id =
+            AttestationShufflingId::new(self.genesis_block_root, state, relative_epoch)?;
+
+        // Load the block's shuffling ID from fork choice. We use the variant of `get_block` that
+        // checks descent from the finalized block, so there's one case where we'll spuriously
+        // return `false`: where an attestation for the previous epoch nominates the pivot block
+        // which is the parent block of the finalized block. Such attestations are not useful, so
+        // this doesn't matter.
+        let fork_choice_lock = self.canonical_head.fork_choice_read_lock();
+        let block = fork_choice_lock
+            .get_block(block_root)
+            .ok_or(Error::AttestationHeadNotInForkChoice(*block_root))?;
+        drop(fork_choice_lock);
+
+        let block_shuffling_id = if target_epoch == block.current_epoch_shuffling_id.shuffling_epoch
+        {
+            block.current_epoch_shuffling_id
+        } else if target_epoch == block.next_epoch_shuffling_id.shuffling_epoch {
+            block.next_epoch_shuffling_id
+        } else if target_epoch > block.next_epoch_shuffling_id.shuffling_epoch {
+            AttestationShufflingId {
+                shuffling_epoch: target_epoch,
+                shuffling_decision_block: *block_root,
+            }
+        } else {
+            debug!(
+                self.log,
+                "Skipping attestation with incompatible shuffling";
+                "block_root" => ?block_root,
+                "target_epoch" => target_epoch,
+                "reason" => "target epoch less than block epoch"
+            );
+            return Ok(false);
+        };
+
+        if head_shuffling_id == block_shuffling_id {
+            Ok(true)
+        } else {
+            debug!(
+                self.log,
+                "Skipping attestation with incompatible shuffling";
+                "block_root" => ?block_root,
+                "target_epoch" => target_epoch,
+                "head_shuffling_id" => ?head_shuffling_id,
+                "block_shuffling_id" => ?block_shuffling_id,
+            );
+            Ok(false)
+        }
+    }

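The hunk above replaces the pivot-block walk with a comparison of `AttestationShufflingId`s and moves the fallible logic into a `Result`-returning helper so the reason for a mismatch is logged once. A small sketch of how the boolean wrapper is meant to be used by attestation verification (the call site is not part of this diff, so the surrounding names are assumptions for illustration):

    // Before verifying an attestation against committees computed from the head state,
    // check that the attestation's head block would have produced the same shuffling
    // for the target epoch.
    if chain.shuffling_is_compatible(&attestation_head_block_root, target_epoch, &head_state) {
        // Safe to reuse the committee cache derived from `head_state`.
    } else {
        // Fall back to loading a state that matches the attestation's own shuffling.
    }
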
@@ -2538,9 +2559,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             block,
             block_root,
             state,
-            parent_block: _,
+            parent_block,
             confirmed_state_roots,
             payload_verification_handle,
+            parent_eth1_finalization_data,
         } = execution_pending_block;

         let PayloadVerificationOutcome {

@@ -2592,6 +2614,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     confirmed_state_roots,
                     payload_verification_status,
                     count_unrealized,
+                    parent_block,
+                    parent_eth1_finalization_data,
                 )
             },
             "payload_verification_handle",

@@ -2606,6 +2630,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// An error is returned if the block was unable to be imported. It may be partially imported
     /// (i.e., this function is not atomic).
+    #[allow(clippy::too_many_arguments)]
     fn import_block(
         &self,
         signed_block: Arc<SignedBeaconBlock<T::EthSpec>>,

@@ -2614,6 +2639,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         confirmed_state_roots: Vec<Hash256>,
         payload_verification_status: PayloadVerificationStatus,
         count_unrealized: CountUnrealized,
+        parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
+        parent_eth1_finalization_data: Eth1FinalizationData,
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
         let current_slot = self.slot()?;
         let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());

@@ -2994,6 +3021,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let parent_root = block.parent_root();
         let slot = block.slot();

+        let current_eth1_finalization_data = Eth1FinalizationData {
+            eth1_data: state.eth1_data().clone(),
+            eth1_deposit_index: state.eth1_deposit_index(),
+        };
+        let current_finalized_checkpoint = state.finalized_checkpoint();
         self.snapshot_cache
             .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
             .ok_or(Error::SnapshotCacheLockTimeout)

@@ -3067,6 +3099,57 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             );
         }

+        // Do not write to eth1 finalization cache for blocks older than 5 epochs
+        // this helps reduce noise during sync
+        if block_delay_total
+            < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32)
+        {
+            let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch());
+            if parent_block_epoch < current_epoch {
+                // we've crossed epoch boundary, store Eth1FinalizationData
+                let (checkpoint, eth1_finalization_data) =
+                    if current_slot % T::EthSpec::slots_per_epoch() == 0 {
+                        // current block is the checkpoint
+                        (
+                            Checkpoint {
+                                epoch: current_epoch,
+                                root: block_root,
+                            },
+                            current_eth1_finalization_data,
+                        )
+                    } else {
+                        // parent block is the checkpoint
+                        (
+                            Checkpoint {
+                                epoch: current_epoch,
+                                root: parent_block.canonical_root(),
+                            },
+                            parent_eth1_finalization_data,
+                        )
+                    };
+
+                if let Some(finalized_eth1_data) = self
+                    .eth1_finalization_cache
+                    .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT)
+                    .and_then(|mut cache| {
+                        cache.insert(checkpoint, eth1_finalization_data);
+                        cache.finalize(&current_finalized_checkpoint)
+                    })
+                {
+                    if let Some(eth1_chain) = self.eth1_chain.as_ref() {
+                        let finalized_deposit_count = finalized_eth1_data.deposit_count;
+                        eth1_chain.finalize_eth1_data(finalized_eth1_data);
+                        debug!(
+                            self.log,
+                            "called eth1_chain.finalize_eth1_data()";
+                            "epoch" => current_finalized_checkpoint.epoch,
+                            "deposit count" => finalized_deposit_count,
+                        );
+                    }
+                }
+            }
+        }
+
         // Inform the unknown block cache, in case it was waiting on this block.
         self.pre_finalization_block_cache
             .block_processed(block_root);

@@ -3525,7 +3608,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         }

         let slot = state.slot();
-        let proposer_index = state.get_beacon_proposer_index(state.slot(), &self.spec)? as u64;

         let sync_aggregate = if matches!(&state, BeaconState::Base(_)) {
             None

@@ -3721,12 +3803,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             ProduceBlockVerification::VerifyRandao => BlockSignatureStrategy::VerifyRandao,
             ProduceBlockVerification::NoVerification => BlockSignatureStrategy::NoVerification,
         };
+        // Use a context without block root or proposer index so that both are checked.
+        let mut ctxt = ConsensusContext::new(block.slot());
         per_block_processing(
             &mut state,
             &block,
-            None,
             signature_strategy,
             VerifyBlockRoot::True,
+            &mut ctxt,
             &self.spec,
         )?;
         drop(process_timer);

@@ -4552,7 +4636,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ///
     /// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the
    /// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`.
-    pub(crate) fn with_committee_cache<F, R>(
+    pub fn with_committee_cache<F, R>(
         &self,
         head_block_root: Hash256,
         shuffling_epoch: Epoch,
beacon_node/beacon_chain/src/block_verification.rs

@@ -42,6 +42,7 @@
 //! END
 //!
 //! ```
+use crate::eth1_finalization_cache::Eth1FinalizationData;
 use crate::execution_payload::{
     is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
     AllowOptimisticImport, PayloadNotifier,

@@ -71,7 +72,8 @@ use state_processing::{
     block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
     per_block_processing, per_slot_processing,
     state_advance::partial_state_advance,
-    BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot,
+    BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError,
+    VerifyBlockRoot,
 };
 use std::borrow::Cow;
 use std::fs;

@@ -550,7 +552,7 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
     let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);

     for (block_root, block) in &chain_segment {
-        signature_verifier.include_all_signatures(block, Some(*block_root))?;
+        signature_verifier.include_all_signatures(block, Some(*block_root), None)?;
     }

     if signature_verifier.verify().is_err() {

@@ -561,10 +563,17 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(

     let mut signature_verified_blocks = chain_segment
         .into_iter()
-        .map(|(block_root, block)| SignatureVerifiedBlock {
+        .map(|(block_root, block)| {
+            // Proposer index has already been verified above during signature verification.
+            let consensus_context = ConsensusContext::new(block.slot())
+                .set_current_block_root(block_root)
+                .set_proposer_index(block.message().proposer_index());
+            SignatureVerifiedBlock {
                 block,
                 block_root,
                 parent: None,
+                consensus_context,
+            }
         })
         .collect::<Vec<_>>();

@@ -583,6 +592,7 @@ pub struct GossipVerifiedBlock<T: BeaconChainTypes> {
     pub block: Arc<SignedBeaconBlock<T::EthSpec>>,
     pub block_root: Hash256,
     parent: Option<PreProcessingSnapshot<T::EthSpec>>,
+    consensus_context: ConsensusContext<T::EthSpec>,
 }

 /// A wrapper around a `SignedBeaconBlock` that indicates that all signatures (except the deposit

@@ -591,6 +601,7 @@ pub struct SignatureVerifiedBlock<T: BeaconChainTypes> {
     block: Arc<SignedBeaconBlock<T::EthSpec>>,
     block_root: Hash256,
     parent: Option<PreProcessingSnapshot<T::EthSpec>>,
+    consensus_context: ConsensusContext<T::EthSpec>,
 }

 /// Used to await the result of executing payload with a remote EE.

@@ -613,6 +624,7 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
     pub block_root: Hash256,
     pub state: BeaconState<T::EthSpec>,
     pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
+    pub parent_eth1_finalization_data: Eth1FinalizationData,
     pub confirmed_state_roots: Vec<Hash256>,
     pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>,
 }

@@ -864,10 +876,16 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         // Validate the block's execution_payload (if any).
         validate_execution_payload_for_gossip(&parent_block, block.message(), chain)?;

+        // Having checked the proposer index and the block root we can cache them.
+        let consensus_context = ConsensusContext::new(block.slot())
+            .set_current_block_root(block_root)
+            .set_proposer_index(block.message().proposer_index());
+
         Ok(Self {
             block,
             block_root,
             parent,
+            consensus_context,
         })
     }

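The hunks above cache the proposer index and block root in a `ConsensusContext` once gossip checks have passed, so later stages can reuse them instead of recomputing. A minimal sketch of the resulting call shape, matching the per_block_processing hunks elsewhere in this commit (a fragment that assumes the surrounding chain, block and state bindings; not a standalone program):

    // Build the context from values already verified during gossip, then hand it to
    // per_block_processing in place of the old `Some(block_root)` argument.
    let mut consensus_context = ConsensusContext::new(block.slot())
        .set_current_block_root(block_root)
        .set_proposer_index(block.message().proposer_index());

    per_block_processing(
        &mut state,
        &block,
        BlockSignatureStrategy::NoVerification, // signatures were verified earlier
        VerifyBlockRoot::True,
        &mut consensus_context,
        &chain.spec,
    )?;
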
@@ -927,10 +945,13 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {

         let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);

-        signature_verifier.include_all_signatures(&block, Some(block_root))?;
+        signature_verifier.include_all_signatures(&block, Some(block_root), None)?;

         if signature_verifier.verify().is_ok() {
             Ok(Self {
+                consensus_context: ConsensusContext::new(block.slot())
+                    .set_current_block_root(block_root)
+                    .set_proposer_index(block.message().proposer_index()),
                 block,
                 block_root,
                 parent: Some(parent),

@@ -973,13 +994,18 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {

         let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);

-        signature_verifier.include_all_signatures_except_proposal(&block)?;
+        // Gossip verification has already checked the proposer index. Use it to check the RANDAO
+        // signature.
+        let verified_proposer_index = Some(block.message().proposer_index());
+        signature_verifier
+            .include_all_signatures_except_proposal(&block, verified_proposer_index)?;

         if signature_verifier.verify().is_ok() {
             Ok(Self {
                 block,
                 block_root: from.block_root,
                 parent: Some(parent),
+                consensus_context: from.consensus_context,
             })
         } else {
             Err(BlockError::InvalidSignature)

@@ -1016,7 +1042,13 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
                 .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))?
         };

-        ExecutionPendingBlock::from_signature_verified_components(block, block_root, parent, chain)
+        ExecutionPendingBlock::from_signature_verified_components(
+            block,
+            block_root,
+            parent,
+            self.consensus_context,
+            chain,
+        )
             .map_err(|e| BlockSlashInfo::SignatureValid(header, e))
     }

@@ -1058,6 +1090,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
         block: Arc<SignedBeaconBlock<T::EthSpec>>,
         block_root: Hash256,
         parent: PreProcessingSnapshot<T::EthSpec>,
+        mut consensus_context: ConsensusContext<T::EthSpec>,
         chain: &Arc<BeaconChain<T>>,
     ) -> Result<Self, BlockError<T::EthSpec>> {
         if let Some(parent) = chain

@@ -1134,6 +1167,11 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             .into());
         }

+        let parent_eth1_finalization_data = Eth1FinalizationData {
+            eth1_data: state.eth1_data().clone(),
+            eth1_deposit_index: state.eth1_deposit_index(),
+        };
+
         let distance = block.slot().as_u64().saturating_sub(state.slot().as_u64());
         for _ in 0..distance {
             let state_root = if parent.beacon_block.slot() == state.slot() {

@@ -1341,10 +1379,10 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
         if let Err(err) = per_block_processing(
             &mut state,
             &block,
-            Some(block_root),
             // Signatures were verified earlier in this function.
             BlockSignatureStrategy::NoVerification,
             VerifyBlockRoot::True,
+            &mut consensus_context,
             &chain.spec,
         ) {
             match err {

@@ -1389,6 +1427,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
             block_root,
             state,
             parent_block: parent.beacon_block,
+            parent_eth1_finalization_data,
             confirmed_state_roots,
             payload_verification_handle,
         })

beacon_node/beacon_chain/src/builder.rs

@@ -1,5 +1,6 @@
 use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY};
 use crate::eth1_chain::{CachingEth1Backend, SszEth1};
+use crate::eth1_finalization_cache::Eth1FinalizationCache;
 use crate::fork_choice_signal::ForkChoiceSignalTx;
 use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary};
 use crate::head_tracker::HeadTracker;

@@ -795,6 +796,7 @@ where
                 head_for_snapshot_cache,
             )),
             shuffling_cache: TimeoutRwLock::new(ShufflingCache::new()),
+            eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())),
             beacon_proposer_cache: <_>::default(),
             block_times_cache: <_>::default(),
             pre_finalization_block_cache: <_>::default(),

@@ -897,7 +899,7 @@ where
             .ok_or("dummy_eth1_backend requires a log")?;

         let backend =
-            CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone());
+            CachingEth1Backend::new(Eth1Config::default(), log.clone(), self.spec.clone())?;

         self.eth1_chain = Some(Eth1Chain::new_dummy(backend));

beacon_node/beacon_chain/src/eth1_chain.rs

@@ -16,7 +16,6 @@ use store::{DBColumn, Error as StoreError, StoreItem};
 use task_executor::TaskExecutor;
 use types::{
     BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
-    DEPOSIT_TREE_DEPTH,
 };

 type BlockNumber = u64;

@@ -170,8 +169,8 @@ fn get_sync_status<T: EthSpec>(

 #[derive(Encode, Decode, Clone)]
 pub struct SszEth1 {
-    use_dummy_backend: bool,
-    backend_bytes: Vec<u8>,
+    pub use_dummy_backend: bool,
+    pub backend_bytes: Vec<u8>,
 }

 impl StoreItem for SszEth1 {

@@ -305,6 +304,12 @@ where
         }
     }

+    /// Set in motion the finalization of `Eth1Data`. This method is called during block import
+    /// so it should be fast.
+    pub fn finalize_eth1_data(&self, eth1_data: Eth1Data) {
+        self.backend.finalize_eth1_data(eth1_data);
+    }
+
     /// Consumes `self`, returning the backend.
     pub fn into_backend(self) -> T {
         self.backend

@@ -335,6 +340,10 @@ pub trait Eth1ChainBackend<T: EthSpec>: Sized + Send + Sync {
     /// beacon node eth1 cache is.
     fn latest_cached_block(&self) -> Option<Eth1Block>;

+    /// Set in motion the finalization of `Eth1Data`. This method is called during block import
+    /// so it should be fast.
+    fn finalize_eth1_data(&self, eth1_data: Eth1Data);
+
     /// Returns the block at the head of the chain (ignoring follow distance, etc). Used to obtain
     /// an idea of how up-to-date the remote eth1 node is.
     fn head_block(&self) -> Option<Eth1Block>;

@@ -389,6 +398,8 @@ impl<T: EthSpec> Eth1ChainBackend<T> for DummyEth1ChainBackend<T> {
         None
     }

+    fn finalize_eth1_data(&self, _eth1_data: Eth1Data) {}
+
     fn head_block(&self) -> Option<Eth1Block> {
         None
     }

@@ -431,12 +442,13 @@ impl<T: EthSpec> CachingEth1Backend<T> {
     /// Instantiates `self` with empty caches.
     ///
     /// Does not connect to the eth1 node or start any tasks to keep the cache updated.
-    pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self {
-        Self {
-            core: HttpService::new(config, log.clone(), spec),
+    pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result<Self, String> {
+        Ok(Self {
+            core: HttpService::new(config, log.clone(), spec)
+                .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?,
             log,
             _phantom: PhantomData,
-        }
+        })
     }

     /// Starts the routine which connects to the external eth1 node and updates the caches.

@@ -546,7 +558,7 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
             .deposits()
             .read()
             .cache
-            .get_deposits(next, last, deposit_count, DEPOSIT_TREE_DEPTH)
+            .get_deposits(next, last, deposit_count)
             .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e)))
             .map(|(_deposit_root, deposits)| deposits)
     }

@@ -557,6 +569,12 @@ impl<T: EthSpec> Eth1ChainBackend<T> for CachingEth1Backend<T> {
         self.core.latest_cached_block()
     }

+    /// This only writes the eth1_data to a temporary cache so that the service
+    /// thread can later do the actual finalizing of the deposit tree.
+    fn finalize_eth1_data(&self, eth1_data: Eth1Data) {
+        self.core.set_to_finalize(Some(eth1_data));
+    }
+
     fn head_block(&self) -> Option<Eth1Block> {
         self.core.head_block()
     }
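Taken together, these eth1_chain.rs hunks thread deposit-tree finalization from block import down to the eth1 service. A condensed view of the flow, pieced together from the hunks in this commit (the call chain is an interpretation, not text copied from the diff):

    // import_block (beacon_chain.rs)
    //   -> Eth1FinalizationCache::finalize(&finalized_checkpoint) == Some(eth1_data)
    //   -> Eth1Chain::finalize_eth1_data(eth1_data)
    //   -> Eth1ChainBackend::finalize_eth1_data(eth1_data)
    //        DummyEth1ChainBackend: no-op
    //        CachingEth1Backend:    self.core.set_to_finalize(Some(eth1_data))
    //                               (the service thread later finalizes the deposit tree)
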
@@ -730,11 +748,9 @@ mod test {
         };

         let log = null_logger().unwrap();
-        Eth1Chain::new(CachingEth1Backend::new(
-            eth1_config,
-            log,
-            MainnetEthSpec::default_spec(),
-        ))
+        Eth1Chain::new(
+            CachingEth1Backend::new(eth1_config, log, MainnetEthSpec::default_spec()).unwrap(),
+        )
     }

     fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog {

beacon_node/beacon_chain/src/eth1_finalization_cache.rs (new file, 498 lines)

@@ -0,0 +1,498 @@
+use slog::{debug, Logger};
+use std::cmp;
+use std::collections::BTreeMap;
+use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root};
+
+/// The default size of the cache.
+/// The beacon chain only looks at the last 4 epochs for finalization.
+/// Add 1 for current epoch and 4 earlier epochs.
+pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5;
+
+/// These fields are named the same as the corresponding fields in the `BeaconState`
+/// as this structure stores these values from the `BeaconState` at a `Checkpoint`
+#[derive(Clone)]
+pub struct Eth1FinalizationData {
+    pub eth1_data: Eth1Data,
+    pub eth1_deposit_index: u64,
+}
+
+impl Eth1FinalizationData {
+    /// Ensures the deposit finalization conditions have been met. See:
+    /// https://eips.ethereum.org/EIPS/eip-4881#deposit-finalization-conditions
+    fn fully_imported(&self) -> bool {
+        self.eth1_deposit_index >= self.eth1_data.deposit_count
+    }
+}
+
+/// Implements map from Checkpoint -> Eth1CacheData
+pub struct CheckpointMap {
+    capacity: usize,
+    // There shouldn't be more than a couple of potential checkpoints at the same
+    // epoch. Searching through a vector for the matching Root should be faster
+    // than using another map from Root->Eth1CacheData
+    store: BTreeMap<Epoch, Vec<(Root, Eth1FinalizationData)>>,
+}
+
+impl Default for CheckpointMap {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Provides a map of `Eth1CacheData` referenced by `Checkpoint`
+///
+/// ## Cache Queuing
+///
+/// The cache keeps a maximum number of (`capacity`) epochs. Because there may be
+/// forks at the epoch boundary, it's possible that there exists more than one
+/// `Checkpoint` for the same `Epoch`. This cache will store all checkpoints for
+/// a given `Epoch`. When adding data for a new `Checkpoint` would cause the number
+/// of `Epoch`s stored to exceed `capacity`, the data for oldest `Epoch` is dropped
+impl CheckpointMap {
+    pub fn new() -> Self {
+        CheckpointMap {
+            capacity: DEFAULT_ETH1_CACHE_SIZE,
+            store: BTreeMap::new(),
+        }
+    }
+
+    pub fn with_capacity(capacity: usize) -> Self {
+        CheckpointMap {
+            capacity: cmp::max(1, capacity),
+            store: BTreeMap::new(),
+        }
+    }
+
+    pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) {
+        self.store
+            .entry(checkpoint.epoch)
+            .or_insert_with(Vec::new)
+            .push((checkpoint.root, eth1_finalization_data));
+
+        // faster to reduce size after the fact than do pre-checking to see
+        // if the current data would increase the size of the BTreeMap
+        while self.store.len() > self.capacity {
+            let oldest_stored_epoch = self.store.keys().next().cloned().unwrap();
+            self.store.remove(&oldest_stored_epoch);
+        }
+    }
+
+    pub fn get(&self, checkpoint: &Checkpoint) -> Option<&Eth1FinalizationData> {
+        match self.store.get(&checkpoint.epoch) {
+            Some(vec) => {
+                for (root, data) in vec {
+                    if *root == checkpoint.root {
+                        return Some(data);
+                    }
+                }
+                None
+            }
+            None => None,
+        }
+    }
+
+    #[cfg(test)]
+    pub fn len(&self) -> usize {
+        self.store.len()
+    }
+}
+
+/// This cache stores `Eth1CacheData` that could potentially be finalized within 4
+/// future epochs.
+pub struct Eth1FinalizationCache {
+    by_checkpoint: CheckpointMap,
+    pending_eth1: BTreeMap<u64, Eth1Data>,
+    last_finalized: Option<Eth1Data>,
+    log: Logger,
+}
+
+/// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to
+/// finalize deposits when a new epoch is finalized.
+///
+impl Eth1FinalizationCache {
+    pub fn new(log: Logger) -> Self {
+        Eth1FinalizationCache {
+            by_checkpoint: CheckpointMap::new(),
+            pending_eth1: BTreeMap::new(),
+            last_finalized: None,
+            log,
+        }
+    }
+
+    pub fn with_capacity(log: Logger, capacity: usize) -> Self {
+        Eth1FinalizationCache {
+            by_checkpoint: CheckpointMap::with_capacity(capacity),
+            pending_eth1: BTreeMap::new(),
+            last_finalized: None,
+            log,
+        }
+    }
+
+    pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) {
+        if !eth1_finalization_data.fully_imported() {
+            self.pending_eth1.insert(
+                eth1_finalization_data.eth1_data.deposit_count,
+                eth1_finalization_data.eth1_data.clone(),
+            );
+            debug!(
+                self.log,
+                "Eth1Cache: inserted pending eth1";
+                "eth1_data.deposit_count" => eth1_finalization_data.eth1_data.deposit_count,
+                "eth1_deposit_index" => eth1_finalization_data.eth1_deposit_index,
+            );
+        }
+        self.by_checkpoint
+            .insert(checkpoint, eth1_finalization_data);
+    }
+
+    pub fn finalize(&mut self, checkpoint: &Checkpoint) -> Option<Eth1Data> {
+        if let Some(eth1_finalized_data) = self.by_checkpoint.get(checkpoint) {
+            let finalized_deposit_index = eth1_finalized_data.eth1_deposit_index;
+            let mut result = None;
+            while let Some(pending_count) = self.pending_eth1.keys().next().cloned() {
+                if finalized_deposit_index >= pending_count {
+                    result = self.pending_eth1.remove(&pending_count);
+                    debug!(
+                        self.log,
+                        "Eth1Cache: dropped pending eth1";
+                        "pending_count" => pending_count,
+                        "finalized_deposit_index" => finalized_deposit_index,
+                    );
+                } else {
+                    break;
+                }
+            }
+            if eth1_finalized_data.fully_imported() {
+                result = Some(eth1_finalized_data.eth1_data.clone())
+            }
+            if result.is_some() {
+                self.last_finalized = result;
+            }
+            self.last_finalized.clone()
+        } else {
+            debug!(
+                self.log,
+                "Eth1Cache: cache miss";
+                "epoch" => checkpoint.epoch,
+            );
+            None
+        }
+    }
+
+    #[cfg(test)]
+    pub fn by_checkpoint(&self) -> &CheckpointMap {
+        &self.by_checkpoint
+    }
+
+    #[cfg(test)]
+    pub fn pending_eth1(&self) -> &BTreeMap<u64, Eth1Data> {
+        &self.pending_eth1
+    }
+}
+
+#[cfg(test)]
+pub mod tests {
+    use super::*;
+    use sloggers::null::NullLoggerBuilder;
+    use sloggers::Build;
+    use std::collections::HashMap;
+
+    const SLOTS_PER_EPOCH: u64 = 32;
+    const MAX_DEPOSITS: u64 = 16;
+    const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64;
+
+    fn eth1cache() -> Eth1FinalizationCache {
+        let log_builder = NullLoggerBuilder;
+        Eth1FinalizationCache::new(log_builder.build().expect("should build log"))
+    }
+
+    fn random_eth1_data(deposit_count: u64) -> Eth1Data {
+        Eth1Data {
+            deposit_root: Root::random(),
+            deposit_count,
+            block_hash: Root::random(),
+        }
+    }
+
+    fn random_checkpoint(epoch: u64) -> Checkpoint {
+        Checkpoint {
+            epoch: epoch.into(),
+            root: Root::random(),
+        }
+    }
+
+    fn random_checkpoints(n: usize) -> Vec<Checkpoint> {
+        let mut result = Vec::with_capacity(n);
+        for epoch in 0..n {
+            result.push(random_checkpoint(epoch as u64))
+        }
+        result
+    }
+
+    #[test]
+    fn fully_imported_deposits() {
+        let epochs = 16;
+        let deposits_imported = 128;
+
+        let eth1data = random_eth1_data(deposits_imported);
+        let checkpoints = random_checkpoints(epochs as usize);
+        let mut eth1cache = eth1cache();
+
+        for epoch in 4..epochs {
+            assert_eq!(
+                eth1cache.by_checkpoint().len(),
+                cmp::min((epoch - 4) as usize, DEFAULT_ETH1_CACHE_SIZE),
+                "Unexpected cache size"
+            );
+
+            let checkpoint = checkpoints
+                .get(epoch as usize)
+                .expect("should get checkpoint");
+            eth1cache.insert(
+                *checkpoint,
+                Eth1FinalizationData {
+                    eth1_data: eth1data.clone(),
+                    eth1_deposit_index: deposits_imported,
+                },
+            );
+
+            let finalized_checkpoint = checkpoints
+                .get((epoch - 4) as usize)
+                .expect("should get finalized checkpoint");
+            assert!(
+                eth1cache.pending_eth1().is_empty(),
+                "Deposits are fully imported so pending cache should be empty"
+            );
+            if epoch < 8 {
+                assert_eq!(
+                    eth1cache.finalize(finalized_checkpoint),
+                    None,
+                    "Should have cache miss"
+                );
+            } else {
+                assert_eq!(
+                    eth1cache.finalize(finalized_checkpoint),
+                    Some(eth1data.clone()),
+                    "Should have cache hit"
+                )
+            }
+        }
+    }
+
+    #[test]
+    fn partially_imported_deposits() {
+        let epochs = 16;
+        let initial_deposits_imported = 1024;
+        let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH;
+        let full_import_epoch = 13;
+        let total_deposits =
+            initial_deposits_imported + deposits_imported_per_epoch * full_import_epoch;
+
+        let eth1data = random_eth1_data(total_deposits);
+        let checkpoints = random_checkpoints(epochs as usize);
+        let mut eth1cache = eth1cache();
+
+        for epoch in 0..epochs {
+            assert_eq!(
+                eth1cache.by_checkpoint().len(),
+                cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE),
+                "Unexpected cache size"
+            );
+
+            let checkpoint = checkpoints
+                .get(epoch as usize)
+                .expect("should get checkpoint");
+            let deposits_imported = cmp::min(
+                total_deposits,
+                initial_deposits_imported + deposits_imported_per_epoch * epoch,
+            );
+            eth1cache.insert(
+                *checkpoint,
+                Eth1FinalizationData {
+                    eth1_data: eth1data.clone(),
+                    eth1_deposit_index: deposits_imported,
+                },
+            );
+
+            if epoch >= 4 {
+                let finalized_epoch = epoch - 4;
+                let finalized_checkpoint = checkpoints
+                    .get(finalized_epoch as usize)
+                    .expect("should get finalized checkpoint");
+                if finalized_epoch < full_import_epoch {
+                    assert_eq!(
+                        eth1cache.finalize(finalized_checkpoint),
+                        None,
+                        "Deposits not fully finalized so cache should return no Eth1Data",
+                    );
+                    assert_eq!(
+                        eth1cache.pending_eth1().len(),
+                        1,
+                        "Deposits not fully finalized. Pending eth1 cache should have 1 entry"
+                    );
+                } else {
+                    assert_eq!(
+                        eth1cache.finalize(finalized_checkpoint),
+                        Some(eth1data.clone()),
+                        "Deposits fully imported and finalized. Cache should return Eth1Data. finalized_deposits[{}]",
+                        (initial_deposits_imported + deposits_imported_per_epoch * finalized_epoch),
+                    );
+                    assert!(
+                        eth1cache.pending_eth1().is_empty(),
+                        "Deposits fully imported and finalized. Pending cache should be empty"
+                    );
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn fork_at_epoch_boundary() {
+        let epochs = 12;
+        let deposits_imported = 128;
+
+        let eth1data = random_eth1_data(deposits_imported);
+        let checkpoints = random_checkpoints(epochs as usize);
+        let mut forks = HashMap::new();
+        let mut eth1cache = eth1cache();
+
+        for epoch in 0..epochs {
+            assert_eq!(
+                eth1cache.by_checkpoint().len(),
+                cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE),
+                "Unexpected cache size"
+            );
+
+            let checkpoint = checkpoints
+                .get(epoch as usize)
+                .expect("should get checkpoint");
+            eth1cache.insert(
+                *checkpoint,
+                Eth1FinalizationData {
+                    eth1_data: eth1data.clone(),
+                    eth1_deposit_index: deposits_imported,
+                },
+            );
+            // lets put a fork at every third epoch
+            if epoch % 3 == 0 {
+                let fork = random_checkpoint(epoch);
+                eth1cache.insert(
+                    fork,
+                    Eth1FinalizationData {
+                        eth1_data: eth1data.clone(),
+                        eth1_deposit_index: deposits_imported,
+                    },
+                );
+                forks.insert(epoch as usize, fork);
+            }
+
+            assert!(
+                eth1cache.pending_eth1().is_empty(),
+                "Deposits are fully imported so pending cache should be empty"
+            );
+            if epoch >= 4 {
+                let finalized_epoch = (epoch - 4) as usize;
+                let finalized_checkpoint = if finalized_epoch % 3 == 0 {
+                    forks.get(&finalized_epoch).expect("should get fork")
+                } else {
+                    checkpoints
+                        .get(finalized_epoch)
+                        .expect("should get checkpoint")
+                };
+                assert_eq!(
+                    eth1cache.finalize(finalized_checkpoint),
+                    Some(eth1data.clone()),
+                    "Should have cache hit"
+                );
+                if finalized_epoch >= 3 {
+                    let dropped_epoch = finalized_epoch - 3;
+                    if let Some(dropped_checkpoint) = forks.get(&dropped_epoch) {
+                        // got checkpoint for an old fork that should no longer
+                        // be in the cache because it is from too long ago
+                        assert_eq!(
+                            eth1cache.finalize(dropped_checkpoint),
+                            None,
+                            "Should have cache miss"
+                        );
+                    }
+                }
+            }
+        }
+    }
+
+    #[test]
+    fn massive_deposit_queue() {
+        // Simulating a situation where deposits don't get imported within an eth1 voting period
+        let eth1_voting_periods = 8;
+        let initial_deposits_imported = 1024;
+        let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH;
+        let initial_deposit_queue =
+            deposits_imported_per_epoch * EPOCHS_PER_ETH1_VOTING_PERIOD * 2 + 32;
+        let new_deposits_per_voting_period =
+            EPOCHS_PER_ETH1_VOTING_PERIOD * deposits_imported_per_epoch / 2;
+
+        let mut epoch_data = BTreeMap::new();
+        let mut eth1s_by_count = BTreeMap::new();
+        let mut eth1cache = eth1cache();
+        let mut last_period_deposits = initial_deposits_imported;
+        for period in 0..eth1_voting_periods {
+            let period_deposits = initial_deposits_imported
+                + initial_deposit_queue
+                + period * new_deposits_per_voting_period;
+            let period_eth1_data = random_eth1_data(period_deposits);
+            eth1s_by_count.insert(period_eth1_data.deposit_count, period_eth1_data.clone());
+
+            for epoch_mod_period in 0..EPOCHS_PER_ETH1_VOTING_PERIOD {
+                let epoch = period * EPOCHS_PER_ETH1_VOTING_PERIOD + epoch_mod_period;
+                let checkpoint = random_checkpoint(epoch);
+                let deposits_imported = cmp::min(
+                    period_deposits,
+                    last_period_deposits + deposits_imported_per_epoch * epoch_mod_period,
+                );
+                eth1cache.insert(
+                    checkpoint,
+                    Eth1FinalizationData {
+                        eth1_data: period_eth1_data.clone(),
+                        eth1_deposit_index: deposits_imported,
+                    },
+                );
+                epoch_data.insert(epoch, (checkpoint, deposits_imported));
+
+                if epoch >= 4 {
+                    let finalized_epoch = epoch - 4;
+                    let (finalized_checkpoint, finalized_deposits) = epoch_data
+                        .get(&finalized_epoch)
+                        .expect("should get epoch data");
+
+                    let pending_eth1s = eth1s_by_count.range((finalized_deposits + 1)..).count();
+                    let last_finalized_eth1 = eth1s_by_count
+                        .range(0..(finalized_deposits + 1))
+                        .map(|(_, eth1)| eth1)
+                        .last()
+                        .cloned();
+                    assert_eq!(
+                        eth1cache.finalize(finalized_checkpoint),
+                        last_finalized_eth1,
+                        "finalized checkpoint mismatch",
+                    );
+                    assert_eq!(
+                        eth1cache.pending_eth1().len(),
+                        pending_eth1s,
+                        "pending eth1 mismatch"
+                    );
+                }
+            }
+
+            // remove unneeded stuff from old epochs
+            while epoch_data.len() > DEFAULT_ETH1_CACHE_SIZE {
+                let oldest_stored_epoch = epoch_data
+                    .keys()
+                    .next()
+                    .cloned()
+                    .expect("should get oldest epoch");
+                epoch_data.remove(&oldest_stored_epoch);
+            }
+            last_period_deposits = period_deposits;
+        }
+    }
+}
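A short usage sketch of the new cache, in the spirit of the module's own tests; the logger setup and the concrete numbers are illustrative assumptions, not part of the diff:

    // Sketch: how block import is expected to drive the cache (see the import_block
    // hunk in beacon_chain.rs above). Values are illustrative only.
    use sloggers::{null::NullLoggerBuilder, Build};
    use types::{Checkpoint, Eth1Data, Hash256};

    fn eth1_finalization_example() {
        let log = NullLoggerBuilder.build().expect("should build null logger");
        let mut cache = Eth1FinalizationCache::new(log);

        // At an epoch boundary, record the state's eth1 data keyed by the checkpoint.
        let checkpoint = Checkpoint {
            epoch: 10u64.into(),
            root: Hash256::random(),
        };
        cache.insert(
            checkpoint,
            Eth1FinalizationData {
                eth1_data: Eth1Data {
                    deposit_root: Hash256::random(),
                    deposit_count: 64,
                    block_hash: Hash256::random(),
                },
                // All 64 deposits imported, so this checkpoint is eligible for finalization.
                eth1_deposit_index: 64,
            },
        );

        // Later, when that checkpoint becomes finalized, ask the cache whether the deposit
        // tree can be finalized; `Some(eth1_data)` is then handed to
        // `eth1_chain.finalize_eth1_data(..)` as in the import_block hunk.
        if let Some(finalized) = cache.finalize(&checkpoint) {
            assert_eq!(finalized.deposit_count, 64);
        }
    }
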
@ -5,7 +5,8 @@ use proto_array::CountUnrealizedFull;
|
|||||||
use slog::{info, warn, Logger};
|
use slog::{info, warn, Logger};
|
||||||
use state_processing::state_advance::complete_state_advance;
|
use state_processing::state_advance::complete_state_advance;
|
||||||
use state_processing::{
|
use state_processing::{
|
||||||
per_block_processing, per_block_processing::BlockSignatureStrategy, VerifyBlockRoot,
|
per_block_processing, per_block_processing::BlockSignatureStrategy, ConsensusContext,
|
||||||
|
VerifyBlockRoot,
|
||||||
};
|
};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
@ -172,12 +173,14 @@ pub fn reset_fork_choice_to_finalization<E: EthSpec, Hot: ItemStore<E>, Cold: It
        complete_state_advance(&mut state, None, block.slot(), spec)
            .map_err(|e| format!("State advance failed: {:?}", e))?;

+        let mut ctxt = ConsensusContext::new(block.slot())
+            .set_proposer_index(block.message().proposer_index());
        per_block_processing(
            &mut state,
            &block,
-            None,
            BlockSignatureStrategy::NoVerification,
            VerifyBlockRoot::True,
+            &mut ctxt,
            spec,
        )
        .map_err(|e| format!("Error replaying block: {:?}", e))?;
@ -15,6 +15,7 @@ pub mod chain_config;
mod early_attester_cache;
mod errors;
pub mod eth1_chain;
+mod eth1_finalization_cache;
pub mod events;
pub mod execution_payload;
pub mod fork_choice_signal;
@ -2,13 +2,15 @@
mod migration_schema_v10;
mod migration_schema_v11;
mod migration_schema_v12;
+mod migration_schema_v13;
mod migration_schema_v6;
mod migration_schema_v7;
mod migration_schema_v8;
mod migration_schema_v9;
mod types;

-use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY};
+use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY};
+use crate::eth1_chain::SszEth1;
use crate::persisted_fork_choice::{
    PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7,
    PersistedForkChoiceV8,
@ -24,6 +26,7 @@ use store::{Error as StoreError, StoreItem};
/// Migrate the database from one schema version to another, applying all requisite mutations.
pub fn migrate_schema<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    deposit_contract_deploy_block: u64,
    datadir: &Path,
    from: SchemaVersion,
    to: SchemaVersion,
@ -31,19 +34,51 @@ pub fn migrate_schema<T: BeaconChainTypes>(
    spec: &ChainSpec,
) -> Result<(), StoreError> {
    match (from, to) {
-        // Migrating from the current schema version to iself is always OK, a no-op.
+        // Migrating from the current schema version to itself is always OK, a no-op.
        (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()),
        // Upgrade across multiple versions by recursively migrating one step at a time.
        (_, _) if from.as_u64() + 1 < to.as_u64() => {
            let next = SchemaVersion(from.as_u64() + 1);
-            migrate_schema::<T>(db.clone(), datadir, from, next, log.clone(), spec)?;
-            migrate_schema::<T>(db, datadir, next, to, log, spec)
+            migrate_schema::<T>(
+                db.clone(),
+                deposit_contract_deploy_block,
+                datadir,
+                from,
+                next,
+                log.clone(),
+                spec,
+            )?;
+            migrate_schema::<T>(
+                db,
+                deposit_contract_deploy_block,
+                datadir,
+                next,
+                to,
+                log,
+                spec,
+            )
        }
        // Downgrade across multiple versions by recursively migrating one step at a time.
        (_, _) if to.as_u64() + 1 < from.as_u64() => {
            let next = SchemaVersion(from.as_u64() - 1);
-            migrate_schema::<T>(db.clone(), datadir, from, next, log.clone(), spec)?;
-            migrate_schema::<T>(db, datadir, next, to, log, spec)
+            migrate_schema::<T>(
+                db.clone(),
+                deposit_contract_deploy_block,
+                datadir,
+                from,
+                next,
+                log.clone(),
+                spec,
+            )?;
+            migrate_schema::<T>(
+                db,
+                deposit_contract_deploy_block,
+                datadir,
+                next,
+                to,
+                log,
+                spec,
+            )
        }

        //
@ -207,6 +242,55 @@ pub fn migrate_schema<T: BeaconChainTypes>(
            let ops = migration_schema_v12::downgrade_from_v12::<T>(db.clone(), log)?;
            db.store_schema_version_atomically(to, ops)
        }
+        (SchemaVersion(12), SchemaVersion(13)) => {
+            let mut ops = vec![];
+            if let Some(persisted_eth1_v1) = db.get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)? {
+                let upgraded_eth1_cache =
+                    match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) {
+                        Ok(upgraded_eth1) => upgraded_eth1,
+                        Err(e) => {
+                            warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e);
+                            warn!(log, "Reinitializing eth1 cache");
+                            migration_schema_v13::reinitialized_eth1_cache_v13(
+                                deposit_contract_deploy_block,
+                            )
+                        }
+                    };
+                ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY));
+            }
+
+            db.store_schema_version_atomically(to, ops)?;
+
+            Ok(())
+        }
+        (SchemaVersion(13), SchemaVersion(12)) => {
+            let mut ops = vec![];
+            if let Some(persisted_eth1_v13) = db.get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)? {
+                let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache(
+                    persisted_eth1_v13,
+                ) {
+                    Ok(Some(downgraded_eth1)) => downgraded_eth1,
+                    Ok(None) => {
+                        warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache");
+                        migration_schema_v13::reinitialized_eth1_cache_v1(
+                            deposit_contract_deploy_block,
+                        )
+                    }
+                    Err(e) => {
+                        warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e);
+                        warn!(log, "Reinitializing eth1 cache");
+                        migration_schema_v13::reinitialized_eth1_cache_v1(
+                            deposit_contract_deploy_block,
+                        )
+                    }
+                };
+                ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY));
+            }
+
+            db.store_schema_version_atomically(to, ops)?;
+
+            Ok(())
+        }
        // Anything else is an error.
        (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
            target_version: to,
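The `migrate_schema` arms above upgrade or downgrade the on-disk schema by recursing one version step at a time, threading the new `deposit_contract_deploy_block` argument through every step. A minimal, self-contained sketch of that one-step recursion pattern, using toy types rather than Lighthouse's actual database API:

// Hypothetical stand-in for the real on-disk database.
struct Db {
    schema_version: u64,
}

// Upgrade or downgrade across any distance by recursively applying one-step
// migrations, mirroring the structure of `migrate_schema` above (simplified).
fn migrate(db: &mut Db, from: u64, to: u64) -> Result<(), String> {
    if from == to {
        return Ok(());
    }
    let next = if to > from { from + 1 } else { from - 1 };
    apply_single_step(db, from, next)?;
    migrate(db, next, to)
}

// Each supported adjacent (from, to) pair carries its own migration logic.
fn apply_single_step(db: &mut Db, from: u64, to: u64) -> Result<(), String> {
    match (from, to) {
        (f, t) if f.abs_diff(t) == 1 => {
            db.schema_version = t;
            Ok(())
        }
        _ => Err(format!("unsupported single-step migration {from} -> {to}")),
    }
}

fn main() {
    let mut db = Db { schema_version: 9 };
    migrate(&mut db, 9, 13).unwrap();
    assert_eq!(db.schema_version, 13);
    migrate(&mut db, 13, 12).unwrap();
    assert_eq!(db.schema_version, 12);
}

The point of the recursion is that only adjacent-version migrations ever need to be written; any longer jump is composed automatically.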
@ -0,0 +1,150 @@
+use crate::eth1_chain::SszEth1;
+use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13};
+use ssz::{Decode, Encode};
+use state_processing::common::DepositDataTree;
+use store::Error;
+use types::DEPOSIT_TREE_DEPTH;
+
+pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result<SszEth1, Error> {
+    if persisted_eth1_v1.use_dummy_backend {
+        // backend_bytes is empty when using dummy backend
+        return Ok(persisted_eth1_v1);
+    }
+
+    let SszEth1 {
+        use_dummy_backend,
+        backend_bytes,
+    } = persisted_eth1_v1;
+
+    let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?;
+    let SszEth1CacheV1 {
+        block_cache,
+        deposit_cache: deposit_cache_v1,
+        last_processed_block,
+    } = ssz_eth1_cache_v1;
+
+    let SszDepositCacheV1 {
+        logs,
+        leaves,
+        deposit_contract_deploy_block,
+        deposit_roots,
+    } = deposit_cache_v1;
+
+    let deposit_cache_v13 = SszDepositCacheV13 {
+        logs,
+        leaves,
+        deposit_contract_deploy_block,
+        finalized_deposit_count: 0,
+        finalized_block_height: deposit_contract_deploy_block.saturating_sub(1),
+        deposit_tree_snapshot: None,
+        deposit_roots,
+    };
+
+    let ssz_eth1_cache_v13 = SszEth1CacheV13 {
+        block_cache,
+        deposit_cache: deposit_cache_v13,
+        last_processed_block,
+    };
+
+    let persisted_eth1_v13 = SszEth1 {
+        use_dummy_backend,
+        backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(),
+    };
+
+    Ok(persisted_eth1_v13)
+}
+
+pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result<Option<SszEth1>, Error> {
+    if persisted_eth1_v13.use_dummy_backend {
+        // backend_bytes is empty when using dummy backend
+        return Ok(Some(persisted_eth1_v13));
+    }
+
+    let SszEth1 {
+        use_dummy_backend,
+        backend_bytes,
+    } = persisted_eth1_v13;
+
+    let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?;
+    let SszEth1CacheV13 {
+        block_cache,
+        deposit_cache: deposit_cache_v13,
+        last_processed_block,
+    } = ssz_eth1_cache_v13;
+
+    let SszDepositCacheV13 {
+        logs,
+        leaves,
+        deposit_contract_deploy_block,
+        finalized_deposit_count,
+        finalized_block_height: _,
+        deposit_tree_snapshot,
+        deposit_roots,
+    } = deposit_cache_v13;
+
+    if finalized_deposit_count == 0 && deposit_tree_snapshot.is_none() {
+        // This tree was never finalized and can be directly downgraded to v1 without re-initializing
+        let deposit_cache_v1 = SszDepositCacheV1 {
+            logs,
+            leaves,
+            deposit_contract_deploy_block,
+            deposit_roots,
+        };
+        let ssz_eth1_cache_v1 = SszEth1CacheV1 {
+            block_cache,
+            deposit_cache: deposit_cache_v1,
+            last_processed_block,
+        };
+        return Ok(Some(SszEth1 {
+            use_dummy_backend,
+            backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(),
+        }));
+    }
+    // deposit cache was finalized; can't downgrade
+    Ok(None)
+}
+
+pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 {
+    let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
+    let deposit_cache_v13 = SszDepositCacheV13 {
+        logs: vec![],
+        leaves: vec![],
+        deposit_contract_deploy_block,
+        finalized_deposit_count: 0,
+        finalized_block_height: deposit_contract_deploy_block.saturating_sub(1),
+        deposit_tree_snapshot: empty_tree.get_snapshot(),
+        deposit_roots: vec![empty_tree.root()],
+    };
+
+    let ssz_eth1_cache_v13 = SszEth1CacheV13 {
+        block_cache: BlockCache::default(),
+        deposit_cache: deposit_cache_v13,
+        last_processed_block: None,
+    };
+
+    SszEth1 {
+        use_dummy_backend: false,
+        backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(),
+    }
+}
+
+pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 {
+    let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH);
+    let deposit_cache_v1 = SszDepositCacheV1 {
+        logs: vec![],
+        leaves: vec![],
+        deposit_contract_deploy_block,
+        deposit_roots: vec![empty_tree.root()],
+    };
+
+    let ssz_eth1_cache_v1 = SszEth1CacheV1 {
+        block_cache: BlockCache::default(),
+        deposit_cache: deposit_cache_v1,
+        last_processed_block: None,
+    };
+
+    SszEth1 {
+        use_dummy_backend: false,
+        backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(),
+    }
+}
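When the deposit tree has never been finalized, the upgrade and downgrade paths above are intended to be inverses of each other. A self-contained sketch of that round-trip property with simplified stand-in structs (the field names here are illustrative, not the real SSZ cache layout):

#[derive(Clone, Debug, PartialEq)]
struct CacheV1 {
    leaves: Vec<u64>,
    deploy_block: u64,
}

#[derive(Clone, Debug, PartialEq)]
struct CacheV13 {
    leaves: Vec<u64>,
    deploy_block: u64,
    finalized_deposit_count: u64,
    finalized_block_height: u64,
}

// Upgrade: add the new fields with defaults that mean "never finalized".
fn upgrade(v1: CacheV1) -> CacheV13 {
    CacheV13 {
        finalized_deposit_count: 0,
        finalized_block_height: v1.deploy_block.saturating_sub(1),
        deploy_block: v1.deploy_block,
        leaves: v1.leaves,
    }
}

// Downgrade: only possible if the tree was never finalized, otherwise `None`.
fn downgrade(v13: CacheV13) -> Option<CacheV1> {
    if v13.finalized_deposit_count == 0 {
        Some(CacheV1 {
            leaves: v13.leaves,
            deploy_block: v13.deploy_block,
        })
    } else {
        None
    }
}

fn main() {
    let original = CacheV1 { leaves: vec![1, 2, 3], deploy_block: 100 };
    let round_tripped = downgrade(upgrade(original.clone()));
    assert_eq!(round_tripped, Some(original));
}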
@ -1432,8 +1432,9 @@ where
        // Building proofs
        let mut proofs = vec![];
        for i in 0..leaves.len() {
-            let (_, mut proof) =
-                tree.generate_proof(i, self.spec.deposit_contract_tree_depth as usize);
+            let (_, mut proof) = tree
+                .generate_proof(i, self.spec.deposit_contract_tree_depth as usize)
+                .expect("should generate proof");
            proof.push(Hash256::from_slice(&int_to_bytes32(leaves.len() as u64)));
            proofs.push(proof);
        }
@ -11,7 +11,7 @@ use slasher::{Config as SlasherConfig, Slasher};
use state_processing::{
    common::get_indexed_attestation,
    per_block_processing::{per_block_processing, BlockSignatureStrategy},
-    per_slot_processing, BlockProcessingError, VerifyBlockRoot,
+    per_slot_processing, BlockProcessingError, ConsensusContext, VerifyBlockRoot,
};
use std::marker::PhantomData;
use std::sync::Arc;
@ -1139,14 +1139,15 @@ async fn add_base_block_to_altair_chain() {
    // Ensure that it would be impossible to apply this block to `per_block_processing`.
    {
        let mut state = state;
+        let mut ctxt = ConsensusContext::new(base_block.slot());
        per_slot_processing(&mut state, None, &harness.chain.spec).unwrap();
        assert!(matches!(
            per_block_processing(
                &mut state,
                &base_block,
-                None,
                BlockSignatureStrategy::NoVerification,
                VerifyBlockRoot::True,
+                &mut ctxt,
                &harness.chain.spec,
            ),
            Err(BlockProcessingError::InconsistentBlockFork(
@ -1271,14 +1272,15 @@ async fn add_altair_block_to_base_chain() {
    // Ensure that it would be impossible to apply this block to `per_block_processing`.
    {
        let mut state = state;
+        let mut ctxt = ConsensusContext::new(altair_block.slot());
        per_slot_processing(&mut state, None, &harness.chain.spec).unwrap();
        assert!(matches!(
            per_block_processing(
                &mut state,
                &altair_block,
-                None,
                BlockSignatureStrategy::NoVerification,
                VerifyBlockRoot::True,
+                &mut ctxt,
                &harness.chain.spec,
            ),
            Err(BlockProcessingError::InconsistentBlockFork(
@ -811,7 +811,6 @@ async fn shuffling_compatible_linear_chain() {
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

-    // Skip the block at the end of the first epoch.
    let head_block_root = harness
        .extend_chain(
            4 * E::slots_per_epoch() as usize,
@ -824,10 +823,6 @@
        &harness,
        &get_state_for_block(&harness, head_block_root),
        head_block_root,
-        true,
-        true,
-        None,
-        None,
    );
}

@ -859,10 +854,6 @@ async fn shuffling_compatible_missing_pivot_block() {
        &harness,
        &get_state_for_block(&harness, head_block_root),
        head_block_root,
-        true,
-        true,
-        Some(E::slots_per_epoch() - 2),
-        Some(E::slots_per_epoch() - 2),
    );
}

@ -880,10 +871,10 @@ async fn shuffling_compatible_simple_fork() {
    let head1_state = get_state_for_block(&harness, head1);
    let head2_state = get_state_for_block(&harness, head2);

-    check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None);
-    check_shuffling_compatible(&harness, &head1_state, head2, false, false, None, None);
-    check_shuffling_compatible(&harness, &head2_state, head1, false, false, None, None);
-    check_shuffling_compatible(&harness, &head2_state, head2, true, true, None, None);
+    check_shuffling_compatible(&harness, &head1_state, head1);
+    check_shuffling_compatible(&harness, &head1_state, head2);
+    check_shuffling_compatible(&harness, &head2_state, head1);
+    check_shuffling_compatible(&harness, &head2_state, head2);

    drop(db_path);
}
@ -902,21 +893,10 @@ async fn shuffling_compatible_short_fork() {
    let head1_state = get_state_for_block(&harness, head1);
    let head2_state = get_state_for_block(&harness, head2);

-    check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None);
-    check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None);
-    // NOTE: don't check this case, as block 14 from the first chain appears valid on the second
-    // chain due to it matching the second chain's block 15.
-    // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None);
-    check_shuffling_compatible(
-        &harness,
-        &head2_state,
-        head2,
-        true,
-        true,
-        // Required because of the skipped slot.
-        Some(2 * E::slots_per_epoch() - 2),
-        None,
-    );
+    check_shuffling_compatible(&harness, &head1_state, head1);
+    check_shuffling_compatible(&harness, &head1_state, head2);
+    check_shuffling_compatible(&harness, &head2_state, head1);
+    check_shuffling_compatible(&harness, &head2_state, head2);

    drop(db_path);
}
@ -940,54 +920,82 @@ fn check_shuffling_compatible(
    harness: &TestHarness,
    head_state: &BeaconState<E>,
    head_block_root: Hash256,
-    current_epoch_valid: bool,
-    previous_epoch_valid: bool,
-    current_epoch_cutoff_slot: Option<u64>,
-    previous_epoch_cutoff_slot: Option<u64>,
) {
-    let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1;
-    let current_pivot_slot =
-        (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());
-    let previous_pivot_slot =
-        (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());
-
    for maybe_tuple in harness
        .chain
        .rev_iter_block_roots_from(head_block_root)
        .unwrap()
    {
        let (block_root, slot) = maybe_tuple.unwrap();
-        // Shuffling is compatible targeting the current epoch,
-        // if slot is greater than or equal to the current epoch pivot block.
-        assert_eq!(
-            harness.chain.shuffling_is_compatible(
-                &block_root,
-                head_state.current_epoch(),
-                &head_state
-            ),
-            current_epoch_valid
-                && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64())
-        );
+        // Would an attestation to `block_root` at the current epoch be compatible with the head
+        // state's shuffling?
+        let current_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible(
+            &block_root,
+            head_state.current_epoch(),
+            &head_state,
+        );
+
+        // Check for consistency with the more expensive shuffling lookup.
+        harness
+            .chain
+            .with_committee_cache(
+                block_root,
+                head_state.current_epoch(),
+                |committee_cache, _| {
+                    let state_cache = head_state.committee_cache(RelativeEpoch::Current).unwrap();
+                    if current_epoch_shuffling_is_compatible {
+                        assert_eq!(committee_cache, state_cache, "block at slot {slot}");
+                    } else {
+                        assert_ne!(committee_cache, state_cache, "block at slot {slot}");
+                    }
+                    Ok(())
+                },
+            )
+            .unwrap_or_else(|e| {
+                // If the lookup fails then the shuffling must be invalid in some way, e.g. the
+                // block with `block_root` is from a later epoch than `previous_epoch`.
+                assert!(
+                    !current_epoch_shuffling_is_compatible,
+                    "block at slot {slot} has compatible shuffling at epoch {} \
+                     but should be incompatible due to error: {e:?}",
+                    head_state.current_epoch()
+                );
+            });

        // Similarly for the previous epoch
-        assert_eq!(
-            harness.chain.shuffling_is_compatible(
-                &block_root,
-                head_state.previous_epoch(),
-                &head_state
-            ),
-            previous_epoch_valid
-                && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64())
-        );
-        // Targeting the next epoch should always return false
-        assert_eq!(
-            harness.chain.shuffling_is_compatible(
-                &block_root,
-                head_state.current_epoch() + 1,
-                &head_state
-            ),
-            false
-        );
-        // Targeting two epochs before the current epoch should also always return false
+        let previous_epoch_shuffling_is_compatible = harness.chain.shuffling_is_compatible(
+            &block_root,
+            head_state.previous_epoch(),
+            &head_state,
+        );
+        harness
+            .chain
+            .with_committee_cache(
+                block_root,
+                head_state.previous_epoch(),
+                |committee_cache, _| {
+                    let state_cache = head_state.committee_cache(RelativeEpoch::Previous).unwrap();
+                    if previous_epoch_shuffling_is_compatible {
+                        assert_eq!(committee_cache, state_cache);
+                    } else {
+                        assert_ne!(committee_cache, state_cache);
+                    }
+                    Ok(())
+                },
+            )
+            .unwrap_or_else(|e| {
+                // If the lookup fails then the shuffling must be invalid in some way, e.g. the
+                // block with `block_root` is from a later epoch than `previous_epoch`.
+                assert!(
+                    !previous_epoch_shuffling_is_compatible,
+                    "block at slot {slot} has compatible shuffling at epoch {} \
+                     but should be incompatible due to error: {e:?}",
+                    head_state.previous_epoch()
+                );
+            });
+
+        // Targeting two epochs before the current epoch should always return false
        if head_state.current_epoch() >= 2 {
            assert_eq!(
                harness.chain.shuffling_is_compatible(
@ -277,8 +277,52 @@ where
                BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT));
            let slots_per_epoch = TEthSpec::slots_per_epoch();

-            debug!(context.log(), "Downloading finalized block");
+            let deposit_snapshot = if config.sync_eth1_chain {
+                // We want to fetch deposit snapshot before fetching the finalized beacon state to
+                // ensure that the snapshot is not newer than the beacon state that satisfies the
+                // deposit finalization conditions
+                debug!(context.log(), "Downloading deposit snapshot");
+                let deposit_snapshot_result = remote
+                    .get_deposit_snapshot()
+                    .await
+                    .map_err(|e| match e {
+                        ApiError::InvalidSsz(e) => format!(
+                            "Unable to parse SSZ: {:?}. Ensure the checkpoint-sync-url refers to a \
+                            node for the correct network",
+                            e
+                        ),
+                        e => format!("Error fetching deposit snapshot from remote: {:?}", e),
+                    });
+                match deposit_snapshot_result {
+                    Ok(Some(deposit_snapshot)) => {
+                        if deposit_snapshot.is_valid() {
+                            Some(deposit_snapshot)
+                        } else {
+                            warn!(context.log(), "Remote BN sent invalid deposit snapshot!");
+                            None
+                        }
+                    }
+                    Ok(None) => {
+                        warn!(
+                            context.log(),
+                            "Remote BN does not support EIP-4881 fast deposit sync"
+                        );
+                        None
+                    }
+                    Err(e) => {
+                        warn!(
+                            context.log(),
+                            "Remote BN does not support EIP-4881 fast deposit sync";
+                            "error" => e
+                        );
+                        None
+                    }
+                }
+            } else {
+                None
+            };
+
+            debug!(context.log(), "Downloading finalized block");
            // Find a suitable finalized block on an epoch boundary.
            let mut block = remote
                .get_beacon_blocks_ssz::<TEthSpec>(BlockId::Finalized, &spec)
@ -362,15 +406,39 @@ where
                "state_root" => ?state_root,
            );

+            let service =
+                deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot(
+                    config.eth1,
+                    context.log().clone(),
+                    spec,
+                    &snapshot,
+                ) {
+                    Ok(service) => {
+                        info!(
+                            context.log(),
+                            "Loaded deposit tree snapshot";
+                            "deposits loaded" => snapshot.deposit_count,
+                        );
+                        Some(service)
+                    }
+                    Err(e) => {
+                        warn!(context.log(),
+                            "Unable to load deposit snapshot";
+                            "error" => ?e
+                        );
+                        None
+                    }
+                });
+
            builder
                .weak_subjectivity_state(state, block, genesis_state)
-                .map(|v| (v, None))?
+                .map(|v| (v, service))?
        }
        ClientGenesis::DepositContract => {
            info!(
                context.log(),
                "Waiting for eth2 genesis from eth1";
-                "eth1_endpoints" => format!("{:?}", &config.eth1.endpoints),
+                "eth1_endpoints" => format!("{:?}", &config.eth1.endpoint),
                "contract_deploy_block" => config.eth1.deposit_contract_deploy_block,
                "deposit_contract" => &config.eth1.deposit_contract_address
            );
@ -379,7 +447,7 @@ where
                config.eth1,
                context.log().clone(),
                context.eth2_config().spec.clone(),
-            );
+            )?;

            // If the HTTP API server is enabled, start an instance of it where it only
            // contains a reference to the eth1 service (all non-eth1 endpoints will fail
@ -457,7 +525,9 @@ where
            ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?,
        };

+        if config.sync_eth1_chain {
        self.eth1_service = eth1_service_option;
+        }
        self.beacon_chain_builder = Some(beacon_chain_builder);
        Ok(self)
    }
@ -808,9 +878,16 @@ where
        self.freezer_db_path = Some(cold_path.into());

        let inner_spec = spec.clone();
+        let deposit_contract_deploy_block = context
+            .eth2_network_config
+            .as_ref()
+            .map(|config| config.deposit_contract_deploy_block)
+            .unwrap_or(0);
+
        let schema_upgrade = |db, from, to| {
            migrate_schema::<Witness<TSlotClock, TEth1Backend, _, _, _>>(
                db,
+                deposit_contract_deploy_block,
                datadir,
                from,
                to,
@ -875,7 +952,7 @@ where

            CachingEth1Backend::from_service(eth1_service_from_genesis)
        } else if config.purge_cache {
-            CachingEth1Backend::new(config, context.log().clone(), spec)
+            CachingEth1Backend::new(config, context.log().clone(), spec)?
        } else {
            beacon_chain_builder
                .get_persisted_eth1_backend()?
@ -889,11 +966,7 @@ where
                .map(|chain| chain.into_backend())
            })
            .unwrap_or_else(|| {
-                Ok(CachingEth1Backend::new(
-                    config,
-                    context.log().clone(),
-                    spec.clone(),
-                ))
+                CachingEth1Backend::new(config, context.log().clone(), spec.clone())
            })?
        };

@ -1,11 +1,11 @@
use directory::DEFAULT_ROOT_DIR;
+use environment::LoggerConfig;
use network::NetworkConfig;
use sensitive_url::SensitiveUrl;
use serde_derive::{Deserialize, Serialize};
use std::fs;
use std::path::PathBuf;
use types::{Graffiti, PublicKeyBytes};

/// Default directory name for the freezer database under the top-level data dir.
const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db";

@ -72,6 +72,7 @@ pub struct Config {
    pub http_metrics: http_metrics::Config,
    pub monitoring_api: Option<monitoring_api::Config>,
    pub slasher: Option<slasher::Config>,
+    pub logger_config: LoggerConfig,
}

impl Default for Config {
@ -96,6 +97,7 @@ impl Default for Config {
            slasher: None,
            validator_monitor_auto: false,
            validator_monitor_pubkeys: vec![],
+            logger_config: LoggerConfig::default(),
        }
    }
}
@ -25,11 +25,11 @@ eth2_ssz_derive = "0.3.0"
tree_hash = "0.4.1"
parking_lot = "0.12.0"
slog = "2.5.2"
+superstruct = "0.5.0"
tokio = { version = "1.14.0", features = ["full"] }
state_processing = { path = "../../consensus/state_processing" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics"}
lazy_static = "1.4.0"
task_executor = { path = "../../common/task_executor" }
eth2 = { path = "../../common/eth2" }
-fallback = { path = "../../common/fallback" }
sensitive_url = { path = "../../common/sensitive_url" }
@ -1,7 +1,10 @@
use ssz_derive::{Decode, Encode};
+use std::collections::HashMap;
use std::ops::RangeInclusive;

pub use eth2::lighthouse::Eth1Block;
+use eth2::types::Hash256;
+use std::sync::Arc;

#[derive(Debug, PartialEq, Clone)]
pub enum Error {
@ -20,7 +23,9 @@ pub enum Error {
/// timestamp.
#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)]
pub struct BlockCache {
-    blocks: Vec<Eth1Block>,
+    blocks: Vec<Arc<Eth1Block>>,
+    #[ssz(skip_serializing, skip_deserializing)]
+    by_hash: HashMap<Hash256, Arc<Eth1Block>>,
}

impl BlockCache {
@ -36,12 +41,12 @@ impl BlockCache {

    /// Returns the earliest (lowest timestamp) block, if any.
    pub fn earliest_block(&self) -> Option<&Eth1Block> {
-        self.blocks.first()
+        self.blocks.first().map(|ptr| ptr.as_ref())
    }

    /// Returns the latest (highest timestamp) block, if any.
    pub fn latest_block(&self) -> Option<&Eth1Block> {
-        self.blocks.last()
+        self.blocks.last().map(|ptr| ptr.as_ref())
    }

    /// Returns the timestamp of the earliest block in the cache (if any).
@ -71,7 +76,7 @@ impl BlockCache {
    /// - Monotonically increasing block numbers.
    /// - Non-uniformly increasing block timestamps.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &Eth1Block> + Clone {
-        self.blocks.iter()
+        self.blocks.iter().map(|ptr| ptr.as_ref())
    }

    /// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the
@ -80,7 +85,11 @@ impl BlockCache {
    /// If `len` is greater than the vector's current length, this has no effect.
    pub fn truncate(&mut self, len: usize) {
        if len < self.blocks.len() {
-            self.blocks = self.blocks.split_off(self.blocks.len() - len);
+            let remaining = self.blocks.split_off(self.blocks.len() - len);
+            for block in &self.blocks {
+                self.by_hash.remove(&block.hash);
+            }
+            self.blocks = remaining;
        }
    }

@ -92,12 +101,27 @@ impl BlockCache {

    /// Returns a block with the corresponding number, if any.
    pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> {
-        self.blocks.get(
+        self.blocks
+            .get(
                self.blocks
                    .as_slice()
                    .binary_search_by(|block| block.number.cmp(&block_number))
                    .ok()?,
            )
+            .map(|ptr| ptr.as_ref())
+    }
+
+    /// Returns a block with the corresponding hash, if any.
+    pub fn block_by_hash(&self, block_hash: &Hash256) -> Option<&Eth1Block> {
+        self.by_hash.get(block_hash).map(|ptr| ptr.as_ref())
+    }
+
+    /// Rebuilds the by_hash map
+    pub fn rebuild_by_hash_map(&mut self) {
+        self.by_hash.clear();
+        for block in self.blocks.iter() {
+            self.by_hash.insert(block.hash, block.clone());
+        }
    }

    /// Insert an `Eth1Snapshot` into `self`, allowing future queries.
@ -161,7 +185,9 @@ impl BlockCache {
            }
        }

-        self.blocks.push(block);
+        let ptr = Arc::new(block);
+        self.by_hash.insert(ptr.hash, ptr.clone());
+        self.blocks.push(ptr);

        Ok(())
    }
@ -269,6 +295,8 @@ mod tests {
                .expect("should add consecutive blocks with duplicate timestamps");
        }

+        let blocks = blocks.into_iter().map(Arc::new).collect::<Vec<_>>();
+
        assert_eq!(cache.blocks, blocks, "should have added all blocks");
    }
}
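The `BlockCache` changes above wrap each cached block in an `Arc` and index it twice: once in the ordered `Vec` and once in a `HashMap` keyed by block hash, with the map skipped during SSZ (de)serialization and rebuilt afterwards. A self-contained sketch of that dual-index pattern with toy types (not the eth1 crate's actual API):

use std::collections::HashMap;
use std::sync::Arc;

#[derive(Debug)]
struct Block {
    hash: u64,
    number: u64,
}

#[derive(Default)]
struct Cache {
    // Ordered by insertion (ascending block number).
    blocks: Vec<Arc<Block>>,
    // O(1) lookup by hash; cheap to rebuild because it only clones `Arc`s.
    by_hash: HashMap<u64, Arc<Block>>,
}

impl Cache {
    fn insert(&mut self, block: Block) {
        let ptr = Arc::new(block);
        self.by_hash.insert(ptr.hash, ptr.clone());
        self.blocks.push(ptr);
    }

    fn block_by_hash(&self, hash: u64) -> Option<&Block> {
        self.by_hash.get(&hash).map(|ptr| ptr.as_ref())
    }

    // Analogue of `rebuild_by_hash_map`: run after deserialising `blocks`,
    // since the hash index itself is not persisted.
    fn rebuild_by_hash_map(&mut self) {
        self.by_hash.clear();
        for block in &self.blocks {
            self.by_hash.insert(block.hash, block.clone());
        }
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.insert(Block { hash: 0xaa, number: 1 });
    cache.insert(Block { hash: 0xbb, number: 2 });
    assert_eq!(cache.block_by_hash(0xbb).unwrap().number, 2);

    cache.by_hash.clear(); // simulate a freshly decoded cache with an empty index
    cache.rebuild_by_hash_map();
    assert!(cache.block_by_hash(0xaa).is_some());
}

Because both containers hold `Arc`s, rebuilding the hash index after decoding only clones pointers, not block data.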
File diff suppressed because it is too large
Load Diff
@ -1,15 +1,16 @@
+use crate::service::endpoint_from_config;
use crate::Config;
use crate::{
    block_cache::{BlockCache, Eth1Block},
-    deposit_cache::{DepositCache, SszDepositCache},
-    service::EndpointsCache,
+    deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13},
};
+use execution_layer::HttpJsonRpc;
use parking_lot::RwLock;
use ssz::four_byte_option_impl;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
-use std::sync::Arc;
-use types::ChainSpec;
+use superstruct::superstruct;
+use types::{ChainSpec, DepositTreeSnapshot, Eth1Data};

// Define "legacy" implementations of `Option<u64>` which use four bytes for encoding the union
// selector.
@ -29,13 +30,25 @@ impl DepositUpdater {
            last_processed_block: None,
        }
    }
+
+    pub fn from_snapshot(
+        deposit_contract_deploy_block: u64,
+        snapshot: &DepositTreeSnapshot,
+    ) -> Result<Self, String> {
+        let last_processed_block = Some(snapshot.execution_block_height);
+        Ok(Self {
+            cache: DepositCache::from_deposit_snapshot(deposit_contract_deploy_block, snapshot)?,
+            last_processed_block,
+        })
+    }
}

-#[derive(Default)]
pub struct Inner {
    pub block_cache: RwLock<BlockCache>,
    pub deposit_cache: RwLock<DepositUpdater>,
-    pub endpoints_cache: RwLock<Option<Arc<EndpointsCache>>>,
+    pub endpoint: HttpJsonRpc,
+    // this gets set to Some(Eth1Data) when the deposit finalization conditions are met
+    pub to_finalize: RwLock<Option<Eth1Data>>,
    pub config: RwLock<Config>,
    pub remote_head_block: RwLock<Option<Eth1Block>>,
    pub spec: ChainSpec,
@ -59,9 +72,13 @@ impl Inner {

    /// Recover `Inner` given byte representation of eth1 deposit and block caches.
    pub fn from_bytes(bytes: &[u8], config: Config, spec: ChainSpec) -> Result<Self, String> {
-        let ssz_cache = SszEth1Cache::from_ssz_bytes(bytes)
-            .map_err(|e| format!("Ssz decoding error: {:?}", e))?;
-        ssz_cache.to_inner(config, spec)
+        SszEth1Cache::from_ssz_bytes(bytes)
+            .map_err(|e| format!("Ssz decoding error: {:?}", e))?
+            .to_inner(config, spec)
+            .map(|inner| {
+                inner.block_cache.write().rebuild_by_hash_map();
+                inner
+            })
    }

    /// Returns a reference to the specification.
@ -70,12 +87,21 @@ impl Inner {
    }
}

-#[derive(Encode, Decode, Clone)]
+pub type SszEth1Cache = SszEth1CacheV13;
+
+#[superstruct(
+    variants(V1, V13),
+    variant_attributes(derive(Encode, Decode, Clone)),
+    no_enum
+)]
pub struct SszEth1Cache {
-    block_cache: BlockCache,
-    deposit_cache: SszDepositCache,
+    pub block_cache: BlockCache,
+    #[superstruct(only(V1))]
+    pub deposit_cache: SszDepositCacheV1,
+    #[superstruct(only(V13))]
+    pub deposit_cache: SszDepositCacheV13,
    #[ssz(with = "four_byte_option_u64")]
-    last_processed_block: Option<u64>,
+    pub last_processed_block: Option<u64>,
}

impl SszEth1Cache {
@ -96,7 +122,9 @@ impl SszEth1Cache {
                cache: self.deposit_cache.to_deposit_cache()?,
                last_processed_block: self.last_processed_block,
            }),
-            endpoints_cache: RwLock::new(None),
+            endpoint: endpoint_from_config(&config)
+                .map_err(|e| format!("Failed to create endpoint: {:?}", e))?,
+            to_finalize: RwLock::new(None),
            // Set the remote head_block zero when creating a new instance. We only care about
            // present and future eth1 nodes.
            remote_head_block: RwLock::new(None),
@ -8,9 +8,9 @@ mod metrics;
mod service;

pub use block_cache::{BlockCache, Eth1Block};
-pub use deposit_cache::DepositCache;
+pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV1, SszDepositCacheV13};
pub use execution_layer::http::deposit_log::DepositLog;
-pub use inner::SszEth1Cache;
+pub use inner::{SszEth1Cache, SszEth1CacheV1, SszEth1CacheV13};
pub use service::{
    BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service,
    DEFAULT_CHAIN_ID,
@ -17,16 +17,6 @@ lazy_static! {
    pub static ref HIGHEST_PROCESSED_DEPOSIT_BLOCK: Result<IntGauge> =
        try_create_int_gauge("eth1_highest_processed_deposit_block", "Number of the last block checked for deposits");

-    /*
-     * Eth1 endpoint errors
-     */
-    pub static ref ENDPOINT_ERRORS: Result<IntCounterVec> = try_create_int_counter_vec(
-        "eth1_endpoint_errors", "The number of eth1 request errors for each endpoint", &["endpoint"]
-    );
-    pub static ref ENDPOINT_REQUESTS: Result<IntCounterVec> = try_create_int_counter_vec(
-        "eth1_endpoint_requests", "The number of eth1 requests for each endpoint", &["endpoint"]
-    );
-
    /*
     * Eth1 rpc connection
     */
@ -35,14 +25,4 @@ lazy_static! {
        "sync_eth1_connected", "Set to 1 if connected to an eth1 node, otherwise set to 0"
    );
-
-    pub static ref ETH1_FALLBACK_CONFIGURED: Result<IntGauge> = try_create_int_gauge(
-        "sync_eth1_fallback_configured", "Number of configured eth1 fallbacks"
-    );
-
-    // Note: This metric only checks if an eth1 fallback is configured, not if it is connected and synced.
-    // Checking for liveness of the fallback would require moving away from lazy checking of fallbacks.
-    pub static ref ETH1_FALLBACK_CONNECTED: Result<IntGauge> = try_create_int_gauge(
-        "eth1_sync_fallback_connected", "Set to 1 if an eth1 fallback is connected, otherwise set to 0"
-    );
-
}
@ -9,21 +9,18 @@ use execution_layer::http::{
    deposit_methods::{BlockQuery, Eth1Id},
    HttpJsonRpc,
};
-use fallback::{Fallback, FallbackError};
use futures::future::TryFutureExt;
use parking_lot::{RwLock, RwLockReadGuard};
use sensitive_url::SensitiveUrl;
use serde::{Deserialize, Serialize};
use slog::{debug, error, info, trace, warn, Logger};
use std::fmt::Debug;
-use std::future::Future;
use std::ops::{Range, RangeInclusive};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
-use tokio::sync::RwLock as TRwLock;
use tokio::time::{interval_at, Duration, Instant};
-use types::{ChainSpec, EthSpec, Unsigned};
+use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned};

/// Indicates the default eth1 chain id we use for the deposit contract.
pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Goerli;
@ -53,127 +50,12 @@ const CACHE_FACTOR: u64 = 2;
#[derive(Debug, PartialEq, Clone)]
pub enum EndpointError {
    RequestFailed(String),
-    WrongNetworkId,
    WrongChainId,
    FarBehind,
}

type EndpointState = Result<(), EndpointError>;

-pub struct EndpointWithState {
-    client: HttpJsonRpc,
-    state: TRwLock<Option<EndpointState>>,
-}
-
-impl EndpointWithState {
-    pub fn new(client: HttpJsonRpc) -> Self {
-        Self {
-            client,
-            state: TRwLock::new(None),
-        }
-    }
-}
-
-async fn reset_endpoint_state(endpoint: &EndpointWithState) {
-    *endpoint.state.write().await = None;
-}
-
-async fn get_state(endpoint: &EndpointWithState) -> Option<EndpointState> {
-    endpoint.state.read().await.clone()
-}
-
-/// A cache structure to lazily check usability of endpoints. An endpoint is usable if it is
-/// reachable and has the correct network id and chain id. Emits a `WARN` log if a checked endpoint
-/// is not usable.
-pub struct EndpointsCache {
-    pub fallback: Fallback<EndpointWithState>,
-    pub config_chain_id: Eth1Id,
-    pub log: Logger,
-}
-
-impl EndpointsCache {
-    /// Checks the usability of an endpoint. Results get cached and therefore only the first call
-    /// for each endpoint does the real check.
-    async fn state(&self, endpoint: &EndpointWithState) -> EndpointState {
-        if let Some(result) = endpoint.state.read().await.clone() {
-            return result;
-        }
-        let mut value = endpoint.state.write().await;
-        if let Some(result) = value.clone() {
-            return result;
-        }
-        crate::metrics::inc_counter_vec(
-            &crate::metrics::ENDPOINT_REQUESTS,
-            &[&endpoint.client.to_string()],
-        );
-        let state = endpoint_state(&endpoint.client, &self.config_chain_id, &self.log).await;
-        *value = Some(state.clone());
-        if state.is_err() {
-            crate::metrics::inc_counter_vec(
-                &crate::metrics::ENDPOINT_ERRORS,
-                &[&endpoint.client.to_string()],
-            );
-            crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0);
-        } else {
-            crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1);
-        }
-        state
-    }
-
-    /// Return the first successful result along with number of previous errors encountered
-    /// or all the errors encountered if every none of the fallback endpoints return required output.
-    pub async fn first_success<'a, F, O, R>(
-        &'a self,
-        func: F,
-    ) -> Result<(O, usize), FallbackError<SingleEndpointError>>
-    where
-        F: Fn(&'a HttpJsonRpc) -> R,
-        R: Future<Output = Result<O, SingleEndpointError>>,
-    {
-        let func = &func;
-        self.fallback
-            .first_success(|endpoint| async move {
-                match self.state(endpoint).await {
-                    Ok(()) => {
-                        let endpoint_str = &endpoint.client.to_string();
-                        crate::metrics::inc_counter_vec(
-                            &crate::metrics::ENDPOINT_REQUESTS,
-                            &[endpoint_str],
-                        );
-                        match func(&endpoint.client).await {
-                            Ok(t) => Ok(t),
-                            Err(t) => {
-                                crate::metrics::inc_counter_vec(
-                                    &crate::metrics::ENDPOINT_ERRORS,
-                                    &[endpoint_str],
-                                );
-                                if let SingleEndpointError::EndpointError(e) = &t {
-                                    *endpoint.state.write().await = Some(Err(e.clone()));
-                                } else {
-                                    // A non-`EndpointError` error occurred, so reset the state.
-                                    reset_endpoint_state(endpoint).await;
-                                }
-                                Err(t)
-                            }
-                        }
-                    }
-                    Err(e) => Err(SingleEndpointError::EndpointError(e)),
-                }
-            })
-            .await
-    }
-
-    pub async fn reset_errorred_endpoints(&self) {
-        for endpoint in &self.fallback.servers {
-            if let Some(state) = get_state(endpoint).await {
-                if state.is_err() {
-                    reset_endpoint_state(endpoint).await;
-                }
-            }
-        }
-    }
-}
-
/// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and
/// chain id. Otherwise it returns `Err`.
async fn endpoint_state(
@ -181,12 +63,17 @@ async fn endpoint_state(
    config_chain_id: &Eth1Id,
    log: &Logger,
) -> EndpointState {
-    let error_connecting = |e| {
+    let error_connecting = |e: String| {
+        debug!(
+            log,
+            "eth1 endpoint error";
+            "endpoint" => %endpoint,
+            "error" => &e,
+        );
        warn!(
            log,
            "Error connecting to eth1 node endpoint";
            "endpoint" => %endpoint,
-            "action" => "trying fallbacks"
        );
        EndpointError::RequestFailed(e)
    };
@ -202,7 +89,6 @@ async fn endpoint_state(
            log,
            "Remote execution node is not synced";
            "endpoint" => %endpoint,
-            "action" => "trying fallbacks"
        );
        return Err(EndpointError::FarBehind);
    }
@ -211,7 +97,6 @@ async fn endpoint_state(
            log,
            "Invalid execution chain ID. Please switch to correct chain ID on endpoint";
            "endpoint" => %endpoint,
-            "action" => "trying fallbacks",
            "expected" => ?config_chain_id,
            "received" => ?chain_id,
        );
@ -240,7 +125,7 @@ async fn get_remote_head_and_new_block_ranges(
|
|||||||
Option<RangeInclusive<u64>>,
|
Option<RangeInclusive<u64>>,
|
||||||
Option<RangeInclusive<u64>>,
|
Option<RangeInclusive<u64>>,
|
||||||
),
|
),
|
||||||
SingleEndpointError,
|
Error,
|
||||||
> {
|
> {
|
||||||
let remote_head_block = download_eth1_block(endpoint, service.inner.clone(), None).await?;
|
let remote_head_block = download_eth1_block(endpoint, service.inner.clone(), None).await?;
|
||||||
let now = SystemTime::now()
|
let now = SystemTime::now()
|
||||||
@ -253,18 +138,16 @@ async fn get_remote_head_and_new_block_ranges(
|
|||||||
"Execution endpoint is not synced";
|
"Execution endpoint is not synced";
|
||||||
"endpoint" => %endpoint,
|
"endpoint" => %endpoint,
|
||||||
"last_seen_block_unix_timestamp" => remote_head_block.timestamp,
|
"last_seen_block_unix_timestamp" => remote_head_block.timestamp,
|
||||||
"action" => "trying fallback"
|
|
||||||
);
|
);
|
||||||
return Err(SingleEndpointError::EndpointError(EndpointError::FarBehind));
|
return Err(Error::EndpointError(EndpointError::FarBehind));
|
||||||
}
|
}
|
||||||
|
|
||||||
let handle_remote_not_synced = |e| {
|
let handle_remote_not_synced = |e| {
|
||||||
if let SingleEndpointError::RemoteNotSynced { .. } = e {
|
if let Error::RemoteNotSynced { .. } = e {
|
||||||
warn!(
|
warn!(
|
||||||
service.log,
|
service.log,
|
||||||
"Execution endpoint is not synced";
|
"Execution endpoint is not synced";
|
||||||
"endpoint" => %endpoint,
|
"endpoint" => %endpoint,
|
||||||
"action" => "trying fallbacks"
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
e
|
e
|
||||||
@@ -296,16 +179,25 @@ async fn relevant_new_block_numbers_from_endpoint(
    endpoint: &HttpJsonRpc,
    service: &Service,
    head_type: HeadType,
-) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> {
+) -> Result<Option<RangeInclusive<u64>>, Error> {
    let remote_highest_block = endpoint
        .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS))
-        .map_err(SingleEndpointError::GetBlockNumberFailed)
+        .map_err(Error::GetBlockNumberFailed)
        .await?;
    service.relevant_new_block_numbers(remote_highest_block, None, head_type)
}

#[derive(Debug, PartialEq)]
-pub enum SingleEndpointError {
+pub enum Error {
+    /// There was an inconsistency when adding a block to the cache.
+    FailedToInsertEth1Block(BlockCacheError),
+    /// There was an inconsistency when adding a deposit to the cache.
+    FailedToInsertDeposit(DepositCacheError),
+    /// A log downloaded from the eth1 contract was not well formed.
+    FailedToParseDepositLog {
+        block_range: Range<u64>,
+        error: String,
+    },
    /// Endpoint is currently not functional.
    EndpointError(EndpointError),
    /// The remote node is less synced that we expect, it is not useful until has done more
@@ -325,23 +217,12 @@ pub enum SingleEndpointError {
    GetDepositCountFailed(String),
    /// Failed to read the deposit contract root from the eth1 node.
    GetDepositLogsFailed(String),
-}
-
-#[derive(Debug, PartialEq)]
-pub enum Error {
-    /// There was an inconsistency when adding a block to the cache.
-    FailedToInsertEth1Block(BlockCacheError),
-    /// There was an inconsistency when adding a deposit to the cache.
-    FailedToInsertDeposit(DepositCacheError),
-    /// A log downloaded from the eth1 contract was not well formed.
-    FailedToParseDepositLog {
-        block_range: Range<u64>,
-        error: String,
-    },
-    /// All possible endpoints returned a `SingleEndpointError`.
-    FallbackError(FallbackError<SingleEndpointError>),
    /// There was an unexpected internal error.
    Internal(String),
+    /// Error finalizing deposit
+    FailedToFinalizeDeposit(String),
+    /// There was a problem Initializing from deposit snapshot
+    FailedToInitializeFromSnapshot(String),
}

/// The success message for an Eth1Data cache update.
@@ -367,21 +248,14 @@ pub enum Eth1Endpoint {
        jwt_id: Option<String>,
        jwt_version: Option<String>,
    },
-    NoAuth(Vec<SensitiveUrl>),
+    NoAuth(SensitiveUrl),
}

impl Eth1Endpoint {
-    fn len(&self) -> usize {
+    pub fn get_endpoint(&self) -> SensitiveUrl {
        match &self {
-            Self::Auth { .. } => 1,
-            Self::NoAuth(urls) => urls.len(),
+            Self::Auth { endpoint, .. } => endpoint.clone(),
+            Self::NoAuth(endpoint) => endpoint.clone(),
-        }
-    }
-
-    pub fn get_endpoints(&self) -> Vec<SensitiveUrl> {
-        match &self {
-            Self::Auth { endpoint, .. } => vec![endpoint.clone()],
-            Self::NoAuth(endpoints) => endpoints.clone(),
        }
    }
}
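To make the shape of the new single-endpoint accessor concrete, here is a minimal, self-contained sketch. It is illustrative only: `String` stands in for `SensitiveUrl`, the `Auth` variant is reduced to two fields, and `Endpoint` is a hypothetical stand-in type, not the crate's real `Eth1Endpoint`.

```rust
// Stand-in sketch of the one-URL-per-config accessor introduced above.
#[derive(Clone, Debug)]
enum Endpoint {
    Auth { endpoint: String, jwt_path: String }, // hypothetical simplified variant
    NoAuth(String),
}

impl Endpoint {
    // Always yields exactly one URL, never a list of fallbacks.
    fn get_endpoint(&self) -> String {
        match self {
            Self::Auth { endpoint, .. } => endpoint.clone(),
            Self::NoAuth(endpoint) => endpoint.clone(),
        }
    }
}

fn main() {
    let e = Endpoint::NoAuth("http://localhost:8545".to_string());
    assert_eq!(e.get_endpoint(), "http://localhost:8545");
}
```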
@@ -389,7 +263,7 @@ impl Eth1Endpoint {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint.
-    pub endpoints: Eth1Endpoint,
+    pub endpoint: Eth1Endpoint,
    /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract.
    pub deposit_contract_address: String,
    /// The eth1 chain id where the deposit contract is deployed (Goerli/Mainnet).
@@ -426,6 +300,7 @@ pub struct Config {
    pub max_blocks_per_update: Option<usize>,
    /// If set to true, the eth1 caches are wiped clean when the eth1 service starts.
    pub purge_cache: bool,
+    pub execution_timeout_multiplier: u32,
}

impl Config {
@@ -466,8 +341,10 @@ impl Config {
impl Default for Config {
    fn default() -> Self {
        Self {
-            endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT)
-                .expect("The default Eth1 endpoint must always be a valid URL.")]),
+            endpoint: Eth1Endpoint::NoAuth(
+                SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT)
+                    .expect("The default Eth1 endpoint must always be a valid URL."),
+            ),
            deposit_contract_address: "0x0000000000000000000000000000000000000000".into(),
            chain_id: DEFAULT_CHAIN_ID,
            deposit_contract_deploy_block: 1,
@@ -481,6 +358,27 @@ impl Default for Config {
            max_log_requests_per_update: Some(5_000),
            max_blocks_per_update: Some(8_192),
            purge_cache: false,
+            execution_timeout_multiplier: 1,
+        }
+    }
+}
+
+pub fn endpoint_from_config(config: &Config) -> Result<HttpJsonRpc, String> {
+    match config.endpoint.clone() {
+        Eth1Endpoint::Auth {
+            endpoint,
+            jwt_path,
+            jwt_id,
+            jwt_version,
+        } => {
+            let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version)
+                .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?;
+            HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier))
+                .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e))
+        }
+        Eth1Endpoint::NoAuth(endpoint) => {
+            HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier))
+                .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e))
        }
    }
}
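The hunk above folds client construction into one fallible helper. The following sketch mirrors that pattern under stated assumptions: `RpcClient` and `Eth1Config` are hypothetical stand-ins for `HttpJsonRpc` and the eth1 `Config`, and the validation is invented purely to show why the helper returns `Result`.

```rust
// Simplified stand-in for the config-to-client construction pattern.
struct RpcClient {
    url: String,
    jwt_path: Option<String>, // Some(..) = authenticated flavour, None = plain
    timeout_multiplier: u32,
}

struct Eth1Config {
    url: String,
    jwt_path: Option<String>,
    execution_timeout_multiplier: u32,
}

fn client_from_config(config: &Eth1Config) -> Result<RpcClient, String> {
    if config.url.is_empty() {
        return Err("endpoint URL must not be empty".to_string());
    }
    Ok(RpcClient {
        url: config.url.clone(),
        jwt_path: config.jwt_path.clone(),
        timeout_multiplier: config.execution_timeout_multiplier,
    })
}

fn main() {
    let config = Eth1Config {
        url: "http://localhost:8551".to_string(),
        jwt_path: Some("/tmp/jwt.hex".to_string()),
        execution_timeout_multiplier: 1,
    };
    let client = client_from_config(&config).unwrap();
    assert!(client.jwt_path.is_some());
    assert_eq!(client.timeout_multiplier, 1);
    println!("client for {}", client.url);
}
```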
@@ -499,20 +397,55 @@ pub struct Service {

impl Service {
    /// Creates a new service. Does not attempt to connect to the eth1 node.
-    pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Self {
-        Self {
+    pub fn new(config: Config, log: Logger, spec: ChainSpec) -> Result<Self, String> {
+        Ok(Self {
            inner: Arc::new(Inner {
                block_cache: <_>::default(),
                deposit_cache: RwLock::new(DepositUpdater::new(
                    config.deposit_contract_deploy_block,
                )),
-                endpoints_cache: RwLock::new(None),
+                endpoint: endpoint_from_config(&config)?,
+                to_finalize: RwLock::new(None),
                remote_head_block: RwLock::new(None),
                config: RwLock::new(config),
                spec,
            }),
            log,
+        })
    }
+
+    pub fn client(&self) -> &HttpJsonRpc {
+        &self.inner.endpoint
+    }
+
+    /// Creates a new service, initializing the deposit tree from a snapshot.
+    pub fn from_deposit_snapshot(
+        config: Config,
+        log: Logger,
+        spec: ChainSpec,
+        deposit_snapshot: &DepositTreeSnapshot,
+    ) -> Result<Self, Error> {
+        let deposit_cache =
+            DepositUpdater::from_snapshot(config.deposit_contract_deploy_block, deposit_snapshot)
+                .map_err(Error::FailedToInitializeFromSnapshot)?;
+
+        Ok(Self {
+            inner: Arc::new(Inner {
+                block_cache: <_>::default(),
+                deposit_cache: RwLock::new(deposit_cache),
+                endpoint: endpoint_from_config(&config)
+                    .map_err(Error::FailedToInitializeFromSnapshot)?,
+                to_finalize: RwLock::new(None),
+                remote_head_block: RwLock::new(None),
+                config: RwLock::new(config),
+                spec,
+            }),
+            log,
+        })
+    }
+
+    pub fn set_to_finalize(&self, eth1_data: Option<Eth1Data>) {
+        *(self.inner.to_finalize.write()) = eth1_data;
    }

    /// Returns the follow distance that has been shortened to accommodate for differences in the
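Construction of the service is now fallible because building the RPC client can fail. A minimal stand-alone sketch of that pattern, with a hypothetical `Service` stand-in rather than the real type, shows what callers (including the updated tests further down) now have to do:

```rust
// Sketch of a fallible constructor: errors surface at construction time
// instead of at first use.
struct Service {
    endpoint: String,
}

impl Service {
    fn new(endpoint: &str) -> Result<Self, String> {
        if endpoint.is_empty() {
            return Err("empty eth1 endpoint".to_string());
        }
        Ok(Self {
            endpoint: endpoint.to_string(),
        })
    }
}

fn main() {
    // Callers must unwrap or propagate the error, as the updated tests do.
    let service = Service::new("http://localhost:8545").unwrap();
    assert_eq!(service.endpoint, "http://localhost:8545");
}
```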
@@ -629,7 +562,7 @@ impl Service {
        let deposits = self.deposits().read();
        deposits
            .cache
-            .get_valid_signature_count(deposits.cache.latest_block_number()?)
+            .get_valid_signature_count(deposits.cache.latest_block_number())
    }

    /// Returns the number of deposits with valid signatures that have been observed up to and
@@ -676,52 +609,6 @@ impl Service {
        self.inner.config.write().lowest_cached_block_number = block_number;
    }

-    /// Builds a new `EndpointsCache` with empty states.
-    pub fn init_endpoints(&self) -> Result<Arc<EndpointsCache>, String> {
-        let endpoints = self.config().endpoints.clone();
-        let config_chain_id = self.config().chain_id.clone();
-
-        let servers = match endpoints {
-            Eth1Endpoint::Auth {
-                jwt_path,
-                endpoint,
-                jwt_id,
-                jwt_version,
-            } => {
-                let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version)
-                    .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?;
-                vec![HttpJsonRpc::new_with_auth(endpoint, auth)
-                    .map_err(|e| format!("Failed to build auth enabled json rpc {:?}", e))?]
-            }
-            Eth1Endpoint::NoAuth(urls) => urls
-                .into_iter()
-                .map(|url| {
-                    HttpJsonRpc::new(url).map_err(|e| format!("Failed to build json rpc {:?}", e))
-                })
-                .collect::<Result<_, _>>()?,
-        };
-        let new_cache = Arc::new(EndpointsCache {
-            fallback: Fallback::new(servers.into_iter().map(EndpointWithState::new).collect()),
-            config_chain_id,
-            log: self.log.clone(),
-        });
-
-        let mut endpoints_cache = self.inner.endpoints_cache.write();
-        *endpoints_cache = Some(new_cache.clone());
-        Ok(new_cache)
-    }
-
-    /// Returns the cached `EndpointsCache` if it exists or builds a new one.
-    pub fn get_endpoints(&self) -> Result<Arc<EndpointsCache>, String> {
-        let endpoints_cache = self.inner.endpoints_cache.read();
-        if let Some(cache) = endpoints_cache.clone() {
-            Ok(cache)
-        } else {
-            drop(endpoints_cache);
-            self.init_endpoints()
-        }
-    }
-
    /// Update the deposit and block cache, returning an error if either fail.
    ///
    /// ## Returns
@@ -733,56 +620,28 @@ impl Service {
    pub async fn update(
        &self,
    ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> {
-        let endpoints = self.get_endpoints()?;
-        // Reset the state of any endpoints which have errored so their state can be redetermined.
-        endpoints.reset_errorred_endpoints().await;
+        let client = self.client();
+        let log = self.log.clone();
+        let chain_id = self.config().chain_id.clone();

        let node_far_behind_seconds = self.inner.config.read().node_far_behind_seconds;

-        let process_single_err = |e: &FallbackError<SingleEndpointError>| {
-            match e {
-                FallbackError::AllErrored(errors) => {
-                    if errors
-                        .iter()
-                        .all(|error| matches!(error, SingleEndpointError::EndpointError(_)))
-                    {
-                        error!(
-                            self.log,
-                            "No synced execution endpoint";
-                            "advice" => "ensure you have an execution node configured via \
-                            --execution-endpoint or if pre-merge, --eth1-endpoints"
-                        );
-                    }
-                }
-            }
-            endpoints.fallback.map_format_error(|s| &s.client, e)
-        };
-
-        let process_err = |e: Error| match &e {
-            Error::FallbackError(f) => process_single_err(f),
-            e => format!("{:?}", e),
-        };
-
-        let (
-            (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache),
-            num_errors,
-        ) = endpoints
-            .first_success(|e| async move {
-                get_remote_head_and_new_block_ranges(e, self, node_far_behind_seconds).await
-            })
+        match endpoint_state(client, &chain_id, &log).await {
+            Ok(()) => crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1),
+            Err(e) => {
+                crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0);
+                return Err(format!("Invalid endpoint state: {:?}", e));
+            }
+        }
+        let (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache) =
+            get_remote_head_and_new_block_ranges(client, self, node_far_behind_seconds)
            .await
-            .map_err(|e| format!("{:?}", process_single_err(&e)))?;
-
-        if num_errors > 0 {
-            info!(self.log, "Fetched data from fallback"; "fallback_number" => num_errors);
-        }
+            .map_err(|e| format!("Failed to get remote head and new block ranges: {:?}", e))?;

        *self.inner.remote_head_block.write() = Some(remote_head_block);

        let update_deposit_cache = async {
            let outcome_result = self
-                .update_deposit_cache(Some(new_block_numbers_deposit), &endpoints)
+                .update_deposit_cache(Some(new_block_numbers_deposit))
                .await;

            // Reset the `last_procesed block` to the last valid deposit's block number.
@@ -801,11 +660,12 @@ impl Service {
                    "old_block_number" => deposit_cache.last_processed_block,
                    "new_block_number" => deposit_cache.cache.latest_block_number(),
                );
-                deposit_cache.last_processed_block = deposit_cache.cache.latest_block_number();
+                deposit_cache.last_processed_block =
+                    Some(deposit_cache.cache.latest_block_number());
            }

-            let outcome = outcome_result
-                .map_err(|e| format!("Failed to update deposit cache: {:?}", process_err(e)))?;
+            let outcome =
+                outcome_result.map_err(|e| format!("Failed to update deposit cache: {:?}", e))?;

            trace!(
                self.log,
@@ -819,14 +679,9 @@ impl Service {

        let update_block_cache = async {
            let outcome = self
-                .update_block_cache(Some(new_block_numbers_block_cache), &endpoints)
+                .update_block_cache(Some(new_block_numbers_block_cache))
                .await
-                .map_err(|e| {
-                    format!(
-                        "Failed to update deposit contract block cache: {:?}",
-                        process_err(e)
-                    )
-                })?;
+                .map_err(|e| format!("Failed to update deposit contract block cache: {:?}", e))?;

            trace!(
                self.log,
@@ -858,7 +713,6 @@ impl Service {

        let mut interval = interval_at(Instant::now(), update_interval);

-        let num_fallbacks = self.config().endpoints.len() - 1;
        let update_future = async move {
            loop {
                interval.tick().await;
@@ -866,15 +720,6 @@ impl Service {
            }
        };

-        // Set the number of configured eth1 servers
-        metrics::set_gauge(&metrics::ETH1_FALLBACK_CONFIGURED, num_fallbacks as i64);
-        // Since we lazily update eth1 fallbacks, it's not possible to know connection status of fallback.
-        // Hence, we set it to 1 if we have atleast one configured fallback.
-        if num_fallbacks > 0 {
-            metrics::set_gauge(&metrics::ETH1_FALLBACK_CONNECTED, 1);
-        } else {
-            metrics::set_gauge(&metrics::ETH1_FALLBACK_CONNECTED, 0);
-        }
        handle.spawn(update_future, "eth1");
    }

@@ -895,6 +740,37 @@ impl Service {
                "deposits" => format!("{:?}", deposit),
            ),
        };
+        let optional_eth1data = self.inner.to_finalize.write().take();
+        if let Some(eth1data_to_finalize) = optional_eth1data {
+            let already_finalized = self
+                .inner
+                .deposit_cache
+                .read()
+                .cache
+                .finalized_deposit_count();
+            let deposit_count_to_finalize = eth1data_to_finalize.deposit_count;
+            if deposit_count_to_finalize > already_finalized {
+                match self.finalize_deposits(eth1data_to_finalize) {
+                    Err(e) => error!(
+                        self.log,
+                        "Failed to finalize deposit cache";
+                        "error" => ?e,
+                    ),
+                    Ok(()) => info!(
+                        self.log,
+                        "Successfully finalized deposit tree";
+                        "finalized deposit count" => deposit_count_to_finalize,
+                    ),
+                }
+            } else {
+                debug!(
+                    self.log,
+                    "Deposits tree already finalized";
+                    "already_finalized" => already_finalized,
+                    "deposit_count_to_finalize" => deposit_count_to_finalize,
+                );
+            }
+        }
        Ok(())
    }

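The added block only attempts finalization when the requested deposit count moves past what has already been finalized; otherwise it just logs and moves on. A tiny stand-alone sketch of that guard (the function name and values are assumptions for illustration, not part of the crate):

```rust
// Hypothetical distillation of the guard used above.
fn should_finalize(already_finalized: u64, deposit_count_to_finalize: u64) -> bool {
    deposit_count_to_finalize > already_finalized
}

fn main() {
    assert!(should_finalize(10, 12)); // new deposits to finalize
    assert!(!should_finalize(12, 12)); // tree already finalized at least this far
}
```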
@@ -904,7 +780,7 @@ impl Service {
        remote_highest_block_number: u64,
        remote_highest_block_timestamp: Option<u64>,
        head_type: HeadType,
-    ) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> {
+    ) -> Result<Option<RangeInclusive<u64>>, Error> {
        let follow_distance = self.cache_follow_distance();
        let latest_cached_block = self.latest_cached_block();
        let next_required_block = match head_type {
@@ -930,6 +806,30 @@ impl Service {
        )
    }

+    pub fn finalize_deposits(&self, eth1_data: Eth1Data) -> Result<(), Error> {
+        let eth1_block = self
+            .inner
+            .block_cache
+            .read()
+            .block_by_hash(&eth1_data.block_hash)
+            .cloned()
+            .ok_or_else(|| {
+                Error::FailedToFinalizeDeposit(
+                    "Finalized block not found in block cache".to_string(),
+                )
+            })?;
+        self.inner
+            .deposit_cache
+            .write()
+            .cache
+            .finalize(eth1_block)
+            .map_err(|e| Error::FailedToFinalizeDeposit(format!("{:?}", e)))
+    }
+
+    pub fn get_deposit_snapshot(&self) -> Option<DepositTreeSnapshot> {
+        self.inner.deposit_cache.read().cache.get_deposit_snapshot()
+    }
+
    /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured
    /// follow-distance block.
    ///
@@ -948,8 +848,8 @@ impl Service {
    pub async fn update_deposit_cache(
        &self,
        new_block_numbers: Option<Option<RangeInclusive<u64>>>,
-        endpoints: &EndpointsCache,
    ) -> Result<DepositCacheUpdateOutcome, Error> {
+        let client = self.client();
        let deposit_contract_address = self.config().deposit_contract_address.clone();

        let blocks_per_log_query = self.config().blocks_per_log_query;
@@ -961,13 +861,10 @@ impl Service {
        let range = {
            match new_block_numbers {
                Some(range) => range,
-                None => endpoints
-                    .first_success(|e| async move {
-                        relevant_new_block_numbers_from_endpoint(e, self, HeadType::Deposit).await
-                    })
-                    .await
-                    .map(|(res, _)| res)
-                    .map_err(Error::FallbackError)?,
+                None => {
+                    relevant_new_block_numbers_from_endpoint(client, self, HeadType::Deposit)
+                        .await?
+                }
            }
        };

@@ -1001,20 +898,14 @@ impl Service {
             * Step 1. Download logs.
             */
            let block_range_ref = &block_range;
-            let logs = endpoints
-                .first_success(|endpoint| async move {
-                    endpoint
-                        .get_deposit_logs_in_range(
-                            deposit_contract_address_ref,
-                            block_range_ref.clone(),
-                            Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS),
-                        )
-                        .await
-                        .map_err(SingleEndpointError::GetDepositLogsFailed)
-                })
-                .await
-                .map(|(res, _)| res)
-                .map_err(Error::FallbackError)?;
+            let logs = client
+                .get_deposit_logs_in_range(
+                    deposit_contract_address_ref,
+                    block_range_ref.clone(),
+                    Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS),
+                )
+                .await
+                .map_err(Error::GetDepositLogsFailed)?;

            /*
             * Step 2. Import logs to cache.
@@ -1050,7 +941,7 @@ impl Service {
                        logs_imported += 1;
                    }

-                    Ok(())
+                    Ok::<_, Error>(())
                })?;

            debug!(
@@ -1105,8 +996,8 @@ impl Service {
    pub async fn update_block_cache(
        &self,
        new_block_numbers: Option<Option<RangeInclusive<u64>>>,
-        endpoints: &EndpointsCache,
    ) -> Result<BlockCacheUpdateOutcome, Error> {
+        let client = self.client();
        let block_cache_truncation = self.config().block_cache_truncation;
        let max_blocks_per_update = self
            .config()
@@ -1116,14 +1007,10 @@ impl Service {
        let range = {
            match new_block_numbers {
                Some(range) => range,
-                None => endpoints
-                    .first_success(|e| async move {
-                        relevant_new_block_numbers_from_endpoint(e, self, HeadType::BlockCache)
-                            .await
-                    })
-                    .await
-                    .map(|(res, _)| res)
-                    .map_err(Error::FallbackError)?,
+                None => {
+                    relevant_new_block_numbers_from_endpoint(client, self, HeadType::BlockCache)
+                        .await?
+                }
            }
        };

@@ -1183,13 +1070,8 @@ impl Service {

        let mut blocks_imported = 0;
        for block_number in required_block_numbers {
-            let eth1_block = endpoints
-                .first_success(|e| async move {
-                    download_eth1_block(e, self.inner.clone(), Some(block_number)).await
-                })
-                .await
-                .map(|(res, _)| res)
-                .map_err(Error::FallbackError)?;
+            let eth1_block =
+                download_eth1_block(client, self.inner.clone(), Some(block_number)).await?;

            self.inner
                .block_cache
@@ -1269,7 +1151,7 @@ fn relevant_block_range(
    cache_follow_distance: u64,
    latest_cached_block: Option<&Eth1Block>,
    spec: &ChainSpec,
-) -> Result<Option<RangeInclusive<u64>>, SingleEndpointError> {
+) -> Result<Option<RangeInclusive<u64>>, Error> {
    // If the latest cached block is lagging the head block by more than `cache_follow_distance`
    // times the expected block time then the eth1 block time is likely quite different from what we
    // assumed.
@@ -1304,7 +1186,7 @@ fn relevant_block_range(
        //
        // We assume that the `cache_follow_distance` should be sufficient to ensure this never
        // happens, otherwise it is an error.
-        Err(SingleEndpointError::RemoteNotSynced {
+        Err(Error::RemoteNotSynced {
            next_required_block,
            remote_highest_block: remote_highest_block_number,
            cache_follow_distance,
@@ -1325,7 +1207,7 @@ async fn download_eth1_block(
    endpoint: &HttpJsonRpc,
    cache: Arc<Inner>,
    block_number_opt: Option<u64>,
-) -> Result<Eth1Block, SingleEndpointError> {
+) -> Result<Eth1Block, Error> {
    let deposit_root = block_number_opt.and_then(|block_number| {
        cache
            .deposit_cache
@@ -1350,7 +1232,7 @@ async fn download_eth1_block(
            .unwrap_or_else(|| BlockQuery::Latest),
        Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS),
    )
-    .map_err(SingleEndpointError::BlockDownloadFailed)
+    .map_err(Error::BlockDownloadFailed)
    .await?;

    Ok(Eth1Block {
@@ -117,10 +117,9 @@ mod eth1_cache {
        let initial_block_number = get_block_number(&web3).await;

        let config = Config {
-            endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
-                eth1.endpoint().as_str(),
-            )
-            .unwrap()]),
+            endpoint: Eth1Endpoint::NoAuth(
+                SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
+            ),
            deposit_contract_address: deposit_contract.address(),
            lowest_cached_block_number: initial_block_number,
            follow_distance,
@@ -128,7 +127,8 @@ mod eth1_cache {
        };
        let cache_follow_distance = config.cache_follow_distance();

-        let service = Service::new(config, log.clone(), MainnetEthSpec::default_spec());
+        let service =
+            Service::new(config, log.clone(), MainnetEthSpec::default_spec()).unwrap();

        // Create some blocks and then consume them, performing the test `rounds` times.
        for round in 0..2 {
@@ -149,19 +149,17 @@ mod eth1_cache {
                eth1.ganache.evm_mine().await.expect("should mine block");
            }

-            let endpoints = service.init_endpoints().unwrap();
-
            service
-                .update_deposit_cache(None, &endpoints)
+                .update_deposit_cache(None)
                .await
                .expect("should update deposit cache");
            service
-                .update_block_cache(None, &endpoints)
+                .update_block_cache(None)
                .await
                .expect("should update block cache");

            service
-                .update_block_cache(None, &endpoints)
+                .update_block_cache(None)
                .await
                .expect("should update cache when nothing has changed");

|
|||||||
|
|
||||||
let service = Service::new(
|
let service = Service::new(
|
||||||
Config {
|
Config {
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
|
endpoint: Eth1Endpoint::NoAuth(
|
||||||
eth1.endpoint().as_str(),
|
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
|
||||||
)
|
),
|
||||||
.unwrap()]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
deposit_contract_address: deposit_contract.address(),
|
||||||
lowest_cached_block_number: get_block_number(&web3).await,
|
lowest_cached_block_number: get_block_number(&web3).await,
|
||||||
follow_distance: 0,
|
follow_distance: 0,
|
||||||
@ -213,7 +210,8 @@ mod eth1_cache {
|
|||||||
},
|
},
|
||||||
log,
|
log,
|
||||||
MainnetEthSpec::default_spec(),
|
MainnetEthSpec::default_spec(),
|
||||||
);
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
let blocks = cache_len * 2;
|
let blocks = cache_len * 2;
|
||||||
|
|
||||||
@ -221,14 +219,12 @@ mod eth1_cache {
|
|||||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||||
}
|
}
|
||||||
|
|
||||||
let endpoints = service.init_endpoints().unwrap();
|
|
||||||
|
|
||||||
service
|
service
|
||||||
.update_deposit_cache(None, &endpoints)
|
.update_deposit_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should update deposit cache");
|
.expect("should update deposit cache");
|
||||||
service
|
service
|
||||||
.update_block_cache(None, &endpoints)
|
.update_block_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should update block cache");
|
.expect("should update block cache");
|
||||||
|
|
||||||
@ -258,10 +254,9 @@ mod eth1_cache {
|
|||||||
|
|
||||||
let service = Service::new(
|
let service = Service::new(
|
||||||
Config {
|
Config {
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
|
endpoint: Eth1Endpoint::NoAuth(
|
||||||
eth1.endpoint().as_str(),
|
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
|
||||||
)
|
),
|
||||||
.unwrap()]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
deposit_contract_address: deposit_contract.address(),
|
||||||
lowest_cached_block_number: get_block_number(&web3).await,
|
lowest_cached_block_number: get_block_number(&web3).await,
|
||||||
follow_distance: 0,
|
follow_distance: 0,
|
||||||
@ -270,19 +265,19 @@ mod eth1_cache {
|
|||||||
},
|
},
|
||||||
log,
|
log,
|
||||||
MainnetEthSpec::default_spec(),
|
MainnetEthSpec::default_spec(),
|
||||||
);
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
for _ in 0..4u8 {
|
for _ in 0..4u8 {
|
||||||
for _ in 0..cache_len / 2 {
|
for _ in 0..cache_len / 2 {
|
||||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||||
}
|
}
|
||||||
let endpoints = service.init_endpoints().unwrap();
|
|
||||||
service
|
service
|
||||||
.update_deposit_cache(None, &endpoints)
|
.update_deposit_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should update deposit cache");
|
.expect("should update deposit cache");
|
||||||
service
|
service
|
||||||
.update_block_cache(None, &endpoints)
|
.update_block_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should update block cache");
|
.expect("should update block cache");
|
||||||
}
|
}
|
||||||
@ -311,10 +306,9 @@ mod eth1_cache {
|
|||||||
|
|
||||||
let service = Service::new(
|
let service = Service::new(
|
||||||
Config {
|
Config {
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
|
endpoint: Eth1Endpoint::NoAuth(
|
||||||
eth1.endpoint().as_str(),
|
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
|
||||||
)
|
),
|
||||||
.unwrap()]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
deposit_contract_address: deposit_contract.address(),
|
||||||
lowest_cached_block_number: get_block_number(&web3).await,
|
lowest_cached_block_number: get_block_number(&web3).await,
|
||||||
follow_distance: 0,
|
follow_distance: 0,
|
||||||
@ -322,21 +316,21 @@ mod eth1_cache {
|
|||||||
},
|
},
|
||||||
log,
|
log,
|
||||||
MainnetEthSpec::default_spec(),
|
MainnetEthSpec::default_spec(),
|
||||||
);
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
for _ in 0..n {
|
for _ in 0..n {
|
||||||
eth1.ganache.evm_mine().await.expect("should mine block")
|
eth1.ganache.evm_mine().await.expect("should mine block")
|
||||||
}
|
}
|
||||||
|
|
||||||
let endpoints = service.init_endpoints().unwrap();
|
|
||||||
futures::try_join!(
|
futures::try_join!(
|
||||||
service.update_deposit_cache(None, &endpoints),
|
service.update_deposit_cache(None),
|
||||||
service.update_deposit_cache(None, &endpoints)
|
service.update_deposit_cache(None)
|
||||||
)
|
)
|
||||||
.expect("should perform two simultaneous updates of deposit cache");
|
.expect("should perform two simultaneous updates of deposit cache");
|
||||||
futures::try_join!(
|
futures::try_join!(
|
||||||
service.update_block_cache(None, &endpoints),
|
service.update_block_cache(None),
|
||||||
service.update_block_cache(None, &endpoints)
|
service.update_block_cache(None)
|
||||||
)
|
)
|
||||||
.expect("should perform two simultaneous updates of block cache");
|
.expect("should perform two simultaneous updates of block cache");
|
||||||
|
|
||||||
@ -366,10 +360,9 @@ mod deposit_tree {
|
|||||||
|
|
||||||
let service = Service::new(
|
let service = Service::new(
|
||||||
Config {
|
Config {
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
|
endpoint: Eth1Endpoint::NoAuth(
|
||||||
eth1.endpoint().as_str(),
|
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
|
||||||
)
|
),
|
||||||
.unwrap()]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
deposit_contract_address: deposit_contract.address(),
|
||||||
deposit_contract_deploy_block: start_block,
|
deposit_contract_deploy_block: start_block,
|
||||||
follow_distance: 0,
|
follow_distance: 0,
|
||||||
@ -377,7 +370,8 @@ mod deposit_tree {
|
|||||||
},
|
},
|
||||||
log,
|
log,
|
||||||
MainnetEthSpec::default_spec(),
|
MainnetEthSpec::default_spec(),
|
||||||
);
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
for round in 0..3 {
|
for round in 0..3 {
|
||||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||||
@ -389,15 +383,13 @@ mod deposit_tree {
|
|||||||
.expect("should perform a deposit");
|
.expect("should perform a deposit");
|
||||||
}
|
}
|
||||||
|
|
||||||
let endpoints = service.init_endpoints().unwrap();
|
|
||||||
|
|
||||||
service
|
service
|
||||||
.update_deposit_cache(None, &endpoints)
|
.update_deposit_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should perform update");
|
.expect("should perform update");
|
||||||
|
|
||||||
service
|
service
|
||||||
.update_deposit_cache(None, &endpoints)
|
.update_deposit_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should perform update when nothing has changed");
|
.expect("should perform update when nothing has changed");
|
||||||
|
|
||||||
@ -408,7 +400,7 @@ mod deposit_tree {
|
|||||||
.deposits()
|
.deposits()
|
||||||
.read()
|
.read()
|
||||||
.cache
|
.cache
|
||||||
.get_deposits(first, last, last, 32)
|
.get_deposits(first, last, last)
|
||||||
.unwrap_or_else(|_| panic!("should get deposits in round {}", round));
|
.unwrap_or_else(|_| panic!("should get deposits in round {}", round));
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
@ -449,10 +441,9 @@ mod deposit_tree {
|
|||||||
|
|
||||||
let service = Service::new(
|
let service = Service::new(
|
||||||
Config {
|
Config {
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
|
endpoint: Eth1Endpoint::NoAuth(
|
||||||
eth1.endpoint().as_str(),
|
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
|
||||||
)
|
),
|
||||||
.unwrap()]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
deposit_contract_address: deposit_contract.address(),
|
||||||
deposit_contract_deploy_block: start_block,
|
deposit_contract_deploy_block: start_block,
|
||||||
lowest_cached_block_number: start_block,
|
lowest_cached_block_number: start_block,
|
||||||
@ -461,7 +452,8 @@ mod deposit_tree {
|
|||||||
},
|
},
|
||||||
log,
|
log,
|
||||||
MainnetEthSpec::default_spec(),
|
MainnetEthSpec::default_spec(),
|
||||||
);
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||||
|
|
||||||
@ -472,10 +464,9 @@ mod deposit_tree {
|
|||||||
.expect("should perform a deposit");
|
.expect("should perform a deposit");
|
||||||
}
|
}
|
||||||
|
|
||||||
let endpoints = service.init_endpoints().unwrap();
|
|
||||||
futures::try_join!(
|
futures::try_join!(
|
||||||
service.update_deposit_cache(None, &endpoints),
|
service.update_deposit_cache(None),
|
||||||
service.update_deposit_cache(None, &endpoints)
|
service.update_deposit_cache(None)
|
||||||
)
|
)
|
||||||
.expect("should perform two updates concurrently");
|
.expect("should perform two updates concurrently");
|
||||||
|
|
||||||
@ -502,7 +493,8 @@ mod deposit_tree {
|
|||||||
let mut deposit_roots = vec![];
|
let mut deposit_roots = vec![];
|
||||||
let mut deposit_counts = vec![];
|
let mut deposit_counts = vec![];
|
||||||
|
|
||||||
let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap();
|
let client =
|
||||||
|
HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap();
|
||||||
|
|
||||||
// Perform deposits to the smart contract, recording it's state along the way.
|
// Perform deposits to the smart contract, recording it's state along the way.
|
||||||
for deposit in &deposits {
|
for deposit in &deposits {
|
||||||
@ -559,7 +551,7 @@ mod deposit_tree {
|
|||||||
|
|
||||||
// Ensure that the root from the deposit tree matches what the contract reported.
|
// Ensure that the root from the deposit tree matches what the contract reported.
|
||||||
let (root, deposits) = tree
|
let (root, deposits) = tree
|
||||||
.get_deposits(0, i as u64, deposit_counts[i], DEPOSIT_CONTRACT_TREE_DEPTH)
|
.get_deposits(0, i as u64, deposit_counts[i])
|
||||||
.expect("should get deposits");
|
.expect("should get deposits");
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
root, deposit_roots[i],
|
root, deposit_roots[i],
|
||||||
@@ -606,7 +598,8 @@ mod http {
            .expect("should start eth1 environment");
        let deposit_contract = &eth1.deposit_contract;
        let web3 = eth1.web3();
-        let client = HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap()).unwrap();
+        let client =
+            HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap(), None).unwrap();

        let block_number = get_block_number(&web3).await;
        let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
|
|||||||
let now = get_block_number(&web3).await;
|
let now = get_block_number(&web3).await;
|
||||||
let service = Service::new(
|
let service = Service::new(
|
||||||
Config {
|
Config {
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
|
endpoint: Eth1Endpoint::NoAuth(
|
||||||
eth1.endpoint().as_str(),
|
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
|
||||||
)
|
),
|
||||||
.unwrap()]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
deposit_contract_address: deposit_contract.address(),
|
||||||
deposit_contract_deploy_block: now,
|
deposit_contract_deploy_block: now,
|
||||||
lowest_cached_block_number: now,
|
lowest_cached_block_number: now,
|
||||||
@ -719,8 +711,10 @@ mod fast {
|
|||||||
},
|
},
|
||||||
log,
|
log,
|
||||||
MainnetEthSpec::default_spec(),
|
MainnetEthSpec::default_spec(),
|
||||||
);
|
)
|
||||||
let client = HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap()).unwrap();
|
.unwrap();
|
||||||
|
let client =
|
||||||
|
HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap();
|
||||||
let n = 10;
|
let n = 10;
|
||||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||||
for deposit in &deposits {
|
for deposit in &deposits {
|
||||||
@ -732,9 +726,8 @@ mod fast {
|
|||||||
eth1.ganache.evm_mine().await.expect("should mine block");
|
eth1.ganache.evm_mine().await.expect("should mine block");
|
||||||
}
|
}
|
||||||
|
|
||||||
let endpoints = service.init_endpoints().unwrap();
|
|
||||||
service
|
service
|
||||||
.update_deposit_cache(None, &endpoints)
|
.update_deposit_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should perform update");
|
.expect("should perform update");
|
||||||
|
|
||||||
@ -787,10 +780,9 @@ mod persist {
|
|||||||
|
|
||||||
let now = get_block_number(&web3).await;
|
let now = get_block_number(&web3).await;
|
||||||
let config = Config {
|
let config = Config {
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
|
endpoint: Eth1Endpoint::NoAuth(
|
||||||
eth1.endpoint().as_str(),
|
SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
|
||||||
)
|
),
|
||||||
.unwrap()]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
deposit_contract_address: deposit_contract.address(),
|
||||||
deposit_contract_deploy_block: now,
|
deposit_contract_deploy_block: now,
|
||||||
lowest_cached_block_number: now,
|
lowest_cached_block_number: now,
|
||||||
@ -798,7 +790,8 @@ mod persist {
|
|||||||
block_cache_truncation: None,
|
block_cache_truncation: None,
|
||||||
..Config::default()
|
..Config::default()
|
||||||
};
|
};
|
||||||
let service = Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec());
|
let service =
|
||||||
|
Service::new(config.clone(), log.clone(), MainnetEthSpec::default_spec()).unwrap();
|
||||||
let n = 10;
|
let n = 10;
|
||||||
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
|
||||||
for deposit in &deposits {
|
for deposit in &deposits {
|
||||||
@ -808,9 +801,8 @@ mod persist {
|
|||||||
.expect("should perform a deposit");
|
.expect("should perform a deposit");
|
||||||
}
|
}
|
||||||
|
|
||||||
let endpoints = service.init_endpoints().unwrap();
|
|
||||||
service
|
service
|
||||||
.update_deposit_cache(None, &endpoints)
|
.update_deposit_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should perform update");
|
.expect("should perform update");
|
||||||
|
|
||||||
@ -822,7 +814,7 @@ mod persist {
|
|||||||
let deposit_count = service.deposit_cache_len();
|
let deposit_count = service.deposit_cache_len();
|
||||||
|
|
||||||
service
|
service
|
||||||
.update_block_cache(None, &endpoints)
|
.update_block_cache(None)
|
||||||
.await
|
.await
|
||||||
.expect("should perform update");
|
.expect("should perform update");
|
||||||
|
|
||||||
@ -855,228 +847,3 @@ mod persist {
|
|||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Tests for eth1 fallback
|
|
||||||
mod fallbacks {
|
|
||||||
use super::*;
|
|
||||||
use tokio::time::sleep;
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn test_fallback_when_offline() {
|
|
||||||
async {
|
|
||||||
let log = null_logger();
|
|
||||||
let endpoint2 = new_ganache_instance()
|
|
||||||
.await
|
|
||||||
.expect("should start eth1 environment");
|
|
||||||
let deposit_contract = &endpoint2.deposit_contract;
|
|
||||||
|
|
||||||
let initial_block_number = get_block_number(&endpoint2.web3()).await;
|
|
||||||
|
|
||||||
// Create some blocks and then consume them, performing the test `rounds` times.
|
|
||||||
let new_blocks = 4;
|
|
||||||
|
|
||||||
for _ in 0..new_blocks {
|
|
||||||
endpoint2
|
|
||||||
.ganache
|
|
||||||
.evm_mine()
|
|
||||||
.await
|
|
||||||
.expect("should mine block");
|
|
||||||
}
|
|
||||||
|
|
||||||
let endpoint1 = endpoint2
|
|
||||||
.ganache
|
|
||||||
.fork()
|
|
||||||
.expect("should start eth1 environment");
|
|
||||||
|
|
||||||
//mine additional blocks on top of the original endpoint
|
|
||||||
for _ in 0..new_blocks {
|
|
||||||
endpoint2
|
|
||||||
.ganache
|
|
||||||
.evm_mine()
|
|
||||||
.await
|
|
||||||
.expect("should mine block");
|
|
||||||
}
|
|
||||||
|
|
||||||
let service = Service::new(
|
|
||||||
Config {
|
|
||||||
endpoints: Eth1Endpoint::NoAuth(vec![
|
|
||||||
SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
|
|
||||||
SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
|
|
||||||
]),
|
|
||||||
deposit_contract_address: deposit_contract.address(),
|
|
||||||
lowest_cached_block_number: initial_block_number,
|
|
||||||
follow_distance: 0,
|
|
||||||
..Config::default()
|
|
||||||
},
|
|
||||||
log.clone(),
|
|
||||||
MainnetEthSpec::default_spec(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let endpoint1_block_number = get_block_number(&endpoint1.web3).await;
|
|
||||||
//the first call will only query endpoint1
|
|
||||||
service.update().await.expect("should update deposit cache");
|
|
||||||
assert_eq!(
|
|
||||||
service.deposits().read().last_processed_block.unwrap(),
|
|
||||||
endpoint1_block_number
|
|
||||||
);
|
|
||||||
|
|
||||||
drop(endpoint1);
|
|
||||||
|
|
||||||
let endpoint2_block_number = get_block_number(&endpoint2.web3()).await;
|
|
||||||
assert!(endpoint1_block_number < endpoint2_block_number);
|
|
||||||
//endpoint1 is offline => query will import blocks from endpoint2
|
|
||||||
service.update().await.expect("should update deposit cache");
|
|
||||||
assert_eq!(
|
|
||||||
service.deposits().read().last_processed_block.unwrap(),
|
|
||||||
endpoint2_block_number
|
|
||||||
);
|
|
||||||
}
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn test_fallback_when_wrong_chain_id() {
|
|
||||||
async {
|
|
||||||
let log = null_logger();
|
|
||||||
let correct_chain_id: u64 = DEFAULT_CHAIN_ID.into();
|
|
||||||
let wrong_chain_id = correct_chain_id + 1;
|
|
||||||
let endpoint1 = GanacheEth1Instance::new(wrong_chain_id)
|
|
||||||
.await
|
|
||||||
.expect("should start eth1 environment");
|
|
||||||
let endpoint2 = new_ganache_instance()
|
|
||||||
.await
|
|
||||||
.expect("should start eth1 environment");
|
|
||||||
let deposit_contract = &endpoint2.deposit_contract;
|
|
||||||
|
|
||||||
let initial_block_number = get_block_number(&endpoint2.web3()).await;
|
|
||||||
|
|
||||||
// Create some blocks and then consume them, performing the test `rounds` times.
|
|
||||||
let new_blocks = 4;
|
|
||||||
|
|
||||||
for _ in 0..new_blocks {
|
|
||||||
endpoint1
|
|
||||||
.ganache
|
|
||||||
.evm_mine()
|
|
||||||
.await
|
|
||||||
.expect("should mine block");
|
|
||||||
endpoint2
|
|
||||||
.ganache
|
|
||||||
.evm_mine()
|
|
||||||
.await
|
|
||||||
.expect("should mine block");
|
|
||||||
}
|
|
||||||
|
|
||||||
//additional blocks for endpoint1 to be able to distinguish
|
|
||||||
for _ in 0..new_blocks {
|
|
||||||
endpoint1
|
|
||||||
.ganache
|
|
||||||
.evm_mine()
|
|
||||||
.await
|
|
||||||
.expect("should mine block");
|
|
||||||
}
|
|
||||||
|
|
||||||
-let service = Service::new(
-Config {
-endpoints: Eth1Endpoint::NoAuth(vec![
-SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
-SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
-]),
-deposit_contract_address: deposit_contract.address(),
-lowest_cached_block_number: initial_block_number,
-follow_distance: 0,
-..Config::default()
-},
-log.clone(),
-MainnetEthSpec::default_spec(),
-);
-
-let endpoint1_block_number = get_block_number(&endpoint1.web3()).await;
-let endpoint2_block_number = get_block_number(&endpoint2.web3()).await;
-assert!(endpoint2_block_number < endpoint1_block_number);
-//the call will fallback to endpoint2
-service.update().await.expect("should update deposit cache");
-assert_eq!(
-service.deposits().read().last_processed_block.unwrap(),
-endpoint2_block_number
-);
-}
-.await;
-}
-
-#[tokio::test]
-async fn test_fallback_when_node_far_behind() {
-async {
-let log = null_logger();
-let endpoint2 = new_ganache_instance()
-.await
-.expect("should start eth1 environment");
-let deposit_contract = &endpoint2.deposit_contract;
-
-let initial_block_number = get_block_number(&endpoint2.web3()).await;
-
-// Create some blocks and then consume them, performing the test `rounds` times.
-let new_blocks = 4;
-
-for _ in 0..new_blocks {
-endpoint2
-.ganache
-.evm_mine()
-.await
-.expect("should mine block");
-}
-
-let endpoint1 = endpoint2
-.ganache
-.fork()
-.expect("should start eth1 environment");
-
-let service = Service::new(
-Config {
-endpoints: Eth1Endpoint::NoAuth(vec![
-SensitiveUrl::parse(endpoint1.endpoint().as_str()).unwrap(),
-SensitiveUrl::parse(endpoint2.endpoint().as_str()).unwrap(),
-]),
-deposit_contract_address: deposit_contract.address(),
-lowest_cached_block_number: initial_block_number,
-follow_distance: 0,
-node_far_behind_seconds: 5,
-..Config::default()
-},
-log.clone(),
-MainnetEthSpec::default_spec(),
-);
-
-let endpoint1_block_number = get_block_number(&endpoint1.web3).await;
-//the first call will only query endpoint1
-service.update().await.expect("should update deposit cache");
-assert_eq!(
-service.deposits().read().last_processed_block.unwrap(),
-endpoint1_block_number
-);
-
-sleep(Duration::from_secs(7)).await;
-
-//both endpoints don't have recent blocks => should return error
-assert!(service.update().await.is_err());
-
-//produce some new blocks on endpoint2
-for _ in 0..new_blocks {
-endpoint2
-.ganache
-.evm_mine()
-.await
-.expect("should mine block");
-}
-
-let endpoint2_block_number = get_block_number(&endpoint2.web3()).await;
-
-//endpoint1 is far behind + endpoint2 not => update will import blocks from endpoint2
-service.update().await.expect("should update deposit cache");
-assert_eq!(
-service.deposits().read().last_processed_block.unwrap(),
-endpoint2_block_number
-);
-}
-.await;
-}
-}
@@ -38,11 +38,11 @@ rand = "0.8.5"
zeroize = { version = "1.4.2", features = ["zeroize_derive"] }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lazy_static = "1.4.0"
-ethers-core = { git = "https://github.com/gakonst/ethers-rs", rev = "02ad93a1cfb7b62eb051c77c61dc4c0218428e4a" }
+ethers-core = "0.17.0"
builder_client = { path = "../builder_client" }
fork_choice = { path = "../../consensus/fork_choice" }
-mev-build-rs = {git = "https://github.com/ralexstokes/mev-rs", rev = "a088806575805c00d63fa59c002abc5eb1dc7709"}
+mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" }
-ethereum-consensus = {git = "https://github.com/ralexstokes/ethereum-consensus", rev = "e1188b1" }
+ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus", rev = "a8110af76d97bf2bf27fb987a671808fcbdf1834" }
ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" }
tokio-stream = { version = "0.1.9", features = [ "sync" ] }
strum = "0.24.0"
@@ -211,6 +211,7 @@ pub mod deposit_methods {
#[derive(Clone, Copy)]
pub enum BlockQuery {
Number(u64),
+Hash(Hash256),
Latest,
}
@@ -325,9 +326,12 @@ pub mod deposit_methods {
query: BlockQuery,
timeout: Duration,
) -> Result<Block, String> {
-let query_param = match query {
+let (method, query_param) = match query {
-BlockQuery::Number(block_number) => format!("0x{:x}", block_number),
+BlockQuery::Number(block_number) => {
-BlockQuery::Latest => "latest".to_string(),
+("eth_getBlockByNumber", format!("0x{:x}", block_number))
+}
+BlockQuery::Hash(block_hash) => ("eth_getBlockByHash", format!("{:?}", block_hash)),
+BlockQuery::Latest => ("eth_getBlockByNumber", "latest".to_string()),
};
let params = json!([
query_param,
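The new `Hash` variant lets callers ask the eth1 node for a specific block by hash instead of only by number or `latest`. A minimal sketch of a call site follows; the `client` handle and the `get_block` method name are illustrative assumptions, not part of this diff:

// Illustrative only: forwards a BlockQuery to the JSON-RPC helper shown above.
let by_number = client.get_block(BlockQuery::Number(12_345), timeout).await?;
let by_hash = client.get_block(BlockQuery::Hash(block_hash), timeout).await?;
let latest = client.get_block(BlockQuery::Latest, timeout).await?;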
@@ -335,9 +339,9 @@ pub mod deposit_methods {
]);

let response: Value = self
-.rpc_request("eth_getBlockByNumber", params, timeout)
+.rpc_request(method, params, timeout)
.await
-.map_err(|e| format!("eth_getBlockByNumber call failed {:?}", e))?;
+.map_err(|e| format!("{} call failed {:?}", method, e))?;

let hash: Vec<u8> = hex_to_bytes(
response
@@ -521,22 +525,32 @@ pub mod deposit_methods {
pub struct HttpJsonRpc {
pub client: Client,
pub url: SensitiveUrl,
+pub execution_timeout_multiplier: u32,
auth: Option<Auth>,
}

impl HttpJsonRpc {
-pub fn new(url: SensitiveUrl) -> Result<Self, Error> {
+pub fn new(
+url: SensitiveUrl,
+execution_timeout_multiplier: Option<u32>,
+) -> Result<Self, Error> {
Ok(Self {
client: Client::builder().build()?,
url,
+execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1),
auth: None,
})
}

-pub fn new_with_auth(url: SensitiveUrl, auth: Auth) -> Result<Self, Error> {
+pub fn new_with_auth(
+url: SensitiveUrl,
+auth: Auth,
+execution_timeout_multiplier: Option<u32>,
+) -> Result<Self, Error> {
Ok(Self {
client: Client::builder().build()?,
url,
+execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1),
auth: Some(auth),
})
}
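Both constructors now take an optional `execution_timeout_multiplier`, which scales every engine and eth1 request timeout and falls back to 1 when `None` is passed. A hedged sketch of the two call shapes, with placeholder `url`, `echo_url` and `auth` values:

// Sketch only: `None` keeps the default timeouts, `Some(2)` doubles them for a slow engine.
let api = HttpJsonRpc::new(url, None)?;
let patient_api = HttpJsonRpc::new_with_auth(echo_url, auth, Some(2))?;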
@@ -593,7 +607,11 @@ impl std::fmt::Display for HttpJsonRpc {
impl HttpJsonRpc {
pub async fn upcheck(&self) -> Result<(), Error> {
let result: serde_json::Value = self
-.rpc_request(ETH_SYNCING, json!([]), ETH_SYNCING_TIMEOUT)
+.rpc_request(
+ETH_SYNCING,
+json!([]),
+ETH_SYNCING_TIMEOUT * self.execution_timeout_multiplier,
+)
.await?;

/*
@@ -617,7 +635,7 @@ impl HttpJsonRpc {
self.rpc_request(
ETH_GET_BLOCK_BY_NUMBER,
params,
-ETH_GET_BLOCK_BY_NUMBER_TIMEOUT,
+ETH_GET_BLOCK_BY_NUMBER_TIMEOUT * self.execution_timeout_multiplier,
)
.await
}
@@ -628,7 +646,11 @@ impl HttpJsonRpc {
) -> Result<Option<ExecutionBlock>, Error> {
let params = json!([block_hash, RETURN_FULL_TRANSACTION_OBJECTS]);

-self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT)
+self.rpc_request(
+ETH_GET_BLOCK_BY_HASH,
+params,
+ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
+)
.await
}

@@ -637,7 +659,11 @@ impl HttpJsonRpc {
block_hash: ExecutionBlockHash,
) -> Result<Option<ExecutionBlockWithTransactions<T>>, Error> {
let params = json!([block_hash, true]);
-self.rpc_request(ETH_GET_BLOCK_BY_HASH, params, ETH_GET_BLOCK_BY_HASH_TIMEOUT)
+self.rpc_request(
+ETH_GET_BLOCK_BY_HASH,
+params,
+ETH_GET_BLOCK_BY_HASH_TIMEOUT * self.execution_timeout_multiplier,
+)
.await
}

@@ -648,7 +674,11 @@ impl HttpJsonRpc {
let params = json!([JsonExecutionPayload::from(execution_payload)]);

let response: JsonPayloadStatusV1 = self
-.rpc_request(ENGINE_NEW_PAYLOAD_V1, params, ENGINE_NEW_PAYLOAD_TIMEOUT)
+.rpc_request(
+ENGINE_NEW_PAYLOAD_V1,
+params,
+ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
+)
.await?;

Ok(response.into())
@@ -661,7 +691,11 @@ impl HttpJsonRpc {
let params = json!([JsonPayloadIdRequest::from(payload_id)]);

let response: JsonExecutionPayload<T> = self
-.rpc_request(ENGINE_GET_PAYLOAD_V1, params, ENGINE_GET_PAYLOAD_TIMEOUT)
+.rpc_request(
+ENGINE_GET_PAYLOAD_V1,
+params,
+ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier,
+)
.await?;

Ok(response.into())
@@ -698,7 +732,7 @@ impl HttpJsonRpc {
.rpc_request(
ENGINE_FORKCHOICE_UPDATED_V1,
params,
-ENGINE_FORKCHOICE_UPDATED_TIMEOUT,
+ENGINE_FORKCHOICE_UPDATED_TIMEOUT * self.execution_timeout_multiplier,
)
.await?;

@@ -715,7 +749,8 @@ impl HttpJsonRpc {
.rpc_request(
ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1,
params,
-ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT,
+ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT
+* self.execution_timeout_multiplier,
)
.await?;

@@ -755,13 +790,13 @@ mod test {
let echo_auth =
Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None);
(
-Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth).unwrap()),
+Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None).unwrap()),
-Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth).unwrap()),
+Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None).unwrap()),
)
} else {
(
-Arc::new(HttpJsonRpc::new(rpc_url).unwrap()),
+Arc::new(HttpJsonRpc::new(rpc_url, None).unwrap()),
-Arc::new(HttpJsonRpc::new(echo_url).unwrap()),
+Arc::new(HttpJsonRpc::new(echo_url, None).unwrap()),
)
};
@@ -1,5 +1,6 @@
use super::*;
use serde::{Deserialize, Serialize};
+use strum::EnumString;
use superstruct::superstruct;
use types::{
Blob, EthSpec, ExecutionBlockHash, ExecutionPayloadEip4844, ExecutionPayloadHeaderEip4844,
@@ -514,8 +515,9 @@ impl From<JsonForkChoiceStateV1> for ForkChoiceState {
}
}

-#[derive(Debug, PartialEq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, EnumString)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
+#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
pub enum JsonPayloadStatusV1Status {
Valid,
Invalid,
@@ -226,6 +226,7 @@ pub struct Config {
pub default_datadir: PathBuf,
/// The minimum value of an external payload for it to be considered in a proposal.
pub builder_profit_threshold: u128,
+pub execution_timeout_multiplier: Option<u32>,
}

/// Provides access to one execution engine and provides a neat interface for consumption by the
@@ -247,6 +248,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
jwt_version,
default_datadir,
builder_profit_threshold,
+execution_timeout_multiplier,
} = config;

if urls.len() > 1 {
@@ -291,7 +293,8 @@ impl<T: EthSpec> ExecutionLayer<T> {
let engine: Engine = {
let auth = Auth::new(jwt_key, jwt_id, jwt_version);
debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path());
-let api = HttpJsonRpc::new_with_auth(execution_url, auth).map_err(Error::ApiError)?;
+let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier)
+.map_err(Error::ApiError)?;
Engine::new(api, executor.clone(), &log)
};
@@ -77,6 +77,11 @@ pub async fn handle_rpc<T: EthSpec>(
ENGINE_NEW_PAYLOAD_V1 => {
let request: JsonExecutionPayload<T> = get_param(params, 0)?;

+// Canned responses set by block hash take priority.
+if let Some(status) = ctx.get_new_payload_status(&request.block_hash) {
+return Ok(serde_json::to_value(JsonPayloadStatusV1::from(status)).unwrap());
+}
+
let (static_response, should_import) =
if let Some(mut response) = ctx.static_new_payload_response.lock().clone() {
if response.status.status == PayloadStatusV1Status::Valid {
@@ -120,6 +125,15 @@ pub async fn handle_rpc<T: EthSpec>(

let head_block_hash = forkchoice_state.head_block_hash;

+// Canned responses set by block hash take priority.
+if let Some(status) = ctx.get_fcu_payload_status(&head_block_hash) {
+let response = JsonForkchoiceUpdatedV1Response {
+payload_status: JsonPayloadStatusV1::from(status),
+payload_id: None,
+};
+return Ok(serde_json::to_value(response).unwrap());
+}
+
let mut response = ctx
.execution_block_generator
.write()
@@ -12,6 +12,7 @@ use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
use serde::{Deserialize, Serialize};
use serde_json::json;
use slog::{info, Logger};
+use std::collections::HashMap;
use std::convert::Infallible;
use std::future::Future;
use std::marker::PhantomData;
@@ -98,6 +99,8 @@ impl<T: EthSpec> MockServer<T> {
static_new_payload_response: <_>::default(),
static_forkchoice_updated_response: <_>::default(),
static_get_block_by_hash_response: <_>::default(),
+new_payload_statuses: <_>::default(),
+fcu_payload_statuses: <_>::default(),
_phantom: PhantomData,
});

@@ -370,6 +373,25 @@ impl<T: EthSpec> MockServer<T> {
pub fn drop_all_blocks(&self) {
self.ctx.execution_block_generator.write().drop_all_blocks()
}

+pub fn set_payload_statuses(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) {
+self.set_new_payload_status(block_hash, status.clone());
+self.set_fcu_payload_status(block_hash, status);
+}
+
+pub fn set_new_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) {
+self.ctx
+.new_payload_statuses
+.lock()
+.insert(block_hash, status);
+}
+
+pub fn set_fcu_payload_status(&self, block_hash: ExecutionBlockHash, status: PayloadStatusV1) {
+self.ctx
+.fcu_payload_statuses
+.lock()
+.insert(block_hash, status);
+}
}

#[derive(Debug)]
@@ -419,9 +441,33 @@ pub struct Context<T: EthSpec> {
pub static_new_payload_response: Arc<Mutex<Option<StaticNewPayloadResponse>>>,
pub static_forkchoice_updated_response: Arc<Mutex<Option<PayloadStatusV1>>>,
pub static_get_block_by_hash_response: Arc<Mutex<Option<Option<ExecutionBlock>>>>,

+// Canned responses by block hash.
+//
+// This is a more flexible and less stateful alternative to `static_new_payload_response`
+// and `preloaded_responses`.
+pub new_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>,
+pub fcu_payload_statuses: Arc<Mutex<HashMap<ExecutionBlockHash, PayloadStatusV1>>>,

pub _phantom: PhantomData<T>,
}

+impl<T: EthSpec> Context<T> {
+pub fn get_new_payload_status(
+&self,
+block_hash: &ExecutionBlockHash,
+) -> Option<PayloadStatusV1> {
+self.new_payload_statuses.lock().get(block_hash).cloned()
+}
+
+pub fn get_fcu_payload_status(
+&self,
+block_hash: &ExecutionBlockHash,
+) -> Option<PayloadStatusV1> {
+self.fcu_payload_statuses.lock().get(block_hash).cloned()
+}
+}

/// Configuration for the HTTP server.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Config {
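The canned responses above let a test pin the status returned for one specific block hash on both `engine_newPayload` and `engine_forkchoiceUpdated`. A hypothetical test snippet; the `mock_server` and `block_hash` handles and the exact `PayloadStatusV1` field set are assumptions for illustration:

// Force the mock execution layer to report SYNCING for this block hash only.
mock_server.set_payload_statuses(
block_hash,
PayloadStatusV1 {
status: PayloadStatusV1Status::Syncing,
latest_valid_hash: None,
validation_error: None,
},
);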
@@ -23,7 +23,9 @@ pub fn genesis_deposits(
return Err(String::from("Failed to push leaf"));
}

-let (_, mut proof) = tree.generate_proof(i, depth);
+let (_, mut proof) = tree
+.generate_proof(i, depth)
+.map_err(|e| format!("Error generating merkle proof: {:?}", e))?;
proof.push(Hash256::from_slice(&int_to_fixed_bytes32((i + 1) as u64)));

assert_eq!(
@@ -43,7 +43,7 @@ impl Eth1GenesisService {
/// Creates a new service. Does not attempt to connect to the Eth1 node.
///
/// Modifies the given `config` to make it more suitable to the task of listening to genesis.
-pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Self {
+pub fn new(config: Eth1Config, log: Logger, spec: ChainSpec) -> Result<Self, String> {
let config = Eth1Config {
// Truncating the block cache makes searching for genesis more
// complicated.
@@ -64,15 +64,16 @@ impl Eth1GenesisService {
..config
};

-Self {
+Ok(Self {
-eth1_service: Eth1Service::new(config, log, spec),
+eth1_service: Eth1Service::new(config, log, spec)
+.map_err(|e| format!("Failed to create eth1 service: {:?}", e))?,
stats: Arc::new(Statistics {
highest_processed_block: AtomicU64::new(0),
active_validator_count: AtomicUsize::new(0),
total_deposit_count: AtomicUsize::new(0),
latest_timestamp: AtomicU64::new(0),
}),
-}
+})
}

/// Returns the first eth1 block that has enough deposits that it's a (potentially invalid)
@@ -85,7 +86,7 @@ impl Eth1GenesisService {
.deposits()
.read()
.cache
-.get(min_genesis_active_validator_count.saturating_sub(1))
+.get_log(min_genesis_active_validator_count.saturating_sub(1))
.map(|log| log.block_number)
}
}
@@ -112,11 +113,9 @@ impl Eth1GenesisService {
"Importing eth1 deposit logs";
);

-let endpoints = eth1_service.init_endpoints()?;
-
loop {
let update_result = eth1_service
-.update_deposit_cache(None, &endpoints)
+.update_deposit_cache(None)
.await
.map_err(|e| format!("{:?}", e));

@@ -158,7 +157,7 @@ impl Eth1GenesisService {
}

// Download new eth1 blocks into the cache.
-let blocks_imported = match eth1_service.update_block_cache(None, &endpoints).await {
+let blocks_imported = match eth1_service.update_block_cache(None).await {
Ok(outcome) => {
debug!(
log,
@@ -44,10 +44,9 @@ fn basic() {

let service = Eth1GenesisService::new(
Eth1Config {
-endpoints: Eth1Endpoint::NoAuth(vec![SensitiveUrl::parse(
+endpoint: Eth1Endpoint::NoAuth(
-eth1.endpoint().as_str(),
+SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(),
-)
+),
-.unwrap()]),
deposit_contract_address: deposit_contract.address(),
deposit_contract_deploy_block: now,
lowest_cached_block_number: now,
@@ -57,7 +56,8 @@ fn basic() {
},
log,
spec.clone(),
-);
+)
+.unwrap();

// NOTE: this test is sensitive to the response speed of the external web3 server. If
// you're experiencing failures, try increasing the update_interval.
@@ -1535,6 +1535,53 @@ pub fn serve<T: BeaconChainTypes>(
},
);

+// GET beacon/deposit_snapshot
+let get_beacon_deposit_snapshot = eth_v1
+.and(warp::path("beacon"))
+.and(warp::path("deposit_snapshot"))
+.and(warp::path::end())
+.and(warp::header::optional::<api_types::Accept>("accept"))
+.and(eth1_service_filter.clone())
+.and_then(
+|accept_header: Option<api_types::Accept>, eth1_service: eth1::Service| {
+blocking_task(move || match accept_header {
+Some(api_types::Accept::Json) | None => {
+let snapshot = eth1_service.get_deposit_snapshot();
+Ok(
+warp::reply::json(&api_types::GenericResponse::from(snapshot))
+.into_response(),
+)
+}
+_ => eth1_service
+.get_deposit_snapshot()
+.map(|snapshot| {
+Response::builder()
+.status(200)
+.header("Content-Type", "application/octet-stream")
+.body(snapshot.as_ssz_bytes().into())
+.map_err(|e| {
+warp_utils::reject::custom_server_error(format!(
+"failed to create response: {}",
+e
+))
+})
+})
+.unwrap_or_else(|| {
+Response::builder()
+.status(503)
+.header("Content-Type", "application/octet-stream")
+.body(Vec::new().into())
+.map_err(|e| {
+warp_utils::reject::custom_server_error(format!(
+"failed to create response: {}",
+e
+))
+})
+}),
+})
+},
+);

/*
* config
*/
@@ -3122,6 +3169,7 @@ pub fn serve<T: BeaconChainTypes>(
.or(get_beacon_pool_attester_slashings.boxed())
.or(get_beacon_pool_proposer_slashings.boxed())
.or(get_beacon_pool_voluntary_exits.boxed())
+.or(get_beacon_deposit_snapshot.boxed())
.or(get_config_fork_schedule.boxed())
.or(get_config_spec.boxed())
.or(get_config_deposit_contract.boxed())
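Once the route is registered, the snapshot can be fetched over HTTP as JSON, or as SSZ bytes when a non-JSON `Accept` header is sent. A hedged example using `reqwest`; the default port 5052 and the `data` wrapper added by `GenericResponse` are assumptions about a standard local setup:

// Fetch the JSON form of the deposit tree snapshot from a local node.
let body = reqwest::get("http://localhost:5052/eth/v1/beacon/deposit_snapshot")
.await?
.text()
.await?;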
@@ -155,33 +155,12 @@ impl StateId {
Ok((state, execution_optimistic))
}

-/*
/// Map a function across the `BeaconState` identified by `self`.
///
+/// The optimistic status of the requested state is also provided to the `func` closure.
+///
/// This function will avoid instantiating/copying a new state when `self` points to the head
/// of the chain.
-#[allow(dead_code)]
-pub fn map_state<T: BeaconChainTypes, F, U>(
-&self,
-chain: &BeaconChain<T>,
-func: F,
-) -> Result<U, warp::Rejection>
-where
-F: Fn(&BeaconState<T::EthSpec>) -> Result<U, warp::Rejection>,
-{
-match &self.0 {
-CoreStateId::Head => chain
-.with_head(|snapshot| Ok(func(&snapshot.beacon_state)))
-.map_err(warp_utils::reject::beacon_chain_error)?,
-_ => func(&self.state(chain)?),
-}
-}
-*/
-
-/// Functions the same as `map_state` but additionally computes the value of
-/// `execution_optimistic` of the state identified by `self`.
-///
-/// This is to avoid re-instantiating `state` unnecessarily.
pub fn map_state_and_execution_optimistic<T: BeaconChainTypes, F, U>(
&self,
chain: &BeaconChain<T>,
@@ -131,7 +131,8 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
pm.inject_connection_established(&peer_id, &con_id, &connected_point, None, 0);
*network_globals.sync_state.write() = SyncState::Synced;

-let eth1_service = eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone());
+let eth1_service =
+eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap();

let context = Arc::new(Context {
config: Config {
@@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"

[dependencies]
-discv5 = { version = "0.1.0-beta.13", features = ["libp2p"] }
+discv5 = { version = "0.1.0", features = ["libp2p"] }
unsigned-varint = { version = "0.6.0", features = ["codec"] }
types = { path = "../../consensus/types" }
eth2_ssz_types = "0.2.2"
@@ -28,7 +28,7 @@ smallvec = "1.6.1"
tokio-io-timeout = "1.1.1"
lru = "0.7.1"
parking_lot = "0.12.0"
-sha2 = "0.9.1"
+sha2 = "0.10"
snap = "1.0.1"
hex = "0.4.2"
tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] }
@@ -176,7 +176,7 @@ impl Default for Config {
.filter_rate_limiter(filter_rate_limiter)
.filter_max_bans_per_ip(Some(5))
.filter_max_nodes_per_ip(Some(10))
-.table_filter(|enr| enr.ip().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs
+.table_filter(|enr| enr.ip4().map_or(false, |ip| is_global(&ip))) // Filter non-global IPs
.ban_duration(Some(Duration::from_secs(3600)))
.ping_interval(Duration::from_secs(300))
.build();
@@ -149,12 +149,12 @@ pub fn create_enr_builder_from_config<T: EnrKey>(
builder.ip(enr_address);
}
if let Some(udp_port) = config.enr_udp_port {
-builder.udp(udp_port);
+builder.udp4(udp_port);
}
// we always give it our listening tcp port
if enable_tcp {
let tcp_port = config.enr_tcp_port.unwrap_or(config.libp2p_port);
-builder.tcp(tcp_port);
+builder.tcp4(tcp_port);
}
builder
}
@@ -189,13 +189,13 @@ pub fn build_enr<T: EthSpec>(
/// If this function returns true, we use the `disk_enr`.
fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool {
// take preference over disk_enr address if one is not specified
-(local_enr.ip().is_none() || local_enr.ip() == disk_enr.ip())
+(local_enr.ip4().is_none() || local_enr.ip4() == disk_enr.ip4())
// tcp ports must match
-&& local_enr.tcp() == disk_enr.tcp()
+&& local_enr.tcp4() == disk_enr.tcp4()
// must match on the same fork
&& local_enr.get(ETH2_ENR_KEY) == disk_enr.get(ETH2_ENR_KEY)
// take preference over disk udp port if one is not specified
-&& (local_enr.udp().is_none() || local_enr.udp() == disk_enr.udp())
+&& (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4())
// we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match,
// otherwise we use a new ENR. This will likely only be true for non-validating nodes
&& local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY)
@@ -48,14 +48,14 @@ impl EnrExt for Enr {
/// The vector remains empty if these fields are not defined.
fn multiaddr(&self) -> Vec<Multiaddr> {
let mut multiaddrs: Vec<Multiaddr> = Vec::new();
-if let Some(ip) = self.ip() {
+if let Some(ip) = self.ip4() {
-if let Some(udp) = self.udp() {
+if let Some(udp) = self.udp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Udp(udp));
multiaddrs.push(multiaddr);
}

-if let Some(tcp) = self.tcp() {
+if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp));
multiaddrs.push(multiaddr);
@@ -84,15 +84,15 @@ impl EnrExt for Enr {
fn multiaddr_p2p(&self) -> Vec<Multiaddr> {
let peer_id = self.peer_id();
let mut multiaddrs: Vec<Multiaddr> = Vec::new();
-if let Some(ip) = self.ip() {
+if let Some(ip) = self.ip4() {
-if let Some(udp) = self.udp() {
+if let Some(udp) = self.udp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Udp(udp));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddrs.push(multiaddr);
}

-if let Some(tcp) = self.tcp() {
+if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp));
multiaddr.push(Protocol::P2p(peer_id.into()));
@@ -124,8 +124,8 @@ impl EnrExt for Enr {
fn multiaddr_p2p_tcp(&self) -> Vec<Multiaddr> {
let peer_id = self.peer_id();
let mut multiaddrs: Vec<Multiaddr> = Vec::new();
-if let Some(ip) = self.ip() {
+if let Some(ip) = self.ip4() {
-if let Some(tcp) = self.tcp() {
+if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp));
multiaddr.push(Protocol::P2p(peer_id.into()));
@@ -150,8 +150,8 @@ impl EnrExt for Enr {
fn multiaddr_p2p_udp(&self) -> Vec<Multiaddr> {
let peer_id = self.peer_id();
let mut multiaddrs: Vec<Multiaddr> = Vec::new();
-if let Some(ip) = self.ip() {
+if let Some(ip) = self.ip4() {
-if let Some(udp) = self.udp() {
+if let Some(udp) = self.udp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Udp(udp));
multiaddr.push(Protocol::P2p(peer_id.into()));
@@ -173,8 +173,8 @@ impl EnrExt for Enr {
/// The vector remains empty if these fields are not defined.
fn multiaddr_tcp(&self) -> Vec<Multiaddr> {
let mut multiaddrs: Vec<Multiaddr> = Vec::new();
-if let Some(ip) = self.ip() {
+if let Some(ip) = self.ip4() {
-if let Some(tcp) = self.tcp() {
+if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp));
multiaddrs.push(multiaddr);
@@ -232,6 +232,7 @@ impl CombinedKeyExt for CombinedKey {
.expect("libp2p key must be valid");
Ok(CombinedKey::from(ed_keypair))
}
+Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
}
}
}
@@ -265,6 +266,10 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, Strin
hasher.finalize(&mut output);
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
}
+PublicKey::Ecdsa(_) => Err(format!(
+"Unsupported public key (Ecdsa) from peer {}",
+peer_id
+)),
}
}
@@ -197,7 +197,9 @@ impl<TSpec: EthSpec> Discovery<TSpec> {

let local_enr = network_globals.local_enr.read().clone();

-info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(), "ip" => ?local_enr.ip(), "udp"=> ?local_enr.udp(), "tcp" => ?local_enr.tcp());
+info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(),
+"ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6()
+);

let listen_socket = SocketAddr::new(config.listen_address, config.discovery_port);

@@ -214,9 +216,9 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
"Adding node to routing table";
"node_id" => %bootnode_enr.node_id(),
"peer_id" => %bootnode_enr.peer_id(),
-"ip" => ?bootnode_enr.ip(),
+"ip" => ?bootnode_enr.ip4(),
-"udp" => ?bootnode_enr.udp(),
+"udp" => ?bootnode_enr.udp4(),
-"tcp" => ?bootnode_enr.tcp()
+"tcp" => ?bootnode_enr.tcp4()
);
let repr = bootnode_enr.to_string();
let _ = discv5.add_enr(bootnode_enr).map_err(|e| {
@@ -268,9 +270,9 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
"Adding node to routing table";
"node_id" => %enr.node_id(),
"peer_id" => %enr.peer_id(),
-"ip" => ?enr.ip(),
+"ip" => ?enr.ip4(),
-"udp" => ?enr.udp(),
+"udp" => ?enr.udp4(),
-"tcp" => ?enr.tcp()
+"tcp" => ?enr.tcp4()
);
let _ = discv5.add_enr(enr).map_err(|e| {
error!(
@@ -763,7 +765,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
// we can connect to peers who aren't compatible with an upcoming fork.
// `fork_digest` **must** be same.
enr.eth2().map(|e| e.fork_digest) == Ok(enr_fork_id.fork_digest)
-&& (enr.tcp().is_some() || enr.tcp6().is_some())
+&& (enr.tcp4().is_some() || enr.tcp6().is_some())
};

// General predicate
@@ -1040,7 +1042,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
}
Discv5Event::EnrAdded { .. }
| Discv5Event::TalkRequest(_)
-| Discv5Event::NodeInserted { .. } => {} // Ignore all other discv5 server events
+| Discv5Event::NodeInserted { .. }
+| Discv5Event::SessionEstablished { .. } => {} // Ignore all other discv5 server events
}
}
}
@@ -130,7 +130,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
}

// Check NAT if metrics are enabled
-if self.network_globals.local_enr.read().udp().is_some() {
+if self.network_globals.local_enr.read().udp4().is_some() {
metrics::check_nat();
}
@@ -1359,10 +1359,12 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
Some(NetworkEvent::PeerDisconnected(peer_id))
}
PeerManagerEvent::Banned(peer_id, associated_ips) => {
+self.swarm.ban_peer_id(peer_id);
self.discovery_mut().ban_peer(&peer_id, associated_ips);
Some(NetworkEvent::PeerBanned(peer_id))
}
PeerManagerEvent::UnBanned(peer_id, associated_ips) => {
+self.swarm.unban_peer_id(peer_id);
self.discovery_mut().unban_peer(&peer_id, associated_ips);
Some(NetworkEvent::PeerUnbanned(peer_id))
}
@@ -44,3 +44,8 @@ strum = "0.24.0"
tokio-util = { version = "0.6.3", features = ["time"] }
derivative = "2.2.0"
delay_map = "0.1.1"
+ethereum-types = { version = "0.12.1", optional = true }
+
+[features]
+deterministic_long_lived_attnets = [ "ethereum-types" ]
+# default = ["deterministic_long_lived_attnets"]
@@ -299,9 +299,13 @@ impl<T: BeaconChainTypes> NetworkService<T> {
)?;

// attestation subnet service
-let attestation_service =
+let attestation_service = AttestationService::new(
-AttestationService::new(beacon_chain.clone(), config, &network_log);
+beacon_chain.clone(),
+#[cfg(feature = "deterministic_long_lived_attnets")]
+network_globals.local_enr().node_id().raw().into(),
+config,
+&network_log,
+);
// sync committee subnet service
let sync_committee_service =
SyncCommitteeService::new(beacon_chain.clone(), config, &network_log);
@@ -3,7 +3,7 @@
//! determines whether attestations should be aggregated and/or passed to the beacon node.

use super::SubnetServiceMessage;
-#[cfg(test)]
+#[cfg(any(test, feature = "deterministic_long_lived_attnets"))]
use std::collections::HashSet;
use std::collections::{HashMap, VecDeque};
use std::pin::Pin;
@@ -15,6 +15,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes};
use delay_map::{HashMapDelay, HashSetDelay};
use futures::prelude::*;
use lighthouse_network::{NetworkConfig, Subnet, SubnetDiscovery};
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
use rand::seq::SliceRandom;
use slog::{debug, error, o, trace, warn};
use slot_clock::SlotClock;
@@ -28,6 +29,7 @@ use crate::metrics;
pub(crate) const MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD: u64 = 2;
/// The time (in slots) before a last seen validator is considered absent and we unsubscribe from
/// the random gossip topics that we subscribed to due to the validator connection.
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
const LAST_SEEN_VALIDATOR_TIMEOUT_SLOTS: u32 = 150;
/// The fraction of a slot that we subscribe to a subnet before the required slot.
///
@@ -70,6 +72,9 @@ pub struct AttestationService<T: BeaconChainTypes> {
/// Subnets we are currently subscribed to as long lived subscriptions.
///
/// We advertise these in our ENR. When these expire, the subnet is removed from our ENR.
+#[cfg(feature = "deterministic_long_lived_attnets")]
+long_lived_subscriptions: HashSet<SubnetId>,
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
long_lived_subscriptions: HashMapDelay<SubnetId, Slot>,

/// Short lived subscriptions that need to be done in the future.
@@ -83,6 +88,7 @@ pub struct AttestationService<T: BeaconChainTypes> {
/// subscribed to. As these time out, we unsubscribe for the required random subnets and update
/// our ENR.
/// This is a set of validator indices.
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
known_validators: HashSetDelay<u64>,

/// The waker for the current thread.
@@ -95,8 +101,17 @@ pub struct AttestationService<T: BeaconChainTypes> {
subscribe_all_subnets: bool,

/// For how many slots we subscribe to long lived subnets.
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
long_lived_subnet_subscription_slots: u64,

+/// Our Discv5 node_id.
+#[cfg(feature = "deterministic_long_lived_attnets")]
+node_id: ethereum_types::U256,
+
+/// Future used to manage subscribing and unsubscribing from long lived subnets.
+#[cfg(feature = "deterministic_long_lived_attnets")]
+next_long_lived_subscription_event: Pin<Box<tokio::time::Sleep>>,
+
/// The logger for the attestation service.
log: slog::Logger,
}
@@ -104,6 +119,7 @@ pub struct AttestationService<T: BeaconChainTypes> {
impl<T: BeaconChainTypes> AttestationService<T> {
/* Public functions */

+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
pub fn new(
beacon_chain: Arc<BeaconChain<T>>,
config: &NetworkConfig,
@@ -145,31 +161,85 @@ impl<T: BeaconChainTypes> AttestationService<T> {
}
}

+#[cfg(feature = "deterministic_long_lived_attnets")]
+pub fn new(
+beacon_chain: Arc<BeaconChain<T>>,
+node_id: ethereum_types::U256,
+config: &NetworkConfig,
+log: &slog::Logger,
+) -> Self {
+let log = log.new(o!("service" => "attestation_service"));
+
+// Calculate the random subnet duration from the spec constants.
+let slot_duration = beacon_chain.slot_clock.slot_duration();
+
+slog::info!(log, "Deterministic long lived subnets enabled"; "subnets_per_node" => beacon_chain.spec.subnets_per_node);
+
+let track_validators = !config.import_all_attestations;
+let aggregate_validators_on_subnet =
+track_validators.then(|| HashSetDelay::new(slot_duration));
+let mut service = AttestationService {
+events: VecDeque::with_capacity(10),
+beacon_chain,
+short_lived_subscriptions: HashMapDelay::new(slot_duration),
+long_lived_subscriptions: HashSet::default(),
+scheduled_short_lived_subscriptions: HashSetDelay::default(),
+aggregate_validators_on_subnet,
+waker: None,
+discovery_disabled: config.disable_discovery,
+subscribe_all_subnets: config.subscribe_all_subnets,
+node_id,
+next_long_lived_subscription_event: {
+// Set a dummy sleep. Calculating the current subnet subscriptions will update this
+// value with a smarter timing
+Box::pin(tokio::time::sleep(Duration::from_secs(1)))
+},
+log,
+};
+service.recompute_long_lived_subnets();
+service
+}

/// Return count of all currently subscribed subnets (long-lived **and** short-lived).
#[cfg(test)]
pub fn subscription_count(&self) -> usize {
if self.subscribe_all_subnets {
self.beacon_chain.spec.attestation_subnet_count as usize
} else {
-self.short_lived_subscriptions
+#[cfg(feature = "deterministic_long_lived_attnets")]
+let count = self
+.short_lived_subscriptions
+.keys()
+.chain(self.long_lived_subscriptions.iter())
+.collect::<HashSet<_>>()
+.len();
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
+let count = self
+.short_lived_subscriptions
.keys()
.chain(self.long_lived_subscriptions.keys())
.collect::<HashSet<_>>()
-.len()
+.len();
+count
}
}

-/// Give access to the current subscriptions for testing purposes.
+/// Returns whether we are subscribed to a subnet for testing purposes.
#[cfg(test)]
-pub(crate) fn subscriptions(
+pub(crate) fn is_subscribed(
&self,
+subnet_id: &SubnetId,
subscription_kind: SubscriptionKind,
-) -> &HashMapDelay<SubnetId, Slot> {
+) -> bool {
match subscription_kind {
-SubscriptionKind::LongLived => &self.long_lived_subscriptions,
+#[cfg(feature = "deterministic_long_lived_attnets")]
-SubscriptionKind::ShortLived => &self.short_lived_subscriptions,
+SubscriptionKind::LongLived => self.long_lived_subscriptions.contains(subnet_id),
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
+SubscriptionKind::LongLived => self.long_lived_subscriptions.contains_key(subnet_id),
+SubscriptionKind::ShortLived => self.short_lived_subscriptions.contains_key(subnet_id),
}
}

/// Processes a list of validator subscriptions.
///
/// This will:
@@ -197,6 +267,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
"Validator subscription";
"subscription" => ?subscription,
);
+#[cfg(not(feature = "deterministic_long_lived_attnets"))]
self.add_known_validator(subscription.validator_index);

let subnet_id = match SubnetId::compute_subnet::<T::EthSpec>(
@ -267,6 +338,111 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
fn recompute_long_lived_subnets(&mut self) {
|
||||||
|
// Ensure the next computation is scheduled even if assigning subnets fails.
|
||||||
|
let next_subscription_event = self
|
||||||
|
.recompute_long_lived_subnets_inner()
|
||||||
|
.unwrap_or_else(|_| self.beacon_chain.slot_clock.slot_duration());
|
||||||
|
|
||||||
|
debug!(self.log, "Recomputing deterministic long lived attnets");
|
||||||
|
self.next_long_lived_subscription_event =
|
||||||
|
Box::pin(tokio::time::sleep(next_subscription_event));
|
||||||
|
|
||||||
|
if let Some(waker) = self.waker.as_ref() {
|
||||||
|
waker.wake_by_ref();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets the long lived subnets the node should be subscribed to during the current epoch and
|
||||||
|
/// the remaining duration for which they remain valid.
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
fn recompute_long_lived_subnets_inner(&mut self) -> Result<Duration, ()> {
|
||||||
|
let current_epoch = self.beacon_chain.epoch().map_err(
|
||||||
|
|e| error!(self.log, "Failed to get the current epoch from clock"; "err" => ?e),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let (subnets, next_subscription_epoch) = SubnetId::compute_subnets_for_epoch::<T::EthSpec>(
|
||||||
|
self.node_id,
|
||||||
|
current_epoch,
|
||||||
|
&self.beacon_chain.spec,
|
||||||
|
)
|
||||||
|
.map_err(|e| error!(self.log, "Could not compute subnets for current epoch"; "err" => e))?;
|
||||||
|
|
||||||
|
let next_subscription_slot =
|
||||||
|
next_subscription_epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||||
|
let next_subscription_event = self
|
||||||
|
.beacon_chain
|
||||||
|
.slot_clock
|
||||||
|
.duration_to_slot(next_subscription_slot)
|
||||||
|
.ok_or_else(|| {
|
||||||
|
error!(
|
||||||
|
self.log,
|
||||||
|
"Failed to compute duration to next to long lived subscription event"
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
self.update_long_lived_subnets(subnets.collect());
|
||||||
|
|
||||||
|
Ok(next_subscription_event)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(test, feature = "deterministic_long_lived_attnets"))]
|
||||||
|
pub fn update_long_lived_subnets_testing(&mut self, subnets: HashSet<SubnetId>) {
|
||||||
|
self.update_long_lived_subnets(subnets)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Updates the long lived subnets.
|
||||||
|
///
|
||||||
|
/// New subnets are registered as subscribed, removed subnets as unsubscribed and the Enr
|
||||||
|
/// updated accordingly.
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
fn update_long_lived_subnets(&mut self, mut subnets: HashSet<SubnetId>) {
|
||||||
|
for subnet in &subnets {
|
||||||
|
// Add the events for those subnets that are new as long lived subscriptions.
|
||||||
|
if !self.long_lived_subscriptions.contains(subnet) {
|
||||||
|
// Check if this subnet is new and send the subscription event if needed.
|
||||||
|
if !self.short_lived_subscriptions.contains_key(subnet) {
|
||||||
|
debug!(self.log, "Subscribing to subnet";
|
||||||
|
"subnet" => ?subnet,
|
||||||
|
"subscription_kind" => ?SubscriptionKind::LongLived,
|
||||||
|
);
|
||||||
|
self.queue_event(SubnetServiceMessage::Subscribe(Subnet::Attestation(
|
||||||
|
*subnet,
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
self.queue_event(SubnetServiceMessage::EnrAdd(Subnet::Attestation(*subnet)));
|
||||||
|
if !self.discovery_disabled {
|
||||||
|
self.queue_event(SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery {
|
||||||
|
subnet: Subnet::Attestation(*subnet),
|
||||||
|
min_ttl: None,
|
||||||
|
}]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for subnets that are being removed
|
||||||
|
std::mem::swap(&mut self.long_lived_subscriptions, &mut subnets);
|
||||||
|
for subnet in subnets {
|
||||||
|
if !self.long_lived_subscriptions.contains(&subnet) {
|
||||||
|
if !self.short_lived_subscriptions.contains_key(&subnet) {
|
||||||
|
debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet, "subscription_kind" => ?SubscriptionKind::LongLived);
|
||||||
|
self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
|
||||||
|
subnet,
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
self.queue_event(SubnetServiceMessage::EnrRemove(Subnet::Attestation(subnet)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Overwrites the long lived subscriptions for testing.
|
||||||
|
#[cfg(all(test, feature = "deterministic_long_lived_attnets"))]
|
||||||
|
pub fn set_long_lived_subscriptions(&mut self, subnets: HashSet<SubnetId>) {
|
||||||
|
self.long_lived_subscriptions = subnets
|
||||||
|
}
|
||||||
|
|
||||||
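The hunk above derives the node's long-lived attestation subnets deterministically from its node ID via `SubnetId::compute_subnets_for_epoch`, which is also referenced in the service code. As a rough, hedged sketch of that helper in isolation (not part of this diff; the dependency names, the `U256` node-id representation and the error type are assumptions):

```rust
use types::{ChainSpec, Epoch, MainnetEthSpec, SubnetId};

fn main() {
    let spec = ChainSpec::mainnet();
    // A node id is treated as a 256-bit value; discv5 node ids convert via `.raw().into()`.
    let node_id = ethereum_types::U256::from(42u64);
    let epoch = Epoch::new(100_000);

    match SubnetId::compute_subnets_for_epoch::<MainnetEthSpec>(node_id, epoch, &spec) {
        Ok((subnets, next_rotation_epoch)) => {
            // The subnets this node id maps to, and the epoch at which they next rotate.
            let subnets: Vec<SubnetId> = subnets.collect();
            println!("subnets {:?} are valid until epoch {}", subnets, next_rotation_epoch);
        }
        Err(e) => eprintln!("could not compute subnets: {:?}", e),
    }
}
```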
/// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip
|
/// Checks if we have subscribed aggregate validators for the subnet. If not, checks the gossip
|
||||||
/// verification, re-propagates and returns false.
|
/// verification, re-propagates and returns false.
|
||||||
pub fn should_process_attestation(
|
pub fn should_process_attestation(
|
||||||
@ -377,6 +553,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
// This is a current or past slot, we subscribe immediately.
|
// This is a current or past slot, we subscribe immediately.
|
||||||
self.subscribe_to_subnet_immediately(
|
self.subscribe_to_subnet_immediately(
|
||||||
subnet_id,
|
subnet_id,
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
SubscriptionKind::ShortLived,
|
SubscriptionKind::ShortLived,
|
||||||
slot + 1,
|
slot + 1,
|
||||||
)?;
|
)?;
|
||||||
@ -391,6 +568,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Updates the `known_validators` mapping and subscribes to long lived subnets if required.
|
/// Updates the `known_validators` mapping and subscribes to long lived subnets if required.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
fn add_known_validator(&mut self, validator_index: u64) {
|
fn add_known_validator(&mut self, validator_index: u64) {
|
||||||
let previously_known = self.known_validators.contains_key(&validator_index);
|
let previously_known = self.known_validators.contains_key(&validator_index);
|
||||||
// Add the new validator or update the current timeout for a known validator.
|
// Add the new validator or update the current timeout for a known validator.
|
||||||
@ -405,6 +583,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
/// Subscribe to long-lived random subnets and update the local ENR bitfield.
|
/// Subscribe to long-lived random subnets and update the local ENR bitfield.
|
||||||
/// The number of subnets to subscribe depends on the number of active validators and number of
|
/// The number of subnets to subscribe depends on the number of active validators and number of
|
||||||
/// current subscriptions.
|
/// current subscriptions.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
fn subscribe_to_random_subnets(&mut self) {
|
fn subscribe_to_random_subnets(&mut self) {
|
||||||
if self.subscribe_all_subnets {
|
if self.subscribe_all_subnets {
|
||||||
// This case is not handled by this service.
|
// This case is not handled by this service.
|
||||||
@ -468,9 +647,12 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
/// Checks that the time in which the subscription would end is not in the past. If we are
|
/// Checks that the time in which the subscription would end is not in the past. If we are
|
||||||
/// already subscribed, extends the timeout if necessary. If this is a new subscription, we send
|
/// already subscribed, extends the timeout if necessary. If this is a new subscription, we send
|
||||||
/// out the appropriate events.
|
/// out the appropriate events.
|
||||||
|
///
|
||||||
|
/// On deterministic long lived subnets, this is only used for short lived subscriptions.
|
||||||
fn subscribe_to_subnet_immediately(
|
fn subscribe_to_subnet_immediately(
|
||||||
&mut self,
|
&mut self,
|
||||||
subnet_id: SubnetId,
|
subnet_id: SubnetId,
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
subscription_kind: SubscriptionKind,
|
subscription_kind: SubscriptionKind,
|
||||||
end_slot: Slot,
|
end_slot: Slot,
|
||||||
) -> Result<(), &'static str> {
|
) -> Result<(), &'static str> {
|
||||||
@ -490,9 +672,13 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
return Err("Time when subscription would end has already passed.");
|
return Err("Time when subscription would end has already passed.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
let subscription_kind = SubscriptionKind::ShortLived;
|
||||||
|
|
||||||
// We need to check and add a subscription for the right kind, regardless of the presence
|
// We need to check and add a subscription for the right kind, regardless of the presence
|
||||||
// of the subnet as a subscription of the other kind. This is mainly since long lived
|
// of the subnet as a subscription of the other kind. This is mainly since long lived
|
||||||
// subscriptions can be removed at any time when a validator goes offline.
|
// subscriptions can be removed at any time when a validator goes offline.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
let (subscriptions, already_subscribed_as_other_kind) = match subscription_kind {
|
let (subscriptions, already_subscribed_as_other_kind) = match subscription_kind {
|
||||||
SubscriptionKind::ShortLived => (
|
SubscriptionKind::ShortLived => (
|
||||||
&mut self.short_lived_subscriptions,
|
&mut self.short_lived_subscriptions,
|
||||||
@ -504,6 +690,12 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
),
|
),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
let (subscriptions, already_subscribed_as_other_kind) = (
|
||||||
|
&mut self.short_lived_subscriptions,
|
||||||
|
self.long_lived_subscriptions.contains(&subnet_id),
|
||||||
|
);
|
||||||
|
|
||||||
match subscriptions.get(&subnet_id) {
|
match subscriptions.get(&subnet_id) {
|
||||||
Some(current_end_slot) => {
|
Some(current_end_slot) => {
|
||||||
// We are already subscribed. Check if we need to extend the subscription.
|
// We are already subscribed. Check if we need to extend the subscription.
|
||||||
@ -535,6 +727,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If this is a new long lived subscription, send out the appropriate events.
|
// If this is a new long lived subscription, send out the appropriate events.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
if SubscriptionKind::LongLived == subscription_kind {
|
if SubscriptionKind::LongLived == subscription_kind {
|
||||||
let subnet = Subnet::Attestation(subnet_id);
|
let subnet = Subnet::Attestation(subnet_id);
|
||||||
// Advertise this subnet in our ENR.
|
// Advertise this subnet in our ENR.
|
||||||
@ -564,6 +757,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
///
|
///
|
||||||
/// This function selects a new subnet to join, or extends the expiry if there are no more
|
/// This function selects a new subnet to join, or extends the expiry if there are no more
|
||||||
/// available subnets to choose from.
|
/// available subnets to choose from.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) {
|
fn handle_random_subnet_expiry(&mut self, subnet_id: SubnetId) {
|
||||||
self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived);
|
self.handle_removed_subnet(subnet_id, SubscriptionKind::LongLived);
|
||||||
|
|
||||||
@ -576,12 +770,15 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
// subscription of the other kind. For long lived subscriptions, it also removes the
|
// subscription of the other kind. For long lived subscriptions, it also removes the
|
||||||
// advertisement from our ENR.
|
// advertisement from our ENR.
|
||||||
fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) {
|
fn handle_removed_subnet(&mut self, subnet_id: SubnetId, subscription_kind: SubscriptionKind) {
|
||||||
let other_subscriptions = match subscription_kind {
|
let exists_in_other_subscriptions = match subscription_kind {
|
||||||
SubscriptionKind::LongLived => &self.short_lived_subscriptions,
|
SubscriptionKind::LongLived => self.short_lived_subscriptions.contains_key(&subnet_id),
|
||||||
SubscriptionKind::ShortLived => &self.long_lived_subscriptions,
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains(&subnet_id),
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
|
SubscriptionKind::ShortLived => self.long_lived_subscriptions.contains_key(&subnet_id),
|
||||||
};
|
};
|
||||||
|
|
||||||
if !other_subscriptions.contains_key(&subnet_id) {
|
if !exists_in_other_subscriptions {
|
||||||
// Subscription no longer exists as short lived or long lived.
|
// Subscription no longer exists as short lived or long lived.
|
||||||
debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind);
|
debug!(self.log, "Unsubscribing from subnet"; "subnet" => ?subnet_id, "subscription_kind" => ?subscription_kind);
|
||||||
self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
|
self.queue_event(SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
|
||||||
@ -603,6 +800,7 @@ impl<T: BeaconChainTypes> AttestationService<T> {
|
|||||||
/// We don't keep track of a specific validator to random subnet, rather the ratio of active
|
/// We don't keep track of a specific validator to random subnet, rather the ratio of active
|
||||||
/// validators to random subnets. So when a validator goes offline, we can simply remove the
|
/// validators to random subnets. So when a validator goes offline, we can simply remove the
|
||||||
/// allocated amount of random subnets.
|
/// allocated amount of random subnets.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
fn handle_known_validator_expiry(&mut self) {
|
fn handle_known_validator_expiry(&mut self) {
|
||||||
// Calculate how many subnets should we remove.
|
// Calculate how many subnets should we remove.
|
||||||
let extra_subnet_count = {
|
let extra_subnet_count = {
|
||||||
@ -659,6 +857,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
|
|||||||
|
|
||||||
// Process first any known validator expiries, since these affect how many long lived
|
// Process first any known validator expiries, since these affect how many long lived
|
||||||
// subnets we need.
|
// subnets we need.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
match self.known_validators.poll_next_unpin(cx) {
|
match self.known_validators.poll_next_unpin(cx) {
|
||||||
Poll::Ready(Some(Ok(_validator_index))) => {
|
Poll::Ready(Some(Ok(_validator_index))) => {
|
||||||
self.handle_known_validator_expiry();
|
self.handle_known_validator_expiry();
|
||||||
@ -669,12 +868,19 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
|
|||||||
Poll::Ready(None) | Poll::Pending => {}
|
Poll::Ready(None) | Poll::Pending => {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
match self.next_long_lived_subscription_event.as_mut().poll(cx) {
|
||||||
|
Poll::Ready(_) => self.recompute_long_lived_subnets(),
|
||||||
|
Poll::Pending => {}
|
||||||
|
}
|
||||||
|
|
||||||
// Process scheduled subscriptions that might be ready, since those can extend a soon to
|
// Process scheduled subscriptions that might be ready, since those can extend a soon to
|
||||||
// expire subscription.
|
// expire subscription.
|
||||||
match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) {
|
match self.scheduled_short_lived_subscriptions.poll_next_unpin(cx) {
|
||||||
Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => {
|
Poll::Ready(Some(Ok(ExactSubnet { subnet_id, slot }))) => {
|
||||||
if let Err(e) = self.subscribe_to_subnet_immediately(
|
if let Err(e) = self.subscribe_to_subnet_immediately(
|
||||||
subnet_id,
|
subnet_id,
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
SubscriptionKind::ShortLived,
|
SubscriptionKind::ShortLived,
|
||||||
slot + 1,
|
slot + 1,
|
||||||
) {
|
) {
|
||||||
@ -699,6 +905,7 @@ impl<T: BeaconChainTypes> Stream for AttestationService<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Process any random subnet expiries.
|
// Process any random subnet expiries.
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
match self.long_lived_subscriptions.poll_next_unpin(cx) {
|
match self.long_lived_subscriptions.poll_next_unpin(cx) {
|
||||||
Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => {
|
Poll::Ready(Some(Ok((subnet_id, _end_slot)))) => {
|
||||||
self.handle_random_subnet_expiry(subnet_id)
|
self.handle_random_subnet_expiry(subnet_id)
|
||||||
|
@ -123,7 +123,15 @@ fn get_attestation_service(
|
|||||||
|
|
||||||
let beacon_chain = CHAIN.chain.clone();
|
let beacon_chain = CHAIN.chain.clone();
|
||||||
|
|
||||||
AttestationService::new(beacon_chain, &config, &log)
|
AttestationService::new(
|
||||||
|
beacon_chain,
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
lighthouse_network::discv5::enr::NodeId::random()
|
||||||
|
.raw()
|
||||||
|
.into(),
|
||||||
|
&config,
|
||||||
|
&log,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_sync_committee_service() -> SyncCommitteeService<TestBeaconChainType> {
|
fn get_sync_committee_service() -> SyncCommitteeService<TestBeaconChainType> {
|
||||||
@ -170,6 +178,9 @@ async fn get_events<S: Stream<Item = SubnetServiceMessage> + Unpin>(
|
|||||||
|
|
||||||
mod attestation_service {
|
mod attestation_service {
|
||||||
|
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
use std::collections::HashSet;
|
||||||
|
|
||||||
use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD;
|
use crate::subnet_service::attestation_subnets::MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD;
|
||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
@ -190,6 +201,7 @@ mod attestation_service {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
fn get_subscriptions(
|
fn get_subscriptions(
|
||||||
validator_count: u64,
|
validator_count: u64,
|
||||||
slot: Slot,
|
slot: Slot,
|
||||||
@ -268,8 +280,7 @@ mod attestation_service {
|
|||||||
// If the long lived and short lived subnets are the same, there should be no more events
|
// If the long lived and short lived subnets are the same, there should be no more events
|
||||||
// as we don't resubscribe already subscribed subnets.
|
// as we don't resubscribe already subscribed subnets.
|
||||||
if !attestation_service
|
if !attestation_service
|
||||||
.subscriptions(attestation_subnets::SubscriptionKind::LongLived)
|
.is_subscribed(&subnet_id, attestation_subnets::SubscriptionKind::LongLived)
|
||||||
.contains_key(&subnet_id)
|
|
||||||
{
|
{
|
||||||
assert_eq!(expected[..], events[3..]);
|
assert_eq!(expected[..], events[3..]);
|
||||||
}
|
}
|
||||||
@ -352,11 +363,12 @@ mod attestation_service {
|
|||||||
|
|
||||||
let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1));
|
let expected = SubnetServiceMessage::Subscribe(Subnet::Attestation(subnet_id1));
|
||||||
|
|
||||||
// Should be still subscribed to 1 long lived and 1 short lived subnet if both are different.
|
// Should be still subscribed to 1 long lived and 1 short lived subnet if both are
|
||||||
if !attestation_service
|
// different.
|
||||||
.subscriptions(attestation_subnets::SubscriptionKind::LongLived)
|
if !attestation_service.is_subscribed(
|
||||||
.contains_key(&subnet_id1)
|
&subnet_id1,
|
||||||
{
|
attestation_subnets::SubscriptionKind::LongLived,
|
||||||
|
) {
|
||||||
assert_eq!(expected, events[3]);
|
assert_eq!(expected, events[3]);
|
||||||
assert_eq!(attestation_service.subscription_count(), 2);
|
assert_eq!(attestation_service.subscription_count(), 2);
|
||||||
} else {
|
} else {
|
||||||
@ -366,11 +378,12 @@ mod attestation_service {
|
|||||||
// Get event for 1 more slot duration, we should get the unsubscribe event now.
|
// Get event for 1 more slot duration, we should get the unsubscribe event now.
|
||||||
let unsubscribe_event = get_events(&mut attestation_service, None, 1).await;
|
let unsubscribe_event = get_events(&mut attestation_service, None, 1).await;
|
||||||
|
|
||||||
// If the long lived and short lived subnets are different, we should get an unsubscription event.
|
// If the long lived and short lived subnets are different, we should get an unsubscription
|
||||||
if !attestation_service
|
// event.
|
||||||
.subscriptions(attestation_subnets::SubscriptionKind::LongLived)
|
if !attestation_service.is_subscribed(
|
||||||
.contains_key(&subnet_id1)
|
&subnet_id1,
|
||||||
{
|
attestation_subnets::SubscriptionKind::LongLived,
|
||||||
|
) {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
[SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
|
[SubnetServiceMessage::Unsubscribe(Subnet::Attestation(
|
||||||
subnet_id1
|
subnet_id1
|
||||||
@ -383,6 +396,7 @@ mod attestation_service {
|
|||||||
assert_eq!(attestation_service.subscription_count(), 1);
|
assert_eq!(attestation_service.subscription_count(), 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn subscribe_all_random_subnets() {
|
async fn subscribe_all_random_subnets() {
|
||||||
let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count;
|
let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count;
|
||||||
@ -440,6 +454,7 @@ mod attestation_service {
|
|||||||
// test completed successfully
|
// test completed successfully
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "deterministic_long_lived_attnets"))]
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn subscribe_all_random_subnets_plus_one() {
|
async fn subscribe_all_random_subnets_plus_one() {
|
||||||
let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count;
|
let attestation_subnet_count = MainnetEthSpec::default_spec().attestation_subnet_count;
|
||||||
@ -573,10 +588,10 @@ mod attestation_service {
|
|||||||
let expected_unsubscription =
|
let expected_unsubscription =
|
||||||
SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1));
|
SubnetServiceMessage::Unsubscribe(Subnet::Attestation(subnet_id1));
|
||||||
|
|
||||||
if !attestation_service
|
if !attestation_service.is_subscribed(
|
||||||
.subscriptions(attestation_subnets::SubscriptionKind::LongLived)
|
&subnet_id1,
|
||||||
.contains_key(&subnet_id1)
|
attestation_subnets::SubscriptionKind::LongLived,
|
||||||
{
|
) {
|
||||||
assert_eq!(expected_subscription, events[3]);
|
assert_eq!(expected_subscription, events[3]);
|
||||||
// fourth is a discovery event
|
// fourth is a discovery event
|
||||||
assert_eq!(expected_unsubscription, events[5]);
|
assert_eq!(expected_unsubscription, events[5]);
|
||||||
@ -600,10 +615,10 @@ mod attestation_service {
|
|||||||
|
|
||||||
let second_subscribe_event = get_events(&mut attestation_service, None, 2).await;
|
let second_subscribe_event = get_events(&mut attestation_service, None, 2).await;
|
||||||
// If the long lived and short lived subnets are different, we should get an unsubscription event.
|
// If the long lived and short lived subnets are different, we should get an unsubscription event.
|
||||||
if !attestation_service
|
if !attestation_service.is_subscribed(
|
||||||
.subscriptions(attestation_subnets::SubscriptionKind::LongLived)
|
&subnet_id1,
|
||||||
.contains_key(&subnet_id1)
|
attestation_subnets::SubscriptionKind::LongLived,
|
||||||
{
|
) {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
[SubnetServiceMessage::Subscribe(Subnet::Attestation(
|
[SubnetServiceMessage::Subscribe(Subnet::Attestation(
|
||||||
subnet_id1
|
subnet_id1
|
||||||
@ -612,6 +627,43 @@ mod attestation_service {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[cfg(feature = "deterministic_long_lived_attnets")]
|
||||||
|
async fn test_update_deterministic_long_lived_subnets() {
|
||||||
|
let mut attestation_service = get_attestation_service(None);
|
||||||
|
let new_subnet = SubnetId::new(1);
|
||||||
|
let maintained_subnet = SubnetId::new(2);
|
||||||
|
let removed_subnet = SubnetId::new(3);
|
||||||
|
|
||||||
|
attestation_service
|
||||||
|
.set_long_lived_subscriptions(HashSet::from([removed_subnet, maintained_subnet]));
|
||||||
|
// clear initial events
|
||||||
|
let _events = get_events(&mut attestation_service, None, 1).await;
|
||||||
|
|
||||||
|
attestation_service
|
||||||
|
.update_long_lived_subnets_testing(HashSet::from([maintained_subnet, new_subnet]));
|
||||||
|
|
||||||
|
let events = get_events(&mut attestation_service, None, 1).await;
|
||||||
|
let new_subnet = Subnet::Attestation(new_subnet);
|
||||||
|
let removed_subnet = Subnet::Attestation(removed_subnet);
|
||||||
|
assert_eq!(
|
||||||
|
events,
|
||||||
|
[
|
||||||
|
// events for the new subnet
|
||||||
|
SubnetServiceMessage::Subscribe(new_subnet),
|
||||||
|
SubnetServiceMessage::EnrAdd(new_subnet),
|
||||||
|
SubnetServiceMessage::DiscoverPeers(vec![SubnetDiscovery {
|
||||||
|
subnet: new_subnet,
|
||||||
|
min_ttl: None
|
||||||
|
}]),
|
||||||
|
// events for the removed subnet
|
||||||
|
SubnetServiceMessage::Unsubscribe(removed_subnet),
|
||||||
|
SubnetServiceMessage::EnrRemove(removed_subnet),
|
||||||
|
]
|
||||||
|
);
|
||||||
|
println!("{events:?}")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mod sync_committee_service {
|
mod sync_committee_service {
|
||||||
|
@@ -372,9 +372,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             .long("eth1-endpoints")
             .value_name("HTTP-ENDPOINTS")
             .conflicts_with("eth1-endpoint")
-            .help("One or more comma-delimited server endpoints for web3 connection. \
-                If multiple endpoints are given the endpoints are used as fallback in the \
-                given order. Also enables the --eth1 flag. \
+            .help("One http endpoint for a web3 connection to an execution node. \
+                Note: This flag is now only useful for testing, use `--execution-endpoint` \
+                flag to connect to an execution node on mainnet and testnets.
                 Defaults to http://127.0.0.1:8545.")
             .takes_value(true)
         )
@@ -440,7 +440,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 JSON-RPC connection. Uses the same endpoint to populate the \
                 deposit cache.")
             .takes_value(true)
-            .requires("execution-jwt")
         )
         .arg(
             Arg::with_name("execution-jwt")
@@ -452,6 +451,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             .requires("execution-endpoint")
             .takes_value(true)
         )
+        .arg(
+            Arg::with_name("execution-jwt-secret-key")
+                .long("execution-jwt-secret-key")
+                .value_name("EXECUTION-JWT-SECRET-KEY")
+                .alias("jwt-secret-key")
+                .help("Hex-encoded JWT secret for the \
+                    execution endpoint provided in the --execution-endpoint flag.")
+                .requires("execution-endpoint")
+                .conflicts_with("execution-jwt")
+                .takes_value(true)
+        )
         .arg(
             Arg::with_name("execution-jwt-id")
                 .long("execution-jwt-id")
@@ -493,7 +503,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             .requires("execution-endpoint")
             .takes_value(true)
         )
+        .arg(
+            Arg::with_name("execution-timeout-multiplier")
+                .long("execution-timeout-multiplier")
+                .value_name("NUM")
+                .help("Unsigned integer to multiply the default execution timeouts by.")
+                .default_value("1")
+                .takes_value(true)
+        )
         /*
          * Database purging and compaction.
          */
@@ -835,4 +852,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 failure caused by the execution layer.")
             .takes_value(false)
         )
+        .arg(
+            Arg::with_name("disable-deposit-contract-sync")
+                .long("disable-deposit-contract-sync")
+                .help("Explicitly disables syncing of deposit logs from the execution node. \
+                    This overrides any previous option that depends on it. \
+                    Useful if you intend to run a non-validating beacon node.")
+                .takes_value(false)
+        )
 }
@ -3,6 +3,7 @@ use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG;
|
|||||||
use client::{ClientConfig, ClientGenesis};
|
use client::{ClientConfig, ClientGenesis};
|
||||||
use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR};
|
use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR};
|
||||||
use environment::RuntimeContext;
|
use environment::RuntimeContext;
|
||||||
|
use execution_layer::DEFAULT_JWT_FILE;
|
||||||
use genesis::Eth1Endpoint;
|
use genesis::Eth1Endpoint;
|
||||||
use http_api::TlsConfig;
|
use http_api::TlsConfig;
|
||||||
use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized};
|
use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized};
|
||||||
@ -230,17 +231,14 @@ pub fn get_config<E: EthSpec>(
|
|||||||
);
|
);
|
||||||
client_config.sync_eth1_chain = true;
|
client_config.sync_eth1_chain = true;
|
||||||
|
|
||||||
let endpoints = vec![SensitiveUrl::parse(endpoint)
|
let endpoint = SensitiveUrl::parse(endpoint)
|
||||||
.map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?];
|
.map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?;
|
||||||
client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints);
|
client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint);
|
||||||
} else if let Some(endpoints) = cli_args.value_of("eth1-endpoints") {
|
} else if let Some(endpoint) = cli_args.value_of("eth1-endpoints") {
|
||||||
client_config.sync_eth1_chain = true;
|
client_config.sync_eth1_chain = true;
|
||||||
let endpoints = endpoints
|
let endpoint = SensitiveUrl::parse(endpoint)
|
||||||
.split(',')
|
|
||||||
.map(SensitiveUrl::parse)
|
|
||||||
.collect::<Result<_, _>>()
|
|
||||||
.map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?;
|
.map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?;
|
||||||
client_config.eth1.endpoints = Eth1Endpoint::NoAuth(endpoints);
|
client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint);
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") {
|
if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") {
|
||||||
@@ -291,12 +289,34 @@ pub fn get_config<E: EthSpec>(
         let execution_endpoint =
             parse_only_one_value(endpoints, SensitiveUrl::parse, "--execution-endpoint", log)?;

-        // Parse a single JWT secret, logging warnings if multiple are supplied.
-        //
-        // JWTs are required if `--execution-endpoint` is supplied.
-        let secret_files: String = clap_utils::parse_required(cli_args, "execution-jwt")?;
-        let secret_file =
-            parse_only_one_value(&secret_files, PathBuf::from_str, "--execution-jwt", log)?;
+        // JWTs are required if `--execution-endpoint` is supplied. They can be either passed via
+        // file_path or directly as string.
+        let secret_file: PathBuf;
+        // Parse a single JWT secret from a given file_path, logging warnings if multiple are supplied.
+        if let Some(secret_files) = cli_args.value_of("execution-jwt") {
+            secret_file =
+                parse_only_one_value(secret_files, PathBuf::from_str, "--execution-jwt", log)?;
+
+        // Check if the JWT secret key is passed directly via cli flag and persist it to the default
+        // file location.
+        } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") {
+            use std::fs::File;
+            use std::io::Write;
+            secret_file = client_config.data_dir.join(DEFAULT_JWT_FILE);
+            let mut jwt_secret_key_file = File::create(secret_file.clone())
+                .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?;
+            jwt_secret_key_file
+                .write_all(jwt_secret_key.as_bytes())
+                .map_err(|e| {
+                    format!(
+                        "Error occurred while writing to jwt_secret_key file: {:?}",
+                        e
+                    )
+                })?;
+        } else {
+            return Err("Error! Please set either --execution-jwt file_path or --execution-jwt-secret-key directly via cli when using --execution-endpoint".to_string());
+        }
+
         // Parse and set the payload builder, if any.
         if let Some(endpoint) = cli_args.value_of("builder") {
||||||
@ -315,6 +335,9 @@ pub fn get_config<E: EthSpec>(
|
|||||||
el_config.default_datadir = client_config.data_dir.clone();
|
el_config.default_datadir = client_config.data_dir.clone();
|
||||||
el_config.builder_profit_threshold =
|
el_config.builder_profit_threshold =
|
||||||
clap_utils::parse_required(cli_args, "builder-profit-threshold")?;
|
clap_utils::parse_required(cli_args, "builder-profit-threshold")?;
|
||||||
|
let execution_timeout_multiplier =
|
||||||
|
clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?;
|
||||||
|
el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier);
|
||||||
|
|
||||||
// If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and
|
// If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and
|
||||||
// use `--execution-endpoint` instead. Also, log a deprecation warning.
|
// use `--execution-endpoint` instead. Also, log a deprecation warning.
|
||||||
@ -326,7 +349,7 @@ pub fn get_config<E: EthSpec>(
|
|||||||
--eth1-endpoints has been deprecated for post-merge configurations"
|
--eth1-endpoints has been deprecated for post-merge configurations"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
client_config.eth1.endpoints = Eth1Endpoint::Auth {
|
client_config.eth1.endpoint = Eth1Endpoint::Auth {
|
||||||
endpoint: execution_endpoint,
|
endpoint: execution_endpoint,
|
||||||
jwt_path: secret_file,
|
jwt_path: secret_file,
|
||||||
jwt_id: el_config.jwt_id.clone(),
|
jwt_id: el_config.jwt_id.clone(),
|
||||||
@ -645,6 +668,11 @@ pub fn get_config<E: EthSpec>(
|
|||||||
client_config.chain.enable_lock_timeouts = false;
|
client_config.chain.enable_lock_timeouts = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Note: This overrides any previous flags that enable this option.
|
||||||
|
if cli_args.is_present("disable-deposit-contract-sync") {
|
||||||
|
client_config.sync_eth1_chain = false;
|
||||||
|
}
|
||||||
|
|
||||||
if let Some(timeout) =
|
if let Some(timeout) =
|
||||||
clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")?
|
clap_utils::parse_optional(cli_args, "fork-choice-before-proposal-timeout")?
|
||||||
{
|
{
|
||||||
|
@@ -117,7 +117,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
                 info!(
                     log,
                     "Block production enabled";
-                    "endpoints" => format!("{:?}", &client_config.eth1.endpoints),
+                    "endpoint" => format!("{:?}", &client_config.eth1.endpoint),
                     "method" => "json rpc via http"
                 );
                 builder
@@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use types::{Checkpoint, Hash256, Slot};

-pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(12);
+pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13);

 // All the keys that get stored under the `BeaconMeta` column.
 //
@@ -4,7 +4,8 @@ use crate::{Error, ItemStore, KeyValueStore};
 use itertools::{process_results, Itertools};
 use slog::info;
 use state_processing::{
-    per_block_processing, per_slot_processing, BlockSignatureStrategy, VerifyBlockRoot,
+    per_block_processing, per_slot_processing, BlockSignatureStrategy, ConsensusContext,
+    VerifyBlockRoot,
 };
 use std::sync::Arc;
 use types::{EthSpec, Hash256};
@@ -87,12 +88,16 @@ where

         // Apply block.
         if let Some(block) = block {
+            let mut ctxt = ConsensusContext::new(block.slot())
+                .set_current_block_root(block_root)
+                .set_proposer_index(block.message().proposer_index());
+
             per_block_processing(
                 &mut state,
                 &block,
-                Some(block_root),
                 BlockSignatureStrategy::NoVerification,
                 VerifyBlockRoot::True,
+                &mut ctxt,
                 &self.spec,
             )
             .map_err(HotColdDBError::BlockReplayBlockError)?;
@@ -59,7 +59,7 @@ TCP and UDP ports (9000 by default).
 Lighthouse has a number of CLI parameters for constructing and modifying the
 local Ethereum Node Record (ENR). Examples are `--enr-address`,
 `--enr-udp-port`, `--enr-tcp-port` and `--disable-enr-auto-update`. These
-settings allow you construct your initial ENR. Their primary intention is for
+settings allow you to construct your initial ENR. Their primary intention is for
 setting up boot-like nodes and having a contactable ENR on boot. On normal
 operation of a Lighthouse node, none of these flags need to be set. Setting
 these flags incorrectly can lead to your node being incorrectly added to the
@@ -188,7 +188,7 @@ with the builder network:
 INFO Published validator registrations to the builder network
 ```

-When you succesfully propose a block using a builder, you will see this log on the beacon node:
+When you successfully propose a block using a builder, you will see this log on the beacon node:

 ```
 INFO Successfully published a block to the builder network
@@ -103,7 +103,7 @@ opt-in). Instead, we assert that since the withdrawal keys can be regenerated
 from a mnemonic, having them lying around on the file-system only presents risk
 and complexity.

-At the time or writing, we do not expose the commands to regenerate keys from
+At the time of writing, we do not expose the commands to regenerate keys from
 mnemonics. However, key regeneration is tested on the public Lighthouse
 repository and will be exposed prior to mainnet launch.
@@ -2,7 +2,7 @@

 This document provides detail for users who want to run a merge-ready Lighthouse node.

-> The merge is occuring on mainnet in September. You _must_ have a merge-ready setup by September 6
+> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6
 > 2022.

 ## Necessary Configuration
@@ -48,6 +48,10 @@ If you set up an execution engine with `--execution-endpoint` then you *must* pr
 using `--execution-jwt`. This is a mandatory form of authentication that ensures that Lighthouse
 has authority to control the execution engine.

+> Tip: the --execution-jwt-secret-key <STRING> flag can be used instead of --execution-jwt <FILE>.
+> This is useful, for example, for users who wish to inject the value into a Docker container without
+> needing to pass a jwt secret file.
+
 The execution engine connection must be **exclusive**, i.e. you must have one execution node
 per beacon node. The reason for this is that the beacon node _controls_ the execution node. Please
 see the [FAQ](#faq) for further information about why many:1 and 1:many configurations are not
@@ -11,7 +11,7 @@ backed up, all validator keys can be trivially re-generated.

 The 24-word string is randomly generated during wallet creation and printed out
 to the terminal. It's important to **make one or more backups of the mnemonic**
-to ensure your ETH is not lost in the case of data loss. It very important to
+to ensure your ETH is not lost in the case of data loss. It is very important to
 **keep your mnemonic private** as it represents the ultimate control of your
 ETH.
@@ -1,6 +1,6 @@
 [package]
 name = "boot_node"
-version = "3.1.2"
+version = "3.2.1"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
@ -11,7 +11,10 @@ use types::EthSpec;
|
|||||||
pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
|
pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
|
||||||
// Print out useful information about the generated ENR
|
// Print out useful information about the generated ENR
|
||||||
|
|
||||||
let enr_socket = config.local_enr.udp_socket().expect("Enr has a UDP socket");
|
let enr_socket = config
|
||||||
|
.local_enr
|
||||||
|
.udp4_socket()
|
||||||
|
.expect("Enr has a UDP socket");
|
||||||
let eth2_field = config
|
let eth2_field = config
|
||||||
.local_enr
|
.local_enr
|
||||||
.eth2()
|
.eth2()
|
||||||
@ -39,7 +42,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
|
|||||||
info!(
|
info!(
|
||||||
log,
|
log,
|
||||||
"Adding bootnode";
|
"Adding bootnode";
|
||||||
"address" => ?enr.udp_socket(),
|
"address" => ?enr.udp4_socket(),
|
||||||
"peer_id" => enr.peer_id().to_string(),
|
"peer_id" => enr.peer_id().to_string(),
|
||||||
"node_id" => enr.node_id().to_string()
|
"node_id" => enr.node_id().to_string()
|
||||||
);
|
);
|
||||||
@ -94,6 +97,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
|
|||||||
Discv5Event::SocketUpdated(socket_addr) => {
|
Discv5Event::SocketUpdated(socket_addr) => {
|
||||||
info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr));
|
info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr));
|
||||||
}
|
}
|
||||||
|
Discv5Event::SessionEstablished{ .. } => {} // Ignore
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -20,7 +20,10 @@ status = [
     "doppelganger-protection-test",
     "execution-engine-integration-ubuntu",
     "cargo-vendor",
-    "check-msrv"
+    "check-msrv",
+    "slasher-tests",
+    "syncing-simulator-ubuntu",
+    "disallowed-from-async-lint"
 ]
 use_squash_merge = true
 timeout_sec = 10800
|
@@ -9,7 +9,7 @@ build = "build.rs"
 [build-dependencies]
 reqwest = { version = "0.11.0", features = ["blocking", "json", "native-tls-vendored"] }
 serde_json = "1.0.58"
-sha2 = "0.9.1"
+sha2 = "0.10"
 hex = "0.4.2"

 [dependencies]
@@ -114,6 +114,7 @@ pub struct Timeouts {
     pub sync_duties: Duration,
     pub get_beacon_blocks_ssz: Duration,
     pub get_debug_beacon_states: Duration,
+    pub get_deposit_snapshot: Duration,
 }

 impl Timeouts {
@@ -128,6 +129,7 @@ impl Timeouts {
             sync_duties: timeout,
             get_beacon_blocks_ssz: timeout,
             get_debug_beacon_states: timeout,
+            get_deposit_snapshot: timeout,
         }
     }
 }
@@ -934,6 +936,20 @@ impl BeaconNodeHttpClient {
         Ok(())
     }

+    /// `GET beacon/deposit_snapshot`
+    pub async fn get_deposit_snapshot(&self) -> Result<Option<types::DepositTreeSnapshot>, Error> {
+        use ssz::Decode;
+        let mut path = self.eth_path(V1)?;
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("beacon")
+            .push("deposit_snapshot");
+        self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_deposit_snapshot)
+            .await?
+            .map(|bytes| DepositTreeSnapshot::from_ssz_bytes(&bytes).map_err(Error::InvalidSsz))
+            .transpose()
+    }
+
     /// `POST validator/contribution_and_proofs`
     pub async fn post_validator_contribution_and_proofs<T: EthSpec>(
         &self,
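The new endpoint above is consumed through the `eth2` client crate. A minimal, hedged usage sketch (not part of this diff; it assumes the `eth2`, `sensitive_url`, and `tokio` crates as dependencies and a beacon node listening on its default HTTP port):

```rust
use eth2::{BeaconNodeHttpClient, Timeouts};
use sensitive_url::SensitiveUrl;
use std::time::Duration;

#[tokio::main]
async fn main() {
    // Assumed address of a local beacon node; adjust as required.
    let url = SensitiveUrl::parse("http://localhost:5052").expect("valid URL");
    let client = BeaconNodeHttpClient::new(url, Timeouts::set_all(Duration::from_secs(12)));

    // `Ok(None)` means the node does not (yet) serve a finalized deposit tree snapshot.
    match client.get_deposit_snapshot().await {
        Ok(Some(snapshot)) => println!("finalized deposit root: {:?}", snapshot.deposit_root),
        Ok(None) => println!("no deposit snapshot available"),
        Err(e) => eprintln!("request failed: {:?}", e),
    }
}
```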
@@ -6,7 +6,10 @@ mod block_rewards;

 use crate::{
     ok_or_error,
-    types::{BeaconState, ChainSpec, Epoch, EthSpec, GenericResponse, ValidatorId},
+    types::{
+        BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock,
+        GenericResponse, ValidatorId,
+    },
     BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode,
 };
 use proto_array::core::ProtoArray;
@@ -331,6 +334,19 @@ impl Eth1Block {
     }
 }

+impl From<Eth1Block> for FinalizedExecutionBlock {
+    fn from(eth1_block: Eth1Block) -> Self {
+        Self {
+            deposit_count: eth1_block.deposit_count.unwrap_or(0),
+            deposit_root: eth1_block
+                .deposit_root
+                .unwrap_or_else(|| DepositTreeSnapshot::default().deposit_root),
+            block_hash: eth1_block.hash,
+            block_height: eth1_block.number,
+        }
+    }
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct DatabaseInfo {
     pub schema_version: u64,
@@ -18,4 +18,4 @@ serde_yaml = "0.8.13"
 types = { path = "../../consensus/types"}
 eth2_ssz = "0.4.1"
 eth2_config = { path = "../eth2_config"}
-enr = { version = "0.5.1", features = ["ed25519", "k256"] }
+enr = { version = "0.6.2", features = ["ed25519", "k256"] }
@ -1,10 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "fallback"
|
|
||||||
version = "0.1.0"
|
|
||||||
authors = ["blacktemplar <blacktemplar@a1.net>"]
|
|
||||||
edition = "2021"
|
|
||||||
|
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
itertools = "0.10.0"
|
|
@ -1,63 +0,0 @@
|
|||||||
use itertools::{join, zip};
|
|
||||||
use std::fmt::{Debug, Display};
|
|
||||||
use std::future::Future;
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct Fallback<T> {
|
|
||||||
pub servers: Vec<T>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
pub enum FallbackError<E> {
|
|
||||||
AllErrored(Vec<E>),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Fallback<T> {
|
|
||||||
pub fn new(servers: Vec<T>) -> Self {
|
|
||||||
Self { servers }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the first successful result along with number of previous errors encountered
|
|
||||||
/// or all the errors encountered if every server fails.
|
|
||||||
pub async fn first_success<'a, F, O, E, R>(
|
|
||||||
&'a self,
|
|
||||||
func: F,
|
|
||||||
) -> Result<(O, usize), FallbackError<E>>
|
|
||||||
where
|
|
||||||
F: Fn(&'a T) -> R,
|
|
||||||
R: Future<Output = Result<O, E>>,
|
|
||||||
{
|
|
||||||
let mut errors = vec![];
|
|
||||||
for server in &self.servers {
|
|
||||||
match func(server).await {
|
|
||||||
Ok(val) => return Ok((val, errors.len())),
|
|
||||||
Err(e) => errors.push(e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(FallbackError::AllErrored(errors))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn map_format_error<'a, E, F, S>(&'a self, f: F, error: &FallbackError<E>) -> String
|
|
||||||
where
|
|
||||||
F: FnMut(&'a T) -> &'a S,
|
|
||||||
S: Display + 'a,
|
|
||||||
E: Debug,
|
|
||||||
{
|
|
||||||
match error {
|
|
||||||
FallbackError::AllErrored(v) => format!(
|
|
||||||
"All fallbacks errored: {}",
|
|
||||||
join(
|
|
||||||
zip(self.servers.iter().map(f), v.iter())
|
|
||||||
.map(|(server, error)| format!("{} => {:?}", server, error)),
|
|
||||||
", "
|
|
||||||
)
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Display> Fallback<T> {
|
|
||||||
pub fn format_error<E: Debug>(&self, error: &FallbackError<E>) -> String {
|
|
||||||
self.map_format_error(|s| s, error)
|
|
||||||
}
|
|
||||||
}
|
|
@@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
         // NOTE: using --match instead of --exclude for compatibility with old Git
         "--match=thiswillnevermatchlol"
     ],
-    prefix = "Lighthouse/v3.1.2-",
-    fallback = "Lighthouse/v3.1.2"
+    prefix = "Lighthouse/v3.2.1-",
+    fallback = "Lighthouse/v3.2.1"
 );

 /// Returns `VERSION`, but with platform information appended to the end.
@@ -43,6 +43,16 @@ impl JsonMetric {
             }
         }
     }
+
+    /// Return a default json value given the metric type.
+    fn get_typed_value_default(&self) -> serde_json::Value {
+        match self.ty {
+            JsonType::Integer => json!(0),
+            JsonType::Boolean => {
+                json!(false)
+            }
+        }
+    }
}
|
}
|
||||||
|
|
||||||
/// The required metrics for the beacon and validator processes.
|
/// The required metrics for the beacon and validator processes.
|
||||||
@@ -155,6 +165,16 @@ pub fn gather_metrics(metrics_map: &HashMap<String, JsonMetric>) -> Option<serde
             let _ = res.insert(metric.json_output_key.to_string(), value);
         };
     }
+    // Insert default metrics for all monitoring service metrics that do not
+    // exist as lighthouse metrics.
+    for json_metric in metrics_map.values() {
+        if !res.contains_key(json_metric.json_output_key) {
+            let _ = res.insert(
+                json_metric.json_output_key.to_string(),
+                json_metric.get_typed_value_default(),
+            );
+        }
+    }
     Some(serde_json::Value::Object(res))
 }
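To make the intent of the default-filling loop above concrete, here is a small, self-contained illustration (not Lighthouse code; the metric names are purely hypothetical):

```rust
use serde_json::{json, Map, Value};

fn main() {
    // Keys the monitoring service expects, with a typed default for each.
    let expected: Vec<(&str, Value)> = vec![
        ("example_boolean_metric", json!(false)),
        ("example_integer_metric", json!(0)),
    ];

    // Only one of them was actually gathered from the local metrics registry.
    let mut res: Map<String, Value> = Map::new();
    res.insert("example_boolean_metric".to_string(), json!(true));

    // Missing keys are filled with their defaults, mirroring the new behaviour.
    for (key, default) in expected {
        res.entry(key.to_string()).or_insert(default);
    }

    assert_eq!(res["example_integer_metric"], json!(0));
    println!("{}", Value::Object(res));
}
```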
|
|
||||||
|
@ -19,6 +19,8 @@ lazy_static! {
|
|||||||
/// indices are populated by non-zero leaves (perfect for the deposit contract tree).
|
/// indices are populated by non-zero leaves (perfect for the deposit contract tree).
|
||||||
#[derive(Debug, PartialEq)]
|
#[derive(Debug, PartialEq)]
|
||||||
pub enum MerkleTree {
|
pub enum MerkleTree {
|
||||||
|
/// Finalized Node
|
||||||
|
Finalized(H256),
|
||||||
/// Leaf node with the hash of its content.
|
/// Leaf node with the hash of its content.
|
||||||
Leaf(H256),
|
Leaf(H256),
|
||||||
/// Internal node with hash, left subtree and right subtree.
|
/// Internal node with hash, left subtree and right subtree.
|
||||||
@ -41,6 +43,24 @@ pub enum MerkleTreeError {
|
|||||||
DepthTooSmall,
|
DepthTooSmall,
|
||||||
// Overflow occurred
|
// Overflow occurred
|
||||||
ArithError,
|
ArithError,
|
||||||
|
// Can't finalize a zero node
|
||||||
|
ZeroNodeFinalized,
|
||||||
|
// Can't push to finalized node
|
||||||
|
FinalizedNodePushed,
|
||||||
|
// Invalid Snapshot
|
||||||
|
InvalidSnapshot(InvalidSnapshot),
|
||||||
|
// Can't prove a finalized node
|
||||||
|
ProofEncounteredFinalizedNode,
|
||||||
|
// This should never happen
|
||||||
|
PleaseNotifyTheDevs,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
|
pub enum InvalidSnapshot {
|
||||||
|
// Branch hashes are empty but deposits are not
|
||||||
|
EmptyBranchWithNonZeroDeposits(usize),
|
||||||
|
// End of tree reached but deposits != 1
|
||||||
|
EndOfTree,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MerkleTree {
|
impl MerkleTree {
|
||||||
@ -97,9 +117,11 @@ impl MerkleTree {
|
|||||||
let right: &mut MerkleTree = &mut *right;
|
let right: &mut MerkleTree = &mut *right;
|
||||||
match (&*left, &*right) {
|
match (&*left, &*right) {
|
||||||
// Tree is full
|
// Tree is full
|
||||||
(Leaf(_), Leaf(_)) => return Err(MerkleTreeError::MerkleTreeFull),
|
(Leaf(_), Leaf(_)) | (Finalized(_), Leaf(_)) => {
|
||||||
|
return Err(MerkleTreeError::MerkleTreeFull)
|
||||||
|
}
|
||||||
// There is a right node so insert in right node
|
// There is a right node so insert in right node
|
||||||
(Node(_, _, _), Node(_, _, _)) => {
|
(Node(_, _, _), Node(_, _, _)) | (Finalized(_), Node(_, _, _)) => {
|
||||||
right.push_leaf(elem, depth - 1)?;
|
right.push_leaf(elem, depth - 1)?;
|
||||||
}
|
}
|
||||||
// Both branches are zero, insert in left one
|
// Both branches are zero, insert in left one
|
||||||
@ -107,7 +129,7 @@ impl MerkleTree {
|
|||||||
*left = MerkleTree::create(&[elem], depth - 1);
|
*left = MerkleTree::create(&[elem], depth - 1);
|
||||||
}
|
}
|
||||||
// Leaf on left branch and zero on right branch, insert on right side
|
// Leaf on left branch and zero on right branch, insert on right side
|
||||||
(Leaf(_), Zero(_)) => {
|
(Leaf(_), Zero(_)) | (Finalized(_), Zero(_)) => {
|
||||||
*right = MerkleTree::create(&[elem], depth - 1);
|
*right = MerkleTree::create(&[elem], depth - 1);
|
||||||
}
|
}
|
||||||
// Try inserting on the left node -> if it fails because it is full, insert in right side.
|
// Try inserting on the left node -> if it fails because it is full, insert in right side.
|
||||||
@ -129,6 +151,7 @@ impl MerkleTree {
|
|||||||
right.hash().as_bytes(),
|
right.hash().as_bytes(),
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
Finalized(_) => return Err(MerkleTreeError::FinalizedNodePushed),
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
@ -137,6 +160,7 @@ impl MerkleTree {
|
|||||||
/// Retrieve the root hash of this Merkle tree.
|
/// Retrieve the root hash of this Merkle tree.
|
||||||
pub fn hash(&self) -> H256 {
|
pub fn hash(&self) -> H256 {
|
||||||
match *self {
|
match *self {
|
||||||
|
MerkleTree::Finalized(h) => h,
|
||||||
MerkleTree::Leaf(h) => h,
|
MerkleTree::Leaf(h) => h,
|
||||||
MerkleTree::Node(h, _, _) => h,
|
MerkleTree::Node(h, _, _) => h,
|
||||||
MerkleTree::Zero(depth) => H256::from_slice(&ZERO_HASHES[depth]),
|
MerkleTree::Zero(depth) => H256::from_slice(&ZERO_HASHES[depth]),
|
||||||
@ -146,7 +170,7 @@ impl MerkleTree {
|
|||||||
/// Get a reference to the left and right subtrees if they exist.
|
/// Get a reference to the left and right subtrees if they exist.
|
||||||
pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> {
|
pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> {
|
||||||
match *self {
|
match *self {
|
||||||
MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None,
|
MerkleTree::Finalized(_) | MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None,
|
||||||
MerkleTree::Node(_, ref l, ref r) => Some((l, r)),
|
MerkleTree::Node(_, ref l, ref r) => Some((l, r)),
|
||||||
MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])),
|
MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])),
|
||||||
}
|
}
|
||||||
@ -157,16 +181,125 @@ impl MerkleTree {
|
|||||||
matches!(self, MerkleTree::Leaf(_))
|
matches!(self, MerkleTree::Leaf(_))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Finalize deposits up to deposit with count = deposits_to_finalize
|
||||||
|
pub fn finalize_deposits(
|
||||||
|
&mut self,
|
||||||
|
deposits_to_finalize: usize,
|
||||||
|
level: usize,
|
||||||
|
) -> Result<(), MerkleTreeError> {
|
||||||
|
match self {
|
||||||
|
MerkleTree::Finalized(_) => Ok(()),
|
||||||
|
MerkleTree::Zero(_) => Err(MerkleTreeError::ZeroNodeFinalized),
|
||||||
|
MerkleTree::Leaf(hash) => {
|
||||||
|
if level != 0 {
|
||||||
|
// This shouldn't happen but this is a sanity check
|
||||||
|
return Err(MerkleTreeError::PleaseNotifyTheDevs);
|
||||||
|
}
|
||||||
|
*self = MerkleTree::Finalized(*hash);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
MerkleTree::Node(hash, left, right) => {
|
||||||
|
if level == 0 {
|
||||||
|
// this shouldn't happen but we'll put it here for safety
|
||||||
|
return Err(MerkleTreeError::PleaseNotifyTheDevs);
|
||||||
|
}
|
||||||
|
let deposits = 0x1 << level;
|
||||||
|
if deposits <= deposits_to_finalize {
|
||||||
|
*self = MerkleTree::Finalized(*hash);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
left.finalize_deposits(deposits_to_finalize, level - 1)?;
|
||||||
|
if deposits_to_finalize > deposits / 2 {
|
||||||
|
let remaining = deposits_to_finalize - deposits / 2;
|
||||||
|
right.finalize_deposits(remaining, level - 1)?;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn append_finalized_hashes(&self, result: &mut Vec<H256>) {
|
||||||
|
match self {
|
||||||
|
MerkleTree::Zero(_) | MerkleTree::Leaf(_) => {}
|
||||||
|
MerkleTree::Finalized(h) => result.push(*h),
|
||||||
|
MerkleTree::Node(_, left, right) => {
|
||||||
|
left.append_finalized_hashes(result);
|
||||||
|
right.append_finalized_hashes(result);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_finalized_hashes(&self) -> Vec<H256> {
|
||||||
|
let mut result = vec![];
|
||||||
|
self.append_finalized_hashes(&mut result);
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_finalized_snapshot(
|
||||||
|
finalized_branch: &[H256],
|
||||||
|
deposit_count: usize,
|
||||||
|
level: usize,
|
||||||
|
) -> Result<Self, MerkleTreeError> {
|
||||||
|
if finalized_branch.is_empty() {
|
||||||
|
return if deposit_count == 0 {
|
||||||
|
Ok(MerkleTree::Zero(level))
|
||||||
|
} else {
|
||||||
|
Err(InvalidSnapshot::EmptyBranchWithNonZeroDeposits(deposit_count).into())
|
||||||
|
};
|
||||||
|
}
|
||||||
|
if deposit_count == (0x1 << level) {
|
||||||
|
return Ok(MerkleTree::Finalized(
|
||||||
|
*finalized_branch
|
||||||
|
.get(0)
|
||||||
|
.ok_or(MerkleTreeError::PleaseNotifyTheDevs)?,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if level == 0 {
|
||||||
|
return Err(InvalidSnapshot::EndOfTree.into());
|
||||||
|
}
|
||||||
|
|
||||||
|
let (left, right) = match deposit_count.checked_sub(0x1 << (level - 1)) {
|
||||||
|
// left tree is fully finalized
|
||||||
|
Some(right_deposits) => {
|
||||||
|
let (left_hash, right_branch) = finalized_branch
|
||||||
|
.split_first()
|
||||||
|
.ok_or(MerkleTreeError::PleaseNotifyTheDevs)?;
|
||||||
|
(
|
||||||
|
MerkleTree::Finalized(*left_hash),
|
||||||
|
MerkleTree::from_finalized_snapshot(right_branch, right_deposits, level - 1)?,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
// left tree is not fully finalized -> right tree is zero
|
||||||
|
None => (
|
||||||
|
MerkleTree::from_finalized_snapshot(finalized_branch, deposit_count, level - 1)?,
|
||||||
|
MerkleTree::Zero(level - 1),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
let hash = H256::from_slice(&hash32_concat(
|
||||||
|
left.hash().as_bytes(),
|
||||||
|
right.hash().as_bytes(),
|
||||||
|
));
|
||||||
|
Ok(MerkleTree::Node(hash, Box::new(left), Box::new(right)))
|
||||||
|
}
|
||||||
|
|
||||||
/// Return the leaf at `index` and a Merkle proof of its inclusion.
|
/// Return the leaf at `index` and a Merkle proof of its inclusion.
|
||||||
///
|
///
|
||||||
/// The Merkle proof is in "bottom-up" order, starting with a leaf node
|
/// The Merkle proof is in "bottom-up" order, starting with a leaf node
|
||||||
/// and moving up the tree. Its length will be exactly equal to `depth`.
|
/// and moving up the tree. Its length will be exactly equal to `depth`.
|
||||||
pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec<H256>) {
|
pub fn generate_proof(
|
||||||
|
&self,
|
||||||
|
index: usize,
|
||||||
|
depth: usize,
|
||||||
|
) -> Result<(H256, Vec<H256>), MerkleTreeError> {
|
||||||
let mut proof = vec![];
|
let mut proof = vec![];
|
||||||
let mut current_node = self;
|
let mut current_node = self;
|
||||||
let mut current_depth = depth;
|
let mut current_depth = depth;
|
||||||
while current_depth > 0 {
|
while current_depth > 0 {
|
||||||
let ith_bit = (index >> (current_depth - 1)) & 0x01;
|
let ith_bit = (index >> (current_depth - 1)) & 0x01;
|
||||||
|
if let &MerkleTree::Finalized(_) = current_node {
|
||||||
|
return Err(MerkleTreeError::ProofEncounteredFinalizedNode);
|
||||||
|
}
|
||||||
// Note: unwrap is safe because leaves are only ever constructed at depth == 0.
|
// Note: unwrap is safe because leaves are only ever constructed at depth == 0.
|
||||||
let (left, right) = current_node.left_and_right_branches().unwrap();
|
let (left, right) = current_node.left_and_right_branches().unwrap();
|
||||||
|
|
||||||
@ -187,7 +320,33 @@ impl MerkleTree {
|
|||||||
// Put proof in bottom-up order.
|
// Put proof in bottom-up order.
|
||||||
proof.reverse();
|
proof.reverse();
|
||||||
|
|
||||||
(current_node.hash(), proof)
|
Ok((current_node.hash(), proof))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// useful for debugging
|
||||||
|
pub fn print_node(&self, mut space: u32) {
|
||||||
|
const SPACES: u32 = 10;
|
||||||
|
space += SPACES;
|
||||||
|
let (pair, text) = match self {
|
||||||
|
MerkleTree::Node(hash, left, right) => (Some((left, right)), format!("Node({})", hash)),
|
||||||
|
MerkleTree::Leaf(hash) => (None, format!("Leaf({})", hash)),
|
||||||
|
MerkleTree::Zero(depth) => (
|
||||||
|
None,
|
||||||
|
format!("Z[{}]({})", depth, H256::from_slice(&ZERO_HASHES[*depth])),
|
||||||
|
),
|
||||||
|
MerkleTree::Finalized(hash) => (None, format!("Finl({})", hash)),
|
||||||
|
};
|
||||||
|
if let Some((_, right)) = pair {
|
||||||
|
right.print_node(space);
|
||||||
|
}
|
||||||
|
println!();
|
||||||
|
for _i in SPACES..space {
|
||||||
|
print!(" ");
|
||||||
|
}
|
||||||
|
println!("{}", text);
|
||||||
|
if let Some((left, _)) = pair {
|
||||||
|
left.print_node(space);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -235,6 +394,12 @@ impl From<ArithError> for MerkleTreeError {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<InvalidSnapshot> for MerkleTreeError {
|
||||||
|
fn from(e: InvalidSnapshot) -> Self {
|
||||||
|
MerkleTreeError::InvalidSnapshot(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
@ -255,7 +420,9 @@ mod tests {
|
|||||||
let merkle_root = merkle_tree.hash();
|
let merkle_root = merkle_tree.hash();
|
||||||
|
|
||||||
let proofs_ok = (0..leaves.len()).all(|i| {
|
let proofs_ok = (0..leaves.len()).all(|i| {
|
||||||
let (leaf, branch) = merkle_tree.generate_proof(i, depth);
|
let (leaf, branch) = merkle_tree
|
||||||
|
.generate_proof(i, depth)
|
||||||
|
.expect("should generate proof");
|
||||||
leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root)
|
leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root)
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -274,7 +441,9 @@ mod tests {
|
|||||||
|
|
||||||
let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| {
|
let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| {
|
||||||
assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(()));
|
assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(()));
|
||||||
let (stored_leaf, branch) = merkle_tree.generate_proof(i, depth);
|
let (stored_leaf, branch) = merkle_tree
|
||||||
|
.generate_proof(i, depth)
|
||||||
|
.expect("should generate proof");
|
||||||
stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash())
|
stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash())
|
||||||
});
|
});
|
||||||
|
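Note: a minimal sketch of how the finalization API added above might be exercised. It assumes `ethereum_types::H256` and the `merkle_proof` crate paths; the leaf values and the helper name are purely illustrative, not part of the change.

    use ethereum_types::H256;
    use merkle_proof::{MerkleTree, MerkleTreeError};

    fn finalization_sketch() -> Result<(), MerkleTreeError> {
        let depth = 3;
        let leaves: Vec<H256> = (0..5u8).map(|i| H256::from([i; 32])).collect();
        let mut tree = MerkleTree::create(&leaves, depth);
        let full_root = tree.hash();

        // Collapse the subtree covering the first four deposits into a Finalized node.
        tree.finalize_deposits(4, depth)?;
        // Finalization replaces nodes with their hashes, so the root is unchanged.
        assert_eq!(tree.hash(), full_root);

        // The finalized hashes plus the deposit count are enough to rebuild the finalized part.
        let finalized = tree.get_finalized_hashes();
        let restored = MerkleTree::from_finalized_snapshot(&finalized, 4, depth)?;

        // Proofs can no longer be generated for leaves hidden behind a Finalized node.
        assert!(matches!(
            restored.generate_proof(0, depth),
            Err(MerkleTreeError::ProofEncounteredFinalizedNode)
        ));
        Ok(())
    }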
@@ -246,6 +246,20 @@ impl Decode for NonZeroUsize {
    }
 }

+impl<T: Decode> Decode for Option<T> {
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        let (selector, body) = split_union_bytes(bytes)?;
+        match selector.into() {
+            0u8 => Ok(None),
+            1u8 => <T as Decode>::from_ssz_bytes(body).map(Option::Some),
+            other => Err(DecodeError::UnionSelectorInvalid(other)),
+        }
+    }
+}
+
 impl<T: Decode> Decode for Arc<T> {
    fn is_ssz_fixed_len() -> bool {
        T::is_ssz_fixed_len()

@@ -203,6 +203,34 @@ impl_encode_for_tuples! {
    }
 }

+impl<T: Encode> Encode for Option<T> {
+    fn is_ssz_fixed_len() -> bool {
+        false
+    }
+    fn ssz_append(&self, buf: &mut Vec<u8>) {
+        match self {
+            Option::None => {
+                let union_selector: u8 = 0u8;
+                buf.push(union_selector);
+            }
+            Option::Some(ref inner) => {
+                let union_selector: u8 = 1u8;
+                buf.push(union_selector);
+                inner.ssz_append(buf);
+            }
+        }
+    }
+    fn ssz_bytes_len(&self) -> usize {
+        match self {
+            Option::None => 1usize,
+            Option::Some(ref inner) => inner
+                .ssz_bytes_len()
+                .checked_add(1)
+                .expect("encoded length must be less than usize::max_value"),
+        }
+    }
+}
+
 impl<T: Encode> Encode for Arc<T> {
    fn is_ssz_fixed_len() -> bool {
        T::is_ssz_fixed_len()
@@ -562,6 +590,14 @@ mod tests {
        );
    }

+    #[test]
+    fn ssz_encode_option_u8() {
+        let opt: Option<u8> = None;
+        assert_eq!(opt.as_ssz_bytes(), vec![0]);
+        let opt: Option<u8> = Some(2);
+        assert_eq!(opt.as_ssz_bytes(), vec![1, 2]);
+    }
+
    #[test]
    fn ssz_encode_bool() {
        assert_eq!(true.as_ssz_bytes(), vec![1]);

@@ -22,6 +22,13 @@ mod round_trip {
        round_trip(items);
    }

+    #[test]
+    fn option_u16() {
+        let items: Vec<Option<u16>> = vec![None, Some(2u16)];
+
+        round_trip(items);
+    }
+
    #[test]
    fn u8_array_4() {
        let items: Vec<[u8; 4]> = vec![[0, 0, 0, 0], [1, 0, 0, 0], [1, 2, 3, 4], [1, 2, 0, 4]];
@@ -46,6 +53,17 @@ mod round_trip {
        round_trip(items);
    }

+    #[test]
+    fn option_vec_h256() {
+        let items: Vec<Option<Vec<H256>>> = vec![
+            None,
+            Some(vec![]),
+            Some(vec![H256::zero(), H256::from([1; 32]), H256::random()]),
+        ];
+
+        round_trip(items);
+    }
+
    #[test]
    fn vec_u16() {
        let items: Vec<Vec<u16>> = vec![
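Note: as the `ssz_encode_option_u8` test above shows, `Option<T>` is treated as an SSZ union: a one-byte selector (0 for `None`, 1 for `Some`) followed by the encoding of the inner value. A small sketch of the resulting bytes for `Option<u16>`; the crate import path and the helper name are assumptions, not part of the change.

    use ssz::{Decode, Encode};

    fn option_u16_bytes() {
        let none: Option<u16> = None;
        let some: Option<u16> = Some(2);

        // Selector byte only.
        assert_eq!(none.as_ssz_bytes(), vec![0]);
        // Selector byte followed by the little-endian u16 body.
        assert_eq!(some.as_ssz_bytes(), vec![1, 2, 0]);

        // Decoding reverses the process and rejects unknown selectors.
        assert_eq!(Option::<u16>::from_ssz_bytes(&[1, 2, 0]).unwrap(), Some(2));
        assert!(Option::<u16>::from_ssz_bytes(&[2]).is_err());
    }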
@@ -1,6 +1,7 @@
 use crate::{
    per_block_processing, per_epoch_processing::EpochProcessingSummary, per_slot_processing,
-    BlockProcessingError, BlockSignatureStrategy, SlotProcessingError, VerifyBlockRoot,
+    BlockProcessingError, BlockSignatureStrategy, ConsensusContext, SlotProcessingError,
+    VerifyBlockRoot,
 };
 use std::marker::PhantomData;
 use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot};
@@ -254,12 +255,16 @@ where
                VerifyBlockRoot::False
            }
        });
+        // Proposer index was already checked when this block was originally processed, we
+        // can omit recomputing it during replay.
+        let mut ctxt = ConsensusContext::new(block.slot())
+            .set_proposer_index(block.message().proposer_index());
        per_block_processing(
            &mut self.state,
            block,
-            None,
            self.block_sig_strategy,
            verify_block_root,
+            &mut ctxt,
            self.spec,
        )
        .map_err(BlockReplayError::from)?;

@@ -2,12 +2,14 @@ use eth2_hashing::hash;
 use int_to_bytes::int_to_bytes32;
 use merkle_proof::{MerkleTree, MerkleTreeError};
 use safe_arith::SafeArith;
-use types::Hash256;
+use types::{DepositTreeSnapshot, FinalizedExecutionBlock, Hash256};

 /// Emulates the eth1 deposit contract merkle tree.
+#[derive(PartialEq)]
 pub struct DepositDataTree {
    tree: MerkleTree,
    mix_in_length: usize,
+    finalized_execution_block: Option<FinalizedExecutionBlock>,
    depth: usize,
 }

@@ -17,6 +19,7 @@ impl DepositDataTree {
        Self {
            tree: MerkleTree::create(leaves, depth),
            mix_in_length,
+            finalized_execution_block: None,
            depth,
        }
    }
@@ -38,10 +41,10 @@ impl DepositDataTree {
    ///
    /// The Merkle proof is in "bottom-up" order, starting with a leaf node
    /// and moving up the tree. Its length will be exactly equal to `depth + 1`.
-    pub fn generate_proof(&self, index: usize) -> (Hash256, Vec<Hash256>) {
-        let (root, mut proof) = self.tree.generate_proof(index, self.depth);
+    pub fn generate_proof(&self, index: usize) -> Result<(Hash256, Vec<Hash256>), MerkleTreeError> {
+        let (root, mut proof) = self.tree.generate_proof(index, self.depth)?;
        proof.push(Hash256::from_slice(&self.length_bytes()));
-        (root, proof)
+        Ok((root, proof))
    }

    /// Add a deposit to the merkle tree.
@@ -50,4 +53,50 @@ impl DepositDataTree {
        self.mix_in_length.safe_add_assign(1)?;
        Ok(())
    }
+
+    /// Finalize deposits up to `finalized_execution_block.deposit_count`
+    pub fn finalize(
+        &mut self,
+        finalized_execution_block: FinalizedExecutionBlock,
+    ) -> Result<(), MerkleTreeError> {
+        self.tree
+            .finalize_deposits(finalized_execution_block.deposit_count as usize, self.depth)?;
+        self.finalized_execution_block = Some(finalized_execution_block);
+        Ok(())
+    }
+
+    /// Get snapshot of finalized deposit tree (if tree is finalized)
+    pub fn get_snapshot(&self) -> Option<DepositTreeSnapshot> {
+        let finalized_execution_block = self.finalized_execution_block.as_ref()?;
+        Some(DepositTreeSnapshot {
+            finalized: self.tree.get_finalized_hashes(),
+            deposit_root: finalized_execution_block.deposit_root,
+            deposit_count: finalized_execution_block.deposit_count,
+            execution_block_hash: finalized_execution_block.block_hash,
+            execution_block_height: finalized_execution_block.block_height,
+        })
+    }
+
+    /// Create a new Merkle tree from a snapshot
+    pub fn from_snapshot(
+        snapshot: &DepositTreeSnapshot,
+        depth: usize,
+    ) -> Result<Self, MerkleTreeError> {
+        Ok(Self {
+            tree: MerkleTree::from_finalized_snapshot(
+                &snapshot.finalized,
+                snapshot.deposit_count as usize,
+                depth,
+            )?,
+            mix_in_length: snapshot.deposit_count as usize,
+            finalized_execution_block: Some(snapshot.into()),
+            depth,
+        })
+    }
+
+    #[allow(dead_code)]
+    pub fn print_tree(&self) {
+        self.tree.print_node(0);
+        println!("========================================================");
+    }
 }
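Note: a rough sketch of how the snapshot plumbing above might be used once an execution block is finalized. The function lives alongside `DepositDataTree` only for illustration; the caller supplying the `FinalizedExecutionBlock` is an assumption, and everything else is taken from the methods shown in this hunk.

    use types::{DepositTreeSnapshot, FinalizedExecutionBlock};

    fn snapshot_round_trip(
        tree: &mut DepositDataTree,
        finalized_block: FinalizedExecutionBlock,
        depth: usize,
    ) -> Result<DepositDataTree, MerkleTreeError> {
        // Collapse everything up to `finalized_block.deposit_count` into Finalized nodes.
        tree.finalize(finalized_block)?;

        // The snapshot carries only the finalized subtree hashes plus the execution
        // block metadata, not every historical deposit leaf.
        let snapshot: DepositTreeSnapshot = tree.get_snapshot().expect("tree was finalized");

        // A fresh tree rebuilt from the snapshot can keep accepting new deposits.
        DepositDataTree::from_snapshot(&snapshot, depth)
    }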
@@ -1,9 +1,13 @@
-use crate::common::{decrease_balance, increase_balance, initiate_validator_exit};
+use crate::{
+    common::{decrease_balance, increase_balance, initiate_validator_exit},
+    per_block_processing::errors::BlockProcessingError,
+    ConsensusContext,
+};
 use safe_arith::SafeArith;
 use std::cmp;
 use types::{
    consts::altair::{PROPOSER_WEIGHT, WEIGHT_DENOMINATOR},
-    BeaconStateError as Error, *,
+    *,
 };

 /// Slash the validator with index `slashed_index`.
@@ -11,8 +15,9 @@ pub fn slash_validator<T: EthSpec>(
    state: &mut BeaconState<T>,
    slashed_index: usize,
    opt_whistleblower_index: Option<usize>,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
-) -> Result<(), Error> {
+) -> Result<(), BlockProcessingError> {
    let epoch = state.current_epoch();

    initiate_validator_exit(state, slashed_index, spec)?;
@@ -39,7 +44,7 @@ pub fn slash_validator<T: EthSpec>(
    )?;

    // Apply proposer and whistleblower rewards
-    let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)?;
+    let proposer_index = ctxt.get_proposer_index(state, spec)? as usize;
    let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index);
    let whistleblower_reward =
        validator_effective_balance.safe_div(spec.whistleblower_reward_quotient)?;
@@ -55,7 +60,7 @@ pub fn slash_validator<T: EthSpec>(

    // Ensure the whistleblower index is in the validator registry.
    if state.validators().get(whistleblower_index).is_none() {
-        return Err(BeaconStateError::UnknownValidator(whistleblower_index));
+        return Err(BeaconStateError::UnknownValidator(whistleblower_index).into());
    }

    increase_balance(state, proposer_index, proposer_reward)?;

consensus/state_processing/src/consensus_context.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
+use std::marker::PhantomData;
+use tree_hash::TreeHash;
+use types::{
+    BeaconState, BeaconStateError, ChainSpec, EthSpec, ExecPayload, Hash256, SignedBeaconBlock,
+    Slot,
+};
+
+#[derive(Debug)]
+pub struct ConsensusContext<T: EthSpec> {
+    /// Slot to act as an identifier/safeguard
+    slot: Slot,
+    /// Proposer index of the block at `slot`.
+    proposer_index: Option<u64>,
+    /// Block root of the block at `slot`.
+    current_block_root: Option<Hash256>,
+    _phantom: PhantomData<T>,
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub enum ContextError {
+    BeaconState(BeaconStateError),
+    SlotMismatch { slot: Slot, expected: Slot },
+}
+
+impl From<BeaconStateError> for ContextError {
+    fn from(e: BeaconStateError) -> Self {
+        Self::BeaconState(e)
+    }
+}
+
+impl<T: EthSpec> ConsensusContext<T> {
+    pub fn new(slot: Slot) -> Self {
+        Self {
+            slot,
+            proposer_index: None,
+            current_block_root: None,
+            _phantom: PhantomData,
+        }
+    }
+
+    pub fn set_proposer_index(mut self, proposer_index: u64) -> Self {
+        self.proposer_index = Some(proposer_index);
+        self
+    }
+
+    pub fn get_proposer_index(
+        &mut self,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Result<u64, ContextError> {
+        self.check_slot(state.slot())?;
+
+        if let Some(proposer_index) = self.proposer_index {
+            return Ok(proposer_index);
+        }
+
+        let proposer_index = state.get_beacon_proposer_index(self.slot, spec)? as u64;
+        self.proposer_index = Some(proposer_index);
+        Ok(proposer_index)
+    }
+
+    pub fn set_current_block_root(mut self, block_root: Hash256) -> Self {
+        self.current_block_root = Some(block_root);
+        self
+    }
+
+    pub fn get_current_block_root<Payload: ExecPayload<T>>(
+        &mut self,
+        block: &SignedBeaconBlock<T, Payload>,
+    ) -> Result<Hash256, ContextError> {
+        self.check_slot(block.slot())?;
+
+        if let Some(current_block_root) = self.current_block_root {
+            return Ok(current_block_root);
+        }
+
+        let current_block_root = block.message().tree_hash_root();
+        self.current_block_root = Some(current_block_root);
+        Ok(current_block_root)
+    }
+
+    fn check_slot(&self, slot: Slot) -> Result<(), ContextError> {
+        if slot == self.slot {
+            Ok(())
+        } else {
+            Err(ContextError::SlotMismatch {
+                slot,
+                expected: self.slot,
+            })
+        }
+    }
+}

@@ -18,6 +18,7 @@ mod metrics;

 pub mod block_replayer;
 pub mod common;
+pub mod consensus_context;
 pub mod genesis;
 pub mod per_block_processing;
 pub mod per_epoch_processing;
@@ -27,6 +28,7 @@ pub mod upgrade;
 pub mod verify_operation;

 pub use block_replayer::{BlockReplayError, BlockReplayer, StateRootStrategy};
+pub use consensus_context::{ConsensusContext, ContextError};
 pub use genesis::{
    eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state,
    process_activations,

@@ -1,3 +1,4 @@
+use crate::consensus_context::ConsensusContext;
 use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid};
 use rayon::prelude::*;
 use safe_arith::{ArithError, SafeArith};
@@ -92,9 +93,9 @@ pub enum VerifyBlockRoot {
 pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
    state: &mut BeaconState<T>,
    signed_block: &SignedBeaconBlock<T, Payload>,
-    block_root: Option<Hash256>,
    block_signature_strategy: BlockSignatureStrategy,
    verify_block_root: VerifyBlockRoot,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
    let block = signed_block.message();
@@ -112,6 +113,8 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
    let verify_signatures = match block_signature_strategy {
        BlockSignatureStrategy::VerifyBulk => {
            // Verify all signatures in the block at once.
+            let block_root = Some(ctxt.get_current_block_root(signed_block)?);
+            let proposer_index = Some(ctxt.get_proposer_index(state, spec)?);
            block_verify!(
                BlockSignatureVerifier::verify_entire_block(
                    state,
@@ -119,6 +122,7 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
                    |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned),
                    signed_block,
                    block_root,
+                    proposer_index,
                    spec
                )
                .is_ok(),
@@ -135,11 +139,12 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
        state,
        block.temporary_block_header(),
        verify_block_root,
+        ctxt,
        spec,
    )?;

    if verify_signatures.is_true() {
-        verify_block_signature(state, signed_block, block_root, spec)?;
+        verify_block_signature(state, signed_block, ctxt, spec)?;
    }

    let verify_randao = if let BlockSignatureStrategy::VerifyRandao = block_signature_strategy {
@@ -159,9 +164,9 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
        process_execution_payload::<T, Payload>(state, payload, spec)?;
    }

-    process_randao(state, block, verify_randao, spec)?;
+    process_randao(state, block, verify_randao, ctxt, spec)?;
    process_eth1_data(state, block.body().eth1_data())?;
-    process_operations(state, block.body(), proposer_index, verify_signatures, spec)?;
+    process_operations(state, block.body(), verify_signatures, ctxt, spec)?;

    if let Ok(sync_aggregate) = block.body().sync_aggregate() {
        process_sync_aggregate(
@@ -183,6 +188,7 @@ pub fn process_block_header<T: EthSpec>(
    state: &mut BeaconState<T>,
    block_header: BeaconBlockHeader,
    verify_block_root: VerifyBlockRoot,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<u64, BlockOperationError<HeaderInvalid>> {
    // Verify that the slots match
@@ -201,8 +207,8 @@ pub fn process_block_header<T: EthSpec>(
    );

    // Verify that proposer index is the correct index
-    let proposer_index = block_header.proposer_index as usize;
-    let state_proposer_index = state.get_beacon_proposer_index(block_header.slot, spec)?;
+    let proposer_index = block_header.proposer_index;
+    let state_proposer_index = ctxt.get_proposer_index(state, spec)?;
    verify!(
        proposer_index == state_proposer_index,
        HeaderInvalid::ProposerIndexMismatch {
@@ -226,11 +232,11 @@ pub fn process_block_header<T: EthSpec>(

    // Verify proposer is not slashed
    verify!(
-        !state.get_validator(proposer_index)?.slashed,
+        !state.get_validator(proposer_index as usize)?.slashed,
        HeaderInvalid::ProposerSlashed(proposer_index)
    );

-    Ok(proposer_index as u64)
+    Ok(proposer_index)
 }

 /// Verifies the signature of a block.
@@ -239,15 +245,18 @@ pub fn process_block_header<T: EthSpec>(
 pub fn verify_block_signature<T: EthSpec, Payload: AbstractExecPayload<T>>(
    state: &BeaconState<T>,
    block: &SignedBeaconBlock<T, Payload>,
-    block_root: Option<Hash256>,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<(), BlockOperationError<HeaderInvalid>> {
+    let block_root = Some(ctxt.get_current_block_root(block)?);
+    let proposer_index = Some(ctxt.get_proposer_index(state, spec)?);
    verify!(
        block_proposal_signature_set(
            state,
            |i| get_pubkey_from_state(state, i),
            block,
            block_root,
+            proposer_index,
            spec
        )?
        .verify(),
@@ -263,12 +272,21 @@ pub fn process_randao<T: EthSpec, Payload: AbstractExecPayload<T>>(
    state: &mut BeaconState<T>,
    block: BeaconBlockRef<'_, T, Payload>,
    verify_signatures: VerifySignatures,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
    if verify_signatures.is_true() {
        // Verify RANDAO reveal signature.
+        let proposer_index = ctxt.get_proposer_index(state, spec)?;
        block_verify!(
-            randao_signature_set(state, |i| get_pubkey_from_state(state, i), block, spec)?.verify(),
+            randao_signature_set(
+                state,
+                |i| get_pubkey_from_state(state, i),
+                block,
+                Some(proposer_index),
+                spec
+            )?
+            .verify(),
            BlockProcessingError::RandaoSignatureInvalid
        );
    }
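Note: a minimal sketch of how a caller might drive the new `ConsensusContext` through `per_block_processing`, mirroring what the tests at the bottom of this diff do. The crate paths (`state_processing`, `types`) and the wrapper function are assumptions for illustration only.

    use state_processing::{
        per_block_processing, BlockProcessingError, BlockSignatureStrategy, ConsensusContext,
        VerifyBlockRoot,
    };
    use types::{BeaconState, ChainSpec, EthSpec, SignedBeaconBlock};

    fn process_one_block<T: EthSpec>(
        state: &mut BeaconState<T>,
        block: &SignedBeaconBlock<T>,
        spec: &ChainSpec,
    ) -> Result<(), BlockProcessingError> {
        // One context per block: it caches the proposer index and block root the first
        // time they are requested, so header checks, RANDAO verification and operation
        // processing all share a single shuffling computation.
        let mut ctxt = ConsensusContext::new(block.slot());

        per_block_processing(
            state,
            block,
            BlockSignatureStrategy::VerifyBulk,
            VerifyBlockRoot::True,
            &mut ctxt,
            spec,
        )
    }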
@@ -123,10 +123,11 @@ where
        decompressor: D,
        block: &'a SignedBeaconBlock<T, Payload>,
        block_root: Option<Hash256>,
+        verified_proposer_index: Option<u64>,
        spec: &'a ChainSpec,
    ) -> Result<()> {
        let mut verifier = Self::new(state, get_pubkey, decompressor, spec);
-        verifier.include_all_signatures(block, block_root)?;
+        verifier.include_all_signatures(block, block_root, verified_proposer_index)?;
        verifier.verify()
    }

@@ -135,9 +136,10 @@ where
        &mut self,
        block: &'a SignedBeaconBlock<T, Payload>,
        block_root: Option<Hash256>,
+        verified_proposer_index: Option<u64>,
    ) -> Result<()> {
-        self.include_block_proposal(block, block_root)?;
-        self.include_all_signatures_except_proposal(block)?;
+        self.include_block_proposal(block, block_root, verified_proposer_index)?;
+        self.include_all_signatures_except_proposal(block, verified_proposer_index)?;

        Ok(())
    }
@@ -147,8 +149,9 @@ where
    pub fn include_all_signatures_except_proposal<Payload: AbstractExecPayload<T>>(
        &mut self,
        block: &'a SignedBeaconBlock<T, Payload>,
+        verified_proposer_index: Option<u64>,
    ) -> Result<()> {
-        self.include_randao_reveal(block)?;
+        self.include_randao_reveal(block, verified_proposer_index)?;
        self.include_proposer_slashings(block)?;
        self.include_attester_slashings(block)?;
        self.include_attestations(block)?;
@@ -164,12 +167,14 @@ where
        &mut self,
        block: &'a SignedBeaconBlock<T, Payload>,
        block_root: Option<Hash256>,
+        verified_proposer_index: Option<u64>,
    ) -> Result<()> {
        let set = block_proposal_signature_set(
            self.state,
            self.get_pubkey.clone(),
            block,
            block_root,
+            verified_proposer_index,
            self.spec,
        )?;
        self.sets.push(set);
@@ -180,11 +185,13 @@ where
    pub fn include_randao_reveal<Payload: AbstractExecPayload<T>>(
        &mut self,
        block: &'a SignedBeaconBlock<T, Payload>,
+        verified_proposer_index: Option<u64>,
    ) -> Result<()> {
        let set = randao_signature_set(
            self.state,
            self.get_pubkey.clone(),
            block.message(),
+            verified_proposer_index,
            self.spec,
        )?;
        self.sets.push(set);

@@ -1,4 +1,5 @@
 use super::signature_sets::Error as SignatureSetError;
+use crate::ContextError;
 use merkle_proof::MerkleTreeError;
 use safe_arith::ArithError;
 use ssz::DecodeError;
@@ -72,6 +73,7 @@ pub enum BlockProcessingError {
        found: u64,
    },
    ExecutionInvalid,
+    ConsensusContext(ContextError),
    BlobVersionHashMismatch,
    /// The number of commitments in blob transactions in the payload does not match the number
    /// of commitments in the block.
@@ -122,6 +124,12 @@ impl From<SyncAggregateInvalid> for BlockProcessingError {
    }
 }

+impl From<ContextError> for BlockProcessingError {
+    fn from(e: ContextError) -> Self {
+        BlockProcessingError::ConsensusContext(e)
+    }
+}
+
 impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
    fn from(e: BlockOperationError<HeaderInvalid>) -> BlockProcessingError {
        match e {
@@ -129,6 +137,7 @@ impl From<BlockOperationError<HeaderInvalid>> for BlockProcessingError {
            BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
            BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e),
            BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e),
+            BlockOperationError::ConsensusContext(e) => BlockProcessingError::ConsensusContext(e),
            BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e),
        }
    }
@@ -156,6 +165,7 @@ macro_rules! impl_into_block_processing_error_with_index {
            BlockOperationError::BeaconStateError(e) => BlockProcessingError::BeaconStateError(e),
            BlockOperationError::SignatureSetError(e) => BlockProcessingError::SignatureSetError(e),
            BlockOperationError::SszTypesError(e) => BlockProcessingError::SszTypesError(e),
+            BlockOperationError::ConsensusContext(e) => BlockProcessingError::ConsensusContext(e),
            BlockOperationError::ArithError(e) => BlockProcessingError::ArithError(e),
        }
    }
@@ -187,6 +197,7 @@ pub enum BlockOperationError<T> {
    BeaconStateError(BeaconStateError),
    SignatureSetError(SignatureSetError),
    SszTypesError(ssz_types::Error),
+    ConsensusContext(ContextError),
    ArithError(ArithError),
 }

@@ -219,6 +230,12 @@ impl<T> From<ArithError> for BlockOperationError<T> {
    }
 }

+impl<T> From<ContextError> for BlockOperationError<T> {
+    fn from(e: ContextError) -> Self {
+        BlockOperationError::ConsensusContext(e)
+    }
+}
+
 #[derive(Debug, PartialEq, Clone)]
 pub enum HeaderInvalid {
    ProposalSignatureInvalid,
@@ -228,14 +245,14 @@ pub enum HeaderInvalid {
        block_slot: Slot,
    },
    ProposerIndexMismatch {
-        block_proposer_index: usize,
-        state_proposer_index: usize,
+        block_proposer_index: u64,
+        state_proposer_index: u64,
    },
    ParentBlockRootMismatch {
        state: Hash256,
        block: Hash256,
    },
-    ProposerSlashed(usize),
+    ProposerSlashed(u64),
 }

 #[derive(Debug, PartialEq, Clone)]
@@ -330,6 +347,7 @@ impl From<BlockOperationError<IndexedAttestationInvalid>>
            BlockOperationError::BeaconStateError(e) => BlockOperationError::BeaconStateError(e),
            BlockOperationError::SignatureSetError(e) => BlockOperationError::SignatureSetError(e),
            BlockOperationError::SszTypesError(e) => BlockOperationError::SszTypesError(e),
+            BlockOperationError::ConsensusContext(e) => BlockOperationError::ConsensusContext(e),
            BlockOperationError::ArithError(e) => BlockOperationError::ArithError(e),
        }
    }

@@ -12,23 +12,25 @@ use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_
 pub fn process_operations<'a, T: EthSpec, Payload: AbstractExecPayload<T>>(
    state: &mut BeaconState<T>,
    block_body: BeaconBlockBodyRef<'a, T, Payload>,
-    proposer_index: u64,
    verify_signatures: VerifySignatures,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
    process_proposer_slashings(
        state,
        block_body.proposer_slashings(),
        verify_signatures,
+        ctxt,
        spec,
    )?;
    process_attester_slashings(
        state,
        block_body.attester_slashings(),
        verify_signatures,
+        ctxt,
        spec,
    )?;
-    process_attestations(state, block_body, proposer_index, verify_signatures, spec)?;
+    process_attestations(state, block_body, verify_signatures, ctxt, spec)?;
    process_deposits(state, block_body.deposits(), spec)?;
    process_exits(state, block_body.voluntary_exits(), verify_signatures, spec)?;
    Ok(())
@@ -45,12 +47,13 @@ pub mod base {
        state: &mut BeaconState<T>,
        attestations: &[Attestation<T>],
        verify_signatures: VerifySignatures,
+        ctxt: &mut ConsensusContext<T>,
        spec: &ChainSpec,
    ) -> Result<(), BlockProcessingError> {
        // Ensure the previous epoch cache exists.
        state.build_committee_cache(RelativeEpoch::Previous, spec)?;

-        let proposer_index = state.get_beacon_proposer_index(state.slot(), spec)? as u64;
+        let proposer_index = ctxt.get_proposer_index(state, spec)?;

        // Verify and apply each attestation.
        for (i, attestation) in attestations.iter().enumerate() {
@@ -87,10 +90,11 @@ pub mod altair {
    pub fn process_attestations<T: EthSpec>(
        state: &mut BeaconState<T>,
        attestations: &[Attestation<T>],
-        proposer_index: u64,
        verify_signatures: VerifySignatures,
+        ctxt: &mut ConsensusContext<T>,
        spec: &ChainSpec,
    ) -> Result<(), BlockProcessingError> {
+        let proposer_index = ctxt.get_proposer_index(state, spec)?;
        attestations
            .iter()
            .enumerate()
@@ -170,6 +174,7 @@ pub fn process_proposer_slashings<T: EthSpec>(
    state: &mut BeaconState<T>,
    proposer_slashings: &[ProposerSlashing],
    verify_signatures: VerifySignatures,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
    // Verify and apply proposer slashings in series.
@@ -186,6 +191,7 @@ pub fn process_proposer_slashings<T: EthSpec>(
            state,
            proposer_slashing.signed_header_1.message.proposer_index as usize,
            None,
+            ctxt,
            spec,
        )?;

@@ -201,6 +207,7 @@ pub fn process_attester_slashings<T: EthSpec>(
    state: &mut BeaconState<T>,
    attester_slashings: &[AttesterSlashing<T>],
    verify_signatures: VerifySignatures,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
    for (i, attester_slashing) in attester_slashings.iter().enumerate() {
@@ -211,7 +218,7 @@ pub fn process_attester_slashings<T: EthSpec>(
            get_slashable_indices(state, attester_slashing).map_err(|e| e.into_with_index(i))?;

        for i in slashable_indices {
-            slash_validator(state, i as usize, None, spec)?;
+            slash_validator(state, i as usize, None, ctxt, spec)?;
        }
    }

@@ -222,13 +229,19 @@ pub fn process_attester_slashings<T: EthSpec>(
 pub fn process_attestations<'a, T: EthSpec, Payload: AbstractExecPayload<T>>(
    state: &mut BeaconState<T>,
    block_body: BeaconBlockBodyRef<'a, T, Payload>,
-    proposer_index: u64,
    verify_signatures: VerifySignatures,
+    ctxt: &mut ConsensusContext<T>,
    spec: &ChainSpec,
 ) -> Result<(), BlockProcessingError> {
    match block_body {
        BeaconBlockBodyRef::Base(_) => {
-            base::process_attestations(state, block_body.attestations(), verify_signatures, spec)?;
+            base::process_attestations(
+                state,
+                block_body.attestations(),
+                verify_signatures,
+                ctxt,
+                spec,
+            )?;
        }
        BeaconBlockBodyRef::Altair(_)
        | BeaconBlockBodyRef::Merge(_)
@@ -237,8 +250,8 @@ pub fn process_attestations<'a, T: EthSpec, Payload: AbstractExecPayload<T>>(
            altair::process_attestations(
                state,
                block_body.attestations(),
-                proposer_index,
                verify_signatures,
+                ctxt,
                spec,
            )?;
        }

@@ -76,6 +76,7 @@ pub fn block_proposal_signature_set<'a, T, F, Payload: AbstractExecPayload<T>>(
    get_pubkey: F,
    signed_block: &'a SignedBeaconBlock<T, Payload>,
    block_root: Option<Hash256>,
+    verified_proposer_index: Option<u64>,
    spec: &'a ChainSpec,
 ) -> Result<SignatureSet<'a>>
 where
@@ -83,8 +84,12 @@ where
    F: Fn(usize) -> Option<Cow<'a, PublicKey>>,
 {
    let block = signed_block.message();
-    let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)? as u64;

+    let proposer_index = if let Some(proposer_index) = verified_proposer_index {
+        proposer_index
+    } else {
+        state.get_beacon_proposer_index(block.slot(), spec)? as u64
+    };
    if proposer_index != block.proposer_index() {
        return Err(Error::IncorrectBlockProposer {
            block: block.proposer_index(),
@@ -156,13 +161,18 @@ pub fn randao_signature_set<'a, T, F, Payload: AbstractExecPayload<T>>(
    state: &'a BeaconState<T>,
    get_pubkey: F,
    block: BeaconBlockRef<'a, T, Payload>,
+    verified_proposer_index: Option<u64>,
    spec: &'a ChainSpec,
 ) -> Result<SignatureSet<'a>>
 where
    T: EthSpec,
    F: Fn(usize) -> Option<Cow<'a, PublicKey>>,
 {
-    let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?;
+    let proposer_index = if let Some(proposer_index) = verified_proposer_index {
+        proposer_index
+    } else {
+        state.get_beacon_proposer_index(block.slot(), spec)? as u64
+    };

    let domain = spec.get_domain(
        block.slot().epoch(T::slots_per_epoch()),
@@ -178,7 +188,7 @@ where

    Ok(SignatureSet::single_pubkey(
        block.body().randao_reveal(),
-        get_pubkey(proposer_index).ok_or(Error::ValidatorUnknown(proposer_index as u64))?,
+        get_pubkey(proposer_index as usize).ok_or(Error::ValidatorUnknown(proposer_index))?,
        message,
    ))
 }
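Note: the signature-set constructors above only fall back to `get_beacon_proposer_index` (which requires computing the shuffling) when the caller has not already verified a proposer index. A hedged fragment showing the call-site difference; `state`, `block`, `spec`, `ctxt` and `get_pubkey_from_state` are assumed to be in scope exactly as in the code shown above.

    // Without a context: the proposer index is recomputed from the state.
    let set = randao_signature_set(
        state,
        |i| get_pubkey_from_state(state, i),
        block.message(),
        None,
        spec,
    )?;

    // With a context: reuse the index already verified during header processing.
    let proposer_index = ctxt.get_proposer_index(state, spec)?;
    let set = randao_signature_set(
        state,
        |i| get_pubkey_from_state(state, i),
        block.message(),
        Some(proposer_index),
        spec,
    )?;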
||||||
|
@ -8,7 +8,7 @@ use crate::per_block_processing::errors::{
 };
 use crate::{
     per_block_processing::{process_operations, verify_exit::verify_exit},
-    BlockSignatureStrategy, VerifyBlockRoot, VerifySignatures,
+    BlockSignatureStrategy, ConsensusContext, VerifyBlockRoot, VerifySignatures,
 };
 use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
 use lazy_static::lazy_static;
@ -67,12 +67,13 @@ async fn valid_block_ok() {
         .make_block_return_pre_state(state, slot + Slot::new(1))
         .await;

+    let mut ctxt = ConsensusContext::new(block.slot());
     let result = per_block_processing(
         &mut state,
         &block,
-        None,
         BlockSignatureStrategy::VerifyIndividual,
         VerifyBlockRoot::True,
+        &mut ctxt,
         &spec,
     );

@ -91,12 +92,13 @@ async fn invalid_block_header_state_slot() {
     let (mut block, signature) = signed_block.deconstruct();
     *block.slot_mut() = slot + Slot::new(1);

+    let mut ctxt = ConsensusContext::new(block.slot());
     let result = per_block_processing(
         &mut state,
         &SignedBeaconBlock::from_block(block, signature),
-        None,
         BlockSignatureStrategy::VerifyIndividual,
         VerifyBlockRoot::True,
+        &mut ctxt,
         &spec,
     );

@ -122,12 +124,13 @@ async fn invalid_parent_block_root() {
     let (mut block, signature) = signed_block.deconstruct();
     *block.parent_root_mut() = Hash256::from([0xAA; 32]);

+    let mut ctxt = ConsensusContext::new(block.slot());
     let result = per_block_processing(
         &mut state,
         &SignedBeaconBlock::from_block(block, signature),
-        None,
         BlockSignatureStrategy::VerifyIndividual,
         VerifyBlockRoot::True,
+        &mut ctxt,
         &spec,
     );

@ -154,12 +157,13 @@ async fn invalid_block_signature() {
         .await;
     let (block, _) = signed_block.deconstruct();

+    let mut ctxt = ConsensusContext::new(block.slot());
     let result = per_block_processing(
         &mut state,
         &SignedBeaconBlock::from_block(block, Signature::empty()),
-        None,
         BlockSignatureStrategy::VerifyIndividual,
         VerifyBlockRoot::True,
+        &mut ctxt,
         &spec,
     );

@ -186,12 +190,13 @@ async fn invalid_randao_reveal_signature() {
     })
     .await;

+    let mut ctxt = ConsensusContext::new(signed_block.slot());
     let result = per_block_processing(
         &mut state,
         &signed_block,
-        None,
         BlockSignatureStrategy::VerifyIndividual,
         VerifyBlockRoot::True,
+        &mut ctxt,
         &spec,
     );

@ -386,11 +391,12 @@ async fn invalid_attestation_no_committee_for_index() {
     head_block.to_mut().body_mut().attestations_mut()[0]
         .data
         .index += 1;
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attestations(
         &mut state,
         head_block.body(),
-        head_block.proposer_index(),
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -424,11 +430,12 @@ async fn invalid_attestation_wrong_justified_checkpoint() {
         .data
         .source = new_justified_checkpoint;

+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attestations(
         &mut state,
         head_block.body(),
-        head_block.proposer_index(),
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -463,11 +470,12 @@ async fn invalid_attestation_bad_aggregation_bitfield_len() {
     head_block.to_mut().body_mut().attestations_mut()[0].aggregation_bits =
         Bitfield::with_capacity(spec.target_committee_size).unwrap();

+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attestations(
         &mut state,
         head_block.body(),
-        head_block.proposer_index(),
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -495,11 +503,12 @@ async fn invalid_attestation_bad_signature() {
         .0;
     head_block.to_mut().body_mut().attestations_mut()[0].signature = AggregateSignature::empty();

+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attestations(
         &mut state,
         head_block.body(),
-        head_block.proposer_index(),
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );
     // Expecting BadSignature because we're signing with invalid secret_keys
@ -533,11 +542,12 @@ async fn invalid_attestation_included_too_early() {
         .data
         .slot = new_attesation_slot;

+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attestations(
         &mut state,
         head_block.body(),
-        head_block.proposer_index(),
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -575,11 +585,12 @@ async fn invalid_attestation_included_too_late() {
         .data
         .slot = new_attesation_slot;

+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attestations(
         &mut state,
         head_block.body(),
-        head_block.proposer_index(),
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );
     assert_eq!(
@ -613,11 +624,12 @@ async fn invalid_attestation_target_epoch_slot_mismatch() {
         .target
         .epoch += Epoch::new(1);

+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attestations(
         &mut state,
         head_block.body(),
-        head_block.proposer_index(),
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );
     assert_eq!(
@ -640,10 +652,12 @@ async fn valid_insert_attester_slashing() {
     let attester_slashing = harness.make_attester_slashing(vec![1, 2]);

     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attester_slashings(
         &mut state,
         &[attester_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -660,10 +674,12 @@ async fn invalid_attester_slashing_not_slashable() {
     attester_slashing.attestation_1 = attester_slashing.attestation_2.clone();

     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attester_slashings(
         &mut state,
         &[attester_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -686,10 +702,12 @@ async fn invalid_attester_slashing_1_invalid() {
     attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]);

     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attester_slashings(
         &mut state,
         &[attester_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -715,10 +733,12 @@ async fn invalid_attester_slashing_2_invalid() {
     attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]);

     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_attester_slashings(
         &mut state,
         &[attester_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -741,10 +761,12 @@ async fn valid_insert_proposer_slashing() {
     let harness = get_harness::<MainnetEthSpec>(EPOCH_OFFSET, VALIDATOR_COUNT).await;
     let proposer_slashing = harness.make_proposer_slashing(1);
     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_proposer_slashings(
         &mut state,
         &[proposer_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );
     // Expecting Ok(_) because we inserted a valid proposer slashing
@ -760,10 +782,12 @@ async fn invalid_proposer_slashing_proposals_identical() {
     proposer_slashing.signed_header_1.message = proposer_slashing.signed_header_2.message.clone();

     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_proposer_slashings(
         &mut state,
         &[proposer_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -787,10 +811,12 @@ async fn invalid_proposer_slashing_proposer_unknown() {
     proposer_slashing.signed_header_2.message.proposer_index = 3_141_592;

     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_proposer_slashings(
         &mut state,
         &[proposer_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -811,10 +837,12 @@ async fn invalid_proposer_slashing_duplicate_slashing() {

     let proposer_slashing = harness.make_proposer_slashing(1);
     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result_1 = process_operations::process_proposer_slashings(
         &mut state,
         &[proposer_slashing.clone()],
         VerifySignatures::False,
+        &mut ctxt,
         &spec,
     );
     assert!(result_1.is_ok());
@ -823,6 +851,7 @@ async fn invalid_proposer_slashing_duplicate_slashing() {
         &mut state,
         &[proposer_slashing],
         VerifySignatures::False,
+        &mut ctxt,
         &spec,
     );
     // Expecting ProposerNotSlashable because we've already slashed the validator
@ -842,10 +871,12 @@ async fn invalid_bad_proposal_1_signature() {
     let mut proposer_slashing = harness.make_proposer_slashing(1);
     proposer_slashing.signed_header_1.signature = Signature::empty();
     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_proposer_slashings(
         &mut state,
         &[proposer_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -866,10 +897,12 @@ async fn invalid_bad_proposal_2_signature() {
     let mut proposer_slashing = harness.make_proposer_slashing(1);
     proposer_slashing.signed_header_2.signature = Signature::empty();
     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_proposer_slashings(
         &mut state,
         &[proposer_slashing],
         VerifySignatures::True,
+        &mut ctxt,
         &spec,
     );

@ -891,10 +924,12 @@ async fn invalid_proposer_slashing_proposal_epoch_mismatch() {
     proposer_slashing.signed_header_1.message.slot = Slot::new(0);
     proposer_slashing.signed_header_2.message.slot = Slot::new(128);
     let mut state = harness.get_current_state();
+    let mut ctxt = ConsensusContext::new(state.slot());
     let result = process_operations::process_proposer_slashings(
         &mut state,
         &[proposer_slashing],
         VerifySignatures::False,
+        &mut ctxt,
         &spec,
     );

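Across these tests the separate proposer-index argument disappears and a mutable `ConsensusContext` is threaded through instead, so per-block values such as the proposer index can be computed once and reused by every processing step. A minimal standalone sketch of that pattern, with toy types in place of the real state-processing API:

// Toy context: caches values that several verification steps need.
#[derive(Default)]
struct ConsensusContext {
    slot: u64,
    proposer_index: Option<u64>,
}

impl ConsensusContext {
    fn new(slot: u64) -> Self {
        Self { slot, ..Default::default() }
    }

    // Compute the proposer index on first use, then serve it from the cache.
    fn get_proposer_index(&mut self) -> u64 {
        *self.proposer_index.get_or_insert(self.slot % 32)
    }
}

fn process_attestations(ctxt: &mut ConsensusContext) -> Result<(), String> {
    let _proposer = ctxt.get_proposer_index(); // cached after the first call
    Ok(())
}

fn process_proposer_slashings(ctxt: &mut ConsensusContext) -> Result<(), String> {
    let _proposer = ctxt.get_proposer_index(); // reuses the cached value
    Ok(())
}

fn main() -> Result<(), String> {
    let mut ctxt = ConsensusContext::new(69);
    process_attestations(&mut ctxt)?;
    process_proposer_slashings(&mut ctxt)?;
    assert_eq!(ctxt.get_proposer_index(), 69 % 32);
    Ok(())
}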
@ -29,9 +29,7 @@ pub fn verify_deposit_signature(deposit_data: &DepositData, spec: &ChainSpec) ->
 /// Returns a `Some(validator index)` if a pubkey already exists in the `validators`,
 /// otherwise returns `None`.
 ///
-/// ## Errors
-///
-/// Errors if the state's `pubkey_cache` is not current.
+/// Builds the pubkey cache if it is not already built.
 pub fn get_existing_validator_index<T: EthSpec>(
     state: &mut BeaconState<T>,
     pub_key: &PublicKeyBytes,
@ -1,6 +1,6 @@
 [package]
 name = "types"
-version = "0.2.0"
+version = "0.2.1"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>"]
 edition = "2021"

@ -178,6 +178,9 @@ pub struct ChainSpec {
     pub attestation_subnet_count: u64,
     pub random_subnets_per_validator: u64,
     pub epochs_per_random_subnet_subscription: u64,
+    pub subnets_per_node: u8,
+    pub epochs_per_subnet_subscription: u64,
+    attestation_subnet_extra_bits: u8,

     /*
      * Application params
@ -467,6 +470,22 @@ impl ChainSpec {
         Hash256::from(domain)
     }

+    #[allow(clippy::integer_arithmetic)]
+    pub const fn attestation_subnet_prefix_bits(&self) -> u32 {
+        // maybe use log2 when stable https://github.com/rust-lang/rust/issues/70887
+
+        // NOTE: this line is here simply to guarantee that if self.attestation_subnet_count type
+        // is changed, a compiler warning will be raised. This code depends on the type being u64.
+        let attestation_subnet_count: u64 = self.attestation_subnet_count;
+        let attestation_subnet_count_bits = if attestation_subnet_count == 0 {
+            0
+        } else {
+            63 - attestation_subnet_count.leading_zeros()
+        };
+
+        self.attestation_subnet_extra_bits as u32 + attestation_subnet_count_bits
+    }
+
     /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification.
     pub fn mainnet() -> Self {
         Self {
@ -630,9 +649,12 @@ impl ChainSpec {
             attestation_propagation_slot_range: 32,
             attestation_subnet_count: 64,
             random_subnets_per_validator: 1,
+            subnets_per_node: 1,
             maximum_gossip_clock_disparity_millis: 500,
             target_aggregators_per_committee: 16,
             epochs_per_random_subnet_subscription: 256,
+            epochs_per_subnet_subscription: 256,
+            attestation_subnet_extra_bits: 6,

             /*
              * Application specific
@ -865,9 +887,12 @@ impl ChainSpec {
             attestation_propagation_slot_range: 32,
             attestation_subnet_count: 64,
             random_subnets_per_validator: 1,
+            subnets_per_node: 1,
             maximum_gossip_clock_disparity_millis: 500,
             target_aggregators_per_committee: 16,
             epochs_per_random_subnet_subscription: 256,
+            epochs_per_subnet_subscription: 256,
+            attestation_subnet_extra_bits: 6,

             /*
              * Application specific

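The new `attestation_subnet_prefix_bits` value is simply `attestation_subnet_extra_bits` plus the bit-width of `attestation_subnet_count` (floor of log2), computed with `leading_zeros` because an integer `log2` was not yet stable. A self-contained sketch of that arithmetic using the mainnet-style values above (64 subnets, 6 extra bits); this is not the Lighthouse type, just the calculation:

// Standalone sketch of the prefix-bit arithmetic.
fn attestation_subnet_prefix_bits(attestation_subnet_count: u64, extra_bits: u8) -> u32 {
    // floor(log2(count)), with 0 mapped to 0 to avoid underflow.
    let count_bits = if attestation_subnet_count == 0 {
        0
    } else {
        63 - attestation_subnet_count.leading_zeros()
    };
    extra_bits as u32 + count_bits
}

fn main() {
    // 64 subnets -> 6 bits, plus 6 extra bits -> a 12-bit node-id prefix.
    assert_eq!(attestation_subnet_prefix_bits(64, 6), 12);
    assert_eq!(attestation_subnet_prefix_bits(0, 6), 6);
}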
consensus/types/src/deposit_tree_snapshot.rs (new file, 83 lines)
@ -0,0 +1,83 @@
use crate::*;
use eth2_hashing::{hash32_concat, ZERO_HASHES};
use int_to_bytes::int_to_bytes32;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use test_random_derive::TestRandom;
use test_utils::TestRandom;
use DEPOSIT_TREE_DEPTH;

#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)]
pub struct FinalizedExecutionBlock {
    pub deposit_root: Hash256,
    pub deposit_count: u64,
    pub block_hash: Hash256,
    pub block_height: u64,
}

impl From<&DepositTreeSnapshot> for FinalizedExecutionBlock {
    fn from(snapshot: &DepositTreeSnapshot) -> Self {
        Self {
            deposit_root: snapshot.deposit_root,
            deposit_count: snapshot.deposit_count,
            block_hash: snapshot.execution_block_hash,
            block_height: snapshot.execution_block_height,
        }
    }
}

#[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)]
pub struct DepositTreeSnapshot {
    pub finalized: Vec<Hash256>,
    pub deposit_root: Hash256,
    pub deposit_count: u64,
    pub execution_block_hash: Hash256,
    pub execution_block_height: u64,
}

impl Default for DepositTreeSnapshot {
    fn default() -> Self {
        let mut result = Self {
            finalized: vec![],
            deposit_root: Hash256::default(),
            deposit_count: 0,
            execution_block_hash: Hash256::zero(),
            execution_block_height: 0,
        };
        // properly set the empty deposit root
        result.deposit_root = result.calculate_root().unwrap();
        result
    }
}

impl DepositTreeSnapshot {
    // Calculates the deposit tree root from the hashes in the snapshot
    pub fn calculate_root(&self) -> Option<Hash256> {
        let mut size = self.deposit_count;
        let mut index = self.finalized.len();
        let mut deposit_root = [0; 32];
        for height in 0..DEPOSIT_TREE_DEPTH {
            deposit_root = if (size & 1) == 1 {
                index = index.checked_sub(1)?;
                hash32_concat(self.finalized.get(index)?.as_bytes(), &deposit_root)
            } else {
                hash32_concat(&deposit_root, ZERO_HASHES.get(height)?)
            };
            size /= 2;
        }
        // add mix-in-length
        deposit_root = hash32_concat(&deposit_root, &int_to_bytes32(self.deposit_count));

        Some(Hash256::from_slice(&deposit_root))
    }
    pub fn is_valid(&self) -> bool {
        self.calculate_root()
            .map_or(false, |calculated| self.deposit_root == calculated)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    ssz_tests!(DepositTreeSnapshot);
}
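`calculate_root` folds the finalized sub-tree roots stored in the snapshot back into a full-depth Merkle root: at each height, an odd remaining size consumes one finalized hash on the left, an even size pads with the zero sub-tree on the right, and the deposit count is mixed in at the end. A standalone sketch of the same folding logic, with a stand-in `hash32_concat` and a constant zero pad (assumptions: the real code uses SHA-256 via eth2_hashing and a per-height ZERO_HASHES table):

// Stand-in two-to-one hash so the example runs without external crates.
// It is NOT the real hash; eth2_hashing uses SHA-256 here.
fn hash32_concat(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = a[i].wrapping_mul(31).wrapping_add(b[i]).rotate_left(3);
    }
    out
}

const DEPOSIT_TREE_DEPTH: usize = 32;

/// Fold finalized subtree roots into the full deposit-tree root.
fn calculate_root(finalized: &[[u8; 32]], deposit_count: u64) -> Option<[u8; 32]> {
    let zero = [0u8; 32]; // stand-in for the per-height ZERO_HASHES table
    let mut size = deposit_count;
    let mut index = finalized.len();
    let mut root = [0u8; 32];
    for _height in 0..DEPOSIT_TREE_DEPTH {
        root = if size & 1 == 1 {
            // Odd size: the left sibling at this height is a finalized root.
            index = index.checked_sub(1)?;
            hash32_concat(finalized.get(index)?, &root)
        } else {
            // Even size: pad on the right with the zero subtree for this height.
            hash32_concat(&root, &zero)
        };
        size /= 2;
    }
    // Mix in the leaf count, as the deposit contract does.
    let mut length = [0u8; 32];
    length[..8].copy_from_slice(&deposit_count.to_le_bytes());
    Some(hash32_concat(&root, &length))
}

fn main() {
    // Three deposits (binary 11): two finalized subtree roots are consumed.
    let finalized = [[1u8; 32], [2u8; 32]];
    assert!(calculate_root(&finalized, 3).is_some());
    // Not enough finalized hashes for the count -> None instead of a panic.
    assert!(calculate_root(&finalized[..1], 3).is_none());
}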
@ -35,6 +35,7 @@ pub mod contribution_and_proof;
 pub mod deposit;
 pub mod deposit_data;
 pub mod deposit_message;
+pub mod deposit_tree_snapshot;
 pub mod enr_fork_id;
 pub mod eth1_data;
 pub mod eth_spec;
@ -48,6 +49,9 @@ pub mod free_attestation;
 pub mod graffiti;
 pub mod historical_batch;
 pub mod indexed_attestation;
+pub mod light_client_bootstrap;
+pub mod light_client_optimistic_update;
+pub mod light_client_update;
 pub mod pending_attestation;
 pub mod proposer_preparation_data;
 pub mod proposer_slashing;
@ -122,6 +126,7 @@ pub use crate::contribution_and_proof::ContributionAndProof;
 pub use crate::deposit::{Deposit, DEPOSIT_TREE_DEPTH};
 pub use crate::deposit_data::DepositData;
 pub use crate::deposit_message::DepositMessage;
+pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock};
 pub use crate::enr_fork_id::EnrForkId;
 pub use crate::eth1_data::Eth1Data;
 pub use crate::eth_spec::EthSpecId;
consensus/types/src/light_client_bootstrap.rs (new file, 45 lines)
@ -0,0 +1,45 @@
use super::{BeaconBlockHeader, BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee};
use crate::{light_client_update::*, test_utils::TestRandom};
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use std::sync::Arc;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;

/// A LightClientBootstrap is the initializer we send over to lightclient nodes
/// that are trying to generate their basic storage when booting up.
#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)]
#[serde(bound = "T: EthSpec")]
pub struct LightClientBootstrap<T: EthSpec> {
    /// Requested beacon block header.
    pub header: BeaconBlockHeader,
    /// The `SyncCommittee` used in the requested period.
    pub current_sync_committee: Arc<SyncCommittee<T>>,
    /// Merkle proof for sync committee
    pub current_sync_committee_branch: FixedVector<Hash256, CurrentSyncCommitteeProofLen>,
}

impl<T: EthSpec> LightClientBootstrap<T> {
    pub fn from_beacon_state(beacon_state: BeaconState<T>) -> Result<Self, Error> {
        let mut header = beacon_state.latest_block_header().clone();
        header.state_root = beacon_state.tree_hash_root();
        Ok(LightClientBootstrap {
            header,
            current_sync_committee: beacon_state.current_sync_committee()?.clone(),
            /// TODO(Giulio2002): Generate Merkle Proof, this is just empty hashes
            current_sync_committee_branch: FixedVector::new(vec![
                Hash256::zero();
                CURRENT_SYNC_COMMITTEE_PROOF_LEN
            ])?,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::MainnetEthSpec;

    ssz_tests!(LightClientBootstrap<MainnetEthSpec>);
}
consensus/types/src/light_client_finality_update.rs (new file, 80 lines)
@ -0,0 +1,80 @@
use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee};
use crate::{light_client_update::*, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec};
use safe_arith::ArithError;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode};
use ssz_types::typenum::{U5, U6};
use std::sync::Arc;
use test_random_derive::TestRandom;
use tree_hash::TreeHash;

/// A LightClientFinalityUpdate is the update lightclient request or received by a gossip that
/// signal a new finalized beacon block header for the light client sync protocol.
#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)]
#[serde(bound = "T: EthSpec")]
pub struct LightClientFinalityUpdate<T: EthSpec> {
    /// The last `BeaconBlockHeader` from the last attested block by the sync committee.
    pub attested_header: BeaconBlockHeader,
    /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch).
    pub finalized_header: BeaconBlockHeader,
    /// Merkle proof attesting finalized header.
    pub finality_branch: FixedVector<Hash256, FinalizedRootProofLen>,
    /// current sync aggreggate
    pub sync_aggregate: SyncAggregate<T>,
    /// Slot of the sync aggregated singature
    pub signature_slot: Slot,
}

impl<T: EthSpec> LightClientFinalityUpdate<T> {
    pub fn new(
        chain_spec: ChainSpec,
        beacon_state: BeaconState<T>,
        block: BeaconBlock<T>,
        attested_state: BeaconState<T>,
        finalized_block: BeaconBlock<T>,
    ) -> Result<Self, Error> {
        let altair_fork_epoch = chain_spec
            .altair_fork_epoch
            .ok_or(Error::AltairForkNotActive)?;
        if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch {
            return Err(Error::AltairForkNotActive);
        }

        let sync_aggregate = block.body().sync_aggregate()?;
        if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize {
            return Err(Error::NotEnoughSyncCommitteeParticipants);
        }

        // Compute and validate attested header.
        let mut attested_header = attested_state.latest_block_header().clone();
        attested_header.state_root = attested_state.tree_hash_root();
        // Build finalized header from finalized block
        let finalized_header = BeaconBlockHeader {
            slot: finalized_block.slot(),
            proposer_index: finalized_block.proposer_index(),
            parent_root: finalized_block.parent_root(),
            state_root: finalized_block.state_root(),
            body_root: finalized_block.body_root(),
        };
        if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root {
            return Err(Error::InvalidFinalizedBlock);
        }
        // TODO(Giulio2002): compute proper merkle proofs.
        Ok(Self {
            attested_header: attested_header,
            finalized_header: finalized_header,
            finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?,
            sync_aggregate: sync_aggregate.clone(),
            signature_slot: block.slot(),
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::MainnetEthSpec;

    ssz_tests!(LightClientFinalityUpdate<MainnetEthSpec>);
}
|
59
consensus/types/src/light_client_optimistic_update.rs
Normal file
59
consensus/types/src/light_client_optimistic_update.rs
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
use super::{BeaconBlockHeader, EthSpec, Slot, SyncAggregate};
|
||||||
|
use crate::{
|
||||||
|
light_client_update::Error, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec,
|
||||||
|
};
|
||||||
|
use serde_derive::{Deserialize, Serialize};
|
||||||
|
use ssz_derive::{Decode, Encode};
|
||||||
|
use test_random_derive::TestRandom;
|
||||||
|
use tree_hash::TreeHash;
|
||||||
|
|
||||||
|
/// A LightClientOptimisticUpdate is the update we send on each slot,
|
||||||
|
/// it is based off the current unfinalized epoch is verified only against BLS signature.
|
||||||
|
#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
|
||||||
|
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)]
|
||||||
|
#[serde(bound = "T: EthSpec")]
|
||||||
|
pub struct LightClientOptimisticUpdate<T: EthSpec> {
|
||||||
|
/// The last `BeaconBlockHeader` from the last attested block by the sync committee.
|
||||||
|
pub attested_header: BeaconBlockHeader,
|
||||||
|
/// current sync aggreggate
|
||||||
|
pub sync_aggregate: SyncAggregate<T>,
|
||||||
|
/// Slot of the sync aggregated singature
|
||||||
|
pub signature_slot: Slot,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: EthSpec> LightClientOptimisticUpdate<T> {
|
||||||
|
pub fn new(
|
||||||
|
chain_spec: ChainSpec,
|
||||||
|
block: BeaconBlock<T>,
|
||||||
|
attested_state: BeaconState<T>,
|
||||||
|
) -> Result<Self, Error> {
|
||||||
|
let altair_fork_epoch = chain_spec
|
||||||
|
.altair_fork_epoch
|
||||||
|
.ok_or(Error::AltairForkNotActive)?;
|
||||||
|
if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch {
|
||||||
|
return Err(Error::AltairForkNotActive);
|
||||||
|
}
|
||||||
|
|
||||||
|
let sync_aggregate = block.body().sync_aggregate()?;
|
||||||
|
if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize {
|
||||||
|
return Err(Error::NotEnoughSyncCommitteeParticipants);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute and validate attested header.
|
||||||
|
let mut attested_header = attested_state.latest_block_header().clone();
|
||||||
|
attested_header.state_root = attested_state.tree_hash_root();
|
||||||
|
Ok(Self {
|
||||||
|
attested_header,
|
||||||
|
sync_aggregate: sync_aggregate.clone(),
|
||||||
|
signature_slot: block.slot(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use crate::MainnetEthSpec;
|
||||||
|
|
||||||
|
ssz_tests!(LightClientOptimisticUpdate<MainnetEthSpec>);
|
||||||
|
}
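All of these update constructors reject a sync aggregate whose participation falls below `min_sync_committee_participants` before doing any further work. A tiny standalone sketch of that gate (the constant 1 mirrors the spec default; the types here are illustrative, not the Lighthouse ones):

const MIN_SYNC_COMMITTEE_PARTICIPANTS: usize = 1; // spec default, assumed here

#[derive(Debug, PartialEq)]
enum Error {
    NotEnoughSyncCommitteeParticipants,
}

// A sync aggregate reduced to its participation bitfield.
fn check_participation(bits: &[bool]) -> Result<usize, Error> {
    let set = bits.iter().filter(|b| **b).count();
    if set < MIN_SYNC_COMMITTEE_PARTICIPANTS {
        Err(Error::NotEnoughSyncCommitteeParticipants)
    } else {
        Ok(set)
    }
}

fn main() {
    assert_eq!(check_participation(&[true, false, true]), Ok(2));
    assert_eq!(
        check_participation(&[false, false]),
        Err(Error::NotEnoughSyncCommitteeParticipants)
    );
}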
|
171
consensus/types/src/light_client_update.rs
Normal file
171
consensus/types/src/light_client_update.rs
Normal file
@ -0,0 +1,171 @@
|
|||||||
|
use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee};
|
||||||
|
use crate::{beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec};
|
||||||
|
use safe_arith::ArithError;
|
||||||
|
use serde_derive::{Deserialize, Serialize};
|
||||||
|
use ssz_derive::{Decode, Encode};
|
||||||
|
use ssz_types::typenum::{U5, U6};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use test_random_derive::TestRandom;
|
||||||
|
use tree_hash::TreeHash;
|
||||||
|
|
||||||
|
pub const FINALIZED_ROOT_INDEX: usize = 105;
|
||||||
|
pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54;
|
||||||
|
pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55;
|
||||||
|
|
||||||
|
pub type FinalizedRootProofLen = U6;
|
||||||
|
pub type CurrentSyncCommitteeProofLen = U5;
|
||||||
|
pub type NextSyncCommitteeProofLen = U5;
|
||||||
|
|
||||||
|
pub const FINALIZED_ROOT_PROOF_LEN: usize = 6;
|
||||||
|
pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5;
|
||||||
|
pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5;
|
||||||
|
|
||||||
|
#[derive(Debug, PartialEq, Clone)]
|
||||||
|
pub enum Error {
|
||||||
|
SszTypesError(ssz_types::Error),
|
||||||
|
BeaconStateError(beacon_state::Error),
|
||||||
|
ArithError(ArithError),
|
||||||
|
AltairForkNotActive,
|
||||||
|
NotEnoughSyncCommitteeParticipants,
|
||||||
|
MismatchingPeriods,
|
||||||
|
InvalidFinalizedBlock,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ssz_types::Error> for Error {
|
||||||
|
fn from(e: ssz_types::Error) -> Error {
|
||||||
|
Error::SszTypesError(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<beacon_state::Error> for Error {
|
||||||
|
fn from(e: beacon_state::Error) -> Error {
|
||||||
|
Error::BeaconStateError(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<ArithError> for Error {
|
||||||
|
fn from(e: ArithError) -> Error {
|
||||||
|
Error::ArithError(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A LightClientUpdate is the update we request solely to either complete the bootstraping process,
|
||||||
|
/// or to sync up to the last committee period, we need to have one ready for each ALTAIR period
|
||||||
|
/// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD].
|
||||||
|
#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
|
||||||
|
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)]
|
||||||
|
#[serde(bound = "T: EthSpec")]
|
||||||
|
pub struct LightClientUpdate<T: EthSpec> {
|
||||||
|
/// The last `BeaconBlockHeader` from the last attested block by the sync committee.
|
||||||
|
pub attested_header: BeaconBlockHeader,
|
||||||
|
/// The `SyncCommittee` used in the next period.
|
||||||
|
pub next_sync_committee: Arc<SyncCommittee<T>>,
|
||||||
|
/// Merkle proof for next sync committee
|
||||||
|
pub next_sync_committee_branch: FixedVector<Hash256, NextSyncCommitteeProofLen>,
|
||||||
|
/// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch).
|
||||||
|
pub finalized_header: BeaconBlockHeader,
|
||||||
|
/// Merkle proof attesting finalized header.
|
||||||
|
pub finality_branch: FixedVector<Hash256, FinalizedRootProofLen>,
|
||||||
|
/// current sync aggreggate
|
||||||
|
pub sync_aggregate: SyncAggregate<T>,
|
||||||
|
/// Slot of the sync aggregated singature
|
||||||
|
pub signature_slot: Slot,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: EthSpec> LightClientUpdate<T> {
|
||||||
|
pub fn new(
|
||||||
|
chain_spec: ChainSpec,
|
||||||
|
beacon_state: BeaconState<T>,
|
||||||
|
block: BeaconBlock<T>,
|
||||||
|
attested_state: BeaconState<T>,
|
||||||
|
finalized_block: BeaconBlock<T>,
|
||||||
|
) -> Result<Self, Error> {
|
||||||
|
let altair_fork_epoch = chain_spec
|
||||||
|
.altair_fork_epoch
|
||||||
|
.ok_or(Error::AltairForkNotActive)?;
|
||||||
|
if attested_state.slot().epoch(T::slots_per_epoch()) < altair_fork_epoch {
|
||||||
|
return Err(Error::AltairForkNotActive);
|
||||||
|
}
|
||||||
|
|
||||||
|
let sync_aggregate = block.body().sync_aggregate()?;
|
||||||
|
if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize {
|
||||||
|
return Err(Error::NotEnoughSyncCommitteeParticipants);
|
||||||
|
}
|
||||||
|
|
||||||
|
let signature_period = block.epoch().sync_committee_period(&chain_spec)?;
|
||||||
|
// Compute and validate attested header.
|
||||||
|
let mut attested_header = attested_state.latest_block_header().clone();
|
||||||
|
attested_header.state_root = attested_state.tree_hash_root();
|
||||||
|
let attested_period = attested_header
|
||||||
|
.slot
|
||||||
|
.epoch(T::slots_per_epoch())
|
||||||
|
.sync_committee_period(&chain_spec)?;
|
||||||
|
if attested_period != signature_period {
|
||||||
|
return Err(Error::MismatchingPeriods);
|
||||||
|
}
|
||||||
|
// Build finalized header from finalized block
|
||||||
|
let finalized_header = BeaconBlockHeader {
|
||||||
|
slot: finalized_block.slot(),
|
||||||
|
proposer_index: finalized_block.proposer_index(),
|
||||||
|
parent_root: finalized_block.parent_root(),
|
||||||
|
state_root: finalized_block.state_root(),
|
||||||
|
body_root: finalized_block.body_root(),
|
||||||
|
};
|
||||||
|
if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root {
|
||||||
|
return Err(Error::InvalidFinalizedBlock);
|
||||||
|
}
|
||||||
|
// TODO(Giulio2002): compute proper merkle proofs.
|
||||||
|
Ok(Self {
|
||||||
|
attested_header,
|
||||||
|
next_sync_committee: attested_state.next_sync_committee()?.clone(),
|
||||||
|
next_sync_committee_branch: FixedVector::new(vec![
|
||||||
|
Hash256::zero();
|
||||||
|
NEXT_SYNC_COMMITTEE_PROOF_LEN
|
||||||
|
])?,
|
||||||
|
finalized_header,
|
||||||
|
finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?,
|
||||||
|
sync_aggregate: sync_aggregate.clone(),
|
||||||
|
signature_slot: block.slot(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use crate::MainnetEthSpec;
|
||||||
|
use ssz_types::typenum::Unsigned;
|
||||||
|
|
||||||
|
ssz_tests!(LightClientUpdate<MainnetEthSpec>);
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn finalized_root_params() {
|
||||||
|
assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX);
|
||||||
|
assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32 + 1) > FINALIZED_ROOT_INDEX);
|
||||||
|
assert_eq!(FinalizedRootProofLen::to_usize(), FINALIZED_ROOT_PROOF_LEN);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn current_sync_committee_params() {
|
||||||
|
assert!(
|
||||||
|
2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN as u32) <= CURRENT_SYNC_COMMITTEE_INDEX
|
||||||
|
);
|
||||||
|
assert!(
|
||||||
|
2usize.pow(CURRENT_SYNC_COMMITTEE_PROOF_LEN as u32 + 1) > CURRENT_SYNC_COMMITTEE_INDEX
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
CurrentSyncCommitteeProofLen::to_usize(),
|
||||||
|
CURRENT_SYNC_COMMITTEE_PROOF_LEN
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn next_sync_committee_params() {
|
||||||
|
assert!(2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN as u32) <= NEXT_SYNC_COMMITTEE_INDEX);
|
||||||
|
assert!(2usize.pow(NEXT_SYNC_COMMITTEE_PROOF_LEN as u32 + 1) > NEXT_SYNC_COMMITTEE_INDEX);
|
||||||
|
assert_eq!(
|
||||||
|
NextSyncCommitteeProofLen::to_usize(),
|
||||||
|
NEXT_SYNC_COMMITTEE_PROOF_LEN
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
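The `*_PROOF_LEN` constants are the Merkle-branch depths for the corresponding generalized indices, i.e. floor(log2(index)), which is exactly the relationship the tests at the end of the file assert (2^len <= index < 2^(len+1)). A small standalone check of that relationship:

/// Depth of the Merkle branch needed to prove a leaf at a given generalized index:
/// floor(log2(index)).
fn proof_len(generalized_index: usize) -> u32 {
    assert!(generalized_index > 0);
    usize::BITS - 1 - generalized_index.leading_zeros()
}

fn main() {
    assert_eq!(proof_len(105), 6); // FINALIZED_ROOT_INDEX
    assert_eq!(proof_len(54), 5);  // CURRENT_SYNC_COMMITTEE_INDEX
    assert_eq!(proof_len(55), 5);  // NEXT_SYNC_COMMITTEE_INDEX
}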
@ -1,8 +1,9 @@
 //! Identifies each shard by an integer identifier.
-use crate::{AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot};
+use crate::{AttestationData, ChainSpec, CommitteeIndex, Epoch, EthSpec, Slot};
 use safe_arith::{ArithError, SafeArith};
 use serde_derive::{Deserialize, Serialize};
 use std::ops::{Deref, DerefMut};
+use swap_or_not_shuffle::compute_shuffled_index;

 const MAX_SUBNET_ID: usize = 64;

@ -71,6 +72,45 @@ impl SubnetId {
             .safe_rem(spec.attestation_subnet_count)?
             .into())
     }

+    #[allow(clippy::integer_arithmetic)]
+    /// Computes the set of subnets the node should be subscribed to during the current epoch,
+    /// along with the first epoch in which these subscriptions are no longer valid.
+    pub fn compute_subnets_for_epoch<T: EthSpec>(
+        node_id: ethereum_types::U256,
+        epoch: Epoch,
+        spec: &ChainSpec,
+    ) -> Result<(impl Iterator<Item = SubnetId>, Epoch), &'static str> {
+        let node_id_prefix =
+            (node_id >> (256 - spec.attestation_subnet_prefix_bits() as usize)).as_usize();
+
+        let subscription_event_idx = epoch.as_u64() / spec.epochs_per_subnet_subscription;
+        let permutation_seed =
+            eth2_hashing::hash(&int_to_bytes::int_to_bytes8(subscription_event_idx));
+
+        let num_subnets = 1 << spec.attestation_subnet_prefix_bits();
+
+        let permutated_prefix = compute_shuffled_index(
+            node_id_prefix,
+            num_subnets,
+            &permutation_seed,
+            spec.shuffle_round_count,
+        )
+        .ok_or("Unable to shuffle")? as u64;
+
+        // Get the constants we need to avoid holding a reference to the spec
+        let &ChainSpec {
+            subnets_per_node,
+            attestation_subnet_count,
+            ..
+        } = spec;
+
+        let subnet_set_generator = (0..subnets_per_node).map(move |idx| {
+            SubnetId::new((permutated_prefix + idx as u64) % attestation_subnet_count)
+        });
+        let valid_until_epoch = (subscription_event_idx + 1) * spec.epochs_per_subnet_subscription;
+        Ok((subnet_set_generator, valid_until_epoch.into()))
+    }
 }

 impl Deref for SubnetId {
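`compute_subnets_for_epoch` deterministically maps a node's ID to `subnets_per_node` consecutive subnets: it takes the top `attestation_subnet_prefix_bits()` bits of the 256-bit node ID, shuffles that prefix with a seed derived from the subscription period, and walks forward modulo the subnet count. A simplified standalone sketch of the mapping, using a u64 node ID and a trivial stand-in for the shuffle (the real code uses `compute_shuffled_index` over a hash-derived seed):

// Simplified stand-in for the seeded shuffle used by the real implementation.
fn toy_shuffle(prefix: u64, num_prefixes: u64, seed: u64) -> u64 {
    (prefix ^ seed) % num_prefixes
}

/// Map a (toy, 64-bit) node id to its attestation subnets for one subscription period.
fn subnets_for_epoch(
    node_id: u64,
    epoch: u64,
    prefix_bits: u32,
    subnets_per_node: u64,
    subnet_count: u64,
    epochs_per_subscription: u64,
) -> (Vec<u64>, u64) {
    // Top `prefix_bits` bits of the node id select a prefix bucket.
    let node_id_prefix = node_id >> (64 - prefix_bits);
    // All nodes rotate together once per subscription period.
    let subscription_event_idx = epoch / epochs_per_subscription;
    let permutated_prefix = toy_shuffle(node_id_prefix, 1 << prefix_bits, subscription_event_idx);

    let subnets = (0..subnets_per_node)
        .map(|idx| (permutated_prefix + idx) % subnet_count)
        .collect();
    let valid_until_epoch = (subscription_event_idx + 1) * epochs_per_subscription;
    (subnets, valid_until_epoch)
}

fn main() {
    // 12 prefix bits and 64 subnets mirror the mainnet-style values above.
    let (subnets, until) = subnets_for_epoch(0xDEAD_BEEF_0000_0000, 300, 12, 1, 64, 256);
    assert_eq!(subnets.len(), 1);
    assert!(subnets[0] < 64);
    assert_eq!(until, 512); // period 1 (epochs 256..512) expires at epoch 512
}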
||||||
|
@ -90,6 +90,7 @@ pub mod generics {
     pub use crate::generic_secret_key::GenericSecretKey;
     pub use crate::generic_signature::GenericSignature;
     pub use crate::generic_signature_bytes::GenericSignatureBytes;
+    pub use crate::generic_signature_set::WrappedSignature;
 }

 /// Defines all the fundamental BLS points which should be exported by this crate by making
@ -109,6 +110,13 @@ macro_rules! define_mod {
         pub type AggregatePublicKey =
             GenericAggregatePublicKey<bls_variant::PublicKey, bls_variant::AggregatePublicKey>;
         pub type Signature = GenericSignature<bls_variant::PublicKey, bls_variant::Signature>;
+        pub type BlsWrappedSignature<'a> = WrappedSignature<
+            'a,
+            bls_variant::PublicKey,
+            bls_variant::AggregatePublicKey,
+            bls_variant::Signature,
+            bls_variant::AggregateSignature,
+        >;
         pub type AggregateSignature = GenericAggregateSignature<
             bls_variant::PublicKey,
             bls_variant::AggregatePublicKey,
@ -10,7 +10,7 @@ description = "Hashing primitives used in Ethereum 2.0"
 lazy_static = { version = "1.4.0", optional = true }
 cpufeatures = { version = "0.2.5", optional = true }
 ring = "0.16.19"
-sha2 = "0.10.2"
+sha2 = "0.10"

 [dev-dependencies]
 rustc-hex = "2.1.0"
@ -7,7 +7,7 @@ edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-sha2 = "0.9.1"
+sha2 = "0.10"
 zeroize = { version = "1.4.2", features = ["zeroize_derive"] }
 num-bigint-dig = { version = "0.6.0", features = ["zeroize"] }
 ring = "0.16.19"
Some files were not shown because too many files have changed in this diff.