merge with upstream
This commit is contained in:
commit
dbc57ba2d9
@ -1046,46 +1046,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
Ok(self.store.get_state(state_root, slot)?)
|
||||
}
|
||||
|
||||
/// Run a function with mutable access to a state for `block_root`.
|
||||
///
|
||||
/// The primary purpose of this function is to borrow a state with its tree hash cache
|
||||
/// from the snapshot cache *without moving it*. This means that calls to this function should
|
||||
/// be kept to an absolute minimum, because holding the snapshot cache lock has the ability
|
||||
/// to delay block import.
|
||||
///
|
||||
/// If there is no appropriate state in the snapshot cache then one will be loaded from disk.
|
||||
/// If no state is found on disk then `Ok(None)` will be returned.
|
||||
///
|
||||
/// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used,
|
||||
/// which can inform logging/metrics.
|
||||
///
|
||||
/// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour
|
||||
/// of `tree-states`, where all caches are CoW and everything is good in the world.
|
||||
pub fn with_mutable_state_for_block<F, V, Payload: AbstractExecPayload<T::EthSpec>>(
|
||||
&self,
|
||||
block: &SignedBeaconBlock<T::EthSpec, Payload>,
|
||||
block_root: Hash256,
|
||||
f: F,
|
||||
) -> Result<Option<V>, Error>
|
||||
where
|
||||
F: FnOnce(&mut BeaconState<T::EthSpec>, bool) -> Result<V, Error>,
|
||||
{
|
||||
if let Some(state) = self
|
||||
.snapshot_cache
|
||||
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(Error::SnapshotCacheLockTimeout)?
|
||||
.borrow_unadvanced_state_mut(block_root)
|
||||
{
|
||||
let cache_hit = true;
|
||||
f(state, cache_hit).map(Some)
|
||||
} else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? {
|
||||
let cache_hit = false;
|
||||
f(&mut state, cache_hit).map(Some)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the sync committee at `slot + 1` from the canonical chain.
|
||||
///
|
||||
/// This is useful when dealing with sync committee messages, because messages are signed
|
||||
|
@ -61,7 +61,7 @@ pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
|
||||
}
|
||||
|
||||
#[superstruct(
|
||||
variants(V1, V8),
|
||||
variants(V8),
|
||||
variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
@ -75,13 +75,11 @@ pub(crate) struct CacheItem {
|
||||
pub(crate) type CacheItem = CacheItemV8;
|
||||
|
||||
#[superstruct(
|
||||
variants(V1, V8),
|
||||
variants(V8),
|
||||
variant_attributes(derive(PartialEq, Clone, Default, Debug, Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
pub struct BalancesCache {
|
||||
#[superstruct(only(V1))]
|
||||
pub(crate) items: Vec<CacheItemV1>,
|
||||
#[superstruct(only(V8))]
|
||||
pub(crate) items: Vec<CacheItemV8>,
|
||||
}
|
||||
@ -366,26 +364,20 @@ where
|
||||
}
|
||||
|
||||
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
|
||||
#[superstruct(
|
||||
variants(V1, V7, V8, V10, V11),
|
||||
variant_attributes(derive(Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)]
|
||||
pub struct PersistedForkChoiceStore {
|
||||
#[superstruct(only(V1, V7))]
|
||||
pub balances_cache: BalancesCacheV1,
|
||||
#[superstruct(only(V8, V10, V11))]
|
||||
#[superstruct(only(V11))]
|
||||
pub balances_cache: BalancesCacheV8,
|
||||
pub time: Slot,
|
||||
pub finalized_checkpoint: Checkpoint,
|
||||
pub justified_checkpoint: Checkpoint,
|
||||
pub justified_balances: Vec<u64>,
|
||||
pub best_justified_checkpoint: Checkpoint,
|
||||
#[superstruct(only(V10, V11))]
|
||||
#[superstruct(only(V11))]
|
||||
pub unrealized_justified_checkpoint: Checkpoint,
|
||||
#[superstruct(only(V10, V11))]
|
||||
#[superstruct(only(V11))]
|
||||
pub unrealized_finalized_checkpoint: Checkpoint,
|
||||
#[superstruct(only(V7, V8, V10, V11))]
|
||||
#[superstruct(only(V11))]
|
||||
pub proposer_boost_root: Hash256,
|
||||
#[superstruct(only(V11))]
|
||||
pub equivocating_indices: BTreeSet<u64>,
|
||||
|
@ -1,6 +1,8 @@
|
||||
use slot_clock::SlotClock;
|
||||
|
||||
use crate::beacon_chain::{BeaconChain, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
|
||||
use bls::PublicKey;
|
||||
use types::consts::eip4844::BLS_MODULUS;
|
||||
use crate::{kzg_utils, BeaconChainError};
|
||||
use state_processing::per_block_processing::eip4844::eip4844::verify_kzg_commitments_against_transactions;
|
||||
use types::{BeaconStateError, BlobsSidecar, Hash256, KzgCommitment, Slot, Transactions};
|
||||
|
@ -1,7 +1,4 @@
|
||||
use crate::beacon_fork_choice_store::{
|
||||
PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11,
|
||||
PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8,
|
||||
};
|
||||
use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11;
|
||||
use ssz::{Decode, Encode};
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use store::{DBColumn, Error, StoreItem};
|
||||
@ -10,21 +7,9 @@ use superstruct::superstruct;
|
||||
// If adding a new version you should update this type alias and fix the breakages.
|
||||
pub type PersistedForkChoice = PersistedForkChoiceV11;
|
||||
|
||||
#[superstruct(
|
||||
variants(V1, V7, V8, V10, V11),
|
||||
variant_attributes(derive(Encode, Decode)),
|
||||
no_enum
|
||||
)]
|
||||
#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)]
|
||||
pub struct PersistedForkChoice {
|
||||
pub fork_choice: fork_choice::PersistedForkChoice,
|
||||
#[superstruct(only(V1))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV1,
|
||||
#[superstruct(only(V7))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV7,
|
||||
#[superstruct(only(V8))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV8,
|
||||
#[superstruct(only(V10))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV10,
|
||||
#[superstruct(only(V11))]
|
||||
pub fork_choice_store: PersistedForkChoiceStoreV11,
|
||||
}
|
||||
@ -47,8 +32,4 @@ macro_rules! impl_store_item {
|
||||
};
|
||||
}
|
||||
|
||||
impl_store_item!(PersistedForkChoiceV1);
|
||||
impl_store_item!(PersistedForkChoiceV7);
|
||||
impl_store_item!(PersistedForkChoiceV8);
|
||||
impl_store_item!(PersistedForkChoiceV10);
|
||||
impl_store_item!(PersistedForkChoiceV11);
|
||||
|
@ -1,20 +1,9 @@
|
||||
//! Utilities for managing database schema changes.
|
||||
mod migration_schema_v10;
|
||||
mod migration_schema_v11;
|
||||
mod migration_schema_v12;
|
||||
mod migration_schema_v13;
|
||||
mod migration_schema_v6;
|
||||
mod migration_schema_v7;
|
||||
mod migration_schema_v8;
|
||||
mod migration_schema_v9;
|
||||
mod types;
|
||||
|
||||
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY};
|
||||
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
|
||||
use crate::eth1_chain::SszEth1;
|
||||
use crate::persisted_fork_choice::{
|
||||
PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7,
|
||||
PersistedForkChoiceV8,
|
||||
};
|
||||
use crate::types::ChainSpec;
|
||||
use slog::{warn, Logger};
|
||||
use std::sync::Arc;
|
||||
@ -23,6 +12,7 @@ use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
|
||||
use store::{Error as StoreError, StoreItem};
|
||||
|
||||
/// Migrate the database from one schema version to another, applying all requisite mutations.
|
||||
#[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future
|
||||
pub fn migrate_schema<T: BeaconChainTypes>(
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
deposit_contract_deploy_block: u64,
|
||||
@ -62,156 +52,9 @@ pub fn migrate_schema<T: BeaconChainTypes>(
|
||||
}
|
||||
|
||||
//
|
||||
// Migrations from before SchemaVersion(5) are deprecated.
|
||||
// Migrations from before SchemaVersion(11) are deprecated.
|
||||
//
|
||||
|
||||
// Migration for adding `execution_status` field to the fork choice store.
|
||||
(SchemaVersion(5), SchemaVersion(6)) => {
|
||||
// Database operations to be done atomically
|
||||
let mut ops = vec![];
|
||||
|
||||
// The top-level `PersistedForkChoice` struct is still V1 but will have its internal
|
||||
// bytes for the fork choice updated to V6.
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(mut persisted_fork_choice) = fork_choice_opt {
|
||||
migration_schema_v6::update_execution_statuses::<T>(&mut persisted_fork_choice)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
|
||||
// Store the converted fork choice store under the same key.
|
||||
ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// 1. Add `proposer_boost_root`.
|
||||
// 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to
|
||||
// `finalized_checkpoint`.
|
||||
// 3. This migration also includes a potential update to the justified
|
||||
// checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint
|
||||
// combination does not actually exist for any blocks in fork choice. This was possible in
|
||||
// the consensus spec prior to v1.1.6.
|
||||
//
|
||||
// Relevant issues:
|
||||
//
|
||||
// https://github.com/sigp/lighthouse/issues/2741
|
||||
// https://github.com/ethereum/consensus-specs/pull/2727
|
||||
// https://github.com/ethereum/consensus-specs/pull/2730
|
||||
(SchemaVersion(6), SchemaVersion(7)) => {
|
||||
// Database operations to be done atomically
|
||||
let mut ops = vec![];
|
||||
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(persisted_fork_choice_v1) = fork_choice_opt {
|
||||
// This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field.
|
||||
let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into();
|
||||
|
||||
let result = migration_schema_v7::update_fork_choice::<T>(
|
||||
&mut persisted_fork_choice_v7,
|
||||
db.clone(),
|
||||
);
|
||||
|
||||
// Fall back to re-initializing fork choice from an anchor state if necessary.
|
||||
if let Err(e) = result {
|
||||
warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e);
|
||||
migration_schema_v7::update_with_reinitialized_fork_choice::<T>(
|
||||
&mut persisted_fork_choice_v7,
|
||||
db.clone(),
|
||||
spec,
|
||||
)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
}
|
||||
|
||||
// Store the converted fork choice store under the same key.
|
||||
ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Migration to add an `epoch` key to the fork choice's balances cache.
|
||||
(SchemaVersion(7), SchemaVersion(8)) => {
|
||||
let mut ops = vec![];
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV7>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(fork_choice) = fork_choice_opt {
|
||||
let updated_fork_choice =
|
||||
migration_schema_v8::update_fork_choice::<T>(fork_choice, db.clone())?;
|
||||
|
||||
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Upgrade from v8 to v9 to separate the execution payloads into their own column.
|
||||
(SchemaVersion(8), SchemaVersion(9)) => {
|
||||
migration_schema_v9::upgrade_to_v9::<T>(db.clone(), log)?;
|
||||
db.store_schema_version(to)
|
||||
}
|
||||
// Downgrade from v9 to v8 to ignore the separation of execution payloads
|
||||
// NOTE: only works before the Bellatrix fork epoch.
|
||||
(SchemaVersion(9), SchemaVersion(8)) => {
|
||||
migration_schema_v9::downgrade_from_v9::<T>(db.clone(), log)?;
|
||||
db.store_schema_version(to)
|
||||
}
|
||||
(SchemaVersion(9), SchemaVersion(10)) => {
|
||||
let mut ops = vec![];
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV8>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(fork_choice) = fork_choice_opt {
|
||||
let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?;
|
||||
|
||||
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
(SchemaVersion(10), SchemaVersion(9)) => {
|
||||
let mut ops = vec![];
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV10>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(fork_choice) = fork_choice_opt {
|
||||
let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?;
|
||||
|
||||
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Upgrade from v10 to v11 adding support for equivocating indices to fork choice.
|
||||
(SchemaVersion(10), SchemaVersion(11)) => {
|
||||
let mut ops = vec![];
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV10>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(fork_choice) = fork_choice_opt {
|
||||
let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice);
|
||||
|
||||
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Downgrade from v11 to v10 removing support for equivocating indices from fork choice.
|
||||
(SchemaVersion(11), SchemaVersion(10)) => {
|
||||
let mut ops = vec![];
|
||||
let fork_choice_opt = db.get_item::<PersistedForkChoiceV11>(&FORK_CHOICE_DB_KEY)?;
|
||||
if let Some(fork_choice) = fork_choice_opt {
|
||||
let updated_fork_choice =
|
||||
migration_schema_v11::downgrade_fork_choice(fork_choice, log);
|
||||
|
||||
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
||||
}
|
||||
|
||||
db.store_schema_version_atomically(to, ops)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Upgrade from v11 to v12 to store richer metadata in the attestation op pool.
|
||||
(SchemaVersion(11), SchemaVersion(12)) => {
|
||||
let ops = migration_schema_v12::upgrade_to_v12::<T>(db.clone(), log)?;
|
||||
|
@ -1,97 +0,0 @@
|
||||
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8};
|
||||
use crate::schema_change::{
|
||||
types::{SszContainerV10, SszContainerV7},
|
||||
StoreError,
|
||||
};
|
||||
use proto_array::core::SszContainer;
|
||||
use ssz::{Decode, Encode};
|
||||
|
||||
pub fn update_fork_choice(
|
||||
mut fork_choice: PersistedForkChoiceV8,
|
||||
) -> Result<PersistedForkChoiceV10, StoreError> {
|
||||
let ssz_container_v7 = SszContainerV7::from_ssz_bytes(
|
||||
&fork_choice.fork_choice.proto_array_bytes,
|
||||
)
|
||||
.map_err(|e| {
|
||||
StoreError::SchemaMigrationError(format!(
|
||||
"Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
// These transformations instantiate `node.unrealized_justified_checkpoint` and
|
||||
// `node.unrealized_finalized_checkpoint` to `None`.
|
||||
let ssz_container_v10: SszContainerV10 = ssz_container_v7.into();
|
||||
let ssz_container: SszContainer = ssz_container_v10.into();
|
||||
fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes();
|
||||
|
||||
Ok(fork_choice.into())
|
||||
}
|
||||
|
||||
pub fn downgrade_fork_choice(
|
||||
mut fork_choice: PersistedForkChoiceV10,
|
||||
) -> Result<PersistedForkChoiceV8, StoreError> {
|
||||
let ssz_container_v10 = SszContainerV10::from_ssz_bytes(
|
||||
&fork_choice.fork_choice.proto_array_bytes,
|
||||
)
|
||||
.map_err(|e| {
|
||||
StoreError::SchemaMigrationError(format!(
|
||||
"Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
let ssz_container_v7: SszContainerV7 = ssz_container_v10.into();
|
||||
fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes();
|
||||
|
||||
Ok(fork_choice.into())
|
||||
}
|
||||
|
||||
impl From<PersistedForkChoiceStoreV8> for PersistedForkChoiceStoreV10 {
|
||||
fn from(other: PersistedForkChoiceStoreV8) -> Self {
|
||||
Self {
|
||||
balances_cache: other.balances_cache,
|
||||
time: other.time,
|
||||
finalized_checkpoint: other.finalized_checkpoint,
|
||||
justified_checkpoint: other.justified_checkpoint,
|
||||
justified_balances: other.justified_balances,
|
||||
best_justified_checkpoint: other.best_justified_checkpoint,
|
||||
unrealized_justified_checkpoint: other.best_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: other.finalized_checkpoint,
|
||||
proposer_boost_root: other.proposer_boost_root,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PersistedForkChoiceV8> for PersistedForkChoiceV10 {
|
||||
fn from(other: PersistedForkChoiceV8) -> Self {
|
||||
Self {
|
||||
fork_choice: other.fork_choice,
|
||||
fork_choice_store: other.fork_choice_store.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PersistedForkChoiceStoreV10> for PersistedForkChoiceStoreV8 {
|
||||
fn from(other: PersistedForkChoiceStoreV10) -> Self {
|
||||
Self {
|
||||
balances_cache: other.balances_cache,
|
||||
time: other.time,
|
||||
finalized_checkpoint: other.finalized_checkpoint,
|
||||
justified_checkpoint: other.justified_checkpoint,
|
||||
justified_balances: other.justified_balances,
|
||||
best_justified_checkpoint: other.best_justified_checkpoint,
|
||||
proposer_boost_root: other.proposer_boost_root,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PersistedForkChoiceV10> for PersistedForkChoiceV8 {
|
||||
fn from(other: PersistedForkChoiceV10) -> Self {
|
||||
Self {
|
||||
fork_choice: other.fork_choice,
|
||||
fork_choice_store: other.fork_choice_store.into(),
|
||||
}
|
||||
}
|
||||
}
|
@ -1,77 +0,0 @@
|
||||
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11};
|
||||
use slog::{warn, Logger};
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
/// Add the equivocating indices field.
|
||||
pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 {
|
||||
let PersistedForkChoiceStoreV10 {
|
||||
balances_cache,
|
||||
time,
|
||||
finalized_checkpoint,
|
||||
justified_checkpoint,
|
||||
justified_balances,
|
||||
best_justified_checkpoint,
|
||||
unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint,
|
||||
proposer_boost_root,
|
||||
} = fork_choice_v10.fork_choice_store;
|
||||
|
||||
PersistedForkChoiceV11 {
|
||||
fork_choice: fork_choice_v10.fork_choice,
|
||||
fork_choice_store: PersistedForkChoiceStoreV11 {
|
||||
balances_cache,
|
||||
time,
|
||||
finalized_checkpoint,
|
||||
justified_checkpoint,
|
||||
justified_balances,
|
||||
best_justified_checkpoint,
|
||||
unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint,
|
||||
proposer_boost_root,
|
||||
equivocating_indices: BTreeSet::new(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn downgrade_fork_choice(
|
||||
fork_choice_v11: PersistedForkChoiceV11,
|
||||
log: Logger,
|
||||
) -> PersistedForkChoiceV10 {
|
||||
let PersistedForkChoiceStoreV11 {
|
||||
balances_cache,
|
||||
time,
|
||||
finalized_checkpoint,
|
||||
justified_checkpoint,
|
||||
justified_balances,
|
||||
best_justified_checkpoint,
|
||||
unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint,
|
||||
proposer_boost_root,
|
||||
equivocating_indices,
|
||||
} = fork_choice_v11.fork_choice_store;
|
||||
|
||||
if !equivocating_indices.is_empty() {
|
||||
warn!(
|
||||
log,
|
||||
"Deleting slashed validators from fork choice store";
|
||||
"count" => equivocating_indices.len(),
|
||||
"message" => "this may make your node more susceptible to following the wrong chain",
|
||||
);
|
||||
}
|
||||
|
||||
PersistedForkChoiceV10 {
|
||||
fork_choice: fork_choice_v11.fork_choice,
|
||||
fork_choice_store: PersistedForkChoiceStoreV10 {
|
||||
balances_cache,
|
||||
time,
|
||||
finalized_checkpoint,
|
||||
justified_checkpoint,
|
||||
justified_balances,
|
||||
best_justified_checkpoint,
|
||||
unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint,
|
||||
proposer_boost_root,
|
||||
},
|
||||
}
|
||||
}
|
@ -1,28 +0,0 @@
|
||||
///! These functions and structs are only relevant to the database migration from schema 5 to 6.
|
||||
use crate::persisted_fork_choice::PersistedForkChoiceV1;
|
||||
use crate::schema_change::types::{SszContainerV1, SszContainerV6};
|
||||
use crate::BeaconChainTypes;
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::{Decode, Encode};
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
|
||||
pub(crate) fn update_execution_statuses<T: BeaconChainTypes>(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV1,
|
||||
) -> Result<(), String> {
|
||||
let ssz_container_v1 =
|
||||
SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
|
||||
.map_err(|e| {
|
||||
format!(
|
||||
"Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
|
||||
e
|
||||
)
|
||||
})?;
|
||||
|
||||
let ssz_container_v6: SszContainerV6 = ssz_container_v1.into();
|
||||
|
||||
persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes();
|
||||
Ok(())
|
||||
}
|
@ -1,341 +0,0 @@
|
||||
///! These functions and structs are only relevant to the database migration from schema 6 to 7.
|
||||
use crate::beacon_chain::BeaconChainTypes;
|
||||
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
|
||||
use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7};
|
||||
use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot};
|
||||
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
|
||||
use fork_choice::ForkChoice;
|
||||
use proto_array::{core::ProtoNode, core::SszContainer, CountUnrealizedFull, ProtoArrayForkChoice};
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::{Decode, Encode};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use store::hot_cold_store::HotColdDB;
|
||||
use store::iter::BlockRootsIterator;
|
||||
use store::Error as StoreError;
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
|
||||
/// This method is used to re-initialize fork choice from the finalized state in case we hit an
|
||||
/// error during this migration.
|
||||
pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV7,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
spec: &ChainSpec,
|
||||
) -> Result<(), String> {
|
||||
let anchor_block_root = persisted_fork_choice
|
||||
.fork_choice_store
|
||||
.finalized_checkpoint
|
||||
.root;
|
||||
let anchor_block = db
|
||||
.get_full_block_prior_to_v9(&anchor_block_root)
|
||||
.map_err(|e| format!("{:?}", e))?
|
||||
.ok_or_else(|| "Missing anchor beacon block".to_string())?;
|
||||
let anchor_state = db
|
||||
.get_state(&anchor_block.state_root(), Some(anchor_block.slot()))
|
||||
.map_err(|e| format!("{:?}", e))?
|
||||
.ok_or_else(|| "Missing anchor beacon state".to_string())?;
|
||||
let snapshot = BeaconSnapshot {
|
||||
beacon_block: Arc::new(anchor_block),
|
||||
beacon_block_root: anchor_block_root,
|
||||
beacon_state: anchor_state,
|
||||
};
|
||||
let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot);
|
||||
let fork_choice = ForkChoice::from_anchor(
|
||||
store,
|
||||
anchor_block_root,
|
||||
&snapshot.beacon_block,
|
||||
&snapshot.beacon_state,
|
||||
// Don't provide the current slot here, just use what's in the store. We don't need to know
|
||||
// the head here, plus it's nice to avoid mutating fork choice during this process.
|
||||
None,
|
||||
// This config will get overwritten on startup.
|
||||
CountUnrealizedFull::default(),
|
||||
spec,
|
||||
)
|
||||
.map_err(|e| format!("{:?}", e))?;
|
||||
persisted_fork_choice.fork_choice = fork_choice.to_persisted();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn update_fork_choice<T: BeaconChainTypes>(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV7,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<(), StoreError> {
|
||||
// `PersistedForkChoice` stores the `ProtoArray` as a `Vec<u8>`. Deserialize these
|
||||
// bytes assuming the legacy struct, and transform them to the new struct before
|
||||
// re-serializing.
|
||||
let ssz_container_v6 =
|
||||
SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
|
||||
.map_err(|e| {
|
||||
StoreError::SchemaMigrationError(format!(
|
||||
"Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
// Clone the V6 proto nodes in order to maintain information about `node.justified_epoch`
|
||||
// and `node.finalized_epoch`.
|
||||
let nodes_v6 = ssz_container_v6.nodes.clone();
|
||||
|
||||
let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint;
|
||||
let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint;
|
||||
|
||||
// These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint`
|
||||
// to `None`.
|
||||
let ssz_container_v7: SszContainerV7 =
|
||||
ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint);
|
||||
let ssz_container_v10: SszContainerV10 = ssz_container_v7.into();
|
||||
let ssz_container: SszContainer = ssz_container_v10.into();
|
||||
// `CountUnrealizedFull::default()` represents the count-unrealized-full config which will be overwritten on startup.
|
||||
let mut fork_choice: ProtoArrayForkChoice =
|
||||
(ssz_container, CountUnrealizedFull::default()).into();
|
||||
|
||||
update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
|
||||
// Update the justified checkpoint in the store in case we have a discrepancy
|
||||
// between the store and the proto array nodes.
|
||||
update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice)
|
||||
.map_err(StoreError::SchemaMigrationError)?;
|
||||
|
||||
// Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence.
|
||||
let ssz_container = SszContainer::from(&fork_choice);
|
||||
let ssz_container_v7 = SszContainerV7::from(ssz_container);
|
||||
|
||||
persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes();
|
||||
persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct HeadInfo {
|
||||
index: usize,
|
||||
root: Hash256,
|
||||
slot: Slot,
|
||||
}
|
||||
|
||||
fn update_checkpoints<T: BeaconChainTypes>(
|
||||
finalized_root: Hash256,
|
||||
nodes_v6: &[ProtoNodeV6],
|
||||
fork_choice: &mut ProtoArrayForkChoice,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<(), String> {
|
||||
let heads = find_finalized_descendant_heads(finalized_root, fork_choice);
|
||||
|
||||
// For each head, first gather all epochs we will need to find justified or finalized roots for.
|
||||
for head in heads {
|
||||
// `relevant_epochs` are epochs for which we will need to find the root at the start slot.
|
||||
// We don't need to worry about whether the are finalized or justified epochs.
|
||||
let mut relevant_epochs = HashSet::new();
|
||||
let relevant_epoch_finder = |index, _: &mut ProtoNode| {
|
||||
let (justified_epoch, finalized_epoch) = nodes_v6
|
||||
.get(index)
|
||||
.map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
|
||||
.ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;
|
||||
relevant_epochs.insert(justified_epoch);
|
||||
relevant_epochs.insert(finalized_epoch);
|
||||
Ok(())
|
||||
};
|
||||
|
||||
apply_to_chain_of_ancestors(
|
||||
finalized_root,
|
||||
head.index,
|
||||
fork_choice,
|
||||
relevant_epoch_finder,
|
||||
)?;
|
||||
|
||||
// find the block roots associated with each relevant epoch.
|
||||
let roots_by_epoch =
|
||||
map_relevant_epochs_to_roots::<T>(head.root, head.slot, relevant_epochs, db.clone())?;
|
||||
|
||||
// Apply this mutator to the chain of descendants from this head, adding justified
|
||||
// and finalized checkpoints for each.
|
||||
let node_mutator = |index, node: &mut ProtoNode| {
|
||||
let (justified_epoch, finalized_epoch) = nodes_v6
|
||||
.get(index)
|
||||
.map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
|
||||
.ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;
|
||||
|
||||
// Update the checkpoints only if they haven't already been populated.
|
||||
if node.justified_checkpoint.is_none() {
|
||||
let justified_checkpoint =
|
||||
roots_by_epoch
|
||||
.get(&justified_epoch)
|
||||
.map(|&root| Checkpoint {
|
||||
epoch: justified_epoch,
|
||||
root,
|
||||
});
|
||||
node.justified_checkpoint = justified_checkpoint;
|
||||
}
|
||||
if node.finalized_checkpoint.is_none() {
|
||||
let finalized_checkpoint =
|
||||
roots_by_epoch
|
||||
.get(&finalized_epoch)
|
||||
.map(|&root| Checkpoint {
|
||||
epoch: finalized_epoch,
|
||||
root,
|
||||
});
|
||||
node.finalized_checkpoint = finalized_checkpoint;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
};
|
||||
|
||||
apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Coverts the given `HashSet<Epoch>` to a `Vec<Epoch>` then reverse sorts by `Epoch`. Next, a
|
||||
/// single `BlockRootsIterator` is created which is used to iterate backwards from the given
|
||||
/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch.
|
||||
fn map_relevant_epochs_to_roots<T: BeaconChainTypes>(
|
||||
head_root: Hash256,
|
||||
head_slot: Slot,
|
||||
epochs: HashSet<Epoch>,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<HashMap<Epoch, Hash256>, String> {
|
||||
// Convert the `HashSet` to a `Vec` and reverse sort the epochs.
|
||||
let mut relevant_epochs = epochs.into_iter().collect::<Vec<_>>();
|
||||
relevant_epochs.sort_unstable_by(|a, b| b.cmp(a));
|
||||
|
||||
// Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch.
|
||||
let mut iter = std::iter::once(Ok((head_root, head_slot)))
|
||||
.chain(BlockRootsIterator::from_block(&db, head_root).map_err(|e| format!("{:?}", e))?);
|
||||
let mut roots_by_epoch = HashMap::new();
|
||||
for epoch in relevant_epochs {
|
||||
let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
let root = iter
|
||||
.find_map(|next| match next {
|
||||
Ok((root, slot)) => (slot == start_slot).then_some(Ok(root)),
|
||||
Err(e) => Some(Err(format!("{:?}", e))),
|
||||
})
|
||||
.transpose()?
|
||||
.ok_or_else(|| "Justified root not found".to_string())?;
|
||||
roots_by_epoch.insert(epoch, root);
|
||||
}
|
||||
Ok(roots_by_epoch)
|
||||
}
|
||||
|
||||
/// Applies a mutator to every node in a chain, starting from the node at the given
|
||||
/// `head_index` and iterating through ancestors until the `finalized_root` is reached.
|
||||
fn apply_to_chain_of_ancestors<F>(
|
||||
finalized_root: Hash256,
|
||||
head_index: usize,
|
||||
fork_choice: &mut ProtoArrayForkChoice,
|
||||
mut node_mutator: F,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
F: FnMut(usize, &mut ProtoNode) -> Result<(), String>,
|
||||
{
|
||||
let head = fork_choice
|
||||
.core_proto_array_mut()
|
||||
.nodes
|
||||
.get_mut(head_index)
|
||||
.ok_or_else(|| "Head index not found in proto nodes".to_string())?;
|
||||
|
||||
node_mutator(head_index, head)?;
|
||||
|
||||
let mut parent_index_opt = head.parent;
|
||||
let mut parent_opt =
|
||||
parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));
|
||||
|
||||
// Iterate backwards through all parents until there is no reference to a parent or we reach
|
||||
// the `finalized_root` node.
|
||||
while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) {
|
||||
node_mutator(parent_index, parent)?;
|
||||
|
||||
// Break out of this while loop *after* the `node_mutator` has been applied to the finalized
|
||||
// node.
|
||||
if parent.root == finalized_root {
|
||||
break;
|
||||
}
|
||||
|
||||
// Update parent values
|
||||
parent_index_opt = parent.parent;
|
||||
parent_opt = parent_index_opt
|
||||
.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then
|
||||
/// checks that these nodes are descendants of the finalized root in order to determine if they are
|
||||
/// relevant.
|
||||
fn find_finalized_descendant_heads(
|
||||
finalized_root: Hash256,
|
||||
fork_choice: &ProtoArrayForkChoice,
|
||||
) -> Vec<HeadInfo> {
|
||||
let nodes_referenced_as_parents: HashSet<usize> = fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.filter_map(|node| node.parent)
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(index, node)| {
|
||||
(!nodes_referenced_as_parents.contains(&index)
|
||||
&& fork_choice.is_descendant(finalized_root, node.root))
|
||||
.then_some(HeadInfo {
|
||||
index,
|
||||
root: node.root,
|
||||
slot: node.slot,
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn update_store_justified_checkpoint(
|
||||
persisted_fork_choice: &mut PersistedForkChoiceV7,
|
||||
fork_choice: &mut ProtoArrayForkChoice,
|
||||
) -> Result<(), String> {
|
||||
let justified_checkpoint = fork_choice
|
||||
.core_proto_array()
|
||||
.nodes
|
||||
.iter()
|
||||
.filter_map(|node| {
|
||||
(node.finalized_checkpoint
|
||||
== Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint))
|
||||
.then_some(node.justified_checkpoint)
|
||||
.flatten()
|
||||
})
|
||||
.max_by_key(|justified_checkpoint| justified_checkpoint.epoch)
|
||||
.ok_or("Proto node with current finalized checkpoint not found")?;
|
||||
|
||||
fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Add a zero `proposer_boost_root` when migrating from V1-6 to V7.
impl From<PersistedForkChoiceStoreV1> for PersistedForkChoiceStoreV7 {
    fn from(other: PersistedForkChoiceStoreV1) -> Self {
        Self {
            balances_cache: other.balances_cache,
            time: other.time,
            finalized_checkpoint: other.finalized_checkpoint,
            justified_checkpoint: other.justified_checkpoint,
            justified_balances: other.justified_balances,
            best_justified_checkpoint: other.best_justified_checkpoint,
            // Proposer boost is new in V7; there is no prior value, so start from zero.
            proposer_boost_root: Hash256::zero(),
        }
    }
}
|
||||
|
||||
// Upgrade the persisted fork choice wrapper from V1 to V7.
impl From<PersistedForkChoiceV1> for PersistedForkChoiceV7 {
    fn from(other: PersistedForkChoiceV1) -> Self {
        Self {
            // Carried over unchanged.
            fork_choice: other.fork_choice,
            // Upgraded via `From<PersistedForkChoiceStoreV1> for PersistedForkChoiceStoreV7`.
            fork_choice_store: other.fork_choice_store.into(),
        }
    }
}
|
@ -1,50 +0,0 @@
|
||||
use crate::beacon_chain::BeaconChainTypes;
|
||||
use crate::beacon_fork_choice_store::{
|
||||
BalancesCacheV8, CacheItemV8, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8,
|
||||
};
|
||||
use crate::persisted_fork_choice::{PersistedForkChoiceV7, PersistedForkChoiceV8};
|
||||
use std::sync::Arc;
|
||||
use store::{Error as StoreError, HotColdDB};
|
||||
use types::EthSpec;
|
||||
|
||||
pub fn update_fork_choice<T: BeaconChainTypes>(
|
||||
fork_choice: PersistedForkChoiceV7,
|
||||
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
||||
) -> Result<PersistedForkChoiceV8, StoreError> {
|
||||
let PersistedForkChoiceStoreV7 {
|
||||
balances_cache,
|
||||
time,
|
||||
finalized_checkpoint,
|
||||
justified_checkpoint,
|
||||
justified_balances,
|
||||
best_justified_checkpoint,
|
||||
proposer_boost_root,
|
||||
} = fork_choice.fork_choice_store;
|
||||
let mut fork_choice_store = PersistedForkChoiceStoreV8 {
|
||||
balances_cache: BalancesCacheV8::default(),
|
||||
time,
|
||||
finalized_checkpoint,
|
||||
justified_checkpoint,
|
||||
justified_balances,
|
||||
best_justified_checkpoint,
|
||||
proposer_boost_root,
|
||||
};
|
||||
|
||||
// Add epochs to the balances cache. It's safe to just use the block's epoch because
|
||||
// before schema v8 the cache would always miss on skipped slots.
|
||||
for item in balances_cache.items {
|
||||
// Drop any blocks that aren't found, they're presumably too old and this is only a cache.
|
||||
if let Some(block) = db.get_full_block_prior_to_v9(&item.block_root)? {
|
||||
fork_choice_store.balances_cache.items.push(CacheItemV8 {
|
||||
block_root: item.block_root,
|
||||
epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()),
|
||||
balances: item.balances,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(PersistedForkChoiceV8 {
|
||||
fork_choice: fork_choice.fork_choice,
|
||||
fork_choice_store,
|
||||
})
|
||||
}
|
@ -1,176 +0,0 @@
|
||||
use crate::beacon_chain::BeaconChainTypes;
|
||||
use slog::{debug, error, info, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use store::{DBColumn, Error, HotColdDB, KeyValueStore};
|
||||
use types::{EthSpec, Hash256, Slot};
|
||||
|
||||
const OPS_PER_BLOCK_WRITE: usize = 2;
|
||||
|
||||
/// The slot clock isn't usually available before the database is initialized, so we construct a
|
||||
/// temporary slot clock by reading the genesis state. It should always exist if the database is
|
||||
/// initialized at a prior schema version, however we still handle the lack of genesis state
|
||||
/// gracefully.
|
||||
fn get_slot_clock<T: BeaconChainTypes>(
|
||||
db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
|
||||
log: &Logger,
|
||||
) -> Result<Option<T::SlotClock>, Error> {
|
||||
// At schema v8 the genesis block must be a *full* block (with payload). In all likeliness it
|
||||
// actually has no payload.
|
||||
let spec = db.get_chain_spec();
|
||||
let genesis_block = if let Some(block) = db.get_full_block_prior_to_v9(&Hash256::zero())? {
|
||||
block
|
||||
} else {
|
||||
error!(log, "Missing genesis block");
|
||||
return Ok(None);
|
||||
};
|
||||
let genesis_state =
|
||||
if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
|
||||
state
|
||||
} else {
|
||||
error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
|
||||
return Ok(None);
|
||||
};
|
||||
Ok(Some(T::SlotClock::new(
|
||||
spec.genesis_slot,
|
||||
Duration::from_secs(genesis_state.genesis_time()),
|
||||
Duration::from_secs(spec.seconds_per_slot),
|
||||
)))
|
||||
}
|
||||
|
||||
/// Migrate the database schema to v9: each Bellatrix block is re-written as a blinded block
/// with its execution payload stored separately.
///
/// Resumable: blocks already readable in the post-v9 format are skipped.
pub fn upgrade_to_v9<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    log: Logger,
) -> Result<(), Error> {
    // This upgrade is a no-op if the Bellatrix fork epoch has not already passed. This migration
    // was implemented before the activation of Bellatrix on all networks except Kiln, so the only
    // users who will need to wait for the slow copying migration are Kiln users.
    let slot_clock = if let Some(slot_clock) = get_slot_clock::<T>(&db, &log)? {
        slot_clock
    } else {
        error!(
            log,
            "Unable to complete migration because genesis state or genesis block is missing"
        );
        return Err(Error::SlotClockUnavailableForMigration);
    };

    // If the slot clock cannot report a current slot, treat the migration as a no-op.
    let current_epoch = if let Some(slot) = slot_clock.now() {
        slot.epoch(T::EthSpec::slots_per_epoch())
    } else {
        return Ok(());
    };

    // With no Bellatrix fork epoch scheduled there cannot be any blocks with payloads to split.
    let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch {
        fork_epoch
    } else {
        info!(
            log,
            "Upgrading database schema to v9 (no-op)";
            "info" => "To downgrade before the merge run `lighthouse db migrate`"
        );
        return Ok(());
    };

    if current_epoch >= bellatrix_fork_epoch {
        info!(
            log,
            "Upgrading database schema to v9";
            "info" => "This will take several minutes. Each block will be read from and \
                re-written to the database. You may safely exit now (Ctrl-C) and resume \
                the migration later. Downgrading is no longer possible."
        );

        // Re-write every hot block in the new (blinded) format.
        for res in db.hot_db.iter_column_keys(DBColumn::BeaconBlock) {
            let block_root = res?;
            let block = match db.get_full_block_prior_to_v9(&block_root) {
                // A pre-v9 block is present.
                Ok(Some(block)) => block,
                // A block is missing.
                Ok(None) => return Err(Error::BlockNotFound(block_root)),
                // There was an error reading a pre-v9 block. Try reading it as a post-v9 block.
                Err(_) => {
                    if db.try_get_full_block(&block_root)?.is_some() {
                        // The block is present as a post-v9 block, assume that it was already
                        // correctly migrated.
                        continue;
                    } else {
                        // This scenario should not be encountered since a prior check has ensured
                        // that this block exists.
                        return Err(Error::V9MigrationFailure(block_root));
                    }
                }
            };

            if block.message().execution_payload().is_ok() {
                // Overwrite block with blinded block and store execution payload separately.
                debug!(
                    log,
                    "Rewriting Bellatrix block";
                    "block_root" => ?block_root,
                );

                let mut kv_batch = Vec::with_capacity(OPS_PER_BLOCK_WRITE);
                db.block_as_kv_store_ops(&block_root, block, &mut kv_batch)?;
                db.hot_db.do_atomically(kv_batch)?;
            }
        }
    } else {
        info!(
            log,
            "Upgrading database schema to v9 (no-op)";
            "info" => "To downgrade before the merge run `lighthouse db migrate`"
        );
    }

    Ok(())
}
|
||||
|
||||
// This downgrade is conditional and will only succeed if the Bellatrix fork epoch hasn't been
// reached.
pub fn downgrade_from_v9<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    log: Logger,
) -> Result<(), Error> {
    // A temporary slot clock is needed to determine the current epoch; without the genesis data
    // we cannot decide whether the downgrade is permitted.
    let slot_clock = if let Some(slot_clock) = get_slot_clock::<T>(&db, &log)? {
        slot_clock
    } else {
        error!(
            log,
            "Unable to complete migration because genesis state or genesis block is missing"
        );
        return Err(Error::SlotClockUnavailableForMigration);
    };

    // If the slot clock cannot report a current slot, treat the downgrade as a no-op.
    let current_epoch = if let Some(slot) = slot_clock.now() {
        slot.epoch(T::EthSpec::slots_per_epoch())
    } else {
        return Ok(());
    };

    // Without a Bellatrix fork epoch scheduled, downgrading is always permitted.
    let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch {
        fork_epoch
    } else {
        info!(
            log,
            "Downgrading database schema from v9";
            "info" => "You need to upgrade to v9 again before the merge"
        );
        return Ok(());
    };

    // Once the Bellatrix fork epoch has been reached, the v9 layout is required and the
    // downgrade is refused.
    if current_epoch >= bellatrix_fork_epoch {
        error!(
            log,
            "Downgrading from schema v9 after the Bellatrix fork epoch is not supported";
            "current_epoch" => current_epoch,
            "bellatrix_fork_epoch" => bellatrix_fork_epoch,
            "reason" => "You need a v9 schema database to run on a merged version of Prater or \
                mainnet. On Kiln, you have to re-sync",
        );
        Err(Error::ResyncRequiredForExecutionPayloadSeparation)
    } else {
        Ok(())
    }
}
|
@ -1,315 +0,0 @@
|
||||
use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot};
|
||||
use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker};
|
||||
use proto_array::ExecutionStatus;
|
||||
use ssz::four_byte_option_impl;
|
||||
use ssz::Encode;
|
||||
use ssz_derive::{Decode, Encode};
|
||||
use superstruct::superstruct;
|
||||
|
||||
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
|
||||
// selector.
|
||||
four_byte_option_impl!(four_byte_option_usize, usize);
|
||||
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);
|
||||
|
||||
/// On-disk layout of a proto-array node across historical schema versions.
///
/// `superstruct` generates one struct per variant (`ProtoNodeV1`, `ProtoNodeV6`, `ProtoNodeV7`,
/// `ProtoNodeV10`) and no combined enum. V1/V6 store bare justified/finalized epochs; V7 and
/// V10 store optional checkpoints; V6 onwards carry `execution_status`; V10 adds the
/// unrealized checkpoints.
#[superstruct(
    variants(V1, V6, V7, V10),
    variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)),
    no_enum
)]
pub struct ProtoNode {
    pub slot: Slot,
    pub state_root: Hash256,
    pub target_root: Hash256,
    pub current_epoch_shuffling_id: AttestationShufflingId,
    pub next_epoch_shuffling_id: AttestationShufflingId,
    pub root: Hash256,
    // Optional fields use the legacy four-byte union selector encoding (see the
    // `four_byte_option_*` macro invocations) for on-disk compatibility.
    #[ssz(with = "four_byte_option_usize")]
    pub parent: Option<usize>,
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7, V10))]
    pub justified_checkpoint: Option<Checkpoint>,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7, V10))]
    pub finalized_checkpoint: Option<Checkpoint>,
    pub weight: u64,
    #[ssz(with = "four_byte_option_usize")]
    pub best_child: Option<usize>,
    #[ssz(with = "four_byte_option_usize")]
    pub best_descendant: Option<usize>,
    #[superstruct(only(V6, V7, V10))]
    pub execution_status: ExecutionStatus,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V10))]
    pub unrealized_justified_checkpoint: Option<Checkpoint>,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V10))]
    pub unrealized_finalized_checkpoint: Option<Checkpoint>,
}
|
||||
|
||||
impl Into<ProtoNodeV6> for ProtoNodeV1 {
|
||||
fn into(self) -> ProtoNodeV6 {
|
||||
ProtoNodeV6 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_epoch: self.justified_epoch,
|
||||
finalized_epoch: self.finalized_epoch,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
// We set the following execution value as if the block is a pre-merge-fork block. This
|
||||
// is safe as long as we never import a merge block with the old version of proto-array.
|
||||
// This will be safe since we can't actually process merge blocks until we've made this
|
||||
// change to fork choice.
|
||||
execution_status: ExecutionStatus::irrelevant(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNodeV7> for ProtoNodeV6 {
|
||||
fn into(self) -> ProtoNodeV7 {
|
||||
ProtoNodeV7 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: None,
|
||||
finalized_checkpoint: None,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNodeV10> for ProtoNodeV7 {
|
||||
fn into(self) -> ProtoNodeV10 {
|
||||
ProtoNodeV10 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
unrealized_justified_checkpoint: None,
|
||||
unrealized_finalized_checkpoint: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNodeV7> for ProtoNodeV10 {
|
||||
fn into(self) -> ProtoNodeV7 {
|
||||
ProtoNodeV7 {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<ProtoNode> for ProtoNodeV10 {
|
||||
fn into(self) -> ProtoNode {
|
||||
ProtoNode {
|
||||
slot: self.slot,
|
||||
state_root: self.state_root,
|
||||
target_root: self.target_root,
|
||||
current_epoch_shuffling_id: self.current_epoch_shuffling_id,
|
||||
next_epoch_shuffling_id: self.next_epoch_shuffling_id,
|
||||
root: self.root,
|
||||
parent: self.parent,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
weight: self.weight,
|
||||
best_child: self.best_child,
|
||||
best_descendant: self.best_descendant,
|
||||
execution_status: self.execution_status,
|
||||
unrealized_justified_checkpoint: self.unrealized_justified_checkpoint,
|
||||
unrealized_finalized_checkpoint: self.unrealized_finalized_checkpoint,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Downgrade the current in-memory `ProtoNode` to the V7 on-disk layout, dropping the
// V10-only unrealized checkpoint fields.
impl From<ProtoNode> for ProtoNodeV7 {
    fn from(container: ProtoNode) -> Self {
        Self {
            slot: container.slot,
            state_root: container.state_root,
            target_root: container.target_root,
            current_epoch_shuffling_id: container.current_epoch_shuffling_id,
            next_epoch_shuffling_id: container.next_epoch_shuffling_id,
            root: container.root,
            parent: container.parent,
            justified_checkpoint: container.justified_checkpoint,
            finalized_checkpoint: container.finalized_checkpoint,
            weight: container.weight,
            best_child: container.best_child,
            best_descendant: container.best_descendant,
            execution_status: container.execution_status,
        }
    }
}
|
||||
|
||||
/// On-disk layout of the whole proto-array fork choice container across schema versions.
///
/// One struct is generated per variant (`SszContainerV1`, `SszContainerV6`, `SszContainerV7`,
/// `SszContainerV10`); the `nodes` list is typed with the matching `ProtoNode` version.
#[superstruct(
    variants(V1, V6, V7, V10),
    variant_attributes(derive(Encode, Decode)),
    no_enum
)]
#[derive(Encode, Decode)]
pub struct SszContainer {
    pub votes: Vec<VoteTracker>,
    pub balances: Vec<u64>,
    pub prune_threshold: usize,
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    #[superstruct(only(V7, V10))]
    pub justified_checkpoint: Checkpoint,
    #[superstruct(only(V7, V10))]
    pub finalized_checkpoint: Checkpoint,
    #[superstruct(only(V1))]
    pub nodes: Vec<ProtoNodeV1>,
    #[superstruct(only(V6))]
    pub nodes: Vec<ProtoNodeV6>,
    #[superstruct(only(V7))]
    pub nodes: Vec<ProtoNodeV7>,
    #[superstruct(only(V10))]
    pub nodes: Vec<ProtoNodeV10>,
    pub indices: Vec<(Hash256, usize)>,
    #[superstruct(only(V7, V10))]
    pub previous_proposer_boost: ProposerBoost,
}
|
||||
|
||||
impl Into<SszContainerV6> for SszContainerV1 {
|
||||
fn into(self) -> SszContainerV6 {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainerV6 {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_epoch: self.justified_epoch,
|
||||
finalized_epoch: self.finalized_epoch,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SszContainerV6 {
    /// Upgrade to the V7 layout using externally-supplied checkpoints. This cannot be a plain
    /// `From` impl because V6 only stores epochs, not full checkpoints.
    pub(crate) fn into_ssz_container_v7(
        self,
        justified_checkpoint: Checkpoint,
        finalized_checkpoint: Checkpoint,
    ) -> SszContainerV7 {
        // Upgrade every node to the V7 layout.
        let nodes = self.nodes.into_iter().map(Into::into).collect();

        SszContainerV7 {
            votes: self.votes,
            balances: self.balances,
            prune_threshold: self.prune_threshold,
            justified_checkpoint,
            finalized_checkpoint,
            nodes,
            indices: self.indices,
            // Proposer boost is new in V7; start from its default value.
            previous_proposer_boost: ProposerBoost::default(),
        }
    }
}
|
||||
|
||||
impl Into<SszContainerV10> for SszContainerV7 {
|
||||
fn into(self) -> SszContainerV10 {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainerV10 {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
previous_proposer_boost: self.previous_proposer_boost,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<SszContainerV7> for SszContainerV10 {
|
||||
fn into(self) -> SszContainerV7 {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainerV7 {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
previous_proposer_boost: self.previous_proposer_boost,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<SszContainer> for SszContainerV10 {
|
||||
fn into(self) -> SszContainer {
|
||||
let nodes = self.nodes.into_iter().map(Into::into).collect();
|
||||
|
||||
SszContainer {
|
||||
votes: self.votes,
|
||||
balances: self.balances,
|
||||
prune_threshold: self.prune_threshold,
|
||||
justified_checkpoint: self.justified_checkpoint,
|
||||
finalized_checkpoint: self.finalized_checkpoint,
|
||||
nodes,
|
||||
indices: self.indices,
|
||||
previous_proposer_boost: self.previous_proposer_boost,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Downgrade the current `SszContainer` to the V7 on-disk layout.
impl From<SszContainer> for SszContainerV7 {
    fn from(container: SszContainer) -> Self {
        // Downgrade every node to the V7 layout (dropping V10-only fields).
        let nodes = container.nodes.into_iter().map(Into::into).collect();

        Self {
            votes: container.votes,
            balances: container.balances,
            prune_threshold: container.prune_threshold,
            justified_checkpoint: container.justified_checkpoint,
            finalized_checkpoint: container.finalized_checkpoint,
            nodes,
            indices: container.indices,
            previous_proposer_boost: container.previous_proposer_boost,
        }
    }
}
|
@ -298,27 +298,6 @@ impl<T: EthSpec> SnapshotCache<T> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*.
|
||||
///
|
||||
/// Care must be taken not to mutate the state in an invalid way. This function should only
|
||||
/// be used to mutate the *caches* of the state, for example the tree hash cache when
|
||||
/// calculating a light client merkle proof.
|
||||
pub fn borrow_unadvanced_state_mut(
|
||||
&mut self,
|
||||
block_root: Hash256,
|
||||
) -> Option<&mut BeaconState<T>> {
|
||||
self.snapshots
|
||||
.iter_mut()
|
||||
.find(|snapshot| {
|
||||
// If the pre-state exists then state advance has already taken the state for
|
||||
// `block_root` and mutated its tree hash cache. Rather than re-building it while
|
||||
// holding the snapshot cache lock (>1 second), prefer to return `None` from this
|
||||
// function and force the caller to load it from disk.
|
||||
snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none()
|
||||
})
|
||||
.map(|snapshot| &mut snapshot.beacon_state)
|
||||
}
|
||||
|
||||
/// If there is a snapshot with `block_root`, clone it and return the clone.
|
||||
pub fn get_cloned(
|
||||
&self,
|
||||
|
@ -977,8 +977,7 @@ mod test {
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use types::{
|
||||
ExecutionPayloadMerge, ForkName, FullPayload, MainnetEthSpec, Transactions, Unsigned,
|
||||
VariableList,
|
||||
ExecutionPayloadMerge, ForkName, MainnetEthSpec, Transactions, Unsigned, VariableList,
|
||||
};
|
||||
|
||||
struct Tester {
|
||||
|
@ -35,8 +35,6 @@ use tokio::{
|
||||
time::sleep,
|
||||
};
|
||||
use tokio_stream::wrappers::WatchStream;
|
||||
#[cfg(feature = "withdrawals")]
|
||||
use types::Withdrawal;
|
||||
use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment};
|
||||
use types::{
|
||||
BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
|
||||
|
@ -1,7 +1,7 @@
|
||||
use crate::{
|
||||
test_utils::{
|
||||
Block, MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET,
|
||||
DEFAULT_TERMINAL_BLOCK, DEFAULT_TERMINAL_DIFFICULTY,
|
||||
MockServer, DEFAULT_BUILDER_THRESHOLD_WEI, DEFAULT_JWT_SECRET, DEFAULT_TERMINAL_BLOCK,
|
||||
DEFAULT_TERMINAL_DIFFICULTY,
|
||||
},
|
||||
Config, *,
|
||||
};
|
||||
|
@ -17,6 +17,7 @@ mod proposer_duties;
|
||||
mod publish_blocks;
|
||||
mod state_id;
|
||||
mod sync_committees;
|
||||
mod ui;
|
||||
mod validator_inclusion;
|
||||
mod version;
|
||||
|
||||
@ -2940,6 +2941,18 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/ui/validator_count
|
||||
let get_lighthouse_ui_validator_count = warp::path("lighthouse")
|
||||
.and(warp::path("ui"))
|
||||
.and(warp::path("validator_count"))
|
||||
.and(warp::path::end())
|
||||
.and(chain_filter.clone())
|
||||
.and_then(|chain: Arc<BeaconChain<T>>| {
|
||||
blocking_json_task(move || {
|
||||
ui::get_validator_count(chain).map(api_types::GenericResponse::from)
|
||||
})
|
||||
});
|
||||
|
||||
// GET lighthouse/syncing
|
||||
let get_lighthouse_syncing = warp::path("lighthouse")
|
||||
.and(warp::path("syncing"))
|
||||
@ -3408,6 +3421,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.or(get_lighthouse_attestation_performance.boxed())
|
||||
.or(get_lighthouse_block_packing_efficiency.boxed())
|
||||
.or(get_lighthouse_merge_readiness.boxed())
|
||||
.or(get_lighthouse_ui_validator_count.boxed())
|
||||
.or(get_events.boxed()),
|
||||
)
|
||||
.boxed()
|
||||
|
71
beacon_node/http_api/src/ui.rs
Normal file
71
beacon_node/http_api/src/ui.rs
Normal file
@ -0,0 +1,71 @@
|
||||
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use eth2::types::ValidatorStatus;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use warp_utils::reject::beacon_chain_error;
|
||||
|
||||
/// Counts of validators in the head state, bucketed by `ValidatorStatus`.
///
/// Only the fine-grained (non-superset) statuses are counted; see `get_validator_count`.
#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)]
pub struct ValidatorCountResponse {
    pub active_ongoing: u64,
    pub active_exiting: u64,
    pub active_slashed: u64,
    pub pending_initialized: u64,
    pub pending_queued: u64,
    pub withdrawal_possible: u64,
    pub withdrawal_done: u64,
    pub exited_unslashed: u64,
    pub exited_slashed: u64,
}
|
||||
|
||||
pub fn get_validator_count<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
) -> Result<ValidatorCountResponse, warp::Rejection> {
|
||||
let spec = &chain.spec;
|
||||
let mut active_ongoing = 0;
|
||||
let mut active_exiting = 0;
|
||||
let mut active_slashed = 0;
|
||||
let mut pending_initialized = 0;
|
||||
let mut pending_queued = 0;
|
||||
let mut withdrawal_possible = 0;
|
||||
let mut withdrawal_done = 0;
|
||||
let mut exited_unslashed = 0;
|
||||
let mut exited_slashed = 0;
|
||||
|
||||
chain
|
||||
.with_head(|head| {
|
||||
let state = &head.beacon_state;
|
||||
let epoch = state.current_epoch();
|
||||
for validator in state.validators() {
|
||||
let status =
|
||||
ValidatorStatus::from_validator(validator, epoch, spec.far_future_epoch);
|
||||
|
||||
match status {
|
||||
ValidatorStatus::ActiveOngoing => active_ongoing += 1,
|
||||
ValidatorStatus::ActiveExiting => active_exiting += 1,
|
||||
ValidatorStatus::ActiveSlashed => active_slashed += 1,
|
||||
ValidatorStatus::PendingInitialized => pending_initialized += 1,
|
||||
ValidatorStatus::PendingQueued => pending_queued += 1,
|
||||
ValidatorStatus::WithdrawalPossible => withdrawal_possible += 1,
|
||||
ValidatorStatus::WithdrawalDone => withdrawal_done += 1,
|
||||
ValidatorStatus::ExitedUnslashed => exited_unslashed += 1,
|
||||
ValidatorStatus::ExitedSlashed => exited_slashed += 1,
|
||||
// Since we are not invoking `superset`, all other variants will be 0.
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
Ok::<(), BeaconChainError>(())
|
||||
})
|
||||
.map_err(beacon_chain_error)?;
|
||||
|
||||
Ok(ValidatorCountResponse {
|
||||
active_ongoing,
|
||||
active_exiting,
|
||||
active_slashed,
|
||||
pending_initialized,
|
||||
pending_queued,
|
||||
withdrawal_possible,
|
||||
withdrawal_done,
|
||||
exited_unslashed,
|
||||
exited_slashed,
|
||||
})
|
||||
}
|
@ -2122,7 +2122,7 @@ impl ApiTester {
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_blinded_block_production<Payload: ExecPayload<E>>(&self) {
|
||||
pub async fn test_blinded_block_production<Payload: AbstractExecPayload<E>>(&self) {
|
||||
let fork = self.chain.canonical_head.cached_head().head_fork();
|
||||
let genesis_validators_root = self.chain.genesis_validators_root;
|
||||
|
||||
@ -2182,7 +2182,7 @@ impl ApiTester {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn test_blinded_block_production_no_verify_randao<Payload: ExecPayload<E>>(
|
||||
pub async fn test_blinded_block_production_no_verify_randao<Payload: AbstractExecPayload<E>>(
|
||||
self,
|
||||
) -> Self {
|
||||
for _ in 0..E::slots_per_epoch() {
|
||||
@ -2206,7 +2206,9 @@ impl ApiTester {
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_blinded_block_production_verify_randao_invalid<Payload: ExecPayload<E>>(
|
||||
pub async fn test_blinded_block_production_verify_randao_invalid<
|
||||
Payload: AbstractExecPayload<E>,
|
||||
>(
|
||||
self,
|
||||
) -> Self {
|
||||
let fork = self.chain.canonical_head.cached_head().head_fork();
|
||||
@ -2664,7 +2666,7 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -2673,14 +2675,11 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
|
||||
assert_eq!(
|
||||
payload.execution_payload_header.fee_recipient,
|
||||
expected_fee_recipient
|
||||
);
|
||||
assert_eq!(payload.execution_payload_header.gas_limit, 11_111_111);
|
||||
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
|
||||
assert_eq!(payload.gas_limit(), 11_111_111);
|
||||
|
||||
// If this cache is empty, it indicates fallback was not used, so the payload came from the
|
||||
// mock builder.
|
||||
@ -2707,7 +2706,7 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -2716,14 +2715,11 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
|
||||
assert_eq!(
|
||||
payload.execution_payload_header.fee_recipient,
|
||||
expected_fee_recipient
|
||||
);
|
||||
assert_eq!(payload.execution_payload_header.gas_limit, 30_000_000);
|
||||
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
|
||||
assert_eq!(payload.gas_limit(), 30_000_000);
|
||||
|
||||
// This cache should not be populated because fallback should not have been used.
|
||||
assert!(self
|
||||
@ -2753,7 +2749,7 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -2762,12 +2758,9 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
assert_eq!(
|
||||
payload.execution_payload_header.fee_recipient,
|
||||
test_fee_recipient
|
||||
);
|
||||
assert_eq!(payload.fee_recipient(), test_fee_recipient);
|
||||
|
||||
// This cache should not be populated because fallback should not have been used.
|
||||
assert!(self
|
||||
@ -2801,11 +2794,11 @@ impl ApiTester {
|
||||
.beacon_state
|
||||
.latest_execution_payload_header()
|
||||
.unwrap()
|
||||
.block_hash;
|
||||
.block_hash();
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -2814,12 +2807,9 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
assert_eq!(
|
||||
payload.execution_payload_header.parent_hash,
|
||||
expected_parent_hash
|
||||
);
|
||||
assert_eq!(payload.parent_hash(), expected_parent_hash);
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -2856,7 +2846,7 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -2865,12 +2855,9 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
assert_eq!(
|
||||
payload.execution_payload_header.prev_randao,
|
||||
expected_prev_randao
|
||||
);
|
||||
assert_eq!(payload.prev_randao(), expected_prev_randao);
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -2901,12 +2888,12 @@ impl ApiTester {
|
||||
.beacon_state
|
||||
.latest_execution_payload_header()
|
||||
.unwrap()
|
||||
.block_number
|
||||
.block_number()
|
||||
+ 1;
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -2915,12 +2902,9 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
assert_eq!(
|
||||
payload.execution_payload_header.block_number,
|
||||
expected_block_number
|
||||
);
|
||||
assert_eq!(payload.block_number(), expected_block_number);
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -2951,11 +2935,11 @@ impl ApiTester {
|
||||
.beacon_state
|
||||
.latest_execution_payload_header()
|
||||
.unwrap()
|
||||
.timestamp;
|
||||
.timestamp();
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -2964,9 +2948,9 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
assert!(payload.execution_payload_header.timestamp > min_expected_timestamp);
|
||||
assert!(payload.timestamp() > min_expected_timestamp);
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -2991,7 +2975,7 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3000,7 +2984,7 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -3028,7 +3012,7 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3037,7 +3021,7 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -3071,7 +3055,7 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3080,7 +3064,7 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
// This cache should not be populated because fallback should not have been used.
|
||||
assert!(self
|
||||
@ -3100,7 +3084,7 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3109,7 +3093,7 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -3149,7 +3133,7 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3158,7 +3142,7 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -3188,7 +3172,7 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(next_slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3197,7 +3181,7 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
// This cache should not be populated because fallback should not have been used.
|
||||
assert!(self
|
||||
@ -3231,7 +3215,7 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3240,13 +3224,10 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64);
|
||||
assert_eq!(
|
||||
payload.execution_payload_header.fee_recipient,
|
||||
expected_fee_recipient
|
||||
);
|
||||
assert_eq!(payload.fee_recipient(), expected_fee_recipient);
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
@ -3275,7 +3256,7 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let payload = self
|
||||
let payload: BlindedPayload<E> = self
|
||||
.client
|
||||
.get_validator_blinded_blocks::<E, BlindedPayload<E>>(slot, &randao_reveal, None)
|
||||
.await
|
||||
@ -3284,7 +3265,7 @@ impl ApiTester {
|
||||
.body()
|
||||
.execution_payload()
|
||||
.unwrap()
|
||||
.clone();
|
||||
.into();
|
||||
|
||||
// If this cache is populated, it indicates fallback to the local EE was correctly used.
|
||||
assert!(self
|
||||
|
@ -34,7 +34,6 @@ use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol};
|
||||
use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent};
|
||||
use libp2p::PeerId;
|
||||
use slog::{crit, debug, info, o, trace, warn};
|
||||
use std::io::Write;
|
||||
use std::path::PathBuf;
|
||||
use std::pin::Pin;
|
||||
use std::{
|
||||
|
@ -9,8 +9,8 @@ use std::time::Duration;
|
||||
use tokio::runtime::Runtime;
|
||||
use tokio::time::sleep;
|
||||
use types::{
|
||||
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, Epoch, EthSpec, ForkContext,
|
||||
ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot,
|
||||
BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, EmptyBlock, Epoch, EthSpec,
|
||||
ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot,
|
||||
};
|
||||
|
||||
mod common;
|
||||
|
@ -3,7 +3,6 @@ use crate::{metrics, service::NetworkMessage, sync::SyncMessage};
|
||||
use beacon_chain::store::Error;
|
||||
use beacon_chain::{
|
||||
attestation_verification::{self, Error as AttnError, VerifiedAttestation},
|
||||
blob_verification::BlobError,
|
||||
observed_operations::ObservationOutcome,
|
||||
sync_committee_verification::{self, Error as SyncCommitteeError},
|
||||
validator_monitor::get_block_delay_ms,
|
||||
|
@ -395,16 +395,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a schema V8 or earlier full block by reading it and its payload from disk.
|
||||
pub fn get_full_block_prior_to_v9(
|
||||
&self,
|
||||
block_root: &Hash256,
|
||||
) -> Result<Option<SignedBeaconBlock<E>>, Error> {
|
||||
self.get_block_with(block_root, |bytes| {
|
||||
SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec)
|
||||
})
|
||||
}
|
||||
|
||||
/// Convert a blinded block into a full block by loading its execution payload if necessary.
|
||||
pub fn make_full_block(
|
||||
&self,
|
||||
|
@ -99,6 +99,28 @@ curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: applicatio
|
||||
}
|
||||
```
|
||||
|
||||
### `/lighthouse/ui/validator_count`
|
||||
|
||||
```bash
|
||||
curl -X GET "http://localhost:5052/lighthouse/ui/validator_count" -H "accept: application/json" | jq
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"data": {
|
||||
"active_ongoing":479508,
|
||||
"active_exiting":0,
|
||||
"active_slashed":0,
|
||||
"pending_initialized":28,
|
||||
"pending_queued":0,
|
||||
"withdrawal_possible":933,
|
||||
"withdrawal_done":0,
|
||||
"exited_unslashed":0,
|
||||
"exited_slashed":3
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### `/lighthouse/syncing`
|
||||
|
||||
```bash
|
||||
|
@ -19,13 +19,13 @@ validator client or the slasher**.
|
||||
| v2.0.0 | Oct 2021 | v5 | no |
|
||||
| v2.1.0 | Jan 2022 | v8 | no |
|
||||
| v2.2.0 | Apr 2022 | v8 | no |
|
||||
| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) |
|
||||
| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) |
|
||||
| v2.3.0 | May 2022 | v9 | yes from <= v3.3.0 |
|
||||
| v2.4.0 | Jul 2022 | v9 | yes from <= v3.3.0 |
|
||||
| v2.5.0 | Aug 2022 | v11 | yes |
|
||||
| v3.0.0 | Aug 2022 | v11 | yes |
|
||||
| v3.1.0 | Sep 2022 | v12 | yes |
|
||||
| v3.2.0 | Oct 2022 | v12 | yes |
|
||||
| v3.3.0 | TBD | v13 | yes |
|
||||
| v3.3.0 | Nov 2022 | v13 | yes |
|
||||
|
||||
> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release
|
||||
> (e.g. v2.3.0).
|
||||
|
@ -36,6 +36,12 @@ ALTAIR_FORK_EPOCH: 512
|
||||
# Merge
|
||||
BELLATRIX_FORK_VERSION: 0x02000064
|
||||
BELLATRIX_FORK_EPOCH: 385536
|
||||
# Capella
|
||||
CAPELLA_FORK_VERSION: 0x03000064
|
||||
CAPELLA_FORK_EPOCH: 18446744073709551615
|
||||
# Eip4844
|
||||
EIP4844_FORK_VERSION: 0x04000064
|
||||
EIP4844_FORK_EPOCH: 18446744073709551615
|
||||
# Sharding
|
||||
SHARDING_FORK_VERSION: 0x03000064
|
||||
SHARDING_FORK_EPOCH: 18446744073709551615
|
||||
|
@ -36,6 +36,12 @@ ALTAIR_FORK_EPOCH: 74240
|
||||
# Merge
|
||||
BELLATRIX_FORK_VERSION: 0x02000000
|
||||
BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
|
||||
# Capella
|
||||
CAPELLA_FORK_VERSION: 0x03000000
|
||||
CAPELLA_FORK_EPOCH: 18446744073709551615
|
||||
# Eip4844
|
||||
EIP4844_FORK_VERSION: 0x04000000
|
||||
EIP4844_FORK_EPOCH: 18446744073709551615
|
||||
# Sharding
|
||||
SHARDING_FORK_VERSION: 0x03000000
|
||||
SHARDING_FORK_EPOCH: 18446744073709551615
|
||||
|
@ -8,7 +8,7 @@ use rayon::prelude::*;
|
||||
use std::borrow::Cow;
|
||||
use types::{
|
||||
AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256,
|
||||
IndexedAttestation, SignedBeaconBlock,
|
||||
SignedBeaconBlock,
|
||||
};
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
@ -633,7 +633,7 @@ impl ChainSpec {
|
||||
* Capella hard fork params
|
||||
*/
|
||||
capella_fork_version: [0x03, 00, 00, 00],
|
||||
capella_fork_epoch: Some(Epoch::new(18446744073709551615)),
|
||||
capella_fork_epoch: None,
|
||||
|
||||
/*
|
||||
* Eip4844 hard fork params
|
||||
@ -970,6 +970,7 @@ pub struct Config {
|
||||
#[serde(default = "default_eip4844_fork_version")]
|
||||
#[serde(with = "eth2_serde_utils::bytes_4_hex")]
|
||||
eip4844_fork_version: [u8; 4],
|
||||
#[serde(default)]
|
||||
#[serde(serialize_with = "serialize_fork_epoch")]
|
||||
#[serde(deserialize_with = "deserialize_fork_epoch")]
|
||||
pub eip4844_fork_epoch: Option<MaybeQuoted<Epoch>>,
|
||||
|
@ -38,8 +38,7 @@ pub type Withdrawals<T> = VariableList<Withdrawal, <T as EthSpec>::MaxWithdrawal
|
||||
)]
|
||||
#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)]
|
||||
#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
|
||||
#[serde(untagged)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
#[serde(bound = "T: EthSpec", untagged)]
|
||||
#[ssz(enum_behaviour = "transparent")]
|
||||
#[tree_hash(enum_behaviour = "transparent")]
|
||||
#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
|
||||
|
@ -33,7 +33,7 @@ use BeaconStateError;
|
||||
)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)]
|
||||
#[derivative(PartialEq, Hash(bound = "T: EthSpec"))]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
#[serde(bound = "T: EthSpec", untagged)]
|
||||
#[tree_hash(enum_behaviour = "transparent")]
|
||||
#[ssz(enum_behaviour = "transparent")]
|
||||
#[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))]
|
||||
|
@ -199,7 +199,7 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn previous_and_next_fork_consistent() {
|
||||
assert_eq!(ForkName::Merge.next_fork(), None);
|
||||
assert_eq!(ForkName::Eip4844.next_fork(), None);
|
||||
assert_eq!(ForkName::Base.previous_fork(), None);
|
||||
|
||||
for (prev_fork, fork) in ForkName::list_all().into_iter().tuple_windows() {
|
||||
|
@ -166,11 +166,10 @@ impl<'a, T: EthSpec> From<FullPayloadRef<'a, T>> for ExecutionPayload<T> {
|
||||
// FIXME: can this be implemented as Deref or Clone somehow?
|
||||
impl<'a, T: EthSpec> From<FullPayloadRef<'a, T>> for FullPayload<T> {
|
||||
fn from(full_payload_ref: FullPayloadRef<'a, T>) -> Self {
|
||||
match full_payload_ref {
|
||||
FullPayloadRef::Merge(payload_ref) => FullPayload::Merge(payload_ref.clone()),
|
||||
FullPayloadRef::Capella(payload_ref) => FullPayload::Capella(payload_ref.clone()),
|
||||
FullPayloadRef::Eip4844(payload_ref) => FullPayload::Eip4844(payload_ref.clone()),
|
||||
}
|
||||
map_full_payload_ref!(&'a _, full_payload_ref, move |payload, cons| {
|
||||
cons(payload);
|
||||
payload.clone().into()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -451,6 +450,15 @@ pub struct BlindedPayload<T: EthSpec> {
|
||||
pub execution_payload_header: ExecutionPayloadHeaderEip4844<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: EthSpec> From<BlindedPayloadRef<'a, T>> for BlindedPayload<T> {
|
||||
fn from(blinded_payload_ref: BlindedPayloadRef<'a, T>) -> Self {
|
||||
map_blinded_payload_ref!(&'a _, blinded_payload_ref, move |payload, cons| {
|
||||
cons(payload);
|
||||
payload.clone().into()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> {
|
||||
fn block_type() -> BlockType {
|
||||
BlockType::Blinded
|
||||
|
@ -20,11 +20,15 @@ use state_processing::{
|
||||
ConsensusContext,
|
||||
};
|
||||
use std::fmt::Debug;
|
||||
#[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
|
||||
use std::marker::PhantomData;
|
||||
use std::path::Path;
|
||||
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
|
||||
use types::SignedBlsToExecutionChange;
|
||||
use types::{
|
||||
Attestation, AttesterSlashing, BeaconBlock, BeaconState, BlindedPayload, ChainSpec, Deposit,
|
||||
EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedBlsToExecutionChange,
|
||||
SignedVoluntaryExit, SyncAggregate,
|
||||
EthSpec, ExecutionPayload, ForkName, FullPayload, ProposerSlashing, SignedVoluntaryExit,
|
||||
SyncAggregate,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Default, Deserialize)]
|
||||
@ -41,7 +45,10 @@ struct ExecutionMetadata {
|
||||
/// Newtype for testing withdrawals.
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct WithdrawalsPayload<T: EthSpec> {
|
||||
#[cfg(all(feature = "withdrawals", feature = "withdrawals-processing"))]
|
||||
payload: FullPayload<T>,
|
||||
#[cfg(not(all(feature = "withdrawals", feature = "withdrawals-processing")))]
|
||||
_phantom_data: PhantomData<T>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
|
@ -6,15 +6,14 @@ use crate::{
|
||||
};
|
||||
use crate::{http_metrics::metrics, validator_store::ValidatorStore};
|
||||
use environment::RuntimeContext;
|
||||
use eth2::types::{Graffiti, VariableList};
|
||||
use slog::{crit, debug, error, info, trace, warn};
|
||||
use slot_clock::SlotClock;
|
||||
use std::ops::Deref;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use types::{
|
||||
AbstractExecPayload, BlindedPayload, BlobsSidecar, BlockType, EthSpec, ExecPayload, ForkName,
|
||||
FullPayload, PublicKeyBytes, Slot,
|
||||
AbstractExecPayload, BlindedPayload, BlockType, EthSpec, FullPayload, Graffiti, PublicKeyBytes,
|
||||
Slot,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
|
@ -19,12 +19,11 @@ use std::sync::Arc;
|
||||
use task_executor::TaskExecutor;
|
||||
use types::{
|
||||
attestation::Error as AttestationError, graffiti::GraffitiString, AbstractExecPayload, Address,
|
||||
AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, BlobsSidecar, ChainSpec,
|
||||
ContributionAndProof, Domain, Epoch, EthSpec, ExecPayload, Fork, FullPayload, Graffiti,
|
||||
Hash256, Keypair, PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof,
|
||||
SignedBeaconBlock, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData,
|
||||
Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage,
|
||||
SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData,
|
||||
AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof,
|
||||
Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, Keypair, PublicKeyBytes, SelectionProof,
|
||||
Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, SignedRoot,
|
||||
SignedValidatorRegistrationData, Slot, SyncAggregatorSelectionData, SyncCommitteeContribution,
|
||||
SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData,
|
||||
};
|
||||
use validator_dir::ValidatorDir;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user