Delete DB schema migrations for v11 and earlier (#3761)

## Proposed Changes

Now that the Gnosis merge is scheduled, all users should have upgraded beyond Lighthouse v3.0.0. Accordingly we can delete schema migrations for versions prior to v3.0.0.

## Additional Info

I also deleted the state cache stuff I added in #3714 as it turned out to be useless for the light client proofs due to the one-slot offset.
This commit is contained in:
Michael Sproul 2022-12-02 00:07:43 +00:00
parent 18c9be595d
commit 84392d63fa
14 changed files with 15 additions and 1354 deletions

View File

@ -997,46 +997,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(self.store.get_state(state_root, slot)?)
}
/// Run a function with mutable access to a state for `block_root`.
///
/// The primary purpose of this function is to borrow a state with its tree hash cache
/// from the snapshot cache *without moving it*. This means that calls to this function should
/// be kept to an absolute minimum, because holding the snapshot cache lock has the ability
/// to delay block import.
///
/// If there is no appropriate state in the snapshot cache then one will be loaded from disk.
/// If no state is found on disk then `Ok(None)` will be returned.
///
/// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used,
/// which can inform logging/metrics.
///
/// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour
/// of `tree-states`, where all caches are CoW and everything is good in the world.
pub fn with_mutable_state_for_block<F, V, Payload: ExecPayload<T::EthSpec>>(
    &self,
    block: &SignedBeaconBlock<T::EthSpec, Payload>,
    block_root: Hash256,
    f: F,
) -> Result<Option<V>, Error>
where
    F: FnOnce(&mut BeaconState<T::EthSpec>, bool) -> Result<V, Error>,
{
    // Try the snapshot cache first. Failing to take the lock within the timeout is
    // treated as an error (not a cache miss) so lock contention is surfaced.
    if let Some(state) = self
        .snapshot_cache
        .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
        .ok_or(Error::SnapshotCacheLockTimeout)?
        .borrow_unadvanced_state_mut(block_root)
    {
        let cache_hit = true;
        f(state, cache_hit).map(Some)
    } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? {
        // Cache miss: load the block's post-state from the store instead.
        let cache_hit = false;
        f(&mut state, cache_hit).map(Some)
    } else {
        // No state in the cache and none on disk.
        Ok(None)
    }
}
/// Return the sync committee at `slot + 1` from the canonical chain.
///
/// This is useful when dealing with sync committee messages, because messages are signed

View File

@ -61,7 +61,7 @@ pub fn get_effective_balances<T: EthSpec>(state: &BeaconState<T>) -> Vec<u64> {
}
#[superstruct(
variants(V1, V8),
variants(V8),
variant_attributes(derive(PartialEq, Clone, Debug, Encode, Decode)),
no_enum
)]
@ -75,13 +75,11 @@ pub(crate) struct CacheItem {
pub(crate) type CacheItem = CacheItemV8;
#[superstruct(
variants(V1, V8),
variants(V8),
variant_attributes(derive(PartialEq, Clone, Default, Debug, Encode, Decode)),
no_enum
)]
pub struct BalancesCache {
#[superstruct(only(V1))]
pub(crate) items: Vec<CacheItemV1>,
#[superstruct(only(V8))]
pub(crate) items: Vec<CacheItemV8>,
}
@ -366,26 +364,20 @@ where
}
/// A container which allows persisting the `BeaconForkChoiceStore` to the on-disk database.
#[superstruct(
variants(V1, V7, V8, V10, V11),
variant_attributes(derive(Encode, Decode)),
no_enum
)]
#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)]
pub struct PersistedForkChoiceStore {
#[superstruct(only(V1, V7))]
pub balances_cache: BalancesCacheV1,
#[superstruct(only(V8, V10, V11))]
#[superstruct(only(V11))]
pub balances_cache: BalancesCacheV8,
pub time: Slot,
pub finalized_checkpoint: Checkpoint,
pub justified_checkpoint: Checkpoint,
pub justified_balances: Vec<u64>,
pub best_justified_checkpoint: Checkpoint,
#[superstruct(only(V10, V11))]
#[superstruct(only(V11))]
pub unrealized_justified_checkpoint: Checkpoint,
#[superstruct(only(V10, V11))]
#[superstruct(only(V11))]
pub unrealized_finalized_checkpoint: Checkpoint,
#[superstruct(only(V7, V8, V10, V11))]
#[superstruct(only(V11))]
pub proposer_boost_root: Hash256,
#[superstruct(only(V11))]
pub equivocating_indices: BTreeSet<u64>,

View File

@ -1,7 +1,4 @@
use crate::beacon_fork_choice_store::{
PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11,
PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8,
};
use crate::beacon_fork_choice_store::PersistedForkChoiceStoreV11;
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use store::{DBColumn, Error, StoreItem};
@ -10,21 +7,9 @@ use superstruct::superstruct;
// If adding a new version you should update this type alias and fix the breakages.
pub type PersistedForkChoice = PersistedForkChoiceV11;
#[superstruct(
variants(V1, V7, V8, V10, V11),
variant_attributes(derive(Encode, Decode)),
no_enum
)]
#[superstruct(variants(V11), variant_attributes(derive(Encode, Decode)), no_enum)]
pub struct PersistedForkChoice {
pub fork_choice: fork_choice::PersistedForkChoice,
#[superstruct(only(V1))]
pub fork_choice_store: PersistedForkChoiceStoreV1,
#[superstruct(only(V7))]
pub fork_choice_store: PersistedForkChoiceStoreV7,
#[superstruct(only(V8))]
pub fork_choice_store: PersistedForkChoiceStoreV8,
#[superstruct(only(V10))]
pub fork_choice_store: PersistedForkChoiceStoreV10,
#[superstruct(only(V11))]
pub fork_choice_store: PersistedForkChoiceStoreV11,
}
@ -47,8 +32,4 @@ macro_rules! impl_store_item {
};
}
impl_store_item!(PersistedForkChoiceV1);
impl_store_item!(PersistedForkChoiceV7);
impl_store_item!(PersistedForkChoiceV8);
impl_store_item!(PersistedForkChoiceV10);
impl_store_item!(PersistedForkChoiceV11);

View File

@ -1,20 +1,9 @@
//! Utilities for managing database schema changes.
mod migration_schema_v10;
mod migration_schema_v11;
mod migration_schema_v12;
mod migration_schema_v13;
mod migration_schema_v6;
mod migration_schema_v7;
mod migration_schema_v8;
mod migration_schema_v9;
mod types;
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY};
use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
use crate::eth1_chain::SszEth1;
use crate::persisted_fork_choice::{
PersistedForkChoiceV1, PersistedForkChoiceV10, PersistedForkChoiceV11, PersistedForkChoiceV7,
PersistedForkChoiceV8,
};
use crate::types::ChainSpec;
use slog::{warn, Logger};
use std::sync::Arc;
@ -23,6 +12,7 @@ use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
use store::{Error as StoreError, StoreItem};
/// Migrate the database from one schema version to another, applying all requisite mutations.
#[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future
pub fn migrate_schema<T: BeaconChainTypes>(
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
deposit_contract_deploy_block: u64,
@ -62,156 +52,9 @@ pub fn migrate_schema<T: BeaconChainTypes>(
}
//
// Migrations from before SchemaVersion(5) are deprecated.
// Migrations from before SchemaVersion(11) are deprecated.
//
// Migration for adding `execution_status` field to the fork choice store.
(SchemaVersion(5), SchemaVersion(6)) => {
// Database operations to be done atomically
let mut ops = vec![];
// The top-level `PersistedForkChoice` struct is still V1 but will have its internal
// bytes for the fork choice updated to V6.
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
if let Some(mut persisted_fork_choice) = fork_choice_opt {
migration_schema_v6::update_execution_statuses::<T>(&mut persisted_fork_choice)
.map_err(StoreError::SchemaMigrationError)?;
// Store the converted fork choice store under the same key.
ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// 1. Add `proposer_boost_root`.
// 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to
// `finalized_checkpoint`.
// 3. This migration also includes a potential update to the justified
// checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint
// combination does not actually exist for any blocks in fork choice. This was possible in
// the consensus spec prior to v1.1.6.
//
// Relevant issues:
//
// https://github.com/sigp/lighthouse/issues/2741
// https://github.com/ethereum/consensus-specs/pull/2727
// https://github.com/ethereum/consensus-specs/pull/2730
(SchemaVersion(6), SchemaVersion(7)) => {
// Database operations to be done atomically
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
if let Some(persisted_fork_choice_v1) = fork_choice_opt {
// This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field.
let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into();
let result = migration_schema_v7::update_fork_choice::<T>(
&mut persisted_fork_choice_v7,
db.clone(),
);
// Fall back to re-initializing fork choice from an anchor state if necessary.
if let Err(e) = result {
warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e);
migration_schema_v7::update_with_reinitialized_fork_choice::<T>(
&mut persisted_fork_choice_v7,
db.clone(),
spec,
)
.map_err(StoreError::SchemaMigrationError)?;
}
// Store the converted fork choice store under the same key.
ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// Migration to add an `epoch` key to the fork choice's balances cache.
(SchemaVersion(7), SchemaVersion(8)) => {
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV7>(&FORK_CHOICE_DB_KEY)?;
if let Some(fork_choice) = fork_choice_opt {
let updated_fork_choice =
migration_schema_v8::update_fork_choice::<T>(fork_choice, db.clone())?;
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// Upgrade from v8 to v9 to separate the execution payloads into their own column.
(SchemaVersion(8), SchemaVersion(9)) => {
migration_schema_v9::upgrade_to_v9::<T>(db.clone(), log)?;
db.store_schema_version(to)
}
// Downgrade from v9 to v8 to ignore the separation of execution payloads
// NOTE: only works before the Bellatrix fork epoch.
(SchemaVersion(9), SchemaVersion(8)) => {
migration_schema_v9::downgrade_from_v9::<T>(db.clone(), log)?;
db.store_schema_version(to)
}
(SchemaVersion(9), SchemaVersion(10)) => {
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV8>(&FORK_CHOICE_DB_KEY)?;
if let Some(fork_choice) = fork_choice_opt {
let updated_fork_choice = migration_schema_v10::update_fork_choice(fork_choice)?;
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
(SchemaVersion(10), SchemaVersion(9)) => {
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV10>(&FORK_CHOICE_DB_KEY)?;
if let Some(fork_choice) = fork_choice_opt {
let updated_fork_choice = migration_schema_v10::downgrade_fork_choice(fork_choice)?;
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// Upgrade from v10 to v11 adding support for equivocating indices to fork choice.
(SchemaVersion(10), SchemaVersion(11)) => {
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV10>(&FORK_CHOICE_DB_KEY)?;
if let Some(fork_choice) = fork_choice_opt {
let updated_fork_choice = migration_schema_v11::update_fork_choice(fork_choice);
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// Downgrade from v11 to v10 removing support for equivocating indices from fork choice.
(SchemaVersion(11), SchemaVersion(10)) => {
let mut ops = vec![];
let fork_choice_opt = db.get_item::<PersistedForkChoiceV11>(&FORK_CHOICE_DB_KEY)?;
if let Some(fork_choice) = fork_choice_opt {
let updated_fork_choice =
migration_schema_v11::downgrade_fork_choice(fork_choice, log);
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
}
db.store_schema_version_atomically(to, ops)?;
Ok(())
}
// Upgrade from v11 to v12 to store richer metadata in the attestation op pool.
(SchemaVersion(11), SchemaVersion(12)) => {
let ops = migration_schema_v12::upgrade_to_v12::<T>(db.clone(), log)?;

View File

@ -1,97 +0,0 @@
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV8};
use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV8};
use crate::schema_change::{
types::{SszContainerV10, SszContainerV7},
StoreError,
};
use proto_array::core::SszContainer;
use ssz::{Decode, Encode};
/// Upgrade a `PersistedForkChoiceV8` to V10 by re-encoding its proto-array bytes.
///
/// The stored bytes are decoded as `SszContainerV7`, converted to `SszContainerV10`
/// and then to the current `SszContainer`, before being re-serialized in place.
///
/// Returns a `SchemaMigrationError` if the stored bytes fail to decode.
pub fn update_fork_choice(
    mut fork_choice: PersistedForkChoiceV8,
) -> Result<PersistedForkChoiceV10, StoreError> {
    let ssz_container_v7 = SszContainerV7::from_ssz_bytes(
        &fork_choice.fork_choice.proto_array_bytes,
    )
    .map_err(|e| {
        StoreError::SchemaMigrationError(format!(
            "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
            e
        ))
    })?;
    // These transformations instantiate `node.unrealized_justified_checkpoint` and
    // `node.unrealized_finalized_checkpoint` to `None`.
    let ssz_container_v10: SszContainerV10 = ssz_container_v7.into();
    let ssz_container: SszContainer = ssz_container_v10.into();
    fork_choice.fork_choice.proto_array_bytes = ssz_container.as_ssz_bytes();
    Ok(fork_choice.into())
}
/// Downgrade a `PersistedForkChoiceV10` to V8 by re-encoding its proto-array bytes.
///
/// The stored bytes are decoded as `SszContainerV10` and converted back to
/// `SszContainerV7` before being re-serialized in place.
///
/// Returns a `SchemaMigrationError` if the stored bytes fail to decode.
pub fn downgrade_fork_choice(
    mut fork_choice: PersistedForkChoiceV10,
) -> Result<PersistedForkChoiceV8, StoreError> {
    let ssz_container_v10 = SszContainerV10::from_ssz_bytes(
        &fork_choice.fork_choice.proto_array_bytes,
    )
    .map_err(|e| {
        StoreError::SchemaMigrationError(format!(
            "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
            e
        ))
    })?;
    let ssz_container_v7: SszContainerV7 = ssz_container_v10.into();
    fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes();
    Ok(fork_choice.into())
}
impl From<PersistedForkChoiceStoreV8> for PersistedForkChoiceStoreV10 {
    fn from(v8: PersistedForkChoiceStoreV8) -> Self {
        // V10 adds the unrealized checkpoint fields; V8 has no equivalents, so
        // seed them from the best justified / finalized checkpoints.
        Self {
            balances_cache: v8.balances_cache,
            time: v8.time,
            finalized_checkpoint: v8.finalized_checkpoint,
            justified_checkpoint: v8.justified_checkpoint,
            justified_balances: v8.justified_balances,
            best_justified_checkpoint: v8.best_justified_checkpoint,
            unrealized_justified_checkpoint: v8.best_justified_checkpoint,
            unrealized_finalized_checkpoint: v8.finalized_checkpoint,
            proposer_boost_root: v8.proposer_boost_root,
        }
    }
}
impl From<PersistedForkChoiceV8> for PersistedForkChoiceV10 {
fn from(other: PersistedForkChoiceV8) -> Self {
Self {
fork_choice: other.fork_choice,
fork_choice_store: other.fork_choice_store.into(),
}
}
}
impl From<PersistedForkChoiceStoreV10> for PersistedForkChoiceStoreV8 {
    fn from(v10: PersistedForkChoiceStoreV10) -> Self {
        // Downgrading simply drops the unrealized checkpoint fields added in V10.
        Self {
            balances_cache: v10.balances_cache,
            time: v10.time,
            finalized_checkpoint: v10.finalized_checkpoint,
            justified_checkpoint: v10.justified_checkpoint,
            justified_balances: v10.justified_balances,
            best_justified_checkpoint: v10.best_justified_checkpoint,
            proposer_boost_root: v10.proposer_boost_root,
        }
    }
}
impl From<PersistedForkChoiceV10> for PersistedForkChoiceV8 {
fn from(other: PersistedForkChoiceV10) -> Self {
Self {
fork_choice: other.fork_choice,
fork_choice_store: other.fork_choice_store.into(),
}
}
}

View File

@ -1,77 +0,0 @@
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV10, PersistedForkChoiceStoreV11};
use crate::persisted_fork_choice::{PersistedForkChoiceV10, PersistedForkChoiceV11};
use slog::{warn, Logger};
use std::collections::BTreeSet;
/// Add the equivocating indices field.
///
/// All V10 fields are carried over unchanged; the new `equivocating_indices`
/// set starts out empty because nothing equivalent was tracked before V11.
pub fn update_fork_choice(fork_choice_v10: PersistedForkChoiceV10) -> PersistedForkChoiceV11 {
    // Exhaustive destructure: adding a field to the V10 store becomes a compile
    // error here, forcing this migration to be revisited.
    let PersistedForkChoiceStoreV10 {
        balances_cache,
        time,
        finalized_checkpoint,
        justified_checkpoint,
        justified_balances,
        best_justified_checkpoint,
        unrealized_justified_checkpoint,
        unrealized_finalized_checkpoint,
        proposer_boost_root,
    } = fork_choice_v10.fork_choice_store;
    PersistedForkChoiceV11 {
        fork_choice: fork_choice_v10.fork_choice,
        fork_choice_store: PersistedForkChoiceStoreV11 {
            balances_cache,
            time,
            finalized_checkpoint,
            justified_checkpoint,
            justified_balances,
            best_justified_checkpoint,
            unrealized_justified_checkpoint,
            unrealized_finalized_checkpoint,
            proposer_boost_root,
            equivocating_indices: BTreeSet::new(),
        },
    }
}
/// Remove the equivocating indices field.
///
/// Any recorded equivocating validator indices are discarded; a warning is
/// logged when the set is non-empty because losing them may make the node more
/// susceptible to following the wrong chain.
pub fn downgrade_fork_choice(
    fork_choice_v11: PersistedForkChoiceV11,
    log: Logger,
) -> PersistedForkChoiceV10 {
    let PersistedForkChoiceStoreV11 {
        balances_cache,
        time,
        finalized_checkpoint,
        justified_checkpoint,
        justified_balances,
        best_justified_checkpoint,
        unrealized_justified_checkpoint,
        unrealized_finalized_checkpoint,
        proposer_boost_root,
        equivocating_indices,
    } = fork_choice_v11.fork_choice_store;
    // Only warn when there is actually data being thrown away.
    if !equivocating_indices.is_empty() {
        warn!(
            log,
            "Deleting slashed validators from fork choice store";
            "count" => equivocating_indices.len(),
            "message" => "this may make your node more susceptible to following the wrong chain",
        );
    }
    PersistedForkChoiceV10 {
        fork_choice: fork_choice_v11.fork_choice,
        fork_choice_store: PersistedForkChoiceStoreV10 {
            balances_cache,
            time,
            finalized_checkpoint,
            justified_checkpoint,
            justified_balances,
            best_justified_checkpoint,
            unrealized_justified_checkpoint,
            unrealized_finalized_checkpoint,
            proposer_boost_root,
        },
    }
}

View File

@ -1,28 +0,0 @@
//! These functions and structs are only relevant to the database migration from schema 5 to 6.
use crate::persisted_fork_choice::PersistedForkChoiceV1;
use crate::schema_change::types::{SszContainerV1, SszContainerV6};
use crate::BeaconChainTypes;
use ssz::four_byte_option_impl;
use ssz::{Decode, Encode};
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
// selector.
four_byte_option_impl!(four_byte_option_usize, usize);
/// Re-encode the persisted proto-array bytes from the V1 to the V6 format.
///
/// Mutates `persisted_fork_choice` in place: only its inner `proto_array_bytes`
/// are changed. Returns a descriptive error string if the stored bytes fail to
/// decode as `SszContainerV1`.
pub(crate) fn update_execution_statuses<T: BeaconChainTypes>(
    persisted_fork_choice: &mut PersistedForkChoiceV1,
) -> Result<(), String> {
    let ssz_container_v1 =
        SszContainerV1::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
            .map_err(|e| {
                format!(
                    "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
                    e
                )
            })?;
    // The V1 -> V6 conversion adds the execution status information to each node.
    let ssz_container_v6: SszContainerV6 = ssz_container_v1.into();
    persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v6.as_ssz_bytes();
    Ok(())
}

View File

@ -1,341 +0,0 @@
//! These functions and structs are only relevant to the database migration from schema 6 to 7.
use crate::beacon_chain::BeaconChainTypes;
use crate::beacon_fork_choice_store::{PersistedForkChoiceStoreV1, PersistedForkChoiceStoreV7};
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
use crate::schema_change::types::{ProtoNodeV6, SszContainerV10, SszContainerV6, SszContainerV7};
use crate::types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, Slot};
use crate::{BeaconForkChoiceStore, BeaconSnapshot};
use fork_choice::ForkChoice;
use proto_array::{core::ProtoNode, core::SszContainer, CountUnrealizedFull, ProtoArrayForkChoice};
use ssz::four_byte_option_impl;
use ssz::{Decode, Encode};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use store::hot_cold_store::HotColdDB;
use store::iter::BlockRootsIterator;
use store::Error as StoreError;
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
// selector.
four_byte_option_impl!(four_byte_option_usize, usize);
/// This method is used to re-initialize fork choice from the finalized state in case we hit an
/// error during this migration.
///
/// The store's finalized checkpoint is used as the anchor: the corresponding block and
/// state are loaded from `db` and fork choice is rebuilt from that anchor, replacing the
/// persisted bytes entirely. Errors are returned as descriptive strings.
pub(crate) fn update_with_reinitialized_fork_choice<T: BeaconChainTypes>(
    persisted_fork_choice: &mut PersistedForkChoiceV7,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    spec: &ChainSpec,
) -> Result<(), String> {
    let anchor_block_root = persisted_fork_choice
        .fork_choice_store
        .finalized_checkpoint
        .root;
    // Both the anchor block and its post-state must exist on disk; otherwise the
    // re-initialization cannot proceed.
    let anchor_block = db
        .get_full_block_prior_to_v9(&anchor_block_root)
        .map_err(|e| format!("{:?}", e))?
        .ok_or_else(|| "Missing anchor beacon block".to_string())?;
    let anchor_state = db
        .get_state(&anchor_block.state_root(), Some(anchor_block.slot()))
        .map_err(|e| format!("{:?}", e))?
        .ok_or_else(|| "Missing anchor beacon state".to_string())?;
    let snapshot = BeaconSnapshot {
        beacon_block: Arc::new(anchor_block),
        beacon_block_root: anchor_block_root,
        beacon_state: anchor_state,
    };
    let store = BeaconForkChoiceStore::get_forkchoice_store(db, &snapshot);
    let fork_choice = ForkChoice::from_anchor(
        store,
        anchor_block_root,
        &snapshot.beacon_block,
        &snapshot.beacon_state,
        // Don't provide the current slot here, just use what's in the store. We don't need to know
        // the head here, plus it's nice to avoid mutating fork choice during this process.
        None,
        // This config will get overwritten on startup.
        CountUnrealizedFull::default(),
        spec,
    )
    .map_err(|e| format!("{:?}", e))?;
    persisted_fork_choice.fork_choice = fork_choice.to_persisted();
    Ok(())
}
/// Upgrade the persisted proto-array from the V6 to the V7 byte format.
///
/// This decodes the legacy bytes, adds per-node justified/finalized checkpoints
/// (computed by walking each head's chain of ancestors via `update_checkpoints`),
/// reconciles the store's justified checkpoint with the proto array, and then
/// re-serializes the result in the V7 format so later migrations can be applied
/// in sequence.
pub(crate) fn update_fork_choice<T: BeaconChainTypes>(
    persisted_fork_choice: &mut PersistedForkChoiceV7,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<(), StoreError> {
    // `PersistedForkChoice` stores the `ProtoArray` as a `Vec<u8>`. Deserialize these
    // bytes assuming the legacy struct, and transform them to the new struct before
    // re-serializing.
    let ssz_container_v6 =
        SszContainerV6::from_ssz_bytes(&persisted_fork_choice.fork_choice.proto_array_bytes)
            .map_err(|e| {
                StoreError::SchemaMigrationError(format!(
                    "Failed to decode ProtoArrayForkChoice during schema migration: {:?}",
                    e
                ))
            })?;
    // Clone the V6 proto nodes in order to maintain information about `node.justified_epoch`
    // and `node.finalized_epoch`.
    let nodes_v6 = ssz_container_v6.nodes.clone();
    let justified_checkpoint = persisted_fork_choice.fork_choice_store.justified_checkpoint;
    let finalized_checkpoint = persisted_fork_choice.fork_choice_store.finalized_checkpoint;
    // These transformations instantiate `node.justified_checkpoint` and `node.finalized_checkpoint`
    // to `None`.
    let ssz_container_v7: SszContainerV7 =
        ssz_container_v6.into_ssz_container_v7(justified_checkpoint, finalized_checkpoint);
    let ssz_container_v10: SszContainerV10 = ssz_container_v7.into();
    let ssz_container: SszContainer = ssz_container_v10.into();
    // `CountUnrealizedFull::default()` represents the count-unrealized-full config which will be overwritten on startup.
    let mut fork_choice: ProtoArrayForkChoice =
        (ssz_container, CountUnrealizedFull::default()).into();
    // Fill in the per-node checkpoints using the epochs preserved from the V6 nodes.
    update_checkpoints::<T>(finalized_checkpoint.root, &nodes_v6, &mut fork_choice, db)
        .map_err(StoreError::SchemaMigrationError)?;
    // Update the justified checkpoint in the store in case we have a discrepancy
    // between the store and the proto array nodes.
    update_store_justified_checkpoint(persisted_fork_choice, &mut fork_choice)
        .map_err(StoreError::SchemaMigrationError)?;
    // Need to downgrade the SSZ container to V7 so that all migrations can be applied in sequence.
    let ssz_container = SszContainer::from(&fork_choice);
    let ssz_container_v7 = SszContainerV7::from(ssz_container);
    persisted_fork_choice.fork_choice.proto_array_bytes = ssz_container_v7.as_ssz_bytes();
    persisted_fork_choice.fork_choice_store.justified_checkpoint = justified_checkpoint;
    Ok(())
}
/// Location and identity of a head block within the proto array, with enough
/// information to walk its chain of ancestors.
struct HeadInfo {
    // Index of the head node in the proto array's `nodes` vector.
    index: usize,
    // Block root of the head.
    root: Hash256,
    // Slot of the head block.
    slot: Slot,
}
/// Populate `justified_checkpoint` and `finalized_checkpoint` on every proto node
/// on the chain between each head and `finalized_root`, using the per-node epochs
/// recorded in the legacy V6 nodes (`nodes_v6`).
///
/// The database is used to resolve each relevant epoch to the block root at its
/// start slot. Returns a descriptive error string on failure.
fn update_checkpoints<T: BeaconChainTypes>(
    finalized_root: Hash256,
    nodes_v6: &[ProtoNodeV6],
    fork_choice: &mut ProtoArrayForkChoice,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<(), String> {
    let heads = find_finalized_descendant_heads(finalized_root, fork_choice);
    // For each head, first gather all epochs we will need to find justified or finalized roots for.
    for head in heads {
        // `relevant_epochs` are epochs for which we will need to find the root at the start slot.
        // We don't need to worry about whether they are finalized or justified epochs.
        let mut relevant_epochs = HashSet::new();
        let relevant_epoch_finder = |index, _: &mut ProtoNode| {
            let (justified_epoch, finalized_epoch) = nodes_v6
                .get(index)
                .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
                .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;
            relevant_epochs.insert(justified_epoch);
            relevant_epochs.insert(finalized_epoch);
            Ok(())
        };
        // First pass over the ancestor chain: collect the epochs only.
        apply_to_chain_of_ancestors(
            finalized_root,
            head.index,
            fork_choice,
            relevant_epoch_finder,
        )?;
        // find the block roots associated with each relevant epoch.
        let roots_by_epoch =
            map_relevant_epochs_to_roots::<T>(head.root, head.slot, relevant_epochs, db.clone())?;
        // Apply this mutator to the chain of descendants from this head, adding justified
        // and finalized checkpoints for each.
        let node_mutator = |index, node: &mut ProtoNode| {
            let (justified_epoch, finalized_epoch) = nodes_v6
                .get(index)
                .map(|node: &ProtoNodeV6| (node.justified_epoch, node.finalized_epoch))
                .ok_or_else(|| "Index not found in legacy proto nodes".to_string())?;
            // Update the checkpoints only if they haven't already been populated.
            if node.justified_checkpoint.is_none() {
                let justified_checkpoint =
                    roots_by_epoch
                        .get(&justified_epoch)
                        .map(|&root| Checkpoint {
                            epoch: justified_epoch,
                            root,
                        });
                node.justified_checkpoint = justified_checkpoint;
            }
            if node.finalized_checkpoint.is_none() {
                let finalized_checkpoint =
                    roots_by_epoch
                        .get(&finalized_epoch)
                        .map(|&root| Checkpoint {
                            epoch: finalized_epoch,
                            root,
                        });
                node.finalized_checkpoint = finalized_checkpoint;
            }
            Ok(())
        };
        // Second pass: write the resolved checkpoints onto each node.
        apply_to_chain_of_ancestors(finalized_root, head.index, fork_choice, node_mutator)?;
    }
    Ok(())
}
/// Converts the given `HashSet<Epoch>` to a `Vec<Epoch>` then reverse sorts by `Epoch`. Next, a
/// single `BlockRootsIterator` is created which is used to iterate backwards from the given
/// `head_root` and `head_slot`, finding the block root at the start slot of each epoch.
///
/// Returns a map from each requested epoch to the root found at its start slot, or a
/// descriptive error string if any root cannot be located.
fn map_relevant_epochs_to_roots<T: BeaconChainTypes>(
    head_root: Hash256,
    head_slot: Slot,
    epochs: HashSet<Epoch>,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<HashMap<Epoch, Hash256>, String> {
    // Convert the `HashSet` to a `Vec` and reverse sort the epochs.
    let mut relevant_epochs = epochs.into_iter().collect::<Vec<_>>();
    relevant_epochs.sort_unstable_by(|a, b| b.cmp(a));
    // Iterate backwards from the given `head_root` and `head_slot` and find the block root at each epoch.
    // The head itself is prepended so its own slot can match an epoch start.
    let mut iter = std::iter::once(Ok((head_root, head_slot)))
        .chain(BlockRootsIterator::from_block(&db, head_root).map_err(|e| format!("{:?}", e))?);
    let mut roots_by_epoch = HashMap::new();
    for epoch in relevant_epochs {
        let start_slot = epoch.start_slot(T::EthSpec::slots_per_epoch());
        // Because epochs are processed in descending order, the single iterator can
        // be consumed progressively without restarting.
        let root = iter
            .find_map(|next| match next {
                Ok((root, slot)) => (slot == start_slot).then_some(Ok(root)),
                Err(e) => Some(Err(format!("{:?}", e))),
            })
            .transpose()?
            .ok_or_else(|| "Justified root not found".to_string())?;
        roots_by_epoch.insert(epoch, root);
    }
    Ok(roots_by_epoch)
}
/// Applies a mutator to every node in a chain, starting from the node at the given
/// `head_index` and iterating through ancestors until the `finalized_root` is reached.
///
/// The mutator receives each node's index and a mutable reference to the node; any
/// error it returns aborts the walk. The finalized node itself *is* mutated before
/// the walk stops.
fn apply_to_chain_of_ancestors<F>(
    finalized_root: Hash256,
    head_index: usize,
    fork_choice: &mut ProtoArrayForkChoice,
    mut node_mutator: F,
) -> Result<(), String>
where
    F: FnMut(usize, &mut ProtoNode) -> Result<(), String>,
{
    let head = fork_choice
        .core_proto_array_mut()
        .nodes
        .get_mut(head_index)
        .ok_or_else(|| "Head index not found in proto nodes".to_string())?;
    node_mutator(head_index, head)?;
    let mut parent_index_opt = head.parent;
    let mut parent_opt =
        parent_index_opt.and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));
    // Iterate backwards through all parents until there is no reference to a parent or we reach
    // the `finalized_root` node.
    while let (Some(parent), Some(parent_index)) = (parent_opt, parent_index_opt) {
        node_mutator(parent_index, parent)?;
        // Break out of this while loop *after* the `node_mutator` has been applied to the finalized
        // node.
        if parent.root == finalized_root {
            break;
        }
        // Update parent values
        parent_index_opt = parent.parent;
        parent_opt = parent_index_opt
            .and_then(|index| fork_choice.core_proto_array_mut().nodes.get_mut(index));
    }
    Ok(())
}
/// Finds all heads by finding all nodes in the proto array that are not referenced as parents. Then
/// checks that these nodes are descendants of the finalized root in order to determine if they are
/// relevant.
fn find_finalized_descendant_heads(
    finalized_root: Hash256,
    fork_choice: &ProtoArrayForkChoice,
) -> Vec<HeadInfo> {
    // A node referenced as some other node's parent cannot be a head.
    let nodes_referenced_as_parents: HashSet<usize> = fork_choice
        .core_proto_array()
        .nodes
        .iter()
        .filter_map(|node| node.parent)
        .collect::<HashSet<_>>();
    // Keep only unreferenced nodes that descend from the finalized root.
    fork_choice
        .core_proto_array()
        .nodes
        .iter()
        .enumerate()
        .filter_map(|(index, node)| {
            (!nodes_referenced_as_parents.contains(&index)
                && fork_choice.is_descendant(finalized_root, node.root))
            .then_some(HeadInfo {
                index,
                root: node.root,
                slot: node.slot,
            })
        })
        .collect::<Vec<_>>()
}
/// Set the proto array's justified checkpoint to the highest-epoch justified
/// checkpoint found on any node whose finalized checkpoint matches the one in
/// the persisted store.
///
/// Returns an error if no proto node carries the store's current finalized
/// checkpoint.
fn update_store_justified_checkpoint(
    persisted_fork_choice: &mut PersistedForkChoiceV7,
    fork_choice: &mut ProtoArrayForkChoice,
) -> Result<(), String> {
    let justified_checkpoint = fork_choice
        .core_proto_array()
        .nodes
        .iter()
        .filter_map(|node| {
            // Only consider nodes agreeing with the store on finalization, and
            // skip nodes whose justified checkpoint is still unset.
            (node.finalized_checkpoint
                == Some(persisted_fork_choice.fork_choice_store.finalized_checkpoint))
                .then_some(node.justified_checkpoint)
                .flatten()
        })
        .max_by_key(|justified_checkpoint| justified_checkpoint.epoch)
        .ok_or("Proto node with current finalized checkpoint not found")?;
    fork_choice.core_proto_array_mut().justified_checkpoint = justified_checkpoint;
    Ok(())
}
// Add a zero `proposer_boost_root` when migrating from V1-6 to V7.
impl From<PersistedForkChoiceStoreV1> for PersistedForkChoiceStoreV7 {
    fn from(v1: PersistedForkChoiceStoreV1) -> Self {
        // V7 introduces `proposer_boost_root`, which has no V1 counterpart,
        // so it is initialised to the zero hash.
        Self {
            balances_cache: v1.balances_cache,
            time: v1.time,
            finalized_checkpoint: v1.finalized_checkpoint,
            justified_checkpoint: v1.justified_checkpoint,
            justified_balances: v1.justified_balances,
            best_justified_checkpoint: v1.best_justified_checkpoint,
            proposer_boost_root: Hash256::zero(),
        }
    }
}
impl From<PersistedForkChoiceV1> for PersistedForkChoiceV7 {
fn from(other: PersistedForkChoiceV1) -> Self {
Self {
fork_choice: other.fork_choice,
fork_choice_store: other.fork_choice_store.into(),
}
}
}

View File

@ -1,50 +0,0 @@
use crate::beacon_chain::BeaconChainTypes;
use crate::beacon_fork_choice_store::{
BalancesCacheV8, CacheItemV8, PersistedForkChoiceStoreV7, PersistedForkChoiceStoreV8,
};
use crate::persisted_fork_choice::{PersistedForkChoiceV7, PersistedForkChoiceV8};
use std::sync::Arc;
use store::{Error as StoreError, HotColdDB};
use types::EthSpec;
/// Upgrade a `PersistedForkChoiceV7` to V8 by rebuilding the balances cache.
///
/// V8 adds an `epoch` field to each balances-cache item. Each old cache entry is
/// re-created by looking up its block in the database to determine the epoch;
/// entries whose block is no longer on disk are silently dropped (it is only a
/// cache). All other store fields are carried over unchanged.
pub fn update_fork_choice<T: BeaconChainTypes>(
    fork_choice: PersistedForkChoiceV7,
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
) -> Result<PersistedForkChoiceV8, StoreError> {
    let PersistedForkChoiceStoreV7 {
        balances_cache,
        time,
        finalized_checkpoint,
        justified_checkpoint,
        justified_balances,
        best_justified_checkpoint,
        proposer_boost_root,
    } = fork_choice.fork_choice_store;
    // Start from an empty V8 cache; it is re-populated below.
    let mut fork_choice_store = PersistedForkChoiceStoreV8 {
        balances_cache: BalancesCacheV8::default(),
        time,
        finalized_checkpoint,
        justified_checkpoint,
        justified_balances,
        best_justified_checkpoint,
        proposer_boost_root,
    };
    // Add epochs to the balances cache. It's safe to just use the block's epoch because
    // before schema v8 the cache would always miss on skipped slots.
    for item in balances_cache.items {
        // Drop any blocks that aren't found, they're presumably too old and this is only a cache.
        if let Some(block) = db.get_full_block_prior_to_v9(&item.block_root)? {
            fork_choice_store.balances_cache.items.push(CacheItemV8 {
                block_root: item.block_root,
                epoch: block.slot().epoch(T::EthSpec::slots_per_epoch()),
                balances: item.balances,
            });
        }
    }
    Ok(PersistedForkChoiceV8 {
        fork_choice: fork_choice.fork_choice,
        fork_choice_store,
    })
}

View File

@ -1,176 +0,0 @@
use crate::beacon_chain::BeaconChainTypes;
use slog::{debug, error, info, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use std::time::Duration;
use store::{DBColumn, Error, HotColdDB, KeyValueStore};
use types::{EthSpec, Hash256, Slot};
const OPS_PER_BLOCK_WRITE: usize = 2;
/// Construct a temporary slot clock from the genesis block and state on disk.
///
/// The slot clock isn't usually available before the database is initialized, so we read the
/// genesis state instead. It should always exist if the database is initialized at a prior
/// schema version; if either the genesis block or state is missing we log an error and
/// return `Ok(None)` rather than failing hard.
fn get_slot_clock<T: BeaconChainTypes>(
    db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
    log: &Logger,
) -> Result<Option<T::SlotClock>, Error> {
    let spec = db.get_chain_spec();

    // At schema v8 the genesis block must be a *full* block (with payload). In all likeliness it
    // actually has no payload.
    let genesis_block = match db.get_full_block_prior_to_v9(&Hash256::zero())? {
        Some(block) => block,
        None => {
            error!(log, "Missing genesis block");
            return Ok(None);
        }
    };

    let genesis_state = match db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
        Some(state) => state,
        None => {
            error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
            return Ok(None);
        }
    };

    let clock = T::SlotClock::new(
        spec.genesis_slot,
        Duration::from_secs(genesis_state.genesis_time()),
        Duration::from_secs(spec.seconds_per_slot),
    );
    Ok(Some(clock))
}
/// Upgrade the on-disk schema to v9 by rewriting every Bellatrix block as a blinded block
/// with its execution payload stored separately.
///
/// No-op if the Bellatrix fork epoch has not yet been reached (or is unscheduled), since in
/// that case no stored block can contain an execution payload. The migration is resumable:
/// blocks already rewritten on an interrupted run are detected and skipped.
pub fn upgrade_to_v9<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    log: Logger,
) -> Result<(), Error> {
    // This upgrade is a no-op if the Bellatrix fork epoch has not already passed. This migration
    // was implemented before the activation of Bellatrix on all networks except Kiln, so the only
    // users who will need to wait for the slow copying migration are Kiln users.
    let slot_clock = if let Some(slot_clock) = get_slot_clock::<T>(&db, &log)? {
        slot_clock
    } else {
        error!(
            log,
            "Unable to complete migration because genesis state or genesis block is missing"
        );
        return Err(Error::SlotClockUnavailableForMigration);
    };
    // `None` means the clock is pre-genesis: nothing can need migrating yet.
    let current_epoch = if let Some(slot) = slot_clock.now() {
        slot.epoch(T::EthSpec::slots_per_epoch())
    } else {
        return Ok(());
    };
    let bellatrix_fork_epoch = if let Some(fork_epoch) = db.get_chain_spec().bellatrix_fork_epoch {
        fork_epoch
    } else {
        info!(
            log,
            "Upgrading database schema to v9 (no-op)";
            "info" => "To downgrade before the merge run `lighthouse db migrate`"
        );
        return Ok(());
    };
    if current_epoch >= bellatrix_fork_epoch {
        info!(
            log,
            "Upgrading database schema to v9";
            "info" => "This will take several minutes. Each block will be read from and \
                re-written to the database. You may safely exit now (Ctrl-C) and resume \
                the migration later. Downgrading is no longer possible."
        );
        // Walk every block key in the hot DB and rewrite payload-bearing blocks.
        for res in db.hot_db.iter_column_keys(DBColumn::BeaconBlock) {
            let block_root = res?;
            let block = match db.get_full_block_prior_to_v9(&block_root) {
                // A pre-v9 block is present.
                Ok(Some(block)) => block,
                // A block is missing.
                Ok(None) => return Err(Error::BlockNotFound(block_root)),
                // There was an error reading a pre-v9 block. Try reading it as a post-v9 block.
                // This is what makes the migration resumable after an interruption.
                Err(_) => {
                    if db.try_get_full_block(&block_root)?.is_some() {
                        // The block is present as a post-v9 block, assume that it was already
                        // correctly migrated.
                        continue;
                    } else {
                        // This scenario should not be encountered since a prior check has ensured
                        // that this block exists.
                        return Err(Error::V9MigrationFailure(block_root));
                    }
                }
            };
            if block.message().execution_payload().is_ok() {
                // Overwrite block with blinded block and store execution payload separately.
                debug!(
                    log,
                    "Rewriting Bellatrix block";
                    "block_root" => ?block_root,
                );
                // Both writes go in one atomic batch so a crash can't leave a half-written block.
                let mut kv_batch = Vec::with_capacity(OPS_PER_BLOCK_WRITE);
                db.block_as_kv_store_ops(&block_root, block, &mut kv_batch)?;
                db.hot_db.do_atomically(kv_batch)?;
            }
        }
    } else {
        info!(
            log,
            "Upgrading database schema to v9 (no-op)";
            "info" => "To downgrade before the merge run `lighthouse db migrate`"
        );
    }
    Ok(())
}
/// Downgrade the on-disk schema from v9.
///
/// This downgrade is conditional and will only succeed if the Bellatrix fork epoch hasn't
/// been reached; after Bellatrix the v9 block/payload separation is required.
pub fn downgrade_from_v9<T: BeaconChainTypes>(
    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
    log: Logger,
) -> Result<(), Error> {
    let slot_clock = match get_slot_clock::<T>(&db, &log)? {
        Some(clock) => clock,
        None => {
            error!(
                log,
                "Unable to complete migration because genesis state or genesis block is missing"
            );
            return Err(Error::SlotClockUnavailableForMigration);
        }
    };

    // Pre-genesis clock: the Bellatrix epoch cannot have been reached, so nothing to check.
    let current_epoch = match slot_clock.now() {
        Some(slot) => slot.epoch(T::EthSpec::slots_per_epoch()),
        None => return Ok(()),
    };

    let bellatrix_fork_epoch = match db.get_chain_spec().bellatrix_fork_epoch {
        Some(fork_epoch) => fork_epoch,
        None => {
            info!(
                log,
                "Downgrading database schema from v9";
                "info" => "You need to upgrade to v9 again before the merge"
            );
            return Ok(());
        }
    };

    if current_epoch < bellatrix_fork_epoch {
        Ok(())
    } else {
        error!(
            log,
            "Downgrading from schema v9 after the Bellatrix fork epoch is not supported";
            "current_epoch" => current_epoch,
            "bellatrix_fork_epoch" => bellatrix_fork_epoch,
            "reason" => "You need a v9 schema database to run on a merged version of Prater or \
                mainnet. On Kiln, you have to re-sync",
        );
        Err(Error::ResyncRequiredForExecutionPayloadSeparation)
    }
}

View File

@ -1,315 +0,0 @@
use crate::types::{AttestationShufflingId, Checkpoint, Epoch, Hash256, Slot};
use proto_array::core::{ProposerBoost, ProtoNode, SszContainer, VoteTracker};
use proto_array::ExecutionStatus;
use ssz::four_byte_option_impl;
use ssz::Encode;
use ssz_derive::{Decode, Encode};
use superstruct::superstruct;
// Define a "legacy" implementation of `Option<usize>` which uses four bytes for encoding the union
// selector.
four_byte_option_impl!(four_byte_option_usize, usize);
four_byte_option_impl!(four_byte_option_checkpoint, Checkpoint);
/// On-disk representation of a proto-array node at each historical schema version.
///
/// NOTE: `Encode`/`Decode` serialize fields in declaration order, so the field order
/// below is part of the database format and must not be changed.
#[superstruct(
    variants(V1, V6, V7, V10),
    variant_attributes(derive(Clone, PartialEq, Debug, Encode, Decode)),
    no_enum
)]
pub struct ProtoNode {
    pub slot: Slot,
    pub state_root: Hash256,
    pub target_root: Hash256,
    pub current_epoch_shuffling_id: AttestationShufflingId,
    pub next_epoch_shuffling_id: AttestationShufflingId,
    pub root: Hash256,
    #[ssz(with = "four_byte_option_usize")]
    pub parent: Option<usize>,
    // V1/V6 stored bare epochs for justification/finalization…
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    // …which V7 replaced with optional full checkpoints.
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7, V10))]
    pub justified_checkpoint: Option<Checkpoint>,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V7, V10))]
    pub finalized_checkpoint: Option<Checkpoint>,
    pub weight: u64,
    #[ssz(with = "four_byte_option_usize")]
    pub best_child: Option<usize>,
    #[ssz(with = "four_byte_option_usize")]
    pub best_descendant: Option<usize>,
    // Added in V6 (merge support; see the V1 -> V6 conversion below).
    #[superstruct(only(V6, V7, V10))]
    pub execution_status: ExecutionStatus,
    // Unrealized checkpoints added in V10.
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V10))]
    pub unrealized_justified_checkpoint: Option<Checkpoint>,
    #[ssz(with = "four_byte_option_checkpoint")]
    #[superstruct(only(V10))]
    pub unrealized_finalized_checkpoint: Option<Checkpoint>,
}
/// Upgrade a V1 node to V6 by adding an `execution_status`.
///
/// Implemented as `From` rather than `Into` (Clippy `from_over_into`): the standard
/// library's blanket impl derives `Into` from this, so `.into()` call sites still work.
impl From<ProtoNodeV1> for ProtoNodeV6 {
    fn from(node: ProtoNodeV1) -> ProtoNodeV6 {
        ProtoNodeV6 {
            slot: node.slot,
            state_root: node.state_root,
            target_root: node.target_root,
            current_epoch_shuffling_id: node.current_epoch_shuffling_id,
            next_epoch_shuffling_id: node.next_epoch_shuffling_id,
            root: node.root,
            parent: node.parent,
            justified_epoch: node.justified_epoch,
            finalized_epoch: node.finalized_epoch,
            weight: node.weight,
            best_child: node.best_child,
            best_descendant: node.best_descendant,
            // We set the following execution value as if the block is a pre-merge-fork block. This
            // is safe as long as we never import a merge block with the old version of proto-array.
            // This will be safe since we can't actually process merge blocks until we've made this
            // change to fork choice.
            execution_status: ExecutionStatus::irrelevant(),
        }
    }
}
/// Upgrade a V6 node to V7: the bare justified/finalized epochs are dropped and the new
/// optional checkpoint fields start out as `None`.
///
/// `From` rather than `Into` (Clippy `from_over_into`); `.into()` callers are unaffected.
impl From<ProtoNodeV6> for ProtoNodeV7 {
    fn from(node: ProtoNodeV6) -> ProtoNodeV7 {
        ProtoNodeV7 {
            slot: node.slot,
            state_root: node.state_root,
            target_root: node.target_root,
            current_epoch_shuffling_id: node.current_epoch_shuffling_id,
            next_epoch_shuffling_id: node.next_epoch_shuffling_id,
            root: node.root,
            parent: node.parent,
            justified_checkpoint: None,
            finalized_checkpoint: None,
            weight: node.weight,
            best_child: node.best_child,
            best_descendant: node.best_descendant,
            execution_status: node.execution_status,
        }
    }
}
/// Upgrade a V7 node to V10: the new unrealized checkpoint fields start out as `None`.
///
/// `From` rather than `Into` (Clippy `from_over_into`); `.into()` callers are unaffected.
impl From<ProtoNodeV7> for ProtoNodeV10 {
    fn from(node: ProtoNodeV7) -> ProtoNodeV10 {
        ProtoNodeV10 {
            slot: node.slot,
            state_root: node.state_root,
            target_root: node.target_root,
            current_epoch_shuffling_id: node.current_epoch_shuffling_id,
            next_epoch_shuffling_id: node.next_epoch_shuffling_id,
            root: node.root,
            parent: node.parent,
            justified_checkpoint: node.justified_checkpoint,
            finalized_checkpoint: node.finalized_checkpoint,
            weight: node.weight,
            best_child: node.best_child,
            best_descendant: node.best_descendant,
            execution_status: node.execution_status,
            unrealized_justified_checkpoint: None,
            unrealized_finalized_checkpoint: None,
        }
    }
}
/// Downgrade a V10 node to V7 by dropping the unrealized checkpoint fields.
///
/// `From` rather than `Into` (Clippy `from_over_into`); `.into()` callers are unaffected.
impl From<ProtoNodeV10> for ProtoNodeV7 {
    fn from(node: ProtoNodeV10) -> ProtoNodeV7 {
        ProtoNodeV7 {
            slot: node.slot,
            state_root: node.state_root,
            target_root: node.target_root,
            current_epoch_shuffling_id: node.current_epoch_shuffling_id,
            next_epoch_shuffling_id: node.next_epoch_shuffling_id,
            root: node.root,
            parent: node.parent,
            justified_checkpoint: node.justified_checkpoint,
            finalized_checkpoint: node.finalized_checkpoint,
            weight: node.weight,
            best_child: node.best_child,
            best_descendant: node.best_descendant,
            execution_status: node.execution_status,
        }
    }
}
/// Convert a V10 node into the current `proto_array::core::ProtoNode` (all fields map 1:1).
///
/// `From` rather than `Into` (Clippy `from_over_into`): legal even though `ProtoNode` is a
/// foreign type, because `ProtoNodeV10` is local; `.into()` callers are unaffected.
impl From<ProtoNodeV10> for ProtoNode {
    fn from(node: ProtoNodeV10) -> ProtoNode {
        ProtoNode {
            slot: node.slot,
            state_root: node.state_root,
            target_root: node.target_root,
            current_epoch_shuffling_id: node.current_epoch_shuffling_id,
            next_epoch_shuffling_id: node.next_epoch_shuffling_id,
            root: node.root,
            parent: node.parent,
            justified_checkpoint: node.justified_checkpoint,
            finalized_checkpoint: node.finalized_checkpoint,
            weight: node.weight,
            best_child: node.best_child,
            best_descendant: node.best_descendant,
            execution_status: node.execution_status,
            unrealized_justified_checkpoint: node.unrealized_justified_checkpoint,
            unrealized_finalized_checkpoint: node.unrealized_finalized_checkpoint,
        }
    }
}
/// Downgrade a current `ProtoNode` to V7; the unrealized checkpoint fields are discarded.
impl From<ProtoNode> for ProtoNodeV7 {
    fn from(node: ProtoNode) -> Self {
        Self {
            slot: node.slot,
            state_root: node.state_root,
            target_root: node.target_root,
            current_epoch_shuffling_id: node.current_epoch_shuffling_id,
            next_epoch_shuffling_id: node.next_epoch_shuffling_id,
            root: node.root,
            parent: node.parent,
            justified_checkpoint: node.justified_checkpoint,
            finalized_checkpoint: node.finalized_checkpoint,
            weight: node.weight,
            best_child: node.best_child,
            best_descendant: node.best_descendant,
            execution_status: node.execution_status,
        }
    }
}
/// On-disk container for the whole proto-array at each historical schema version.
///
/// NOTE: field order is part of the SSZ encoding and must not be changed. Each variant's
/// `nodes` vector stores nodes of the matching `ProtoNode` schema version.
#[superstruct(
    variants(V1, V6, V7, V10),
    variant_attributes(derive(Encode, Decode)),
    no_enum
)]
#[derive(Encode, Decode)]
pub struct SszContainer {
    pub votes: Vec<VoteTracker>,
    pub balances: Vec<u64>,
    pub prune_threshold: usize,
    // V1/V6 tracked justification/finalization by epoch only…
    #[superstruct(only(V1, V6))]
    pub justified_epoch: Epoch,
    #[superstruct(only(V1, V6))]
    pub finalized_epoch: Epoch,
    // …replaced with full checkpoints from V7 onwards.
    #[superstruct(only(V7, V10))]
    pub justified_checkpoint: Checkpoint,
    #[superstruct(only(V7, V10))]
    pub finalized_checkpoint: Checkpoint,
    #[superstruct(only(V1))]
    pub nodes: Vec<ProtoNodeV1>,
    #[superstruct(only(V6))]
    pub nodes: Vec<ProtoNodeV6>,
    #[superstruct(only(V7))]
    pub nodes: Vec<ProtoNodeV7>,
    #[superstruct(only(V10))]
    pub nodes: Vec<ProtoNodeV10>,
    pub indices: Vec<(Hash256, usize)>,
    // Proposer boost tracking added in V7.
    #[superstruct(only(V7, V10))]
    pub previous_proposer_boost: ProposerBoost,
}
/// Upgrade a V1 container to V6, converting each node in turn.
///
/// `From` rather than `Into` (Clippy `from_over_into`); `.into()` callers are unaffected.
impl From<SszContainerV1> for SszContainerV6 {
    fn from(container: SszContainerV1) -> SszContainerV6 {
        // Node conversion supplies the V6 `execution_status` field.
        let nodes = container.nodes.into_iter().map(Into::into).collect();
        SszContainerV6 {
            votes: container.votes,
            balances: container.balances,
            prune_threshold: container.prune_threshold,
            justified_epoch: container.justified_epoch,
            finalized_epoch: container.finalized_epoch,
            nodes,
            indices: container.indices,
        }
    }
}
impl SszContainerV6 {
pub(crate) fn into_ssz_container_v7(
self,
justified_checkpoint: Checkpoint,
finalized_checkpoint: Checkpoint,
) -> SszContainerV7 {
let nodes = self.nodes.into_iter().map(Into::into).collect();
SszContainerV7 {
votes: self.votes,
balances: self.balances,
prune_threshold: self.prune_threshold,
justified_checkpoint,
finalized_checkpoint,
nodes,
indices: self.indices,
previous_proposer_boost: ProposerBoost::default(),
}
}
}
/// Upgrade a V7 container to V10, converting each node in turn.
///
/// `From` rather than `Into` (Clippy `from_over_into`); `.into()` callers are unaffected.
impl From<SszContainerV7> for SszContainerV10 {
    fn from(container: SszContainerV7) -> SszContainerV10 {
        let nodes = container.nodes.into_iter().map(Into::into).collect();
        SszContainerV10 {
            votes: container.votes,
            balances: container.balances,
            prune_threshold: container.prune_threshold,
            justified_checkpoint: container.justified_checkpoint,
            finalized_checkpoint: container.finalized_checkpoint,
            nodes,
            indices: container.indices,
            previous_proposer_boost: container.previous_proposer_boost,
        }
    }
}
/// Downgrade a V10 container to V7, converting each node in turn.
///
/// `From` rather than `Into` (Clippy `from_over_into`); `.into()` callers are unaffected.
impl From<SszContainerV10> for SszContainerV7 {
    fn from(container: SszContainerV10) -> SszContainerV7 {
        let nodes = container.nodes.into_iter().map(Into::into).collect();
        SszContainerV7 {
            votes: container.votes,
            balances: container.balances,
            prune_threshold: container.prune_threshold,
            justified_checkpoint: container.justified_checkpoint,
            finalized_checkpoint: container.finalized_checkpoint,
            nodes,
            indices: container.indices,
            previous_proposer_boost: container.previous_proposer_boost,
        }
    }
}
/// Convert a V10 container into the current `proto_array::core::SszContainer`.
///
/// `From` rather than `Into` (Clippy `from_over_into`): legal even though `SszContainer`
/// is a foreign type, because `SszContainerV10` is local; `.into()` callers are unaffected.
impl From<SszContainerV10> for SszContainer {
    fn from(container: SszContainerV10) -> SszContainer {
        let nodes = container.nodes.into_iter().map(Into::into).collect();
        SszContainer {
            votes: container.votes,
            balances: container.balances,
            prune_threshold: container.prune_threshold,
            justified_checkpoint: container.justified_checkpoint,
            finalized_checkpoint: container.finalized_checkpoint,
            nodes,
            indices: container.indices,
            previous_proposer_boost: container.previous_proposer_boost,
        }
    }
}
/// Downgrade a current `SszContainer` to V7, converting each node in turn.
impl From<SszContainer> for SszContainerV7 {
    fn from(latest: SszContainer) -> Self {
        Self {
            votes: latest.votes,
            balances: latest.balances,
            prune_threshold: latest.prune_threshold,
            justified_checkpoint: latest.justified_checkpoint,
            finalized_checkpoint: latest.finalized_checkpoint,
            nodes: latest.nodes.into_iter().map(Into::into).collect(),
            indices: latest.indices,
            previous_proposer_boost: latest.previous_proposer_boost,
        }
    }
}

View File

@ -298,27 +298,6 @@ impl<T: EthSpec> SnapshotCache<T> {
})
}
/// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*.
///
/// Care must be taken not to mutate the state in an invalid way. This function should only
/// be used to mutate the *caches* of the state, for example the tree hash cache when
/// calculating a light client merkle proof.
pub fn borrow_unadvanced_state_mut(
    &mut self,
    block_root: Hash256,
) -> Option<&mut BeaconState<T>> {
    // If the pre-state exists then state advance has already taken the state for
    // `block_root` and mutated its tree hash cache. Rather than re-building it while
    // holding the snapshot cache lock (>1 second), prefer to return `None` from this
    // function and force the caller to load it from disk.
    let index = self.snapshots.iter().position(|snapshot| {
        snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none()
    })?;
    Some(&mut self.snapshots[index].beacon_state)
}
/// If there is a snapshot with `block_root`, clone it and return the clone.
pub fn get_cloned(
&self,

View File

@ -386,16 +386,6 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
}
}
/// Get a schema V8 or earlier full block by reading it and its payload from disk.
pub fn get_full_block_prior_to_v9(
    &self,
    block_root: &Hash256,
) -> Result<Option<SignedBeaconBlock<E>>, Error> {
    // Pre-v9 blocks are stored whole, so decode the raw bytes directly as a full block.
    let decode = |bytes: &[u8]| SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec);
    self.get_block_with(block_root, decode)
}
/// Convert a blinded block into a full block by loading its execution payload if necessary.
pub fn make_full_block(
&self,

View File

@ -19,13 +19,13 @@ validator client or the slasher**.
| v2.0.0 | Oct 2021 | v5 | no |
| v2.1.0 | Jan 2022 | v8 | no |
| v2.2.0 | Apr 2022 | v8 | no |
| v2.3.0 | May 2022 | v9 | yes (pre Bellatrix) |
| v2.4.0 | Jul 2022 | v9 | yes (pre Bellatrix) |
| v2.3.0 | May 2022 | v9 | yes from <= v3.3.0 |
| v2.4.0 | Jul 2022 | v9 | yes from <= v3.3.0 |
| v2.5.0 | Aug 2022 | v11 | yes |
| v3.0.0 | Aug 2022 | v11 | yes |
| v3.1.0 | Sep 2022 | v12 | yes |
| v3.2.0 | Oct 2022 | v12 | yes |
| v3.3.0 | TBD | v13 | yes |
| v3.3.0 | Nov 2022 | v13 | yes |
> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release
> (e.g. v2.3.0).