Fix Capella schema downgrades (#4004)

Michael Sproul 2023-02-20 17:50:42 +11:00 committed by GitHub
parent 9a41f65b89
commit 0b6850221e
7 changed files with 171 additions and 11 deletions


@@ -2,9 +2,41 @@ use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
 use operation_pool::{
     PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
 };
-use slog::{debug, info, Logger};
+use slog::{debug, error, info, Logger};
+use slot_clock::SlotClock;
 use std::sync::Arc;
+use std::time::Duration;
 use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
+use types::{EthSpec, Hash256, Slot};
 
+/// The slot clock isn't usually available before the database is initialized, so we construct a
+/// temporary slot clock by reading the genesis state. It should always exist if the database is
+/// initialized at a prior schema version, however we still handle the lack of genesis state
+/// gracefully.
+fn get_slot_clock<T: BeaconChainTypes>(
+    db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
+    log: &Logger,
+) -> Result<Option<T::SlotClock>, Error> {
+    let spec = db.get_chain_spec();
+    let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? {
+        block
+    } else {
+        error!(log, "Missing genesis block");
+        return Ok(None);
+    };
+    let genesis_state =
+        if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? {
+            state
+        } else {
+            error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root());
+            return Ok(None);
+        };
+    Ok(Some(T::SlotClock::new(
+        spec.genesis_slot,
+        Duration::from_secs(genesis_state.genesis_time()),
+        Duration::from_secs(spec.seconds_per_slot),
+    )))
+}
+
 pub fn upgrade_to_v14<T: BeaconChainTypes>(
     db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
@@ -41,17 +73,35 @@ pub fn downgrade_from_v14<T: BeaconChainTypes>(
     db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
+    // We cannot downgrade from V14 once the Capella fork has been reached because there will
+    // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions
+    // of Lighthouse can't handle that.
+    if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch {
+        let current_epoch = get_slot_clock::<T>(&db, &log)?
+            .and_then(|clock| clock.now())
+            .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch()))
+            .ok_or(Error::SlotClockUnavailableForMigration)?;
+
+        if current_epoch >= capella_fork_epoch {
+            error!(
+                log,
+                "Capella already active: v14+ is mandatory";
+                "current_epoch" => current_epoch,
+                "capella_fork_epoch" => capella_fork_epoch,
+            );
+            return Err(Error::UnableToDowngrade);
+        }
+    }
+
     // Load a V14 op pool and transform it to V12.
-    let PersistedOperationPoolV14 {
+    let PersistedOperationPoolV14::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
         bls_to_execution_changes,
-    } = if let Some(PersistedOperationPool::<T::EthSpec>::V14(op_pool)) =
-        db.get_item(&OP_POOL_DB_KEY)?
-    {
+    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
         op_pool
     } else {
         debug!(log, "Nothing to do, no operation pool stored");


@@ -43,7 +43,7 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V15 op pool and transform it to V14.
-    let PersistedOperationPoolV15 {
+    let PersistedOperationPoolV15::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
@@ -51,9 +51,7 @@ pub fn downgrade_from_v15<T: BeaconChainTypes>(
         voluntary_exits,
         bls_to_execution_changes,
         capella_bls_change_broadcast_indices,
-    } = if let Some(PersistedOperationPool::<T::EthSpec>::V15(op_pool)) =
-        db.get_item(&OP_POOL_DB_KEY)?
-    {
+    } = if let Some(op_pool) = db.get_item(&OP_POOL_DB_KEY)? {
         op_pool
     } else {
         debug!(log, "Nothing to do, no operation pool stored");


@@ -2,6 +2,7 @@
 
 use beacon_chain::attestation_verification::Error as AttnError;
 use beacon_chain::builder::BeaconChainBuilder;
+use beacon_chain::schema_change::migrate_schema;
 use beacon_chain::test_utils::{
     test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
 };
@@ -22,6 +23,7 @@ use std::collections::HashSet;
 use std::convert::TryInto;
 use std::sync::Arc;
 use std::time::Duration;
+use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
 use store::{
     iter::{BlockRootsIterator, StateRootsIterator},
     HotColdDB, LevelDB, StoreConfig,
@@ -68,6 +70,7 @@ fn get_harness(
     let harness = BeaconChainHarness::builder(MinimalEthSpec)
         .default_spec()
         .keypairs(KEYPAIRS[0..validator_count].to_vec())
+        .logger(store.logger().clone())
         .fresh_disk_store(store)
         .mock_execution_layer()
         .build();
@@ -2529,6 +2532,91 @@ async fn revert_minority_fork_on_resume() {
     assert_eq!(heads.len(), 1);
 }
 
+// This test checks whether the schema downgrade from the latest version to some minimum supported
+// version is correct. This is the easiest schema test to write without historic versions of
+// Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually
+// as old downgrades are deprecated.
+#[tokio::test]
+async fn schema_downgrade_to_min_version() {
+    let num_blocks_produced = E::slots_per_epoch() * 4;
+    let db_path = tempdir().unwrap();
+    let store = get_store(&db_path);
+    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
+    let spec = &harness.chain.spec.clone();
+
+    harness
+        .extend_chain(
+            num_blocks_produced as usize,
+            BlockStrategy::OnCanonicalHead,
+            AttestationStrategy::AllValidators,
+        )
+        .await;
+
+    let min_version = if harness.spec.capella_fork_epoch.is_some() {
+        // Can't downgrade beyond V14 once Capella is reached, for simplicity don't test that
+        // at all if Capella is enabled.
+        SchemaVersion(14)
+    } else {
+        SchemaVersion(11)
+    };
+
+    // Close the database to ensure everything is written to disk.
+    drop(store);
+    drop(harness);
+
+    // Re-open the store.
+    let store = get_store(&db_path);
+
+    // Downgrade.
+    let deposit_contract_deploy_block = 0;
+    migrate_schema::<DiskHarnessType<E>>(
+        store.clone(),
+        deposit_contract_deploy_block,
+        CURRENT_SCHEMA_VERSION,
+        min_version,
+        store.logger().clone(),
+        spec,
+    )
+    .expect("schema downgrade to minimum version should work");
+
+    // Upgrade back.
+    migrate_schema::<DiskHarnessType<E>>(
+        store.clone(),
+        deposit_contract_deploy_block,
+        min_version,
+        CURRENT_SCHEMA_VERSION,
+        store.logger().clone(),
+        spec,
+    )
+    .expect("schema upgrade from minimum version should work");
+
+    // Recreate the harness.
+    let harness = BeaconChainHarness::builder(MinimalEthSpec)
+        .default_spec()
+        .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
+        .logger(store.logger().clone())
+        .resumed_disk_store(store.clone())
+        .mock_execution_layer()
+        .build();
+
+    check_finalization(&harness, num_blocks_produced);
+    check_split_slot(&harness, store.clone());
+    check_chain_dump(&harness, num_blocks_produced + 1);
+    check_iterators(&harness);
+
+    // Check that downgrading beyond the minimum version fails (bound is *tight*).
+    let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap());
+    migrate_schema::<DiskHarnessType<E>>(
+        store.clone(),
+        deposit_contract_deploy_block,
+        CURRENT_SCHEMA_VERSION,
+        min_version_sub_1,
+        harness.logger().clone(),
+        spec,
+    )
+    .expect_err("should not downgrade below minimum version");
+}
+
 /// Checks that two chains are the same, for the purpose of these tests.
 ///
 /// Several fields that are hard/impossible to check are ignored (e.g., the store).


@@ -242,6 +242,20 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV14<T> {
     }
 }
 
+impl<T: EthSpec> StoreItem for PersistedOperationPoolV15<T> {
+    fn db_column() -> DBColumn {
+        DBColumn::OpPool
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
+        PersistedOperationPoolV15::from_ssz_bytes(bytes).map_err(Into::into)
+    }
+}
+
 /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`.
 impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {
     fn db_column() -> DBColumn {


@@ -42,9 +42,8 @@ pub enum Error {
     },
     BlockReplayError(BlockReplayError),
     AddPayloadLogicError,
-    ResyncRequiredForExecutionPayloadSeparation,
     SlotClockUnavailableForMigration,
-    V9MigrationFailure(Hash256),
+    UnableToDowngrade,
     InconsistentFork(InconsistentFork),
 }


@@ -1176,6 +1176,11 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         &self.spec
     }
 
+    /// Get a reference to the `Logger` used by the database.
+    pub fn logger(&self) -> &Logger {
+        &self.log
+    }
+
     /// Fetch a copy of the current split slot from memory.
     pub fn get_split_slot(&self) -> Slot {
         self.split.read_recursive().slot


@@ -26,10 +26,16 @@ validator client or the slasher**.
 | v3.1.0 | Sep 2022 | v12 | yes                |
 | v3.2.0 | Oct 2022 | v12 | yes                |
 | v3.3.0 | Nov 2022 | v13 | yes                |
+| v3.4.0 | Jan 2023 | v13 | yes                |
+| v3.5.0 | Feb 2023 | v15 | yes before Capella |
 
 > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release
 > (e.g. v2.3.0).
 
+> **Note**: Support for old schemas is gradually removed from newer versions of Lighthouse. We
+> usually do this after a major version has been out for a while and everyone has upgraded. In this
+> case the above table will continue to record the deprecated schema changes for reference.
+
 ## How to apply a database downgrade
 
 To apply a downgrade you need to use the `lighthouse db migrate` command with the correct parameters.
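For context, the `lighthouse db migrate` invocation itself is not shown in this hunk. A downgrade typically looks something like the sketch below; the target schema version, data directory and network are placeholders, and the `--to`, `--datadir` and `--network` flags are assumed from the `lighthouse db migrate` documentation rather than taken from this diff:

    # Sketch only: stop the beacon node first, then run the migration with your own values.
    lighthouse db migrate --to 13 --datadir /var/lib/lighthouse --network mainnet

Consult the rest of the book chapter patched above for the authoritative command and parameters.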