//! Utilities for managing database schema changes.
mod migration_schema_v6;
mod migration_schema_v7;
mod migration_schema_v8;
mod migration_schema_v9;
mod types;

use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY};
use crate::persisted_fork_choice::{PersistedForkChoiceV1, PersistedForkChoiceV7};
use slog::{warn, Logger};
use std::path::Path;
use std::sync::Arc;
use store::hot_cold_store::{HotColdDB, HotColdDBError};
use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION};
use store::{Error as StoreError, StoreItem};
/// Migrate the database from one schema version to another, applying all requisite mutations.
|
|
|
|
pub fn migrate_schema<T: BeaconChainTypes>(
|
|
|
|
db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
|
|
|
|
datadir: &Path,
|
|
|
|
from: SchemaVersion,
|
|
|
|
to: SchemaVersion,
|
2021-12-13 20:43:22 +00:00
|
|
|
log: Logger,
|
2021-03-04 01:25:12 +00:00
|
|
|
) -> Result<(), StoreError> {
|
|
|
|
match (from, to) {
|
|
|
|
// Migrating from the current schema version to iself is always OK, a no-op.
|
|
|
|
(_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()),
|
Separate execution payloads in the DB (#3157)
## Proposed Changes
Reduce post-merge disk usage by not storing finalized execution payloads in Lighthouse's database.
:warning: **This is achieved in a backwards-incompatible way for networks that have already merged** :warning:. Kiln users and shadow fork enjoyers will be unable to downgrade after running the code from this PR. The upgrade migration may take several minutes to run, and can't be aborted after it begins.
The main changes are:
- New column in the database called `ExecPayload`, keyed by beacon block root.
- The `BeaconBlock` column now stores blinded blocks only.
- Lots of places that previously used full blocks now use blinded blocks, e.g. analytics APIs, block replay in the DB, etc.
- On finalization:
- `prune_abanonded_forks` deletes non-canonical payloads whilst deleting non-canonical blocks.
- `migrate_db` deletes finalized canonical payloads whilst deleting finalized states.
- Conversions between blinded and full blocks are implemented in a compositional way, duplicating some work from Sean's PR #3134.
- The execution layer has a new `get_payload_by_block_hash` method that reconstructs a payload using the EE's `eth_getBlockByHash` call.
- I've tested manually that it works on Kiln, using Geth and Nethermind.
- This isn't necessarily the most efficient method, and new engine APIs are being discussed to improve this: https://github.com/ethereum/execution-apis/pull/146.
- We're depending on the `ethers` master branch, due to lots of recent changes. We're also using a workaround for https://github.com/gakonst/ethers-rs/issues/1134.
- Payload reconstruction is used in the HTTP API via `BeaconChain::get_block`, which is now `async`. Due to the `async` fn, the `blocking_json` wrapper has been removed.
- Payload reconstruction is used in network RPC to serve blocks-by-{root,range} responses. Here the `async` adjustment is messier, although I think I've managed to come up with a reasonable compromise: the handlers take the `SendOnDrop` by value so that they can drop it on _task completion_ (after the `fn` returns). Still, this is introducing disk reads onto core executor threads, which may have a negative performance impact (thoughts appreciated).
## Additional Info
- [x] For performance it would be great to remove the cloning of full blocks when converting them to blinded blocks to write to disk. I'm going to experiment with a `put_block` API that takes the block by value, breaks it into a blinded block and a payload, stores the blinded block, and then re-assembles the full block for the caller.
- [x] We should measure the latency of blocks-by-root and blocks-by-range responses.
- [x] We should add integration tests that stress the payload reconstruction (basic tests done, issue for more extensive tests: https://github.com/sigp/lighthouse/issues/3159)
- [x] We should (manually) test the schema v9 migration from several prior versions, particularly as blocks have changed on disk and some migrations rely on being able to load blocks.
Co-authored-by: Paul Hauner <paul@paulhauner.com>
2022-05-12 00:42:17 +00:00
|
|
|
// Upgrade across multiple versions by recursively migrating one step at a time.
|
2021-03-04 01:25:12 +00:00
|
|
|
(_, _) if from.as_u64() + 1 < to.as_u64() => {
|
|
|
|
let next = SchemaVersion(from.as_u64() + 1);
|
2021-12-13 20:43:22 +00:00
|
|
|
migrate_schema::<T>(db.clone(), datadir, from, next, log.clone())?;
|
|
|
|
migrate_schema::<T>(db, datadir, next, to, log)
|
2021-03-04 01:25:12 +00:00
|
|
|
}
|
2021-07-15 00:52:02 +00:00
|
|
|
|
2022-05-17 04:54:39 +00:00
|
|
|
//
|
|
|
|
// Migrations from before SchemaVersion(5) are deprecated.
|
|
|
|
//
|
2021-09-22 00:37:28 +00:00
|
|
|
|
2021-12-13 20:43:22 +00:00
|
|
|
// Migration for adding `execution_status` field to the fork choice store.
|
2021-09-28 09:56:49 +00:00
|
|
|
(SchemaVersion(5), SchemaVersion(6)) => {
|
2021-12-13 20:43:22 +00:00
|
|
|
// Database operations to be done atomically
|
|
|
|
let mut ops = vec![];
|
|
|
|
|
|
|
|
// The top-level `PersistedForkChoice` struct is still V1 but will have its internal
|
|
|
|
// bytes for the fork choice updated to V6.
|
|
|
|
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
|
|
|
if let Some(mut persisted_fork_choice) = fork_choice_opt {
|
|
|
|
migration_schema_v6::update_execution_statuses::<T>(&mut persisted_fork_choice)
|
|
|
|
.map_err(StoreError::SchemaMigrationError)?;
|
|
|
|
|
2021-12-13 23:35:57 +00:00
|
|
|
// Store the converted fork choice store under the same key.
|
|
|
|
ops.push(persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
2021-12-13 20:43:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
db.store_schema_version_atomically(to, ops)?;
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
// 1. Add `proposer_boost_root`.
|
|
|
|
// 2. Update `justified_epoch` to `justified_checkpoint` and `finalized_epoch` to
|
|
|
|
// `finalized_checkpoint`.
|
|
|
|
// 3. This migration also includes a potential update to the justified
|
|
|
|
// checkpoint in case the fork choice store's justified checkpoint and finalized checkpoint
|
|
|
|
// combination does not actually exist for any blocks in fork choice. This was possible in
|
|
|
|
// the consensus spec prior to v1.1.6.
|
|
|
|
//
|
|
|
|
// Relevant issues:
|
|
|
|
//
|
|
|
|
// https://github.com/sigp/lighthouse/issues/2741
|
|
|
|
// https://github.com/ethereum/consensus-specs/pull/2727
|
|
|
|
// https://github.com/ethereum/consensus-specs/pull/2730
|
|
|
|
(SchemaVersion(6), SchemaVersion(7)) => {
|
|
|
|
// Database operations to be done atomically
|
|
|
|
let mut ops = vec![];
|
|
|
|
|
|
|
|
let fork_choice_opt = db.get_item::<PersistedForkChoiceV1>(&FORK_CHOICE_DB_KEY)?;
|
|
|
|
if let Some(persisted_fork_choice_v1) = fork_choice_opt {
|
|
|
|
// This migrates the `PersistedForkChoiceStore`, adding the `proposer_boost_root` field.
|
|
|
|
let mut persisted_fork_choice_v7 = persisted_fork_choice_v1.into();
|
|
|
|
|
|
|
|
let result = migration_schema_v7::update_fork_choice::<T>(
|
|
|
|
&mut persisted_fork_choice_v7,
|
|
|
|
db.clone(),
|
|
|
|
);
|
|
|
|
|
|
|
|
// Fall back to re-initializing fork choice from an anchor state if necessary.
|
|
|
|
if let Err(e) = result {
|
|
|
|
warn!(log, "Unable to migrate to database schema 7, re-initializing fork choice"; "error" => ?e);
|
|
|
|
migration_schema_v7::update_with_reinitialized_fork_choice::<T>(
|
|
|
|
&mut persisted_fork_choice_v7,
|
|
|
|
db.clone(),
|
|
|
|
)
|
|
|
|
.map_err(StoreError::SchemaMigrationError)?;
|
|
|
|
}
|
|
|
|
|
2021-09-28 09:56:49 +00:00
|
|
|
// Store the converted fork choice store under the same key.
|
2021-12-13 23:35:57 +00:00
|
|
|
ops.push(persisted_fork_choice_v7.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
|
|
|
}
|
|
|
|
|
|
|
|
db.store_schema_version_atomically(to, ops)?;
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
// Migration to add an `epoch` key to the fork choice's balances cache.
|
|
|
|
(SchemaVersion(7), SchemaVersion(8)) => {
|
|
|
|
let mut ops = vec![];
|
|
|
|
let fork_choice_opt = db.get_item::<PersistedForkChoiceV7>(&FORK_CHOICE_DB_KEY)?;
|
|
|
|
if let Some(fork_choice) = fork_choice_opt {
|
|
|
|
let updated_fork_choice =
|
|
|
|
migration_schema_v8::update_fork_choice::<T>(fork_choice, db.clone())?;
|
|
|
|
|
|
|
|
ops.push(updated_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY));
|
2021-09-28 09:56:49 +00:00
|
|
|
}
|
|
|
|
|
2021-12-13 20:43:22 +00:00
|
|
|
db.store_schema_version_atomically(to, ops)?;
|
2021-09-28 09:56:49 +00:00
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
Separate execution payloads in the DB (#3157)
## Proposed Changes
Reduce post-merge disk usage by not storing finalized execution payloads in Lighthouse's database.
:warning: **This is achieved in a backwards-incompatible way for networks that have already merged** :warning:. Kiln users and shadow fork enjoyers will be unable to downgrade after running the code from this PR. The upgrade migration may take several minutes to run, and can't be aborted after it begins.
The main changes are:
- New column in the database called `ExecPayload`, keyed by beacon block root.
- The `BeaconBlock` column now stores blinded blocks only.
- Lots of places that previously used full blocks now use blinded blocks, e.g. analytics APIs, block replay in the DB, etc.
- On finalization:
- `prune_abanonded_forks` deletes non-canonical payloads whilst deleting non-canonical blocks.
- `migrate_db` deletes finalized canonical payloads whilst deleting finalized states.
- Conversions between blinded and full blocks are implemented in a compositional way, duplicating some work from Sean's PR #3134.
- The execution layer has a new `get_payload_by_block_hash` method that reconstructs a payload using the EE's `eth_getBlockByHash` call.
- I've tested manually that it works on Kiln, using Geth and Nethermind.
- This isn't necessarily the most efficient method, and new engine APIs are being discussed to improve this: https://github.com/ethereum/execution-apis/pull/146.
- We're depending on the `ethers` master branch, due to lots of recent changes. We're also using a workaround for https://github.com/gakonst/ethers-rs/issues/1134.
- Payload reconstruction is used in the HTTP API via `BeaconChain::get_block`, which is now `async`. Due to the `async` fn, the `blocking_json` wrapper has been removed.
- Payload reconstruction is used in network RPC to serve blocks-by-{root,range} responses. Here the `async` adjustment is messier, although I think I've managed to come up with a reasonable compromise: the handlers take the `SendOnDrop` by value so that they can drop it on _task completion_ (after the `fn` returns). Still, this is introducing disk reads onto core executor threads, which may have a negative performance impact (thoughts appreciated).
## Additional Info
- [x] For performance it would be great to remove the cloning of full blocks when converting them to blinded blocks to write to disk. I'm going to experiment with a `put_block` API that takes the block by value, breaks it into a blinded block and a payload, stores the blinded block, and then re-assembles the full block for the caller.
- [x] We should measure the latency of blocks-by-root and blocks-by-range responses.
- [x] We should add integration tests that stress the payload reconstruction (basic tests done, issue for more extensive tests: https://github.com/sigp/lighthouse/issues/3159)
- [x] We should (manually) test the schema v9 migration from several prior versions, particularly as blocks have changed on disk and some migrations rely on being able to load blocks.
Co-authored-by: Paul Hauner <paul@paulhauner.com>
2022-05-12 00:42:17 +00:00
|
|
|
// Upgrade from v8 to v9 to separate the execution payloads into their own column.
|
|
|
|
(SchemaVersion(8), SchemaVersion(9)) => {
|
|
|
|
migration_schema_v9::upgrade_to_v9::<T>(db.clone(), log)?;
|
|
|
|
db.store_schema_version(to)
|
|
|
|
}
|
|
|
|
// Downgrade from v9 to v8 to ignore the separation of execution payloads
|
|
|
|
// NOTE: only works before the Bellatrix fork epoch.
|
|
|
|
(SchemaVersion(9), SchemaVersion(8)) => {
|
|
|
|
migration_schema_v9::downgrade_from_v9::<T>(db.clone(), log)?;
|
|
|
|
db.store_schema_version(to)
|
|
|
|
}
|
2021-03-04 01:25:12 +00:00
|
|
|
// Anything else is an error.
|
|
|
|
(_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
|
|
|
|
target_version: to,
|
|
|
|
current_version: from,
|
|
|
|
}
|
|
|
|
.into()),
|
|
|
|
}
|
|
|
|
}
|