use crate::errors::HandleUnavailable;
use crate::{Error, HotColdDB, ItemStore};
use std::borrow::Cow;
use std::marker::PhantomData;
use types::{
    typenum::Unsigned, BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256,
    SignedBeaconBlock, Slot,
};

/// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over.
///
/// ## Note
///
/// It is assumed that all ancestors for this object are stored in the database. If this is not the
/// case, the iterator will start returning `None` prior to genesis.
pub trait AncestorIter<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>, I: Iterator> {
    /// Returns an iterator over the roots of the ancestors of `self`.
    fn try_iter_ancestor_roots(&self, store: &'a HotColdDB<E, Hot, Cold>) -> Option<I>;
}
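
// Illustrative sketch (a hypothetical helper, not part of the store API): walking
// the ancestor block roots of a block via `AncestorIter`. Assumes the caller
// already holds a `store` handle and a `block`; returns `None` if the block's
// post-state cannot be loaded.
#[allow(dead_code)]
fn example_iter_block_ancestors<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: &HotColdDB<E, Hot, Cold>,
    block: &SignedBeaconBlock<E>,
) -> Option<()> {
    let iter: BlockRootsIterator<'_, E, Hot, Cold> = block.try_iter_ancestor_roots(store)?;
    for result in iter {
        // Items are `Result<(block_root, slot), Error>`, most recent slot first.
        let (_block_root, _slot) = result.ok()?;
    }
    Some(())
}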

impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
    AncestorIter<'a, E, Hot, Cold, BlockRootsIterator<'a, E, Hot, Cold>> for SignedBeaconBlock<E>
{
    /// Iterates across all available prior block roots of `self`, starting at the most recent and ending
    /// at genesis.
    fn try_iter_ancestor_roots(
        &self,
        store: &'a HotColdDB<E, Hot, Cold>,
    ) -> Option<BlockRootsIterator<'a, E, Hot, Cold>> {
        let state = store
            .get_state(&self.message().state_root(), Some(self.slot()))
            .ok()??;

        Some(BlockRootsIterator::owned(store, state))
    }
}

impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
    AncestorIter<'a, E, Hot, Cold, StateRootsIterator<'a, E, Hot, Cold>> for BeaconState<E>
{
    /// Iterates across all available prior state roots of `self`, starting at the most recent and ending
    /// at genesis.
    fn try_iter_ancestor_roots(
        &self,
        store: &'a HotColdDB<E, Hot, Cold>,
    ) -> Option<StateRootsIterator<'a, E, Hot, Cold>> {
        // The `self.clone()` here is wasteful.
        Some(StateRootsIterator::owned(store, self.clone()))
    }
}

pub struct StateRootsIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
    inner: RootsIterator<'a, T, Hot, Cold>,
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Clone
    for StateRootsIterator<'a, T, Hot, Cold>
{
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> StateRootsIterator<'a, T, Hot, Cold> {
    pub fn new(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::new(store, beacon_state),
        }
    }

    pub fn owned(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::owned(store, beacon_state),
        }
    }
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
    for StateRootsIterator<'a, T, Hot, Cold>
{
    type Item = Result<(Hash256, Slot), Error>;

    fn next(&mut self) -> Option<Self::Item> {
        self.inner
            .next()
            .map(|result| result.map(|(_, state_root, slot)| (state_root, slot)))
    }
}
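
// Sketch of typical usage (a hypothetical helper, not part of this module's API):
// collect every prior state root of a state, newest first. Errors reading from
// the store surface as `Err` via `collect` over the `Result` items.
#[allow(dead_code)]
fn example_collect_state_roots<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: &HotColdDB<E, Hot, Cold>,
    state: &BeaconState<E>,
) -> Result<Vec<(Hash256, Slot)>, Error> {
    StateRootsIterator::new(store, state).collect()
}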

/// Iterates backwards through block roots. If any specified slot cannot be retrieved, the
/// iterator returns `None` indefinitely.
///
/// Uses the `block_roots` field of `BeaconState` as the source of block roots and will
/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been
/// exhausted.
///
/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`.
pub struct BlockRootsIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
    inner: RootsIterator<'a, T, Hot, Cold>,
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Clone
    for BlockRootsIterator<'a, T, Hot, Cold>
{
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> BlockRootsIterator<'a, T, Hot, Cold> {
    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn new(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::new(store, beacon_state),
        }
    }

    /// Create a new iterator over all block roots in the given `beacon_state` and prior states.
    pub fn owned(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: BeaconState<T>) -> Self {
        Self {
            inner: RootsIterator::owned(store, beacon_state),
        }
    }

    /// Create a new iterator starting from the block with the given root, using its post-state.
    pub fn from_block(
        store: &'a HotColdDB<T, Hot, Cold>,
        block_hash: Hash256,
    ) -> Result<Self, Error> {
        Ok(Self {
            inner: RootsIterator::from_block(store, block_hash)?,
        })
    }
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
    for BlockRootsIterator<'a, T, Hot, Cold>
{
    type Item = Result<(Hash256, Slot), Error>;

    fn next(&mut self) -> Option<Self::Item> {
        self.inner
            .next()
            .map(|result| result.map(|(block_root, _, slot)| (block_root, slot)))
    }
}
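
// Sketch (hypothetical helper): start a block-root walk from a known block root
// rather than a state. `from_block` loads the block's post-state internally, so
// it can fail if the blinded block or its state is missing from the store.
#[allow(dead_code)]
fn example_roots_from_block<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: &HotColdDB<E, Hot, Cold>,
    block_root: Hash256,
) -> Result<Option<(Hash256, Slot)>, Error> {
    let mut iter = BlockRootsIterator::from_block(store, block_root)?;
    // The first item (if any) is the block root at the slot before the block's own slot.
    iter.next().transpose()
}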

/// Iterator over state and block roots that backtracks using the vectors from a `BeaconState`.
pub struct RootsIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
    store: &'a HotColdDB<T, Hot, Cold>,
    beacon_state: Cow<'a, BeaconState<T>>,
    slot: Slot,
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Clone
    for RootsIterator<'a, T, Hot, Cold>
{
    fn clone(&self) -> Self {
        Self {
            store: self.store,
            beacon_state: self.beacon_state.clone(),
            slot: self.slot,
        }
    }
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> RootsIterator<'a, T, Hot, Cold> {
    pub fn new(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            store,
            slot: beacon_state.slot(),
            beacon_state: Cow::Borrowed(beacon_state),
        }
    }

    pub fn owned(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: BeaconState<T>) -> Self {
        Self {
            store,
            slot: beacon_state.slot(),
            beacon_state: Cow::Owned(beacon_state),
        }
    }

    pub fn from_block(
        store: &'a HotColdDB<T, Hot, Cold>,
        block_hash: Hash256,
    ) -> Result<Self, Error> {
        let block = store
            .get_blinded_block(&block_hash)?
            .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?;
        let state = store
            .get_state(&block.state_root(), Some(block.slot()))?
            .ok_or_else(|| BeaconStateError::MissingBeaconState(block.state_root().into()))?;
        Ok(Self::owned(store, state))
    }

    fn do_next(&mut self) -> Result<Option<(Hash256, Hash256, Slot)>, Error> {
        if self.slot == 0 || self.slot > self.beacon_state.slot() {
            return Ok(None);
        }

        self.slot -= 1;

        match (
            self.beacon_state.get_block_root(self.slot),
            self.beacon_state.get_state_root(self.slot),
        ) {
            (Ok(block_root), Ok(state_root)) => Ok(Some((*block_root, *state_root, self.slot))),
            (Err(BeaconStateError::SlotOutOfBounds), Err(BeaconStateError::SlotOutOfBounds)) => {
                // Read a `BeaconState` from the store that has access to prior historical roots.
                if let Some(beacon_state) =
                    next_historical_root_backtrack_state(self.store, &self.beacon_state)
                        .handle_unavailable()?
                {
                    self.beacon_state = Cow::Owned(beacon_state);

                    let block_root = *self.beacon_state.get_block_root(self.slot)?;
                    let state_root = *self.beacon_state.get_state_root(self.slot)?;

                    Ok(Some((block_root, state_root, self.slot)))
                } else {
                    // No more states available due to weak subjectivity sync.
                    Ok(None)
                }
            }
            (Err(e), _) => Err(e.into()),
            (Ok(_), Err(e)) => Err(e.into()),
        }
    }
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
    for RootsIterator<'a, T, Hot, Cold>
{
    /// (block_root, state_root, slot)
    type Item = Result<(Hash256, Hash256, Slot), Error>;

    fn next(&mut self) -> Option<Self::Item> {
        self.do_next().transpose()
    }
}
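
// Sketch (hypothetical helper): the lower-level `RootsIterator` yields
// `(block_root, state_root, slot)` triples; `BlockRootsIterator` and
// `StateRootsIterator` above are thin projections of it.
#[allow(dead_code)]
fn example_roots_triples<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: &HotColdDB<E, Hot, Cold>,
    state: &BeaconState<E>,
) -> Result<(), Error> {
    for item in RootsIterator::new(store, state) {
        let (_block_root, _state_root, _slot) = item?;
    }
    Ok(())
}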

/// Block iterator that uses the `parent_root` of each block to backtrack.
pub struct ParentRootBlockIterator<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
    store: &'a HotColdDB<E, Hot, Cold>,
    next_block_root: Hash256,
    decode_any_variant: bool,
    _phantom: PhantomData<E>,
}

impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
    ParentRootBlockIterator<'a, E, Hot, Cold>
{
    pub fn new(store: &'a HotColdDB<E, Hot, Cold>, start_block_root: Hash256) -> Self {
        Self {
            store,
            next_block_root: start_block_root,
            decode_any_variant: false,
            _phantom: PhantomData,
        }
    }

    /// Create a block iterator that is tolerant of blocks that have the wrong fork for their slot.
    pub fn fork_tolerant(store: &'a HotColdDB<E, Hot, Cold>, start_block_root: Hash256) -> Self {
        Self {
            store,
            next_block_root: start_block_root,
            decode_any_variant: true,
            _phantom: PhantomData,
        }
    }

    #[allow(clippy::type_complexity)]
    fn do_next(
        &mut self,
    ) -> Result<Option<(Hash256, SignedBeaconBlock<E, BlindedPayload<E>>)>, Error> {
        // Stop once we reach the zero parent, otherwise we'll keep returning the genesis
        // block forever.
        if self.next_block_root.is_zero() {
            Ok(None)
        } else {
            let block_root = self.next_block_root;
            let block = if self.decode_any_variant {
                self.store.get_block_any_variant(&block_root)
            } else {
                self.store.get_blinded_block(&block_root)
            }?
            .ok_or(Error::BlockNotFound(block_root))?;
            self.next_block_root = block.message().parent_root();
            Ok(Some((block_root, block)))
        }
    }
}

impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
    for ParentRootBlockIterator<'a, E, Hot, Cold>
{
    type Item = Result<(Hash256, SignedBeaconBlock<E, BlindedPayload<E>>), Error>;

    fn next(&mut self) -> Option<Self::Item> {
        self.do_next().transpose()
    }
}
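
// Sketch (hypothetical helper): count ancestors by following `parent_root`
// links back to the zero root. `ParentRootBlockIterator::fork_tolerant` could
// be substituted for `new` if blocks on disk may carry the wrong fork variant
// for their slot.
#[allow(dead_code)]
fn example_count_ancestors<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: &HotColdDB<E, Hot, Cold>,
    head_block_root: Hash256,
) -> Result<usize, Error> {
    let mut count = 0;
    for item in ParentRootBlockIterator::new(store, head_block_root) {
        let (_block_root, _blinded_block) = item?;
        count += 1;
    }
    Ok(count)
}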

/// Extends `BlockRootsIterator`, returning `SignedBeaconBlock` instances instead of their roots.
#[derive(Clone)]
pub struct BlockIterator<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> {
    roots: BlockRootsIterator<'a, T, Hot, Cold>,
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> BlockIterator<'a, T, Hot, Cold> {
    /// Create a new iterator over all blocks in the given `beacon_state` and prior states.
    pub fn new(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: &'a BeaconState<T>) -> Self {
        Self {
            roots: BlockRootsIterator::new(store, beacon_state),
        }
    }

    /// Create a new iterator over all blocks in the given `beacon_state` and prior states.
    pub fn owned(store: &'a HotColdDB<T, Hot, Cold>, beacon_state: BeaconState<T>) -> Self {
        Self {
            roots: BlockRootsIterator::owned(store, beacon_state),
        }
    }

    fn do_next(&mut self) -> Result<Option<SignedBeaconBlock<T, BlindedPayload<T>>>, Error> {
        if let Some(result) = self.roots.next() {
            let (root, _slot) = result?;
            self.roots.inner.store.get_blinded_block(&root)
        } else {
            Ok(None)
        }
    }
}

impl<'a, T: EthSpec, Hot: ItemStore<T>, Cold: ItemStore<T>> Iterator
    for BlockIterator<'a, T, Hot, Cold>
{
    type Item = Result<SignedBeaconBlock<T, BlindedPayload<T>>, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        self.do_next().transpose()
    }
}
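
// Sketch (hypothetical helper): collect the blinded ancestor blocks themselves
// rather than just their roots. The database stores blinded blocks (see #3157),
// so this yields `SignedBeaconBlock<_, BlindedPayload<_>>` without payloads.
#[allow(dead_code)]
fn example_collect_ancestor_blocks<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: &HotColdDB<E, Hot, Cold>,
    state: &BeaconState<E>,
) -> Result<Vec<SignedBeaconBlock<E, BlindedPayload<E>>>, Error> {
    BlockIterator::new(store, state).collect()
}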

/// Fetch the next state to use whilst backtracking in `*RootsIterator`.
///
/// Return `Err(HistoryUnavailable)` in the case where no more backtrack states are available
/// due to weak subjectivity sync.
fn next_historical_root_backtrack_state<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: &HotColdDB<E, Hot, Cold>,
    current_state: &BeaconState<E>,
) -> Result<BeaconState<E>, Error> {
    // For compatibility with the freezer database's restore points, we load a state at
    // a restore point slot (thus avoiding replaying blocks). In the case where we're
    // not frozen, this just means we might not jump back by the maximum amount on
    // our first jump (i.e. at most 1 extra state load).
    let new_state_slot = slot_of_prev_restore_point::<E>(current_state.slot());

    let (_, historic_state_upper_limit) = store.get_historic_state_limits();

    if new_state_slot >= historic_state_upper_limit {
        let new_state_root = current_state.get_state_root(new_state_slot)?;
        Ok(store
            .get_state(new_state_root, Some(new_state_slot))?
            .ok_or_else(|| BeaconStateError::MissingBeaconState((*new_state_root).into()))?)
    } else {
        Err(Error::HistoryUnavailable)
    }
}

/// Compute the slot of the last guaranteed restore point in the freezer database.
fn slot_of_prev_restore_point<E: EthSpec>(current_slot: Slot) -> Slot {
    let slots_per_historical_root = E::SlotsPerHistoricalRoot::to_u64();
    (current_slot - 1) / slots_per_historical_root * slots_per_historical_root
}
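
// Worked example (illustrative sketch): with mainnet's `SlotsPerHistoricalRoot`
// of 8192, slot 10_000 maps to (10_000 - 1) / 8192 * 8192 = 8192, and slot 8192
// itself maps back to the previous boundary at 0.
#[allow(dead_code)]
fn example_prev_restore_point() -> (Slot, Slot) {
    use types::MainnetEthSpec;
    (
        slot_of_prev_restore_point::<MainnetEthSpec>(Slot::new(10_000)),
        slot_of_prev_restore_point::<MainnetEthSpec>(Slot::new(8_192)),
    )
}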

#[cfg(test)]
mod test {
    use super::*;
    use crate::HotColdDB;
    use crate::StoreConfig as Config;
    use beacon_chain::test_utils::BeaconChainHarness;
    use beacon_chain::types::{ChainSpec, MainnetEthSpec};
    use sloggers::{null::NullLoggerBuilder, Build};

    fn get_state<T: EthSpec>() -> BeaconState<T> {
        let harness = BeaconChainHarness::builder(T::default())
            .default_spec()
            .deterministic_keypairs(1)
            .fresh_ephemeral_store()
            .build();
        harness.advance_slot();
        harness.get_current_state()
    }

    #[test]
    fn block_root_iter() {
        let log = NullLoggerBuilder.build().unwrap();
        let store =
            HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap();
        let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();

        let mut state_a: BeaconState<MainnetEthSpec> = get_state();
        let mut state_b: BeaconState<MainnetEthSpec> = get_state();

        *state_a.slot_mut() = Slot::from(slots_per_historical_root);
        *state_b.slot_mut() = Slot::from(slots_per_historical_root * 2);

        let mut hashes = (0..).map(Hash256::from_low_u64_be);
        let roots_a = state_a.block_roots_mut();
        for i in 0..roots_a.len() {
            roots_a[i] = hashes.next().unwrap()
        }
        let roots_b = state_b.block_roots_mut();
        for i in 0..roots_b.len() {
            roots_b[i] = hashes.next().unwrap()
        }

        let state_a_root = hashes.next().unwrap();
        state_b.state_roots_mut()[0] = state_a_root;
        store.put_state(&state_a_root, &state_a).unwrap();

        let iter = BlockRootsIterator::new(&store, &state_b);

        assert!(
            iter.clone()
                .any(|result| result.map(|(_root, slot)| slot == 0).unwrap()),
            "iter should contain zero slot"
        );

        let mut collected: Vec<(Hash256, Slot)> = iter.collect::<Result<Vec<_>, _>>().unwrap();
        collected.reverse();

        let expected_len = 2 * MainnetEthSpec::slots_per_historical_root();

        assert_eq!(collected.len(), expected_len);

        for (i, item) in collected.iter().enumerate() {
            assert_eq!(item.0, Hash256::from_low_u64_be(i as u64));
        }
    }

    #[test]
    fn state_root_iter() {
        let log = NullLoggerBuilder.build().unwrap();
        let store =
            HotColdDB::open_ephemeral(Config::default(), ChainSpec::minimal(), log).unwrap();
        let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root();

        let mut state_a: BeaconState<MainnetEthSpec> = get_state();
        let mut state_b: BeaconState<MainnetEthSpec> = get_state();

        *state_a.slot_mut() = Slot::from(slots_per_historical_root);
        *state_b.slot_mut() = Slot::from(slots_per_historical_root * 2);

        let mut hashes = (0..).map(Hash256::from_low_u64_be);

        for slot in 0..slots_per_historical_root {
            state_a
                .set_state_root(Slot::from(slot), hashes.next().unwrap())
                .unwrap_or_else(|_| panic!("should set state_a slot {}", slot));
        }
        for slot in slots_per_historical_root..slots_per_historical_root * 2 {
            state_b
                .set_state_root(Slot::from(slot), hashes.next().unwrap())
                .unwrap_or_else(|_| panic!("should set state_b slot {}", slot));
        }

        let state_a_root = Hash256::from_low_u64_be(slots_per_historical_root as u64);
        let state_b_root = Hash256::from_low_u64_be(slots_per_historical_root as u64 * 2);

        store.put_state(&state_a_root, &state_a).unwrap();
        store.put_state(&state_b_root, &state_b).unwrap();

        let iter = StateRootsIterator::new(&store, &state_b);

        assert!(
            iter.clone()
                .any(|result| result.map(|(_root, slot)| slot == 0).unwrap()),
            "iter should contain zero slot"
        );

        let mut collected: Vec<(Hash256, Slot)> = iter.collect::<Result<Vec<_>, _>>().unwrap();
        collected.reverse();

        let expected_len = MainnetEthSpec::slots_per_historical_root() * 2;

        assert_eq!(collected.len(), expected_len, "collection length incorrect");

        for (i, item) in collected.iter().enumerate() {
            let (hash, slot) = *item;

            assert_eq!(slot, i as u64, "slot mismatch at {}: {} vs {}", i, slot, i);

            assert_eq!(
                hash,
                Hash256::from_low_u64_be(i as u64),
                "hash mismatch at {}",
                i
            );
        }
    }
}