#![cfg(not(debug_assertions))]

use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy};
use beacon_chain::{StateSkipConfig, WhenSlotSkipped};
use lazy_static::lazy_static;
use tree_hash::TreeHash;
use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};

pub const VALIDATOR_COUNT: usize = 16;

lazy_static! {
    /// A cached set of keys.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}

/// This test builds a chain that is just long enough to finalize an epoch, then produces an
/// attestation at each slot from genesis through to three epochs past the head.
///
/// It checks each produced attestation against some locally computed values.
#[test]
fn produces_attestations() {
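    // Build four epochs of blocks so the chain can finalize, then test three further epochs
    // of slots with no new blocks.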
    let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
    let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3;

    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .default_spec()
        .keypairs(KEYPAIRS[..].to_vec())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    let chain = &harness.chain;

    // Test all valid committee indices for all slots in the chain.
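    // For slots up to `num_blocks_produced`, advance the clock and add one block per slot;
    // past that, the head stays fixed while attestations continue.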
for slot in 0..=num_blocks_produced + additional_slots_tested {
        if slot > 0 && slot <= num_blocks_produced {
            harness.advance_slot();

            harness.extend_chain(
                1,
                BlockStrategy::OnCanonicalHead,
                AttestationStrategy::AllValidators,
            );
        }

        let slot = Slot::from(slot);
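        // Fetch the state at this slot, building state roots across any skipped slots.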
let mut state = chain
            .state_at_slot(slot, StateSkipConfig::WithStateRoots)
            .expect("should get state");
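        // Slots beyond the head have no block of their own, so they attest to the last block
        // produced.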
let block_slot = if slot <= num_blocks_produced {
            slot
        } else {
            Slot::from(num_blocks_produced)
        };
let blinded_block = chain
.block_at_slot(block_slot, WhenSlotSkipped::Prev)
            .expect("should get block")
            .expect("block should not be skipped");
let block_root = blinded_block.message().tree_hash_root();
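        // Re-assemble the full block (the store keeps execution payloads separately from the
        // blinded blocks); the early attester cache below needs the full block.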
        let block = chain
            .store
            .make_full_block(&block_root, blinded_block)
            .unwrap();
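        // The attestation target is the first block of the current epoch: the head block itself
        // if the state sits on the epoch boundary, otherwise the boundary block from the state's
        // block roots.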
let epoch_boundary_slot = state
            .current_epoch()
            .start_slot(MainnetEthSpec::slots_per_epoch());
        let target_root = if state.slot() == epoch_boundary_slot {
            block_root
        } else {
            *state
                .get_block_root(epoch_boundary_slot)
                .expect("should get target block root")
        };
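        // Build the committee cache so the expected committee length can be checked below.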
state
            .build_committee_cache(RelativeEpoch::Current, &harness.chain.spec)
            .unwrap();
        let committee_cache = state
            .committee_cache(RelativeEpoch::Current)
            .expect("should get committee_cache");

        let committee_count = committee_cache.committees_per_slot();

        for index in 0..committee_count {
            let committee_len = committee_cache
                .get_beacon_committee(slot, index)
                .expect("should get committee for slot")
                .committee
                .len();

            let attestation = chain
                .produce_unaggregated_attestation(slot, index)
                .expect("should produce attestation");

            let data = &attestation.data;

            assert_eq!(
                attestation.aggregation_bits.len(),
                committee_len,
                "bad committee len"
            );
            assert!(
                attestation.aggregation_bits.is_zero(),
                "some committee bits are set"
            );
            assert_eq!(
                attestation.signature,
                AggregateSignature::empty(),
                "bad signature"
            );
            assert_eq!(data.index, index, "bad index");
            assert_eq!(data.slot, slot, "bad slot");
            assert_eq!(data.beacon_block_root, block_root, "bad block root");
            assert_eq!(
                data.source,
                state.current_justified_checkpoint(),
                "bad source"
            );
            assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
            assert_eq!(data.target.root, target_root, "bad target root");
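            // Priming the early attester cache with the head block should yield an attestation
            // identical to the one produced by the chain.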
let early_attestation = {
                let proto_block = chain.fork_choice.read().get_block(&block_root).unwrap();
                chain
                    .early_attester_cache
                    .add_head_block(block_root, block.clone(), proto_block, &state, &chain.spec)
                    .unwrap();
                chain
                    .early_attester_cache
                    .try_attest(slot, index, &chain.spec)
                    .unwrap()
                    .unwrap()
            };

            assert_eq!(
                attestation, early_attestation,
                "early attester cache inconsistent"
            );
        }
    }
}

/// Ensures that the early attester cache won't create an attestation to a block in a later
/// slot than the one requested.
#[test]
fn early_attester_cache_old_request() {
    let harness = BeaconChainHarness::builder(MainnetEthSpec)
        .default_spec()
        .keypairs(KEYPAIRS[..].to_vec())
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    harness.advance_slot();

    harness.extend_chain(
        2,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
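    // Two blocks were produced, so the head should be at slot 2.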
    let head = harness.chain.head().unwrap();
    assert_eq!(head.beacon_block.slot(), 2);
    let head_proto_block = harness
        .chain
        .fork_choice
        .read()
        .get_block(&head.beacon_block_root)
        .unwrap();
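    // Prime the early attester cache with the slot-2 head block.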
harness
        .chain
        .early_attester_cache
        .add_head_block(
            head.beacon_block_root,
            head.beacon_block.clone(),
            head_proto_block,
            &head.beacon_state,
            &harness.chain.spec,
        )
        .unwrap();
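    // Request an attestation for the slot before the cached head; the cache must not
    // substitute the later (slot 2) block.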
let attest_slot = head.beacon_block.slot() - 1;
    let attestation = harness
        .chain
        .produce_unaggregated_attestation(attest_slot, 0)
        .unwrap();

    assert_eq!(attestation.data.slot, attest_slot);
    let attested_block = harness
        .chain
        .get_blinded_block(&attestation.data.beacon_block_root)
        .unwrap()
        .unwrap();
    assert_eq!(attested_block.slot(), attest_slot);
}