2020-03-04 06:10:22 +00:00
|
|
|
#![cfg(not(debug_assertions))]
|
|
|
|
|
|
|
|
#[macro_use]
|
|
|
|
extern crate lazy_static;
|
|
|
|
|
|
|
|
use beacon_chain::{
|
|
|
|
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy},
|
Use the forwards iterator more often (#2376)
## Issue Addressed
NA
## Primary Change
When investigating memory usage, I noticed that retrieving a block from an early slot (e.g., slot 900) would cause a sharp increase in the memory footprint (from 400mb to 800mb+) which seemed to be ever-lasting.
After some investigation, I found that the reverse iteration from the head back to that slot was the likely culprit. To counter this, I've switched the `BeaconChain::block_root_at_slot` to use the forwards iterator, instead of the reverse one.
I also noticed that the networking stack is using `BeaconChain::root_at_slot` to check if a peer is relevant (`check_peer_relevance`). Perhaps the steep, seemingly-random-but-consistent increases in memory usage are caused by the use of this function.
Using the forwards iterator with the HTTP API alleviated the sharp increases in memory usage. It also made the response much faster (before it felt like it took 1-2s, now it feels instant).
## Additional Changes
In the process I also noticed that we have two functions for getting block roots:
- `BeaconChain::block_root_at_slot`: returns `None` for a skip slot.
- `BeaconChain::root_at_slot`: returns the previous root for a skip slot.
I unified these two functions into `block_root_at_slot` and added the `WhenSlotSkipped` enum. Now, the caller must be explicit about the skip-slot behaviour when requesting a root.
Additionally, I replaced `vec![]` with `Vec::with_capacity` in `store::chunked_vector::range_query`. I stumbled across this whilst debugging and made this modification to see what effect it would have (not much). It seems like a decent change to keep around, but I'm not concerned either way.
Also, `BeaconChain::get_ancestor_block_root` is unused, so I got rid of it :wastebasket:.
## Additional Info
I haven't also done the same for state roots here. Whilst it's possible and a good idea, it's more work since the fwds iterators are presently block-roots-specific.
Whilst there's a few places a reverse iteration of state roots could be triggered (e.g., attestation production, HTTP API), they're nowhere near as common as the `check_peer_relevance` call. As such, I think we should get this PR merged first, then come back for the state root iters. I made an issue here https://github.com/sigp/lighthouse/issues/2377.
2021-05-31 04:18:20 +00:00
|
|
|
StateSkipConfig, WhenSlotSkipped,
|
2020-03-04 06:10:22 +00:00
|
|
|
};
|
2020-06-16 01:34:04 +00:00
|
|
|
use store::config::StoreConfig;
|
2020-03-04 06:10:22 +00:00
|
|
|
use tree_hash::TreeHash;
|
2020-03-04 23:35:39 +00:00
|
|
|
use types::{AggregateSignature, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot};
|
2020-03-04 06:10:22 +00:00
|
|
|
|
|
|
|
pub const VALIDATOR_COUNT: usize = 16;
|
|
|
|
|
|
|
|
lazy_static! {
|
|
|
|
/// A cached set of keys.
|
|
|
|
static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// This test builds a chain that is just long enough to finalize an epoch then it produces an
|
|
|
|
/// attestation at each slot from genesis through to three epochs past the head.
|
|
|
|
///
|
|
|
|
/// It checks the produced attestation against some locally computed values.
|
|
|
|
#[test]
|
|
|
|
fn produces_attestations() {
|
|
|
|
let num_blocks_produced = MainnetEthSpec::slots_per_epoch() * 4;
|
2021-01-20 06:52:37 +00:00
|
|
|
let additional_slots_tested = MainnetEthSpec::slots_per_epoch() * 3;
|
2020-03-04 06:10:22 +00:00
|
|
|
|
2020-10-19 05:58:39 +00:00
|
|
|
let harness = BeaconChainHarness::new_with_store_config(
|
2020-06-16 01:34:04 +00:00
|
|
|
MainnetEthSpec,
|
|
|
|
KEYPAIRS[..].to_vec(),
|
|
|
|
StoreConfig::default(),
|
|
|
|
);
|
2020-03-04 06:10:22 +00:00
|
|
|
|
|
|
|
let chain = &harness.chain;
|
|
|
|
|
2021-01-20 06:52:37 +00:00
|
|
|
// Test all valid committee indices for all slots in the chain.
|
|
|
|
// for slot in 0..=current_slot.as_u64() + MainnetEthSpec::slots_per_epoch() * 3 {
|
|
|
|
for slot in 0..=num_blocks_produced + additional_slots_tested {
|
|
|
|
if slot > 0 && slot <= num_blocks_produced {
|
|
|
|
harness.advance_slot();
|
2020-03-04 06:10:22 +00:00
|
|
|
|
2021-01-20 06:52:37 +00:00
|
|
|
harness.extend_chain(
|
|
|
|
1,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::AllValidators,
|
|
|
|
);
|
|
|
|
}
|
2020-03-04 06:10:22 +00:00
|
|
|
|
|
|
|
let slot = Slot::from(slot);
|
2020-10-19 05:58:39 +00:00
|
|
|
let mut state = chain
|
2020-03-04 06:10:22 +00:00
|
|
|
.state_at_slot(slot, StateSkipConfig::WithStateRoots)
|
|
|
|
.expect("should get state");
|
|
|
|
|
2021-01-20 06:52:37 +00:00
|
|
|
let block_slot = if slot <= num_blocks_produced {
|
2020-03-04 06:10:22 +00:00
|
|
|
slot
|
2021-01-20 06:52:37 +00:00
|
|
|
} else {
|
|
|
|
Slot::from(num_blocks_produced)
|
2020-03-04 06:10:22 +00:00
|
|
|
};
|
2021-01-20 06:52:37 +00:00
|
|
|
|
2020-03-04 06:10:22 +00:00
|
|
|
let block = chain
|
Use the forwards iterator more often (#2376)
## Issue Addressed
NA
## Primary Change
When investigating memory usage, I noticed that retrieving a block from an early slot (e.g., slot 900) would cause a sharp increase in the memory footprint (from 400mb to 800mb+) which seemed to be ever-lasting.
After some investigation, I found that the reverse iteration from the head back to that slot was the likely culprit. To counter this, I've switched the `BeaconChain::block_root_at_slot` to use the forwards iterator, instead of the reverse one.
I also noticed that the networking stack is using `BeaconChain::root_at_slot` to check if a peer is relevant (`check_peer_relevance`). Perhaps the steep, seemingly-random-but-consistent increases in memory usage are caused by the use of this function.
Using the forwards iterator with the HTTP API alleviated the sharp increases in memory usage. It also made the response much faster (before it felt like to took 1-2s, now it feels instant).
## Additional Changes
In the process I also noticed that we have two functions for getting block roots:
- `BeaconChain::block_root_at_slot`: returns `None` for a skip slot.
- `BeaconChain::root_at_slot`: returns the previous root for a skip slot.
I unified these two functions into `block_root_at_slot` and added the `WhenSlotSkipped` enum. Now, the caller must be explicit about the skip-slot behaviour when requesting a root.
Additionally, I replaced `vec![]` with `Vec::with_capacity` in `store::chunked_vector::range_query`. I stumbled across this whilst debugging and made this modification to see what effect it would have (not much). It seems like a decent change to keep around, but I'm not concerned either way.
Also, `BeaconChain::get_ancestor_block_root` is unused, so I got rid of it :wastebasket:.
## Additional Info
I haven't also done the same for state roots here. Whilst it's possible and a good idea, it's more work since the fwds iterators are presently block-roots-specific.
Whilst there's a few places a reverse iteration of state roots could be triggered (e.g., attestation production, HTTP API), they're no where near as common as the `check_peer_relevance` call. As such, I think we should get this PR merged first, then come back for the state root iters. I made an issue here https://github.com/sigp/lighthouse/issues/2377.
2021-05-31 04:18:20 +00:00
|
|
|
.block_at_slot(block_slot, WhenSlotSkipped::Prev)
|
2020-03-04 06:10:22 +00:00
|
|
|
.expect("should get block")
|
|
|
|
.expect("block should not be skipped");
|
2020-03-04 23:35:39 +00:00
|
|
|
let block_root = block.message.tree_hash_root();
|
2020-03-04 06:10:22 +00:00
|
|
|
|
|
|
|
let epoch_boundary_slot = state
|
|
|
|
.current_epoch()
|
|
|
|
.start_slot(MainnetEthSpec::slots_per_epoch());
|
|
|
|
let target_root = if state.slot == epoch_boundary_slot {
|
|
|
|
block_root
|
|
|
|
} else {
|
|
|
|
*state
|
|
|
|
.get_block_root(epoch_boundary_slot)
|
|
|
|
.expect("should get target block root")
|
|
|
|
};
|
|
|
|
|
2020-10-19 05:58:39 +00:00
|
|
|
state
|
|
|
|
.build_committee_cache(RelativeEpoch::Current, &harness.chain.spec)
|
|
|
|
.unwrap();
|
2020-03-04 06:10:22 +00:00
|
|
|
let committee_cache = state
|
|
|
|
.committee_cache(RelativeEpoch::Current)
|
|
|
|
.expect("should get committee_cache");
|
|
|
|
|
|
|
|
let committee_count = committee_cache.committees_per_slot();
|
|
|
|
|
|
|
|
for index in 0..committee_count {
|
|
|
|
let committee_len = committee_cache
|
|
|
|
.get_beacon_committee(slot, index)
|
|
|
|
.expect("should get committee for slot")
|
|
|
|
.committee
|
|
|
|
.len();
|
|
|
|
|
|
|
|
let attestation = chain
|
2020-05-06 11:42:56 +00:00
|
|
|
.produce_unaggregated_attestation(slot, index)
|
2020-03-04 06:10:22 +00:00
|
|
|
.expect("should produce attestation");
|
|
|
|
|
|
|
|
let data = &attestation.data;
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
attestation.aggregation_bits.len(),
|
|
|
|
committee_len,
|
|
|
|
"bad committee len"
|
|
|
|
);
|
|
|
|
assert!(
|
|
|
|
attestation.aggregation_bits.is_zero(),
|
|
|
|
"some committee bits are set"
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
attestation.signature,
|
2020-07-25 02:03:18 +00:00
|
|
|
AggregateSignature::empty(),
|
2020-03-04 06:10:22 +00:00
|
|
|
"bad signature"
|
|
|
|
);
|
|
|
|
assert_eq!(data.index, index, "bad index");
|
|
|
|
assert_eq!(data.slot, slot, "bad slot");
|
|
|
|
assert_eq!(data.beacon_block_root, block_root, "bad block root");
|
|
|
|
assert_eq!(
|
|
|
|
data.source, state.current_justified_checkpoint,
|
|
|
|
"bad source"
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
data.source, state.current_justified_checkpoint,
|
|
|
|
"bad source"
|
|
|
|
);
|
|
|
|
assert_eq!(data.target.epoch, state.current_epoch(), "bad target epoch");
|
|
|
|
assert_eq!(data.target.root, target_root, "bad target root");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|