2020-05-08 23:37:21 +00:00
|
|
|
#![cfg(not(debug_assertions))]
|
2019-11-26 23:54:46 +00:00
|
|
|
|
|
|
|
#[macro_use]
|
|
|
|
extern crate lazy_static;
|
|
|
|
|
2020-07-02 23:47:31 +00:00
|
|
|
#[macro_use]
|
|
|
|
extern crate slog;
|
|
|
|
extern crate slog_term;
|
|
|
|
|
|
|
|
use crate::slog::Drain;
|
2020-05-06 11:42:56 +00:00
|
|
|
use beacon_chain::attestation_verification::Error as AttnError;
|
2019-11-26 23:54:46 +00:00
|
|
|
use beacon_chain::test_utils::{
|
|
|
|
AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
|
|
|
|
};
|
2020-04-20 09:59:56 +00:00
|
|
|
use beacon_chain::BeaconSnapshot;
|
2020-05-06 11:42:56 +00:00
|
|
|
use beacon_chain::StateSkipConfig;
|
2019-11-26 23:54:46 +00:00
|
|
|
use rand::Rng;
|
2020-04-20 09:59:56 +00:00
|
|
|
use std::collections::HashMap;
|
|
|
|
use std::collections::HashSet;
|
2019-11-26 23:54:46 +00:00
|
|
|
use std::sync::Arc;
|
2020-03-04 05:48:35 +00:00
|
|
|
use store::{
|
|
|
|
iter::{BlockRootsIterator, StateRootsIterator},
|
2020-06-16 01:34:04 +00:00
|
|
|
HotColdDB, LevelDB, StoreConfig,
|
2020-03-04 05:48:35 +00:00
|
|
|
};
|
2019-11-26 23:54:46 +00:00
|
|
|
use tempfile::{tempdir, TempDir};
|
|
|
|
use tree_hash::TreeHash;
|
|
|
|
use types::test_utils::{SeedableRng, XorShiftRng};
|
|
|
|
use types::*;
|
|
|
|
|
|
|
|
// Validator-set sizes used by the tests below.
// Should ideally be divisible by 3 (so the chain can be split into thirds for fork tests).
pub const LOW_VALIDATOR_COUNT: usize = 24;
// Larger set used where multiple committees per slot are required.
pub const HIGH_VALIDATOR_COUNT: usize = 64;
|
2019-11-26 23:54:46 +00:00
|
|
|
|
|
|
|
lazy_static! {
    /// A cached set of keys.
    // Generated once for the largest validator count so every test can slice
    // the prefix it needs without re-deriving keypairs.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(HIGH_VALIDATOR_COUNT);
}
|
|
|
|
|
|
|
|
// All tests run against the minimal spec for speed.
type E = MinimalEthSpec;
// Harness backed by an on-disk (hot/cold) store rather than a memory store.
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
|
|
|
|
|
2020-06-16 01:34:04 +00:00
|
|
|
fn get_store(db_path: &TempDir) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
|
2019-11-26 23:54:46 +00:00
|
|
|
let spec = MinimalEthSpec::default_spec();
|
|
|
|
let hot_path = db_path.path().join("hot_db");
|
|
|
|
let cold_path = db_path.path().join("cold_db");
|
2020-02-10 00:30:21 +00:00
|
|
|
let config = StoreConfig::default();
|
2020-07-02 23:47:31 +00:00
|
|
|
|
|
|
|
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
|
|
|
|
let drain = slog_term::FullFormat::new(decorator).build();
|
|
|
|
let log = slog::Logger::root(std::sync::Mutex::new(drain).fuse(), o!());
|
|
|
|
|
2019-11-26 23:54:46 +00:00
|
|
|
Arc::new(
|
2020-05-31 22:13:49 +00:00
|
|
|
HotColdDB::open(&hot_path, &cold_path, config, spec, log)
|
2019-12-06 03:29:06 +00:00
|
|
|
.expect("disk store should initialize"),
|
2019-11-26 23:54:46 +00:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-06-16 01:34:04 +00:00
|
|
|
fn get_harness(
|
|
|
|
store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>,
|
|
|
|
validator_count: usize,
|
|
|
|
) -> TestHarness {
|
Prepare for public testnet (#628)
* Update to spec v0.9.0
* Update to v0.9.1
* Bump spec tags for v0.9.1
* Formatting, fix CI failures
* Resolve accidental KeyPair merge conflict
* Document new BeaconState functions
* Add `validator` changes from `validator-to-rest`
* Add initial (failing) REST api tests
* Fix signature parsing
* Add more tests
* Refactor http router
* Add working tests for publish beacon block
* Add validator duties tests
* Move account_manager under `lighthouse` binary
* Unify logfile handling in `environment` crate.
* Fix incorrect cache drops in `advance_caches`
* Update fork choice for v0.9.1
* Add `deposit_contract` crate
* Add progress on validator onboarding
* Add unfinished attesation code
* Update account manager CLI
* Write eth1 data file as hex string
* Integrate ValidatorDirectory with validator_client
* Move ValidatorDirectory into validator_client
* Clean up some FIXMEs
* Add beacon_chain_sim
* Fix a few docs/logs
* Expand `beacon_chain_sim`
* Fix spec for `beacon_chain_sim
* More testing for api
* Start work on attestation endpoint
* Reject empty attestations
* Allow attestations to genesis block
* Add working tests for `rest_api` validator endpoint
* Remove grpc from beacon_node
* Start heavy refactor of validator client
- Block production is working
* Prune old validator client files
* Start works on attestation service
* Add attestation service to validator client
* Use full pubkey for validator directories
* Add validator duties post endpoint
* Use par_iter for keypair generation
* Use bulk duties request in validator client
* Add version http endpoint tests
* Add interop keys and startup wait
* Ensure a prompt exit
* Add duties pruning
* Fix compile error in beacon node tests
* Add github workflow
* Modify rust.yaml
* Modify gitlab actions
* Add to CI file
* Add sudo to CI npm install
* Move cargo fmt to own job in tests
* Fix cargo fmt in CI
* Add rustup update before cargo fmt
* Change name of CI job
* Make other CI jobs require cargo fmt
* Add CI badge
* Remove gitlab and travis files
* Add different http timeout for debug
* Update docker file, use makefile in CI
* Use make in the dockerfile, skip the test
* Use the makefile for debug GI test
* Update book
* Tidy grpc and misc things
* Apply discv5 fixes
* Address other minor issues
* Fix warnings
* Attempt fix for addr parsing
* Tidy validator config, CLIs
* Tidy comments
* Tidy signing, reduce ForkService duplication
* Fail if skipping too many slots
* Set default recent genesis time to 0
* Add custom http timeout to validator
* Fix compile bug in node_test_rig
* Remove old bootstrap flag from val CLI
* Update docs
* Tidy val client
* Change val client log levels
* Add comments, more validity checks
* Fix compile error, add comments
* Undo changes to eth2-libp2p/src
* Reduce duplication of keypair generation
* Add more logging for validator duties
* Fix beacon_chain_sim, nitpicks
* Fix compile error, minor nits
* Update to use v0.9.2 version of deposit contract
* Add efforts to automate eth1 testnet deployment
* Fix lcli testnet deployer
* Modify bn CLI to parse eth2_testnet_dir
* Progress with account_manager deposit tools
* Make account manager submit deposits
* Add password option for submitting deposits
* Allow custom deposit amount
* Add long names to lcli clap
* Add password option to lcli deploy command
* Fix minor bugs whilst testing
* Address Michael's comments
* Add refund-deposit-contract to lcli
* Use time instead of skip count for denying long skips
* Improve logging for eth1
* Fix bug with validator services exiting on error
* Drop the block cache after genesis
* Modify eth1 testnet config
* Improve eth1 logging
* Make validator wait until genesis time
* Fix bug in eth1 voting
* Add more logging to eth1 voting
* Handle errors in eth1 http module
* Set SECONDS_PER_DAY to sensible minimum
* Shorten delay before testnet start
* Ensure eth1 block is produced without any votes
* Improve eth1 logging
* Fix broken tests in eth1
* Tidy code in rest_api
* Fix failing test in deposit_contract
* Make CLI args more consistent
* Change validator/duties endpoint
* Add time-based skip slot limiting
* Add new error type missed in previous commit
* Add log when waiting for genesis
* Refactor beacon node CLI
* Remove unused dep
* Add lcli eth1-genesis command
* Fix bug in master merge
* Apply clippy lints to beacon node
* Add support for YamlConfig in Eth2TestnetDir
* Upgrade tesnet deposit contract version
* Remove unnecessary logging and correct formatting
* Add a hardcoded eth2 testnet config
* Ensure http server flag works. Overwrite configs with flags.
* Ensure boot nodes are loaded from testnet dir
* Fix account manager CLI bugs
* Fix bugs with beacon node cli
* Allow testnet dir without boot nodes
* Write genesis state as SSZ
* Remove ---/n from the start of testnet_dir files
* Set default libp2p address
* Tidy account manager CLI, add logging
* Add check to see if testnet dir exists
* Apply reviewers suggestions
* Add HeadTracker struct
* Add fork choice persistence
* Shorten slot time for simulator
* Add the /beacon/heads API endpoint
* Update hardcoded testnet
* Add tests for BeaconChain persistence + fix bugs
* Extend BeaconChain persistence testing
* Ensure chain is finalized b4 persistence tests
* Ensure boot_enr.yaml is include in binary
* Refactor beacon_chain_sim
* Move files about in beacon sim
* Update beacon_chain_sim
* Fix bug with deposit inclusion
* Increase log in genesis service, fix todo
* Tidy sim, fix broken rest_api tests
* Fix more broken tests
* Update testnet
* Fix broken rest api test
* Tidy account manager CLI
* Use tempdir for account manager
* Stop hardcoded testnet dir from creating dir
* Rename Eth2TestnetDir to Eth2TestnetConfig
* Change hardcoded -> hard_coded
* Tidy account manager
* Add log to account manager
* Tidy, ensure head tracker is loaded from disk
* Tidy beacon chain builder
* Tidy eth1_chain
* Adds log support for simulator
* Revert "Adds log support for simulator"
This reverts commit ec77c66a052350f551db145cf20f213823428dd3.
* Adds log support for simulator
* Tidy after self-review
* Change default log level
* Address Michael's delicious PR comments
* Fix off-by-one in tests
2019-12-03 04:28:57 +00:00
|
|
|
let harness = BeaconChainHarness::new_with_disk_store(
|
2019-11-26 23:54:46 +00:00
|
|
|
MinimalEthSpec,
|
|
|
|
store,
|
|
|
|
KEYPAIRS[0..validator_count].to_vec(),
|
|
|
|
);
|
|
|
|
harness.advance_slot();
|
|
|
|
harness
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
fn full_participation_no_skips() {
    // Five full epochs of blocks, every validator attesting, no skipped slots.
    let block_count = E::slots_per_epoch() * 5;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    harness.extend_chain(
        block_count as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // The chain should finalize, the store should split, and every block
    // (plus genesis) should be present in the dump and the iterators.
    check_finalization(&harness, block_count);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, block_count + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
#[test]
fn randomised_skips() {
    // Walk five epochs' worth of slots, producing a block in ~80% of them.
    let num_slots = E::slots_per_epoch() * 5;
    let mut num_blocks_produced = 0;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
    // Fixed seed keeps the skip pattern deterministic across runs.
    let rng = &mut XorShiftRng::from_seed([42; 16]);

    // Slot of the most recently produced block (the parent for the next one).
    let mut head_slot = 0;

    for slot in 1..=num_slots {
        if rng.gen_bool(0.8) {
            // Produce a block at `slot` on top of the block at `head_slot`,
            // spanning any slots skipped in between.
            harness.extend_chain(
                1,
                BlockStrategy::ForkCanonicalChainAt {
                    previous_slot: Slot::new(head_slot),
                    first_slot: Slot::new(slot),
                },
                AttestationStrategy::AllValidators,
            );
            harness.advance_slot();
            num_blocks_produced += 1;
            head_slot = slot;
        } else {
            // Skip this slot: advance the clock without a block.
            harness.advance_slot();
        }
    }

    let state = &harness.chain.head().expect("should get head").beacon_state;

    assert_eq!(state.slot, num_slots, "head should be at the current slot");

    // With random skips finalization isn't guaranteed, so (unlike the
    // no-skips test) we don't call check_finalization here.
    check_split_slot(&harness, store);
    check_chain_dump(&harness, num_blocks_produced + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
#[test]
fn long_skip() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Number of blocks to create in the first run, intentionally not falling on an epoch
    // boundary in order to check that the DB hot -> cold migration is capable of reaching
    // back across the skip distance, and correctly migrating those extra non-finalized states.
    let initial_blocks = E::slots_per_epoch() * 5 + E::slots_per_epoch() / 2;
    // A very long skip: several full state-roots vectors' worth of empty slots.
    let skip_slots = E::slots_per_historical_root() as u64 * 8;
    // Create the minimum ~2.5 epochs of extra blocks required to re-finalize the chain.
    // Having this set lower ensures that we start justifying and finalizing quickly after a skip.
    let final_blocks = 2 * E::slots_per_epoch() + E::slots_per_epoch() / 2;

    // 1. Build and finalize the initial chain segment.
    harness.extend_chain(
        initial_blocks as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    check_finalization(&harness, initial_blocks);

    // 2. Skip slots
    for _ in 0..skip_slots {
        harness.advance_slot();
    }

    // 3. Produce more blocks, establish a new finalized epoch
    harness.extend_chain(
        final_blocks as usize,
        BlockStrategy::ForkCanonicalChainAt {
            previous_slot: Slot::new(initial_blocks),
            first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1),
        },
        AttestationStrategy::AllValidators,
    );

    check_finalization(&harness, initial_blocks + skip_slots + final_blocks);
    check_split_slot(&harness, store);
    // Chain dump counts blocks only (skipped slots contribute none) plus genesis.
    check_chain_dump(&harness, initial_blocks + final_blocks + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
/// Go forward to the point where the genesis randao value is no longer part of the vector.
///
/// This implicitly checks that:
/// 1. The chunked vector scheme doesn't attempt to store an incorrect genesis value
/// 2. We correctly load the genesis value for all required slots
/// NOTE: this test takes about a minute to run
#[test]
fn randao_genesis_storage() {
    let validator_count = 8;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), validator_count);

    // One slot short of the point at which the genesis mix rotates out of the vector.
    let num_slots = E::slots_per_epoch() * (E::epochs_per_historical_vector() - 1) as u64;

    // Check we have a non-trivial genesis value
    let genesis_value = *harness
        .chain
        .head()
        .expect("should get head")
        .beacon_state
        .get_randao_mix(Epoch::new(0))
        .expect("randao mix ok");
    assert!(!genesis_value.is_zero());

    harness.extend_chain(
        num_slots as usize - 1,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // Check that genesis value is still present
    assert!(harness
        .chain
        .head()
        .expect("should get head")
        .beacon_state
        .randao_mixes
        .iter()
        .any(|x| *x == genesis_value));

    // Then upon adding one more block, it isn't
    harness.advance_slot();
    harness.extend_chain(
        1,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    assert!(!harness
        .chain
        .head()
        .expect("should get head")
        .beacon_state
        .randao_mixes
        .iter()
        .any(|x| *x == genesis_value));

    check_finalization(&harness, num_slots);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, num_slots + 1);
    check_iterators(&harness);
}
|
|
|
|
|
|
|
|
// Check that closing and reopening a freezer DB restores the split slot to its correct value.
#[test]
fn split_slot_restore() {
    let db_path = tempdir().unwrap();

    // Open a store, finalize some epochs, and record the split slot before the
    // store is dropped (closing the underlying database).
    let split_slot = {
        let store = get_store(&db_path);
        let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

        harness.extend_chain(
            (4 * E::slots_per_epoch()) as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        );

        store.get_split_slot()
    };
    // Finalization must have moved the split past genesis for the test to be meaningful.
    assert_ne!(split_slot, Slot::new(0));

    // Re-open the store
    let reopened = get_store(&db_path);

    assert_eq!(reopened.get_split_slot(), split_slot);
}
|
|
|
|
|
2020-01-08 02:58:01 +00:00
|
|
|
// Check attestation processing and `load_epoch_boundary_state` in the presence of a split DB.
|
|
|
|
// This is a bit of a monster test in that it tests lots of different things, but until they're
|
|
|
|
// tested elsewhere, this is as good a place as any.
|
|
|
|
#[test]
|
|
|
|
fn epoch_boundary_state_attestation_processing() {
|
|
|
|
let num_blocks_produced = E::slots_per_epoch() * 5;
|
|
|
|
let db_path = tempdir().unwrap();
|
|
|
|
let store = get_store(&db_path);
|
2020-04-20 02:34:37 +00:00
|
|
|
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
2020-01-08 02:58:01 +00:00
|
|
|
|
|
|
|
let late_validators = vec![0, 1];
|
2020-04-20 02:34:37 +00:00
|
|
|
let timely_validators = (2..LOW_VALIDATOR_COUNT).collect::<Vec<_>>();
|
2020-01-08 02:58:01 +00:00
|
|
|
|
|
|
|
let mut late_attestations = vec![];
|
|
|
|
|
|
|
|
for _ in 0..num_blocks_produced {
|
|
|
|
harness.extend_chain(
|
|
|
|
1,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::SomeValidators(timely_validators.clone()),
|
|
|
|
);
|
|
|
|
|
|
|
|
let head = harness.chain.head().expect("head ok");
|
2020-05-06 11:42:56 +00:00
|
|
|
late_attestations.extend(harness.get_unaggregated_attestations(
|
2020-01-08 02:58:01 +00:00
|
|
|
&AttestationStrategy::SomeValidators(late_validators.clone()),
|
|
|
|
&head.beacon_state,
|
|
|
|
head.beacon_block_root,
|
2020-02-10 23:19:36 +00:00
|
|
|
head.beacon_block.slot(),
|
2020-01-08 02:58:01 +00:00
|
|
|
));
|
|
|
|
|
|
|
|
harness.advance_slot();
|
|
|
|
}
|
|
|
|
|
|
|
|
check_finalization(&harness, num_blocks_produced);
|
|
|
|
check_split_slot(&harness, store.clone());
|
|
|
|
check_chain_dump(&harness, num_blocks_produced + 1);
|
|
|
|
check_iterators(&harness);
|
|
|
|
|
|
|
|
let mut checked_pre_fin = false;
|
|
|
|
|
2020-06-18 09:11:03 +00:00
|
|
|
for (attestation, subnet_id) in late_attestations.into_iter().flatten() {
|
2020-01-08 02:58:01 +00:00
|
|
|
// load_epoch_boundary_state is idempotent!
|
|
|
|
let block_root = attestation.data.beacon_block_root;
|
2020-02-10 23:19:36 +00:00
|
|
|
let block = store.get_block(&block_root).unwrap().expect("block exists");
|
2020-01-08 02:58:01 +00:00
|
|
|
let epoch_boundary_state = store
|
2020-02-10 23:19:36 +00:00
|
|
|
.load_epoch_boundary_state(&block.state_root())
|
2020-01-08 02:58:01 +00:00
|
|
|
.expect("no error")
|
|
|
|
.expect("epoch boundary state exists");
|
|
|
|
let ebs_of_ebs = store
|
|
|
|
.load_epoch_boundary_state(&epoch_boundary_state.canonical_root())
|
|
|
|
.expect("no error")
|
|
|
|
.expect("ebs of ebs exists");
|
|
|
|
assert_eq!(epoch_boundary_state, ebs_of_ebs);
|
|
|
|
|
|
|
|
// If the attestation is pre-finalization it should be rejected.
|
|
|
|
let finalized_epoch = harness
|
|
|
|
.chain
|
|
|
|
.head_info()
|
|
|
|
.expect("head ok")
|
|
|
|
.finalized_checkpoint
|
|
|
|
.epoch;
|
2020-05-06 11:42:56 +00:00
|
|
|
|
2020-01-08 02:58:01 +00:00
|
|
|
let res = harness
|
|
|
|
.chain
|
2020-06-18 09:11:03 +00:00
|
|
|
.verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);
|
2020-03-05 06:19:35 +00:00
|
|
|
|
2020-05-06 11:42:56 +00:00
|
|
|
let current_slot = harness.chain.slot().expect("should get slot");
|
2020-05-21 00:21:44 +00:00
|
|
|
let expected_attestation_slot = attestation.data.slot;
|
2020-05-06 11:42:56 +00:00
|
|
|
// Extra -1 to handle gossip clock disparity.
|
2020-05-21 00:21:44 +00:00
|
|
|
let expected_earliest_permissible_slot = current_slot - E::slots_per_epoch() - 1;
|
2020-03-05 06:19:35 +00:00
|
|
|
|
2020-05-21 00:21:44 +00:00
|
|
|
if expected_attestation_slot <= finalized_epoch.start_slot(E::slots_per_epoch())
|
|
|
|
|| expected_attestation_slot < expected_earliest_permissible_slot
|
2020-03-05 06:19:35 +00:00
|
|
|
{
|
2020-01-08 02:58:01 +00:00
|
|
|
checked_pre_fin = true;
|
2020-05-21 00:21:44 +00:00
|
|
|
assert!(matches!(
|
2020-05-06 11:42:56 +00:00
|
|
|
res.err().unwrap(),
|
|
|
|
AttnError::PastSlot {
|
|
|
|
attestation_slot,
|
|
|
|
earliest_permissible_slot,
|
|
|
|
}
|
2020-05-21 00:21:44 +00:00
|
|
|
if attestation_slot == expected_attestation_slot && earliest_permissible_slot == expected_earliest_permissible_slot
|
|
|
|
));
|
2020-01-08 02:58:01 +00:00
|
|
|
} else {
|
2020-05-06 11:42:56 +00:00
|
|
|
res.expect("should have verified attetation");
|
2020-01-08 02:58:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
assert!(checked_pre_fin);
|
|
|
|
}
|
|
|
|
|
2020-03-04 05:48:35 +00:00
|
|
|
#[test]
fn delete_blocks_and_states() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    let unforked_blocks = 4 * E::slots_per_epoch();

    // Finalize an initial portion of the chain.
    harness.extend_chain(
        unforked_blocks as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // Create a fork post-finalization.
    // Honest majority (2/3) vs faulty minority (1/3) so the honest fork wins.
    let two_thirds = (LOW_VALIDATOR_COUNT / 3) * 2;
    let honest_validators: Vec<usize> = (0..two_thirds).collect();
    let faulty_validators: Vec<usize> = (two_thirds..LOW_VALIDATOR_COUNT).collect();

    let fork_blocks = 2 * E::slots_per_epoch();

    let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block(
        &honest_validators,
        &faulty_validators,
        fork_blocks as usize,
        fork_blocks as usize,
    );

    assert_ne!(honest_head, faulty_head, "forks should be distinct");
    let head_info = harness.chain.head_info().expect("should get head");
    assert_eq!(head_info.slot, unforked_blocks + fork_blocks);

    assert_eq!(
        head_info.block_root, honest_head,
        "the honest chain should be the canonical chain",
    );

    let faulty_head_block = store
        .get_block(&faulty_head)
        .expect("no errors")
        .expect("faulty head block exists");

    let faulty_head_state = store
        .get_state(
            &faulty_head_block.state_root(),
            Some(faulty_head_block.slot()),
        )
        .expect("no db error")
        .expect("faulty head state exists");

    // All states on the faulty fork, back to (but excluding) the fork point.
    let states_to_delete = StateRootsIterator::new(store.clone(), &faulty_head_state)
        .map(Result::unwrap)
        .take_while(|(_, slot)| *slot > unforked_blocks)
        .collect::<Vec<_>>();

    // Delete faulty fork
    // Attempting to load those states should find them unavailable
    for (state_root, slot) in &states_to_delete {
        store.delete_state(state_root, *slot).unwrap();
        assert_eq!(store.get_state(state_root, Some(*slot)).unwrap(), None);
    }

    // Double-deleting should also be OK (deleting non-existent things is fine)
    for (state_root, slot) in &states_to_delete {
        store.delete_state(state_root, *slot).unwrap();
    }

    // Deleting the blocks from the fork should remove them completely
    let blocks_to_delete = BlockRootsIterator::new(store.clone(), &faulty_head_state)
        // Extra +1 here accounts for the skipped slot that started this fork
        .map(Result::unwrap)
        .take_while(|(_, slot)| *slot > unforked_blocks + 1)
        .collect::<Vec<_>>();

    for (block_root, _) in blocks_to_delete {
        store.delete_block(&block_root).unwrap();
        assert_eq!(store.get_block(&block_root).unwrap(), None);
    }

    // Deleting frozen states should do nothing
    // (states before the split slot live in the freezer, not the hot DB).
    let split_slot = store.get_split_slot();
    let finalized_states = harness
        .chain
        .rev_iter_state_roots()
        .expect("rev iter ok")
        .map(Result::unwrap)
        .filter(|(_, slot)| *slot < split_slot);

    for (state_root, slot) in finalized_states {
        store.delete_state(&state_root, slot).unwrap();
    }

    // After all that, the chain dump should still be OK
    check_chain_dump(&harness, unforked_blocks + fork_blocks + 1);
}
|
|
|
|
|
2020-04-20 02:34:37 +00:00
|
|
|
// Check that we never produce invalid blocks when there is deep forking that changes the shuffling.
|
|
|
|
// See https://github.com/sigp/lighthouse/issues/845
|
|
|
|
fn multi_epoch_fork_valid_blocks_test(
|
|
|
|
initial_blocks: usize,
|
|
|
|
num_fork1_blocks: usize,
|
|
|
|
num_fork2_blocks: usize,
|
|
|
|
num_fork1_validators: usize,
|
|
|
|
) -> (TempDir, TestHarness, Hash256, Hash256) {
|
|
|
|
let db_path = tempdir().unwrap();
|
|
|
|
let store = get_store(&db_path);
|
|
|
|
let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
|
|
|
|
|
|
|
|
// Create the initial portion of the chain
|
|
|
|
if initial_blocks > 0 {
|
|
|
|
harness.extend_chain(
|
|
|
|
initial_blocks,
|
|
|
|
BlockStrategy::OnCanonicalHead,
|
|
|
|
AttestationStrategy::AllValidators,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
assert!(num_fork1_validators <= LOW_VALIDATOR_COUNT);
|
|
|
|
let fork1_validators: Vec<usize> = (0..num_fork1_validators).collect();
|
|
|
|
let fork2_validators: Vec<usize> = (num_fork1_validators..LOW_VALIDATOR_COUNT).collect();
|
|
|
|
|
|
|
|
let (head1, head2) = harness.generate_two_forks_by_skipping_a_block(
|
|
|
|
&fork1_validators,
|
|
|
|
&fork2_validators,
|
|
|
|
num_fork1_blocks,
|
|
|
|
num_fork2_blocks,
|
|
|
|
);
|
|
|
|
|
|
|
|
(db_path, harness, head1, head2)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This is the minimal test of block production with different shufflings.
#[test]
fn block_production_different_shuffling_early() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    // Fork just before an epoch boundary so the two chains diverge in shuffling
    // after roughly one epoch on each side.
    let trunk_len = slots_per_epoch - 2;
    let fork_len = slots_per_epoch + 3;
    multi_epoch_fork_valid_blocks_test(trunk_len, fork_len, fork_len, LOW_VALIDATOR_COUNT / 2);
}
|
|
|
|
|
|
|
|
#[test]
fn block_production_different_shuffling_long() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    // Deeper variant: two epochs of trunk, then three epochs on each fork.
    let trunk_len = 2 * slots_per_epoch - 2;
    let fork_len = 3 * slots_per_epoch;
    multi_epoch_fork_valid_blocks_test(trunk_len, fork_len, fork_len, LOW_VALIDATOR_COUNT / 2);
}
|
|
|
|
|
|
|
|
// Check that the op pool safely includes multiple attestations per block when necessary.
// This checks the correctness of the shuffling compatibility memoization.
#[test]
fn multiple_attestations_per_block() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    // High validator count so each slot has more than one committee.
    let harness = get_harness(store, HIGH_VALIDATOR_COUNT);
    let chain = &harness.chain;

    // NOTE(review): `E` is `MinimalEthSpec` but the slot count here uses
    // `MainnetEthSpec::slots_per_epoch()` — this just makes the chain longer
    // than 3 minimal epochs; presumably intentional, but worth confirming.
    harness.extend_chain(
        MainnetEthSpec::slots_per_epoch() as usize * 3,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    let head = chain.head().unwrap();
    let committees_per_slot = head
        .beacon_state
        .get_committee_count_at_slot(head.beacon_state.slot)
        .unwrap();
    // The test is vacuous unless blocks must pack multiple attestations.
    assert!(committees_per_slot > 1);

    for snapshot in chain.chain_dump().unwrap() {
        assert_eq!(
            snapshot.beacon_block.message.body.attestations.len() as u64,
            // Genesis and slot-1 blocks have no prior attestations to include.
            if snapshot.beacon_block.slot() <= 1 {
                0
            } else {
                committees_per_slot
            }
        );
    }
}
|
|
|
|
|
|
|
|
#[test]
fn shuffling_compatible_linear_chain() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Skip the block at the end of the first epoch.
    let head_root = harness.extend_chain(
        4 * E::slots_per_epoch() as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // On a straight chain, every block should be shuffling-compatible with the
    // head for both the current and previous epochs, with no cutoff.
    let head_state = get_state_for_block(&harness, head_root);
    check_shuffling_compatible(&harness, &head_state, head_root, true, true, None, None);
}
|
|
|
|
|
|
|
|
#[test]
fn shuffling_compatible_missing_pivot_block() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);

    // Skip the block at the end of the first epoch.
    harness.extend_chain(
        E::slots_per_epoch() as usize - 2,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    // Two advances with no block: the epoch-boundary (pivot) block is missing.
    harness.advance_slot();
    harness.advance_slot();
    let head_block_root = harness.extend_chain(
        2 * E::slots_per_epoch() as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // Compatibility should still hold, but only up to the cutoff created by the
    // missing pivot block (the slot just before the skipped boundary).
    check_shuffling_compatible(
        &harness,
        &get_state_for_block(&harness, head_block_root),
        head_block_root,
        true,
        true,
        Some(E::slots_per_epoch() - 2),
        Some(E::slots_per_epoch() - 2),
    );
}
|
|
|
|
|
|
|
|
#[test]
fn shuffling_compatible_simple_fork() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    // Two epochs of trunk, then two three-epoch forks with half the validators each.
    let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test(
        2 * slots_per_epoch,
        3 * slots_per_epoch,
        3 * slots_per_epoch,
        LOW_VALIDATOR_COUNT / 2,
    );

    let state1 = get_state_for_block(&harness, head1);
    let state2 = get_state_for_block(&harness, head2);

    // Each head is compatible with its own fork and incompatible with the other.
    check_shuffling_compatible(&harness, &state1, head1, true, true, None, None);
    check_shuffling_compatible(&harness, &state1, head2, false, false, None, None);
    check_shuffling_compatible(&harness, &state2, head1, false, false, None, None);
    check_shuffling_compatible(&harness, &state2, head2, true, true, None, None);

    drop(db_path);
}
|
|
|
|
|
|
|
|
#[test]
fn shuffling_compatible_short_fork() {
    let slots_per_epoch = E::slots_per_epoch() as usize;
    // Short forks (just over one epoch), so the previous-epoch shuffling is
    // still shared between the two chains.
    let (db_path, harness, head1, head2) = multi_epoch_fork_valid_blocks_test(
        2 * slots_per_epoch - 2,
        slots_per_epoch + 2,
        slots_per_epoch + 2,
        LOW_VALIDATOR_COUNT / 2,
    );

    let head1_state = get_state_for_block(&harness, head1);
    let head2_state = get_state_for_block(&harness, head2);

    check_shuffling_compatible(&harness, &head1_state, head1, true, true, None, None);
    // Cross-fork: current epoch incompatible, previous epoch still compatible.
    check_shuffling_compatible(&harness, &head1_state, head2, false, true, None, None);
    // NOTE: don't check this case, as block 14 from the first chain appears valid on the second
    // chain due to it matching the second chain's block 15.
    // check_shuffling_compatible(&harness, &head2_state, head1, false, true, None, None);
    check_shuffling_compatible(
        &harness,
        &head2_state,
        head2,
        true,
        true,
        // Required because of the skipped slot.
        Some(2 * E::slots_per_epoch() - 2),
        None,
    );

    drop(db_path);
}
|
|
|
|
|
|
|
|
/// Load the post-state of the block with root `block_root` from the harness' store.
///
/// Panics if either the block or its state is missing.
fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconState<E> {
    let block = harness.chain.get_block(&block_root).unwrap().unwrap();
    let state_root = block.state_root();
    let slot = block.slot();
    // The slot hint lets the store locate the state without a root -> slot lookup.
    harness
        .chain
        .get_state(&state_root, Some(slot))
        .unwrap()
        .unwrap()
}
|
|
|
|
|
|
|
|
/// Check the invariants that apply to `shuffling_is_compatible`.
///
/// Walks every ancestor block of `head_block_root` and asserts, for each one:
/// * compatibility with `head_state`'s current-epoch shuffling holds exactly when
///   `current_epoch_valid` is set AND the block's slot is at or past the current-epoch
///   cutoff (the pivot slot derived from `min_seed_lookahead`, unless overridden by
///   `current_epoch_cutoff_slot`);
/// * the analogous condition for the previous epoch;
/// * targeting the next epoch or any epoch two-or-more before the current one is
///   never compatible.
fn check_shuffling_compatible(
    harness: &TestHarness,
    head_state: &BeaconState<E>,
    head_block_root: Hash256,
    current_epoch_valid: bool,
    previous_epoch_valid: bool,
    current_epoch_cutoff_slot: Option<u64>,
    previous_epoch_cutoff_slot: Option<u64>,
) {
    // The shuffling for epoch N is seeded `min_seed_lookahead + 1` epochs earlier, so the
    // pivot is the last slot of that earlier epoch.
    let shuffling_lookahead = harness.chain.spec.min_seed_lookahead.as_u64() + 1;
    let current_pivot_slot =
        (head_state.current_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());
    let previous_pivot_slot =
        (head_state.previous_epoch() - shuffling_lookahead).end_slot(E::slots_per_epoch());

    // Iterate the head block's ancestors back towards genesis.
    for maybe_tuple in harness
        .chain
        .rev_iter_block_roots_from(head_block_root)
        .unwrap()
    {
        let (block_root, slot) = maybe_tuple.unwrap();
        // Shuffling is compatible targeting the current epoch,
        // iff slot is greater than or equal to the current epoch pivot block
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.current_epoch(),
                &head_state
            ),
            current_epoch_valid
                && slot >= current_epoch_cutoff_slot.unwrap_or(current_pivot_slot.as_u64())
        );
        // Similarly for the previous epoch
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.previous_epoch(),
                &head_state
            ),
            previous_epoch_valid
                && slot >= previous_epoch_cutoff_slot.unwrap_or(previous_pivot_slot.as_u64())
        );
        // Targeting the next epoch should always return false
        assert_eq!(
            harness.chain.shuffling_is_compatible(
                &block_root,
                head_state.current_epoch() + 1,
                &head_state
            ),
            false
        );
        // Targeting two epochs before the current epoch should also always return false
        if head_state.current_epoch() >= 2 {
            assert_eq!(
                harness.chain.shuffling_is_compatible(
                    &block_root,
                    head_state.current_epoch() - 2,
                    &head_state
                ),
                false
            );
        }
    }
}
|
|
|
|
|
2020-04-20 09:59:56 +00:00
|
|
|
// Ensure blocks from abandoned forks are pruned from the Hot DB
#[test]
fn prunes_abandoned_fork_between_two_finalized_checkpoints() {
    const VALIDATOR_COUNT: usize = 24;
    // 2/3 of the validators — enough for the canonical chain to justify and finalize.
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    // Honest validators attest to the canonical chain; the remaining (faulty) ones
    // attest to the stray fork.
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Build one epoch of canonical blocks, then a stray fork off the head.
    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_pre_finalization, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);
    let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks(
        harness.get_head_state(),
        slot,
        slots_per_epoch - 1,
        &faulty_validators,
    );

    // Precondition: Ensure all stray_blocks blocks are still known
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness
            .chain
            .get_state(&state_hash.into(), Some(slot))
            .unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Precondition: Only genesis is finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    assert!(harness.chain.knows_head(&stray_head));

    // Trigger finalization
    let (canonical_blocks_post_finalization, _, _, _, _) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 5, &honest_validators);

    // Postcondition: New blocks got finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks_pre_finalization[&Slot::new(slots_per_epoch as u64)],
            canonical_blocks_post_finalization[&Slot::new((slots_per_epoch * 2) as u64)],
        ]
        .into_iter()
        .collect()
    );

    // Postcondition: Ensure all stray_blocks blocks have been pruned
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_none(),
            "abandoned block {} should have been pruned",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    // The abandoned fork's head must no longer be tracked as a chain head.
    assert!(!harness.chain.knows_head(&stray_head));
}
|
|
|
|
|
|
|
|
// Ensure that a block shared between an abandoned fork and the canonical chain survives
// pruning, while the fork-only blocks and states are removed.
#[test]
fn pruning_does_not_touch_abandoned_block_shared_with_canonical_chain() {
    const VALIDATOR_COUNT: usize = 24;
    // 2/3 of the validators — enough for the canonical chain to finalize.
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let all_validators: Vec<usize> = (0..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Fill up 0th epoch
    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 1st epoch
    // `shared_head` is one canonical block that the stray fork also builds upon.
    let (_, _, canonical_slot, shared_head, canonical_state) =
        harness.add_canonical_chain_blocks(state, slot, 1, &all_validators);
    let (stray_blocks, stray_states, _, stray_head, _) = harness.add_stray_blocks(
        canonical_state.clone(),
        canonical_slot,
        1,
        &faulty_validators,
    );

    // Preconditions
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Only genesis should be finalized at this point.
    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    assert!(get_blocks(&chain_dump).contains(&shared_head));

    // Trigger finalization
    let (canonical_blocks, _, _, _, _) = harness.add_canonical_chain_blocks(
        canonical_state,
        canonical_slot,
        slots_per_epoch * 5,
        &honest_validators,
    );

    // Postconditions
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)],
            canonical_blocks[&Slot::new((slots_per_epoch * 2) as u64)],
        ]
        .into_iter()
        .collect()
    );

    // Fork-only blocks and states must be gone.
    for &block_hash in stray_blocks.values() {
        assert!(
            harness
                .chain
                .get_block(&block_hash.into())
                .unwrap()
                .is_none(),
            "stray block {} should have been pruned",
            block_hash,
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    assert!(!harness.chain.knows_head(&stray_head));
    // The block shared with the canonical chain must have survived pruning.
    assert!(get_blocks(&chain_dump).contains(&shared_head));
}
|
|
|
|
|
|
|
|
// Ensure that a fork which is entirely prior to the finalized checkpoint is left intact
// by pruning.
#[test]
fn pruning_does_not_touch_blocks_prior_to_finalization() {
    const VALIDATOR_COUNT: usize = 24;
    // 2/3 of the validators — enough for the canonical chain to finalize.
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Fill up 0th epoch with canonical chain blocks
    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 1st epoch. Contains a fork.
    let (stray_blocks, stray_states, _, stray_head, _) =
        harness.add_stray_blocks(state.clone(), slot, slots_per_epoch - 1, &faulty_validators);

    // Preconditions
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Only genesis should be finalized at this point.
    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    // Trigger finalization
    let (_, _, _, _, _) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch * 4, &honest_validators);

    // Postconditions
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks_zeroth_epoch[&Slot::new(slots_per_epoch as u64)],
        ]
        .into_iter()
        .collect()
    );

    // The fork predates the finalized checkpoint, so its blocks and states must survive.
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    assert!(harness.chain.knows_head(&stray_head));
}
|
|
|
|
|
|
|
|
// Ensure that a fork which straddles the finalized checkpoint (starts before it and
// extends past it) is pruned in its entirety once finalization lands on the canonical chain.
#[test]
fn prunes_fork_running_past_finalized_checkpoint() {
    const VALIDATOR_COUNT: usize = 24;
    // 2/3 of the validators — enough for the canonical chain to finalize.
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Fill up 0th epoch with canonical chain blocks
    let slot = harness.get_chain_slot();
    let state = harness.get_head_state();
    let (canonical_blocks_zeroth_epoch, _, slot, _, state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 1st epoch. Contains a fork.
    let (stray_blocks_first_epoch, stray_states_first_epoch, stray_slot, _, stray_state) =
        harness.add_stray_blocks(state.clone(), slot, slots_per_epoch, &faulty_validators);

    let (canonical_blocks_first_epoch, _, canonical_slot, _, canonical_state) =
        harness.add_canonical_chain_blocks(state, slot, slots_per_epoch, &honest_validators);

    // Fill up 2nd epoch. Extends both the canonical chain and the fork.
    let (stray_blocks_second_epoch, stray_states_second_epoch, _, stray_head, _) = harness
        .add_stray_blocks(
            stray_state,
            stray_slot,
            slots_per_epoch - 1,
            &faulty_validators,
        );

    // Precondition: Ensure all stray_blocks blocks are still known
    let stray_blocks: HashMap<Slot, SignedBeaconBlockHash> = stray_blocks_first_epoch
        .into_iter()
        .chain(stray_blocks_second_epoch.into_iter())
        .collect();

    let stray_states: HashMap<Slot, BeaconStateHash> = stray_states_first_epoch
        .into_iter()
        .chain(stray_states_second_epoch.into_iter())
        .collect();

    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    // Precondition: Only genesis is finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    assert!(harness.chain.knows_head(&stray_head));

    // Trigger finalization
    let (canonical_blocks_second_epoch, _, _, _, _) = harness.add_canonical_chain_blocks(
        canonical_state,
        canonical_slot,
        slots_per_epoch * 6,
        &honest_validators,
    );
    // Guard against a silently non-finalizing chain, which would make the pruning
    // assertions below vacuous.
    assert_ne!(
        harness
            .chain
            .head()
            .unwrap()
            .beacon_state
            .finalized_checkpoint
            .epoch,
        0,
        "chain should have finalized"
    );

    // Postconditions
    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
        .into_iter()
        .chain(canonical_blocks_first_epoch.into_iter())
        .chain(canonical_blocks_second_epoch.into_iter())
        .collect();

    // Postcondition: New blocks got finalized
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks[&Slot::new(slots_per_epoch as u64 * 3)],
            canonical_blocks[&Slot::new(slots_per_epoch as u64 * 4)],
        ]
        .into_iter()
        .collect()
    );

    // Postcondition: Ensure all stray_blocks blocks have been pruned
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_none(),
            "abandoned block {} should have been pruned",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    assert!(!harness.chain.knows_head(&stray_head));
}
|
|
|
|
|
|
|
|
// This checks that states created outside of normal block processing (for skipped slots)
// are pruned correctly.
#[test]
fn prunes_skipped_slots_states() {
    const VALIDATOR_COUNT: usize = 24;
    // 2/3 of the validators — enough for the canonical chain to finalize.
    const VALIDATOR_SUPERMAJORITY: usize = (VALIDATOR_COUNT / 3) * 2;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(Arc::clone(&store), VALIDATOR_COUNT);
    const HONEST_VALIDATOR_COUNT: usize = VALIDATOR_SUPERMAJORITY;
    let honest_validators: Vec<usize> = (0..HONEST_VALIDATOR_COUNT).collect();
    let faulty_validators: Vec<usize> = (HONEST_VALIDATOR_COUNT..VALIDATOR_COUNT).collect();
    let slots_per_epoch: usize = MinimalEthSpec::slots_per_epoch() as usize;

    // Arrange skipped slots so as to cross the epoch boundary. That way, we exercise the code
    // responsible for storing state outside of normal block processing.

    let canonical_slot = harness.get_chain_slot();
    let canonical_state = harness.get_head_state();
    // Stop one slot short of the epoch boundary so the stray fork skips over it.
    let (canonical_blocks_zeroth_epoch, _, canonical_slot, _, canonical_state) = harness
        .add_canonical_chain_blocks(
            canonical_state,
            canonical_slot,
            slots_per_epoch - 1,
            &honest_validators,
        );

    let (stray_blocks, stray_states, stray_slot, _, _) = harness.add_stray_blocks(
        canonical_state.clone(),
        canonical_slot,
        slots_per_epoch,
        &faulty_validators,
    );

    // Preconditions
    for &block_hash in stray_blocks.values() {
        let block = harness.chain.get_block(&block_hash.into()).unwrap();
        assert!(
            block.is_some(),
            "stray block {} should be still present",
            block_hash
        );
    }

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_some(),
            "stray state {} at slot {} should be still present",
            state_hash,
            slot
        );
    }

    let chain_dump = harness.chain.chain_dump().unwrap();
    assert_eq!(
        get_finalized_epoch_boundary_blocks(&chain_dump),
        vec![Hash256::zero().into()].into_iter().collect(),
    );

    // Make sure slots were skipped
    let stray_state = harness
        .chain
        .state_at_slot(stray_slot, StateSkipConfig::WithoutStateRoots)
        .unwrap();
    // A skipped slot repeats the previous slot's block root in `block_roots`.
    let block_root = stray_state.get_block_root(canonical_slot - 1);
    assert_eq!(stray_state.get_block_root(canonical_slot), block_root);
    assert_eq!(stray_state.get_block_root(canonical_slot + 1), block_root);

    let skipped_slots = vec![canonical_slot, canonical_slot + 1];
    for &slot in &skipped_slots {
        assert_eq!(stray_state.get_block_root(slot), block_root);
        let state_hash = stray_state.get_state_root(slot).unwrap();
        assert!(
            harness
                .chain
                .get_state(&state_hash, Some(slot))
                .unwrap()
                .is_some(),
            "skipped slots state should be still present"
        );
    }

    // Trigger finalization
    let (canonical_blocks_post_finalization, _, _, _, _) = harness.add_canonical_chain_blocks(
        canonical_state,
        canonical_slot,
        slots_per_epoch * 6,
        &honest_validators,
    );
    // Guard against a silently non-finalizing chain, which would make the pruning
    // assertions below vacuous.
    assert_eq!(
        harness
            .chain
            .head()
            .unwrap()
            .beacon_state
            .finalized_checkpoint
            .epoch,
        2,
        "chain should have finalized"
    );

    // Postconditions
    let chain_dump = harness.chain.chain_dump().unwrap();
    let finalized_blocks = get_finalized_epoch_boundary_blocks(&chain_dump);
    let canonical_blocks: HashMap<Slot, SignedBeaconBlockHash> = canonical_blocks_zeroth_epoch
        .into_iter()
        .chain(canonical_blocks_post_finalization.into_iter())
        .collect();
    assert_eq!(
        finalized_blocks,
        vec![
            Hash256::zero().into(),
            canonical_blocks[&Slot::new(slots_per_epoch as u64 * 2)],
        ]
        .into_iter()
        .collect()
    );

    for (&slot, &state_hash) in &stray_states {
        let state = harness.chain.get_state(&state_hash.into(), None).unwrap();
        assert!(
            state.is_none(),
            "stray state {} at slot {} should have been deleted",
            state_hash,
            slot
        );
    }

    // The skipped-slot states belonged to the abandoned fork and must be pruned too.
    for &slot in &skipped_slots {
        assert_eq!(stray_state.get_block_root(slot), block_root);
        let state_hash = stray_state.get_state_root(slot).unwrap();
        assert!(
            harness
                .chain
                .get_state(&state_hash, None)
                .unwrap()
                .is_none(),
            "skipped slot {} state {} should have been pruned",
            slot,
            state_hash
        );
    }
}
|
|
|
|
|
2019-11-26 23:54:46 +00:00
|
|
|
/// Check that the head state's slot matches `expected_slot`.
|
|
|
|
fn check_slot(harness: &TestHarness, expected_slot: u64) {
|
2020-01-06 06:30:37 +00:00
|
|
|
let state = &harness.chain.head().expect("should get head").beacon_state;
|
2019-11-26 23:54:46 +00:00
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
state.slot, expected_slot,
|
|
|
|
"head should be at the current slot"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Check that the chain has finalized under best-case assumptions, and check the head slot.
|
|
|
|
fn check_finalization(harness: &TestHarness, expected_slot: u64) {
|
2020-01-06 06:30:37 +00:00
|
|
|
let state = &harness.chain.head().expect("should get head").beacon_state;
|
2019-11-26 23:54:46 +00:00
|
|
|
|
|
|
|
check_slot(harness, expected_slot);
|
|
|
|
|
|
|
|
assert_eq!(
|
|
|
|
state.current_justified_checkpoint.epoch,
|
|
|
|
state.current_epoch() - 1,
|
|
|
|
"the head should be justified one behind the current epoch"
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
state.finalized_checkpoint.epoch,
|
|
|
|
state.current_epoch() - 2,
|
|
|
|
"the head should be finalized two behind the current epoch"
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-05-31 22:13:49 +00:00
|
|
|
/// Check that the HotColdDB's split_slot is equal to the start slot of the last finalized epoch.
|
2020-06-16 01:34:04 +00:00
|
|
|
fn check_split_slot(harness: &TestHarness, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) {
|
2019-11-26 23:54:46 +00:00
|
|
|
let split_slot = store.get_split_slot();
|
|
|
|
assert_eq!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.head()
|
2020-01-06 06:30:37 +00:00
|
|
|
.expect("should get head")
|
2019-11-26 23:54:46 +00:00
|
|
|
.beacon_state
|
|
|
|
.finalized_checkpoint
|
|
|
|
.epoch
|
|
|
|
.start_slot(E::slots_per_epoch()),
|
|
|
|
split_slot
|
|
|
|
);
|
|
|
|
assert_ne!(split_slot, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Check that all the states in a chain dump have the correct tree hash.
|
|
|
|
fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
|
|
|
|
let chain_dump = harness.chain.chain_dump().unwrap();
|
|
|
|
|
|
|
|
assert_eq!(chain_dump.len() as u64, expected_len);
|
|
|
|
|
2019-12-06 07:52:11 +00:00
|
|
|
for checkpoint in &chain_dump {
|
2019-12-06 03:29:06 +00:00
|
|
|
// Check that the tree hash of the stored state is as expected
|
2019-11-26 23:54:46 +00:00
|
|
|
assert_eq!(
|
|
|
|
checkpoint.beacon_state_root,
|
2020-03-04 21:07:27 +00:00
|
|
|
checkpoint.beacon_state.tree_hash_root(),
|
2019-11-26 23:54:46 +00:00
|
|
|
"tree hash of stored state is incorrect"
|
|
|
|
);
|
2019-12-06 03:29:06 +00:00
|
|
|
|
|
|
|
// Check that looking up the state root with no slot hint succeeds.
|
|
|
|
// This tests the state root -> slot mapping.
|
|
|
|
assert_eq!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.store
|
2019-12-06 07:52:11 +00:00
|
|
|
.get_state(&checkpoint.beacon_state_root, None)
|
2019-12-06 03:29:06 +00:00
|
|
|
.expect("no error")
|
|
|
|
.expect("state exists")
|
|
|
|
.slot,
|
|
|
|
checkpoint.beacon_state.slot
|
|
|
|
);
|
2019-11-26 23:54:46 +00:00
|
|
|
}
|
2019-12-06 07:52:11 +00:00
|
|
|
|
|
|
|
// Check the forwards block roots iterator against the chain dump
|
|
|
|
let chain_dump_block_roots = chain_dump
|
|
|
|
.iter()
|
2020-02-10 23:19:36 +00:00
|
|
|
.map(|checkpoint| (checkpoint.beacon_block_root, checkpoint.beacon_block.slot()))
|
2019-12-06 07:52:11 +00:00
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
2020-01-06 06:30:37 +00:00
|
|
|
let head = harness.chain.head().expect("should get head");
|
2020-06-16 01:34:04 +00:00
|
|
|
let mut forward_block_roots = HotColdDB::forwards_block_roots_iterator(
|
2019-12-06 07:52:11 +00:00
|
|
|
harness.chain.store.clone(),
|
|
|
|
Slot::new(0),
|
|
|
|
head.beacon_state,
|
|
|
|
head.beacon_block_root,
|
|
|
|
&harness.spec,
|
|
|
|
)
|
2020-06-09 23:55:44 +00:00
|
|
|
.unwrap()
|
|
|
|
.map(Result::unwrap)
|
2019-12-06 07:52:11 +00:00
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
|
|
// Drop the block roots for skipped slots.
|
|
|
|
forward_block_roots.dedup_by_key(|(block_root, _)| *block_root);
|
|
|
|
|
|
|
|
for i in 0..std::cmp::max(chain_dump_block_roots.len(), forward_block_roots.len()) {
|
|
|
|
assert_eq!(
|
|
|
|
chain_dump_block_roots[i],
|
|
|
|
forward_block_roots[i],
|
|
|
|
"split slot is {}",
|
|
|
|
harness.chain.store.get_split_slot()
|
|
|
|
);
|
|
|
|
}
|
2019-11-26 23:54:46 +00:00
|
|
|
}
|
2019-12-06 03:29:06 +00:00
|
|
|
|
|
|
|
/// Check that state and block root iterators can reach genesis
|
|
|
|
fn check_iterators(harness: &TestHarness) {
|
|
|
|
assert_eq!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.rev_iter_state_roots()
|
2020-01-06 06:30:37 +00:00
|
|
|
.expect("should get iter")
|
2019-12-06 03:29:06 +00:00
|
|
|
.last()
|
2020-06-09 23:55:44 +00:00
|
|
|
.map(Result::unwrap)
|
2019-12-06 03:29:06 +00:00
|
|
|
.map(|(_, slot)| slot),
|
|
|
|
Some(Slot::new(0))
|
|
|
|
);
|
|
|
|
assert_eq!(
|
|
|
|
harness
|
|
|
|
.chain
|
|
|
|
.rev_iter_block_roots()
|
2020-01-06 06:30:37 +00:00
|
|
|
.expect("should get iter")
|
2019-12-06 03:29:06 +00:00
|
|
|
.last()
|
2020-06-09 23:55:44 +00:00
|
|
|
.map(Result::unwrap)
|
2019-12-06 03:29:06 +00:00
|
|
|
.map(|(_, slot)| slot),
|
|
|
|
Some(Slot::new(0))
|
|
|
|
);
|
|
|
|
}
|
2020-04-20 09:59:56 +00:00
|
|
|
|
|
|
|
fn get_finalized_epoch_boundary_blocks(
|
|
|
|
dump: &[BeaconSnapshot<MinimalEthSpec>],
|
|
|
|
) -> HashSet<SignedBeaconBlockHash> {
|
|
|
|
dump.iter()
|
|
|
|
.cloned()
|
|
|
|
.map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint.root.into())
|
|
|
|
.collect()
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Collect the set of block roots of all snapshots in `dump`.
fn get_blocks(dump: &[BeaconSnapshot<MinimalEthSpec>]) -> HashSet<SignedBeaconBlockHash> {
    dump.iter()
        // No `.cloned()`: only the `Copy` block-root hash is needed, so cloning each
        // entire snapshot (with its full `BeaconState`) would be pure waste.
        .map(|checkpoint| checkpoint.beacon_block_root.into())
        .collect()
}
|