lighthouse/beacon_node/beacon_chain/tests/store_tests.rs

#![cfg(not(debug_assertions))]

#[macro_use]
extern crate lazy_static;

use beacon_chain::test_utils::{
    AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
};
use rand::Rng;
use sloggers::{null::NullLoggerBuilder, Build};
use std::sync::Arc;
use store::{DiskStore, Store};
use tempfile::{tempdir, TempDir};
use tree_hash::TreeHash;
use types::test_utils::{SeedableRng, XorShiftRng};
use types::*;

// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 24;

lazy_static! {
    /// A cached set of keys.
    static ref KEYPAIRS: Vec<Keypair> = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT);
}
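
// These tests use the minimal spec, which keeps epochs (and therefore finalization) short.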
type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
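
/// Open a `DiskStore` with hot and cold databases located inside `db_path`.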
fn get_store(db_path: &TempDir) -> Arc<DiskStore> {
    let spec = MinimalEthSpec::default_spec();
    let hot_path = db_path.path().join("hot_db");
    let cold_path = db_path.path().join("cold_db");
    let slots_per_restore_point = MinimalEthSpec::slots_per_historical_root() as u64;
    let log = NullLoggerBuilder.build().expect("logger should build");

    Arc::new(
        DiskStore::open::<E>(&hot_path, &cold_path, slots_per_restore_point, spec, log)
            .expect("disk store should initialize"),
    )
}
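
/// Create a `TestHarness` backed by `store`, with `validator_count` deterministic validators,
/// advanced by one slot and ready for block production.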
fn get_harness(store: Arc<DiskStore>, validator_count: usize) -> TestHarness {
    let harness = BeaconChainHarness::new_with_disk_store(
        MinimalEthSpec,
        store,
        KEYPAIRS[0..validator_count].to_vec(),
    );
    harness.advance_slot();
    harness
}
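
// Produce a block at every slot with all validators attesting; the chain should finalize
// and the store's hot -> cold split should track finalization.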
#[test]
fn full_participation_no_skips() {
    let num_blocks_produced = E::slots_per_epoch() * 5;

    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);

    harness.extend_chain(
        num_blocks_produced as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    check_finalization(&harness, num_blocks_produced);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, num_blocks_produced + 1);
    check_iterators(&harness);
}
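
// Produce blocks on a random ~80% of slots; the skipped slots should not break the head,
// the split slot, the chain dump or the iterators.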
#[test]
fn randomised_skips() {
    let num_slots = E::slots_per_epoch() * 5;
    let mut num_blocks_produced = 0;

    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);
    let rng = &mut XorShiftRng::from_seed([42; 16]);

    let mut head_slot = 0;

    for slot in 1..=num_slots {
        if rng.gen_bool(0.8) {
            harness.extend_chain(
                1,
                BlockStrategy::ForkCanonicalChainAt {
                    previous_slot: Slot::new(head_slot),
                    first_slot: Slot::new(slot),
                },
                AttestationStrategy::AllValidators,
            );
            harness.advance_slot();
            num_blocks_produced += 1;
            head_slot = slot;
        } else {
            harness.advance_slot();
        }
    }

    let state = &harness.chain.head().beacon_state;

    assert_eq!(state.slot, num_slots, "head should be at the current slot");

    check_split_slot(&harness, store);
    check_chain_dump(&harness, num_blocks_produced + 1);
    check_iterators(&harness);
}
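
// Skip a large number of slots (several historical-root periods) between two runs of blocks,
// then confirm that finalization, the hot -> cold migration and the iterators still behave
// correctly.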
#[test]
fn long_skip() {
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), VALIDATOR_COUNT);

    // Number of blocks to create in the first run, intentionally not falling on an epoch
    // boundary in order to check that the DB hot -> cold migration is capable of reaching
    // back across the skip distance, and correctly migrating those extra non-finalized states.
    let initial_blocks = E::slots_per_epoch() * 5 + E::slots_per_epoch() / 2;
    let skip_slots = E::slots_per_historical_root() as u64 * 8;
    let final_blocks = E::slots_per_epoch() * 4;

    // 1. Produce an initial chain of blocks and reach finality
    harness.extend_chain(
        initial_blocks as usize,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    check_finalization(&harness, initial_blocks);

    // 2. Skip slots
    for _ in 0..skip_slots {
        harness.advance_slot();
    }

    // 3. Produce more blocks, establish a new finalized epoch
    harness.extend_chain(
        final_blocks as usize,
        BlockStrategy::ForkCanonicalChainAt {
            previous_slot: Slot::new(initial_blocks),
            first_slot: Slot::new(initial_blocks + skip_slots as u64 + 1),
        },
        AttestationStrategy::AllValidators,
    );

    check_finalization(&harness, initial_blocks + skip_slots + final_blocks);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, initial_blocks + final_blocks + 1);
    check_iterators(&harness);
}

/// Go forward to the point where the genesis randao value is no longer part of the vector.
///
/// This implicitly checks that:
/// 1. The chunked vector scheme doesn't attempt to store an incorrect genesis value
/// 2. We correctly load the genesis value for all required slots
///
/// NOTE: this test takes about a minute to run
#[test]
fn randao_genesis_storage() {
    let validator_count = 8;
    let db_path = tempdir().unwrap();
    let store = get_store(&db_path);
    let harness = get_harness(store.clone(), validator_count);

    let num_slots = E::slots_per_epoch() * (E::epochs_per_historical_vector() - 1) as u64;

    // Check we have a non-trivial genesis value
    let genesis_value = *harness
        .chain
        .head()
        .beacon_state
        .get_randao_mix(Epoch::new(0))
        .expect("randao mix ok");
    assert!(!genesis_value.is_zero());

    harness.extend_chain(
        num_slots as usize - 1,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );

    // Check that the genesis value is still present
    assert!(harness
        .chain
        .head()
        .beacon_state
        .randao_mixes
        .iter()
        .find(|x| **x == genesis_value)
        .is_some());

    // Then upon adding one more block, it isn't
    harness.advance_slot();
    harness.extend_chain(
        1,
        BlockStrategy::OnCanonicalHead,
        AttestationStrategy::AllValidators,
    );
    assert!(harness
        .chain
        .head()
        .beacon_state
        .randao_mixes
        .iter()
        .find(|x| **x == genesis_value)
        .is_none());

    check_finalization(&harness, num_slots);
    check_split_slot(&harness, store);
    check_chain_dump(&harness, num_slots + 1);
    check_iterators(&harness);
}

// Check that closing and reopening a freezer DB restores the split slot to its correct value.
#[test]
fn split_slot_restore() {
    let db_path = tempdir().unwrap();

    let split_slot = {
        let store = get_store(&db_path);
        let harness = get_harness(store.clone(), VALIDATOR_COUNT);
        let num_blocks = 4 * E::slots_per_epoch();

        harness.extend_chain(
            num_blocks as usize,
            BlockStrategy::OnCanonicalHead,
            AttestationStrategy::AllValidators,
        );

        store.get_split_slot()
    };
    assert_ne!(split_slot, Slot::new(0));

    // Re-open the store
    let store = get_store(&db_path);
    assert_eq!(store.get_split_slot(), split_slot);
}

/// Check that the head state's slot matches `expected_slot`.
fn check_slot(harness: &TestHarness, expected_slot: u64) {
    let state = &harness.chain.head().beacon_state;

    assert_eq!(
        state.slot, expected_slot,
        "head should be at the current slot"
    );
}

/// Check that the chain has finalized under best-case assumptions, and check the head slot.
fn check_finalization(harness: &TestHarness, expected_slot: u64) {
    let state = &harness.chain.head().beacon_state;

    check_slot(harness, expected_slot);

    assert_eq!(
        state.current_justified_checkpoint.epoch,
        state.current_epoch() - 1,
        "the head should be justified one behind the current epoch"
    );
    assert_eq!(
        state.finalized_checkpoint.epoch,
        state.current_epoch() - 2,
        "the head should be finalized two behind the current epoch"
    );
}

/// Check that the `DiskStore`'s split slot is equal to the start slot of the last finalized epoch.
fn check_split_slot(harness: &TestHarness, store: Arc<DiskStore>) {
    let split_slot = store.get_split_slot();
    assert_eq!(
        harness
            .chain
            .head()
            .beacon_state
            .finalized_checkpoint
            .epoch
            .start_slot(E::slots_per_epoch()),
        split_slot
    );
    assert_ne!(split_slot, 0);
}

/// Check that all the states in a chain dump have the correct tree hash.
fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
    let chain_dump = harness.chain.chain_dump().unwrap();

    assert_eq!(chain_dump.len() as u64, expected_len);

    for checkpoint in chain_dump {
        // Check that the tree hash of the stored state is as expected
        assert_eq!(
            checkpoint.beacon_state_root,
            Hash256::from_slice(&checkpoint.beacon_state.tree_hash_root()),
            "tree hash of stored state is incorrect"
        );

        // Check that looking up the state root with no slot hint succeeds.
        // This tests the state root -> slot mapping.
        assert_eq!(
            harness
                .chain
                .store
                .get_state::<E>(&checkpoint.beacon_state_root, None)
                .expect("no error")
                .expect("state exists")
                .slot,
            checkpoint.beacon_state.slot
        );
    }
}

/// Check that the state and block root iterators can reach genesis.
fn check_iterators(harness: &TestHarness) {
    assert_eq!(
        harness
            .chain
            .rev_iter_state_roots()
            .last()
            .map(|(_, slot)| slot),
        Some(Slot::new(0))
    );
    assert_eq!(
        harness
            .chain
            .rev_iter_block_roots()
            .last()
            .map(|(_, slot)| slot),
        Some(Slot::new(0))
    );
}